From 1143285cd84022196b00cde4ce170aedd1cae896 Mon Sep 17 00:00:00 2001 From: Pmarquez <48651252+pxp9@users.noreply.github.com> Date: Thu, 31 Aug 2023 11:37:31 +0200 Subject: [PATCH] Better makefile (#140) * proper alignment in `Makefile` * use diesel cli arguments in `Makefile` * fix mistake with diesel cli * better organization * user/password consistency in `Makefile` * add `.PHONY` target * move `clippy` target * remove unnecessary environment variables * fix problem from 4d22be3e5 * maybe this is better for users/passwords * fix fuck up again * better organization and wait for MySQL * format `README.md` * format `README.md` Rust code * update `README.md` * another probably better approach * better output * bold echoes * update `make doc` * use variables in `Makefile` * use `--migration-dir` for Diesel * add variables for migrations * add `fang/mysql_migrations/diesel.toml` and use variables for Diesel config files * add default goal * add `.env` recipe * add variables for URLs of DBs * format `Makefile` * fix fuck-up in last commit * setup `Makefile` for parallel execution * fix workflow * fix workflow var * move `make` variables into `.env` file This commit makes the repository dependent on `.env` being both a valid `.env` file for `dotenvy` to use and being valid to include in the `Makefile`. 
* add quotation marks for `Makefile` substitution * solve race condition in `clean_sqlite` * fix `README.md` * stupid problems require stupid solutions * remove unnecessary clean * add `sleep`s * add docker image versions as a `.env` and `Makefile` variable * latest databases but without latest * small clarification in `README.md` --------- Co-authored-by: Dopplerian --- .env | 28 +++++- .github/workflows/rust.yml | 3 + .gitignore | 1 - Makefile | 129 ++++++++++++++++++++----- fang/README.md | 153 ++++++++++++++++-------------- fang/mysql_migrations/diesel.toml | 2 + 6 files changed, 216 insertions(+), 100 deletions(-) create mode 100644 fang/mysql_migrations/diesel.toml diff --git a/.env b/.env index 6d0480c8..43ea1787 100644 --- a/.env +++ b/.env @@ -1 +1,27 @@ -DATABASE_URL=postgres://postgres:postgres@localhost/fang +POSTGRES_CONTAINER=postgres +POSTGRES_VERSION=15.4 +POSTGRES_DB=fang +POSTGRES_USER=postgres +POSTGRES_PASSWORD=postgres +POSTGRES_DIESEL_DIR=fang/postgres_migrations +POSTGRES_MIGRATIONS=${POSTGRES_DIESEL_DIR}/migrations +POSTGRES_CONFIG=${POSTGRES_DIESEL_DIR}/diesel.toml + +MYSQL_CONTAINER=mysql +MYSQL_VERSION=8.1 +MYSQL_DB=fang +MYSQL_USER=root +MYSQL_PASSWORD=mysql +MYSQL_DIESEL_DIR=fang/mysql_migrations +MYSQL_MIGRATIONS=${MYSQL_DIESEL_DIR}/migrations +MYSQL_CONFIG=${MYSQL_DIESEL_DIR}/diesel.toml + +SQLITE_FILE=fang.db +SQLITE_DIESEL_DIR=fang/sqlite_migrations +SQLITE_MIGRATIONS=${SQLITE_DIESEL_DIR}/migrations +SQLITE_CONFIG=${SQLITE_DIESEL_DIR}/diesel.toml + +HOST=127.0.0.1 +POSTGRES_URL=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${HOST}/${POSTGRES_DB} +MYSQL_URL=mysql://${MYSQL_USER}:${MYSQL_PASSWORD}@${HOST}/${MYSQL_DB} +DATABASE_URL=${POSTGRES_URL} diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 6e6d43a1..a36adf33 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -7,6 +7,9 @@ on: # Check if it works with current dependencies (weekly on Wednesday 2:32 UTC) - cron: '32 2 * * 3' +env 
: + DATABASE_URL : postgres://postgres:postgres@localhost/fang + jobs: test: name: Test diff --git a/.gitignore b/.gitignore index 57b9c821..61005740 100644 --- a/.gitignore +++ b/.gitignore @@ -1,6 +1,5 @@ **/target Cargo.lock -src/schema.rs docs/content/docs/CHANGELOG.md docs/content/docs/README.md fang.db diff --git a/Makefile b/Makefile index 08a0723d..fc340091 100644 --- a/Makefile +++ b/Makefile @@ -1,46 +1,123 @@ +include .env + +BOLD='\033[1m' +END_BOLD='\033[0m' + +DB_TARGETS=db_postgres db_mysql db_sqlite +WAIT_TARGETS=wait_for_postgres wait_for_mysql wait_for_sqlite +DIESEL_TARGETS=diesel_postgres diesel_mysql diesel_sqlite +CLEAN_TARGETS=clean_postgres clean_mysql clean_sqlite +STOP_TARGETS=stop_postgres stop_mysql stop_sqlite + +.DEFAULT_GOAL=default + +default: db tests ignored stop + +.PHONY: db $(DB_TARGETS) \ + $(WAIT_TARGETS) \ + diesel $(DIESEL_TARGETS) \ + clean $(CLEAN_TARGETS) \ + stop $(STOP_TARGETS) \ + default clippy tests ignored doc .FORCE + +.SILENT: $(DB_TARGETS) $(WAIT_TARGETS) $(DIESEL_TARGETS) $(CLEAN_TARGETS) $(STOP_TARGETS) + +.NOTPARALLEL: default + +db: $(DB_TARGETS) + db_postgres: - docker run --rm -d --name postgres -p 5432:5432 \ - -e POSTGRES_DB=fang \ - -e POSTGRES_USER=postgres \ - -e POSTGRES_PASSWORD=postgres \ - postgres:latest + @echo -e $(BOLD)Setting up Postgres database...$(END_BOLD) + docker run --rm -d --name "$(POSTGRES_CONTAINER)" -p 5432:5432 \ + -e POSTGRES_DB="$(POSTGRES_DB)" \ + -e POSTGRES_USER="$(POSTGRES_USER)" \ + -e POSTGRES_PASSWORD="$(POSTGRES_PASSWORD)" \ + postgres:"$(POSTGRES_VERSION)" + $(MAKE) diesel_postgres -# login is root fang db_mysql: - docker run --rm -d --name mysql -p 3306:3306 \ - -e MYSQL_DATABASE=fang \ - -e MYSQL_ROOT_PASSWORD=fang \ - -e TZ=UTC \ - mysql:latest + @echo -e $(BOLD)Setting up MySQL database...$(END_BOLD) + docker run --rm -d --name "$(MYSQL_CONTAINER)" -p 3306:3306 \ + -e MYSQL_DATABASE="$(MYSQL_DB)" \ + -e MYSQL_ROOT_PASSWORD="$(MYSQL_PASSWORD)" \ + -e TZ=UTC \ + 
mysql:"$(MYSQL_VERSION)" + $(MAKE) diesel_mysql db_sqlite: - sqlite3 fang.db "VACUUM;" - -clippy: - cargo clippy --verbose --all-targets --all-features -- -D warnings + @echo -e $(BOLD)Setting up SQLite database...$(END_BOLD) + sqlite3 "$(SQLITE_FILE)" "VACUUM;" + $(MAKE) diesel_sqlite -diesel_sqlite: - cd fang/sqlite_migrations && DATABASE_URL=sqlite://../../fang.db diesel migration run +diesel: $(DIESEL_TARGETS) diesel_postgres: - cd fang/postgres_migrations && DATABASE_URL=postgres://postgres:postgres@localhost/fang diesel migration run + @echo -e $(BOLD)Running Diesel migrations on Postgres database...$(END_BOLD) + while ! diesel migration run --database-url "$(POSTGRES_URL)" --migration-dir "$(POSTGRES_MIGRATIONS)" --config-file "$(POSTGRES_CONFIG)" 2> /dev/null; \ + do \ + sleep 1; \ + done diesel_mysql: - cd fang/mysql_migrations && DATABASE_URL=mysql://root:fang@127.0.0.1/fang diesel migration run + @echo -e $(BOLD)Running Diesel migrations on MySQL database...$(END_BOLD) + while ! diesel migration run --database-url "$(MYSQL_URL)" --migration-dir "$(MYSQL_MIGRATIONS)" --config-file "$(MYSQL_CONFIG)" 2> /dev/null; \ + do \ + sleep 1; \ + done + +diesel_sqlite: + @echo -e $(BOLD)Running Diesel migrations on SQLite database...$(END_BOLD) + while ! 
diesel migration run --database-url sqlite://"$(SQLITE_FILE)" --migration-dir "$(SQLITE_MIGRATIONS)" --config-file "$(SQLITE_CONFIG)" 2> /dev/null; \ + do \ + sleep 1; \ + done + +clean: $(CLEAN_TARGETS) + +clean_postgres: + @echo -e $(BOLD)Cleaning Postgres database...$(END_BOLD) + docker exec "$(POSTGRES_CONTAINER)" dropdb -U "$(POSTGRES_USER)" "$(POSTGRES_DB)" + docker exec "$(POSTGRES_CONTAINER)" psql -U "$(POSTGRES_USER)" --command="CREATE DATABASE $(POSTGRES_DB);" + $(MAKE) diesel_postgres + +clean_mysql: + @echo -e $(BOLD)Cleaning MySQL database...$(END_BOLD) + docker exec "$(MYSQL_CONTAINER)" mysql \ + --user="$(MYSQL_USER)" \ + --password="$(MYSQL_PASSWORD)" \ + --execute="DROP DATABASE $(MYSQL_DB); CREATE DATABASE $(MYSQL_DB);" + $(MAKE) diesel_mysql -stop_mysql: - docker kill mysql +clean_sqlite: + @echo -e $(BOLD)Cleaning SQLite database...$(END_BOLD) + $(MAKE) stop_sqlite + $(MAKE) db_sqlite + +stop: $(STOP_TARGETS) stop_postgres: - docker kill postgres + @echo -e $(BOLD)Stopping Postgres database...$(END_BOLD) + docker kill "$(POSTGRES_CONTAINER)" + +stop_mysql: + @echo -e $(BOLD)Stopping MySQL database...$(END_BOLD) + docker kill "$(MYSQL_CONTAINER)" stop_sqlite: - rm fang.db + @echo -e $(BOLD)Stopping SQLite database...$(END_BOLD) + rm "$(SQLITE_FILE)" + +clippy: + cargo clippy --verbose --all-targets --all-features -- -D warnings + tests: - DATABASE_URL=postgres://postgres:postgres@localhost/fang cargo test --all-features -- --color always --nocapture + @echo -e $(BOLD)Running tests...$(END_BOLD) + cargo test --all-features -- --color always --nocapture ignored: - DATABASE_URL=postgres://postgres:postgres@localhost/fang cargo test --all-features -- --color always --nocapture --ignored + @echo -e $(BOLD)Running ignored tests...$(END_BOLD) + cargo test --all-features -- --color always --nocapture --ignored + $(MAKE) clean doc: - cargo doc --open + cargo doc --package fang --open diff --git a/fang/README.md b/fang/README.md index df52d776..d4b1e092 
100644 --- a/fang/README.md +++ b/fang/README.md @@ -8,50 +8,53 @@ Background task processing library for Rust. It uses Postgres DB as a task queue ## Key Features - Here are some of the fang's key features: - - - Async and threaded workers. - Workers can be started in threads (threaded workers) or `tokio` tasks (async workers) - - Scheduled tasks. - Tasks can be scheduled at any time in the future - - Periodic (CRON) tasks. - Tasks can be scheduled using cron expressions - - Unique tasks. - Tasks are not duplicated in the queue if they are unique - - Single-purpose workers. - Tasks are stored in a single table but workers can execute only tasks of the specific type - - Retries. - Tasks can be retried with a custom backoff mode +Here are some of the fang's key features: + +- Async and threaded workers. + Workers can be started in threads (threaded workers) or `tokio` tasks (async workers) +- Scheduled tasks. + Tasks can be scheduled at any time in the future +- Periodic (CRON) tasks. + Tasks can be scheduled using cron expressions +- Unique tasks. + Tasks are not duplicated in the queue if they are unique +- Single-purpose workers. + Tasks are stored in a single table but workers can execute only tasks of the specific type +- Retries. + Tasks can be retried with a custom backoff mode ## Installation 1. Add this to your Cargo.toml - #### the Blocking feature + ```toml [dependencies] fang = { version = "0.10.4" , features = ["blocking"], default-features = false } ``` #### the Asynk feature + ```toml [dependencies] fang = { version = "0.10.4" , features = ["asynk"], default-features = false } ``` #### the Asynk feature with derive macro + ```toml [dependencies] fang = { version = "0.10.4" , features = ["asynk", "derive-error" ], default-features = false } ``` #### All features + ```toml fang = { version = "0.10.4" } ``` -*Supports rustc 1.62+* +_Supports rustc 1.62+_ 2. Create the `fang_tasks` table in the Postgres database. 
The migration can be found in [the migrations directory](https://github.com/ayrat555/fang/blob/master/fang/postgres_migrations/migrations/2022-08-20-151615_create_fang_tasks/up.sql). @@ -60,11 +63,11 @@ fang = { version = "0.10.4" } ### Defining a task #### Blocking feature + Every task should implement the `fang::Runnable` trait which is used by `fang` to execute it. If you have a `CustomError`, it is recommended to implement `From`. So this way you can use [? operator](https://stackoverflow.com/questions/42917566/what-is-this-question-mark-operator-about#42921174) inside the `run` function available in `fang::Runnable` trait. - You can easily implement it with the macro `ToFangError`. This macro is only available in the feature `derive-error`. ```rust @@ -79,21 +82,20 @@ use std::fmt::Debug; #[derive(Debug, ToFangError)] enum CustomError { - ErrorOne(String), - ErrorTwo(u32), + ErrorOne(String), + ErrorTwo(u32), } fn my_func(num : u16) -> Result<(), CustomError> { + if num == 0 { + Err(CustomError::ErrorOne("is zero".to_string())) + } - if num == 0 { - Err(CustomError::ErrorOne("is zero".to_string())) - } - - if num > 500 { - Err(CustomError::ErrorTwo(num)) - } + if num > 500 { + Err(CustomError::ErrorTwo(num)) + } - Ok(()) + Ok(()) } #[derive(Serialize, Deserialize)] @@ -107,8 +109,8 @@ impl Runnable for MyTask { fn run(&self, _queue: &dyn Queueable) -> Result<(), FangError> { println!("the number is {}", self.number); - my_func(self.number)?; - // You can use ? operator because + my_func(self.number)?; + // You can use ? operator because // From is implemented thanks to ToFangError derive macro. Ok(()) @@ -117,13 +119,13 @@ impl Runnable for MyTask { // If `uniq` is set to true and the task is already in the storage, it won't be inserted again // The existing record will be returned for for any insertions operaiton fn uniq(&self) -> bool { - true + true } // This will be useful if you want to filter tasks. 
// the default value is `common` fn task_type(&self) -> String { - "my_task".to_string() + "my_task".to_string() } // This will be useful if you would like to schedule tasks. @@ -136,12 +138,12 @@ impl Runnable for MyTask { // the maximum number of retries. Set it to 0 to make it not retriable // the default value is 20 fn max_retries(&self) -> i32 { - 20 + 20 } // backoff mode for retries fn backoff(&self, attempt: u32) -> u32 { - u32::pow(2, attempt) + u32::pow(2, attempt) } } ``` @@ -150,11 +152,12 @@ As you can see from the example above, the trait implementation has `#[typetag:: The second parameter of the `run` function is a struct that implements `fang::Queueable`. You can re-use it to manipulate the task queue, for example, to add a new job during the current job's execution. If you don't need it, just ignore it. - #### Asynk feature + Every task should implement `fang::AsyncRunnable` trait which is used by `fang` to execute it. Be careful not to call two implementations of the AsyncRunnable trait with the same name, because it will cause a failure in the `typetag` crate. + ```rust use fang::AsyncRunnable; use fang::asynk::async_queue::AsyncQueueable; @@ -164,7 +167,7 @@ use fang::async_trait; #[derive(Serialize, Deserialize)] #[serde(crate = "fang::serde")] struct AsyncTask { - pub number: u16, + pub number: u16, } #[typetag::serde] @@ -183,7 +186,7 @@ impl AsyncRunnable for AsyncTask { // If `uniq` is set to true and the task is already in the storage, it won't be inserted again // The existing record will be returned for for any insertions operaiton fn uniq(&self) -> bool { - true + true } // This will be useful if you would like to schedule tasks. @@ -196,12 +199,12 @@ impl AsyncRunnable for AsyncTask { // the maximum number of retries. 
Set it to 0 to make it not retriable // the default value is 20 fn max_retries(&self) -> i32 { - 20 + 20 } // backoff mode for retries fn backoff(&self, attempt: u32) -> u32 { - u32::pow(2, attempt) + u32::pow(2, attempt) } } ``` @@ -215,13 +218,13 @@ Example: If your timezone is UTC + 2 and you want to schedule at 11:00: ```rust - let expression = "0 0 9 * * * *"; +let expression = "0 0 9 * * * *"; ``` - ### Enqueuing a task #### the Blocking feature + To enqueue a task use `Queue::enqueue_task` ```rust @@ -231,16 +234,17 @@ use fang::Queue; // create a fang queue - let queue = Queue::builder().connection_pool(pool).build(); - - let task_inserted = queue.insert_task(&MyTask::new(1)).unwrap(); +let queue = Queue::builder().connection_pool(pool).build(); +let task_inserted = queue.insert_task(&MyTask::new(1)).unwrap(); ``` #### the Asynk feature + To enqueue a task use `AsyncQueueable::insert_task`. -For Postgres backend. +For Postgres backend: + ```rust use fang::asynk::async_queue::AsyncQueue; use fang::NoTls; @@ -258,27 +262,27 @@ let mut queue = AsyncQueue::builder() // Always connect first in order to perform any operation queue.connect(NoTls).await.unwrap(); - ``` -As an easy example, we are using NoTls type. If for some reason you would like to encrypt Postgres requests, you can use [openssl](https://docs.rs/postgres-openssl/latest/postgres_openssl/) or [native-tls](https://docs.rs/postgres-native-tls/latest/postgres_native_tls/). + +As an easy example, we are using NoTls type. If for some reason you would like to encrypt Postgres requests, you can use [openssl](https://docs.rs/postgres-openssl/latest/postgres_openssl/) or [native-tls](https://docs.rs/postgres-native-tls/latest/postgres_native_tls/). 
```rust // AsyncTask from the first example let task = AsyncTask { 8 }; let task_returned = queue - .insert_task(&task as &dyn AsyncRunnable) - .await - .unwrap(); + .insert_task(&task as &dyn AsyncRunnable) + .await + .unwrap(); ``` ### Starting workers #### the Blocking feature + Every worker runs in a separate thread. In case of panic, they are always restarted. Use `WorkerPool` to start workers. Use `WorkerPool::builder` to create your worker pool and run tasks. - ```rust use fang::WorkerPool; use fang::Queue; @@ -288,7 +292,7 @@ use fang::Queue; let mut worker_pool = WorkerPool::::builder() .queue(queue) .number_of_workers(3_u32) - // if you want to run tasks of the specific kind + // if you want to run tasks of the specific kind .task_type("my_task_type") .build(); @@ -296,6 +300,7 @@ worker_pool.start(); ``` #### the Asynk feature + Every worker runs in a separate `tokio` task. In case of panic, they are always restarted. Use `AsyncWorkerPool` to start workers. @@ -308,14 +313,13 @@ use fang::asynk::async_worker_pool::AsyncWorkerPool; let mut pool: AsyncWorkerPool> = AsyncWorkerPool::builder() .number_of_workers(max_pool_size) .queue(queue.clone()) - // if you want to run tasks of the specific kind + // if you want to run tasks of the specific kind .task_type("my_task_type") .build(); pool.start().await; ``` - Check out: - [Simple Worker Example](https://github.com/ayrat555/fang/tree/master/fang_examples/blocking/simple_worker) - simple worker example @@ -370,8 +374,8 @@ pub struct SleepParams { If there are no tasks in the DB, a worker sleeps for `sleep_period` and each time this value increases by `sleep_step` until it reaches `max_sleep_period`. `min_sleep_period` is the initial value for `sleep_period`. All values are in seconds. - Use `set_sleep_params` to set it: + ```rust let sleep_params = SleepParams { sleep_period: Duration::from_secs(2), @@ -392,44 +396,49 @@ Set sleep params with worker pools `TypeBuilder` in both modules. 5. 
Create a new Pull Request ### Running tests locally + - Install diesel_cli. -``` + +```sh cargo install diesel_cli --no-default-features --features "postgres sqlite mysql" ``` + - Install docker on your machine. -- Run a Postgres docker container. (See in Makefile.) -``` -make db_postgres -``` +- Install SQLite 3 on your machine. -- Run the migrations -``` -make diesel_postgres -``` +- Setup databases for testing. -- Run tests -``` -make tests +```sh +make -j db ``` -- Run dirty//long tests, DB must be recreated afterwards. -``` -make ignored +- Run tests. `make db` does not need to be run in between each test cycle. + +```sh +make -j tests ``` -- Kill the docker container +- Run dirty/long tests. + +```sh +make -j ignored ``` -make stop + +- Take down databases. + +```sh +make -j stop ``` +The `-j` flag in the above examples enables parallelism for `make`; it is not necessary but highly recommended. + ## Authors - Ayrat Badykov (@ayrat555) - Pepe Márquez (@pxp9) - [s1]: https://img.shields.io/crates/v/fang.svg [docs-badge]: https://img.shields.io/badge/docs-website-blue.svg [ci]: https://crates.io/crates/fang diff --git a/fang/mysql_migrations/diesel.toml b/fang/mysql_migrations/diesel.toml new file mode 100644 index 00000000..ddc9bd5d --- /dev/null +++ b/fang/mysql_migrations/diesel.toml @@ -0,0 +1,2 @@ +[print_schema] +file = "../src/blocking/mysql_schema.rs" \ No newline at end of file