diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 0000000..a626bc6
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,7 @@
+version: 2
+updates:
+  - package-ecosystem: cargo
+    directory: "/"
+    schedule:
+      interval: weekly
+    open-pull-requests-limit: 10
diff --git a/.github/workflows/auto_dependabot.yml b/.github/workflows/auto_dependabot.yml
new file mode 100644
index 0000000..e0da855
--- /dev/null
+++ b/.github/workflows/auto_dependabot.yml
@@ -0,0 +1,27 @@
+name: Auto Dependabot
+on: pull_request
+
+permissions:
+  contents: write
+  pull-requests: write
+
+jobs:
+  dependabot:
+    runs-on: ubuntu-latest
+    if: ${{ github.actor == 'dependabot[bot]' }}
+    steps:
+      - name: Dependabot metadata
+        id: metadata
+        uses: dependabot/fetch-metadata@v1.1.1
+        with:
+          github-token: "${{ secrets.GITHUB_TOKEN }}"
+      - name: Approve a PR
+        run: gh pr review --approve "$PR_URL"
+        env:
+          PR_URL: ${{github.event.pull_request.html_url}}
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
+      - name: Enable auto-merge for Dependabot PRs
+        run: gh pr merge --auto --squash "$PR_URL"
+        env:
+          PR_URL: ${{github.event.pull_request.html_url}}
+          GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..431327b
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,16 @@
+name: Release
+
+on:
+  release:
+    types: [released]
+
+jobs:
+  release-crate:
+    name: release crate
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions-rs/toolchain@v1
+        with:
+          toolchain: "1.81.0"
+      - run: cargo publish --locked --token ${{ secrets.CARGO_REGISTRY_TOKEN }}
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 0000000..9a6e40e
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,75 @@
+name: Tests
+
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+
+jobs:
+  pre_job:
+    runs-on: ubuntu-latest
+    outputs:
+      should_skip: ${{ steps.skip_check.outputs.should_skip }}
+    steps:
+      - id: skip_check
+        uses: fkirc/skip-duplicate-actions@master
+        with:
+          skip_after_successful_duplicate: "true"
+          paths_ignore: '["**/*.md", "**/*.svg", "**/*.png", ".gitignore"]'
+
+  lint_check:
+    needs: pre_job
+    if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v1
+      - uses: actions/cache@v2
+        with:
+          path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+            target
+          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+      - name: Install toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          profile: minimal
+          toolchain: "1.81.0"
+          components: clippy
+          override: true
+      - name: Checking code format
+        run: cargo fmt -- --check --config use_try_shorthand=true
+      - name: Clippy
+        uses: clechasseur/rs-clippy-check@v3
+        with:
+          args: --all-features --bins --examples --tests --benches -- -W clippy::all -W clippy::pedantic -D warnings
+
+  tests:
+    needs: pre_job
+    if: ${{ needs.pre_job.outputs.should_skip != 'true' }}
+    runs-on: ubuntu-latest
+    container:
+      image: docker.binary.picodata.io/picodata:24.6.1
+      options: --user root
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/cache@v2
+        with:
+          path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+            target
+          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
+      - name: Install system dependencies
+        run: dnf install -y gcc git
+      - name: Install toolchain
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: "1.81.0"
+          override: true
+      - name: Run tests
+        uses: actions-rs/cargo@v1
+        with:
+          command: test
+          args: --all-features
diff --git a/.gitignore b/.gitignore
index 2b789c6..55d6941 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,4 @@
 /target
-tmp
-local
+/local
+/tests/tmp/*
+!/tests/tmp/.keep
diff --git a/Cargo.lock b/Cargo.lock
index 15d948d..13edca6 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -178,6 +178,12 @@ dependencies = [
  "windows-sys 0.48.0",
 ]
 
+[[package]]
+name = "constcat"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "4938185353434999ef52c81753c8cca8955ed38042fc29913db3751916f3b7ab"
+
 [[package]]
 name = "cpufeatures"
 version = "0.2.16"
@@ -594,6 +600,7 @@ dependencies = [
  "anyhow",
  "clap",
  "colog",
+ "constcat",
  "ctrlc",
  "flate2",
  "include_dir",
diff --git a/Cargo.toml b/Cargo.toml
index 02078fb..ca834a1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -34,3 +34,6 @@ log = "0.4.22"
 [[bin]]
 name = "cargo-pike"
 path = "src/main.rs"
+
+[dev-dependencies]
+constcat = "0.5.1"
diff --git a/MAINTENANCE.md b/MAINTENANCE.md
new file mode 100644
index 0000000..1c37b09
--- /dev/null
+++ b/MAINTENANCE.md
@@ -0,0 +1,41 @@
+# Maintenance guide
+
+## Making a new release
+
+1. Update the master branch
+
+   ```shell
+   git checkout master && git pull
+   ```
+
+1. Update the project version in `Cargo.toml`
+
+   ```shell
+   vim Cargo.toml
+   ```
+
+1. Update `Cargo.lock`
+
+   ```shell
+   cargo update
+   ```
+
+1. Commit `Cargo.toml` and `Cargo.lock` with the new version
+
+   ```shell
+   git commit -m "bump version" Cargo.toml Cargo.lock
+   ```
+
+1. Make a new git tag
+
+   ```shell
+   git tag -a <version>
+   ```
+
+1. Push everything upstream
+
+   ```shell
+   git push origin master --follow-tags
+   ```
+
+1. [Create](https://github.com/picodata/pike/releases/new) a new release, specifying the pushed tag
diff --git a/README.md b/README.md
index f74f376..5018c62 100644
--- a/README.md
+++ b/README.md
@@ -69,12 +69,12 @@ cargo pike run --topology topology.toml --data-dir ./tmp
 
 #### Доступные опции
 
-- `-t, --topology <TOPOLOGY>` - Путь к файлу топологии. Значение по-умолчанию: `topology.toml`
-- `-d, --data-dir <DATA_DIR>` - Путь к директории хранения файлов кластера. Значение по-умолчанию: `./tmp`
+- `-t, --topology <TOPOLOGY>` - Путь к файлу топологии. Значение по умолчанию: `topology.toml`
+- `--data-dir <DATA_DIR>` - Путь к директории хранения файлов кластера. Значение по умолчанию: `./tmp`
 - `--disable-install-plugins` - Отключение автоматической установки плагинов
-- `-b, --base-http-port <BASE_HTTP_PORT>` - Базовый http-порт, с которого начнут открываться http-порты отдельных инстансов. Значение по-умолчанию: `8000`
-- `-b, --base-pg-port <BASE_PG_PORT>` - Базовый порт постгрес протокола, с которого начнут открываться порты отдельных инстансов. Значение по-умолчанию: `5432`
-- `--picodata-path <BINARY_PATH>` - Путь до исполняемого файла Пикодаты. Значение по-умолчанию: `picodata`
+- `--base-http-port <BASE_HTTP_PORT>` - Базовый http-порт, с которого начнут открываться http-порты отдельных инстансов. Значение по умолчанию: `8000`
+- `--base-pg-port <BASE_PG_PORT>` - Базовый порт постгрес протокола, с которого начнут открываться порты отдельных инстансов. Значение по умолчанию: `5432`
+- `--picodata-path <BINARY_PATH>` - Путь до исполняемого файла Пикодаты. Значение по умолчанию: `picodata`
 - `--release` - Сборка и запуск релизной версии плагина.
 
 #### config.yaml
@@ -130,7 +130,7 @@ cargo pike stop --data-dir ./tmp
 
 #### Доступные опции
 
-- `-d, --data-dir <DATA_DIR>` - Путь к директории хранения файлов кластера. Значение по-умолчанию: `./tmp`
+- `--data-dir <DATA_DIR>` - Путь к директории хранения файлов кластера. Значение по умолчанию: `./tmp`
 
 ### `plugin clean`
 
@@ -142,7 +142,7 @@ cargo pike clean
 
 #### Доступные опции
 
-- `-d, --data-dir <DATA_DIR>` - Путь к директории хранения файлов кластера. Значение по-умолчанию: `./tmp`
+- `--data-dir <DATA_DIR>` - Путь к директории хранения файлов кластера. Значение по умолчанию: `./tmp`
 
 ### `plugin new`
 
@@ -157,7 +157,7 @@ cargo pike plugin new name_of_new_plugin
 
 #### Доступные опции
 
 - `--without-git` - Отключение автоматической инициализации git-репозитория
-- `-w, --workspace` - Создание проекта плагина как воркспейса
+- `--workspace` - Создание проекта плагина как воркспейса
 
 ### `plugin init`
 
@@ -172,7 +172,7 @@ cargo pike plugin init
 
 #### Доступные опции
 
 - `--without-git` - Отключение автоматической инициализации git-репозитория
-- `-w, --workspace` - Создание проекта плагина как воркспейса
+- `--workspace` - Создание проекта плагина как воркспейса
 
 ### `plugin pack`
 
@@ -207,5 +207,5 @@ cargo pike config apply
 
 #### Доступные опции
 
-- `-c, --config-path <CONFIG_PATH>` - Путь к файлу конфига. Значение по-умолчанию: `plugin_config.yaml`
-- `-d, --data-dir <DATA_DIR>` - Путь к директории хранения файлов кластера. Значение по-умолчанию: `./tmp`
+- `-c, --config-path <CONFIG_PATH>` - Путь к файлу конфига. Значение по умолчанию: `plugin_config.yaml`
+- `--data-dir <DATA_DIR>` - Путь к директории хранения файлов кластера. Значение по умолчанию: `./tmp`
diff --git a/src/commands/clean.rs b/src/commands/clean.rs
index 0f36ebc..5798f45 100644
--- a/src/commands/clean.rs
+++ b/src/commands/clean.rs
@@ -9,7 +9,7 @@ pub fn cmd(data_dir: &Path) -> Result<()> {
             .context(format!("failed to remove directory {}", data_dir.display()))?;
         info!("Successfully removed : {}", data_dir.to_string_lossy());
     } else {
-        warn!("Data directory does not exist")
+        warn!("Data directory does not exist");
     }
 
     Ok(())
diff --git a/src/commands/config/apply.rs b/src/commands/config/apply.rs
index e8d5595..7b456d5 100644
--- a/src/commands/config/apply.rs
+++ b/src/commands/config/apply.rs
@@ -13,12 +13,12 @@ fn apply_service_config(
     plugin_name: &str,
     plugin_version: &str,
     service_name: &str,
-    config: HashMap<String, serde_yaml::Value>,
+    config: &HashMap<String, serde_yaml::Value>,
     admin_socket: &Path,
 ) -> Result<()> {
     let mut queries: Vec<String> = Vec::new();
 
-    for (key, value) in &config {
+    for (key, value) in config {
         let value = serde_json::to_string(&value)
             .context(format!("failed to serialize the string with key {key}"))?;
         queries.push(format!(
@@ -83,12 +83,12 @@ pub fn cmd(config_path: &Path, data_dir: &Path) -> Result<()> {
             &cargo_manifest.package.name,
             &cargo_manifest.package.version,
             &service_name,
-            service_config,
+            &service_config,
             &admin_socket,
         )
         .context(format!(
             "failed to apply service config for service {service_name}"
-        ))?
+        ))?;
     }
 
     Ok(())
diff --git a/src/commands/lib.rs b/src/commands/lib.rs
index b723e4c..98cfeda 100644
--- a/src/commands/lib.rs
+++ b/src/commands/lib.rs
@@ -6,6 +6,7 @@ pub enum BuildType {
     Debug,
 }
 
+#[allow(clippy::needless_pass_by_value)]
 pub fn cargo_build(build_type: BuildType) -> Result<()> {
     let output = match build_type {
         BuildType::Release => Command::new("cargo")
diff --git a/src/commands/plugin/new.rs b/src/commands/plugin/new.rs
index b34e11e..ad18175 100644
--- a/src/commands/plugin/new.rs
+++ b/src/commands/plugin/new.rs
@@ -4,7 +4,7 @@ use std::{
     ffi::OsStr,
     fs::{self, File},
     io::Write,
-    path::{Path, PathBuf},
+    path::Path,
     process::Command,
 };
 
@@ -71,14 +71,14 @@ where
     Ok(())
 }
 
-fn workspace_init(root_path: &PathBuf, project_name: &str) -> Result<()> {
+fn workspace_init(root_path: &Path, project_name: &str) -> Result<()> {
     let cargo_toml_path = root_path.join("Cargo.toml");
 
     let mut cargo_toml =
         File::create(cargo_toml_path).context("failed to create Cargo.toml for workspace")?;
 
     cargo_toml
-        .write_all(format!("[workspace]\nmembers = [\n    \"{}\",\n]", project_name).as_bytes())?;
+        .write_all(format!("[workspace]\nmembers = [\n    \"{project_name}\",\n]").as_bytes())?;
 
     fs::copy(
         root_path.join(project_name).join("topology.toml"),
@@ -114,7 +114,7 @@ pub fn cmd(path: Option<&Path>, without_git: bool, init_workspace: bool) -> Resu
     let plugin_path = if init_workspace {
         path.join(project_name)
     } else {
-        path.to_path_buf()
+        path.clone()
     };
 
     std::fs::create_dir_all(&plugin_path)
diff --git a/src/commands/plugin/pack.rs b/src/commands/plugin/pack.rs
index 21402c1..5765f74 100644
--- a/src/commands/plugin/pack.rs
+++ b/src/commands/plugin/pack.rs
@@ -65,7 +65,7 @@ pub fn cmd(pack_debug: bool) -> Result<()> {
     )
     .context("failed to parse Cargo.toml")?;
 
-    let normalized_package_name = cargo_manifest.package.name.replace("-", "_");
+    let normalized_package_name = cargo_manifest.package.name.replace('-', "_");
 
     let compressed_file = File::create(format!(
         "target/{}-{}.tar.gz",
@@ -77,7 +77,7 @@ pub fn cmd(pack_debug: bool) -> Result<()> {
     let lib_name = format!("lib{normalized_package_name}.{LIB_EXT}");
 
     let mut lib_file =
-        File::open(build_dir.join(&lib_name)).context(format!("failed to open {}", lib_name))?;
+        File::open(build_dir.join(&lib_name)).context(format!("failed to open {lib_name}"))?;
 
     let mut manifest_file =
         File::open(build_dir.join("manifest.yaml")).context("failed to open file manifest.yaml")?;
diff --git a/src/commands/run.rs b/src/commands/run.rs
index d15972b..f28d732 100644
--- a/src/commands/run.rs
+++ b/src/commands/run.rs
@@ -58,20 +58,20 @@ fn enable_plugins(
             continue;
         };
         for service in services {
-            let plugin_dir = plugins_dir.join(service.plugin.clone());
+            let current_plugin_dir = plugins_dir.join(service.plugin.clone());
 
-            if !plugin_dir.exists() {
+            if !current_plugin_dir.exists() {
                 bail!(
                     "directory {} does not exist, run \"cargo build\" inside plugin directory",
-                    plugin_dir.display()
+                    current_plugin_dir.display()
                 );
             }
             plugins.entry(service.plugin.clone()).or_insert_with(|| {
-                let mut versions: Vec<_> = fs::read_dir(plugin_dir)
+                let mut versions: Vec<_> = fs::read_dir(current_plugin_dir)
                     .unwrap()
                     .map(|r| r.unwrap())
                     .collect();
-                versions.sort_by_key(|dir| dir.path());
+                versions.sort_by_key(std::fs::DirEntry::path);
                 versions
                     .last()
                     .unwrap()
@@ -85,8 +85,7 @@ fn enable_plugins(
     let mut queries: Vec<String> = Vec::new();
 
     // Queries to set migration variables, order of commands is not important
-    push_migration_envs_queries(&mut queries, topology, &plugins)
.context("failed to push migration variables")?; + push_migration_envs_queries(&mut queries, topology, &plugins); for (plugin, version) in &plugins { queries.push(format!(r#"CREATE PLUGIN "{plugin}" {version};"#)); @@ -134,10 +133,10 @@ fn push_migration_envs_queries( queries: &mut Vec, topology: &Topology, plugins: &HashMap, -) -> Result<()> { +) { info!("setting migration variables"); - for (_, tier) in &topology.tiers { + for tier in topology.tiers.values() { let Some(migration_envs) = &tier.migration_envs else { continue; }; @@ -150,8 +149,6 @@ fn push_migration_envs_queries( } } } - - Ok(()) } fn kill_picodata_instances() -> Result<()> { @@ -169,9 +166,9 @@ pub fn cmd( topology_path: &PathBuf, data_dir: &Path, disable_plugin_install: bool, - base_http_port: &i32, + base_http_port: i32, picodata_path: &PathBuf, - base_pg_port: &i32, + base_pg_port: i32, use_release: bool, ) -> Result<()> { fs::create_dir_all(data_dir).unwrap(); @@ -213,7 +210,7 @@ pub fn cmd( let bin_port = 3000 + instance_id; let http_port = base_http_port + instance_id; let pg_port = base_pg_port + instance_id; - let instance_data_dir = data_dir.join("cluster").join(format!("i_{}", instance_id)); + let instance_data_dir = data_dir.join("cluster").join(format!("i_{instance_id}")); // TODO: make it as child processes with catch output and redirect it to main // output @@ -227,34 +224,31 @@ pub fn cmd( "--plugin-dir", plugins_dir, "--listen", - &format!("127.0.0.1:{}", bin_port), + &format!("127.0.0.1:{bin_port}"), "--peer", - &format!("127.0.0.1:{}", first_instance_bin_port), + &format!("127.0.0.1:{first_instance_bin_port}"), "--init-replication-factor", &tier.replication_factor.to_string(), "--http-listen", - &format!("127.0.0.1:{}", http_port), + &format!("127.0.0.1:{http_port}"), "--pg-listen", - &format!("127.0.0.1:{}", pg_port), + &format!("127.0.0.1:{pg_port}"), "--tier", tier_name, ]) .spawn() - .context(format!( - "failed to start picodata instance: {}", - instance_id - ))?; + .context(format!("failed to start picodata instance: {instance_id}"))?; thread::sleep(Duration::from_secs(5)); // Save pid of picodata process to kill it after let pid = process.id(); let pid_location = instance_data_dir.join("pid"); let mut file = File::create(pid_location)?; - writeln!(file, "{}", pid)?; + writeln!(file, "{pid}")?; let processes_lock = Arc::clone(&get_picodata_processes()); let mut processes = processes_lock.lock().unwrap(); - processes.push(process) + processes.push(process); } } diff --git a/src/main.rs b/src/main.rs index fc511d3..f6fabb1 100644 --- a/src/main.rs +++ b/src/main.rs @@ -23,16 +23,16 @@ enum Command { Run { #[arg(short, long, value_name = "TOPOLOGY", default_value = "topology.toml")] topology: PathBuf, - #[arg(short, long, value_name = "DATA_DIR", default_value = "./tmp")] + #[arg(long, value_name = "DATA_DIR", default_value = "./tmp")] data_dir: PathBuf, /// Disable the automatic installation of plugins #[arg(long)] disable_install_plugins: bool, /// Base http port for picodata instances - #[arg(short, long, default_value = "8000")] + #[arg(long, default_value = "8000")] base_http_port: i32, /// Port for Pgproto server - #[arg(short, long, default_value = "5432")] + #[arg(long, default_value = "5432")] base_pg_port: i32, /// Specify path to picodata binary #[arg(long, value_name = "BINARY_PATH", default_value = "picodata")] @@ -44,12 +44,12 @@ enum Command { }, /// Stop Picodata cluster Stop { - #[arg(short, long, value_name = "DATA_DIR", default_value = "./tmp")] + #[arg(long, value_name = "DATA_DIR", 
default_value = "./tmp")] data_dir: PathBuf, }, /// Remove all data files of previous cluster run Clean { - #[arg(short, long, value_name = "DATA_DIR", default_value = "./tmp")] + #[arg(long, value_name = "DATA_DIR", default_value = "./tmp")] data_dir: PathBuf, }, /// Helpers for work with plugins @@ -80,7 +80,7 @@ enum Plugin { #[arg(long)] without_git: bool, /// Initiate plugin as a subcrate of workspace - #[arg(long, short)] + #[arg(long)] workspace: bool, }, /// Create a new Picodata plugin in an existing directory @@ -89,7 +89,7 @@ enum Plugin { #[arg(long)] without_git: bool, /// Initiate plugin as a subcrate of workspace - #[arg(long, short)] + #[arg(long)] workspace: bool, }, } @@ -107,7 +107,7 @@ enum Config { )] config_path: PathBuf, /// Path to data directory of the cluster - #[arg(short, long, value_name = "DATA_DIR", default_value = "./tmp")] + #[arg(long, value_name = "DATA_DIR", default_value = "./tmp")] data_dir: PathBuf, }, } @@ -129,21 +129,21 @@ fn main() -> Result<()> { &topology, &data_dir, disable_plugin_install, - &base_http_port, + base_http_port, &picodata_path, - &base_pg_port, + base_pg_port, release, ) .context("failed to execute Run command")?, Command::Stop { data_dir } => { - commands::stop::cmd(&data_dir).context("failed to execute \"stop\" command")? + commands::stop::cmd(&data_dir).context("failed to execute \"stop\" command")?; } Command::Clean { data_dir } => { - commands::clean::cmd(&data_dir).context("failed to execute \"clean\" command")? + commands::clean::cmd(&data_dir).context("failed to execute \"clean\" command")?; } Command::Plugin { command } => match command { Plugin::Pack { debug } => { - commands::plugin::pack::cmd(debug).context("failed to execute \"pack\" command")? + commands::plugin::pack::cmd(debug).context("failed to execute \"pack\" command")?; } Plugin::New { path, diff --git a/tests/helpers/helpers.rs b/tests/helpers/helpers.rs deleted file mode 100644 index 45d342b..0000000 --- a/tests/helpers/helpers.rs +++ /dev/null @@ -1,184 +0,0 @@ -use log::info; -use std::ffi::OsStr; -use std::io::{BufRead, BufReader, Write}; -use std::thread; -use std::{ - fs::{self}, - io::ErrorKind, - path::Path, - process::{Child, Command, Stdio}, - time::{Duration, Instant}, -}; - -pub const PLUGIN_DIR: &str = "./tests/test_plugin/"; -pub const TESTS_DIR: &str = "./tests"; - -pub struct Cluster {} - -impl Drop for Cluster { - fn drop(&mut self) { - run_pike(vec!["stop"], PLUGIN_DIR.to_string()).unwrap(); - } -} - -impl Cluster { - pub fn new() -> Cluster { - info!("cleaning artefacts from previous run"); - - match fs::remove_file(Path::new(TESTS_DIR).join("instance.log")) { - Ok(_) => info!("Clearing logs."), - Err(e) if e.kind() == ErrorKind::NotFound => { - info!("instance.log not found, skipping cleanup") - } - Err(e) => panic!("failed to delete instance.log: {}", e), - } - - match fs::remove_dir_all(PLUGIN_DIR) { - Ok(_) => info!("clearing test plugin dir."), - Err(e) if e.kind() == ErrorKind::NotFound => { - info!("plugin dir not found, skipping cleanup") - } - Err(e) => panic!("failed to delete plugin_dir: {}", e), - } - - Cluster {} - } -} - -pub fn run_cluster(timeout: Duration, total_instances: i32) -> Result { - // Set up cleanup function - let cluster_handle = Cluster::new(); - - // Create plugin from template - let mut plugin_creation_proc = - run_pike(vec!["plugin", "new", "test_plugin"], TESTS_DIR).unwrap(); - - wait_for_proc(&mut plugin_creation_proc, Duration::from_secs(10)); - - // Build the plugin - Command::new("cargo") - 
-        .args(vec!["build"])
-        .current_dir(PLUGIN_DIR)
-        .output()?;
-
-    // Setup the cluster
-    run_pike(vec!["run"], PLUGIN_DIR).unwrap();
-
-    let start_time = Instant::now();
-
-    // Run in the loop until we get info about successful plugin installation
-    loop {
-        // Check if cluster set up correctly
-        let mut picodata_admin = await_picodata_admin(Duration::from_secs(60))?;
-        let stdout = picodata_admin
-            .stdout
-            .take()
-            .expect("Failed to capture stdout");
-
-        if start_time.elapsed() >= timeout {
-            panic!("cluster setup timeouted");
-        }
-
-        let queries = vec![
-            r#"SELECT enabled FROM _pico_plugin;"#,
-            r#"SELECT current_state FROM _pico_instance;"#,
-            r#"\help;"#,
-        ];
-
-        // New scope to avoid infinite cycle while reading picodata stdout
-        {
-            let picodata_stdin = picodata_admin.stdin.as_mut().unwrap();
-            for query in queries {
-                picodata_stdin.write_all(query.as_bytes()).unwrap()
-            }
-            picodata_admin.wait().unwrap();
-        }
-
-        let mut plugin_ready = false;
-        let mut can_connect = false;
-        let mut online_instances_counter = 0;
-
-        let reader = BufReader::new(stdout);
-        for line in reader.lines() {
-            let line = line.expect("failed to read picodata stdout");
-            println!("{}", line);
-
-            if line.contains("true") {
-                plugin_ready = true;
-            }
-            if line.contains("Connected to admin console by socket") {
-                can_connect = true;
-            }
-            if line.contains("Online") {
-                online_instances_counter += 1;
-            }
-        }
-
-        picodata_admin.kill().unwrap();
-
-        if can_connect && plugin_ready && online_instances_counter == total_instances {
-            return Ok(cluster_handle);
-        }
-
-        thread::sleep(Duration::from_secs(5));
-    }
-}
-
-pub fn run_pike<A, P>(args: Vec<A>, current_dir: P) -> Result<Child, std::io::Error>
-where
-    A: AsRef<OsStr>,
-    P: AsRef<Path>,
-{
-    let root_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap();
-    Command::new(format!("{root_dir}/target/debug/cargo-pike"))
-        .arg("pike")
-        .args(args)
-        .current_dir(current_dir)
-        .spawn()
-}
-
-pub fn wait_for_proc(proc: &mut Child, timeout: Duration) {
-    let start_time = Instant::now();
-
-    loop {
-        if start_time.elapsed() >= timeout {
-            panic!("Process hanging for too long");
-        }
-
-        match proc.try_wait().unwrap() {
-            Some(_) => {
-                break;
-            }
-            None => {
-                std::thread::sleep(std::time::Duration::from_millis(100));
-            }
-        }
-    }
-}
-
-pub fn await_picodata_admin(timeout: Duration) -> Result<Child, std::io::Error> {
-    let start_time = Instant::now();
-
-    loop {
-        if start_time.elapsed() >= timeout {
-            panic!("process hanging for too long");
-        }
-
-        let picodata_admin = Command::new("picodata")
-            .arg("admin")
-            .arg("./tests/test_plugin/tmp/cluster/i_1/admin.sock")
-            .stdin(Stdio::piped())
-            .stdout(Stdio::piped())
-            .spawn();
-
-        match picodata_admin {
-            Ok(process) => {
-                info!("successfully connected to picodata cluster.");
-                return Ok(process);
-            }
-            Err(_) => {
-                std::thread::sleep(Duration::from_secs(1));
-            }
-        }
-    }
-}
diff --git a/tests/helpers/mod.rs b/tests/helpers/mod.rs
index 39ef443..37958cc 100644
--- a/tests/helpers/mod.rs
+++ b/tests/helpers/mod.rs
@@ -1 +1,183 @@
-pub(crate) mod helpers;
+use constcat::concat;
+use log::info;
+use std::ffi::OsStr;
+use std::io::{BufRead, BufReader, Write};
+use std::thread;
+use std::{
+    fs::{self},
+    io::ErrorKind,
+    path::Path,
+    process::{Child, Command, Stdio},
+    time::{Duration, Instant},
+};
+
+pub const TESTS_DIR: &str = "./tests/tmp/";
+pub const PLUGIN_DIR: &str = concat!(TESTS_DIR, "test_plugin/");
+
+pub struct Cluster {}
+
+impl Drop for Cluster {
+    fn drop(&mut self) {
+        run_pike(vec!["stop"], PLUGIN_DIR).unwrap();
+    }
+}
+
+impl Cluster {
+    pub fn new() -> Cluster {
info!("cleaning artefacts from previous run"); + + match fs::remove_file(Path::new(TESTS_DIR).join("instance.log")) { + Ok(()) => info!("Clearing logs."), + Err(e) if e.kind() == ErrorKind::NotFound => { + info!("instance.log not found, skipping cleanup"); + } + Err(e) => panic!("failed to delete instance.log: {e}"), + } + + match fs::remove_dir_all(PLUGIN_DIR) { + Ok(()) => info!("clearing test plugin dir."), + Err(e) if e.kind() == ErrorKind::NotFound => { + info!("plugin dir not found, skipping cleanup"); + } + Err(e) => panic!("failed to delete plugin_dir: {e}"), + } + + Cluster {} + } +} + +pub fn run_cluster(timeout: Duration, total_instances: i32) -> Result { + // Set up cleanup function + let cluster_handle = Cluster::new(); + + // Create plugin from template + let mut plugin_creation_proc = + run_pike(vec!["plugin", "new", "test_plugin"], TESTS_DIR).unwrap(); + + wait_for_proc(&mut plugin_creation_proc, Duration::from_secs(10)); + + // Build the plugin + Command::new("cargo") + .args(vec!["build"]) + .current_dir(PLUGIN_DIR) + .output()?; + + // Setup the cluster + run_pike(vec!["run"], PLUGIN_DIR).unwrap(); + + let start_time = Instant::now(); + + // Run in the loop until we get info about successful plugin installation + loop { + // Check if cluster set up correctly + let mut picodata_admin = await_picodata_admin(Duration::from_secs(60))?; + let stdout = picodata_admin + .stdout + .take() + .expect("Failed to capture stdout"); + + assert!(start_time.elapsed() < timeout, "cluster setup timeouted"); + + let queries = vec![ + r"SELECT enabled FROM _pico_plugin;", + r"SELECT current_state FROM _pico_instance;", + r"\help;", + ]; + + // New scope to avoid infinite cycle while reading picodata stdout + { + let picodata_stdin = picodata_admin.stdin.as_mut().unwrap(); + for query in queries { + picodata_stdin.write_all(query.as_bytes()).unwrap(); + } + picodata_admin.wait().unwrap(); + } + + let mut plugin_ready = false; + let mut can_connect = false; + let mut online_instances_counter = 0; + + let reader = BufReader::new(stdout); + for line in reader.lines() { + let line = line.expect("failed to read picodata stdout"); + if line.contains("true") { + plugin_ready = true; + } + if line.contains("Connected to admin console by socket") { + can_connect = true; + } + if line.contains("Online") { + online_instances_counter += 1; + } + } + + picodata_admin.kill().unwrap(); + + if can_connect && plugin_ready && online_instances_counter == total_instances { + return Ok(cluster_handle); + } + + thread::sleep(Duration::from_secs(5)); + } +} + +pub fn run_pike(args: Vec, current_dir: P) -> Result +where + A: AsRef, + P: AsRef, +{ + let root_dir = std::env::var("CARGO_MANIFEST_DIR").unwrap(); + Command::new(format!("{root_dir}/target/debug/cargo-pike")) + .arg("pike") + .args(args) + .current_dir(current_dir) + .spawn() +} + +pub fn wait_for_proc(proc: &mut Child, timeout: Duration) { + let start_time = Instant::now(); + + loop { + assert!( + start_time.elapsed() < timeout, + "Process hanging for too long" + ); + + match proc.try_wait().unwrap() { + Some(_) => { + break; + } + None => { + std::thread::sleep(std::time::Duration::from_millis(100)); + } + } + } +} + +pub fn await_picodata_admin(timeout: Duration) -> Result { + let start_time = Instant::now(); + + loop { + assert!( + start_time.elapsed() < timeout, + "process hanging for too long" + ); + + let picodata_admin = Command::new("picodata") + .arg("admin") + .arg(PLUGIN_DIR.to_string() + "tmp/cluster/i_1/admin.sock") + .stdin(Stdio::piped()) 
+            .stdout(Stdio::piped())
+            .spawn();
+
+        match picodata_admin {
+            Ok(process) => {
+                info!("successfully connected to picodata cluster.");
+                return Ok(process);
+            }
+            Err(_) => {
+                std::thread::sleep(Duration::from_secs(1));
+            }
+        }
+    }
+}
diff --git a/tests/run.rs b/tests/run.rs
index ef5e6b9..5145a43 100644
--- a/tests/run.rs
+++ b/tests/run.rs
@@ -1,5 +1,6 @@
 mod helpers;
-use helpers::helpers::run_cluster;
+
+use helpers::run_cluster;
 use std::time::Duration;
 
 const TOTAL_INSTANCES: i32 = 4;
diff --git a/tests/tmp/.keep b/tests/tmp/.keep
new file mode 100644
index 0000000..e69de29
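
---

For reference, the relocated helpers are consumed from `tests/run.rs`, whose test body lies outside this diff. Below is a minimal usage sketch; the test name and timeout value are illustrative assumptions, while `run_cluster`, `TOTAL_INSTANCES`, and the `Cluster` drop-guard come directly from the code above:

```rust
// tests/run.rs — hypothetical test body (not part of this diff).
mod helpers;

use helpers::run_cluster;
use std::time::Duration;

const TOTAL_INSTANCES: i32 = 4;

#[test]
fn cluster_comes_up_with_plugin() {
    // Blocks until the plugin reports `enabled` and all four instances are
    // Online, or panics once the (illustrative) timeout elapses.
    let _cluster = run_cluster(Duration::from_secs(300), TOTAL_INSTANCES)
        .expect("cluster failed to start");
    // `Cluster::drop` runs `cargo pike stop` here, tearing the cluster down.
}
```

The drop-guard is what makes the `tests/tmp` relocation safe: even a panicking test stops its picodata instances during unwinding, so stale cluster state stays confined to `tests/tmp/`, which is now ignored by git apart from the `.keep` placeholder.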