diff --git a/Cargo.lock b/Cargo.lock
index 851cc162c9..247ba9ad9d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -3218,6 +3218,7 @@ dependencies = [
  "serde",
  "serde_json",
  "sqlx",
+ "tempfile",
  "tokio",
  "url",
 ]
diff --git a/Cargo.toml b/Cargo.toml
index 2e78e946b5..9653a389c6 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -176,7 +176,7 @@ url = "2.2.2"
 rand = "0.8.4"
 rand_xoshiro = "0.6.0"
 hex = "0.4.3"
-tempfile = "3.9.0"
+tempfile = "3.10.1"
 criterion = { version = "0.5.1", features = ["async_tokio"] }
 
 # If this is an unconditional dev-dependency then Cargo will *always* try to build `libsqlite3-sys`,
diff --git a/sqlx-cli/Cargo.toml b/sqlx-cli/Cargo.toml
index 80de8e150d..1591cfa89d 100644
--- a/sqlx-cli/Cargo.toml
+++ b/sqlx-cli/Cargo.toml
@@ -67,3 +67,4 @@ completions = ["dep:clap_complete"]
 
 [dev-dependencies]
 assert_cmd = "2.0.11"
+tempfile = "3.10.1"
diff --git a/sqlx-cli/tests/add.rs b/sqlx-cli/tests/add.rs
new file mode 100644
index 0000000000..523ce4767e
--- /dev/null
+++ b/sqlx-cli/tests/add.rs
@@ -0,0 +1,271 @@
+use assert_cmd::Command;
+use std::cmp::Ordering;
+use std::fs::read_dir;
+use std::path::{Path, PathBuf};
+use tempfile::TempDir;
+
+#[test]
+fn add_migration_ambiguous() -> anyhow::Result<()> {
+    for reversible in [true, false] {
+        let files = AddMigrations::new()?
+            .run("hello world", reversible, true, true, false)?
+            .fs_output()?;
+        assert_eq!(files.0, Vec::<FileName>::new());
+    }
+    Ok(())
+}
+
+#[derive(Debug, PartialEq, Eq)]
+struct FileName {
+    id: usize,
+    description: String,
+    suffix: String,
+}
+
+impl PartialOrd for FileName {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        if self.id != other.id {
+            self.id.partial_cmp(&other.id)
+        } else {
+            self.suffix.partial_cmp(&other.suffix)
+        }
+    }
+}
+
+impl FileName {
+    fn assert_is_timestamp(&self) {
+        //if the library is still used in 2050, this will need bumping ^^
+        assert!(
+            self.id < 20500101000000,
+            "{self:?} is too high for a timestamp"
+        );
+        assert!(
+            self.id > 20200101000000,
+            "{self:?} is too low for a timestamp"
+        );
+    }
+}
+
+impl From<PathBuf> for FileName {
+    fn from(path: PathBuf) -> Self {
+        let filename = path.file_name().unwrap().to_string_lossy();
+        let (id, rest) = filename.split_once("_").unwrap();
+        let id: usize = id.parse().unwrap();
+        let (description, suffix) = rest.split_once(".").unwrap();
+        Self {
+            id,
+            description: description.to_string(),
+            suffix: suffix.to_string(),
+        }
+    }
+}
+#[test]
+fn add_migration_sequential() -> anyhow::Result<()> {
+    {
+        let files = AddMigrations::new()?
+            .run("hello world", false, false, true, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 1);
+        files.assert_is_not_reversible();
+        assert_eq!(files.0[0].id, 1);
+    }
+    {
+        let files = AddMigrations::new()?
+            .run("hello world1", false, false, true, true)?
+            .run("hello world2", true, false, true, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 2);
+        files.assert_is_not_reversible();
+        assert_eq!(files.0[0].id, 1);
+        assert_eq!(files.0[1].id, 2);
+    }
+    Ok(())
+}
+#[test]
+fn add_migration_sequential_reversible() -> anyhow::Result<()> {
+    {
+        let files = AddMigrations::new()?
+            .run("hello world", true, false, true, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 2);
+        files.assert_is_reversible();
+        assert_eq!(files.0[0].id, 1);
+        assert_eq!(files.0[1].id, 1);
+    }
+    {
+        let files = AddMigrations::new()?
+            .run("hello world1", true, false, true, true)?
+            .run("hello world2", true, true, false, true)?
+            .run("hello world3", true, false, true, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 6);
+        files.assert_is_reversible();
+        assert_eq!(files.0[0].id, 1);
+        assert_eq!(files.0[1].id, 1);
+        // sequential -> timestamp is one way
+        files.0[2].assert_is_timestamp();
+        files.0[3].assert_is_timestamp();
+        files.0[4].assert_is_timestamp();
+        files.0[5].assert_is_timestamp();
+    }
+    Ok(())
+}
+
+#[test]
+fn add_migration_timestamp() -> anyhow::Result<()> {
+    {
+        let files = AddMigrations::new()?
+            .run("hello world", false, true, false, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 1);
+        files.assert_is_not_reversible();
+        files.0[0].assert_is_timestamp();
+    }
+    {
+        let files = AddMigrations::new()?
+            .run("hello world1", false, true, false, true)?
+            .run("hello world2", true, false, true, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 2);
+        files.assert_is_not_reversible();
+        files.0[0].assert_is_timestamp();
+        // sequential -> timestamp is one way
+        files.0[1].assert_is_timestamp();
+    }
+    Ok(())
+}
+#[test]
+fn add_migration_timestamp_reversible() -> anyhow::Result<()> {
+    {
+        let files = AddMigrations::new()?
+            .run("hello world", true, false, false, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 2);
+        files.assert_is_reversible();
+        files.0[0].assert_is_timestamp();
+        files.0[1].assert_is_timestamp();
+    }
+    {
+        let files = AddMigrations::new()?
+            .run("hello world", true, true, false, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 2);
+        files.assert_is_reversible();
+        files.0[0].assert_is_timestamp();
+        files.0[1].assert_is_timestamp();
+    }
+    {
+        let files = AddMigrations::new()?
+            .run("hello world1", true, true, false, true)?
+            .run("hello world2", true, false, true, true)?
+            .fs_output()?;
+        assert_eq!(files.len(), 4);
+        files.assert_is_reversible();
+        files.0[0].assert_is_timestamp();
+        files.0[1].assert_is_timestamp();
+        files.0[2].assert_is_timestamp();
+        files.0[3].assert_is_timestamp();
+    }
+    Ok(())
+}
+
+struct AddMigrationsResult(Vec<FileName>);
+impl AddMigrationsResult {
+    fn len(&self) -> usize {
+        self.0.len()
+    }
+    fn assert_is_reversible(&self) {
+        let mut up_cnt = 0;
+        let mut down_cnt = 0;
+        for file in self.0.iter() {
+            if file.suffix == "down.sql" {
+                down_cnt += 1;
+            } else if file.suffix == "up.sql" {
+                up_cnt += 1;
+            } else {
+                panic!("unknown suffix for {file:?}");
+            }
+            assert!(file.description.starts_with("hello_world"));
+        }
+        assert_eq!(up_cnt, down_cnt);
+    }
+    fn assert_is_not_reversible(&self) {
+        for file in self.0.iter() {
+            assert_eq!(file.suffix, "sql");
+            assert!(file.description.starts_with("hello_world"));
+        }
+    }
+}
+struct AddMigrations(TempDir);
+
+impl AddMigrations {
+    fn new() -> anyhow::Result<Self> {
+        anyhow::Ok(Self(TempDir::new()?))
+    }
+    fn run(
+        self,
+        description: &str,
+        revesible: bool,
+        timestamp: bool,
+        sequential: bool,
+        expect_success: bool,
+    ) -> anyhow::Result<Self> {
+        let cmd_result = Command::cargo_bin("cargo-sqlx")?
+            .current_dir(&self.0)
+            .args(
+                [
+                    vec!["sqlx", "migrate", "add", description],
+                    match revesible {
+                        true => vec!["-r"],
+                        false => vec![],
+                    },
+                    match timestamp {
+                        true => vec!["--timestamp"],
+                        false => vec![],
+                    },
+                    match sequential {
+                        true => vec!["--sequential"],
+                        false => vec![],
+                    },
+                ]
+                .concat(),
+            )
+            .assert();
+        if expect_success {
+            cmd_result.success();
+        } else {
+            cmd_result.failure();
+        }
+        anyhow::Ok(self)
+    }
+    fn fs_output(&self) -> anyhow::Result<AddMigrationsResult> {
+        let files = recurse_files(&self.0)?;
+        let mut fs_paths = Vec::with_capacity(files.len());
+        for path in files {
+            let relative_path = path.strip_prefix(self.0.path())?.to_path_buf();
+            fs_paths.push(FileName::from(relative_path));
+        }
+        Ok(AddMigrationsResult(fs_paths))
+    }
+}
+
+fn recurse_files(path: impl AsRef<Path>) -> anyhow::Result<Vec<PathBuf>> {
+    let mut buf = vec![];
+    let entries = read_dir(path)?;
+
+    for entry in entries {
+        let entry = entry?;
+        let meta = entry.metadata()?;
+
+        if meta.is_dir() {
+            let mut subdir = recurse_files(entry.path())?;
+            buf.append(&mut subdir);
+        }
+
+        if meta.is_file() {
+            buf.push(entry.path());
+        }
+    }
+    buf.sort();
+    Ok(buf)
+}
diff --git a/sqlx-macros-core/Cargo.toml b/sqlx-macros-core/Cargo.toml
index df867201d1..fec34b6528 100644
--- a/sqlx-macros-core/Cargo.toml
+++ b/sqlx-macros-core/Cargo.toml
@@ -59,6 +59,6 @@ serde = { version = "1.0.132", features = ["derive"] }
 serde_json = { version = "1.0.73" }
 sha2 = { version = "0.10.0" }
 syn = { version = "2.0.52", default-features = false, features = ["full", "derive", "parsing", "printing", "clone-impls"] }
-tempfile = { version = "3.3.0" }
+tempfile = { version = "3.10.1" }
 quote = { version = "1.0.26", default-features = false }
 url = { version = "2.2.2", default-features = false }