diff --git a/benches/template_benchmark.rs b/benches/template_benchmark.rs
index deaad8b9..1a654f4c 100644
--- a/benches/template_benchmark.rs
+++ b/benches/template_benchmark.rs
@@ -902,7 +902,11 @@ fn bench_filter_join_opt(c: &mut Criterion) {
     let template = "{{ list | join(',') }}";

     group.bench_function("join_1000_items", |b| {
-        b.iter(|| engine.render(black_box(template), black_box(&vars)).unwrap())
+        b.iter(|| {
+            engine
+                .render(black_box(template), black_box(&vars))
+                .unwrap()
+        })
     });

     group.finish();
@@ -920,7 +924,11 @@ fn bench_filter_title_opt(c: &mut Criterion) {
     let template = "{{ text | title }}";

     group.bench_function("title_2000_words", |b| {
-        b.iter(|| engine.render(black_box(template), black_box(&vars)).unwrap())
+        b.iter(|| {
+            engine
+                .render(black_box(template), black_box(&vars))
+                .unwrap()
+        })
     });

     group.finish();
diff --git a/src/cli/commands/drift.rs b/src/cli/commands/drift.rs
index 57306da7..b23ba58c 100644
--- a/src/cli/commands/drift.rs
+++ b/src/cli/commands/drift.rs
@@ -717,10 +717,8 @@ impl DriftArgs {
         ctx.output.plan_header("Drift Detection Report");
         ctx.output.info(&format!("Playbook: {}", report.playbook));
-        ctx.output.info(&format!(
-            "Hosts: {}",
-            report.hosts_checked.join(", ")
-        ));
+        ctx.output
+            .info(&format!("Hosts: {}", report.hosts_checked.join(", ")));
         ctx.output.info(&format!(
             "Timestamp: {}",
             report.timestamp.format("%Y-%m-%d %H:%M:%S UTC")
         ));
@@ -738,7 +736,8 @@ impl DriftArgs {
         // Print resource changes if there are any drifted/missing resources
         if report.summary.drifted > 0 || report.summary.missing > 0 || report.summary.extra > 0 {
             println!();
-            ctx.output.info("Rustible will perform the following actions to remediate drift:");
+            ctx.output
+                .info("Rustible will perform the following actions to remediate drift:");
             println!();

             // Sort hosts for consistent output
@@ -764,7 +763,8 @@ impl DriftArgs {
                 }

                 // Print host header
-                ctx.output.info(&format!("# {} ({})", host, relevant_findings.len()));
+                ctx.output
+                    .info(&format!("# {} ({})", host, relevant_findings.len()));
                 println!();

                 for finding in relevant_findings {
@@ -816,9 +816,9 @@ impl DriftArgs {

         // Print Terraform-style summary
         ctx.output.plan_summary(
-            report.summary.missing, // to_add (missing resources need to be created)
-            report.summary.drifted, // to_change (drifted resources need updates)
-            report.summary.extra, // to_destroy (extra resources should be removed)
+            report.summary.missing, // to_add (missing resources need to be created)
+            report.summary.drifted, // to_change (drifted resources need updates)
+            report.summary.extra,   // to_destroy (extra resources should be removed)
         );

         // Additional statistics
diff --git a/src/cli/commands/provision.rs b/src/cli/commands/provision.rs
index d2920015..3a875d23 100644
--- a/src/cli/commands/provision.rs
+++ b/src/cli/commands/provision.rs
@@ -280,7 +280,9 @@ fn project_root_for_config(config_file: &Path) -> PathBuf {

 #[cfg(feature = "provisioning")]
 fn default_state_path(project_root: &Path) -> PathBuf {
-    project_root.join(".rustible").join("provisioning.state.json")
+    project_root
+        .join(".rustible")
+        .join("provisioning.state.json")
 }

 #[cfg(feature = "provisioning")]
@@ -841,8 +843,10 @@ impl ImportTerraformArgs {

         let tf_state_json = if let Some(tfstate_path) = &self.tfstate {
             if !tfstate_path.exists() {
-                ctx.output
-                    .error(&format!("Terraform state not found: {}", tfstate_path.display()));
+                ctx.output.error(&format!(
+                    "Terraform state not found: {}",
+                    tfstate_path.display()
+                ));
                 return Ok(1);
             }
             std::fs::read_to_string(tfstate_path)?
@@ -871,10 +875,8 @@ impl ImportTerraformArgs {
         save_state(&backend_config, &state_path, &mut state).await?;

         ctx.output.section("Import Successful");
-        ctx.output.info(&format!(
-            "Imported {} resources.",
-            state.resource_count()
-        ));
+        ctx.output
+            .info(&format!("Imported {} resources.", state.resource_count()));
         ctx.output
             .info(&format!("Outputs: {}", state.outputs.len()));

@@ -1021,15 +1023,18 @@ outputs: {}
             ctx.output
                 .info(&format!("Created: {}", backend_config_path.display()));
         } else {
-            ctx.output
-                .info(&format!("Backend config exists: {}", backend_config_path.display()));
+            ctx.output.info(&format!(
+                "Backend config exists: {}",
+                backend_config_path.display()
+            ));
         }

         if backend_source_provided || matches!(backend_config, BackendConfig::Local { .. }) {
             match backend_config.create_backend().await {
                 Ok(backend) => {
                     if backend.exists().await? {
-                        ctx.output.info("State already exists; skipping initialization.");
+                        ctx.output
+                            .info("State already exists; skipping initialization.");
                     } else {
                         let mut state = ProvisioningState::new();
                         state.prepare_for_save();
@@ -1038,10 +1043,8 @@ outputs: {}
                     }
                 }
                 Err(err) => {
-                    ctx.output.warning(&format!(
-                        "Skipping backend initialization: {}",
-                        err
-                    ));
+                    ctx.output
+                        .warning(&format!("Skipping backend initialization: {}", err));
                 }
             }
         } else {
@@ -1651,12 +1654,9 @@ mod tests {

     #[test]
     fn test_import_terraform_args_with_tfstate() {
-        let cli = TestImportTerraformCli::try_parse_from([
-            "test",
-            "--tfstate",
-            "terraform.tfstate",
-        ])
-        .unwrap();
+        let cli =
+            TestImportTerraformCli::try_parse_from(["test", "--tfstate", "terraform.tfstate"])
+                .unwrap();
         assert_eq!(cli.args.tfstate, Some(PathBuf::from("terraform.tfstate")));
     }

@@ -1767,9 +1767,13 @@ mod tests {

     #[test]
     fn test_provision_commands_import_terraform() {
-        let cli =
-            TestProvisionCli::try_parse_from(["rustible", "import-terraform", "--tfstate", "state.tfstate"])
-                .unwrap();
+        let cli = TestProvisionCli::try_parse_from([
+            "rustible",
+            "import-terraform",
+            "--tfstate",
+            "state.tfstate",
+        ])
+        .unwrap();
         assert!(matches!(cli.command, ProvisionCommands::ImportTerraform(_)));
     }

diff --git a/src/cli/commands/run.rs b/src/cli/commands/run.rs
index 0763c271..f5dc7a99 100644
--- a/src/cli/commands/run.rs
+++ b/src/cli/commands/run.rs
@@ -611,7 +611,11 @@ impl RunArgs {
                 total_tasks: 0,
             },
             hosts: HashMap::new(),
-            plan: if plan_lines.is_empty() { None } else { Some(plan_lines) },
+            plan: if plan_lines.is_empty() {
+                None
+            } else {
+                Some(plan_lines)
+            },
         };

         bundle.finish(&summary)?;
@@ -836,22 +840,22 @@ impl RunArgs {
                 ctx,
                 &mut plan_lines,
                 format!(
-                        "{}[Play {}/{}] {} {}",
-                        if play_idx > 0 { "\n" } else { "" },
-                        play_idx + 1,
-                        plays.len(),
-                        "*".to_string(),
-                        play_name
+                    "{}[Play {}/{}] {} {}",
+                    if play_idx > 0 { "\n" } else { "" },
+                    play_idx + 1,
+                    plays.len(),
+                    "*".to_string(),
+                    play_name
                 ),
             );
             emit_plan_line(
                 ctx,
                 &mut plan_lines,
                 format!(
-                        " Hosts: {} ({} host{})",
-                        hosts_pattern,
-                        hosts.len(),
-                        if hosts.len() == 1 { "" } else { "s" }
+                    " Hosts: {} ({} host{})",
+                    hosts_pattern,
+                    hosts.len(),
+                    if hosts.len() == 1 { "" } else { "s" }
                 ),
             );

@@ -936,9 +940,9 @@ impl RunArgs {
                 ctx,
                 &mut plan_lines,
                 format!(
-                        " Tasks: {} task{}",
-                        total_play_tasks,
-                        if total_play_tasks == 1 { "" } else { "s" }
+                    " Tasks: {} task{}",
+                    total_play_tasks,
+                    if total_play_tasks == 1 { "" } else { "s" }
                 ),
             );

@@ -968,11 +972,11 @@ impl RunArgs {
                 ctx,
                 plan_lines,
                 format!(
-                        "\n {} Task {}/{}: {}",
-                        ">".to_string(),
-                        task_num,
-                        total,
-                        task_name
+                    "\n {} Task {}/{}: {}",
+                    ">".to_string(),
+                    task_num,
+                    total,
+                    task_name
                 ),
             );
             emit_plan_line(ctx, plan_lines, format!(" Module: {}", module));
@@ -996,7 +1000,11 @@ impl RunArgs {
                    vec![]
                };
                if !handlers.is_empty() {
-                    emit_plan_line(ctx, plan_lines, format!(" Notify: {}", handlers.join(", ")));
+                    emit_plan_line(
+                        ctx,
+                        plan_lines,
+                        format!(" Notify: {}", handlers.join(", ")),
+                    );
                }
            }
        };
@@ -1004,7 +1012,16 @@ impl RunArgs {
        // Show pre_tasks
        for task in &pre_tasks {
            task_num += 1;
-            show_task(ctx, &mut plan_lines, task, task_num, total_play_tasks, &hosts, &vars, self);
+            show_task(
+                ctx,
+                &mut plan_lines,
+                task,
+                task_num,
+                total_play_tasks,
+                &hosts,
+                &vars,
+                self,
+            );
        }

        // Show role tasks
@@ -1050,13 +1067,31 @@ impl RunArgs {
            // Show tasks
            for task in &tasks {
                task_num += 1;
-                show_task(ctx, &mut plan_lines, task, task_num, total_play_tasks, &hosts, &vars, self);
+                show_task(
+                    ctx,
+                    &mut plan_lines,
+                    task,
+                    task_num,
+                    total_play_tasks,
+                    &hosts,
+                    &vars,
+                    self,
+                );
            }

            // Show post_tasks
            for task in &post_tasks {
                task_num += 1;
-                show_task(ctx, &mut plan_lines, task, task_num, total_play_tasks, &hosts, &vars, self);
+                show_task(
+                    ctx,
+                    &mut plan_lines,
+                    task,
+                    task_num,
+                    total_play_tasks,
+                    &hosts,
+                    &vars,
+                    self,
+                );
            }
        }

@@ -1145,11 +1180,11 @@ impl RunArgs {
            ctx,
            &mut plan_lines,
            format!(
-                    "Plan: {} task{} across {} host{}",
-                    total_tasks,
-                    if total_tasks == 1 { "" } else { "s" },
-                    total_hosts.len(),
-                    if total_hosts.len() == 1 { "" } else { "s" }
+                "Plan: {} task{} across {} host{}",
+                total_tasks,
+                if total_tasks == 1 { "" } else { "s" },
+                total_hosts.len(),
+                if total_hosts.len() == 1 { "" } else { "s" }
            ),
        );

diff --git a/src/cli/commands/state.rs b/src/cli/commands/state.rs
index 6a69e764..b05f077c 100644
--- a/src/cli/commands/state.rs
+++ b/src/cli/commands/state.rs
@@ -250,9 +250,8 @@ impl StateArgs {
                // Check if state config already exists
                let config_path = PathBuf::from(".rustible/backend.json");
                if config_path.exists() && !reconfigure {
-                    ctx.output.warning(
-                        "Backend already configured. Use --reconfigure to overwrite.",
-                    );
+                    ctx.output
+                        .warning("Backend already configured. Use --reconfigure to overwrite.");
                    return Ok(1);
                }

@@ -262,7 +261,8 @@ impl StateArgs {
                // Build backend configuration
                let backend_config = match backend {
                    BackendType::Local => {
-                        ctx.output.info(&format!("Initializing local backend at {:?}", path));
+                        ctx.output
+                            .info(&format!("Initializing local backend at {:?}", path));
                        serde_json::json!({
                            "type": "local",
                            "path": path.to_string_lossy()
@@ -272,13 +272,14 @@ impl StateArgs {
                        let bucket = bucket.as_ref().ok_or_else(|| {
                            anyhow::anyhow!("--bucket is required for S3 backend")
                        })?;
-                        let key = key.as_ref().map(|k| k.as_str()).unwrap_or("terraform.tfstate");
+                        let key = key
+                            .as_ref()
+                            .map(|k| k.as_str())
+                            .unwrap_or("terraform.tfstate");
                        let region = region.as_ref().map(|r| r.as_str()).unwrap_or("us-east-1");

-                        ctx.output.info(&format!(
-                            "Initializing S3 backend: s3://{}/{}",
-                            bucket, key
-                        ));
+                        ctx.output
+                            .info(&format!("Initializing S3 backend: s3://{}/{}", bucket, key));

                        let mut config = serde_json::json!({
                            "type": "s3",
@@ -290,7 +291,8 @@ impl StateArgs {

                        if let Some(table) = dynamodb_table {
                            config["dynamodb_table"] = serde_json::json!(table);
-                            ctx.output.info(&format!(" DynamoDB locking enabled: {}", table));
+                            ctx.output
+                                .info(&format!(" DynamoDB locking enabled: {}", table));
                        }

                        config
@@ -299,7 +301,10 @@ impl StateArgs {
                        let bucket = bucket.as_ref().ok_or_else(|| {
                            anyhow::anyhow!("--bucket is required for GCS backend")
                        })?;
-                        let key = key.as_ref().map(|k| k.as_str()).unwrap_or("terraform.tfstate");
+                        let key = key
+                            .as_ref()
+                            .map(|k| k.as_str())
+                            .unwrap_or("terraform.tfstate");

                        ctx.output.info(&format!(
                            "Initializing GCS backend: gs://{}/{}",
@@ -319,7 +324,10 @@ impl StateArgs {
                        let container = container.as_ref().ok_or_else(|| {
                            anyhow::anyhow!("--container is required for Azure backend")
                        })?;
-                        let blob_name = key.as_ref().map(|k| k.as_str()).unwrap_or("terraform.tfstate");
+                        let blob_name = key
+                            .as_ref()
+                            .map(|k| k.as_str())
+                            .unwrap_or("terraform.tfstate");

                        ctx.output.info(&format!(
                            "Initializing Azure backend: {}/{}/{}",
@@ -334,7 +342,9 @@ impl StateArgs {
                        })
                    }
                    BackendType::Consul => {
-                        let addr = address.as_ref().map(|a| a.as_str())
+                        let addr = address
+                            .as_ref()
+                            .map(|a| a.as_str())
                            .unwrap_or("http://127.0.0.1:8500");
                        let path = key.as_ref().map(|k| k.as_str()).unwrap_or("rustible/state");

@@ -354,10 +364,8 @@ impl StateArgs {
                            anyhow::anyhow!("--address is required for HTTP backend")
                        })?;

-                        ctx.output.info(&format!(
-                            "Initializing HTTP backend: {}",
-                            addr
-                        ));
+                        ctx.output
+                            .info(&format!("Initializing HTTP backend: {}", addr));

                        serde_json::json!({
                            "type": "http",
@@ -370,10 +378,12 @@ impl StateArgs {
                let config_content = serde_json::to_string_pretty(&backend_config)?;
                std::fs::write(&config_path, &config_content)?;

-                ctx.output.info(&format!("Backend configuration saved to {:?}", config_path));
+                ctx.output
+                    .info(&format!("Backend configuration saved to {:?}", config_path));
                ctx.output.info("");
                ctx.output.info("Successfully configured the backend!");
-                ctx.output.info("You may now begin working with Rustible provisioning.");
+                ctx.output
+                    .info("You may now begin working with Rustible provisioning.");

                Ok(0)
            }
@@ -386,14 +396,14 @@ impl StateArgs {
                force,
            } => {
                ctx.output.banner("STATE MIGRATE");
-                ctx.output.info(&format!("Migrating state from {} to {}", from, to));
+                ctx.output
+                    .info(&format!("Migrating state from {} to {}", from, to));
                ctx.output.info(&format!(" Source: {}", from_path));
                ctx.output.info(&format!(" Destination: {}", to_path));

                if !force {
-                    ctx.output.warning(
-                        "This will copy state data. Use --force to confirm.",
-                    );
+                    ctx.output
+                        .warning("This will copy state data. Use --force to confirm.");
                    return Ok(1);
                }

@@ -402,7 +412,8 @@ impl StateArgs {
                    BackendType::Local => {
                        let path = PathBuf::from(from_path);
                        if !path.exists() {
-                            ctx.output.error(&format!("Source state not found: {:?}", path));
+                            ctx.output
+                                .error(&format!("Source state not found: {:?}", path));
                            return Ok(1);
                        }
                        std::fs::read_to_string(&path)?
@@ -454,15 +465,15 @@ impl StateArgs {

                // Check source exists
                if !tfstate.exists() {
-                    ctx.output.error(&format!("Terraform state file not found: {:?}", tfstate));
+                    ctx.output
+                        .error(&format!("Terraform state file not found: {:?}", tfstate));
                    return Ok(1);
                }

                // Check destination doesn't exist (unless force)
                if output.exists() && !force {
-                    ctx.output.warning(
-                        "Output file already exists. Use --force to overwrite.",
-                    );
+                    ctx.output
+                        .warning("Output file already exists. Use --force to overwrite.");
                    return Ok(1);
                }

@@ -472,18 +483,19 @@ impl StateArgs {
                // Validate it's a Terraform state file
                if tf_state.get("version").is_none() {
-                    ctx.output.error("Invalid Terraform state file: missing version field");
+                    ctx.output
+                        .error("Invalid Terraform state file: missing version field");
                    return Ok(1);
                }

-                let tf_version = tf_state.get("terraform_version")
+                let tf_version = tf_state
+                    .get("terraform_version")
                    .and_then(|v| v.as_str())
                    .unwrap_or("unknown");

-                let serial = tf_state.get("serial")
-                    .and_then(|v| v.as_u64())
-                    .unwrap_or(0);
+                let serial = tf_state.get("serial").and_then(|v| v.as_u64()).unwrap_or(0);

-                ctx.output.info(&format!(" Terraform version: {}", tf_version));
+                ctx.output
+                    .info(&format!(" Terraform version: {}", tf_version));
                ctx.output.info(&format!(" State serial: {}", serial));

                // Import using the provisioning module (feature-gated)
@@ -495,8 +507,14 @@ impl StateArgs {
                        .map_err(|e| anyhow::anyhow!("Failed to import state: {}", e))?;

                    // Report what was imported
-                    ctx.output.info(&format!(" Resources imported: {}", rustible_state.resource_count()));
-                    ctx.output.info(&format!(" Outputs imported: {}", rustible_state.outputs.len()));
+                    ctx.output.info(&format!(
+                        " Resources imported: {}",
+                        rustible_state.resource_count()
+                    ));
+                    ctx.output.info(&format!(
+                        " Outputs imported: {}",
+                        rustible_state.outputs.len()
+                    ));

                    // Create output directory if needed
                    if let Some(parent) = output.parent() {
@@ -509,22 +527,27 @@ impl StateArgs {

                    ctx.output.info("");
                    ctx.output.info("Successfully imported Terraform state!");
-                    ctx.output.info("You can now use 'rustible provision plan' to see the current state.");
+                    ctx.output.info(
+                        "You can now use 'rustible provision plan' to see the current state.",
+                    );
                }

                #[cfg(not(feature = "provisioning"))]
                {
                    // Fallback: simple JSON-to-JSON conversion for basic import
-                    let resources = tf_state.get("resources")
+                    let resources = tf_state
+                        .get("resources")
                        .and_then(|r| r.as_array())
                        .map(|a| a.len())
                        .unwrap_or(0);

-                    let outputs = tf_state.get("outputs")
+                    let outputs = tf_state
+                        .get("outputs")
                        .and_then(|o| o.as_object())
                        .map(|o| o.len())
                        .unwrap_or(0);

-                    ctx.output.info(&format!(" Resources found: {}", resources));
+                    ctx.output
+                        .info(&format!(" Resources found: {}", resources));
                    ctx.output.info(&format!(" Outputs found: {}", outputs));

                    // Create output directory if needed
@@ -539,7 +562,8 @@ impl StateArgs {

                    ctx.output.info("");
                    ctx.output.info("Successfully imported Terraform state!");
-                    ctx.output.info("Note: Enable 'provisioning' feature for full state management.");
+                    ctx.output
+                        .info("Note: Enable 'provisioning' feature for full state management.");
                }

                Ok(0)
@@ -791,31 +815,36 @@ fn convert_terraform_state(tf_state: &serde_json::Value) -> serde_json::Value {
    let mut outputs: HashMap<String, serde_json::Value> = HashMap::new();

    // Extract lineage and serial
-    let lineage = tf_state.get("lineage")
+    let lineage = tf_state
+        .get("lineage")
        .and_then(|v| v.as_str())
        .unwrap_or("")
        .to_string();
-    let serial = tf_state.get("serial")
-        .and_then(|v| v.as_u64())
-        .unwrap_or(0);
+    let serial = tf_state.get("serial").and_then(|v| v.as_u64()).unwrap_or(0);

    // Convert resources
    if let Some(tf_resources) = tf_state.get("resources").and_then(|r| r.as_array()) {
        for resource in tf_resources {
-            let resource_type = resource.get("type")
+            let resource_type = resource
+                .get("type")
                .and_then(|v| v.as_str())
                .unwrap_or("unknown");
-            let name = resource.get("name")
+            let name = resource
+                .get("name")
                .and_then(|v| v.as_str())
                .unwrap_or("unknown");
-            let mode = resource.get("mode")
+            let mode = resource
+                .get("mode")
                .and_then(|v| v.as_str())
                .unwrap_or("managed");
-            let provider = resource.get("provider")
+            let provider = resource
+                .get("provider")
                .and_then(|v| v.as_str())
                .map(|p| {
                    // Extract provider name from full provider path
-                    p.split('/').last().unwrap_or(p)
+                    p.split('/')
+                        .last()
+                        .unwrap_or(p)
                        .trim_start_matches("provider[\"")
                        .trim_end_matches("\"]")
                        .split('.')
@@ -829,10 +858,12 @@ fn convert_terraform_state(tf_state: &serde_json::Value) -> serde_json::Value {
            // Process instances
            if let Some(instances) = resource.get("instances").and_then(|i| i.as_array()) {
                for (idx, instance) in instances.iter().enumerate() {
-                    let attributes = instance.get("attributes")
+                    let attributes = instance
+                        .get("attributes")
                        .cloned()
                        .unwrap_or(serde_json::Value::Object(serde_json::Map::new()));
-                    let cloud_id = attributes.get("id")
+                    let cloud_id = attributes
+                        .get("id")
                        .and_then(|v| v.as_str())
                        .unwrap_or("")
                        .to_string();
@@ -842,25 +873,28 @@ fn convert_terraform_state(tf_state: &serde_json::Value) -> serde_json::Value {

                    let resource_key = format!("{}.{}", resource_type, name);

-                    resources.insert(resource_key.clone(), serde_json::json!({
-                        "id": {
+                    resources.insert(
+                        resource_key.clone(),
+                        serde_json::json!({
+                            "id": {
+                                "resource_type": resource_type,
+                                "name": name
+                            },
+                            "cloud_id": cloud_id,
                            "resource_type": resource_type,
-                            "name": name
-                        },
-                        "cloud_id": cloud_id,
-                        "resource_type": resource_type,
-                        "provider": provider,
-                        "config": {},
-                        "attributes": attributes,
-                        "dependencies": [],
-                        "dependents": [],
-                        "created_at": Utc::now().to_rfc3339(),
-                        "updated_at": Utc::now().to_rfc3339(),
-                        "metadata": {},
-                        "tainted": false,
-                        "index": index,
-                        "mode": mode
-                    }));
+                            "provider": provider,
+                            "config": {},
+                            "attributes": attributes,
+                            "dependencies": [],
+                            "dependents": [],
+                            "created_at": Utc::now().to_rfc3339(),
+                            "updated_at": Utc::now().to_rfc3339(),
+                            "metadata": {},
+                            "tainted": false,
+                            "index": index,
+                            "mode": mode
+                        }),
+                    );
                }
            }
        }
@@ -869,18 +903,25 @@ fn convert_terraform_state(tf_state: &serde_json::Value) -> serde_json::Value {

    // Convert outputs
    if let Some(tf_outputs) = tf_state.get("outputs").and_then(|o| o.as_object()) {
        for (name, output) in tf_outputs {
-            let value = output.get("value").cloned().unwrap_or(serde_json::Value::Null);
-            let sensitive = output.get("sensitive")
+            let value = output
+                .get("value")
+                .cloned()
+                .unwrap_or(serde_json::Value::Null);
+            let sensitive = output
+                .get("sensitive")
                .and_then(|v| v.as_bool())
                .unwrap_or(false);
            let output_type = output.get("type").cloned();

-            outputs.insert(name.clone(), serde_json::json!({
-                "value": value,
-                "sensitive": sensitive,
-                "type": output_type,
-                "description": null
-            }));
+            outputs.insert(
+                name.clone(),
+                serde_json::json!({
+                    "value": value,
+                    "sensitive": sensitive,
+                    "type": output_type,
+                    "description": null
+                }),
+            );
        }
    }
diff --git a/src/cli/output.rs b/src/cli/output.rs
index e5340c97..1440e902 100644
--- a/src/cli/output.rs
+++ b/src/cli/output.rs
@@ -114,7 +114,7 @@ impl OutputFormatter {
            return;
        }

-        let line = "=".repeat(title.len() + 4);
+        let line = "─".repeat(title.len() + 4);
        if self.use_color {
            println!("\n{}", line.bright_blue());
            println!("{}", format!(" {} ", title).bright_blue().bold());
@@ -734,7 +734,7 @@ impl OutputFormatter {
                "{spinner:.green} [{elapsed_precise}] [{bar:40.cyan/blue}] {pos}/{len} {msg}",
            )
            .unwrap()
-            .progress_chars("#>-"),
+            .progress_chars("━╸ "),
        );
        pb.set_message(message.to_string());

@@ -753,11 +753,12 @@ impl OutputFormatter {

        sp.set_style(
            ProgressStyle::default_spinner()
+                .tick_chars("⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏")
                .template("{spinner:.green} {msg} {elapsed}")
                .unwrap(),
        );
        sp.set_message(message.to_string());
-        sp.enable_steady_tick(Duration::from_millis(100));
+        sp.enable_steady_tick(Duration::from_millis(80));

        Some(sp)
    }
@@ -858,15 +859,9 @@ impl OutputFormatter {
        println!();

        if self.use_color {
-            println!(
-                "{}",
-                "─".repeat(78).bright_black()
-            );
+            println!("{}", "─".repeat(78).bright_black());
            println!("{}", title.bright_white().bold());
-            println!(
-                "{}",
-                "─".repeat(78).bright_black()
-            );
+            println!("{}", "─".repeat(78).bright_black());
        } else {
            println!("{}", "─".repeat(78));
            println!("{}", title);
@@ -941,7 +936,11 @@ impl OutputFormatter {
        let old_str = old_value.unwrap_or("(not set)");
        let new_str = new_value.unwrap_or("(not set)");

-        let force_marker = if forces_replacement { " # forces replacement" } else { "" };
+        let force_marker = if forces_replacement {
+            " # forces replacement"
+        } else {
+            ""
+        };

        if self.use_color {
            let arrow = "→".bright_black();
@@ -976,10 +975,7 @@ impl OutputFormatter {
        println!();

        if self.use_color {
-            println!(
-                "{}",
-                "─".repeat(78).bright_black()
-            );
+            println!("{}", "─".repeat(78).bright_black());

            let total = to_add + to_change + to_destroy;
            if total == 0 {
diff --git a/src/executor/mod.rs b/src/executor/mod.rs
index a0014d0f..00a43c67 100644
--- a/src/executor/mod.rs
+++ b/src/executor/mod.rs
@@ -1507,7 +1507,6 @@ impl Executor {
    ) -> ExecutorResult> {
        debug!("Running task '{}' on {} hosts", task.name, hosts.len());

-
        // Set task-level vars (including block vars merged during parsing) on runtime
        if !task.vars.is_empty() {
            let mut rt = self.runtime.write().await;
@@ -1568,10 +1567,7 @@ impl Executor {
                        task.name.clone(),
                        failed_result.clone(),
                    ));
-                    results.insert(
-                        host.clone(),
-                        failed_result,
-                    );
+                    results.insert(host.clone(), failed_result);
                }
            }
            if let Some(rm) = &self.recovery_manager {
diff --git a/src/modules/hpc/common.rs b/src/modules/hpc/common.rs
index 2b04eaa2..82eb30f2 100644
--- a/src/modules/hpc/common.rs
+++ b/src/modules/hpc/common.rs
@@ -24,12 +24,19 @@ impl Module for HpcBaselineModule {
        context: &ModuleContext,
    ) -> ModuleResult {
        if context.check_mode {
-            return Ok(ModuleOutput::ok("Would validate HPC baseline configuration"));
+            return Ok(ModuleOutput::ok(
+                "Would validate HPC baseline configuration",
+            ));
        }

-        Ok(ModuleOutput::ok("HPC baseline validation: stub - not yet implemented")
-            .with_data("status", serde_json::json!("stub"))
-            .with_data("supported_distros", serde_json::json!(["rocky-9", "alma-9", "ubuntu-22.04"])))
+        Ok(
+            ModuleOutput::ok("HPC baseline validation: stub - not yet implemented")
+                .with_data("status", serde_json::json!("stub"))
+                .with_data(
+                    "supported_distros",
+                    serde_json::json!(["rocky-9", "alma-9", "ubuntu-22.04"]),
+                ),
+        )
    }

    fn required_params(&self) -> &[&'static str] {
diff --git a/src/modules/hpc/fs.rs b/src/modules/hpc/fs.rs
index ee7ca286..5ffb64fc 100644
--- a/src/modules/hpc/fs.rs
+++ b/src/modules/hpc/fs.rs
@@ -2,9 +2,7 @@
 //!
 //! Manages Lustre and BeeGFS client installation and mount configuration.

-use crate::modules::{
-    Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult,
-};
+use crate::modules::{Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult};

 pub struct LustreClientModule;

@@ -26,8 +24,10 @@ impl Module for LustreClientModule {
            return Ok(ModuleOutput::ok("Would configure Lustre client"));
        }

-        Ok(ModuleOutput::ok("Lustre client: stub - not yet implemented")
-            .with_data("status", serde_json::json!("stub")))
+        Ok(
+            ModuleOutput::ok("Lustre client: stub - not yet implemented")
+                .with_data("status", serde_json::json!("stub")),
+        )
    }

    fn required_params(&self) -> &[&'static str] {
@@ -55,8 +55,10 @@ impl Module for BeegfsClientModule {
            return Ok(ModuleOutput::ok("Would configure BeeGFS client"));
        }

-        Ok(ModuleOutput::ok("BeeGFS client: stub - not yet implemented")
-            .with_data("status", serde_json::json!("stub")))
+        Ok(
+            ModuleOutput::ok("BeeGFS client: stub - not yet implemented")
+                .with_data("status", serde_json::json!("stub")),
+        )
    }

    fn required_params(&self) -> &[&'static str] {
diff --git a/src/modules/hpc/gpu.rs b/src/modules/hpc/gpu.rs
index 18224e0d..4de78337 100644
--- a/src/modules/hpc/gpu.rs
+++ b/src/modules/hpc/gpu.rs
@@ -2,9 +2,7 @@
 //!
 //! Manages NVIDIA GPU drivers and configuration.

-use crate::modules::{
-    Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult,
-};
+use crate::modules::{Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult};

 pub struct NvidiaGpuModule;

@@ -26,8 +24,10 @@ impl Module for NvidiaGpuModule {
            return Ok(ModuleOutput::ok("Would configure NVIDIA GPU"));
        }

-        Ok(ModuleOutput::ok("NVIDIA GPU management: stub - not yet implemented")
-            .with_data("status", serde_json::json!("stub")))
+        Ok(
+            ModuleOutput::ok("NVIDIA GPU management: stub - not yet implemented")
+                .with_data("status", serde_json::json!("stub")),
+        )
    }

    fn required_params(&self) -> &[&'static str] {
diff --git a/src/modules/hpc/lmod.rs b/src/modules/hpc/lmod.rs
index c2e0f4b6..0794dc6a 100644
--- a/src/modules/hpc/lmod.rs
+++ b/src/modules/hpc/lmod.rs
@@ -2,9 +2,7 @@
 //!
 //! Manages Lmod installation and module path configuration.

-use crate::modules::{
-    Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult,
-};
+use crate::modules::{Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult};

 pub struct LmodModule;

@@ -26,8 +24,10 @@ impl Module for LmodModule {
            return Ok(ModuleOutput::ok("Would configure Lmod"));
        }

-        Ok(ModuleOutput::ok("Lmod configuration: stub - not yet implemented")
-            .with_data("status", serde_json::json!("stub")))
+        Ok(
+            ModuleOutput::ok("Lmod configuration: stub - not yet implemented")
+                .with_data("status", serde_json::json!("stub")),
+        )
    }

    fn required_params(&self) -> &[&'static str] {
diff --git a/src/modules/hpc/mod.rs b/src/modules/hpc/mod.rs
index 2e3d6398..87738bb4 100644
--- a/src/modules/hpc/mod.rs
+++ b/src/modules/hpc/mod.rs
@@ -42,25 +42,25 @@
 //! - Missing prerequisites → `ModuleError::ExecutionFailed` with install hint

 pub mod common;
-#[cfg(feature = "slurm")]
-pub mod slurm;
-pub mod lmod;
-pub mod mpi;
+#[cfg(feature = "parallel_fs")]
+pub mod fs;
 #[cfg(feature = "gpu")]
 pub mod gpu;
+pub mod lmod;
+pub mod mpi;
 #[cfg(feature = "ofed")]
 pub mod ofed;
-#[cfg(feature = "parallel_fs")]
-pub mod fs;
+#[cfg(feature = "slurm")]
+pub mod slurm;

 pub use common::HpcBaselineModule;
-#[cfg(feature = "slurm")]
-pub use slurm::{SlurmConfigModule, SlurmOpsModule};
-pub use lmod::LmodModule;
-pub use mpi::MpiModule;
+#[cfg(feature = "parallel_fs")]
+pub use fs::{BeegfsClientModule, LustreClientModule};
 #[cfg(feature = "gpu")]
 pub use gpu::NvidiaGpuModule;
+pub use lmod::LmodModule;
+pub use mpi::MpiModule;
 #[cfg(feature = "ofed")]
 pub use ofed::RdmaStackModule;
-#[cfg(feature = "parallel_fs")]
-pub use fs::{LustreClientModule, BeegfsClientModule};
+#[cfg(feature = "slurm")]
+pub use slurm::{SlurmConfigModule, SlurmOpsModule};
diff --git a/src/modules/hpc/mpi.rs b/src/modules/hpc/mpi.rs
index ca080fea..5cc472f3 100644
--- a/src/modules/hpc/mpi.rs
+++ b/src/modules/hpc/mpi.rs
@@ -2,9 +2,7 @@
 //!
 //! Manages MPI library installation and configuration (OpenMPI, Intel MPI).

-use crate::modules::{
-    Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult, ParamExt,
-};
+use crate::modules::{Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult, ParamExt};

 pub struct MpiModule;

@@ -22,15 +20,23 @@ impl Module for MpiModule {
        params: &ModuleParams,
        context: &ModuleContext,
    ) -> ModuleResult {
-        let flavor = params.get_string("flavor")?.unwrap_or_else(|| "openmpi".to_string());
+        let flavor = params
+            .get_string("flavor")?
+            .unwrap_or_else(|| "openmpi".to_string());

        if context.check_mode {
-            return Ok(ModuleOutput::ok(format!("Would configure MPI ({})", flavor)));
+            return Ok(ModuleOutput::ok(format!(
+                "Would configure MPI ({})",
+                flavor
+            )));
        }

-        Ok(ModuleOutput::ok(format!("MPI config ({}): stub - not yet implemented", flavor))
-            .with_data("status", serde_json::json!("stub"))
-            .with_data("flavor", serde_json::json!(flavor)))
+        Ok(ModuleOutput::ok(format!(
+            "MPI config ({}): stub - not yet implemented",
+            flavor
+        ))
+        .with_data("status", serde_json::json!("stub"))
+        .with_data("flavor", serde_json::json!(flavor)))
    }

    fn required_params(&self) -> &[&'static str] {
diff --git a/src/modules/hpc/ofed.rs b/src/modules/hpc/ofed.rs
index da87f654..7cf6447c 100644
--- a/src/modules/hpc/ofed.rs
+++ b/src/modules/hpc/ofed.rs
@@ -2,9 +2,7 @@
 //!
 //! Manages RDMA userland packages and kernel module configuration.

-use crate::modules::{
-    Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult,
-};
+use crate::modules::{Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult};

 pub struct RdmaStackModule;

@@ -26,8 +24,10 @@ impl Module for RdmaStackModule {
            return Ok(ModuleOutput::ok("Would configure RDMA stack"));
        }

-        Ok(ModuleOutput::ok("RDMA stack configuration: stub - not yet implemented")
-            .with_data("status", serde_json::json!("stub")))
+        Ok(
+            ModuleOutput::ok("RDMA stack configuration: stub - not yet implemented")
+                .with_data("status", serde_json::json!("stub")),
+        )
    }

    fn required_params(&self) -> &[&'static str] {
diff --git a/src/modules/hpc/slurm.rs b/src/modules/hpc/slurm.rs
index ac538cc9..d9fb2402 100644
--- a/src/modules/hpc/slurm.rs
+++ b/src/modules/hpc/slurm.rs
@@ -4,9 +4,7 @@
 //! - `slurm_config`: Manage slurm.conf, cgroup.conf, gres.conf
 //! - `slurm_ops`: Cluster operations (reconfigure, drain, resume)

-use crate::modules::{
-    Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult, ParamExt,
-};
+use crate::modules::{Module, ModuleContext, ModuleOutput, ModuleParams, ModuleResult, ParamExt};

 pub struct SlurmConfigModule;

@@ -28,8 +26,10 @@ impl Module for SlurmConfigModule {
            return Ok(ModuleOutput::ok("Would configure Slurm"));
        }

-        Ok(ModuleOutput::ok("Slurm configuration: stub - not yet implemented")
-            .with_data("status", serde_json::json!("stub")))
+        Ok(
+            ModuleOutput::ok("Slurm configuration: stub - not yet implemented")
+                .with_data("status", serde_json::json!("stub")),
+        )
    }

    fn required_params(&self) -> &[&'static str] {
@@ -56,12 +56,18 @@ impl Module for SlurmOpsModule {
        let action = params.get_string("action")?.unwrap_or_default();

        if context.check_mode {
-            return Ok(ModuleOutput::ok(format!("Would perform Slurm action: {}", action)));
+            return Ok(ModuleOutput::ok(format!(
+                "Would perform Slurm action: {}",
+                action
+            )));
        }

-        Ok(ModuleOutput::ok(format!("Slurm ops '{}': stub - not yet implemented", action))
-            .with_data("status", serde_json::json!("stub"))
-            .with_data("action", serde_json::json!(action)))
+        Ok(ModuleOutput::ok(format!(
+            "Slurm ops '{}': stub - not yet implemented",
+            action
+        ))
+        .with_data("status", serde_json::json!("stub"))
+        .with_data("action", serde_json::json!(action)))
    }

    fn required_params(&self) -> &[&'static str] {
diff --git a/src/modules/script.rs b/src/modules/script.rs
index bdac4fb6..711ad275 100644
--- a/src/modules/script.rs
+++ b/src/modules/script.rs
@@ -640,7 +640,7 @@ mod tests {
                .collect::>()
                .join(" ");

-                format!(
+            format!(
                "{} {} {}",
                safe_exec,
                shell_escape(&remote_path),
@@ -650,7 +650,7 @@ mod tests {
                    .join(" ")
            )
        } else {
-                String::new()
+            String::new()
        };

        // Injection should be neutralized (comment stripped, parts escaped)
diff --git a/src/modules/template.rs b/src/modules/template.rs
index dd3623d1..92344948 100644
--- a/src/modules/template.rs
+++ b/src/modules/template.rs
@@ -10,7 +10,7 @@ use super::{
 use crate::connection::TransferOptions;
 use crate::template::TEMPLATE_ENGINE;
 use crate::utils::shell_escape;
-use serde::ser::{Serialize, Serializer, SerializeMap};
+use serde::ser::{Serialize, SerializeMap, Serializer};
 use std::collections::{BTreeSet, HashMap};
 use std::fs;
 #[cfg(unix)]
diff --git a/src/provisioning/executor.rs b/src/provisioning/executor.rs
index 7f05576b..2ecfe095 100644
--- a/src/provisioning/executor.rs
+++ b/src/provisioning/executor.rs
@@ -230,13 +230,12 @@ impl ProvisioningExecutor {
        executor_config: ExecutorConfig,
    ) -> ProvisioningResult {
        // Resolve backend and load state
-        let state_backend: Arc<dyn StateBackend> = if let Some(ref backend_config) =
-            executor_config.state_backend
-        {
-            Arc::from(backend_config.create_backend().await?)
-        } else {
-            Arc::new(LocalBackend::new(executor_config.state_path.clone()))
-        };
+        let state_backend: Arc<dyn StateBackend> =
+            if let Some(ref backend_config) = executor_config.state_backend {
+                Arc::from(backend_config.create_backend().await?)
+            } else {
+                Arc::new(LocalBackend::new(executor_config.state_path.clone()))
+            };

        let state = match state_backend.load().await? {
            Some(state) => state,
diff --git a/src/provisioning/state_backends.rs b/src/provisioning/state_backends.rs
index 73961e82..a7736bcb 100644
--- a/src/provisioning/state_backends.rs
+++ b/src/provisioning/state_backends.rs
@@ -2816,8 +2816,7 @@ path: state.json
            .mount(&server)
            .await;

-        let backend = ConsulBackend::new("rustible/state".to_string())
-            .with_address(server.uri());
+        let backend = ConsulBackend::new("rustible/state".to_string()).with_address(server.uri());

        let loaded = backend.load().await.unwrap().unwrap();
        assert_eq!(loaded.resources.len(), 1);
@@ -2833,10 +2832,7 @@ path: state.json

        Mock::given(method("PUT"))
            .and(path("/v1/session/create"))
-            .respond_with(
-                ResponseTemplate::new(200)
-                    .set_body_string(r#"{"ID":"session-123"}"#),
-            )
+            .respond_with(ResponseTemplate::new(200).set_body_string(r#"{"ID":"session-123"}"#))
            .mount(&server)
            .await;
        Mock::given(method("PUT"))
@@ -2871,8 +2867,7 @@ path: state.json
            .mount(&server)
            .await;

-        let backend = ConsulBackend::new("rustible/state".to_string())
-            .with_address(server.uri());
+        let backend = ConsulBackend::new("rustible/state".to_string()).with_address(server.uri());

        let lock_backend = backend.lock_backend().unwrap();
        assert!(lock_backend
diff --git a/src/secrets/vault.rs b/src/secrets/vault.rs
index 29bab9b1..b2cfc7e7 100644
--- a/src/secrets/vault.rs
+++ b/src/secrets/vault.rs
@@ -458,9 +458,10 @@ impl VaultClient for HttpVaultClient {
            request = request.header("X-Vault-Namespace", namespace);
        }

-        let response = request.send().await.map_err(|e| {
-            SecretError::Connection(format!("Failed to connect to Vault: {}", e))
-        })?;
+        let response = request
+            .send()
+            .await
+            .map_err(|e| SecretError::Connection(format!("Failed to connect to Vault: {}", e)))?;

        let status = response.status();
        if status == reqwest::StatusCode::NOT_FOUND {
@@ -524,14 +525,15 @@ impl VaultClient for HttpVaultClient {

        let client = reqwest::Client::new();
        let mut request = client
-            .request(reqwest::Method::from_bytes(b"LIST").unwrap_or(reqwest::Method::GET), &url)
+            .request(
+                reqwest::Method::from_bytes(b"LIST").unwrap_or(reqwest::Method::GET),
+                &url,
+            )
            .header("X-Vault-Token", token);

        // Fallback: use GET with list=true query param
        let url_with_list = format!("{}?list=true", url);
-        let mut request_fallback = client
-            .get(&url_with_list)
-            .header("X-Vault-Token", token);
+        let mut request_fallback = client.get(&url_with_list).header("X-Vault-Token", token);

        if let Some(ref namespace) = self.config.namespace {
            request = request.header("X-Vault-Namespace", namespace);
@@ -541,9 +543,10 @@ impl VaultClient for HttpVaultClient {
        // Try LIST method first, fall back to GET with list=true
        let response = match request.send().await {
            Ok(resp) if resp.status().is_success() => resp,
-            _ => request_fallback.send().await.map_err(|e| {
-                SecretError::Connection(format!("Failed to list secrets: {}", e))
-            })?,
+            _ => request_fallback
+                .send()
+                .await
+                .map_err(|e| SecretError::Connection(format!("Failed to list secrets: {}", e)))?,
        };

        let status = response.status();
@@ -562,10 +565,7 @@ impl VaultClient for HttpVaultClient {
            SecretError::Serialization(format!("Failed to parse Vault list response: {}", e))
        })?;

-        Ok(vault_response
-            .data
-            .and_then(|d| d.keys)
-            .unwrap_or_default())
+        Ok(vault_response.data.and_then(|d| d.keys).unwrap_or_default())
    }

    async fn put_secret(&self, path: &str, data: HashMap<String, String>) -> SecretResult<()> {
@@ -588,18 +588,16 @@ impl VaultClient for HttpVaultClient {
        };

        let client = reqwest::Client::new();
-        let mut request = client
-            .post(&url)
-            .header("X-Vault-Token", token)
-            .json(&body);
+        let mut request = client.post(&url).header("X-Vault-Token", token).json(&body);

        if let Some(ref namespace) = self.config.namespace {
            request = request.header("X-Vault-Namespace", namespace);
        }

-        let response = request.send().await.map_err(|e| {
-            SecretError::Connection(format!("Failed to write secret: {}", e))
-        })?;
+        let response = request
+            .send()
+            .await
+            .map_err(|e| SecretError::Connection(format!("Failed to write secret: {}", e)))?;

        let status = response.status();
        if status == reqwest::StatusCode::FORBIDDEN {
@@ -642,9 +640,10 @@ impl VaultClient for HttpVaultClient {
            request = request.header("X-Vault-Namespace", namespace);
        }

-        let response = request.send().await.map_err(|e| {
-            SecretError::Connection(format!("Failed to delete secret: {}", e))
-        })?;
+        let response = request
+            .send()
+            .await
+            .map_err(|e| SecretError::Connection(format!("Failed to delete secret: {}", e)))?;

        let status = response.status();
        if status == reqwest::StatusCode::NOT_FOUND {
@@ -701,7 +700,9 @@ impl VaultClient for HttpVaultClient {
        let status = response.status();
        match status.as_u16() {
            200 | 429 | 472 | 473 => Ok(true),
-            501 => Err(SecretError::Configuration("Vault is not initialized".into())),
+            501 => Err(SecretError::Configuration(
+                "Vault is not initialized".into(),
+            )),
            503 => Err(SecretError::Sealed("Vault is sealed".into())),
            _ => {
                let body_text = response.text().await.unwrap_or_default();
@@ -968,11 +969,7 @@ mod tests {
            Ok(self.secrets.lock().unwrap().keys().cloned().collect())
        }

-        async fn put_secret(
-            &self,
-            path: &str,
-            data: HashMap<String, String>,
-        ) -> SecretResult<()> {
+        async fn put_secret(&self, path: &str, data: HashMap<String, String>) -> SecretResult<()> {
            self.secrets.lock().unwrap().insert(path.to_string(), data);
            Ok(())
        }
@@ -1040,10 +1037,7 @@ mod tests {
        let mut data = HashMap::new();
        data.insert("api_key".to_string(), "sk-12345".to_string());

-        provider
-            .put_secret("secret/data/api", data)
-            .await
-            .unwrap();
+        provider.put_secret("secret/data/api", data).await.unwrap();

        let secret = provider.get_secret("secret/data/api").await.unwrap();
        assert_eq!(secret.get_string("api_key").unwrap(), "sk-12345");
diff --git a/src/template.rs b/src/template.rs
index 3b6349f7..3487ba5d 100644
--- a/src/template.rs
+++ b/src/template.rs
@@ -310,7 +310,11 @@ impl TemplateEngine {
    ///
    /// This allows rendering with custom structs or optimized context wrappers
    /// without converting to serde_json::Value first.
-    pub fn render_serialize<S: Serialize>(&self, template: &str, vars: &S) -> Result<String> {
+    pub fn render_serialize<S: Serialize>(
+        &self,
+        template: &str,
+        vars: &S,
+    ) -> Result<String> {
        self.render_cached(template, vars)
    }

@@ -1710,9 +1714,7 @@ mod tests {
        );

        // Test default indent (should be 4)
-        let result_default = engine
-            .render("{{ data | to_nice_json }}", &vars)
-            .unwrap();
+        let result_default = engine.render("{{ data | to_nice_json }}", &vars).unwrap();
        assert!(result_default.contains(r#""a": 1"#));
        assert!(result_default.contains(r#""b": ["#));

diff --git a/tests/hpc_blueprint_tests.rs b/tests/hpc_blueprint_tests.rs
index aea1a059..692ee348 100644
--- a/tests/hpc_blueprint_tests.rs
+++ b/tests/hpc_blueprint_tests.rs
@@ -59,8 +59,7 @@ fn test_healthcheck_playbook_exists() {

 #[test]
 fn test_onprem_inventory_parses_as_valid_yaml() {
-    let content =
-        std::fs::read_to_string("examples/hpc/inventories/onprem/hosts.yml").unwrap();
+    let content = std::fs::read_to_string("examples/hpc/inventories/onprem/hosts.yml").unwrap();
    let value: serde_yaml::Value = serde_yaml::from_str(&content).unwrap();

    assert!(value.is_mapping(), "Inventory must be a YAML mapping");
@@ -99,8 +98,7 @@ fn test_cloud_burst_inventory_parses_as_valid_yaml() {

 #[test]
 fn test_onprem_has_expected_host_count() {
-    let content =
-        std::fs::read_to_string("examples/hpc/inventories/onprem/hosts.yml").unwrap();
+    let content = std::fs::read_to_string("examples/hpc/inventories/onprem/hosts.yml").unwrap();
    let value: serde_yaml::Value = serde_yaml::from_str(&content).unwrap();
    let all = value.get("all").unwrap();
    let children = all.get("children").unwrap();
@@ -170,7 +168,10 @@ fn test_all_group_vars_parse_as_valid_yaml() {
    for entry in std::fs::read_dir(dir_path).unwrap() {
        let entry = entry.unwrap();
        let path = entry.path();
-        if path.extension().map_or(false, |e| e == "yml" || e == "yaml") {
+        if path
+            .extension()
+            .map_or(false, |e| e == "yml" || e == "yaml")
+        {
            let content = std::fs::read_to_string(&path).unwrap();
            let _: serde_yaml::Value = serde_yaml::from_str(&content).unwrap_or_else(|e| {
                panic!("Failed to parse {}: {}", path.display(), e);