diff --git a/kani-driver/src/args/mod.rs b/kani-driver/src/args/mod.rs index 2481d2477554..8fec5c8f295d 100644 --- a/kani-driver/src/args/mod.rs +++ b/kani-driver/src/args/mod.rs @@ -242,6 +242,9 @@ pub struct VerificationArgs { #[arg(long)] pub default_unwind: Option, + #[arg(long = "export-json")] + pub export_json: Option, + /// When specified, the harness filter will only match the exact fully qualified name of a harness #[arg(long, requires("harnesses"))] pub exact: bool, diff --git a/kani-driver/src/call_cbmc.rs b/kani-driver/src/call_cbmc.rs index 3eeede9f68e4..162f14b3b708 100644 --- a/kani-driver/src/call_cbmc.rs +++ b/kani-driver/src/call_cbmc.rs @@ -26,6 +26,154 @@ use crate::coverage::cov_results::{CoverageRegion, CoverageTerm}; use crate::session::KaniSession; use crate::util::render_command; +/// CBMC version and system information +#[derive(Debug, Clone)] +pub struct CbmcInfo { + pub version: String, + pub os_info: String, +} + +/// CBMC runtime and execution statistics +#[derive(Debug, Clone, Default)] +pub struct CbmcStats { + pub runtime_symex_s: Option, + pub size_program_expression: Option, + pub slicing_removed_assignments: Option, + pub vccs_generated: Option, + pub vccs_remaining: Option, + pub runtime_postprocess_equation_s: Option, + pub runtime_convert_ssa_s: Option, + pub runtime_post_process_s: Option, + pub runtime_solver_s: Option, + pub runtime_decision_procedure_s: Option, +} + +impl KaniSession { + /// Get CBMC version and system information + pub fn get_cbmc_info(&self) -> Result { + let output = std::process::Command::new("cbmc") + .arg("--version") + .output() + .map_err(|_| anyhow::Error::msg("Failed to run cbmc --version"))?; + + let version_output = String::from_utf8_lossy(&output.stdout); + let lines: Vec<&str> = version_output.lines().collect(); + + // Extract version from first line (e.g., "6.7.1 (cbmc-6.7.1)") + let version = lines + .first() + .and_then(|line| line.split_whitespace().next()) + .unwrap_or("unknown") + 
.to_string(); + + // For OS info, we'll use the system information since CBMC --version doesn't provide it + let os_info = format!( + "{} {} {}", + std::env::consts::ARCH, + std::env::consts::OS, + std::env::consts::FAMILY + ); + + Ok(CbmcInfo { version, os_info }) + } + + /// Extract CBMC statistics from a message + fn extract_cbmc_stats_from_message(message: &str) -> Option { + let mut stats = CbmcStats::default(); + let mut found_any = false; + + // Example: "Runtime Symex: 0.00408627s" + if let Some(captures) = + regex::Regex::new(r"Runtime Symex: ([-e\d\.]+)s").ok()?.captures(message) + && let Ok(val) = captures[1].parse::() + { + stats.runtime_symex_s = Some(val); + found_any = true; + } + + // Example: "size of program expression: 150 steps" + if let Some(captures) = + regex::Regex::new(r"size of program expression: (\d+) steps").ok()?.captures(message) + && let Ok(val) = captures[1].parse::() + { + stats.size_program_expression = Some(val); + found_any = true; + } + + // Example: "slicing removed 81 assignments" + if let Some(captures) = + regex::Regex::new(r"slicing removed (\d+) assignments").ok()?.captures(message) + && let Ok(val) = captures[1].parse::() + { + stats.slicing_removed_assignments = Some(val); + found_any = true; + } + + // Example: "Generated 1 VCC(s), 1 remaining after simplification" + if let Some(captures) = + regex::Regex::new(r"Generated (\d+) VCC\(s\), (\d+) remaining after simplification") + .ok()? 
+ .captures(message) + { + if let Ok(generated) = captures[1].parse::() { + stats.vccs_generated = Some(generated); + found_any = true; + } + if let Ok(remaining) = captures[2].parse::() { + stats.vccs_remaining = Some(remaining); + found_any = true; + } + } + + // Example: "Runtime Postprocess Equation: 0.000767182s" + if let Some(captures) = + regex::Regex::new(r"Runtime Postprocess Equation: ([-e\d\.]+)s").ok()?.captures(message) + && let Ok(val) = captures[1].parse::() + { + stats.runtime_postprocess_equation_s = Some(val); + found_any = true; + } + + // Example: "Runtime Convert SSA: 0.000516981s" + if let Some(captures) = + regex::Regex::new(r"Runtime Convert SSA: ([-e\d\.]+)s").ok()?.captures(message) + && let Ok(val) = captures[1].parse::() + { + stats.runtime_convert_ssa_s = Some(val); + found_any = true; + } + + // Example: "Runtime Post-process: 0.000189636s" + if let Some(captures) = + regex::Regex::new(r"Runtime Post-process: ([-e\d\.]+)s").ok()?.captures(message) + && let Ok(val) = captures[1].parse::() + { + stats.runtime_post_process_s = Some(val); + found_any = true; + } + + // Example: "Runtime Solver: 0.00167592s" + if let Some(captures) = + regex::Regex::new(r"Runtime Solver: ([-e\d\.]+)s").ok()?.captures(message) + && let Ok(val) = captures[1].parse::() + { + stats.runtime_solver_s = Some(val); + found_any = true; + } + + // Example: "Runtime decision procedure: 0.00452419s" + if let Some(captures) = + regex::Regex::new(r"Runtime decision procedure: ([-e\d\.]+)s").ok()?.captures(message) + && let Ok(val) = captures[1].parse::() + { + stats.runtime_decision_procedure_s = Some(val); + found_any = true; + } + + if found_any { Some(stats) } else { None } + } +} + /// We will use Cadical by default since it performed better than MiniSAT in our analysis. /// Note: Kissat was marginally better, but it is an external solver which could be more unstable. 
static DEFAULT_SOLVER: CbmcSolver = CbmcSolver::Cadical; @@ -75,6 +223,8 @@ pub struct VerificationResult { pub generated_concrete_test: bool, /// The coverage results pub coverage_results: Option, + /// CBMC execution statistics extracted from messages + pub cbmc_stats: Option, } impl KaniSession { @@ -161,6 +311,7 @@ impl KaniSession { runtime: start_time.elapsed(), generated_concrete_test: false, coverage_results: None, + cbmc_stats: None, }) } } @@ -325,7 +476,56 @@ impl VerificationResult { start_time: Instant, ) -> VerificationResult { let runtime = start_time.elapsed(); - let (_, results) = extract_results(output.processed_items); + let (remaining_items, results) = extract_results(output.processed_items); + + // Collect CBMC stats from messages + let mut cbmc_stats = CbmcStats::default(); + for item in &remaining_items { + if let crate::cbmc_output_parser::ParserItem::Message { message_text, .. } = item + && let Some(stats) = KaniSession::extract_cbmc_stats_from_message(message_text) + { + // Merge stats (later messages may have more complete info) + if stats.runtime_symex_s.is_some() { + cbmc_stats.runtime_symex_s = stats.runtime_symex_s; + } + if stats.size_program_expression.is_some() { + cbmc_stats.size_program_expression = stats.size_program_expression; + } + if stats.slicing_removed_assignments.is_some() { + cbmc_stats.slicing_removed_assignments = stats.slicing_removed_assignments; + } + if stats.vccs_generated.is_some() { + cbmc_stats.vccs_generated = stats.vccs_generated; + } + if stats.vccs_remaining.is_some() { + cbmc_stats.vccs_remaining = stats.vccs_remaining; + } + if stats.runtime_postprocess_equation_s.is_some() { + cbmc_stats.runtime_postprocess_equation_s = + stats.runtime_postprocess_equation_s; + } + if stats.runtime_convert_ssa_s.is_some() { + cbmc_stats.runtime_convert_ssa_s = stats.runtime_convert_ssa_s; + } + if stats.runtime_post_process_s.is_some() { + cbmc_stats.runtime_post_process_s = stats.runtime_post_process_s; + } + if 
stats.runtime_solver_s.is_some() { + cbmc_stats.runtime_solver_s = stats.runtime_solver_s; + } + if stats.runtime_decision_procedure_s.is_some() { + cbmc_stats.runtime_decision_procedure_s = stats.runtime_decision_procedure_s; + } + } + } + + let cbmc_stats = if cbmc_stats.runtime_symex_s.is_some() + || cbmc_stats.size_program_expression.is_some() + { + Some(cbmc_stats) + } else { + None + }; if let Some(results) = results { let (status, failed_properties) = @@ -338,6 +538,7 @@ impl VerificationResult { runtime, generated_concrete_test: false, coverage_results, + cbmc_stats, } } else { // We never got results from CBMC - something went wrong (e.g. crash) so it's failure @@ -353,6 +554,7 @@ impl VerificationResult { runtime, generated_concrete_test: false, coverage_results: None, + cbmc_stats, } } } @@ -365,6 +567,7 @@ impl VerificationResult { runtime: Duration::from_secs(0), generated_concrete_test: false, coverage_results: None, + cbmc_stats: None, } } @@ -379,6 +582,7 @@ impl VerificationResult { runtime: Duration::from_secs(0), generated_concrete_test: false, coverage_results: None, + cbmc_stats: None, } } diff --git a/kani-driver/src/frontend/json_handler.rs b/kani-driver/src/frontend/json_handler.rs new file mode 100644 index 000000000000..f9214ab73437 --- /dev/null +++ b/kani-driver/src/frontend/json_handler.rs @@ -0,0 +1,51 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +use serde_json::{Value, json}; +use std::path::PathBuf; + +/// A handler for building and exporting JSON data structures. +/// +/// `JsonHandler` provides a convenient interface for constructing JSON objects, +/// adding key-value pairs, appending to arrays, and exporting the final result +/// to a file. +pub struct JsonHandler { + /// The JSON data being constructed. + pub(crate) data: Value, + /// Optional path where the JSON data will be exported. 
+ export_path: Option, +} + +impl JsonHandler { + /// Creates a new `JsonHandler` with an optional export path. + /// If `export_path` is `None`, calls to `export()` will be no-ops. + pub fn new(export_path: Option) -> Self { + Self { data: json!({}), export_path } + } + + /// Adds or updates a key-value pair in the JSON object. + /// If the key already exists, its value will be overwritten. + pub fn add_item(&mut self, key: &str, value: Value) { + self.data[key] = value; + } + + /// Appends a value to the array at the specified key. + /// Creates a new array if the key doesn't exist or is null. + /// Panics if the key exists but is not an array or null. + pub fn add_harness_detail(&mut self, key: &str, value: Value) { + if self.data[key].is_null() { + self.data[key] = json!([]); + } + self.data[key].as_array_mut().unwrap().push(value); + } + + /// Exports the JSON data to the configured file path with pretty-printing. + /// Returns an error if the file cannot be written. + pub fn export(&self) -> Result<(), std::io::Error> { + if let Some(path) = &self.export_path { + std::fs::write(path, serde_json::to_string_pretty(&self.data)?) + } else { + Ok(()) + } + } +} diff --git a/kani-driver/src/frontend/mod.rs b/kani-driver/src/frontend/mod.rs new file mode 100644 index 000000000000..61431fb24ffc --- /dev/null +++ b/kani-driver/src/frontend/mod.rs @@ -0,0 +1,14 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! Frontend module for handling different output formats and JSON generation +//! 
This module separates the JSON handling logic from the main verification logic + +pub mod json_handler; +pub mod schema_utils; + +pub use json_handler::JsonHandler; +pub use schema_utils::*; + +#[cfg(test)] +mod tests; diff --git a/kani-driver/src/frontend/schema_utils.rs b/kani-driver/src/frontend/schema_utils.rs new file mode 100644 index 000000000000..49e380165ef4 --- /dev/null +++ b/kani-driver/src/frontend/schema_utils.rs @@ -0,0 +1,283 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// Utility functions for creating structured JSON schemas +// This module contains helper functions to convert Kani internal structures to JSON + +use crate::call_cbmc::VerificationStatus; +use crate::frontend::JsonHandler; +use crate::harness_runner::HarnessResult; +use crate::project::Project; +use crate::session::KaniSession; +use anyhow::Result; +use kani_metadata::HarnessMetadata; +use serde::Serialize; +use serde_json::{Value, json}; +use time::OffsetDateTime; +use time::format_description::well_known::Rfc3339; + +/// Creates structured JSON metadata for an export run +/// This utility function captures basic environment for the whole session +pub fn create_metadata_json() -> Value { + let timestamp = OffsetDateTime::now_utc() + .format(&Rfc3339) + .unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string()); + + let kani_version = env!("CARGO_PKG_VERSION"); + let target = env!("TARGET"); + let build_mode = if cfg!(debug_assertions) { "debug" } else { "release" }; + + json!({ + "version": "1.0", + "timestamp": timestamp, + "kani_version": kani_version, + "target": target, + "build_mode": build_mode, + }) +} + +/// Creates structured JSON metadata for the project +/// This utility function captures detailed info for the project +pub fn create_project_metadata_json(project: &Project) -> Value { + json!({ + "crate_name": project.metadata.iter() + .map(|m| m.crate_name.clone()) + .collect::>(), + "workspace_root": project.outdir.clone(), + }) +} 
+/// Creates structured JSON metadata for a harness +/// This utility function separates harness metadata creation from the main verification logic +pub fn create_harness_metadata_json(h: &HarnessMetadata) -> Value { + json!({ + "pretty_name": h.pretty_name, // use this as identifier + "mangled_name": h.mangled_name, + "crate_name": h.crate_name, + "source": { + "file": h.original_file, + "start_line": h.original_start_line, + "end_line": h.original_end_line + }, + "goto_file": h.goto_file.as_ref().map(|p| p.to_string_lossy().to_string()), + "attributes": { + "kind": format!("{:?}", h.attributes.kind), + "should_panic": h.attributes.should_panic, + }, + "contract":{ + "contracted_function_name": h.contract.as_ref() + .map(|c| c.contracted_function_name.as_str()), + "recursion_tracker": h.contract.as_ref() + .and_then(|c| c.recursion_tracker.as_ref()), + }, + "has_loop_contracts": h.has_loop_contracts, + "is_automatically_generated": h.is_automatically_generated, + + }) +} + +/// Creates verification result JSON with harness reference +/// This reduces duplication between harness metadata and verification results +pub fn create_verification_result_json(result: &HarnessResult) -> Value { + // Extract detailed verification results as "checks" + let checks = match &result.result.results { + Ok(properties) => { + properties.iter().enumerate().map(|(i, prop)| { + json!({ + "id": i + 1, + "function": prop.property_id.fn_name.as_ref().unwrap_or(&"unknown".to_string()), + "status": format!("{:?}", prop.status), + "description": prop.description, + "location": { + "file": prop.source_location.file.as_ref().unwrap_or(&"unknown".to_string()), + "line": prop.source_location.line.as_ref().unwrap_or(&"unknown".to_string()), + "column": prop.source_location.column.as_ref().unwrap_or(&"unknown".to_string()), + }, + "category": prop.property_id.class, + }) + }).collect::>() + }, + Err(_) => vec![], + }; + + json!({ + "harness_id": result.harness.pretty_name, // Reference to harness 
instead of duplicating name + "status": match result.result.status { + VerificationStatus::Success => "Success", + VerificationStatus::Failure => "Failure", + }, + "duration_ms": (result.result.runtime.as_millis() as u64), + "checks": checks, + }) +} + +/// Creates a verification summary with clean structure +pub fn create_verification_summary_json( + results: &[HarnessResult], + selected: usize, + status_label: &str, +) -> Value { + let successful = + results.iter().filter(|r| r.result.status == VerificationStatus::Success).count(); + let failed = results.len() - successful; + let total_duration_ms: u64 = results.iter().map(|r| r.result.runtime.as_millis() as u64).sum(); + + let verification_results: Vec<_> = + results.iter().map(|r| create_verification_result_json(r)).collect(); + + json!({ + "summary": { + "total_harnesses": selected, + "executed": results.len(), + "status": status_label, + "successful": successful, + "failed": failed, + "duration_ms": total_duration_ms + }, + "results": verification_results + }) +} + +/// Helper function to add verification results to JsonHandler +/// This utility function encapsulates the logic for adding verification summary to JSON output +pub fn add_runner_results_to_json( + handler: &mut JsonHandler, + results: &[HarnessResult], + selected: usize, + status_label: &str, +) { + // Use frontend utility to create structured verification summary + let summary = create_verification_summary_json(results, selected, status_label); + handler.add_item("verification_results", summary); +} + +/// Process harness results and enrich JSON handler with additional metadata. +/// This function handles the complex harness processing logic, combining verification results +/// with harness metadata to create enriched JSON output. 
+pub fn process_harness_results( + handler: &mut JsonHandler, + harnesses: &[&HarnessMetadata], + results: &[HarnessResult], +) -> Result<()> { + // The main verification results are handled by the harness runner + for h in harnesses { + let harness_result = results.iter().find(|r| r.harness.pretty_name == h.pretty_name); + + // Add error details for this harness + if let Some(result) = harness_result { + handler.add_item("error_details", match result.result.status { + VerificationStatus::Failure => { + json!({ + "has_errors": true, + "error_type": match result.result.failed_properties { + crate::call_cbmc::FailedProperties::None => "unknown_failure", + crate::call_cbmc::FailedProperties::PanicsOnly => "assertion_failure", + crate::call_cbmc::FailedProperties::Other => "verification_failure", + }, + "failed_properties_type": format!("{:?}", result.result.failed_properties), + "exit_status": match &result.result.results { + Err(crate::call_cbmc::ExitStatus::Timeout) => "timeout".to_string(), + Err(crate::call_cbmc::ExitStatus::OutOfMemory) => "out_of_memory".to_string(), + Err(crate::call_cbmc::ExitStatus::Other(code)) => format!("exit_code_{}", code), + Ok(_) => "properties_failed".to_string() + } + }) + }, + VerificationStatus::Success => json!({ + "has_errors": false + }) + }); + + // Add property details for this harness + handler.add_harness_detail("property_details", json!({ + "property_details": match &result.result.results { + Ok(properties) => { + let total_properties = properties.len(); + let passed_properties = properties.iter().filter(|p| matches!(p.status, crate::cbmc_output_parser::CheckStatus::Success)).count(); + let failed_properties = properties.iter().filter(|p| matches!(p.status, crate::cbmc_output_parser::CheckStatus::Failure)).count(); + + json!({ + "total_properties": total_properties, + "passed": passed_properties, + "failed": failed_properties, + "unreachable": total_properties - passed_properties - failed_properties + }) + }, + Err(_) => 
json!({ + "total_properties": 0, + "error": "Could not extract property details due to verification failure" + }) + } + })); + } + } + + Ok(()) +} + +pub fn process_cbmc_results( + handler: &mut JsonHandler, + harnesses: &[&HarnessMetadata], + results: &[HarnessResult], + session: &KaniSession, +) -> Result<()> { + let cbmc_info_opt = session.get_cbmc_info().ok(); + for h in harnesses { + let harness_result = results.iter().find(|r| r.harness.pretty_name == h.pretty_name); + handler.add_harness_detail("cbmc", json!({ + // basic name for harnesses + "harness_id": h.pretty_name, + + // Per-harness CBMC info (key-value pairs) without parsing CBMC stdout + "cbmc_metadata": { + // Version / OS info (same for all harnesses in a run) + "version": cbmc_info_opt.as_ref().map(|i| i.version.clone()), + "os_info": cbmc_info_opt.as_ref().map(|i| i.os_info.clone()), + }, + // Configuration passed to CBMC for this harness + "configuration": { + "object_bits": session.args.cbmc_object_bits(), + "solver": h.attributes.solver.as_ref().map(|s| format!("{:?}", s)).unwrap_or_else(|| "Cadical".to_string()), + }, + + // CBMC execution statistics extracted from messages + "cbmc_stats": harness_result.and_then(|r| r.result.cbmc_stats.as_ref()).map(|s| json!({ + "runtime_symex_s": s.runtime_symex_s, + "size_program_expression": s.size_program_expression, + "slicing_removed_assignments": s.slicing_removed_assignments, + "vccs_generated": s.vccs_generated, + "vccs_remaining": s.vccs_remaining, + "runtime_postprocess_equation_s": s.runtime_postprocess_equation_s, + "runtime_convert_ssa_s": s.runtime_convert_ssa_s, + "runtime_post_process_s": s.runtime_post_process_s, + "runtime_solver_s": s.runtime_solver_s, + "runtime_decision_procedure_s": s.runtime_decision_procedure_s + })) + })); + } + Ok(()) +} + +/// Simple container to standardize tool outputs captured during verification +#[derive(Serialize)] +#[allow(dead_code)] +pub struct ToolOutput<'a> { + /// Arbitrary tool name key under which 
this output will be grouped + pub tool: &'a str, + /// Harness identifier this output belongs to + pub harness_id: &'a str, + /// Unparsed stdout text emitted by the tool + pub stdout: &'a str, +} + +/// Add a tool output entry to the JSON under a tool-named array +#[allow(dead_code)] +pub fn add_tool_output(handler: &mut JsonHandler, output: ToolOutput<'_>) { + // structure: top-level key is the tool name, value is an array of entries + handler.add_harness_detail( + output.tool, + json!({ + "harness_id": output.harness_id, + "stdout": output.stdout, + }), + ); +} diff --git a/kani-driver/src/frontend/tests/mod.rs b/kani-driver/src/frontend/tests/mod.rs new file mode 100644 index 000000000000..cb53347d30eb --- /dev/null +++ b/kani-driver/src/frontend/tests/mod.rs @@ -0,0 +1,9 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +//! Tests for the frontend module +//! This module contains tests for the schema_utils module +//! and the json_handler module + +#[cfg(test)] +mod schema_utils_test; diff --git a/kani-driver/src/frontend/tests/schema_utils_test.rs b/kani-driver/src/frontend/tests/schema_utils_test.rs new file mode 100644 index 000000000000..ac2bbb54842a --- /dev/null +++ b/kani-driver/src/frontend/tests/schema_utils_test.rs @@ -0,0 +1,220 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +/// Tests for the schema_utils module +/// This module contains tests for the schema_utils module +/// and the json_handler module +use crate::call_cbmc::{ExitStatus, FailedProperties, VerificationResult, VerificationStatus}; +use crate::cbmc_output_parser::{CheckStatus, Property, PropertyId, SourceLocation}; +use crate::frontend::JsonHandler; +use crate::frontend::schema_utils::{ + add_runner_results_to_json, create_harness_metadata_json, create_metadata_json, + create_project_metadata_json, create_verification_result_json, + create_verification_summary_json, +}; +use crate::harness_runner::HarnessResult; 
+use crate::project::Project; +use kani_metadata::{HarnessAttributes, HarnessKind, HarnessMetadata, KaniMetadata}; +use std::path::PathBuf; +use std::time::Duration; +#[test] +fn test_create_metadata_json() { + let json = create_metadata_json(); + + assert!(json.is_object()); + assert_eq!(json["version"], "1.0"); + assert!(json["timestamp"].as_str().unwrap().contains('T')); + assert!(["debug", "release"].contains(&json["build_mode"].as_str().unwrap())); +} + +#[test] +fn test_create_project_metadata_json() { + let mut project = Project::default(); + let metadata = KaniMetadata { + crate_name: "sample_crate".to_string(), + proof_harnesses: vec![], + test_harnesses: vec![], + unsupported_features: vec![], + contracted_functions: vec![], + autoharness_md: None, + }; + project.outdir = PathBuf::from("/tmp/outdir"); + project.metadata.push(metadata); + + let json = create_project_metadata_json(&project); + assert_eq!(json["crate_name"][0], "sample_crate"); + assert_eq!(json["workspace_root"], "/tmp/outdir"); +} + +#[test] +fn test_create_harness_metadata_json() { + let harness = HarnessMetadata { + pretty_name: "crate::mod::my_harness".to_string(), + mangled_name: "mangled::harness".to_string(), + crate_name: "sample_crate".to_string(), + original_file: "src/lib.rs".to_string(), + original_start_line: 10, + original_end_line: 20, + goto_file: Some(PathBuf::from("target/goto_file.goto")), + attributes: HarnessAttributes::new(HarnessKind::Proof), + contract: None, + has_loop_contracts: true, + is_automatically_generated: false, + }; + + let json = create_harness_metadata_json(&harness); + + assert_eq!(json["pretty_name"], "crate::mod::my_harness"); + assert_eq!(json["crate_name"], "sample_crate"); + assert_eq!(json["source"]["file"], "src/lib.rs"); + assert_eq!(json["source"]["start_line"], 10); + assert_eq!(json["source"]["end_line"], 20); + assert_eq!(json["has_loop_contracts"], true); + assert_eq!(json["is_automatically_generated"], false); +} + +#[test] +fn 
test_create_verification_result_json() { + let harness = HarnessMetadata { + pretty_name: "crate::my_harness".to_string(), + mangled_name: "mangled_name".to_string(), + crate_name: "sample_crate".to_string(), + original_file: "src/lib.rs".to_string(), + original_start_line: 1, + original_end_line: 2, + goto_file: None, + attributes: HarnessAttributes::new(HarnessKind::Proof), + contract: None, + has_loop_contracts: false, + is_automatically_generated: false, + }; + + let properties = vec![ + Property { + property_id: PropertyId { + id: 1, + fn_name: Some("foo".to_string()), + class: "safety".to_string(), + }, + status: CheckStatus::Success, + description: "no overflow".to_string(), + source_location: SourceLocation { + file: Some("src/lib.rs".to_string()), + function: Some("foo".to_string()), + line: Some("42".to_string()), + column: Some("5".to_string()), + }, + reach: None, + trace: None, + }, + Property { + property_id: PropertyId { + id: 2, + fn_name: Some("bar".to_string()), + class: "assertion".to_string(), + }, + status: CheckStatus::Failure, + description: "assert failed".to_string(), + source_location: SourceLocation { + file: Some("src/main.rs".to_string()), + function: Some("bar".to_string()), + line: Some("10".to_string()), + column: Some("3".to_string()), + }, + reach: None, + trace: None, + }, + ]; + + let verification_result = VerificationResult { + status: VerificationStatus::Failure, + failed_properties: FailedProperties::Other, + results: Ok(properties), + runtime: Duration::from_millis(120), + generated_concrete_test: false, + coverage_results: None, + cbmc_stats: None, + }; + + let harness_result = HarnessResult { harness: &harness, result: verification_result }; + + let json = create_verification_result_json(&harness_result); + + // --- Assertions --- + assert_eq!(json["harness_id"], "crate::my_harness"); + assert_eq!(json["status"], "Failure"); + assert_eq!(json["checks"][0]["function"], "foo"); + assert_eq!(json["checks"][1]["function"], 
"bar"); + assert_eq!(json["checks"][1]["location"]["file"], "src/main.rs"); + + // Optional extra check + assert!(json["duration_ms"].as_u64().unwrap() >= 100); +} + +#[test] +fn test_create_verification_summary_json_real() { + // Create a real harness + let harness = HarnessMetadata { + pretty_name: "foo::harness_ok".into(), + mangled_name: "foo_harness_ok".into(), + crate_name: "sample".into(), + original_file: "src/lib.rs".into(), + original_start_line: 10, + original_end_line: 20, + goto_file: None, + attributes: HarnessAttributes::new(HarnessKind::Proof), + contract: None, + has_loop_contracts: false, + is_automatically_generated: false, + }; + + // Create a VerificationResult + let verification_result = VerificationResult::mock_success(); + + let harness_result = HarnessResult { harness: &harness, result: verification_result }; + + let json = create_verification_summary_json(&[harness_result], 1, "Completed"); + + assert_eq!(json["summary"]["status"], "Completed"); + assert_eq!(json["summary"]["total_harnesses"], 1); + assert_eq!(json["summary"]["executed"], 1); + assert_eq!(json["summary"]["successful"], 1); + assert_eq!(json["summary"]["failed"], 0); + assert!(json["results"].is_array()); +} + +#[test] +fn test_add_runner_results_to_json_real() { + let harness = HarnessMetadata { + pretty_name: "bar::harness_fail".into(), + mangled_name: "bar_harness_fail".into(), + crate_name: "sample".into(), + original_file: "src/main.rs".into(), + original_start_line: 5, + original_end_line: 15, + goto_file: None, + attributes: HarnessAttributes::new(HarnessKind::Proof), + contract: None, + has_loop_contracts: false, + is_automatically_generated: false, + }; + + let verification_result = VerificationResult { + status: VerificationStatus::Failure, + failed_properties: FailedProperties::Other, + results: Err(ExitStatus::Other(42)), + runtime: Duration::from_millis(120), + generated_concrete_test: false, + coverage_results: None, + cbmc_stats: None, + }; + + let 
harness_result = HarnessResult { harness: &harness, result: verification_result }; + + let mut handler = JsonHandler::new(None); + add_runner_results_to_json(&mut handler, &[harness_result], 1, "Failed"); + + let summary = &handler.data["verification_results"]["summary"]; + assert_eq!(summary["status"], "Failed"); + assert_eq!(summary["failed"], 1); +} diff --git a/kani-driver/src/harness_runner.rs b/kani-driver/src/harness_runner.rs index 708a4064cd72..bb5fe72bae55 100644 --- a/kani-driver/src/harness_runner.rs +++ b/kani-driver/src/harness_runner.rs @@ -10,6 +10,7 @@ use std::path::Path; use crate::args::{NumThreads, OutputFormat}; use crate::call_cbmc::{VerificationResult, VerificationStatus}; +use crate::frontend::{JsonHandler, schema_utils::add_runner_results_to_json}; use crate::project::Project; use crate::session::{BUG_REPORT_URL, KaniSession}; @@ -54,6 +55,7 @@ impl<'pr> HarnessRunner<'_, 'pr> { pub(crate) fn check_all_harnesses( &self, harnesses: &'pr [&HarnessMetadata], + mut json_handler: Option<&mut JsonHandler>, ) -> Result>> { let sorted_harnesses = crate::metadata::sort_harnesses_by_loc(harnesses); let pool = { @@ -98,14 +100,30 @@ impl<'pr> HarnessRunner<'_, 'pr> { .collect::>>() }); match results { - Ok(results) => Ok(results), + Ok(results) => { + if let Some(handler) = json_handler.as_deref_mut() { + add_runner_results_to_json(handler, &results, harnesses.len(), "completed"); + } + Ok(results) + } Err(err) => { if err.is::() { let failed = err.downcast::().unwrap(); - Ok(vec![HarnessResult { + let result = vec![HarnessResult { harness: sorted_harnesses[failed.index_to_failing_harness], result: failed.result, - }]) + }]; + + if let Some(handler) = json_handler { + add_runner_results_to_json( + handler, + &result, + harnesses.len(), + "completed_with_fail_fast", + ); + } + + Ok(result) } else { Err(err) } diff --git a/kani-driver/src/main.rs b/kani-driver/src/main.rs index 21b74e8672c7..246ee6d48d45 100644 --- a/kani-driver/src/main.rs +++ 
b/kani-driver/src/main.rs @@ -12,11 +12,16 @@ use args_toml::join_args; use crate::args::StandaloneSubcommand; use crate::concrete_playback::playback::{playback_cargo, playback_standalone}; +use crate::frontend::{ + JsonHandler, create_harness_metadata_json, create_metadata_json, create_project_metadata_json, + process_cbmc_results, process_harness_results, +}; use crate::list::collect_metadata::{list_cargo, list_standalone}; use crate::project::Project; use crate::session::KaniSession; use crate::version::print_kani_version; use clap::Parser; +use serde_json::json; use tracing::debug; mod args; @@ -36,6 +41,8 @@ mod harness_runner; mod list; mod metadata; mod project; + +mod frontend; mod session; mod util; mod version; @@ -128,12 +135,26 @@ fn standalone_main() -> Result<()> { /// Run verification on the given project. fn verify_project(project: Project, session: KaniSession) -> Result<()> { debug!(?project, "verify_project"); + let mut handler = JsonHandler::new(session.args.export_json.clone()); let harnesses = session.determine_targets(project.get_all_harnesses())?; debug!(n = harnesses.len(), ?harnesses, "verify_project"); + // Add project and export run metadata using frontend utility + handler.add_item("metadata", create_metadata_json()); + handler.add_item("project", create_project_metadata_json(&project)); + + // Add harness metadata using frontend utility + for h in &harnesses { + handler.add_harness_detail("harness_metadata", create_harness_metadata_json(h)); + } + // Verification let runner = harness_runner::HarnessRunner { sess: &session, project: &project }; - let results = runner.check_all_harnesses(&harnesses)?; + let results = runner.check_all_harnesses(&harnesses, Some(&mut handler))?; + + // Process harness results and add additional metadata using frontend utility function + process_harness_results(&mut handler, &harnesses, &results)?; + process_cbmc_results(&mut handler, &harnesses, &results, &session)?; if session.args.coverage { // We 
generate a timestamp to save the coverage data in a folder named @@ -150,8 +171,14 @@ fn verify_project(project: Project, session: KaniSession) -> Result<()> { session.save_coverage_metadata(&project, ×tamp)?; session.save_coverage_results(&project, &results, ×tamp)?; + + handler.add_item("coverage", json!({"enabled": true})); + } else { + handler.add_item("coverage", json!({"enabled": false})); } + handler.export()?; + session.print_final_summary(&results) } diff --git a/rfc/src/rfcs/0015-json-handler.md b/rfc/src/rfcs/0015-json-handler.md new file mode 100644 index 000000000000..1ad5f40687dd --- /dev/null +++ b/rfc/src/rfcs/0015-json-handler.md @@ -0,0 +1,288 @@ +- **Feature Name:** JSON Export (`json-export`) +- **Feature Request Issue:** + - + - + - + - + - + - + - + - + - +- **RFC PR:** +- **Status:** Unstable +- **Version:** 0 + +------------------- + +## Summary + +Export Kani verification results in structured JSON format with a validated schema, enabling programmatic analysis and tool integration. + +## User Impact + +It is difficult to integrate Kani verification results into automated workflows because results are only available in human-readable output text format. This limits: +- Cross-run analysis +- Integration with external applications/tools (dashboards, databases, LLMs) +- Automated harness result generation + +This RFC adds JSON export as an opt-in feature, maintaining backward compatibility while enabling these automation scenarios. + +## User Experience + +Users export verification results using the `--export-json` flag: + +```bash +cargo kani --export-json results.json +``` + +This works with all existing Kani options: + +```bash +cargo kani --harness my_harness --export-json output.json +cargo kani --tests --export-json test_results.json +``` + +### JSON Schema + +The output contains eight top-level blocks: + +**1. 
Metadata** - Execution environment +```json +{ + "metadata": { + "version": "1.0", + "timestamp": "2025-10-30T12:00:00.000000Z", + "kani_version": "0.65.0", + "target": "x86_64-unknown-linux-gnu", + "build_mode": "debug" + } +} +``` + +**2. Project** - Codebase identification +```json +{ + "project": { + "crate_name": ["example_crate"], + "workspace_root": "/path/to/workspace" + } +} +``` + +**3. Harness Metadata** - Source locations and attributes +```json +{ + "harness_metadata": [{ + "pretty_name": "example_harness", + "mangled_name": "_RNvCs_example", + "source": { + "file": "src/lib.rs", + "start_line": 10, + "end_line": 15 + }, + "attributes": { + "kind": "Proof", + "should_panic": false + }, + "contract": { + "contracted_function_name": null + }, + "has_loop_contracts": false + }] +} +``` + +**4. Verification Results** - Harness results and checks +```json +{ + "verification_results": { + "summary": { + "total_harnesses": 1, + "executed": 1, + "successful": 1, + "failed": 0, + "duration_ms": 500 + }, + "results": [{ + "harness_id": "example_harness", + "status": "Success", + "duration_ms": 500, + "checks": [{ + "id": 1, + "function": "example_function", + "status": "Success", + "description": "assertion failed: x > 0", + "location": { + "file": "src/lib.rs", + "line": "20", + "column": "13" + }, + "category": "assertion" + }] + }] + } +} +``` + +**5. Error Details** - Top-level error classification +```json +{ + "error_details": { + "has_errors": false, + "error_type": null, + "failed_properties_type": null, + "exit_status": "success" + } +} +``` + +**6. Property Details** - Property statistics +```json +{ + "property_details": [{ + "property_details": { + "total_properties": 1, + "passed": 1, + "failed": 0, + "unreachable": 0 + } + }] +} +``` + +**7. 
CBMC Statistics** - Backend performance metrics +```json +{ + "cbmc": [{ + "harness_id": "example_harness", + "cbmc_metadata": { + "version": "6.7.1", + "os_info": "x86_64 linux unix" + }, + "configuration": { + "object_bits": 16, + "solver": "Cadical" + }, + "cbmc_stats": { + "runtime_symex_s": 0.005, + "runtime_solver_s": 0.0003, + "vccs_generated": 1, + "vccs_remaining": 1 + } + }] +} +``` + +**8. Coverage** - Coverage configuration +```json +{ + "coverage": { + "enabled": false + } +} +``` + +### Design Notes + +- **Harness correlation**: Data is keyed by `harness_id` across blocks (`verification_results.results[]`, `cbmc[]`) for easy filtering +- **Optional fields**: `error_details` only populates `error_type`, `failed_properties_type`, and `exit_status` on failure +- **Complete state**: Captures all verification data including CBMC performance metrics for analysis + +### Error Handling + +- File write errors produce clear error messages with non-zero exit codes +- Schema validation (during regression testing) catches missing/malformed fields + +## Software Design + +The implementation touches several components in the Kani driver. We'll describe each component and how they work together to produce the JSON export. + +### Core Implementation + +The main change is in `kani-driver`, where we add a new frontend module `frontend/schema_utils.rs` that handles all JSON serialization. This module defines a `JsonHandler` struct that collects all the verification data (harness metadata, verification results, CBMC statistics) and serializes it to JSON using standard Rust serialization. + +To support this, we enhance `call_cbmc.rs` to extract CBMC performance statistics from CBMC's output. Since CBMC prints timing and statistics information in a structured format, we can parse this using regular expressions to extract values like symbolic execution time, solver time, and VCC counts. 
+ +The driver's main entry point (`main.rs`) is modified to accept the `--export-json <file>` flag. When this flag is present, after verification completes successfully (or fails), we trigger the JSON serialization and write the output to the specified file. File I/O errors are reported clearly to the user with appropriate error messages. + +### Schema Validation and Testing + +Rather than hardcoding expected JSON fields in our tests, we take a template-based approach. The schema file itself (`kani_json_schema.json`) serves as both documentation and validation template. A Python validation script (`scripts/validate_json_export.py`) reads this schema file and recursively validates that any JSON export matches the expected structure. + +The validation approach treats all fields as required by default. However, some fields only make sense in certain contexts—for example, `error_details` contains detailed error information only when verification fails. To handle this, the schema supports `_optional` arrays that list fields which may be absent: + +```json +{ + "error_details": { + "_optional": ["error_type", "failed_properties_type", "exit_status"], + "has_errors": false, + "error_type": null + } +} +``` + +When validation encounters a field listed in `_optional`, it validates the field only if present in the actual data. This keeps the schema flexible while ensuring we catch missing required fields. Metadata fields (those starting with underscore) are excluded from validation entirely. + +The validator supports a `--field-path` option for validating specific sections of the JSON (e.g., the `cbmc` block). + +### Test Suite Organization + +We add a new test suite under `tests/json-handler/` with four test scenarios that cover different aspects of JSON export: + +The `basic-export/` test verifies that the JSON export flag works and produces valid JSON with the expected top-level structure.
The `schema-validation/` test is more comprehensive—it runs a verification with multiple harnesses and validates the entire JSON structure against the schema template. This directory also houses the canonical `kani_json_schema.json` file, making it easy to find and update. + +The `multiple-harnesses/` test specifically checks that results from multiple harnesses are correctly aggregated and that the `harness_id` correlation works across different blocks. Finally, `failed-verification/` tests that error information is correctly captured and that optional error fields are populated on failure. + +### Implementation Flow + +When a user runs `cargo kani --export-json output.json`, the following happens: + +1. Kani driver parses the command-line arguments and enables JSON export mode +2. During verification, CBMC is invoked for each harness as usual +3. For each harness, we capture and parse CBMC's output to extract statistics +4. After all harnesses complete, we aggregate the results into the `VerificationOutput` struct +5. We serialize this struct to JSON format +6. The JSON is written to the specified file, with any I/O errors reported to the user + +The schema structure organizes information by category (metadata, project info, harness details, results, CBMC stats) with `harness_id` serving as the correlation key across blocks. This organization makes it easy for external tools to either process everything or filter by specific harnesses. + +### Corner Cases + +Several edge cases need consideration: + +**CBMC output parsing**: We extract CBMC statistics using regex patterns. If CBMC's output format changes in future versions, parsing may fail. To handle this gracefully, we can simply omit the unparsed CBMC statistics from the JSON output rather than failing the export. + +**Large output**: For projects with hundreds of harnesses, JSON files can grow to multi-megabyte sizes. While this is acceptable for current use cases, we may need to consider streaming serialization or compression if users report performance issues.
+ +**Partial verification runs**: If verification is interrupted (user cancellation, system crash), no JSON file is written. Since JSON is written only after verification completes, interrupted runs produce no output rather than partial/corrupt JSON. + +**Schema evolution**: The schema file and the `VerificationOutput` struct must stay synchronized. During development, if we add fields to the struct but forget to update the schema template, our tests will catch this mismatch and fail. This is by design—the tests serve as a contract enforcement mechanism. + +## Rationale and alternatives + +### Why JSON? + +JSON provides universal language support, human readability for debugging, and established tooling without the complexity of binary formats or custom parsers. + +### Template-based validation + +The schema file (`kani_json_schema.json`) serves as both documentation and validation template. Validation logic reads the schema dynamically rather than hardcoding expected fields. + +**Advantage**: Single source of truth; schema changes don't require code changes; prevents drift between documentation and implementation. + +**Disadvantage**: More complex validation implementation than hardcoded checks. + +## Open questions + +- How should we handle breaking schema changes? +- Should users be able to filter which sections are exported? +- For very large projects with hundreds of harnesses, should we support incremental output (e.g., JSONL format)? + +## Future possibilities + +**Formal JSON Schema**: Generate a formal JSON Schema (Draft 7+) document from our template for automatic client code generation and integration with third-party validators. + +**Streaming export**: For projects with hundreds of harnesses, a streaming approach (JSONL format with one harness per line) could reduce memory pressure. Wait for user performance reports before implementing.
+ +**Database integration**: With structured JSON output, users could build tools to store verification results in databases (PostgreSQL) for historical analysis and tracking performance trends. The harness-based organization with `harness_id` keys naturally maps to relational schemas with foreign key relationships. diff --git a/scripts/ci/copyright-exclude b/scripts/ci/copyright-exclude index 157d88fbc379..09665d2690d9 100644 --- a/scripts/ci/copyright-exclude +++ b/scripts/ci/copyright-exclude @@ -1,6 +1,7 @@ .clang-format .diff .ico +.json .md .png .props diff --git a/scripts/kani-regression.sh b/scripts/kani-regression.sh index 5c7767ded3ce..968336865edb 100755 --- a/scripts/kani-regression.sh +++ b/scripts/kani-regression.sh @@ -60,6 +60,7 @@ TESTS=( "cargo-coverage cargo-coverage" "kani-docs cargo-kani" "kani-fixme kani-fixme" + "json-handler exec" ) # Build compiletest and print configuration. We pick suite / mode combo so there's no test. diff --git a/scripts/validate_json_export.py b/scripts/validate_json_export.py new file mode 100755 index 000000000000..e87634c2e8bf --- /dev/null +++ b/scripts/validate_json_export.py @@ -0,0 +1,223 @@ +#!/usr/bin/env python3 +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +# +""" +JSON Export Validation Script for Kani Integration Tests + +Validates JSON exports against the kani_json_schema.json template. 
+""" + +import json +import sys +import os +from pathlib import Path + + +def load_schema_template(): + """Load the JSON schema template""" + # Find schema template in tests/json-handler/schema-validation directory + script_dir = Path(__file__).parent + schema_path = ( + script_dir.parent + / "tests" + / "json-handler" + / "schema-validation" + / "kani_json_schema.json" + ) + + if not schema_path.exists(): + print(f"ERROR: Schema template not found at {schema_path}") + return None + + with open(schema_path, "r") as f: + return json.load(f) + + +def validate_structure_recursive(data, schema, path=""): + """ + Recursively validate data structure against schema template. + + Args: + data: The JSON data to validate + schema: The schema template to validate against + path: Current path in the structure (for error messages) + + Returns: + (success: bool, errors: list) + """ + errors = [] + + # Handle dict validation + if isinstance(schema, dict) and isinstance(data, dict): + # Get optional fields list (fields that may or may not be present) + optional_fields = schema.get("_optional", []) + + # All fields in schema are required except metadata fields (starting with _) and optional fields + schema_keys = [k for k in schema.keys() if not k.startswith("_")] + + # Check each key in schema + for key in schema_keys: + # Check if field exists in data + if key not in data: + # Only report error if field is required (not in optional list) + if key not in optional_fields: + current_path = f"{path}.{key}" if path else key + errors.append(f"Missing required field: {current_path}") + continue + + # Recursively validate nested structure + current_path = f"{path}.{key}" if path else key + sub_errors = validate_structure_recursive( + data[key], schema[key], current_path + )[1] + errors.extend(sub_errors) + + # Handle array validation + elif isinstance(schema, list) and len(schema) > 0: + if not isinstance(data, list): + errors.append(f"Expected array at {path}, got 
{type(data).__name__}") + elif len(data) > 0: + # Validate first item against schema template + sub_errors = validate_structure_recursive(data[0], schema[0], f"{path}[0]")[ + 1 + ] + errors.extend(sub_errors) + + # Leaf values - no validation needed + + success = len(errors) == 0 + return success, errors + + +def validate_json_structure(json_file, schema=None): + """ + Validate that JSON export matches the schema template structure. + """ + try: + with open(json_file, "r") as f: + data = json.load(f) + except FileNotFoundError: + print(f"ERROR: JSON file {json_file} not found") + return False + except json.JSONDecodeError as e: + print(f"ERROR: Invalid JSON in {json_file}: {e}") + return False + + # Load schema if not provided + if schema is None: + schema = load_schema_template() + if schema is None: + return False + + # All schema fields are required - validate structure recursively + # The recursive validator will catch any missing required fields + success, all_errors = validate_structure_recursive(data, schema, "") + + if not success or all_errors: + print(f"ERROR: Validation failed for {json_file}:") + for error in all_errors: + print(f" - {error}") + return False + + print(f"JSON structure validation passed for {json_file}") + return True + + +def validate_field_path(json_file, field_path, schema=None): + """ + Validate specific fields at a given path. 
+ + Args: + json_file: Path to JSON file + field_path: Dot-separated path (e.g., 'metadata', 'verification_results.summary') + schema: Optional pre-loaded schema + """ + try: + with open(json_file, "r") as f: + data = json.load(f) + except Exception as e: + print(f"ERROR: Failed to load {json_file}: {e}") + return False + + # Load schema if not provided + if schema is None: + schema = load_schema_template() + if schema is None: + return False + + # Navigate to the field in both data and schema + parts = field_path.split(".") + current_data = data + current_schema = schema + + for part in parts: + if part not in current_data: + print(f"ERROR: Field path '{field_path}' not found in data") + return False + current_data = current_data[part] + + if part not in current_schema: + print(f"ERROR: Field path '{field_path}' not found in schema template") + return False + current_schema = current_schema[part] + + # Handle arrays - check first item + if isinstance(current_schema, list) and len(current_schema) > 0: + current_schema = current_schema[0] + if isinstance(current_data, list) and len(current_data) > 0: + current_data = current_data[0] + + # Validate structure at this path + success, errors = validate_structure_recursive( + current_data, current_schema, field_path + ) + + if not success: + print(f"ERROR: Validation failed for {field_path}:") + for error in errors: + print(f" - {error}") + return False + + print(f"Field validation passed for {field_path}") + return True + + +def main(): + if len(sys.argv) < 2: + print( + "Usage: python3 validate_json_export.py [--field-path ]" + ) + sys.exit(1) + + json_file = sys.argv[1] + + # Check if specific field validation requested + if len(sys.argv) > 2 and sys.argv[2] == "--field-path": + if len(sys.argv) < 4: + print("ERROR: --field-path requires a path argument") + sys.exit(1) + + field_path = sys.argv[3] + if validate_field_path(json_file, field_path): + sys.exit(0) + else: + sys.exit(1) + + # Load schema once + schema = 
load_schema_template() + if schema is None: + print("ERROR: Could not load schema template") + sys.exit(1) + + # Run full validation + if validate_json_structure(json_file, schema): + print(f"\nAll validations passed for {json_file}") + sys.exit(0) + else: + print(f"\nValidation failed for {json_file}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/tests/.gitignore b/tests/.gitignore index 814a069a9424..68879fed17a0 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -1,5 +1,6 @@ # Temporary files and folders *.json +!json-handler/schema-validation/*.json kani_concrete_playback rmet*/ target/ diff --git a/tests/json-handler/basic-export/config.yml b/tests/json-handler/basic-export/config.yml new file mode 100644 index 000000000000..938834ea42cb --- /dev/null +++ b/tests/json-handler/basic-export/config.yml @@ -0,0 +1,4 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +script: test.sh + diff --git a/tests/json-handler/basic-export/test.rs b/tests/json-handler/basic-export/test.rs new file mode 100644 index 000000000000..1b43dc7373f2 --- /dev/null +++ b/tests/json-handler/basic-export/test.rs @@ -0,0 +1,22 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// kani-flags: --export-json basic_test_output.json + +//! Basic test for JSON export functionality +//! 
This verifies that the frontend module can export verification results + +fn add_numbers(a: u32, b: u32) -> u32 { + a + b +} + +#[kani::proof] +fn verify_add_numbers() { + let x: u32 = kani::any(); + let y: u32 = kani::any(); + kani::assume(x < 100); + kani::assume(y < 100); + let result = add_numbers(x, y); + assert!(result >= x); + assert!(result >= y); +} diff --git a/tests/json-handler/basic-export/test.sh b/tests/json-handler/basic-export/test.sh new file mode 100755 index 000000000000..fa237cabf55d --- /dev/null +++ b/tests/json-handler/basic-export/test.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Test JSON export basic functionality - validates schema_utils.rs functions + +set -eu + +OUTPUT_FILE="test_output.json" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" +VALIDATOR="$PROJECT_ROOT/scripts/validate_json_export.py" + +# Run Kani with JSON export +kani test.rs --export-json "$OUTPUT_FILE" + +# Check that JSON file was created +if [ ! 
-f "$OUTPUT_FILE" ]; then + echo "ERROR: JSON file $OUTPUT_FILE was not created" + exit 1 +fi + +# Validate JSON structure using the validation script (suppress verbose output) +python3 "$VALIDATOR" "$OUTPUT_FILE" 2>&1 | tail -1 + +# Clean up +rm -f "$OUTPUT_FILE" + diff --git a/tests/json-handler/failed-verification/config.yml b/tests/json-handler/failed-verification/config.yml new file mode 100644 index 000000000000..938834ea42cb --- /dev/null +++ b/tests/json-handler/failed-verification/config.yml @@ -0,0 +1,4 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +script: test.sh + diff --git a/tests/json-handler/failed-verification/test.rs b/tests/json-handler/failed-verification/test.rs new file mode 100644 index 000000000000..d5c459c09a96 --- /dev/null +++ b/tests/json-handler/failed-verification/test.rs @@ -0,0 +1,20 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// kani-flags: --export-json failed_output.json + +//! Test JSON export with failed verification +//! 
Verifies that frontend correctly captures failure information in JSON + +fn unsafe_subtract(a: u32, b: u32) -> u32 { + a - b // This will fail with overflow +} + +#[kani::proof] +fn verify_unsafe_subtract() { + let x: u32 = kani::any(); + let y: u32 = kani::any(); + // No assumptions - will trigger overflow + let result = unsafe_subtract(x, y); + assert!(result <= x); // This will also fail when y > x +} diff --git a/tests/json-handler/failed-verification/test.sh b/tests/json-handler/failed-verification/test.sh new file mode 100755 index 000000000000..e4477d5dfde9 --- /dev/null +++ b/tests/json-handler/failed-verification/test.sh @@ -0,0 +1,97 @@ +#!/usr/bin/env bash +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Test JSON export with failed verification - validates error capture + +set -eu + +OUTPUT_FILE="failed_output.json" + +# Find the project root (where scripts/ directory is) +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" + +# Run Kani with JSON export (expect failure, so don't use -e) +set +e +kani test.rs --export-json "$OUTPUT_FILE" +EXIT_CODE=$? +set -e + +# Kani should exit with failure +if [ $EXIT_CODE -eq 0 ]; then + echo "ERROR: Expected Kani to fail but it succeeded" + exit 1 +fi + +echo "Kani failed as expected" + +# Check that JSON file was created despite failure +if [ ! 
-f "$OUTPUT_FILE" ]; then + echo "ERROR: JSON file $OUTPUT_FILE was not created" + exit 1 +fi + +echo "JSON file created despite failure" + +# Validate that JSON contains failure information +python3 << 'EOF' +import json +import sys + +with open('failed_output.json', 'r') as f: + data = json.load(f) + +# Check verification_results shows failure +vr = data['verification_results'] +summary = vr['summary'] + +if summary['successful'] != 0: + print(f"ERROR: Expected 0 successful, got {summary['successful']}") + sys.exit(1) + +if summary['failed'] != 1: + print(f"ERROR: Expected 1 failed, got {summary['failed']}") + sys.exit(1) + +print("Summary shows correct failure count") + +# Check that results array contains failure status +results = vr['results'] +if len(results) != 1: + print(f"ERROR: Expected 1 result, got {len(results)}") + sys.exit(1) + +if results[0]['status'] != 'Failure': + print(f"ERROR: Expected status 'Failure', got {results[0]['status']}") + sys.exit(1) + +print("Result status is 'Failure'") + +# Check that error_details exists and has_errors is true +if 'error_details' not in data: + print("ERROR: error_details field missing") + sys.exit(1) + +# error_details is an object (single harness) +error_details = data['error_details'] +if not error_details.get('has_errors'): + print("ERROR: has_errors should be true") + sys.exit(1) + +print("error_details.has_errors is true") + +# Verify error_type is present +if 'error_type' not in error_details: + print("ERROR: error_type field missing") + sys.exit(1) + +print("error_type field present") + +EOF + +echo "All failure validation checks passed!" 
+ +# Clean up +rm -f "$OUTPUT_FILE" + diff --git a/tests/json-handler/multiple-harnesses/config.yml b/tests/json-handler/multiple-harnesses/config.yml new file mode 100644 index 000000000000..938834ea42cb --- /dev/null +++ b/tests/json-handler/multiple-harnesses/config.yml @@ -0,0 +1,4 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +script: test.sh + diff --git a/tests/json-handler/multiple-harnesses/test.rs b/tests/json-handler/multiple-harnesses/test.rs new file mode 100644 index 000000000000..f13ac2ae9360 --- /dev/null +++ b/tests/json-handler/multiple-harnesses/test.rs @@ -0,0 +1,42 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// kani-flags: --export-json multi_harness_output.json + +//! Test JSON export with multiple harnesses +//! Verifies that frontend correctly handles multiple verification harnesses + +fn multiply(a: i32, b: i32) -> i32 { + a * b +} + +fn divide(a: i32, b: i32) -> i32 { + a / b +} + +#[kani::proof] +fn verify_multiply_positive() { + let x: i32 = kani::any(); + let y: i32 = kani::any(); + kani::assume(x > 0 && x < 10); + kani::assume(y > 0 && y < 10); + let result = multiply(x, y); + assert!(result > 0); +} + +#[kani::proof] +fn verify_multiply_zero() { + let x: i32 = kani::any(); + let result = multiply(x, 0); + assert_eq!(result, 0); +} + +#[kani::proof] +fn verify_divide_nonzero() { + let x: i32 = kani::any(); + let y: i32 = kani::any(); + kani::assume(x >= 0 && x < 100); + kani::assume(y > 0 && y < 100); + let result = divide(x, y); + assert!(result <= x); +} diff --git a/tests/json-handler/multiple-harnesses/test.sh b/tests/json-handler/multiple-harnesses/test.sh new file mode 100755 index 000000000000..83c3e4add71f --- /dev/null +++ b/tests/json-handler/multiple-harnesses/test.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Test JSON export with multiple harnesses - validates aggregation 
logic + +set -eu + +OUTPUT_FILE="multi_harness_output.json" + +# Find the project root +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." && pwd)" +VALIDATOR="$PROJECT_ROOT/scripts/validate_json_export.py" + +# Run Kani with JSON export +kani test.rs --export-json "$OUTPUT_FILE" + +# Check that JSON file was created +if [ ! -f "$OUTPUT_FILE" ]; then + echo "ERROR: JSON file $OUTPUT_FILE was not created" + exit 1 +fi + +# Validate JSON structure (suppress verbose output) +python3 "$VALIDATOR" "$OUTPUT_FILE" 2>&1 | tail -1 + +# Check that JSON contains all 3 harnesses +HARNESS_COUNT=$(python3 -c "import json; data=json.load(open('$OUTPUT_FILE')); print(len(data['harness_metadata']))") + +if [ "$HARNESS_COUNT" != "3" ]; then + echo "ERROR: Expected 3 harnesses in JSON, found $HARNESS_COUNT" + exit 1 +fi + +echo "Found 3 harnesses in JSON" + +# Clean up +rm -f "$OUTPUT_FILE" + diff --git a/tests/json-handler/schema-validation/config.yml b/tests/json-handler/schema-validation/config.yml new file mode 100644 index 000000000000..938834ea42cb --- /dev/null +++ b/tests/json-handler/schema-validation/config.yml @@ -0,0 +1,4 @@ +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT +script: test.sh + diff --git a/tests/json-handler/schema-validation/kani_json_schema.json b/tests/json-handler/schema-validation/kani_json_schema.json new file mode 100644 index 000000000000..a683050454c7 --- /dev/null +++ b/tests/json-handler/schema-validation/kani_json_schema.json @@ -0,0 +1,119 @@ +{ + "_comment": "All fields are required unless listed in _optional. 
Metadata fields (starting with _) are not validated.", + "metadata": { + "version": "1.0", + "timestamp": "2025-10-30T12:00:00.000000Z", + "kani_version": "0.65.0", + "target": "x86_64-unknown-linux-gnu", + "build_mode": "debug" + }, + "project": { + "crate_name": [ + "example_crate" + ], + "workspace_root": "/path/to/workspace" + }, + "harness_metadata": [ + { + "pretty_name": "example_harness", + "mangled_name": "_RNvCs_example_mangled_name", + "crate_name": "example_crate", + "source": { + "file": "src/lib.rs", + "start_line": 10, + "end_line": 15 + }, + "goto_file": "/path/to/goto/file.out", + "attributes": { + "kind": "Proof", + "should_panic": false + }, + "contract": { + "contracted_function_name": null, + "recursion_tracker": null + }, + "has_loop_contracts": false, + "is_automatically_generated": false + } + ], + "verification_results": { + "summary": { + "total_harnesses": 1, + "executed": 1, + "status": "completed", + "successful": 1, + "failed": 0, + "duration_ms": 500 + }, + "results": [ + { + "harness_id": "example_harness", + "status": "Success", + "duration_ms": 500, + "checks": [ + { + "id": 1, + "function": "example_function", + "status": "Success", + "description": "assertion failed: x > 0", + "location": { + "file": "src/lib.rs", + "line": "20", + "column": "13" + }, + "category": "assertion" + } + ] + } + ] + }, + "error_details": { + "_optional": [ + "error_type", + "failed_properties_type", + "exit_status" + ], + "has_errors": false, + "error_type": null, + "failed_properties_type": null, + "exit_status": "success" + }, + "property_details": [ + { + "property_details": { + "total_properties": 1, + "passed": 1, + "failed": 0, + "unreachable": 0 + } + } + ], + "cbmc": [ + { + "harness_id": "example_harness", + "cbmc_metadata": { + "version": "6.7.1", + "os_info": "x86_64 linux unix" + }, + "configuration": { + "object_bits": 16, + "solver": "Cadical" + }, + "cbmc_stats": { + "runtime_symex_s": 0.005, + "size_program_expression": 150, + 
"slicing_removed_assignments": 80, + "vccs_generated": 1, + "vccs_remaining": 1, + "runtime_postprocess_equation_s": 0.00002, + "runtime_convert_ssa_s": 0.002, + "runtime_post_process_s": 0.000005, + "runtime_solver_s": 0.0003, + "runtime_decision_procedure_s": 0.003 + } + } + ], + "coverage": { + "enabled": false + } +} \ No newline at end of file diff --git a/tests/json-handler/schema-validation/test.rs b/tests/json-handler/schema-validation/test.rs new file mode 100644 index 000000000000..df2bea6da730 --- /dev/null +++ b/tests/json-handler/schema-validation/test.rs @@ -0,0 +1,28 @@ +// Copyright Kani Contributors +// SPDX-License-Identifier: Apache-2.0 OR MIT + +// kani-flags: --export-json schema_validation_output.json + +//! Test JSON schema structure validation +//! Ensures frontend exports well-formed JSON with correct metadata + +fn calculate(x: u32, y: u32) -> u32 { + if x > y { x - y } else { y - x } +} + +#[kani::proof] +fn verify_calculate() { + let a: u32 = kani::any(); + let b: u32 = kani::any(); + kani::assume(a < 1000); + kani::assume(b < 1000); + let result = calculate(a, b); + assert!(result < 1000); +} + +#[kani::proof] +fn verify_calculate_same() { + let x: u32 = kani::any(); + let result = calculate(x, x); + assert_eq!(result, 0); +} diff --git a/tests/json-handler/schema-validation/test.sh b/tests/json-handler/schema-validation/test.sh new file mode 100755 index 000000000000..479cba03e0e7 --- /dev/null +++ b/tests/json-handler/schema-validation/test.sh @@ -0,0 +1,77 @@ +#!/usr/bin/env bash +# Copyright Kani Contributors +# SPDX-License-Identifier: Apache-2.0 OR MIT + +# Test JSON schema structure from schema_utils.rs functions +# This test uses the schema template to validate the JSON output + +set -eu + +OUTPUT_FILE="schema_output.json" + +# Find the project root (where scripts/ directory is) +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_ROOT="$(cd "$SCRIPT_DIR/../../.." 
&& pwd)" +VALIDATOR="$PROJECT_ROOT/scripts/validate_json_export.py" + +# Run Kani with JSON export +kani test.rs --export-json "$OUTPUT_FILE" + +# Check that JSON file was created +if [ ! -f "$OUTPUT_FILE" ]; then + echo "ERROR: JSON file $OUTPUT_FILE was not created" + exit 1 +fi + +echo "JSON file created" + +# Validate basic structure using schema template +python3 "$VALIDATOR" "$OUTPUT_FILE" + +# Validate each field from schema template dynamically +echo "" +echo "Validating individual fields from schema:" + +# Get all top-level keys from schema and validate each that exists in JSON +SCHEMA_FILE="$(dirname "$0")/kani_json_schema.json" +python3 << EOF +import json +import subprocess +import sys + +with open('$SCHEMA_FILE', 'r') as f: + schema = json.load(f) + +with open('$OUTPUT_FILE', 'r') as f: + data = json.load(f) + +all_passed = True +for key in schema.keys(): + if key not in data: + continue + + result = subprocess.run( + ['python3', '$VALIDATOR', '$OUTPUT_FILE', '--field-path', key], + capture_output=True + ) + if result.returncode == 0: + print(f" {key}: PASSED") + else: + print(f" {key}: FAILED") + all_passed = False + +if not all_passed: + sys.exit(1) +EOF + +if [ $? -ne 0 ]; then + echo "Field validation failed" + exit 1 +fi + +# Clean up +rm -f "$OUTPUT_FILE" + +echo "" +echo "All validations passed" +