diff --git a/Cargo.lock b/Cargo.lock
index 8f8f79596d96..7c350946416b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1477,6 +1477,17 @@ dependencies = [
  "memchr",
 ]
 
+[[package]]
+name = "comfy-table"
+version = "7.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b03b7db8e0b4b2fdad6c551e634134e99ec000e5c8c3b6856c65e8bbaded7a3b"
+dependencies = [
+ "crossterm",
+ "unicode-segmentation",
+ "unicode-width 0.2.0",
+]
+
 [[package]]
 name = "concurrent-queue"
 version = "2.5.0"
@@ -1774,6 +1785,29 @@ version = "0.8.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
 
+[[package]]
+name = "crossterm"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b"
+dependencies = [
+ "bitflags 2.9.0",
+ "crossterm_winapi",
+ "document-features",
+ "parking_lot",
+ "rustix 1.0.7",
+ "winapi",
+]
+
+[[package]]
+name = "crossterm_winapi"
+version = "0.9.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "acdd7c62a3665c7f6830a51635d9ac9b23ed385797f70a83bb8bafe9c572ab2b"
+dependencies = [
+ "winapi",
+]
+
 [[package]]
 name = "crunchy"
 version = "0.2.3"
@@ -2726,6 +2760,8 @@ dependencies = [
  "chrono",
  "clap",
  "cliclack",
+ "colored",
+ "comfy-table",
  "console",
  "dotenvy",
  "etcetera",
@@ -2734,6 +2770,7 @@ dependencies = [
  "goose-bench",
  "goose-mcp",
  "http 1.2.0",
+ "humansize",
  "indicatif",
  "is-terminal",
  "jsonschema",
@@ -2846,6 +2883,7 @@ dependencies = [
  "goose",
  "goose-mcp",
  "http 1.2.0",
+ "once_cell",
  "reqwest 0.12.12",
  "rmcp",
  "schemars",
@@ -3128,6 +3166,15 @@ version = "1.0.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
 
+[[package]]
+name = "humansize"
+version = "2.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6cb51c9a029ddc91b07a787f1d86b53ccfa49b0e86688c946ebe8d3555685dd7"
+dependencies = [
+ "libm",
+]
+
 [[package]]
 name = "hyper"
 version = "0.14.32"
diff --git a/crates/goose-cli/Cargo.toml b/crates/goose-cli/Cargo.toml
index 83ebb8644a92..fd8550180b34 100644
--- a/crates/goose-cli/Cargo.toml
+++ b/crates/goose-cli/Cargo.toml
@@ -39,6 +39,9 @@ rand = "0.8.5"
 rustyline = "15.0.0"
 tracing = "0.1"
 chrono = "0.4"
+colored = "2.2"
+comfy-table = "7.1"
+humansize = "2.1"
 tracing-subscriber = { version = "0.3", features = ["env-filter", "fmt", "json", "time"] }
 tracing-appender = "0.2"
 once_cell = "1.20.2"
diff --git a/crates/goose-cli/src/cli.rs b/crates/goose-cli/src/cli.rs
index efaad9e47f91..85d83435cce1 100644
--- a/crates/goose-cli/src/cli.rs
+++ b/crates/goose-cli/src/cli.rs
@@ -293,6 +293,63 @@ enum SchedulerCommand {
     CronHelp {},
 }
 
+#[derive(Subcommand)]
+enum DbCommand {
+    #[command(about = "Show database status and statistics")]
+    Status,
+
+    #[command(about = "Create a manual database backup")]
+    Backup {
+        #[arg(
+            short,
+            long,
+            help = "Custom backup name (saved in default backup directory)"
+        )]
+        name: Option<String>,
+    },
+
+    #[command(about = "Restore database from a backup file")]
+    Restore {
+        #[arg(help = "Backup filename (e.g., backup_YYYYMMDD_HHMMSS.db) or full path")]
+        backup_file: PathBuf,
+
+        #[arg(short, long, help = "Skip confirmation prompt")]
+        force: bool,
+    },
+
+    #[command(about = "Show database file path")]
+    Path,
+
+    #[command(about = "List all available database backups")]
+    ListBackups {
+        #[arg(
+            short,
+            long,
+            default_value = "table",
+            help = "Output format (table/json)"
+        )]
+        format: String,
+    },
+
+    #[command(about = "Delete database backup files")]
+    DeleteBackup {
+        #[arg(
+            help = "Backup filename(s) (e.g., backup_YYYYMMDD_HHMMSS.db) or full path(s)",
+            conflicts_with = "all"
+        )]
+        backup_files: Vec<PathBuf>,
+
+        #[arg(long, help = "Delete all backups", conflicts_with = "backup_files")]
+        all: bool,
+
+        #[arg(long, help = "Also clean up orphaned WAL/SHM files")]
+        cleanup: bool,
+
+        #[arg(short, long, help = "Skip confirmation prompt")]
+        force: bool,
+    },
+}
+
 #[derive(Subcommand)]
 pub enum BenchCommand {
     #[command(name = "init-config", about = "Create a new starter-config")]
@@ -403,6 +460,13 @@ enum Command {
     #[command(about = "Configure goose settings")]
     Configure {},
 
+    /// Database management commands
+    #[command(about = "Database management commands")]
+    Db {
+        #[command(subcommand)]
+        command: DbCommand,
+    },
+
     /// Display goose configuration information
     #[command(about = "Display goose information")]
     Info {
@@ -848,6 +912,7 @@ pub async fn cli() -> anyhow::Result<()> {
     let command_name = match &cli.command {
         Some(Command::Configure {}) => "configure",
+        Some(Command::Db { .. }) => "db",
         Some(Command::Info { .. }) => "info",
         Some(Command::Mcp { .. }) => "mcp",
         Some(Command::Acp {}) => "acp",
@@ -873,6 +938,29 @@ pub async fn cli() -> anyhow::Result<()> {
         Some(Command::Configure {}) => {
             handle_configure().await?;
         }
+        Some(Command::Db { command }) => {
+            match command {
+                DbCommand::Status => crate::commands::db::handle_db_status().await?,
+                DbCommand::Backup { name } => crate::commands::db::handle_db_backup(name).await?,
+                DbCommand::Restore { backup_file, force } => {
+                    crate::commands::db::handle_db_restore(backup_file, force).await?
+                }
+                DbCommand::Path => crate::commands::db::handle_db_path().await?,
+                DbCommand::ListBackups { format } => {
+                    crate::commands::db::handle_db_list_backups(format).await?
+                }
+                DbCommand::DeleteBackup {
+                    backup_files,
+                    all,
+                    cleanup,
+                    force,
+                } => {
+                    crate::commands::db::handle_db_delete_backup(backup_files, all, cleanup, force)
+                        .await?
+                }
+            }
+            return Ok(());
+        }
         Some(Command::Info { verbose }) => {
             handle_info(verbose)?;
         }
diff --git a/crates/goose-cli/src/commands/db.rs b/crates/goose-cli/src/commands/db.rs
new file mode 100644
index 000000000000..394c779b3aa7
--- /dev/null
+++ b/crates/goose-cli/src/commands/db.rs
@@ -0,0 +1,478 @@
+use anyhow::Result;
+use chrono::{DateTime, Utc};
+use cliclack::confirm;
+use colored::Colorize;
+use comfy_table::{presets::UTF8_FULL, Cell, CellAlignment, ContentArrangement, Table};
+use goose::config::paths::Paths;
+use goose::session::session_manager::SessionManager;
+use humansize::{format_size, BINARY};
+use std::path::PathBuf;
+
+fn format_age(created_at: DateTime<Utc>) -> String {
+    let age = chrono::Utc::now() - created_at;
+    if age.num_days() > 0 {
+        format!("{} days ago", age.num_days())
+    } else if age.num_hours() > 0 {
+        format!("{} hours ago", age.num_hours())
+    } else if age.num_minutes() > 0 {
+        format!("{} mins ago", age.num_minutes())
+    } else {
+        "just now".to_string()
+    }
+}
+
+pub async fn handle_db_status() -> Result<()> {
+    let stats = SessionManager::get_database_stats().await?;
+
+    println!("\n{}", "Goose Database Status".bold().cyan());
+
+    let mut db_info_table = Table::new();
+    db_info_table
+        .load_preset(UTF8_FULL)
+        .set_content_arrangement(ContentArrangement::Dynamic);
+
+    db_info_table.add_row(vec!["Path", &stats.db_path.display().to_string()]);
+    db_info_table.add_row(vec!["Size", &format_size(stats.db_size, BINARY)]);
+    db_info_table.add_row(vec![
+        "Schema Version",
+        &if stats.is_latest_version {
+            format!("{} (up to date ✓)", stats.schema_version)
+        } else {
+            format!("{} (update available)", stats.schema_version)
+        },
+    ]);
+    db_info_table.add_row(vec![
+        "Backup Directory",
+        &stats.backup_dir.display().to_string(),
+    ]);
+
+    let mut stats_table = Table::new();
+    stats_table
+        .load_preset(UTF8_FULL)
+        .set_content_arrangement(ContentArrangement::Dynamic);
+
+    stats_table.add_row(vec!["Sessions", &stats.session_count.to_string()]);
+    stats_table.add_row(vec!["Messages", &stats.message_count.to_string()]);
+    if stats.total_tokens > 0 {
+        stats_table.add_row(vec!["Total Tokens", &stats.total_tokens.to_string()]);
+        if stats.session_count > 0 {
+            stats_table.add_row(vec![
+                "Avg Tokens/Session",
+                &(stats.total_tokens / stats.session_count as i64).to_string(),
+            ]);
+        }
+    }
+
+    println!("\n{}", "Database Information".green().bold());
+    println!("{}", db_info_table);
+
+    println!("\n{}", "Statistics".green().bold());
+    println!("{}", stats_table);
+
+    if let Some(backup) = stats.latest_backup {
+        let mut backup_table = Table::new();
+        backup_table
+            .load_preset(UTF8_FULL)
+            .set_content_arrangement(ContentArrangement::Dynamic);
+
+        let filename = backup
+            .path
+            .file_name()
+            .and_then(|n| n.to_str())
+            .unwrap_or("unknown");
+        let age_str = format_age(backup.created_at);
+
+        backup_table.add_row(vec!["Latest Backup", filename]);
+        backup_table.add_row(vec!["Age", &age_str]);
+        backup_table.add_row(vec!["Size", &format_size(backup.size, BINARY)]);
+        backup_table.add_row(vec!["Total Backups", &stats.backup_count.to_string()]);
+
+        println!("\n{}", "Backup Information".green().bold());
+        println!("{}", backup_table);
+    } else {
+        let mut backup_table = Table::new();
+        backup_table
+            .load_preset(UTF8_FULL)
+            .set_content_arrangement(ContentArrangement::Dynamic);
+        backup_table.add_row(vec!["Latest Backup", "None"]);
+        backup_table.add_row(vec!["Total Backups", &stats.backup_count.to_string()]);
+
+        println!("\n{}", "Backup Information".green().bold());
println!("{}", backup_table); + } + + println!(); + Ok(()) +} + +pub async fn handle_db_backup(name: Option) -> Result<()> { + println!("{}", "Creating database backup...".cyan()); + + let backup_path = SessionManager::create_backup(name).await?; + + let size = std::fs::metadata(&backup_path)?.len(); + + println!("\n{}", "✓ Backup created successfully".green().bold()); + println!(" Location: {}", backup_path.display()); + println!(" Size: {}", format_size(size, BINARY)); + println!(); + + Ok(()) +} + +pub async fn handle_db_restore(backup_file: PathBuf, force: bool) -> Result<()> { + let backup_path = if backup_file.components().count() == 1 { + Paths::backup_dir().join(&backup_file) + } else { + backup_file.clone() + }; + + if !backup_path.exists() { + if backup_file.components().count() == 1 { + anyhow::bail!( + "Backup file '{}' not found in backup directory.\nRun 'goose db list-backups' to see available backups.", + backup_file.display() + ); + } else { + anyhow::bail!("Backup file not found: {}", backup_path.display()); + } + } + + println!("{}", "Database Restore".bold().cyan()); + println!("{}", "─".repeat(50)); + println!("\n{}:", "Source".green()); + println!(" {}", backup_path.display()); + + let stats = SessionManager::get_database_stats().await?; + println!("\n{}:", "Current Database".yellow()); + println!(" Schema version: {}", stats.schema_version); + println!(" Sessions: {}", stats.session_count); + println!(" Messages: {}", stats.message_count); + + println!("\n{}", "⚠️ Warning:".yellow().bold()); + println!(" This will replace your current database with the backup."); + println!(" A safety backup will be created first."); + + if !force { + let should_restore = confirm("Do you want to proceed with the restore?") + .initial_value(false) + .interact()?; + + if !should_restore { + println!("\n{}", "Restore cancelled.".yellow()); + return Ok(()); + } + } + + println!("\n{}", "Restoring database...".cyan()); + + SessionManager::restore_backup(&backup_path).await?; + + println!("\n{}", "✓ Database restored successfully".green().bold()); + println!( + " {}", + "Important: Please restart goose for changes to take full effect." 
+            .yellow()
+            .bold()
+    );
+    println!();
+
+    Ok(())
+}
+
+pub async fn handle_db_path() -> Result<()> {
+    let session_dir = goose::session::session_manager::ensure_session_dir().await?;
+    let db_path = session_dir.join("sessions.db");
+    println!("{}", db_path.display());
+    Ok(())
+}
+
+pub async fn handle_db_list_backups(format: String) -> Result<()> {
+    let backups = SessionManager::list_backups().await?;
+
+    if backups.is_empty() {
+        println!("{}", "No backups found.".yellow());
+        return Ok(());
+    }
+
+    match format.as_str() {
+        "json" => {
+            println!("{}", serde_json::to_string_pretty(&backups)?);
+        }
+        "table" => {
+            let mut table = Table::new();
+            table
+                .load_preset(UTF8_FULL)
+                .set_content_arrangement(ContentArrangement::Dynamic)
+                .set_header(vec![
+                    Cell::new("Filename").set_alignment(CellAlignment::Left),
+                    Cell::new("Schema Version").set_alignment(CellAlignment::Right),
+                    Cell::new("Size").set_alignment(CellAlignment::Right),
+                    Cell::new("Age").set_alignment(CellAlignment::Right),
+                ]);
+
+            for backup in &backups {
+                let filename = backup
+                    .path
+                    .file_name()
+                    .and_then(|n| n.to_str())
+                    .unwrap_or("unknown");
+
+                let version_str = backup
+                    .schema_version
+                    .map(|v| v.to_string())
+                    .unwrap_or_else(|| "?".to_string());
+
+                let size_str = format_size(backup.size, BINARY);
+                let age_str = format_age(backup.created_at);
+
+                table.add_row(vec![
+                    Cell::new(filename).set_alignment(CellAlignment::Left),
+                    Cell::new(version_str).set_alignment(CellAlignment::Right),
+                    Cell::new(size_str).set_alignment(CellAlignment::Right),
+                    Cell::new(age_str).set_alignment(CellAlignment::Right),
+                ]);
+            }
+
+            println!("\n{}", "Database Backups".bold().cyan());
+            println!("{}", table);
+            println!("{} backups found\n", backups.len());
+        }
+        _ => {
+            println!("Invalid format: {}", format);
+            anyhow::bail!("Invalid format: {}", format);
+        }
+    }
+
+    Ok(())
+}
+
+async fn resolve_backup_files_for_deletion(
+    backup_files: Vec<PathBuf>,
+    all: bool,
+    cleanup_only: bool,
+) -> Result<Vec<PathBuf>> {
+    if all {
+        let backups = SessionManager::list_backups().await?;
+        if backups.is_empty() {
+            println!("{}", "No backups found to delete.".yellow());
+            return Ok(vec![]);
+        }
+        return Ok(backups.into_iter().map(|b| b.path).collect());
+    }
+
+    if backup_files.is_empty() {
+        if cleanup_only {
+            return Ok(vec![]);
+        }
+        anyhow::bail!("No backup files specified. Use --all to delete all backups, or provide specific filenames.");
+    }
+
+    let backup_dir = Paths::backup_dir();
+    let canonical_backup_dir = backup_dir
+        .canonicalize()
+        .map_err(|e| anyhow::anyhow!("Failed to resolve backup directory: {}", e))?;
+
+    backup_files
+        .into_iter()
+        .map(|file| {
+            let path = if file.components().count() == 1 {
+                backup_dir.join(&file)
+            } else {
+                file.clone()
+            };
+
+            if !path.exists() {
+                anyhow::bail!(
+                    "Backup file '{}' not found{}",
+                    file.display(),
+                    if file.components().count() == 1 {
+                        ". Run 'goose db list-backups' to see available backups."
+                    } else {
+                        ""
+                    }
+                );
+            }
+
+            let canonical_path = path.canonicalize().map_err(|e| {
+                anyhow::anyhow!("Failed to resolve path '{}': {}", path.display(), e)
+            })?;
+
+            if !canonical_path.starts_with(&canonical_backup_dir) {
+                anyhow::bail!(
+                    "Security error: Path '{}' is outside the backup directory",
+                    file.display()
+                );
+            }
+
+            Ok(path)
+        })
+        .collect()
+}
+
+fn report_deletion_results(success_count: usize, failed_files: &[(&PathBuf, std::io::Error)]) {
+    if success_count > 0 {
+        println!(
+            "\n{}",
+            format!(
+                "✓ Deleted {} backup{} successfully",
+                success_count,
+                if success_count == 1 { "" } else { "s" }
+            )
+            .green()
+            .bold()
+        );
+    }
+
+    if !failed_files.is_empty() {
+        println!(
+            "\n{} Failed to delete {} backup{}:",
+            "⚠️".yellow(),
+            failed_files.len(),
+            if failed_files.len() == 1 { "" } else { "s" }
+        );
+        for (path, err) in failed_files {
+            println!(
+                " {} - {}",
+                path.file_name()
+                    .and_then(|n| n.to_str())
+                    .unwrap_or("unknown"),
+                err
+            );
+        }
+    }
+}
+
+async fn cleanup_orphaned_files(backup_dir: &std::path::Path) -> Result<usize> {
+    let mut orphaned_count = 0;
+    let mut entries = tokio::fs::read_dir(backup_dir).await?;
+
+    while let Some(entry) = entries.next_entry().await? {
+        let path = entry.path();
+        if let Some(ext) = path.extension() {
+            if ext == "db-wal" || ext == "db-shm" {
+                if let Some(stem) = path.file_stem().and_then(|s| s.to_str()) {
+                    let db_path = backup_dir.join(format!("{}.db", stem));
+                    if !db_path.exists() && tokio::fs::remove_file(&path).await.is_ok() {
+                        orphaned_count += 1;
+                    }
+                }
+            }
+        }
+    }
+
+    Ok(orphaned_count)
+}
+
+pub async fn handle_db_delete_backup(
+    backup_files: Vec<PathBuf>,
+    all: bool,
+    cleanup: bool,
+    force: bool,
+) -> Result<()> {
+    let files_to_delete = resolve_backup_files_for_deletion(backup_files, all, cleanup).await?;
+
+    if !files_to_delete.is_empty() {
+        let mut table = Table::new();
+        table
+            .load_preset(UTF8_FULL)
+            .set_content_arrangement(ContentArrangement::Dynamic)
+            .set_header(vec![
+                Cell::new("Filename").set_alignment(CellAlignment::Left),
+                Cell::new("Size").set_alignment(CellAlignment::Right),
+                Cell::new("Age").set_alignment(CellAlignment::Right),
+            ]);
+
+        for path in &files_to_delete {
+            let metadata = std::fs::metadata(path)?;
+            let age_secs = std::time::SystemTime::now()
+                .duration_since(metadata.modified()?)
+                .unwrap_or_default()
+                .as_secs();
+
+            table.add_row(vec![
+                Cell::new(
+                    path.file_name()
+                        .and_then(|n| n.to_str())
+                        .unwrap_or("unknown"),
+                ),
+                Cell::new(format_size(metadata.len(), BINARY)),
+                Cell::new(match age_secs {
+                    s if s > 86400 => format!("{} days ago", s / 86400),
+                    s if s > 3600 => format!("{} hours ago", s / 3600),
+                    s if s > 60 => format!("{} mins ago", s / 60),
+                    _ => "just now".to_string(),
+                }),
+            ]);
+        }
+
+        println!("\n{}", "Backups to Delete".bold().yellow());
+        println!("{}", table);
+
+        if !force
+            && !confirm(format!(
+                "Do you want to delete {} backup{}?",
+                files_to_delete.len(),
+                if files_to_delete.len() == 1 { "" } else { "s" }
+            ))
+            .initial_value(false)
+            .interact()?
+        {
+            println!("\n{}", "Deletion cancelled.".yellow());
+            return Ok(());
+        }
+
+        println!("\n{}", "Deleting backups...".cyan());
+    }
+
+    let (successes, failures): (Vec<_>, Vec<_>) =
+        futures::future::join_all(files_to_delete.iter().map(|path| async move {
+            let result = tokio::fs::remove_file(path)
+                .await
+                .map(|_| path)
+                .map_err(|e| (path, e));
+
+            if result.is_ok() {
+                if let Some(stem) = path.file_stem().and_then(|s| s.to_str()) {
+                    let wal_path = path.with_file_name(format!("{}.db-wal", stem));
+                    let shm_path = path.with_file_name(format!("{}.db-shm", stem));
+                    let _ = tokio::fs::remove_file(wal_path).await;
+                    let _ = tokio::fs::remove_file(shm_path).await;
+                }
+            }
+
+            result
+        }))
+        .await
+        .into_iter()
+        .partition(Result::is_ok);
+
+    let success_count = successes.len();
+    let failed_files: Vec<_> = failures.into_iter().filter_map(Result::err).collect();
+
+    report_deletion_results(success_count, &failed_files);
+
+    if cleanup {
+        println!("\n{}", "Cleaning up orphaned auxiliary files...".cyan());
+        let backup_dir = Paths::backup_dir();
+        let orphaned_count = cleanup_orphaned_files(&backup_dir).await?;
+
+        if orphaned_count > 0 {
+            println!(
+                "{}",
+                format!(
+                    "✓ Cleaned up {} orphaned auxiliary file{}",
+                    orphaned_count,
+                    if orphaned_count == 1 { "" } else { "s" }
+                )
+                .green()
+                .bold()
+            );
+        } else {
+            println!("{}", "No orphaned auxiliary files found".bright_black());
+        }
+    }
+
+    println!();
+    Ok(())
+}
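Taken together, the handlers above give a fully scriptable backup lifecycle. A minimal end-to-end sketch (assuming a built `goose` binary on `PATH` and `jq` installed; the camelCase JSON keys follow the `#[serde(rename_all = "camelCase")]` serialization of `BackupInfo`):

```bash
#!/usr/bin/env bash
set -euo pipefail

# Locate the live database and confirm it is healthy.
goose db path
goose db status

# Take a named backup; the timestamp suffix is appended automatically.
goose db backup --name pre-upgrade

# Backups are listed newest-first, so .[0] is the most recent.
latest=$(goose db list-backups --format json | jq -r '.[0].path')
echo "Most recent backup: ${latest}"

# Restore it non-interactively (a safety backup is created first).
goose db restore "${latest}" --force
```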
diff --git a/crates/goose-cli/src/commands/mod.rs b/crates/goose-cli/src/commands/mod.rs
index 698fd02fdfe4..4b165c5dc7c8 100644
--- a/crates/goose-cli/src/commands/mod.rs
+++ b/crates/goose-cli/src/commands/mod.rs
@@ -1,6 +1,7 @@
 pub mod acp;
 pub mod bench;
 pub mod configure;
+pub mod db;
 pub mod info;
 pub mod project;
 pub mod recipe;
diff --git a/crates/goose-server/Cargo.toml b/crates/goose-server/Cargo.toml
index 59d4f2041823..938d898bd1c1 100644
--- a/crates/goose-server/Cargo.toml
+++ b/crates/goose-server/Cargo.toml
@@ -39,6 +39,7 @@ reqwest = { version = "0.12.9", features = ["json", "rustls-tls", "blocking", "m
 tokio-util = "0.7.15"
 uuid = { version = "1.11", features = ["v4"] }
 serde_path_to_error = "0.1.20"
+once_cell = "1.19"
 
 [[bin]]
 name = "goosed"
diff --git a/crates/goose-server/src/openapi.rs b/crates/goose-server/src/openapi.rs
index 4667b93135ac..134b1cca713d 100644
--- a/crates/goose-server/src/openapi.rs
+++ b/crates/goose-server/src/openapi.rs
@@ -326,6 +326,11 @@ derive_utoipa!(Icon as IconSchema);
     paths(
         super::routes::status::status,
         super::routes::status::diagnostics,
+        super::routes::database::database_status,
+        super::routes::database::create_backup,
+        super::routes::database::list_backups,
+        super::routes::database::delete_backups,
+        super::routes::database::restore_backup,
         super::routes::config_management::backup_config,
         super::routes::config_management::recover_config,
         super::routes::config_management::validate_config,
@@ -444,6 +449,15 @@ derive_utoipa!(Icon as IconSchema);
         ModelInfo,
         Session,
         SessionInsights,
+        super::routes::database::DatabaseStatusResponse,
+        super::routes::database::BackupInfo,
+        super::routes::database::CreateBackupRequest,
+        super::routes::database::CreateBackupResponse,
+        super::routes::database::BackupsListResponse,
+        super::routes::database::RestoreBackupRequest,
+        super::routes::database::RestoreBackupResponse,
+        super::routes::database::DeleteBackupsRequest,
+        super::routes::database::DeleteBackupsResponse,
         SessionType,
         Conversation,
         IconSchema,
diff --git a/crates/goose-server/src/routes/database.rs b/crates/goose-server/src/routes/database.rs
new file mode 100644
index 000000000000..c31fe60000f4
--- /dev/null
+++ b/crates/goose-server/src/routes/database.rs
@@ -0,0 +1,472 @@
+use axum::{
+    http::StatusCode,
+    routing::{delete, get, post},
+    Json, Router,
+};
+use chrono::{DateTime, Utc};
+use goose::config::paths::Paths;
+use goose::session::session_manager::SessionManager;
+use once_cell::sync::Lazy;
+use serde::{Deserialize, Serialize};
+use std::path::{Path, PathBuf};
+use std::sync::Arc;
+use tokio::sync::Mutex;
+use utoipa::ToSchema;
+
+use crate::routes::errors::ErrorResponse;
+
+static BACKUP_MUTEX: Lazy<Arc<Mutex<()>>> = Lazy::new(|| Arc::new(Mutex::new(())));
+static RESTORE_MUTEX: Lazy<Arc<Mutex<()>>> = Lazy::new(|| Arc::new(Mutex::new(())));
+
+#[derive(Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct BackupInfo {
+    pub filename: String,
+    pub created_at: DateTime<Utc>,
+    pub size: u64,
+    pub schema_version: Option<i32>,
+}
+
+#[derive(Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateBackupRequest {
+    /// Optional custom name for the backup
+    pub name: Option<String>,
+}
+
+#[derive(Serialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct CreateBackupResponse {
+    /// Filename of the created backup
+    pub filename: String,
+    /// Size of the backup file in bytes
+    pub size: u64,
+    /// When the backup was created
+    pub created_at: DateTime<Utc>,
+}
+
+#[derive(Serialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct BackupsListResponse {
+    /// List of available backups
+    pub backups: Vec<BackupInfo>,
+}
+
+#[derive(Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct RestoreBackupRequest {
+    /// Filename of the backup to restore
+    pub filename: String,
+}
+
+#[derive(Serialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct RestoreBackupResponse {
+    /// Success message
+    pub message: String,
+    /// Backup that was restored
+    pub restored_from: String,
+}
+
+#[derive(Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct DeleteBackupsRequest {
+    /// List of backup filenames to delete
+    pub filenames: Vec<String>,
+    /// Delete all backups if true
+    pub delete_all: bool,
+    /// Clean up orphaned WAL/SHM files
+    pub cleanup_orphaned: bool,
+}
+
+#[derive(Serialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct DeleteBackupsResponse {
+    /// Successfully deleted files
+    pub deleted: Vec<String>,
+    /// Files that failed to delete
+    pub failed: Vec<String>,
+    /// Number of orphaned files cleaned
+    pub orphaned_cleaned: usize,
+}
+
+#[derive(Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct DatabaseStatusResponse {
+    pub db_size: u64,
+    pub schema_version: i32,
+    pub is_latest_version: bool,
+    pub session_count: usize,
+    pub message_count: usize,
+    pub total_tokens: i64,
+    pub latest_backup: Option<BackupInfo>,
+    pub backup_count: usize,
+    pub timestamp: DateTime<Utc>,
+}
+
+#[utoipa::path(
+    get,
+    path = "/database/status",
+    tag = "Database Management",
+    responses(
+        (status = 200, description = "Database status retrieved successfully",
+            body = DatabaseStatusResponse),
+        (status = 401, description = "Unauthorized - Invalid or missing API key"),
+        (status = 500, description = "Internal server error")
+    ),
+    security(
+        ("api_key" = [])
+    )
+)]
+pub async fn database_status() -> Result<Json<DatabaseStatusResponse>, StatusCode> {
+    let stats = SessionManager::get_database_stats()
+        .await
+        .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
+
+    let latest_backup = stats.latest_backup.map(|backup| {
+        let filename = backup
+            .path
+            .file_name()
+            .and_then(|n| n.to_str())
+            .unwrap_or("unknown")
+            .to_string();
+
+        BackupInfo {
+            filename,
+            created_at: backup.created_at,
+            size: backup.size,
+            schema_version: backup.schema_version,
+        }
+    });
+
+    Ok(Json(DatabaseStatusResponse {
+        db_size: stats.db_size,
+        schema_version: stats.schema_version,
+        is_latest_version: stats.is_latest_version,
+        session_count: stats.session_count,
+        message_count: stats.message_count,
+        total_tokens: stats.total_tokens,
+        latest_backup,
+        backup_count: stats.backup_count,
+        timestamp: Utc::now(),
+    }))
+}
+
+#[utoipa::path(
+    post,
+    path = "/database/backup",
+    request_body = CreateBackupRequest,
+    responses(
+        (status = 201, description = "Backup created successfully", body = CreateBackupResponse),
+        (status = 401, description = "Unauthorized - Invalid or missing API key"),
+        (status = 500, description = "Internal server error")
+    ),
+    security(
+        ("api_key" = [])
+    ),
+    tag = "Database Management"
+)]
+pub async fn create_backup(
+    Json(request): Json<CreateBackupRequest>,
+) -> Result<(StatusCode, Json<CreateBackupResponse>), ErrorResponse> {
+    let _lock = BACKUP_MUTEX.try_lock().map_err(|_| ErrorResponse {
+        message: "A backup operation is already in progress".to_string(),
+        status: StatusCode::CONFLICT,
+    })?;
+
+    let backup_path = SessionManager::create_backup(request.name)
+        .await
+        .map_err(|e| ErrorResponse {
+            message: format!("Failed to create backup: {}", e),
+            status: StatusCode::INTERNAL_SERVER_ERROR,
+        })?;
+
+    let filename = backup_path
+        .file_name()
+        .and_then(|n| n.to_str())
+        .unwrap_or("unknown")
+        .to_string();
+
+    let metadata = tokio::fs::metadata(&backup_path)
+        .await
+        .map_err(|e| ErrorResponse {
+            message: format!("Failed to read backup file metadata: {}", e),
+            status: StatusCode::INTERNAL_SERVER_ERROR,
+        })?;
+
+    let size = metadata.len();
+    let created_at = Utc::now();
+
+    Ok((
+        StatusCode::CREATED,
+        Json(CreateBackupResponse {
+            filename,
+            size,
+            created_at,
+        }),
+    ))
+}
+
+#[utoipa::path(
+    get,
+    path = "/database/backups",
+    responses(
+        (status = 200, description = "List of backups retrieved successfully", body = BackupsListResponse),
+        (status = 401, description = "Unauthorized - Invalid or missing API key"),
+        (status = 500, description = "Internal server error")
+    ),
+    security(
+        ("api_key" = [])
+    ),
+    tag = "Database Management"
+)]
+pub async fn list_backups() -> Result<Json<BackupsListResponse>, ErrorResponse> {
+    let backups = SessionManager::list_backups()
+        .await
+        .map_err(|e| ErrorResponse {
+            message: format!("Failed to list backups: {}", e),
+            status: StatusCode::INTERNAL_SERVER_ERROR,
+        })?;
+
+    let sanitized_backups = backups
+        .into_iter()
+        .map(|backup| {
+            let filename = backup
+                .path
+                .file_name()
+                .and_then(|n| n.to_str())
+                .unwrap_or("unknown")
+                .to_string();
+
+            BackupInfo {
+                filename,
+                created_at: backup.created_at,
+                size: backup.size,
+                schema_version: backup.schema_version,
+            }
+        })
+        .collect();
+
+    Ok(Json(BackupsListResponse {
+        backups: sanitized_backups,
+    }))
+}
+
+#[utoipa::path(
+    post,
+    path = "/database/restore",
+    request_body = RestoreBackupRequest,
+    responses(
+        (status = 200, description = "Database restored successfully", body = RestoreBackupResponse),
+        (status = 400, description = "Bad request - Invalid backup filename"),
+        (status = 401, description = "Unauthorized - Invalid or missing API key"),
+        (status = 404, description = "Backup not found"),
+        (status = 500, description = "Internal server error")
+    ),
+    security(
+        ("api_key" = [])
+    ),
+    tag = "Database Management"
+)]
+pub async fn restore_backup(
+    Json(request): Json<RestoreBackupRequest>,
+) -> Result<Json<RestoreBackupResponse>, ErrorResponse> {
+    let _lock = RESTORE_MUTEX.try_lock().map_err(|_| ErrorResponse {
+        message: "A restore operation is already in progress".to_string(),
+        status: StatusCode::CONFLICT,
+    })?;
+
+    let filename = request.filename.trim();
+    if filename.is_empty() {
+        return Err(ErrorResponse {
+            message: "Backup filename cannot be empty".to_string(),
+            status: StatusCode::BAD_REQUEST,
+        });
+    }
+
+    let backup_dir = Paths::backup_dir();
+    let backup_path = backup_dir.join(filename);
+
+    let canonical_backup = backup_path.canonicalize().map_err(|_| ErrorResponse {
+        message: format!("Backup '{}' not found", filename),
+        status: StatusCode::NOT_FOUND,
+    })?;
+    let canonical_backup_dir = backup_dir.canonicalize().map_err(|e| ErrorResponse {
+        message: format!("Failed to access backup directory: {}", e),
+        status: StatusCode::INTERNAL_SERVER_ERROR,
+    })?;
+
+    if !canonical_backup.starts_with(&canonical_backup_dir) {
+        tracing::warn!("Path traversal attempt detected: {}", filename);
+        return Err(ErrorResponse {
+            message: "Invalid backup path".to_string(),
+            status: StatusCode::FORBIDDEN,
+        });
+    }
+
+    if !canonical_backup.exists() {
+        return Err(ErrorResponse {
+            message: format!("Backup '{}' not found", filename),
+            status: StatusCode::NOT_FOUND,
+        });
+    }
+
+    SessionManager::restore_backup(&canonical_backup)
+        .await
+        .map_err(|e| ErrorResponse {
+            message: format!("Failed to restore backup: {}", e),
+            status: StatusCode::INTERNAL_SERVER_ERROR,
+        })?;
+
+    Ok(Json(RestoreBackupResponse {
+        message: "Database restored successfully. Server restart required.".to_string(),
+        restored_from: filename.to_string(),
+    }))
+}
+
+#[utoipa::path(
+    delete,
+    path = "/database/backups/delete",
+    request_body = DeleteBackupsRequest,
+    responses(
+        (status = 200, description = "Backups deleted successfully", body = DeleteBackupsResponse),
+        (status = 400, description = "Bad request - Invalid parameters"),
+        (status = 401, description = "Unauthorized - Invalid or missing API key"),
+        (status = 500, description = "Internal server error")
+    ),
+    security(
+        ("api_key" = [])
+    ),
+    tag = "Database Management"
+)]
+pub async fn delete_backups(
+    Json(request): Json<DeleteBackupsRequest>,
+) -> Result<Json<DeleteBackupsResponse>, ErrorResponse> {
+    let _lock = BACKUP_MUTEX.try_lock().map_err(|_| ErrorResponse {
+        message: "A backup operation is in progress, cannot delete".to_string(),
+        status: StatusCode::CONFLICT,
+    })?;
+
+    if request.delete_all && !request.filenames.is_empty() {
+        return Err(ErrorResponse {
+            message: "Cannot specify both deleteAll and specific filenames".to_string(),
+            status: StatusCode::BAD_REQUEST,
+        });
+    }
+
+    if !request.delete_all && request.filenames.is_empty() {
+        return Err(ErrorResponse {
+            message: "Must specify either deleteAll or at least one filename".to_string(),
+            status: StatusCode::BAD_REQUEST,
+        });
+    }
+
+    let backup_dir = Paths::backup_dir();
+
+    let files_to_delete: Vec<PathBuf> = if request.delete_all {
+        let backups = SessionManager::list_backups()
+            .await
+            .map_err(|e| ErrorResponse {
+                message: format!("Failed to list backups: {}", e),
+                status: StatusCode::INTERNAL_SERVER_ERROR,
+            })?;
+        backups.into_iter().map(|b| b.path).collect()
+    } else {
+        let mut resolved_paths = Vec::new();
+        for filename in &request.filenames {
+            let path = backup_dir.join(filename);
+            if path.exists() {
+                let canonical = path.canonicalize().map_err(|e| ErrorResponse {
+                    message: format!("Failed to resolve backup path: {}", e),
+                    status: StatusCode::INTERNAL_SERVER_ERROR,
+                })?;
+                let canonical_backup_dir =
+                    backup_dir.canonicalize().map_err(|e| ErrorResponse {
+                        message: format!("Failed to access backup directory: {}", e),
+                        status: StatusCode::INTERNAL_SERVER_ERROR,
+                    })?;
+
+                if !canonical.starts_with(&canonical_backup_dir) {
+                    tracing::warn!("Path traversal attempt detected: {}", filename);
+                    return Err(ErrorResponse {
+                        message: "Invalid backup path".to_string(),
+                        status: StatusCode::FORBIDDEN,
+                    });
+                }
+                resolved_paths.push(canonical);
+            }
+        }
+        resolved_paths
+    };
+
+    let mut deleted = Vec::new();
+    let mut failed = Vec::new();
+
+    for path in files_to_delete {
+        let filename = path
+            .file_name()
+            .and_then(|n| n.to_str())
+            .unwrap_or("unknown")
+            .to_string();
+
+        match tokio::fs::remove_file(&path).await {
+            Ok(_) => {
+                deleted.push(filename.clone());
+
+                let wal_path = path.with_extension("db-wal");
+                if wal_path.exists() {
+                    let _ = tokio::fs::remove_file(&wal_path).await;
+                }
+
+                let shm_path = path.with_extension("db-shm");
+                if shm_path.exists() {
+                    let _ = tokio::fs::remove_file(&shm_path).await;
+                }
+            }
+            Err(_) => {
+                failed.push(filename);
+            }
+        }
+    }
+
+    let mut orphaned_cleaned = 0;
+    if request.cleanup_orphaned {
+        orphaned_cleaned = cleanup_orphaned_files(&backup_dir).await;
+    }
+
+    Ok(Json(DeleteBackupsResponse {
+        deleted,
+        failed,
+        orphaned_cleaned,
+    }))
+}
+
+async fn cleanup_orphaned_files(backup_dir: &Path) -> usize {
+    let mut cleaned = 0;
+
+    if let Ok(mut entries) = tokio::fs::read_dir(backup_dir).await {
+        while let Ok(Some(entry)) = entries.next_entry().await {
+            let path = entry.path();
+            if let Some(extension) = path.extension() {
+                let ext_str = extension.to_string_lossy();
+                if ext_str == "db-wal" || ext_str == "db-shm" {
+                    let db_path = path.with_extension("db");
+                    if !db_path.exists() && tokio::fs::remove_file(&path).await.is_ok() {
+                        cleaned += 1;
+                    }
+                }
+            }
+        }
+    }
+
+    cleaned
+}
+
+pub fn routes() -> Router {
+    Router::new()
+        .route("/database/status", get(database_status))
+        .route("/database/backup", post(create_backup))
+        .route("/database/backups", get(list_backups))
+        .route("/database/backups/delete", delete(delete_backups))
+        .route("/database/restore", post(restore_backup))
+}
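The router above exposes five endpoints under `/database`. A hedged sketch of how a client might drive them with `curl` (the base URL, port, and `X-Secret-Key` header value are assumptions about a local `goosed` setup, not fixed by this patch; the illustrative backup filenames are hypothetical; body fields are camelCase per the request structs):

```bash
# Assumed local goosed instance; adjust base URL and auth header to your setup.
BASE="http://127.0.0.1:${GOOSE_PORT:-3000}"
AUTH="X-Secret-Key: ${GOOSE_SECRET:-test}"

curl -s -H "$AUTH" "$BASE/database/status"    # status + stats
curl -s -H "$AUTH" "$BASE/database/backups"   # backup list

# Create a named backup (201 with filename/size/createdAt on success).
curl -s -X POST -H "$AUTH" -H 'Content-Type: application/json' \
  -d '{"name":"api-test"}' "$BASE/database/backup"

# Restore by filename (traversal outside the backup dir is rejected).
curl -s -X POST -H "$AUTH" -H 'Content-Type: application/json' \
  -d '{"filename":"backup_20251031_143022.db"}' "$BASE/database/restore"

# Delete specific backups and sweep orphaned WAL/SHM files.
curl -s -X DELETE -H "$AUTH" -H 'Content-Type: application/json' \
  -d '{"filenames":["api-test_20251031_143500.db"],"deleteAll":false,"cleanupOrphaned":true}' \
  "$BASE/database/backups/delete"
```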
diff --git a/crates/goose-server/src/routes/mod.rs b/crates/goose-server/src/routes/mod.rs
index 9ca3799e5d89..abc44992b1d5 100644
--- a/crates/goose-server/src/routes/mod.rs
+++ b/crates/goose-server/src/routes/mod.rs
@@ -1,6 +1,7 @@
 pub mod agent;
 pub mod audio;
 pub mod config_management;
+pub mod database;
 pub mod errors;
 pub mod recipe;
 pub mod recipe_utils;
@@ -22,6 +23,7 @@ pub fn configure(state: Arc<AppState>) -> Router {
         .merge(reply::routes(state.clone()))
         .merge(agent::routes(state.clone()))
         .merge(audio::routes(state.clone()))
+        .merge(database::routes())
         .merge(config_management::routes(state.clone()))
         .merge(recipe::routes(state.clone()))
         .merge(session::routes(state.clone()))
diff --git a/crates/goose/src/config/paths.rs b/crates/goose/src/config/paths.rs
index 839f8f1b6598..66ea5e4f6807 100644
--- a/crates/goose/src/config/paths.rs
+++ b/crates/goose/src/config/paths.rs
@@ -40,6 +40,10 @@ impl Paths {
         Self::get_dir(DirType::State)
     }
 
+    pub fn backup_dir() -> PathBuf {
+        Self::data_dir().join("backups")
+    }
+
     pub fn in_state_dir(subpath: &str) -> PathBuf {
         Self::state_dir().join(subpath)
     }
diff --git a/crates/goose/src/scheduler.rs b/crates/goose/src/scheduler.rs
index 84ce29560a23..9047beede313 100644
--- a/crates/goose/src/scheduler.rs
+++ b/crates/goose/src/scheduler.rs
@@ -1409,6 +1409,7 @@ mod tests {
         fs::create_dir_all(&recipe_dir)?;
 
         let _ = crate::session::session_manager::ensure_session_dir()
+            .await
            .expect("Failed to ensure app session dir");
 
         let schedule_id_str = "test_schedule_001_scheduler_check".to_string();
diff --git a/crates/goose/src/session/session_manager.rs b/crates/goose/src/session/session_manager.rs
index 2f960cd8497e..ec4d552d4548 100644
--- a/crates/goose/src/session/session_manager.rs
+++ b/crates/goose/src/session/session_manager.rs
@@ -5,15 +5,15 @@ use crate::providers::base::{Provider, MSG_COUNT_FOR_SESSION_NAME_GENERATION};
 use crate::recipe::Recipe;
 use crate::session::extension_data::ExtensionData;
 use anyhow::Result;
-use chrono::{DateTime, Utc};
+use chrono::{DateTime, Local, Utc};
 use rmcp::model::Role;
 use serde::{Deserialize, Serialize};
-use sqlx::sqlite::SqliteConnectOptions;
-use sqlx::{Pool, Sqlite};
+use sqlx::sqlite::{SqliteConnectOptions, SqliteConnection};
+use sqlx::{Connection, Pool, Sqlite};
 use std::collections::HashMap;
-use std::fs;
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
+use tokio::fs;
 use tokio::sync::OnceCell;
 use tracing::{info, warn};
 use utoipa::ToSchema;
@@ -114,6 +114,33 @@ pub struct SessionInsights {
     total_tokens: i64,
 }
 
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct DatabaseStats {
+    #[schema(value_type = String)]
+    pub db_path: PathBuf,
+    pub db_size: u64,
+    pub schema_version: i32,
+    pub is_latest_version: bool,
+    pub session_count: usize,
+    pub message_count: usize,
+    pub total_tokens: i64,
+    #[schema(value_type = String)]
+    pub backup_dir: PathBuf,
+    pub latest_backup: Option<BackupInfo>,
+    pub backup_count: usize,
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
+#[serde(rename_all = "camelCase")]
+pub struct BackupInfo {
+    #[schema(value_type = String)]
+    pub path: PathBuf,
+    pub created_at: DateTime<Utc>,
+    pub size: u64,
+    pub schema_version: Option<i32>,
+}
+
 impl SessionUpdateBuilder {
     fn new(session_id: String) -> Self {
         Self {
@@ -328,17 +355,33 @@ impl SessionManager {
             .search_chat_history(query, limit, after_date, before_date, exclude_session_id)
             .await
     }
+
+    pub async fn get_database_stats() -> Result<DatabaseStats> {
+        Self::instance().await?.get_database_stats().await
+    }
+
+    pub async fn create_backup(backup_name: Option<String>) -> Result<PathBuf> {
+        Self::instance().await?.create_backup(backup_name).await
+    }
+
+    pub async fn list_backups() -> Result<Vec<BackupInfo>> {
+        Self::instance().await?.list_backups().await
+    }
+
+    pub async fn restore_backup(backup_path: &Path) -> Result<()> {
+        Self::instance().await?.restore_backup(backup_path).await
+    }
 }
 
 pub struct SessionStorage {
     pool: Pool<Sqlite>,
 }
 
-pub fn ensure_session_dir() -> Result<PathBuf> {
+pub async fn ensure_session_dir() -> Result<PathBuf> {
     let session_dir = Paths::data_dir().join("sessions");
 
     if !session_dir.exists() {
-        fs::create_dir_all(&session_dir)?;
+        fs::create_dir_all(&session_dir).await?;
     }
 
     Ok(session_dir)
@@ -438,7 +481,7 @@ impl sqlx::FromRow<'_, sqlx::sqlite::SqliteRow> for Session {
 
 impl SessionStorage {
     async fn new() -> Result<Self> {
-        let session_dir = ensure_session_dir()?;
+        let session_dir = ensure_session_dir().await?;
         let db_path = session_dir.join("sessions.db");
 
         let storage = if db_path.exists() {
@@ -658,6 +701,26 @@ impl SessionStorage {
             current_version, CURRENT_SCHEMA_VERSION
         );
 
+        let backup_name = format!(
+            "pre_migration_v{}_to_v{}",
+            current_version, CURRENT_SCHEMA_VERSION
+        );
+
+        match self.create_backup(Some(backup_name)).await {
+            Ok(backup_path) => {
+                info!("✓ Backup created and validated: {}", backup_path.display());
+                info!(
+                    " If migration fails, restore with: goose db restore {}",
+                    backup_path.display()
+                );
+            }
+            Err(e) => {
+                warn!("⚠️ Failed to create backup before migration: {}", e);
+                warn!("⚠️ Proceeding with migration WITHOUT backup");
+                warn!(" Migration will continue as backups are best-effort");
+            }
+        };
+
         for version in (current_version + 1)..=CURRENT_SCHEMA_VERSION {
             info!(" Applying migration v{}...", version);
             self.apply_migration(version).await?;
@@ -1150,6 +1213,263 @@ impl SessionStorage {
             .execute()
             .await
     }
+
+    pub async fn get_database_stats(&self) -> Result<DatabaseStats> {
+        let session_dir = ensure_session_dir().await?;
+        let db_path = session_dir.join("sessions.db");
+
+        let db_size = if db_path.exists() {
+            fs::metadata(&db_path).await?.len()
+        } else {
+            0
+        };
+
+        let schema_version = self.get_schema_version().await?;
+        let is_latest_version = schema_version == CURRENT_SCHEMA_VERSION;
+
+        let session_count = sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM sessions")
+            .fetch_one(&self.pool)
+            .await? as usize;
+
+        let message_count = sqlx::query_scalar::<_, i64>("SELECT COUNT(*) FROM messages")
+            .fetch_one(&self.pool)
+            .await? as usize;
+
+        let insights = self.get_insights().await?;
+
+        let backup_dir = Paths::backup_dir();
+        let backups = Self::scan_backup_directory().await?;
+        let latest_backup = backups.first().cloned();
+        let backup_count = backups.len();
+
+        Ok(DatabaseStats {
+            db_path,
+            db_size,
+            schema_version,
+            is_latest_version,
+            session_count,
+            message_count,
+            total_tokens: insights.total_tokens,
+            backup_dir,
+            latest_backup,
+            backup_count,
+        })
+    }
+
+    pub async fn create_backup(&self, backup_name: Option<String>) -> Result<PathBuf> {
+        let session_dir = ensure_session_dir().await?;
+        let db_path = session_dir.join("sessions.db");
+
+        if !db_path.exists() {
+            anyhow::bail!("Database file does not exist at {}", db_path.display());
+        }
+
+        let backup_dir = Paths::backup_dir();
+        fs::create_dir_all(&backup_dir).await?;
+
+        let timestamp = Local::now().format("%Y%m%d_%H%M%S");
+        let name = if let Some(custom_name) = backup_name {
+            format!("{}_{}", custom_name, timestamp)
+        } else {
+            format!("backup_{}", timestamp)
+        };
+
+        let backup_path = backup_dir.join(format!("{}.db", name));
+
+        fs::copy(&db_path, &backup_path).await?;
+
+        // Validate backup
+        let original_size = fs::metadata(&db_path).await?.len();
+        let backup_size = match fs::metadata(&backup_path).await {
+            Ok(metadata) => metadata.len(),
+            Err(e) => {
+                anyhow::bail!("Backup verification failed: backup file not found - {}", e);
+            }
+        };
+
+        if original_size != backup_size {
+            fs::remove_file(&backup_path).await?;
+            anyhow::bail!(
+                "Backup verification failed: size mismatch (original: {}, backup: {})",
+                original_size,
+                backup_size
+            );
+        }
+
+        // Optional: SQLite integrity check
+        let options = SqliteConnectOptions::new()
+            .filename(&backup_path)
+            .create_if_missing(false);
+
+        match SqliteConnection::connect_with(&options).await {
+            Ok(mut conn) => {
+                match sqlx::query_scalar::<_, String>("PRAGMA quick_check")
+                    .fetch_one(&mut conn)
+                    .await
+                {
+                    Ok(result) => {
+                        if result != "ok" {
+                            fs::remove_file(&backup_path).await?;
+                            anyhow::bail!("Backup integrity check failed: {}", result);
+                        }
+                    }
+                    Err(e) => {
+                        warn!("Backup integrity check could not complete: {}", e);
+                    }
+                }
+            }
+            Err(e) => {
+                warn!("Could not open backup for integrity check: {}", e);
+            }
+        }
+
+        info!(
+            "Created and validated database backup: {}",
+            backup_path.display()
+        );
+
+        Ok(backup_path)
+    }
+
+    async fn read_backup_schema_version(path: &Path) -> Option<i32> {
+        let options = SqliteConnectOptions::new()
+            .filename(path)
+            .create_if_missing(false)
+            .read_only(true);
+
+        let mut conn = match SqliteConnection::connect_with(&options).await {
+            Ok(conn) => conn,
+            Err(_) => return None,
+        };
+
+        let table_exists = sqlx::query_scalar::<_, bool>(
+            r#"
+            SELECT EXISTS (
+                SELECT name FROM sqlite_master
+                WHERE type='table' AND name='schema_version'
+            )
+            "#,
+        )
+        .fetch_one(&mut conn)
+        .await
+        .ok()?;
+
+        if !table_exists {
+            return Some(0);
+        }
+
+        sqlx::query_scalar::<_, i32>("SELECT MAX(version) FROM schema_version")
+            .fetch_one(&mut conn)
+            .await
+            .ok()
+    }
+
+    async fn scan_backup_directory() -> Result<Vec<BackupInfo>> {
+        let backup_dir = Paths::backup_dir();
+
+        if !backup_dir.exists() {
+            return Ok(Vec::new());
+        }
+
+        let mut backups: Vec<BackupInfo> = Vec::new();
+
+        let mut entries = fs::read_dir(&backup_dir).await?;
+        while let Ok(Some(entry)) = entries.next_entry().await {
+            if let Ok(metadata) = entry.metadata().await {
+                if metadata.is_file() && entry.path().extension().is_some_and(|ext| ext == "db") {
+                    if let Ok(modified) = metadata.modified() {
+                        let created_at: DateTime<Utc> = modified.into();
+                        let path = entry.path();
+                        let schema_version = Self::read_backup_schema_version(&path).await;
+                        backups.push(BackupInfo {
+                            path,
+                            created_at,
+                            size: metadata.len(),
+                            schema_version,
+                        });
+                    }
+                }
+            }
+        }
+
+        backups.sort_by(|a, b| b.created_at.cmp(&a.created_at));
+
+        Ok(backups)
+    }
+
+    pub async fn list_backups(&self) -> Result<Vec<BackupInfo>> {
+        Self::scan_backup_directory().await
+    }
+
+    pub async fn restore_backup(&self, backup_path: &Path) -> Result<()> {
+        if !backup_path.exists() {
+            anyhow::bail!("Backup file does not exist: {}", backup_path.display());
+        }
+
+        if backup_path.extension().is_none_or(|ext| ext != "db") {
+            warn!(
+                "Restoring from file without .db extension: {}",
+                backup_path.display()
+            );
+        }
+
+        let backup_dir = Paths::backup_dir();
+        if let Ok(canonical_backup) = backup_path.canonicalize() {
+            if let Ok(canonical_backup_dir) = backup_dir.canonicalize() {
+                if !canonical_backup.starts_with(&canonical_backup_dir) {
+                    warn!(
+                        "Restoring from file outside official backup directory: {}",
+                        backup_path.display()
+                    );
+                }
+            }
+        }
+
+        let session_dir = ensure_session_dir().await?;
+        let db_path = session_dir.join("sessions.db");
+
+        let options = SqliteConnectOptions::new()
+            .filename(backup_path)
+            .create_if_missing(false);
+
+        match SqliteConnection::connect_with(&options).await {
+            Ok(mut conn) => {
+                match sqlx::query_scalar::<_, String>("PRAGMA integrity_check")
+                    .fetch_one(&mut conn)
+                    .await
+                {
+                    Ok(result) => {
+                        if result != "ok" {
+                            anyhow::bail!("Backup file integrity check failed: {}", result);
+                        }
+                    }
+                    Err(e) => {
+                        anyhow::bail!("Could not validate backup file: {}", e);
+                    }
+                }
+            }
+            Err(e) => {
+                anyhow::bail!("Backup file is not a valid SQLite database: {}", e);
+            }
+        }
+
+        let safety_backup_name = format!("pre_restore_{}", Utc::now().format("%Y%m%d_%H%M%S"));
+        let safety_backup = self.create_backup(Some(safety_backup_name)).await?;
+
+        info!(
+            "Created safety backup before restore: {}",
+            safety_backup.display()
+        );
+
+        self.pool.close().await;
+
+        fs::copy(backup_path, &db_path).await?;
+
+        info!("Database restored from: {}", backup_path.display());
+        info!("Please restart the application for changes to take full effect.");
+
+        Ok(())
+    }
 }
 
 #[cfg(test)]
@@ -1356,4 +1676,90 @@ mod tests {
         assert!(imported.user_set_name);
         assert_eq!(imported.working_dir, PathBuf::from("/tmp/test"));
     }
+
+    #[tokio::test]
+    async fn test_backup_restore_round_trip() {
+        let temp_dir = TempDir::new().unwrap();
+        let db_path = temp_dir.path().join("test_backup_restore.db");
+
+        let storage = Arc::new(SessionStorage::create(&db_path).await.unwrap());
+
+        let session1 = storage
+            .create_session(
+                PathBuf::from("/tmp/test1"),
+                "Original session".to_string(),
+                SessionType::User,
+            )
+            .await
+            .unwrap();
+
+        storage
+            .add_message(
+                &session1.id,
+                &Message {
+                    id: None,
+                    role: Role::User,
+                    created: chrono::Utc::now().timestamp_millis(),
+                    content: vec![MessageContent::text("test message")],
+                    metadata: Default::default(),
+                },
+            )
+            .await
+            .unwrap();
+
+        let session1_with_messages = storage.get_session(&session1.id, true).await.unwrap();
+        assert_eq!(session1_with_messages.message_count, 1);
+
+        let backup_path = temp_dir.path().join("backup.db");
+        fs::copy(&db_path, &backup_path).await.unwrap();
+
+        storage.delete_session(&session1.id).await.unwrap();
+
+        let sessions_after_delete = storage.list_sessions().await.unwrap();
+        assert_eq!(sessions_after_delete.len(), 0);
+
+        storage.pool.close().await;
+
+        fs::copy(&backup_path, &db_path).await.unwrap();
+
+        let storage2 = Arc::new(SessionStorage::open(&db_path).await.unwrap());
+        let sessions_after_restore = storage2.list_sessions().await.unwrap();
+        assert_eq!(sessions_after_restore.len(), 1);
+        assert_eq!(sessions_after_restore[0].id, session1.id);
+    }
+
+    #[tokio::test]
+    async fn test_backup_validation() {
+        let temp_dir = TempDir::new().unwrap();
+        let db_path = temp_dir.path().join("test_validation.db");
+
+        let storage = Arc::new(SessionStorage::create(&db_path).await.unwrap());
+
+        storage
+            .create_session(
+                PathBuf::from("/tmp/test"),
+                "Test session".to_string(),
+                SessionType::User,
+            )
+            .await
+            .unwrap();
+
+        let backup_dir = temp_dir.path().join("backups");
+        fs::create_dir_all(&backup_dir).await.unwrap();
+        let valid_backup_path = backup_dir.join("valid_backup.db");
+        fs::copy(&db_path, &valid_backup_path).await.unwrap();
+
+        assert!(valid_backup_path.exists());
+
+        let backup_size = tokio::fs::metadata(&valid_backup_path).await.unwrap().len();
+        assert!(backup_size > 0);
+
+        let corrupted_backup = backup_dir.join("corrupted.db");
+        tokio::fs::write(&corrupted_backup, b"not a database")
+            .await
+            .unwrap();
+
+        let result = storage.restore_backup(&corrupted_backup).await;
+        assert!(result.is_err());
+    }
 }
diff --git a/documentation/docs/guides/goose-cli-commands.md b/documentation/docs/guides/goose-cli-commands.md
index f1d28e03eea2..f2380b94b320 100644
--- a/documentation/docs/guides/goose-cli-commands.md
+++ b/documentation/docs/guides/goose-cli-commands.md
@@ -393,6 +393,126 @@ goose schedule remove --id daily-report
 
 ---
 
+### Database Management
+
+#### db status
+Display comprehensive database information including location, size, schema version, content statistics, and backup details.
+
+**Usage:**
+```bash
+goose db status
+```
+
+**Output includes:**
+- Database file path and size
+- Current schema version and migration status
+- Session and message counts
+- Token usage statistics
+- Backup directory location and latest backup information
+
+---
+
+#### db backup [options]
+Create a manual backup of the session database with validation.
+
+**Options:**
+- **`--name <name>`**: Custom name for the backup file
+
+**Usage:**
+```bash
+# Create backup with auto-generated timestamp name
+goose db backup
+
+# Create backup with custom name
+goose db backup --name my-backup
+```
+
+Backups are validated automatically using file size verification and SQLite integrity checks.
+
+---
+
+#### db restore <backup-file> [options]
+Restore the session database from a backup file.
+
+**Options:**
+- **`--force`**: Skip confirmation prompt
+
+**Usage:**
+```bash
+# Restore from backup filename (searches backup directory)
+goose db restore backup_20251031_143022.db
+
+# Restore from full path
+goose db restore /path/to/backup.db
+
+# Restore without confirmation
+goose db restore backup_20251031_143022.db --force
+```
+
+:::caution
+A safety backup of the current database is automatically created before restoration. You must restart any active Goose sessions after restoring.
+:::
+
+---
+
+#### db list-backups [options]
+List all available database backups with metadata.
+
+**Options:**
+- **`--format <format>`**: Output format (`table` or `json`). Default is `table`
+
+**Usage:**
+```bash
+# List backups in table format
+goose db list-backups
+
+# List backups in JSON format for automation
+goose db list-backups --format json
+```
+
+Shows backup filename, size, creation time, age, and schema version for each backup.
+
+---
+
+#### db delete-backup [options] [files...]
+Delete backup files and clean up orphaned SQLite auxiliary files.
+
+**Options:**
+- **`--all`**: Delete all backup files
+- **`--cleanup`**: Remove orphaned .db-wal and .db-shm files
+- **`--force`**: Skip confirmation prompt
+
+**Usage:**
+```bash
+# Delete specific backup files
+goose db delete-backup backup1.db backup2.db
+
+# Delete all backups
+goose db delete-backup --all
+
+# Clean up orphaned SQLite auxiliary files
+goose db delete-backup --cleanup
+
+# Delete all backups and clean up, without confirmation
+goose db delete-backup --all --cleanup --force
+```
+
+:::caution
+Deletion is permanent and cannot be undone. A preview table is shown before deletion unless `--force` is used.
+:::
+
+---
+
+#### db path
+Output the database file path for scripting and automation.
+
+**Usage:**
+```bash
+goose db path
+```
+
+---
+
 #### mcp
 Run an enabled MCP server specified by `<name>` (e.g. `'Google Drive'`).
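The `--format json` output documented above is the serialized list of `BackupInfo` records (camelCase keys `path`, `createdAt`, `size`, `schemaVersion`, newest first), so retention policies can be scripted against it. A sketch assuming `jq` and either GNU or BSD `date`; the 7-day cutoff is only an illustration:

```bash
# Keep the newest backup, prune the rest once they are older than 7 days.
cutoff=$(date -u -d '7 days ago' '+%Y-%m-%dT%H:%M:%S' 2>/dev/null \
  || date -u -v-7d '+%Y-%m-%dT%H:%M:%S')

goose db list-backups --format json \
  | jq -r --arg cutoff "$cutoff" '.[1:][] | select(.createdAt < $cutoff) | .path' \
  | while IFS= read -r old; do
      goose db delete-backup "$(basename "$old")" --force
    done
```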
diff --git a/documentation/docs/troubleshooting/known-issues.md b/documentation/docs/troubleshooting/known-issues.md
index 8bb34e0370d1..80958dec1cd3 100644
--- a/documentation/docs/troubleshooting/known-issues.md
+++ b/documentation/docs/troubleshooting/known-issues.md
@@ -462,7 +462,60 @@ goose Desktop uses **"shims"** (packaged versions of `npx` and `uvx`) that autom
 4. **Require more changes**: In a corporate proxy environment or airgapped environment where the above doesn't work, it is recommended that you customize and package up goose desktop with shims/config that will work given the network constraints you have (for example, TLS certificate limitations, proxies, inability to download required content etc).
 
 ---
-### Need Further Help?
+
+### Database Corruption or Recovery Issues
+
+If you experience database corruption, failed migrations, or other database-related problems, Goose provides several recovery options.
+
+#### Automatic Backup Protection
+
+Goose automatically creates backups before schema migrations (during version updates). If you experience issues after an update, you can restore from these automatic backups.
+
+#### Recovery Steps
+
+1. **List available backups:**
+   ```bash
+   goose db list-backups
+   ```
+   This shows all available backups with their creation dates and schema versions. Automatic migration backups follow the pattern `pre_migration_v{old}_to_v{new}_{timestamp}.db`.
+
+2. **Restore from backup:**
+   ```bash
+   goose db restore backup_20251031_143022.db
+   ```
+   You can use either the backup filename (as shown in list-backups) or the full path. A safety backup of your current database is automatically created before restoration.
+
+3. **Verify restoration:**
+   ```bash
+   goose db status
+   ```
+   Check that your database is healthy and contains the expected data.
+
+:::warning Restart Required
+After restoring a database, you must restart any active Goose sessions (CLI or Desktop) for the changes to take effect.
+:::
+
+#### Manual Backup Before Risky Operations
+
+If you're about to perform operations that might affect your database, create a manual backup first:
+
+```bash
+goose db backup --name before-operation
+```
+
+#### Database Location
+
+To find where your database is stored (useful for manual recovery):
+
+```bash
+goose db path
+```
+
+See the [Database Management commands](/docs/guides/goose-cli-commands#database-management) and [Database Backups guide](/docs/guides/sessions/session-management#database-backups-and-recovery) for complete documentation.
+
+---
+
+### Need Further Help?
 Still running into issues? We're here to help! Join our [Discord Community][discord] where the goose team and community members are happy to assist.
-d "${MIGRATIONS_DIR}" ]]; then - return - fi - - local version_num=$(echo "${version}" | sed 's/^0*//') - - for dir in "${MIGRATIONS_DIR}"/*; do - if [[ -d "${dir}" ]]; then - local dir_version=$(basename "${dir}" | sed 's/^\([0-9]*\).*/\1/' | sed 's/^0*//') - if [[ "${dir_version}" == "${version_num}" ]]; then - echo "${dir}" - return - fi - fi - done -} - -get_migration_info() { - local version=$1 - - if [[ "${version}" == "0" ]]; then - echo "Initial schema (no schema_version table)" - return - fi - - local migration_dir=$(find_migration_dir "${version}") - if [[ -z "${migration_dir}" ]]; then - echo "Unknown migration" - return - fi - - local metadata_file="${migration_dir}/metadata.txt" - if [[ -f "${metadata_file}" ]]; then - local description=$(grep "^DESCRIPTION=" "${metadata_file}" | cut -d= -f2-) - echo "${description}" - else - echo "Migration ${version}" - fi -} - -list_available_migrations() { - echo -e "${BLUE}=== Available Migrations ===${NC}" - echo "" - echo -e "${CYAN}Version 0:${NC} Initial schema (no schema_version table)" - echo "" - - if [[ ! -d "${MIGRATIONS_DIR}" ]]; then - echo -e "${YELLOW}No migration files found in ${MIGRATIONS_DIR}${NC}" - return - fi - - for migration_dir in $(find "${MIGRATIONS_DIR}" -mindepth 1 -maxdepth 1 -type d -name "[0-9]*" | sort -V); do - local version=$(basename "${migration_dir}" | sed 's/^\([0-9]*\).*/\1/') - local info=$(get_migration_info "${version}") - - echo -e "${CYAN}Version ${version}:${NC} ${info}" - echo "" - done -} - -get_goose_db_path() { - if [[ -n "${GOOSE_PATH_ROOT:-}" ]]; then - echo "${GOOSE_PATH_ROOT}/data/sessions/sessions.db" - else - local possible_paths=( - "${HOME}/.local/share/goose/sessions/sessions.db" - "${HOME}/Library/Application Support/Block/goose/data/sessions/sessions.db" - ) - - for path in "${possible_paths[@]}"; do - if [[ -f "${path}" ]]; then - echo "${path}" - return - fi - done - - echo "${possible_paths[0]}" - fi -} - -DB_PATH=$(get_goose_db_path) - -confirm_action() { - local action="$1" - - if [[ "${SKIP_CONFIRM}" == "true" ]]; then - return 0 - fi - - echo -e "${YELLOW}You are about to: ${action}${NC}" - read -p "Continue? (y/N) " -n 1 -r - echo - if [[ $REPLY =~ ^[Yy]$ ]]; then - return 0 - else - return 1 - fi -} - -check_db_exists() { - if [[ ! -f "${DB_PATH}" ]]; then - echo -e "${RED}ERROR: Database not found at ${DB_PATH}${NC}" >&2 - exit 1 - fi -} - -get_schema_version() { - check_db_exists - local version=$(sqlite3 "${DB_PATH}" "SELECT MAX(version) FROM schema_version;" 2>/dev/null || echo "0") - echo "${version}" -} - -check_column_exists() { - local table=$1 - local column=$2 - check_db_exists - sqlite3 "${DB_PATH}" "PRAGMA table_info(${table});" | grep -q "^[0-9]*|${column}|" -} - -get_table_schema() { - local table=$1 - check_db_exists - sqlite3 "${DB_PATH}" "PRAGMA table_info(${table});" 2>/dev/null || echo "" -} - -create_backup() { - check_db_exists - mkdir -p "${BACKUP_DIR}" - local timestamp=$(date +%Y%m%d_%H%M%S) - local backup_path="${BACKUP_DIR}/sessions_v$(get_schema_version)_${timestamp}.db" - cp "${DB_PATH}" "${backup_path}" - echo -e "${GREEN}✓ Backup created: ${backup_path}${NC}" - echo "${backup_path}" -} - -show_version_history() { - list_available_migrations -} - -show_status() { - echo -e "${BLUE}=== Goose Database Status ===${NC}" - echo "Database path: ${DB_PATH}" - echo "" - - if [[ ! -f "${DB_PATH}" ]]; then - echo -e "${YELLOW}Status: No database found${NC}" - echo "" - echo "This is normal if you haven't run Goose yet." 
- echo "Once you run Goose, a database will be created automatically." - return - fi - - local version=$(get_schema_version) - local version_info=$(get_migration_info "${version}") - local latest_version=$(get_latest_version) - - echo -e "Current schema version: ${CYAN}${version}${NC}" - echo -e "Version info: ${version_info}" - echo "" - - echo -e "${BLUE}Sessions table schema:${NC}" - get_table_schema "sessions" | while IFS='|' read -r cid name type notnull dflt_value pk; do - echo " - ${name} (${type})" - done - echo "" - - local session_count=$(sqlite3 "${DB_PATH}" "SELECT COUNT(*) FROM sessions;" 2>/dev/null || echo "0") - local message_count=$(sqlite3 "${DB_PATH}" "SELECT COUNT(*) FROM messages;" 2>/dev/null || echo "0") - echo -e "${BLUE}Database contents:${NC}" - echo " Sessions: ${session_count}" - echo " Messages: ${message_count}" - echo "" - - if [[ ${version} -eq ${latest_version} ]]; then - echo -e "${GREEN}✓ Database is at the latest schema version${NC}" - elif [[ ${version} -lt ${latest_version} ]]; then - echo -e "${YELLOW}⚠ Database can be upgraded to v${latest_version}${NC}" - echo " Run: $0 migrate-to ${latest_version}" - fi -} - -apply_migration() { - local target_version=$1 - - if [[ "${target_version}" == "0" ]]; then - echo -e "${RED}ERROR: Cannot migrate forward to version 0${NC}" >&2 - return 1 - fi - - local migration_dir=$(find_migration_dir "${target_version}") - if [[ -z "${migration_dir}" ]]; then - echo -e "${RED}ERROR: Migration files not found for version ${target_version}${NC}" >&2 - echo -e "${YELLOW}Expected to find directory: ${MIGRATIONS_DIR}/${target_version}_*${NC}" - return 1 - fi - - local up_sql="${migration_dir}/up.sql" - if [[ ! -f "${up_sql}" ]]; then - echo -e "${RED}ERROR: Migration file not found: ${up_sql}${NC}" >&2 - return 1 - fi - - if ! sqlite3 "${DB_PATH}" < "${up_sql}"; then - echo -e "${RED}ERROR: Migration to v${target_version} failed${NC}" >&2 - echo -e "${YELLOW}Check the SQL file: ${up_sql}${NC}" - return 1 - fi -} - -rollback_migration() { - local from_version=$1 - - if [[ "${from_version}" == "0" ]]; then - echo -e "${RED}ERROR: Cannot rollback from version 0${NC}" >&2 - return 1 - fi - - local migration_dir=$(find_migration_dir "${from_version}") - if [[ -z "${migration_dir}" ]]; then - echo -e "${RED}ERROR: Migration files not found for version ${from_version}${NC}" >&2 - echo -e "${YELLOW}Expected to find directory: ${MIGRATIONS_DIR}/${from_version}_*${NC}" - return 1 - fi - - local down_sql="${migration_dir}/down.sql" - if [[ ! -f "${down_sql}" ]]; then - echo -e "${RED}ERROR: Rollback file not found: ${down_sql}${NC}" >&2 - return 1 - fi - - if ! sqlite3 "${DB_PATH}" < "${down_sql}"; then - echo -e "${RED}ERROR: Rollback from v${from_version} failed${NC}" >&2 - echo -e "${YELLOW}Check the SQL file: ${down_sql}${NC}" - return 1 - fi -} - -migrate_to_version() { - local target_version=$1 - local latest_version=$(get_latest_version) - - if [[ -z "${target_version}" ]]; then - echo -e "${RED}ERROR: Please specify a target version${NC}" >&2 - echo "Usage: $0 migrate-to " - echo "" - echo "Available versions: 0 to ${latest_version}" - return 1 - fi - - if [[ ! 
"${target_version}" =~ ^[0-9]+$ ]] || [[ ${target_version} -lt 0 ]] || [[ ${target_version} -gt ${latest_version} ]]; then - echo -e "${RED}ERROR: Invalid version: ${target_version}${NC}" >&2 - echo "Valid versions are: 0 to ${latest_version}" - return 1 - fi - - check_db_exists - local current_version=$(get_schema_version) - - if [[ ${current_version} -eq ${target_version} ]]; then - echo -e "${YELLOW}Already at version ${target_version}${NC}" - return 0 - fi - - echo -e "${BLUE}=== Migrating database from v${current_version} to v${target_version} ===${NC}" - echo "" - - if [[ "${DRY_RUN}" == "true" ]]; then - echo -e "${CYAN}[DRY RUN] Would perform the following actions:${NC}" - echo "" - echo "1. Create backup at: ${BACKUP_DIR}/sessions_v${current_version}_.db" - echo "" - - if [[ ${target_version} -gt ${current_version} ]]; then - echo "2. Apply forward migrations:" - for version in $(seq $((current_version + 1)) ${target_version}); do - local migration_info=$(get_migration_info "${version}") - local migration_dir=$(find_migration_dir "${version}") - echo " - Migrate to v${version}: ${migration_info}" - echo " SQL file: ${migration_dir}/up.sql" - done - else - echo "2. Apply rollback migrations:" - for version in $(seq ${current_version} -1 $((target_version + 1))); do - local migration_info=$(get_migration_info "${version}") - local migration_dir=$(find_migration_dir "${version}") - echo " - Rollback from v${version}: ${migration_info}" - echo " SQL file: ${migration_dir}/down.sql" - done - fi - - echo "" - echo "3. Update schema_version table to ${target_version}" - echo "" - echo -e "${CYAN}[DRY RUN] No changes were made${NC}" - return 0 - fi - - if ! confirm_action "migrate database from v${current_version} to v${target_version}"; then - echo -e "${YELLOW}Migration cancelled${NC}" - return 2 - fi - - local backup_path=$(create_backup) - echo "" - - if [[ ${target_version} -gt ${current_version} ]]; then - for version in $(seq $((current_version + 1)) ${target_version}); do - local migration_info=$(get_migration_info "${version}") - echo -e "Applying migration to v${version}..." - apply_migration ${version} - echo -e "${GREEN}✓ Migrated to v${version}: ${migration_info}${NC}" - done - else - for version in $(seq ${current_version} -1 $((target_version + 1))); do - local migration_info=$(get_migration_info "${version}") - echo -e "Rolling back from v${version}..." - rollback_migration ${version} - echo -e "${GREEN}✓ Rolled back from v${version}${NC}" - done - fi - - echo "" - echo -e "${GREEN}✓ Migration complete!${NC}" - echo -e "Database is now at version ${target_version}" - echo "" - echo "Backup saved at: ${backup_path}" -} - -list_backups() { - if [[ ! 
-d "${BACKUP_DIR}" ]] || [[ -z "$(ls -A "${BACKUP_DIR}" 2>/dev/null)" ]]; then - echo -e "${YELLOW}No backups found${NC}" - return - fi - - echo -e "${BLUE}=== Available Backups ===${NC}" - echo "" - ls -lh "${BACKUP_DIR}" | tail -n +2 | while read -r line; do - local filename=$(echo "${line}" | awk '{print $NF}') - local size=$(echo "${line}" | awk '{print $5}') - local date=$(echo "${line}" | awk '{print $6, $7, $8}') - - if [[ "${filename}" =~ _v([0-9]+)_ ]]; then - local version="${BASH_REMATCH[1]}" - echo -e "${filename}" - echo -e " Size: ${size}, Date: ${date}, Schema: v${version}" - echo "" - else - echo -e "${filename}" - echo -e " Size: ${size}, Date: ${date}" - echo "" - fi - done -} - -restore_backup() { - local backup_file=$1 - - if [[ -z "${backup_file}" ]]; then - echo -e "${RED}ERROR: Please specify a backup file to restore${NC}" >&2 - echo "Usage: $0 restore " - echo "" - list_backups - exit 1 - fi - - if [[ ! -f "${backup_file}" ]]; then - echo -e "${RED}ERROR: Backup file not found: ${backup_file}${NC}" >&2 - exit 1 - fi - - check_db_exists - - if [[ "${DRY_RUN}" == "true" ]]; then - echo -e "${CYAN}[DRY RUN] Would perform the following actions:${NC}" - echo "" - echo "1. Create backup of current database at: ${BACKUP_DIR}/sessions_v_.db" - echo "2. Restore backup from: ${backup_file}" - echo "3. Replace current database at: ${DB_PATH}" - echo "" - echo -e "${CYAN}[DRY RUN] No changes were made${NC}" - return 0 - fi - - if ! confirm_action "restore backup from ${backup_file} (this will replace your current database)"; then - echo -e "${YELLOW}Restore cancelled${NC}" - return 2 - fi - - local current_backup=$(create_backup) - echo "" - - cp "${backup_file}" "${DB_PATH}" - echo -e "${GREEN}✓ Restored backup from: ${backup_file}${NC}" - echo "Current database backed up to: ${current_backup}" -} - -validate_sql_syntax() { - local sql=$1 - local file_desc=$2 - - if [[ -z "$sql" ]]; then - echo -e "${YELLOW}⚠ WARNING: Empty SQL in $file_desc${NC}" >&2 - return 1 - fi - - if ! echo "$sql" | grep -q ";"; then - echo -e "${YELLOW}⚠ WARNING: No semicolons found in $file_desc${NC}" >&2 - return 1 - fi - - local lines=$(echo "$sql" | grep -v "^--" | grep -v "^BEGIN" | grep -v "^COMMIT" | grep -v "^$") - while IFS= read -r line; do - if [[ -n "$line" ]]; then - if ! echo "$line" | grep -q ";$"; then - local next_line=$(echo "$lines" | grep -A1 "^$line$" | tail -1) - if [[ -n "$next_line" && ! 
"$next_line" =~ ^(BEGIN|COMMIT|INSERT|DELETE|$) ]]; then - echo -e "${YELLOW}⚠ WARNING: Possible missing semicolon in $file_desc:${NC}" >&2 - echo " $line" >&2 - return 1 - fi - fi - fi - done <<< "$lines" - - return 0 -} - -extract_migration_sql() { - local version=$1 - local rust_file=$2 - - awk -v ver="$version" ' - BEGIN { in_migration=0; sql=""; query_count=0; current_query="" } - /async fn apply_migration/ { found_func=1 } - found_func && $0 ~ ver " =>" { in_migration=1; next } - in_migration && /}$/ && !/=>/ { exit } - in_migration && /sqlx::query/ { - getline - if ($0 ~ /r#"/) { - if (current_query != "") { - if (sql != "") sql = sql ";\n" - sql = sql current_query - current_query = "" - } - getline - while ($0 !~ /"#/) { - if (current_query != "") current_query = current_query "\n" - current_query = current_query $0 - getline - } - query_count++ - } - } - END { - if (current_query != "") { - if (sql != "") sql = sql ";\n" - sql = sql current_query - } - if (sql != "") print sql ";" - } - ' "$rust_file" -} - -generate_rollback_sql() { - local version=$1 - local up_sql=$2 - - echo "BEGIN TRANSACTION;" - echo "" - - local statements=() - mapfile -d $'\0' -t statements < <(echo "$up_sql" | awk 'BEGIN{RS=";"} {gsub(/^[ \t\n]+|[ \t\n]+$/, ""); if (length($0) > 0) {print $0; printf "%c", 0}}') - - local rollback_stmts=() - local has_unsupported=false - - for stmt in "${statements[@]}"; do - if echo "$stmt" | grep -q "CREATE TABLE.*schema_version"; then - rollback_stmts+=("DROP TABLE IF EXISTS schema_version;") - elif echo "$stmt" | grep -q "RENAME COLUMN"; then - local table=$(echo "$stmt" | sed -n 's/.*ALTER TABLE \([^ ]*\).*/\1/p') - local old_col=$(echo "$stmt" | sed -n 's/.*RENAME COLUMN \([^ ]*\) TO.*/\1/p') - local new_col=$(echo "$stmt" | sed -n 's/.*TO \([^ ;]*\).*/\1/p') - rollback_stmts+=("ALTER TABLE $table RENAME COLUMN $new_col TO $old_col;") - elif echo "$stmt" | grep -q "ADD COLUMN"; then - local table=$(echo "$stmt" | sed -n 's/.*ALTER TABLE \([^ ]*\).*/\1/p') - local column=$(echo "$stmt" | sed -n 's/.*ADD COLUMN \([^ ]*\).*/\1/p') - rollback_stmts+=("ALTER TABLE $table DROP COLUMN $column;") - else - rollback_stmts+=("-- TODO: Unable to auto-generate rollback for: $stmt") - has_unsupported=true - fi - done - - for ((i=${#rollback_stmts[@]}-1; i>=0; i--)); do - echo "${rollback_stmts[$i]}" - done - - echo "" - echo "DELETE FROM schema_version WHERE version = $version;" - echo "" - echo "COMMIT;" - - if [[ "$has_unsupported" == "true" ]]; then - return 1 - fi -} - -generate_metadata() { - local version=$1 - local sql=$2 - local author=${USER:-system} - local date=$(date +%Y-%m-%d) - - local description="Migration $version" - if echo "$sql" | grep -q "CREATE TABLE.*schema_version"; then - description="Added schema_version tracking" - elif echo "$sql" | grep -q "ALTER TABLE.*ADD COLUMN"; then - local column=$(echo "$sql" | sed -n 's/.*ADD COLUMN \([^ ]*\).*/\1/p') - description="Added $column column" - elif echo "$sql" | grep -q "RENAME COLUMN"; then - local old_col=$(echo "$sql" | sed -n 's/.*RENAME COLUMN \([^ ]*\) TO.*/\1/p') - local new_col=$(echo "$sql" | sed -n 's/.*TO \([^ ]*\).*/\1/p') - description="Renamed $old_col to $new_col" - fi - - cat <&2 - echo "Make sure you're running this from the goose repository root." 
- exit 1 - fi - - echo -e "${BLUE}=== Generating Migrations from Rust Source ===${NC}" - echo "" - echo "Reading migrations from: ${RUST_SESSION_MANAGER}" - echo "Output directory: ${MIGRATIONS_DIR}" - echo "" - - if [[ "${CLEAN_GENERATE}" == "true" ]]; then - if [[ -d "${MIGRATIONS_DIR}" ]]; then - local migration_count=$(find "${MIGRATIONS_DIR}" -mindepth 1 -maxdepth 1 -type d -name "[0-9]*" 2>/dev/null | wc -l) - if [[ ${migration_count} -gt 0 ]]; then - echo -e "${YELLOW}⚠ Clean mode: This will remove all ${migration_count} existing migration(s)${NC}" - if ! confirm_action "remove all existing migrations and regenerate from source"; then - echo -e "${YELLOW}Generation cancelled${NC}" - return 2 - fi - echo "Removing existing migrations..." - rm -rf "${MIGRATIONS_DIR}" - fi - fi - fi - - mkdir -p "${MIGRATIONS_DIR}" - - local max_version=$(grep -E '^\s+[0-9]+ =>' "${RUST_SESSION_MANAGER}" | \ - sed 's/[^0-9]//g' | \ - sort -n | \ - tail -1) - - if [[ -z "$max_version" ]]; then - max_version=2 - fi - - local generated_count=0 - local skipped_count=0 - - for version in $(seq 1 $max_version); do - local padded_version=$(printf "%03d" $version) - local sql=$(extract_migration_sql "$version" "${RUST_SESSION_MANAGER}") - - if [[ -z "$sql" ]]; then - echo -e "${YELLOW}⚠ No SQL found for version $version, skipping...${NC}" - skipped_count=$((skipped_count + 1)) - continue - fi - - if ! validate_sql_syntax "$sql" "migration v$version"; then - echo -e "${YELLOW}⚠ Validation warning for version $version, but continuing...${NC}" - fi - - local migration_name - if echo "$sql" | grep -q "CREATE TABLE.*schema_version"; then - migration_name="add_schema_version" - elif echo "$sql" | grep -q "ALTER TABLE.*ADD COLUMN"; then - local column=$(echo "$sql" | sed -n 's/.*ADD COLUMN \([^ ]*\).*/\1/p' | head -1) - migration_name="add_${column}" - elif echo "$sql" | grep -q "RENAME COLUMN"; then - local old_col=$(echo "$sql" | sed -n 's/.*RENAME COLUMN \([^ ]*\) TO.*/\1/p') - local new_col=$(echo "$sql" | sed -n 's/.*TO \([^ ]*\).*/\1/p') - migration_name="rename_${old_col}_to_${new_col}" - else - migration_name="migration_${version}" - fi - - local migration_dir="${MIGRATIONS_DIR}/${padded_version}_${migration_name}" - mkdir -p "$migration_dir" - - echo "BEGIN TRANSACTION;" > "${migration_dir}/up.sql" - echo "" >> "${migration_dir}/up.sql" - echo "$sql" >> "${migration_dir}/up.sql" - echo "" >> "${migration_dir}/up.sql" - echo "INSERT INTO schema_version (version) VALUES ($version);" >> "${migration_dir}/up.sql" - echo "" >> "${migration_dir}/up.sql" - echo "COMMIT;" >> "${migration_dir}/up.sql" - - generate_rollback_sql "$version" "$sql" > "${migration_dir}/down.sql" - - generate_metadata "$version" "$sql" > "${migration_dir}/metadata.txt" - - echo -e "${GREEN}✓ Generated migration $padded_version: ${migration_dir##*/}${NC}" - generated_count=$((generated_count + 1)) - done - - echo "" - echo -e "${GREEN}✓ Generation complete!${NC}" - echo "Generated: $generated_count migrations" - if [[ $skipped_count -gt 0 ]]; then - echo "Skipped: $skipped_count migrations" - fi - echo "" - echo -e "${YELLOW}Note:${NC} Please review generated rollback SQL (down.sql) files." - echo "Some migrations may require manual rollback implementation." 
-} - -show_help() { - local latest_version=$(get_latest_version) - - echo -e "${BLUE}Goose Database Migration Helper${NC}" - echo "" - echo "This script is a developer utility for manually managing database schema" - echo "versions when switching between branches with different schema requirements." - echo "Migrations are stored in ${MIGRATIONS_DIR}." - echo "" - echo -e "${CYAN}Usage:${NC} $0 [flags] <command> [arguments] [flags]" - echo "" - echo -e "${CYAN}Global Flags (can be placed before or after the command):${NC}" - echo -e " ${GREEN}--dry-run${NC}" - echo " Preview changes without modifying the database" - echo " Works with: migrate-to, restore" - echo "" - echo -e " ${GREEN}--yes, -y${NC}" - echo " Skip confirmation prompts (useful for automation)" - echo " Works with: migrate-to, restore, generate-migrations --clean" - echo "" - echo -e " ${GREEN}--clean${NC}" - echo " Remove all existing migrations before regenerating" - echo " Works with: generate-migrations" - echo " Useful when switching between branches with different migrations" - echo "" - echo -e "${CYAN}Commands:${NC}" - echo -e " ${GREEN}status${NC}" - echo " Show current database schema version, table structure, and statistics" - echo "" - echo -e " ${GREEN}migrate-to <version>${NC}" - echo " Migrate database to a specific schema version (0-${latest_version})" - echo " Automatically handles forward migrations and rollbacks" - echo "" - echo -e " ${GREEN}history${NC}" - echo " Show all available migrations and their descriptions" - echo "" - echo -e " ${GREEN}generate-migrations${NC}" - echo " Auto-generate migration files from Rust source code (session_manager.rs)" - echo " Creates up.sql, down.sql, and metadata.txt for each migration" - echo "" - echo -e " ${GREEN}backup${NC}" - echo " Create a manual backup of the current database" - echo "" - echo -e " ${GREEN}list-backups${NC}" - echo " Show all available backups with their versions and sizes" - echo "" - echo -e " ${GREEN}restore <backup-file>${NC}" - echo " Restore database from a backup file" - echo "" - echo -e " ${GREEN}help${NC}" - echo " Show this help message" - echo "" - echo -e "${CYAN}Examples:${NC}" - echo " # Check current status" - echo " $0 status" - echo "" - echo " # View all available migrations" - echo " $0 history" - echo "" - echo " # Preview migration without making changes (dry-run before)" - echo " $0 --dry-run migrate-to 3" - echo "" - echo " # Flags can also be placed after the command and arguments" - echo " $0 migrate-to 3 --dry-run" - echo "" - echo " # Migrate to version 2" - echo " $0 migrate-to 2" - echo "" - echo " # Rollback to version 1 without confirmation prompt" - echo " $0 migrate-to 1 --yes" - echo "" - echo " # Create a backup" - echo " $0 backup" - echo "" - echo " # Clean regenerate migrations (useful when switching branches)" - echo " $0 generate-migrations --clean" - echo "" - echo " # Clean regenerate without confirmation" - echo " $0 generate-migrations --clean --yes" - echo "" - echo -e "${CYAN}Adding New Migrations:${NC}" - echo " After adding a migration to session_manager.rs, run:" - echo "" - echo " $0 generate-migrations" - echo "" - echo " This will automatically extract migrations from the Rust source" - echo " and create the necessary SQL files in ${MIGRATIONS_DIR}." - echo "" - echo -e " ${YELLOW}Note:${NC} Review generated down.sql files, as some rollbacks" - echo -e " may require manual implementation."
- echo "" - echo -e "${CYAN}Switching Branches:${NC}" - echo " When switching between branches with different migrations:" - echo "" - echo " # Clean and regenerate to match current branch" - echo " git checkout main" - echo " $0 generate-migrations --clean" - echo "" - echo " # Or manually remove specific migrations" - echo " rm -rf ~/.local/share/goose/migrations/004_*" - echo " $0 generate-migrations" - echo "" - echo -e "${CYAN}Configuration:${NC}" - echo " Database: ${DB_PATH}" - echo " Backups: ${BACKUP_DIR}" - echo " Migrations: ${MIGRATIONS_DIR}" - echo " Latest: v${latest_version}" - echo "" - echo -e "${YELLOW}Note:${NC} All migrations automatically create backups before making changes." -} - -main() { - local non_flag_args=() - - while [[ $# -gt 0 ]]; do - case "$1" in - --dry-run) - DRY_RUN=true - shift - ;; - --yes|-y) - SKIP_CONFIRM=true - shift - ;; - --clean) - CLEAN_GENERATE=true - shift - ;; - --help|-h) - show_help - exit 0 - ;; - -*) - echo -e "${RED}ERROR: Unknown flag: $1${NC}" >&2 - echo "" - show_help - exit 1 - ;; - *) - non_flag_args+=("$1") - shift - ;; - esac - done - - local command=${non_flag_args[0]:-help} - - case "${command}" in - status) - show_status - ;; - migrate-to) - migrate_to_version "${non_flag_args[1]}" - ;; - history) - show_version_history - ;; - generate-migrations) - generate_migrations - ;; - backup) - create_backup - ;; - list-backups) - list_backups - ;; - restore) - restore_backup "${non_flag_args[1]}" - ;; - migrate) - local latest_version=$(get_latest_version) - echo -e "${YELLOW}Note: 'migrate' is deprecated. Use 'migrate-to ${latest_version}' instead.${NC}" - echo "" - migrate_to_version ${latest_version} - ;; - rollback) - echo -e "${YELLOW}Note: 'rollback' is deprecated. Use 'migrate-to ' instead.${NC}" - echo -e "${YELLOW}Use '$0 history' to see available versions.${NC}" - echo "" - show_version_history - ;; - compatible-with) - echo -e "${RED}ERROR: 'compatible-with' command has been removed.${NC}" >&2 - echo "" - echo "The script now uses a generic migration system." 
- echo "To migrate your database, use: $0 migrate-to " - echo "" - echo "Available migrations:" - show_version_history - exit 1 - ;; - help) - show_help - ;; - *) - echo -e "${RED}ERROR: Unknown command: ${command}${NC}" >&2 - echo "" - show_help - exit 1 - ;; - esac -} - -main "$@" diff --git a/ui/desktop/openapi.json b/ui/desktop/openapi.json index d4478cbad647..3ce30bd18fbb 100644 --- a/ui/desktop/openapi.json +++ b/ui/desktop/openapi.json @@ -930,6 +930,200 @@ } } }, + "/database/backup": { + "post": { + "tags": [ + "Database Management" + ], + "operationId": "create_backup", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateBackupRequest" + } + } + }, + "required": true + }, + "responses": { + "201": { + "description": "Backup created successfully", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/CreateBackupResponse" + } + } + } + }, + "401": { + "description": "Unauthorized - Invalid or missing API key" + }, + "500": { + "description": "Internal server error" + } + }, + "security": [ + { + "api_key": [] + } + ] + } + }, + "/database/backups": { + "get": { + "tags": [ + "Database Management" + ], + "operationId": "list_backups", + "responses": { + "200": { + "description": "List of backups retrieved successfully", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/BackupsListResponse" + } + } + } + }, + "401": { + "description": "Unauthorized - Invalid or missing API key" + }, + "500": { + "description": "Internal server error" + } + }, + "security": [ + { + "api_key": [] + } + ] + } + }, + "/database/backups/delete": { + "delete": { + "tags": [ + "Database Management" + ], + "operationId": "delete_backups", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteBackupsRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Backups deleted successfully", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DeleteBackupsResponse" + } + } + } + }, + "400": { + "description": "Bad request - Invalid parameters" + }, + "401": { + "description": "Unauthorized - Invalid or missing API key" + }, + "500": { + "description": "Internal server error" + } + }, + "security": [ + { + "api_key": [] + } + ] + } + }, + "/database/restore": { + "post": { + "tags": [ + "Database Management" + ], + "operationId": "restore_backup", + "requestBody": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RestoreBackupRequest" + } + } + }, + "required": true + }, + "responses": { + "200": { + "description": "Database restored successfully", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/RestoreBackupResponse" + } + } + } + }, + "400": { + "description": "Bad request - Invalid backup filename" + }, + "401": { + "description": "Unauthorized - Invalid or missing API key" + }, + "404": { + "description": "Backup not found" + }, + "500": { + "description": "Internal server error" + } + }, + "security": [ + { + "api_key": [] + } + ] + } + }, + "/database/status": { + "get": { + "tags": [ + "Database Management" + ], + "operationId": "database_status", + "responses": { + "200": { + "description": "Database status retrieved successfully", + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/DatabaseStatusResponse" + } + } + } + }, + "401": { + "description": 
"Unauthorized - Invalid or missing API key" + }, + "500": { + "description": "Internal server error" + } + }, + "security": [ + { + "api_key": [] + } + ] + } + }, "/diagnostics/{session_id}": { "get": { "tags": [ @@ -2126,6 +2320,48 @@ } } }, + "BackupInfo": { + "type": "object", + "required": [ + "filename", + "createdAt", + "size" + ], + "properties": { + "createdAt": { + "type": "string", + "format": "date-time" + }, + "filename": { + "type": "string" + }, + "schemaVersion": { + "type": "integer", + "format": "int32", + "nullable": true + }, + "size": { + "type": "integer", + "format": "int64", + "minimum": 0 + } + } + }, + "BackupsListResponse": { + "type": "object", + "required": [ + "backups" + ], + "properties": { + "backups": { + "type": "array", + "items": { + "$ref": "#/components/schemas/BackupInfo" + }, + "description": "List of available backups" + } + } + }, "ChatRequest": { "type": "object", "required": [ @@ -2257,6 +2493,41 @@ "$ref": "#/components/schemas/Message" } }, + "CreateBackupRequest": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "Optional custom name for the backup", + "nullable": true + } + } + }, + "CreateBackupResponse": { + "type": "object", + "required": [ + "filename", + "size", + "createdAt" + ], + "properties": { + "createdAt": { + "type": "string", + "format": "date-time", + "description": "When the backup was created" + }, + "filename": { + "type": "string", + "description": "Filename of the created backup" + }, + "size": { + "type": "integer", + "format": "int64", + "description": "Size of the backup file in bytes", + "minimum": 0 + } + } + }, "CreateRecipeRequest": { "type": "object", "required": [ @@ -2316,6 +2587,61 @@ } } }, + "DatabaseStatusResponse": { + "type": "object", + "required": [ + "dbSize", + "schemaVersion", + "isLatestVersion", + "sessionCount", + "messageCount", + "totalTokens", + "backupCount", + "timestamp" + ], + "properties": { + "backupCount": { + "type": "integer", + "minimum": 0 + }, + "dbSize": { + "type": "integer", + "format": "int64", + "minimum": 0 + }, + "isLatestVersion": { + "type": "boolean" + }, + "latestBackup": { + "allOf": [ + { + "$ref": "#/components/schemas/BackupInfo" + } + ], + "nullable": true + }, + "messageCount": { + "type": "integer", + "minimum": 0 + }, + "schemaVersion": { + "type": "integer", + "format": "int32" + }, + "sessionCount": { + "type": "integer", + "minimum": 0 + }, + "timestamp": { + "type": "string", + "format": "date-time" + }, + "totalTokens": { + "type": "integer", + "format": "int64" + } + } + }, "DeclarativeProviderConfig": { "type": "object", "required": [ @@ -2393,6 +2719,60 @@ } } }, + "DeleteBackupsRequest": { + "type": "object", + "required": [ + "filenames", + "deleteAll", + "cleanupOrphaned" + ], + "properties": { + "cleanupOrphaned": { + "type": "boolean", + "description": "Clean up orphaned WAL/SHM files" + }, + "deleteAll": { + "type": "boolean", + "description": "Delete all backups if true" + }, + "filenames": { + "type": "array", + "items": { + "type": "string" + }, + "description": "List of backup filenames to delete" + } + } + }, + "DeleteBackupsResponse": { + "type": "object", + "required": [ + "deleted", + "failed", + "orphanedCleaned" + ], + "properties": { + "deleted": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Successfully deleted files" + }, + "failed": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Files that failed to delete" + }, + "orphanedCleaned": { + 
"type": "integer", + "description": "Number of orphaned files cleaned", + "minimum": 0 + } + } + }, "DeleteRecipeRequest": { "type": "object", "required": [ @@ -3983,6 +4363,35 @@ } } }, + "RestoreBackupRequest": { + "type": "object", + "required": [ + "filename" + ], + "properties": { + "filename": { + "type": "string", + "description": "Filename of the backup to restore" + } + } + }, + "RestoreBackupResponse": { + "type": "object", + "required": [ + "message", + "restoredFrom" + ], + "properties": { + "message": { + "type": "string", + "description": "Success message" + }, + "restoredFrom": { + "type": "string", + "description": "Backup that was restored" + } + } + }, "ResumeAgentRequest": { "type": "object", "required": [ diff --git a/ui/desktop/src/api/sdk.gen.ts b/ui/desktop/src/api/sdk.gen.ts index 7f65f47dc5a7..0c8b2fd8433f 100644 --- a/ui/desktop/src/api/sdk.gen.ts +++ b/ui/desktop/src/api/sdk.gen.ts @@ -2,7 +2,7 @@ import type { Client, Options as Options2, TDataShape } from './client'; import { client } from './client.gen'; -import type { AddExtensionData, AddExtensionErrors, AddExtensionResponses, AgentAddExtensionData, AgentAddExtensionErrors, AgentAddExtensionResponses, AgentRemoveExtensionData, AgentRemoveExtensionErrors, AgentRemoveExtensionResponses, BackupConfigData, BackupConfigErrors, BackupConfigResponses, ConfirmPermissionData, ConfirmPermissionErrors, ConfirmPermissionResponses, CreateCustomProviderData, CreateCustomProviderErrors, CreateCustomProviderResponses, CreateRecipeData, CreateRecipeErrors, CreateRecipeResponses, CreateScheduleData, CreateScheduleErrors, CreateScheduleResponses, DecodeRecipeData, DecodeRecipeErrors, DecodeRecipeResponses, DeleteRecipeData, DeleteRecipeErrors, DeleteRecipeResponses, DeleteScheduleData, DeleteScheduleErrors, DeleteScheduleResponses, DeleteSessionData, DeleteSessionErrors, DeleteSessionResponses, DiagnosticsData, DiagnosticsErrors, DiagnosticsResponses, EncodeRecipeData, EncodeRecipeErrors, EncodeRecipeResponses, ExportSessionData, ExportSessionErrors, ExportSessionResponses, GetCustomProviderData, GetCustomProviderErrors, GetCustomProviderResponses, GetExtensionsData, GetExtensionsErrors, GetExtensionsResponses, GetProviderModelsData, GetProviderModelsErrors, GetProviderModelsResponses, GetSessionData, GetSessionErrors, GetSessionInsightsData, GetSessionInsightsErrors, GetSessionInsightsResponses, GetSessionResponses, GetToolsData, GetToolsErrors, GetToolsResponses, ImportSessionData, ImportSessionErrors, ImportSessionResponses, InitConfigData, InitConfigErrors, InitConfigResponses, InspectRunningJobData, InspectRunningJobErrors, InspectRunningJobResponses, KillRunningJobData, KillRunningJobResponses, ListRecipesData, ListRecipesErrors, ListRecipesResponses, ListSchedulesData, ListSchedulesErrors, ListSchedulesResponses, ListSessionsData, ListSessionsErrors, ListSessionsResponses, ParseRecipeData, ParseRecipeErrors, ParseRecipeResponses, PauseScheduleData, PauseScheduleErrors, PauseScheduleResponses, ProvidersData, ProvidersResponses, ReadAllConfigData, ReadAllConfigResponses, ReadConfigData, ReadConfigErrors, ReadConfigResponses, RecoverConfigData, RecoverConfigErrors, RecoverConfigResponses, RemoveConfigData, RemoveConfigErrors, RemoveConfigResponses, RemoveCustomProviderData, RemoveCustomProviderErrors, RemoveCustomProviderResponses, RemoveExtensionData, RemoveExtensionErrors, RemoveExtensionResponses, ReplyData, ReplyErrors, ReplyResponses, ResumeAgentData, ResumeAgentErrors, ResumeAgentResponses, RunNowHandlerData, 
RunNowHandlerErrors, RunNowHandlerResponses, SaveRecipeData, SaveRecipeErrors, SaveRecipeResponses, ScanRecipeData, ScanRecipeResponses, SessionsHandlerData, SessionsHandlerErrors, SessionsHandlerResponses, StartAgentData, StartAgentErrors, StartAgentResponses, StartOpenrouterSetupData, StartOpenrouterSetupResponses, StartTetrateSetupData, StartTetrateSetupResponses, StatusData, StatusResponses, UnpauseScheduleData, UnpauseScheduleErrors, UnpauseScheduleResponses, UpdateAgentProviderData, UpdateAgentProviderErrors, UpdateAgentProviderResponses, UpdateCustomProviderData, UpdateCustomProviderErrors, UpdateCustomProviderResponses, UpdateFromSessionData, UpdateFromSessionErrors, UpdateFromSessionResponses, UpdateRouterToolSelectorData, UpdateRouterToolSelectorErrors, UpdateRouterToolSelectorResponses, UpdateScheduleData, UpdateScheduleErrors, UpdateScheduleResponses, UpdateSessionNameData, UpdateSessionNameErrors, UpdateSessionNameResponses, UpdateSessionUserRecipeValuesData, UpdateSessionUserRecipeValuesErrors, UpdateSessionUserRecipeValuesResponses, UpsertConfigData, UpsertConfigErrors, UpsertConfigResponses, UpsertPermissionsData, UpsertPermissionsErrors, UpsertPermissionsResponses, ValidateConfigData, ValidateConfigErrors, ValidateConfigResponses } from './types.gen'; +import type { AddExtensionData, AddExtensionErrors, AddExtensionResponses, AgentAddExtensionData, AgentAddExtensionErrors, AgentAddExtensionResponses, AgentRemoveExtensionData, AgentRemoveExtensionErrors, AgentRemoveExtensionResponses, BackupConfigData, BackupConfigErrors, BackupConfigResponses, ConfirmPermissionData, ConfirmPermissionErrors, ConfirmPermissionResponses, CreateBackupData, CreateBackupErrors, CreateBackupResponses, CreateCustomProviderData, CreateCustomProviderErrors, CreateCustomProviderResponses, CreateRecipeData, CreateRecipeErrors, CreateRecipeResponses, CreateScheduleData, CreateScheduleErrors, CreateScheduleResponses, DatabaseStatusData, DatabaseStatusErrors, DatabaseStatusResponses, DecodeRecipeData, DecodeRecipeErrors, DecodeRecipeResponses, DeleteBackupsData, DeleteBackupsErrors, DeleteBackupsResponses, DeleteRecipeData, DeleteRecipeErrors, DeleteRecipeResponses, DeleteScheduleData, DeleteScheduleErrors, DeleteScheduleResponses, DeleteSessionData, DeleteSessionErrors, DeleteSessionResponses, DiagnosticsData, DiagnosticsErrors, DiagnosticsResponses, EncodeRecipeData, EncodeRecipeErrors, EncodeRecipeResponses, ExportSessionData, ExportSessionErrors, ExportSessionResponses, GetCustomProviderData, GetCustomProviderErrors, GetCustomProviderResponses, GetExtensionsData, GetExtensionsErrors, GetExtensionsResponses, GetProviderModelsData, GetProviderModelsErrors, GetProviderModelsResponses, GetSessionData, GetSessionErrors, GetSessionInsightsData, GetSessionInsightsErrors, GetSessionInsightsResponses, GetSessionResponses, GetToolsData, GetToolsErrors, GetToolsResponses, ImportSessionData, ImportSessionErrors, ImportSessionResponses, InitConfigData, InitConfigErrors, InitConfigResponses, InspectRunningJobData, InspectRunningJobErrors, InspectRunningJobResponses, KillRunningJobData, KillRunningJobResponses, ListBackupsData, ListBackupsErrors, ListBackupsResponses, ListRecipesData, ListRecipesErrors, ListRecipesResponses, ListSchedulesData, ListSchedulesErrors, ListSchedulesResponses, ListSessionsData, ListSessionsErrors, ListSessionsResponses, ParseRecipeData, ParseRecipeErrors, ParseRecipeResponses, PauseScheduleData, PauseScheduleErrors, PauseScheduleResponses, ProvidersData, ProvidersResponses, 
ReadAllConfigData, ReadAllConfigResponses, ReadConfigData, ReadConfigErrors, ReadConfigResponses, RecoverConfigData, RecoverConfigErrors, RecoverConfigResponses, RemoveConfigData, RemoveConfigErrors, RemoveConfigResponses, RemoveCustomProviderData, RemoveCustomProviderErrors, RemoveCustomProviderResponses, RemoveExtensionData, RemoveExtensionErrors, RemoveExtensionResponses, ReplyData, ReplyErrors, ReplyResponses, RestoreBackupData, RestoreBackupErrors, RestoreBackupResponses, ResumeAgentData, ResumeAgentErrors, ResumeAgentResponses, RunNowHandlerData, RunNowHandlerErrors, RunNowHandlerResponses, SaveRecipeData, SaveRecipeErrors, SaveRecipeResponses, ScanRecipeData, ScanRecipeResponses, SessionsHandlerData, SessionsHandlerErrors, SessionsHandlerResponses, StartAgentData, StartAgentErrors, StartAgentResponses, StartOpenrouterSetupData, StartOpenrouterSetupResponses, StartTetrateSetupData, StartTetrateSetupResponses, StatusData, StatusResponses, UnpauseScheduleData, UnpauseScheduleErrors, UnpauseScheduleResponses, UpdateAgentProviderData, UpdateAgentProviderErrors, UpdateAgentProviderResponses, UpdateCustomProviderData, UpdateCustomProviderErrors, UpdateCustomProviderResponses, UpdateFromSessionData, UpdateFromSessionErrors, UpdateFromSessionResponses, UpdateRouterToolSelectorData, UpdateRouterToolSelectorErrors, UpdateRouterToolSelectorResponses, UpdateScheduleData, UpdateScheduleErrors, UpdateScheduleResponses, UpdateSessionNameData, UpdateSessionNameErrors, UpdateSessionNameResponses, UpdateSessionUserRecipeValuesData, UpdateSessionUserRecipeValuesErrors, UpdateSessionUserRecipeValuesResponses, UpsertConfigData, UpsertConfigErrors, UpsertConfigResponses, UpsertPermissionsData, UpsertPermissionsErrors, UpsertPermissionsResponses, ValidateConfigData, ValidateConfigErrors, ValidateConfigResponses } from './types.gen'; export type Options<TData extends TDataShape = TDataShape, ThrowOnError extends boolean = false> = Options2<TData, ThrowOnError> & { /** @@ -267,6 +267,53 @@ export const confirmPermission = (options: }); }; +export const createBackup = <ThrowOnError extends boolean = false>(options: Options<CreateBackupData, ThrowOnError>) => { + return (options.client ?? client).post<CreateBackupResponses, CreateBackupErrors, ThrowOnError>({ + url: '/database/backup', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } + }); +}; + +export const listBackups = <ThrowOnError extends boolean = false>(options?: Options<ListBackupsData, ThrowOnError>) => { + return (options?.client ?? client).get<ListBackupsResponses, ListBackupsErrors, ThrowOnError>({ + url: '/database/backups', + ...options + }); +}; + +export const deleteBackups = <ThrowOnError extends boolean = false>(options: Options<DeleteBackupsData, ThrowOnError>) => { + return (options.client ?? client).delete<DeleteBackupsResponses, DeleteBackupsErrors, ThrowOnError>({ + url: '/database/backups/delete', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } + }); +}; + +export const restoreBackup = <ThrowOnError extends boolean = false>(options: Options<RestoreBackupData, ThrowOnError>) => { + return (options.client ?? client).post<RestoreBackupResponses, RestoreBackupErrors, ThrowOnError>({ + url: '/database/restore', + ...options, + headers: { + 'Content-Type': 'application/json', + ...options.headers + } + }); +}; + +export const databaseStatus = <ThrowOnError extends boolean = false>(options?: Options<DatabaseStatusData, ThrowOnError>) => { + return (options?.client ?? client).get<DatabaseStatusResponses, DatabaseStatusErrors, ThrowOnError>({ + url: '/database/status', + ...options + }); +}; + export const diagnostics = (options: Options) => { return (options.client ??
client).get({ url: '/diagnostics/{session_id}', diff --git a/ui/desktop/src/api/types.gen.ts b/ui/desktop/src/api/types.gen.ts index 6468fd3ee3cb..36224435df36 100644 --- a/ui/desktop/src/api/types.gen.ts +++ b/ui/desktop/src/api/types.gen.ts @@ -25,6 +25,20 @@ export type AuthorRequest = { metadata?: string | null; }; +export type BackupInfo = { + createdAt: string; + filename: string; + schemaVersion?: number | null; + size: number; +}; + +export type BackupsListResponse = { + /** + * List of available backups + */ + backups: Array<BackupInfo>; +}; + export type ChatRequest = { messages: Array<Message>; recipe_name?: string | null; @@ -74,6 +88,28 @@ export type Content = RawTextContent | RawImageContent | RawEmbeddedResource | R export type Conversation = Array<Message>; +export type CreateBackupRequest = { + /** + * Optional custom name for the backup + */ + name?: string | null; +}; + +export type CreateBackupResponse = { + /** + * When the backup was created + */ + createdAt: string; + /** + * Filename of the created backup + */ + filename: string; + /** + * Size of the backup file in bytes + */ + size: number; +}; + export type CreateRecipeRequest = { author?: AuthorRequest | null; session_id: string; @@ -91,6 +127,18 @@ export type CreateScheduleRequest = { recipe_source: string; }; +export type DatabaseStatusResponse = { + backupCount: number; + dbSize: number; + isLatestVersion: boolean; + latestBackup?: BackupInfo | null; + messageCount: number; + schemaVersion: number; + sessionCount: number; + timestamp: string; + totalTokens: number; +}; + export type DeclarativeProviderConfig = { api_key_env: string; base_url: string; @@ -114,6 +162,36 @@ export type DecodeRecipeResponse = { recipe: Recipe; }; +export type DeleteBackupsRequest = { + /** + * Clean up orphaned WAL/SHM files + */ + cleanupOrphaned: boolean; + /** + * Delete all backups if true + */ + deleteAll: boolean; + /** + * List of backup filenames to delete + */ + filenames: Array<string>; +}; + +export type DeleteBackupsResponse = { + /** + * Successfully deleted files + */ + deleted: Array<string>; + /** + * Files that failed to delete + */ + failed: Array<string>; + /** + * Number of orphaned files cleaned + */ + orphanedCleaned: number; +}; + export type DeleteRecipeRequest = { id: string; }; @@ -609,6 +687,24 @@ export type Response = { json_schema?: unknown; }; +export type RestoreBackupRequest = { + /** + * Filename of the backup to restore + */ + filename: string; +}; + +export type RestoreBackupResponse = { + /** + * Success message + */ + message: string; + /** + * Backup that was restored + */ + restoredFrom: string; +}; + export type ResumeAgentRequest = { load_model_and_extensions: boolean; session_id: string; @@ -1649,6 +1745,153 @@ export type ConfirmPermissionResponses = { 200: unknown; }; +export type CreateBackupData = { + body: CreateBackupRequest; + path?: never; + query?: never; + url: '/database/backup'; +}; + +export type CreateBackupErrors = { + /** + * Unauthorized - Invalid or missing API key + */ + 401: unknown; + /** + * Internal server error + */ + 500: unknown; +}; + +export type CreateBackupResponses = { + /** + * Backup created successfully + */ + 201: CreateBackupResponse; +}; + +export type CreateBackupResponse2 = CreateBackupResponses[keyof CreateBackupResponses]; + +export type ListBackupsData = { + body?: never; + path?: never; + query?: never; + url: '/database/backups'; +}; + +export type ListBackupsErrors = { + /** + * Unauthorized - Invalid or missing API key + */ + 401: unknown; + /** + * Internal server error + */ + 500: unknown;
+}; + +export type ListBackupsResponses = { + /** + * List of backups retrieved successfully + */ + 200: BackupsListResponse; +}; + +export type ListBackupsResponse = ListBackupsResponses[keyof ListBackupsResponses]; + +export type DeleteBackupsData = { + body: DeleteBackupsRequest; + path?: never; + query?: never; + url: '/database/backups/delete'; +}; + +export type DeleteBackupsErrors = { + /** + * Bad request - Invalid parameters + */ + 400: unknown; + /** + * Unauthorized - Invalid or missing API key + */ + 401: unknown; + /** + * Internal server error + */ + 500: unknown; +}; + +export type DeleteBackupsResponses = { + /** + * Backups deleted successfully + */ + 200: DeleteBackupsResponse; +}; + +export type DeleteBackupsResponse2 = DeleteBackupsResponses[keyof DeleteBackupsResponses]; + +export type RestoreBackupData = { + body: RestoreBackupRequest; + path?: never; + query?: never; + url: '/database/restore'; +}; + +export type RestoreBackupErrors = { + /** + * Bad request - Invalid backup filename + */ + 400: unknown; + /** + * Unauthorized - Invalid or missing API key + */ + 401: unknown; + /** + * Backup not found + */ + 404: unknown; + /** + * Internal server error + */ + 500: unknown; +}; + +export type RestoreBackupResponses = { + /** + * Database restored successfully + */ + 200: RestoreBackupResponse; +}; + +export type RestoreBackupResponse2 = RestoreBackupResponses[keyof RestoreBackupResponses]; + +export type DatabaseStatusData = { + body?: never; + path?: never; + query?: never; + url: '/database/status'; +}; + +export type DatabaseStatusErrors = { + /** + * Unauthorized - Invalid or missing API key + */ + 401: unknown; + /** + * Internal server error + */ + 500: unknown; +}; + +export type DatabaseStatusResponses = { + /** + * Database status retrieved successfully + */ + 200: DatabaseStatusResponse; +}; + +export type DatabaseStatusResponse2 = DatabaseStatusResponses[keyof DatabaseStatusResponses]; + export type DiagnosticsData = { body?: never; path: {