From c182ca304b389d6e89144a846ff986513eababa8 Mon Sep 17 00:00:00 2001
From: benthecarman
Date: Wed, 3 Dec 2025 16:03:49 -0600
Subject: [PATCH] Add configurable log level and log file

Previously we were dropping all our logs on the floor, aside from a few
printlns in the code. This implements a logger for the `log` facade that
writes logs to the console as well as to a log file. We also add config
options for setting the log file path and log level.
---
 Cargo.lock                        |   2 +
 ldk-server/Cargo.toml             |   2 +
 ldk-server/ldk-server-config.toml |   3 +
 ldk-server/src/main.rs            |  91 +++++++++++-----
 ldk-server/src/util/config.rs     |  41 ++++++++
 ldk-server/src/util/logger.rs     | 169 ++++++++++++++++++++++++++++++
 ldk-server/src/util/mod.rs        |   1 +
 7 files changed, 281 insertions(+), 28 deletions(-)
 create mode 100644 ldk-server/src/util/logger.rs

diff --git a/Cargo.lock b/Cargo.lock
index 60a3b4d..828cb3c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1715,6 +1715,7 @@ version = "0.1.0"
 dependencies = [
  "async-trait",
  "bytes",
+ "chrono",
  "futures-util",
  "hex-conservative 0.2.1",
  "http-body-util",
@@ -1723,6 +1724,7 @@
  "lapin",
  "ldk-node",
  "ldk-server-protos",
+ "log",
  "prost",
  "rand 0.8.5",
  "rusqlite",

diff --git a/ldk-server/Cargo.toml b/ldk-server/Cargo.toml
index c2c46ec..e1053f7 100644
--- a/ldk-server/Cargo.toml
+++ b/ldk-server/Cargo.toml
@@ -18,6 +18,8 @@ rusqlite = { version = "0.31.0", features = ["bundled"] }
 rand = { version = "0.8.5", default-features = false }
 async-trait = { version = "0.1.85", default-features = false }
 toml = { version = "0.8.9", default-features = false, features = ["parse"] }
+chrono = { version = "0.4", default-features = false, features = ["clock"] }
+log = "0.4.28"

 # Required for RabittMQ based EventPublisher. Only enabled for `events-rabbitmq` feature.
 lapin = { version = "2.4.0", features = ["rustls"], default-features = false, optional = true }

diff --git a/ldk-server/ldk-server-config.toml b/ldk-server/ldk-server-config.toml
index 7727cff..45967f5 100644
--- a/ldk-server/ldk-server-config.toml
+++ b/ldk-server/ldk-server-config.toml
@@ -8,6 +8,9 @@ rest_service_address = "127.0.0.1:3002" # LDK Server REST address

 [storage.disk]
 dir_path = "/tmp/ldk-server/" # Path for LDK and BDK data persistence

+[log]
+level = "Debug" # Log level (Error, Warn, Info, Debug, Trace)
+file = "/tmp/ldk-server/ldk-server.log" # Log file path

 # Must set either bitcoind or esplora settings, but not both

diff --git a/ldk-server/src/main.rs b/ldk-server/src/main.rs
index 0962a3c..72f5fb1 100644
--- a/ldk-server/src/main.rs
+++ b/ldk-server/src/main.rs
@@ -25,6 +25,7 @@ use crate::io::persist::{
     PAYMENTS_PERSISTENCE_SECONDARY_NAMESPACE,
 };
 use crate::util::config::{load_config, ChainSource};
+use crate::util::logger::ServerLogger;
 use crate::util::proto_adapter::{forwarded_payment_to_proto, payment_to_proto};
 use hex::DisplayHex;
 use ldk_node::config::Config;
@@ -32,6 +33,7 @@ use ldk_node::lightning::ln::channelmanager::PaymentId;
 use ldk_server_protos::events;
 use ldk_server_protos::events::{event_envelope, EventEnvelope};
 use ldk_server_protos::types::Payment;
+use log::{error, info};
 use prost::Message;
 use rand::Rng;
 use std::fs;
@@ -70,6 +72,25 @@ fn main() {
         },
     };

+    let log_file_path = config_file.log_file_path.map(PathBuf::from).unwrap_or_else(|| {
+        let mut default_log_path = PathBuf::from(&config_file.storage_dir_path);
+        default_log_path.push("ldk-server.log");
+        default_log_path
+    });
+
+    if log_file_path == PathBuf::from(&config_file.storage_dir_path) {
+        eprintln!("Log file path cannot be the same as storage directory path.");
+        std::process::exit(-1);
+    }
+
+    let logger = match ServerLogger::init(config_file.log_level, &log_file_path) {
+        Ok(logger) => logger,
+        Err(e) => {
+            eprintln!("Failed to initialize logger: {e}");
+            std::process::exit(-1);
+        },
+    };
+
     ldk_node_config.storage_dir_path = config_file.storage_dir_path.clone();
     ldk_node_config.listening_addresses = Some(vec![config_file.listening_addr]);
     ldk_node_config.network = config_file.network;
@@ -79,7 +100,7 @@ fn main() {

     if let Some(alias) = config_file.alias {
         if let Err(e) = builder.set_node_alias(alias.to_string()) {
-            eprintln!("Failed to set node alias: {e}");
+            error!("Failed to set node alias: {e}");
             std::process::exit(-1);
         }
     }
@@ -107,7 +128,7 @@ fn main() {
     let runtime = match tokio::runtime::Builder::new_multi_thread().enable_all().build() {
         Ok(runtime) => Arc::new(runtime),
         Err(e) => {
-            eprintln!("Failed to setup tokio runtime: {}", e);
+            error!("Failed to setup tokio runtime: {e}");
             std::process::exit(-1);
         },
     };
@@ -117,7 +138,7 @@ fn main() {
     let node = match builder.build() {
         Ok(node) => Arc::new(node),
         Err(e) => {
-            eprintln!("Failed to build LDK Node: {}", e);
+            error!("Failed to build LDK Node: {e}");
             std::process::exit(-1);
         },
     };
@@ -126,7 +147,7 @@ fn main() {
         Arc::new(match SqliteStore::new(PathBuf::from(config_file.storage_dir_path), None, None) {
             Ok(store) => store,
             Err(e) => {
-                eprintln!("Failed to create SqliteStore: {:?}", e);
+                error!("Failed to create SqliteStore: {e:?}");
                 std::process::exit(-1);
             },
         });
@@ -142,26 +163,35 @@ fn main() {
         Arc::new(RabbitMqEventPublisher::new(rabbitmq_config))
     };

-    println!("Starting up...");
+    info!("Starting up...");
     match node.start() {
         Ok(()) => {},
         Err(e) => {
-            eprintln!("Failed to start up LDK Node: {}", e);
e); + error!("Failed to start up LDK Node: {e}"); std::process::exit(-1); }, } - println!( + info!( "CONNECTION_STRING: {}@{}", node.node_id(), node.config().listening_addresses.as_ref().unwrap().first().unwrap() ); runtime.block_on(async { + // Register SIGHUP handler for log rotation + let mut sighup_stream = match tokio::signal::unix::signal(SignalKind::hangup()) { + Ok(stream) => stream, + Err(e) => { + error!("Failed to register SIGHUP handler: {e}"); + std::process::exit(-1); + } + }; + let mut sigterm_stream = match tokio::signal::unix::signal(SignalKind::terminate()) { Ok(stream) => stream, Err(e) => { - println!("Failed to register for SIGTERM stream: {}", e); + error!("Failed to register for SIGTERM stream: {e}"); std::process::exit(-1); } }; @@ -174,25 +204,25 @@ fn main() { event = event_node.next_event_async() => { match event { Event::ChannelPending { channel_id, counterparty_node_id, .. } => { - println!( + info!( "CHANNEL_PENDING: {} from counterparty {}", channel_id, counterparty_node_id ); if let Err(e) = event_node.event_handled() { - eprintln!("Failed to mark event as handled: {e}"); + error!("Failed to mark event as handled: {e}"); } }, Event::ChannelReady { channel_id, counterparty_node_id, .. } => { - println!( + info!( "CHANNEL_READY: {} from counterparty {:?}", channel_id, counterparty_node_id ); if let Err(e) = event_node.event_handled() { - eprintln!("Failed to mark event as handled: {e}"); + error!("Failed to mark event as handled: {e}"); } }, Event::PaymentReceived { payment_id, payment_hash, amount_msat, .. } => { - println!( + info!( "PAYMENT_RECEIVED: with id {:?}, hash {}, amount_msat {}", payment_id, payment_hash, amount_msat ); @@ -233,7 +263,7 @@ fn main() { let payment = payment_to_proto(payment_details); upsert_payment_details(&event_node, Arc::clone(&paginated_store), &payment); } else { - eprintln!("Unable to find payment with paymentId: {}", payment_id.to_string()); + error!("Unable to find payment with paymentId: {}", payment_id.to_string()); } }, Event::PaymentForwarded { @@ -249,7 +279,7 @@ fn main() { outbound_amount_forwarded_msat } => { - println!("PAYMENT_FORWARDED: with outbound_amount_forwarded_msat {}, total_fee_earned_msat: {}, inbound channel: {}, outbound channel: {}", + info!("PAYMENT_FORWARDED: with outbound_amount_forwarded_msat {}, total_fee_earned_msat: {}, inbound channel: {}, outbound channel: {}", outbound_amount_forwarded_msat.unwrap_or(0), total_fee_earned_msat.unwrap_or(0), prev_channel_id, next_channel_id ); @@ -281,7 +311,7 @@ fn main() { }).await { Ok(_) => {}, Err(e) => { - println!("Failed to publish 'PaymentForwarded' event: {}", e); + error!("Failed to publish 'PaymentForwarded' event: {}", e); continue; } }; @@ -293,17 +323,17 @@ fn main() { ) { Ok(_) => { if let Err(e) = event_node.event_handled() { - eprintln!("Failed to mark event as handled: {e}"); + error!("Failed to mark event as handled: {e}"); } } Err(e) => { - println!("Failed to write forwarded payment to persistence: {}", e); + error!("Failed to write forwarded payment to persistence: {}", e); } } }, _ => { if let Err(e) = event_node.event_handled() { - eprintln!("Failed to mark event as handled: {e}"); + error!("Failed to mark event as handled: {e}"); } }, } @@ -315,19 +345,24 @@ fn main() { let node_service = NodeService::new(Arc::clone(&node), Arc::clone(&paginated_store)); runtime.spawn(async move { if let Err(err) = http1::Builder::new().serve_connection(io_stream, node_service).await { - eprintln!("Failed to serve connection: {}", err); + 
error!("Failed to serve connection: {}", err); } }); }, - Err(e) => eprintln!("Failed to accept connection: {}", e), + Err(e) => error!("Failed to accept connection: {}", e), } } _ = tokio::signal::ctrl_c() => { - println!("Received CTRL-C, shutting down.."); + info!("Received CTRL-C, shutting down.."); break; } + _ = sighup_stream.recv() => { + if let Err(e) = logger.reopen() { + error!("Failed to reopen log file on SIGHUP: {e}"); + } + } _ = sigterm_stream.recv() => { - println!("Received SIGTERM, shutting down.."); + info!("Received SIGTERM, shutting down.."); break; } } @@ -335,7 +370,7 @@ fn main() { }); node.stop().expect("Shutdown should always succeed."); - println!("Shutdown complete.."); + info!("Shutdown complete.."); } async fn publish_event_and_upsert_payment( @@ -351,14 +386,14 @@ async fn publish_event_and_upsert_payment( match event_publisher.publish(EventEnvelope { event: Some(event) }).await { Ok(_) => {}, Err(e) => { - println!("Failed to publish '{}' event, : {}", event_name, e); + error!("Failed to publish '{event_name}' event, : {e}"); return; }, }; upsert_payment_details(event_node, Arc::clone(&paginated_store), &payment); } else { - eprintln!("Unable to find payment with paymentId: {}", payment_id); + error!("Unable to find payment with paymentId: {payment_id}"); } } @@ -377,11 +412,11 @@ fn upsert_payment_details( ) { Ok(_) => { if let Err(e) = event_node.event_handled() { - eprintln!("Failed to mark event as handled: {e}"); + error!("Failed to mark event as handled: {e}"); } }, Err(e) => { - eprintln!("Failed to write payment to persistence: {}", e); + error!("Failed to write payment to persistence: {e}"); }, } } diff --git a/ldk-server/src/util/config.rs b/ldk-server/src/util/config.rs index 11e5bdf..b2d896c 100644 --- a/ldk-server/src/util/config.rs +++ b/ldk-server/src/util/config.rs @@ -2,6 +2,7 @@ use ldk_node::bitcoin::Network; use ldk_node::lightning::ln::msgs::SocketAddress; use ldk_node::lightning::routing::gossip::NodeAlias; use ldk_node::liquidity::LSPS2ServiceConfig; +use log::LevelFilter; use serde::{Deserialize, Serialize}; use std::net::SocketAddr; use std::path::Path; @@ -20,6 +21,8 @@ pub struct Config { pub rabbitmq_connection_string: String, pub rabbitmq_exchange_name: String, pub lsps2_service_config: Option, + pub log_level: LevelFilter, + pub log_file_path: Option, } #[derive(Debug)] @@ -86,6 +89,21 @@ impl TryFrom for Config { None }; + let log_level = toml_config + .log + .as_ref() + .and_then(|log_config| log_config.level.as_ref()) + .map(|level_str| { + LevelFilter::from_str(level_str).map_err(|e| { + io::Error::new( + io::ErrorKind::InvalidInput, + format!("Invalid log level configured: {}", e), + ) + }) + }) + .transpose()? 
+            .unwrap_or(LevelFilter::Debug);
+
         let (rabbitmq_connection_string, rabbitmq_exchange_name) = {
             let rabbitmq = toml_config.rabbitmq.unwrap_or(RabbitmqConfig {
                 connection_string: String::new(),
@@ -122,6 +140,8 @@ impl TryFrom<TomlConfig> for Config {
             rabbitmq_connection_string,
             rabbitmq_exchange_name,
             lsps2_service_config,
+            log_level,
+            log_file_path: toml_config.log.and_then(|l| l.file),
         })
     }
 }
@@ -135,6 +155,7 @@ pub struct TomlConfig {
     esplora: Option<EsploraConfig>,
     rabbitmq: Option<RabbitmqConfig>,
     liquidity: Option<LiquidityConfig>,
+    log: Option<LogConfig>,
 }

 #[derive(Deserialize, Serialize)]
@@ -167,6 +188,12 @@ struct EsploraConfig {
     server_url: String,
 }

+#[derive(Deserialize, Serialize)]
+struct LogConfig {
+    level: Option<String>,
+    file: Option<String>,
+}
+
 #[derive(Deserialize, Serialize)]
 struct RabbitmqConfig {
     connection_string: String,
@@ -260,6 +287,10 @@ mod tests {

         [storage.disk]
         dir_path = "/tmp"
+
+        [log]
+        level = "Trace"
+        file = "/var/log/ldk-server.log"

         [esplora]
         server_url = "https://mempool.space/api"
@@ -310,6 +341,8 @@ mod tests {
                 max_payment_size_msat: 25000000000,
                 client_trusts_lsp: true,
             }),
+            log_level: LevelFilter::Trace,
+            log_file_path: Some("/var/log/ldk-server.log".to_string()),
         };

         assert_eq!(config.listening_addr, expected.listening_addr);
@@ -339,6 +372,10 @@ mod tests {

         [storage.disk]
         dir_path = "/tmp"
+
+        [log]
+        level = "Trace"
+        file = "/var/log/ldk-server.log"

         [bitcoind]
         rpc_address = "127.0.0.1:8332" # RPC endpoint
@@ -383,6 +420,10 @@ mod tests {

         [storage.disk]
         dir_path = "/tmp"
+
+        [log]
+        level = "Trace"
+        file = "/var/log/ldk-server.log"

         [bitcoind]
         rpc_address = "127.0.0.1:8332" # RPC endpoint

diff --git a/ldk-server/src/util/logger.rs b/ldk-server/src/util/logger.rs
new file mode 100644
index 0000000..66a236c
--- /dev/null
+++ b/ldk-server/src/util/logger.rs
@@ -0,0 +1,169 @@
+use log::{Level, LevelFilter, Log, Metadata, Record};
+use std::fs::{self, File, OpenOptions};
+use std::io::{self, BufWriter, Write};
+use std::path::{Path, PathBuf};
+use std::sync::{Arc, Mutex};
+
+/// A logger implementation that writes logs to both the console and a file.
+///
+/// The logger formats log messages with RFC3339 timestamps and writes them to:
+/// - stdout/stderr for console output
+/// - A file specified during initialization
+///
+/// All log messages follow the format:
+/// `[TIMESTAMP LEVEL TARGET:LINE] MESSAGE`
+///
+/// Example: `[2025-12-04T10:30:45.123Z INFO  ldk_server:42] Starting up...`
+///
+/// The logger handles SIGHUP for log rotation by reopening the file handle when signaled.
+pub struct ServerLogger {
+    /// The maximum log level to display
+    level: LevelFilter,
+    /// The file to write logs to, protected by a mutex for thread-safe access
+    file: Mutex<BufWriter<File>>,
+    /// Path to the log file for reopening on SIGHUP
+    log_file_path: PathBuf,
+}
+
+impl ServerLogger {
+    /// Initializes the global logger with the specified level and file path.
+    ///
+    /// Opens or creates the log file at the given path. If the file exists, logs are appended.
+    /// If the file doesn't exist, it will be created along with any necessary parent directories.
+    ///
+    /// This should be called once at application startup. Subsequent calls will fail.
+    ///
+    /// Returns an `Arc` to the logger for signal handling purposes.
+    pub fn init(level: LevelFilter, log_file_path: &Path) -> Result<Arc<Self>, io::Error> {
+        // Create parent directories if they don't exist
+        if let Some(parent) = log_file_path.parent() {
+            fs::create_dir_all(parent)?;
+        }
+
+        let file = open_log_file(log_file_path)?;
+
+        let logger = Arc::new(ServerLogger {
+            level,
+            file: Mutex::new(file),
+            log_file_path: log_file_path.to_path_buf(),
+        });
+
+        log::set_boxed_logger(Box::new(LoggerWrapper(Arc::clone(&logger))))
+            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+        log::set_max_level(level);
+        Ok(logger)
+    }
+
+    /// Reopens the log file. Called on SIGHUP for log rotation.
+    pub fn reopen(&self) -> Result<(), io::Error> {
+        let new_file = open_log_file(&self.log_file_path)?;
+        match self.file.lock() {
+            Ok(mut file) => {
+                // Flush the old buffer before replacing with the new file
+                file.flush()?;
+                *file = new_file;
+                Ok(())
+            },
+            Err(e) => {
+                Err(io::Error::new(io::ErrorKind::Other, format!("Failed to acquire lock: {}", e)))
+            },
+        }
+    }
+}
+
+impl Log for ServerLogger {
+    fn enabled(&self, metadata: &Metadata) -> bool {
+        metadata.level() <= self.level
+    }
+
+    fn log(&self, record: &Record) {
+        if self.enabled(record.metadata()) {
+            let level_str = format_level(record.level());
+            let line = record.line().unwrap_or(0);
+
+            // Log to console
+            let _ = match record.level() {
+                Level::Error => {
+                    write!(
+                        io::stderr(),
+                        "[{} {} {}:{}] {}\n",
+                        format_timestamp(),
+                        level_str,
+                        record.target(),
+                        line,
+                        record.args()
+                    )
+                },
+                _ => {
+                    write!(
+                        io::stdout(),
+                        "[{} {} {}:{}] {}\n",
+                        format_timestamp(),
+                        level_str,
+                        record.target(),
+                        line,
+                        record.args()
+                    )
+                },
+            };
+
+            // Log to file
+            if let Ok(mut file) = self.file.lock() {
+                let _ = write!(
+                    file,
+                    "[{} {} {}:{}] {}\n",
+                    format_timestamp(),
+                    level_str,
+                    record.target(),
+                    line,
+                    record.args()
+                );
+            }
+        }
+    }
+
+    fn flush(&self) {
+        let _ = io::stdout().flush();
+        let _ = io::stderr().flush();
+        if let Ok(mut file) = self.file.lock() {
+            let _ = file.flush();
+        }
+    }
+}
+
+fn format_timestamp() -> String {
+    let now = chrono::Utc::now();
+    now.to_rfc3339_opts(chrono::SecondsFormat::Millis, true)
+}
+
+fn format_level(level: Level) -> &'static str {
+    match level {
+        Level::Error => "ERROR",
+        Level::Warn => "WARN ",
+        Level::Info => "INFO ",
+        Level::Debug => "DEBUG",
+        Level::Trace => "TRACE",
+    }
+}
+
+fn open_log_file(log_file_path: &Path) -> Result<BufWriter<File>, io::Error> {
+    let file = OpenOptions::new().create(true).append(true).open(log_file_path)?;
+    Ok(BufWriter::new(file))
+}
+
+/// Wrapper to allow `Arc<ServerLogger>` to implement the `Log` trait
+struct LoggerWrapper(Arc<ServerLogger>);
+
+impl Log for LoggerWrapper {
+    fn enabled(&self, metadata: &Metadata) -> bool {
+        self.0.enabled(metadata)
+    }
+
+    fn log(&self, record: &Record) {
+        self.0.log(record)
+    }
+
+    fn flush(&self) {
+        self.0.flush()
+    }
+}

diff --git a/ldk-server/src/util/mod.rs b/ldk-server/src/util/mod.rs
index 02789f0..2bcbee6 100644
--- a/ldk-server/src/util/mod.rs
+++ b/ldk-server/src/util/mod.rs
@@ -1,2 +1,3 @@
 pub(crate) mod config;
+pub(crate) mod logger;
 pub(crate) mod proto_adapter;
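
A minimal usage sketch of the API this patch adds (illustrative, not part of
the diff; the `example` function name is a placeholder). It assumes only the
`ServerLogger` interface from ldk-server/src/util/logger.rs and the default
log path from ldk-server-config.toml:

    use crate::util::logger::ServerLogger;
    use log::{info, LevelFilter};
    use std::path::Path;

    fn example() -> std::io::Result<()> {
        // Install the global logger once at startup; keep the returned Arc
        // so a SIGHUP handler can reach it later.
        let logger =
            ServerLogger::init(LevelFilter::Debug, Path::new("/tmp/ldk-server/ldk-server.log"))?;

        // Messages at or below the configured level go to both the console
        // and the file, formatted like:
        // [2025-12-04T10:30:45.123Z INFO  ldk_server:42] Starting up...
        info!("Starting up...");

        // After an external rotation (e.g. logrotate renames the file and
        // sends SIGHUP), reopen the handle so new messages land in a fresh
        // file at the configured path.
        logger.reopen()?;
        Ok(())
    }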