diff --git a/src/accountsdb/db.zig b/src/accountsdb/db.zig index 1d03d7ed2..3b1ba838c 100644 --- a/src/accountsdb/db.zig +++ b/src/accountsdb/db.zig @@ -30,6 +30,8 @@ const AccountRef = sig.accounts_db.index.AccountRef; const DiskMemoryAllocator = sig.utils.allocators.DiskMemoryAllocator; const RwMux = sig.sync.RwMux; const Logger = sig.trace.log.Logger; +const StandardErrLogger = sig.trace.log.ChannelPrintLogger; +const Level = sig.trace.level.Level; const NestedHashTree = sig.common.merkle_tree.NestedHashTree; const GetMetricError = sig.prometheus.registry.GetMetricError; const Counter = sig.prometheus.counter.Counter; @@ -219,7 +221,7 @@ pub const AccountsDB = struct { var index_bin_dir = try snapshot_dir.makeOpenPath("index", .{}); errdefer index_bin_dir.close(); - logger.infof("using disk index in {s}", .{sig.utils.fmt.tryRealPath(index_bin_dir, ".")}); + logger.info().logf("using disk index in {s}", .{sig.utils.fmt.tryRealPath(index_bin_dir, ".")}); const ptr = try allocator.create(DiskMemoryAllocator); ptr.* = .{ @@ -229,7 +231,7 @@ pub const AccountsDB = struct { break :blk .{ ptr, ptr.allocator() }; } else { - logger.infof("using ram index", .{}); + logger.info().logf("using ram index", .{}); break :blk .{ null, allocator }; } }; @@ -322,7 +324,7 @@ pub const AccountsDB = struct { allocator, accounts_per_file_estimate, ); - self.logger.infof("loaded from snapshot in {s}", .{load_duration}); + self.logger.info().logf("loaded from snapshot in {s}", .{load_duration}); if (validate) { const full_snapshot = snapshot_fields_and_paths.full; @@ -332,7 +334,7 @@ pub const AccountsDB = struct { full_snapshot.bank_fields.capitalization, snapshot_fields.accounts_db_fields.bank_hash_info.accounts_hash, ); - self.logger.infof("validated from snapshot in {s}", .{validate_duration}); + self.logger.info().logf("validated from snapshot in {s}", .{validate_duration}); } return snapshot_fields; @@ -348,7 +350,7 @@ pub const AccountsDB = struct { per_thread_allocator: std.mem.Allocator, accounts_per_file_estimate: u64, ) !sig.time.Duration { - self.logger.infof("loading from snapshot...", .{}); + self.logger.info().log("loading from snapshot..."); // used to read account files const n_parse_threads = n_threads; @@ -361,7 +363,7 @@ pub const AccountsDB = struct { var timer = try sig.time.Timer.start(); const n_account_files = snapshot_manifest.file_map.count(); - self.logger.infof("found {d} account files", .{n_account_files}); + self.logger.info().logf("found {d} account files", .{n_account_files}); std.debug.assert(n_account_files > 0); @@ -436,7 +438,7 @@ pub const AccountsDB = struct { loading_threads.deinit(); } - self.logger.infof("[{d} threads]: reading and indexing accounts...", .{n_parse_threads}); + self.logger.info().logf("[{d} threads]: reading and indexing accounts...", .{n_parse_threads}); { var handles = std.ArrayList(std.Thread).init(self.allocator); defer { @@ -457,7 +459,6 @@ pub const AccountsDB = struct { n_parse_threads, ); } - self.logger.infof("total time: {s}", .{timer.read()}); // if geyser, send end of data signal if (self.geyser_writer) |geyser_writer| { @@ -465,10 +466,10 @@ pub const AccountsDB = struct { try geyser_writer.writePayloadToPipe(end_of_snapshot); } - self.logger.infof("[{d} threads]: combining thread accounts...", .{n_combine_threads}); + self.logger.info().logf("[{d} threads]: combining thread accounts...", .{n_combine_threads}); var merge_timer = try sig.time.Timer.start(); try self.mergeMultipleDBs(loading_threads.items, n_combine_threads); - 
self.logger.debugf("combining thread indexes took: {s}", .{merge_timer.read()}); + self.logger.debug().logf("combining thread indexes took: {s}", .{merge_timer.read()}); return timer.read(); } @@ -571,13 +572,13 @@ pub const AccountsDB = struct { const file_name_bounded = sig.utils.fmt.boundedFmt("{d}.{d}", .{ slot, file_info.id.toInt() }); const accounts_file_file = accounts_dir.openFile(file_name_bounded.constSlice(), .{ .mode = .read_write }) catch |err| { - self.logger.errf("Failed to open accounts/{s}: {s}", .{ file_name_bounded.constSlice(), @errorName(err) }); + self.logger.err().logf("Failed to open accounts/{s}: {s}", .{ file_name_bounded.constSlice(), @errorName(err) }); return err; }; errdefer accounts_file_file.close(); break :blk AccountFile.init(accounts_file_file, file_info, slot) catch |err| { - self.logger.errf("failed to *open* AccountsFile {s}: {s}\n", .{ file_name_bounded.constSlice(), @errorName(err) }); + self.logger.err().logf("failed to *open* AccountsFile {s}: {s}\n", .{ file_name_bounded.constSlice(), @errorName(err) }); return err; }; }; @@ -590,7 +591,7 @@ pub const AccountsDB = struct { &references, geyser_slot_storage, ) catch |err| { - self.logger.errf("failed to *validate/index* AccountsFile: {d}.{d}: {s}\n", .{ + self.logger.err().logf("failed to *validate/index* AccountsFile: {d}.{d}: {s}\n", .{ accounts_file.slot, accounts_file.id.toInt(), @errorName(err), @@ -670,7 +671,7 @@ pub const AccountsDB = struct { // NOTE: this is good for debugging what to set `accounts_per_file_est` to if (print_progress) { - self.logger.infof("accounts_per_file: actual vs estimated: {d} vs {d}", .{ + self.logger.info().logf("accounts_per_file: actual vs estimated: {d} vs {d}", .{ total_accounts / n_account_files, accounts_per_file_est, }); @@ -886,7 +887,7 @@ pub const AccountsDB = struct { @memset(lamports, 0); // split processing the bins over muliple threads - self.logger.infof("collecting hashes from accounts...", .{}); + self.logger.info().logf("collecting hashes from accounts...", .{}); { var handles = std.ArrayList(std.Thread).init(self.allocator); @@ -909,13 +910,13 @@ pub const AccountsDB = struct { ); } - self.logger.debugf("took: {s}", .{std.fmt.fmtDuration(timer.read())}); + self.logger.debug().logf("took: {s}", .{std.fmt.fmtDuration(timer.read())}); timer.reset(); - self.logger.infof("computing the merkle root over accounts...", .{}); + self.logger.info().logf("computing the merkle root over accounts...", .{}); var hash_tree = NestedHashTree{ .hashes = hashes }; const accounts_hash = try hash_tree.computeMerkleRoot(MERKLE_FANOUT); - self.logger.debugf("took {s}", .{std.fmt.fmtDuration(timer.read())}); + self.logger.debug().logf("took {s}", .{std.fmt.fmtDuration(timer.read())}); timer.reset(); var total_lamports: u64 = 0; @@ -943,7 +944,7 @@ pub const AccountsDB = struct { var timer = try sig.time.Timer.start(); // validate the full snapshot - self.logger.infof("validating the full snapshot", .{}); + self.logger.info().logf("validating the full snapshot", .{}); const accounts_hash, const total_lamports = try self.computeAccountHashesAndLamports(.{ .FullAccountHash = .{ .max_slot = full_snapshot_slot, @@ -951,14 +952,14 @@ pub const AccountsDB = struct { }); if (expected_accounts_hash.order(&accounts_hash) != .eq) { - self.logger.errf( + self.logger.err().logf( \\ incorrect accounts hash \\ expected vs calculated: {d} vs {d} , .{ expected_accounts_hash, accounts_hash }); return error.IncorrectAccountsHash; } if (expected_full_lamports != total_lamports) { - 
self.logger.errf( + self.logger.err().logf( \\ incorrect total lamports \\ expected vs calculated: {d} vs {d} , .{ expected_full_lamports, total_lamports }); @@ -970,7 +971,7 @@ pub const AccountsDB = struct { return timer.read(); } - self.logger.infof("validating the incremental snapshot", .{}); + self.logger.info().logf("validating the incremental snapshot", .{}); const expected_accounts_delta_hash = incremental_snapshot_persistence.?.incremental_hash; const expected_incremental_lamports = incremental_snapshot_persistence.?.incremental_capitalization; @@ -981,7 +982,7 @@ pub const AccountsDB = struct { }); if (expected_incremental_lamports != incremental_lamports) { - self.logger.errf( + self.logger.err().logf( \\ incorrect incremental lamports \\ expected vs calculated: {d} vs {d} , .{ expected_incremental_lamports, incremental_lamports }); @@ -989,7 +990,7 @@ pub const AccountsDB = struct { } if (expected_accounts_delta_hash.order(&accounts_delta_hash) != .eq) { - self.logger.errf( + self.logger.err().logf( \\ incorrect accounts delta hash \\ expected vs calculated: {d} vs {d} , .{ expected_accounts_delta_hash, accounts_delta_hash }); @@ -1257,7 +1258,7 @@ pub const AccountsDB = struct { }; if (must_flush_slots) { - self.logger.debugf("flushing slots: min: {}...{}", std.mem.minMax(Slot, flush_slots.items)); + self.logger.debug().logf("flushing slots: min: {}...{}", std.mem.minMax(Slot, flush_slots.items)); // flush the slots try unclean_account_files.ensureTotalCapacityPrecise(flush_slots.items.len); @@ -1266,7 +1267,7 @@ pub const AccountsDB = struct { for (flush_slots.items) |flush_slot| { const unclean_file_id = self.flushSlot(flush_slot) catch |err| { // flush fail = loss of account data on slot -- should never happen - self.logger.errf("flushing slot {d} error: {s}", .{ flush_slot, @errorName(err) }); + self.logger.err().logf("flushing slot {d} error: {s}", .{ flush_slot, @errorName(err) }); continue; }; unclean_account_files.appendAssumeCapacity(unclean_file_id); @@ -1283,7 +1284,7 @@ pub const AccountsDB = struct { break :blk if (latest_full_snapshot_info.*) |info| info.slot else 0; }; if (largest_flushed_slot - latest_full_snapshot_slot >= slots_per_full_snapshot) { - self.logger.infof("accountsdb[manager]: generating full snapshot for slot {d}", .{largest_flushed_slot}); + self.logger.info().logf("accountsdb[manager]: generating full snapshot for slot {d}", .{largest_flushed_slot}); var snapshot_gen_pkg, const snapshot_gen_info = try self.makeFullSnapshotGenerationPackage( largest_flushed_slot, @@ -1325,7 +1326,7 @@ pub const AccountsDB = struct { if (largest_flushed_slot < latest_full_snapshot_info.*.?.slot + slots_per_incremental_snapshot) break :inc_blk; } - self.logger.infof("accountsdb[manager]: generating incremental snapshot from {d} to {d}", .{ + self.logger.info().logf("accountsdb[manager]: generating incremental snapshot from {d} to {d}", .{ latest_full_snapshot_slot, largest_flushed_slot, }); @@ -1365,7 +1366,7 @@ pub const AccountsDB = struct { &delete_account_files, ); _ = clean_result; - // self.logger.debugf("clean_result: {any}", .{clean_result}); + // self.logger.debug().logf("clean_result: {any}", .{clean_result}); // shrink any account files which have been cleaned const shrink_result = try self.shrinkAccountFiles( @@ -1373,7 +1374,7 @@ pub const AccountsDB = struct { &delete_account_files, ); _ = shrink_result; - // self.logger.debugf("shrink_results: {any}", .{shrink_results}); + // self.logger.debug().logf("shrink_results: {any}", .{shrink_results}); // 
delete any empty account files if (delete_account_files.count() > DELETE_ACCOUNT_FILES_MIN) { @@ -1422,14 +1423,14 @@ pub const AccountsDB = struct { const full_path = try archive_dir.realpathAlloc(self.allocator, archive_file_name); defer self.allocator.free(full_path); - self.logger.infof("writing full snapshot to {s}", .{full_path}); + self.logger.info().logf("writing full snapshot to {s}", .{full_path}); // write the snapshot to disk, compressed var timer = try sig.time.Timer.start(); const zstd_write_ctx = zstd.writerCtx(archive_file.writer(), &zstd_compressor, zstd_buffer); try snapshot_gen_pkg.write(zstd_write_ctx.writer(), status_cache); try zstd_write_ctx.finish(); - self.logger.infof("writing full snapshot took {any}", .{timer.read()}); + self.logger.info().logf("writing full snapshot took {any}", .{timer.read()}); // track new snapshot try self.commitFullSnapshotInfo(snapshot_gen_info, .ignore_old); @@ -1510,7 +1511,7 @@ pub const AccountsDB = struct { std.debug.assert(did_update); } - self.logger.debugf("flushed {} accounts, totalling size {}", .{ account_file.number_of_accounts, size }); + self.logger.debug().logf("flushed {} accounts, totalling size {}", .{ account_file.number_of_accounts, size }); // remove old references { @@ -1679,7 +1680,7 @@ pub const AccountsDB = struct { } references_to_delete.clearRetainingCapacity(); self.stats.clean_references_deleted.set(references_to_delete.items.len); - self.logger.debugf( + self.logger.debug().logf( "cleaned slot {} - old_state: {}, zero_lamports: {}", .{ account_file.slot, num_old_states, num_zero_lamports }, ); @@ -1739,13 +1740,13 @@ pub const AccountsDB = struct { for (delete_queue.items) |account_file| { const slot = account_file.slot; - self.logger.infof("deleting slot: {}...", .{slot}); + self.logger.info().logf("deleting slot: {}...", .{slot}); account_file.deinit(); // delete file from disk self.deleteAccountFile(slot, account_file.id) catch |err| { // NOTE: this should always succeed or something is wrong - self.logger.errf( + self.logger.err().logf( "failed to delete account file slot.file_id: {d}.{d}: {s}", .{ slot, account_file.id, @errorName(err) }, ); @@ -1774,7 +1775,7 @@ pub const AccountsDB = struct { const file_path_bounded = sig.utils.fmt.boundedFmt("accounts/{d}.{d}", .{ slot, file_id.toInt() }); self.snapshot_dir.deleteFile(file_path_bounded.constSlice()) catch |err| switch (err) { error.FileNotFound => { - self.logger.warnf("trying to delete accounts file which does not exist: {s}", .{sig.utils.fmt.tryRealPath(self.snapshot_dir, file_path_bounded.constSlice())}); + self.logger.warn().logf("trying to delete accounts file which does not exist: {s}", .{sig.utils.fmt.tryRealPath(self.snapshot_dir, file_path_bounded.constSlice())}); return error.InvalidAccountFile; }, else => |e| return e, @@ -1811,7 +1812,7 @@ pub const AccountsDB = struct { }; const slot = shrink_account_file.slot; - self.logger.debugf("shrinking slot: {}...", .{slot}); + self.logger.debug().logf("shrinking slot: {}...", .{slot}); // compute size of alive accounts (read) var is_alive_flags = try std.ArrayList(bool).initCapacity( @@ -1857,9 +1858,9 @@ pub const AccountsDB = struct { self.stats.shrink_dead_accounts.observe(@floatFromInt(accounts_dead_count)); self.stats.shrink_file_shrunk_by.observe(@floatFromInt(accounts_dead_size)); - self.logger.debugf("n alive accounts: {}", .{accounts_alive_count}); - self.logger.debugf("n dead accounts: {}", .{accounts_dead_count}); - self.logger.debugf("shrunk by: {}", .{accounts_dead_size}); + 
self.logger.debug().logf("n alive accounts: {}", .{accounts_alive_count}); + self.logger.debug().logf("n dead accounts: {}", .{accounts_dead_count}); + self.logger.debug().logf("shrunk by: {}", .{accounts_dead_size}); // alloc account file for accounts const new_file, const new_file_id, const new_memory = try self.createAccountFile( @@ -2279,7 +2280,7 @@ pub const AccountsDB = struct { const was_inserted = self.account_index.indexRefIfNotDuplicateSlotAssumeCapacity(ref); if (!was_inserted) { accounts_dead_count += 1; - self.logger.warnf( + self.logger.warn().logf( "account was not referenced because its slot was a duplicate: {any}", .{.{ .slot = ref.slot, @@ -2381,7 +2382,7 @@ pub const AccountsDB = struct { const was_inserted = self.account_index.indexRefIfNotDuplicateSlotAssumeCapacity(ref_ptr); if (!was_inserted) { - self.logger.warnf( + self.logger.warn().logf( "duplicate reference not inserted: slot: {d} pubkey: {s}", .{ ref_ptr.slot, ref_ptr.pubkey }, ); @@ -2528,7 +2529,7 @@ pub const AccountsDB = struct { if (bank_hash_stats_map.get(account_file.slot)) |other_stats| { bank_hash_stats.accumulate(other_stats); } else { - self.logger.warnf("No bank hash stats for slot {}.", .{account_file.slot}); + self.logger.warn().logf("No bank hash stats for slot {}.", .{account_file.slot}); } serializable_file_map.putAssumeCapacityNoClobber(account_file.slot, .{ @@ -2604,7 +2605,7 @@ pub const AccountsDB = struct { }); const old_name = old_name_bounded.constSlice(); - self.logger.infof("deleting old full snapshot archive: {s}", .{old_name}); + self.logger.info().logf("deleting old full snapshot archive: {s}", .{old_name}); try self.snapshot_dir.deleteFile(old_name); }, } @@ -2720,7 +2721,7 @@ pub const AccountsDB = struct { if (bank_hash_stats_map.get(account_file.slot)) |other_stats| { bank_hash_stats.accumulate(other_stats); } else { - self.logger.warnf("No bank hash stats for slot {}.", .{account_file.slot}); + self.logger.warn().logf("No bank hash stats for slot {}.", .{account_file.slot}); } serializable_file_map.putAssumeCapacityNoClobber(account_file.slot, .{ @@ -2805,7 +2806,7 @@ pub const AccountsDB = struct { }); const old_name = old_name_bounded.constSlice(); - self.logger.infof("deleting old incremental snapshot archive: {s}", .{old_name}); + self.logger.info().logf("deleting old incremental snapshot archive: {s}", .{old_name}); try self.snapshot_dir.deleteFile(old_name); }, } @@ -3295,8 +3296,7 @@ fn loadTestAccountsDB(allocator: std.mem.Allocator, use_disk: bool, n_threads: u const snapshot_files = try SnapshotFiles.find(allocator, dir); - const logger = Logger{ .noop = {} }; - // var logger = Logger.init(std.heap.page_allocator, .debug); + const logger = .noop; var snapshots = try AllSnapshotFields.fromFiles(allocator, logger, dir, snapshot_files); errdefer snapshots.deinit(allocator); @@ -3323,8 +3323,7 @@ test "geyser stream on load" { const snapshot_files = try SnapshotFiles.find(allocator, dir); - const logger = Logger{ .noop = {} }; - // var logger = Logger.init(std.heap.page_allocator, .debug); + const logger = .noop; var snapshots = try AllSnapshotFields.fromFiles(allocator, logger, dir, snapshot_files); errdefer snapshots.deinit(allocator); @@ -3525,7 +3524,7 @@ test "load other sysvars" { test "flushing slots works" { const allocator = std.testing.allocator; - const logger = Logger{ .noop = {} }; + const logger = .noop; var snapshot_dir = try std.fs.cwd().makeOpenPath(sig.TEST_DATA_DIR, .{}); defer snapshot_dir.close(); var accounts_db = try AccountsDB.init(allocator, 
logger, snapshot_dir, .{ @@ -3576,7 +3575,7 @@ test "flushing slots works" { test "purge accounts in cache works" { const allocator = std.testing.allocator; - const logger = Logger{ .noop = {} }; + const logger = .noop; var snapshot_dir = try std.fs.cwd().makeOpenPath(sig.TEST_DATA_DIR, .{}); defer snapshot_dir.close(); var accounts_db = try AccountsDB.init(allocator, logger, snapshot_dir, .{ @@ -3633,7 +3632,7 @@ test "purge accounts in cache works" { test "clean to shrink account file works with zero-lamports" { const allocator = std.testing.allocator; - const logger = Logger{ .noop = {} }; + const logger = .noop; var snapshot_dir = try std.fs.cwd().makeOpenPath(sig.TEST_DATA_DIR, .{}); defer snapshot_dir.close(); var accounts_db = try AccountsDB.init(allocator, logger, snapshot_dir, .{ @@ -3709,7 +3708,7 @@ test "clean to shrink account file works with zero-lamports" { test "clean to shrink account file works" { const allocator = std.testing.allocator; - const logger = Logger{ .noop = {} }; + const logger = .noop; var snapshot_dir = try std.fs.cwd().makeOpenPath(sig.TEST_DATA_DIR, .{}); defer snapshot_dir.close(); var accounts_db = try AccountsDB.init(allocator, logger, snapshot_dir, .{ @@ -3777,7 +3776,7 @@ test "clean to shrink account file works" { test "full clean account file works" { const allocator = std.testing.allocator; - const logger = Logger{ .noop = {} }; + const logger = .noop; var snapshot_dir = try std.fs.cwd().makeOpenPath(sig.TEST_DATA_DIR, .{}); defer snapshot_dir.close(); var accounts_db = try AccountsDB.init(allocator, logger, snapshot_dir, .{ @@ -3862,7 +3861,7 @@ test "full clean account file works" { test "shrink account file works" { const allocator = std.testing.allocator; - const logger = Logger{ .noop = {} }; + const logger = .noop; var snapshot_dir = try std.fs.cwd().makeOpenPath(sig.TEST_DATA_DIR, .{}); defer snapshot_dir.close(); var accounts_db = try AccountsDB.init(allocator, logger, snapshot_dir, .{ @@ -4030,10 +4029,14 @@ pub const BenchmarkAccountsDBSnapshotLoad = struct { pub fn loadSnapshot(bench_args: BenchArgs) !sig.time.Duration { const allocator = std.heap.c_allocator; - // const logger = Logger{ .noop = {} }; - const logger = Logger.init(allocator, .debug); - defer logger.deinit(); - logger.spawn(); + var std_logger = StandardErrLogger.init(.{ + .allocator = allocator, + .max_level = Level.debug, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + defer std_logger.deinit(); + + const logger = std_logger.logger(); // unpack the snapshot // NOTE: usually this will be an incremental snapshot @@ -4252,7 +4255,7 @@ pub const BenchmarkAccountsDB = struct { var snapshot_dir = try std.fs.cwd().makeOpenPath(sig.VALIDATOR_DIR ++ "accounts_db", .{}); defer snapshot_dir.close(); - const logger = Logger{ .noop = {} }; + const logger = .noop; var accounts_db: AccountsDB = try AccountsDB.init(allocator, logger, snapshot_dir, .{ .number_of_index_bins = ACCOUNT_INDEX_BINS, .use_disk_index = bench_args.index == .disk, diff --git a/src/accountsdb/download.zig b/src/accountsdb/download.zig index 9fb059251..b1ff8ef66 100644 --- a/src/accountsdb/download.zig +++ b/src/accountsdb/download.zig @@ -164,7 +164,9 @@ pub fn downloadSnapshotsFromGossip( output_dir: std.fs.Dir, min_mb_per_sec: usize, ) !void { - logger.infof("starting snapshot download with min download speed: {d} MB/s", .{min_mb_per_sec}); + logger + .info() + .logf("starting snapshot download with min download speed: {d} MB/s", .{min_mb_per_sec}); // TODO: maybe make this bigger? 
or dynamic? var contact_info_buf: [1_000]ThreadSafeContactInfo = undefined; @@ -208,7 +210,9 @@ pub fn downloadSnapshotsFromGossip( i += r.len; } } - logger.infof("searched for snapshot peers: {s}", .{write_buf[0..i]}); + logger + .info() + .logf("searched for snapshot peers: {s}", .{write_buf[0..i]}); } for (available_snapshot_peers.items) |peer| { @@ -230,7 +234,10 @@ pub fn downloadSnapshotsFromGossip( }); defer allocator.free(snapshot_url); - logger.infof("downloading full_snapshot from: {s}", .{snapshot_url}); + logger + .info() + .logf("downloading full_snapshot from: {s}", .{snapshot_url}); + downloadFile( allocator, logger, @@ -244,11 +251,11 @@ pub fn downloadSnapshotsFromGossip( // downloadFile function error.Unexpected => {}, error.TooSlow => { - logger.infof("peer is too slow, skipping", .{}); + logger.info().log("peer is too slow, skipping"); try slow_peer_pubkeys.append(peer.contact_info.pubkey); }, else => { - logger.infof("failed to download full_snapshot: {s}", .{@errorName(err)}); + logger.info().logf("failed to download full_snapshot: {s}", .{@errorName(err)}); }, } continue; @@ -271,7 +278,7 @@ pub fn downloadSnapshotsFromGossip( }); defer allocator.free(inc_snapshot_url); - logger.infof("downloading inc_snapshot from: {s}", .{inc_snapshot_url}); + logger.info().logf("downloading inc_snapshot from: {s}", .{inc_snapshot_url}); _ = downloadFile( allocator, logger, @@ -282,13 +289,13 @@ pub fn downloadSnapshotsFromGossip( null, ) catch |err| { // failure here is ok (for now?) - logger.warnf("failed to download inc_snapshot: {s}", .{@errorName(err)}); + logger.warn().logf("failed to download inc_snapshot: {s}", .{@errorName(err)}); return; }; } // success - logger.infof("snapshot downloaded finished", .{}); + logger.info().log("snapshot download finished"); return; } } @@ -359,7 +366,7 @@ const DownloadProgress = struct { // each MB const mb_read = self.bytes_read / 1024 / 1024; if (mb_read == 0) { - self.logger.infof("download speed is too slow (<1MB/s) -- disconnecting", .{}); + self.logger.info().log("download speed is too slow (<1MB/s) -- disconnecting"); return 0; } @@ -379,14 +386,14 @@ const DownloadProgress = struct { self.has_checked_speed = true; if (mb_per_second < self.min_mb_per_second.?) 
{ // not fast enough => abort - self.logger.infof("[download progress]: speed is too slow ({d} MB/s) -- disconnecting", .{mb_per_second}); + self.logger.info().logf("[download progress]: speed is too slow ({d} MB/s) -- disconnecting", .{mb_per_second}); return 0; } else { - self.logger.infof("[download progress]: speed is ok ({d} MB/s) -- maintaining", .{mb_per_second}); + self.logger.info().logf("[download progress]: speed is ok ({d} MB/s) -- maintaining", .{mb_per_second}); } } - self.logger.infof("[download progress]: {d}% done ({d} MB/s - {d}/{d}) (time left: {d})", .{ + self.logger.info().logf("[download progress]: {d}% done ({d} MB/s - {d}/{d}) (time left: {d})", .{ self.file_memory_index * 100 / self.download_size, mb_per_second, self.file_memory_index, @@ -437,7 +444,7 @@ pub fn downloadFile( if (try head_resp.getHeader("content-length")) |content_length| { download_size = try std.fmt.parseInt(usize, content_length.get(), 10); } else { - logger.debugf("header request didnt have content-length...", .{}); + logger.debug().logf("header request didnt have content-length...", .{}); return error.NoContentLength; } diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig index a1f8b51ee..e2b8917f6 100644 --- a/src/accountsdb/fuzz.zig +++ b/src/accountsdb/fuzz.zig @@ -4,6 +4,8 @@ const zstd = @import("zstd"); const AccountsDB = sig.accounts_db.AccountsDB; const Logger = sig.trace.Logger; +const StandardErrLogger = sig.trace.ChannelPrintLogger; +const Level = sig.trace.Level; const Account = sig.core.Account; const Slot = sig.core.time.Slot; const Pubkey = sig.core.pubkey.Pubkey; @@ -55,9 +57,14 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { defer _ = gpa_state.deinit(); const allocator = gpa_state.allocator(); - const logger = Logger.init(allocator, .debug); - defer logger.deinit(); - logger.spawn(); + var std_logger = StandardErrLogger.init(.{ + .allocator = allocator, + .max_level = Level.debug, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + defer std_logger.deinit(); + + const logger = std_logger.logger(); const use_disk = rand.boolean(); @@ -255,7 +262,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { true, ); - logger.infof("fuzz[validate]: unpacked full snapshot at slot: {}", .{snapshot_info.slot}); + logger.info().logf("fuzz[validate]: unpacked full snapshot at slot: {}", .{snapshot_info.slot}); var snapshot_files: sig.accounts_db.SnapshotFiles = .{ .full_snapshot = .{ .hash = snapshot_info.hash, @@ -296,7 +303,6 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { const inc_archive_file = try alternative_snapshot_dir.openFile(inc_archive_name.slice(), .{}); defer inc_archive_file.close(); - try sig.accounts_db.snapshots.parallelUnpackZstdTarBall( allocator, .noop, @@ -305,7 +311,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { 5, true, ); - logger.infof("fuzz[validate]: unpacked inc snapshot at slot: {}", .{inc_snapshot_info.slot}); + logger.info().logf("fuzz[validate]: unpacked inc snapshot at slot: {}", .{inc_snapshot_info.slot}); snapshot_files.incremental_snapshot = .{ .base_slot = inc_snapshot_info.base_slot, @@ -328,7 +334,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { _ = try alt_accounts_db.loadWithDefaults(allocator, &snapshot_fields, 1, true, 1_500); const maybe_inc_slot = if (snapshot_files.incremental_snapshot) |inc| inc.slot else null; - logger.infof("loaded and validated snapshot at slot: {} (and inc snapshot @ slot {any})", .{ snapshot_info.slot, 
maybe_inc_slot }); + logger.info().logf("loaded and validated snapshot at slot: {} (and inc snapshot @ slot {any})", .{ snapshot_info.slot, maybe_inc_slot }); } } diff --git a/src/accountsdb/snapshots.zig b/src/accountsdb/snapshots.zig index bd4201d47..f805cc4e6 100644 --- a/src/accountsdb/snapshots.zig +++ b/src/accountsdb/snapshots.zig @@ -1745,7 +1745,7 @@ pub const AllSnapshotFields = struct { const rel_path_bounded = sig.utils.fmt.boundedFmt("snapshots/{0}/{0}", .{files.full_snapshot.slot}); const rel_path = rel_path_bounded.constSlice(); - logger.infof("reading snapshot fields from: {s}", .{sig.utils.fmt.tryRealPath(snapshot_dir, rel_path)}); + logger.info().logf("reading snapshot fields from: {s}", .{sig.utils.fmt.tryRealPath(snapshot_dir, rel_path)}); const full_file = try snapshot_dir.openFile(rel_path, .{}); defer full_file.close(); @@ -1759,7 +1759,7 @@ pub const AllSnapshotFields = struct { const rel_path_bounded = sig.utils.fmt.boundedFmt("snapshots/{0}/{0}", .{incremental_snapshot_path.slot}); const rel_path = rel_path_bounded.constSlice(); - logger.infof("reading inc snapshot fields from: {s}", .{sig.utils.fmt.tryRealPath(snapshot_dir, rel_path)}); + logger.info().logf("reading inc snapshot fields from: {s}", .{sig.utils.fmt.tryRealPath(snapshot_dir, rel_path)}); const incremental_file = try snapshot_dir.openFile(rel_path, .{}); defer incremental_file.close(); @@ -1769,7 +1769,7 @@ pub const AllSnapshotFields = struct { break :blk incremental_fields; } else { - logger.info("no incremental snapshot fields found"); + logger.info().log("no incremental snapshot fields found"); break :blk null; } }; diff --git a/src/benchmarks.zig b/src/benchmarks.zig index b04521a55..bbc33d81a 100644 --- a/src/benchmarks.zig +++ b/src/benchmarks.zig @@ -13,7 +13,6 @@ const meta = std.meta; /// zig build benchmark -- gossip pub fn main() !void { const allocator = std.heap.c_allocator; - sig.trace.log.default_logger.* = sig.trace.Logger.init(allocator, .debug); if (builtin.mode == .Debug) std.debug.print("warning: running benchmark in Debug mode\n", .{}); diff --git a/src/cmd/cmd.zig b/src/cmd/cmd.zig index b2e02cc51..5e89a1627 100644 --- a/src/cmd/cmd.zig +++ b/src/cmd/cmd.zig @@ -21,6 +21,7 @@ const GossipService = sig.gossip.GossipService; const IpAddr = sig.net.IpAddr; const Logger = sig.trace.Logger; const Network = config.Network; +const ChannelPrintLogger = sig.trace.ChannelPrintLogger; const Pubkey = sig.core.Pubkey; const ShredCollectorDependencies = sig.shred_collector.ShredCollectorDependencies; const LeaderSchedule = sig.core.leader_schedule.LeaderSchedule; @@ -595,9 +596,14 @@ pub fn run() !void { /// entrypoint to print (and create if NONE) pubkey in ~/.sig/identity.key fn identity() !void { - var logger = Logger.init(gpa_allocator, config.current.log_level); - defer logger.deinit(); - logger.spawn(); + var std_logger = ChannelPrintLogger.init(.{ + .allocator = gpa_allocator, + .max_level = config.current.log_level, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + defer std_logger.deinit(); + + const logger = std_logger.logger(); const keypair = try getOrInitIdentity(gpa_allocator, logger); var pubkey: [50]u8 = undefined; @@ -844,7 +850,7 @@ const GeyserWriter = sig.geyser.GeyserWriter; fn buildGeyserWriter(allocator: std.mem.Allocator, logger: Logger) !?*GeyserWriter { var geyser_writer: ?*GeyserWriter = null; if (config.current.geyser.enable) { - logger.infof("starting GeyserWriter ({s})...", .{config.current.geyser.pipe_path}); + logger.info().log("Starting 
GeyserWriter..."); const exit = try allocator.create(Atomic(bool)); exit.* = Atomic(bool).init(false); @@ -860,7 +866,7 @@ fn buildGeyserWriter(allocator: std.mem.Allocator, logger: Logger) !?*GeyserWrit // start the geyser writer try geyser_writer.?.spawnIOLoop(); } else { - logger.info("GeyserWriter is disabled."); + logger.info().log("GeyserWriter is disabled."); } return geyser_writer; @@ -924,13 +930,13 @@ fn createSnapshot() !void { defer bin_lg.unlock(); n_accounts_indexed += bin.count(); } - app_base.logger.infof("accountsdb: indexed {d} accounts", .{n_accounts_indexed}); + app_base.logger.info().logf("accountsdb: indexed {d} accounts", .{n_accounts_indexed}); const output_dir_name = "alt_" ++ sig.VALIDATOR_DIR; // TODO: pull out to cli arg var output_dir = try std.fs.cwd().makeOpenPath(output_dir_name, .{}); defer output_dir.close(); - app_base.logger.infof("accountsdb[manager]: generating full snapshot for slot {d}", .{slot}); + app_base.logger.info().logf("accountsdb[manager]: generating full snapshot for slot {d}", .{slot}); try accounts_db.buildFullSnapshot( slot, output_dir, @@ -979,7 +985,7 @@ fn printLeaderSchedule() !void { } const start_slot, const leader_schedule = try getLeaderScheduleFromCli(allocator) orelse b: { - app_base.logger.info("Downloading a snapshot to calculate the leader schedule."); + app_base.logger.info().log("Downloading a snapshot to calculate the leader schedule."); const loaded_snapshot = loadSnapshot( allocator, app_base.logger, @@ -988,7 +994,7 @@ fn printLeaderSchedule() !void { null, ) catch |err| { if (err == error.SnapshotsNotFoundAndNoGossipService) { - app_base.logger.err( + app_base.logger.err().log( \\\ No snapshot found and no gossip service to download a snapshot from. \\\ Download using the `snapshot-download` command. 
); @@ -1098,12 +1104,11 @@ const AppBase = struct { my_ip: IpAddr, fn init(allocator: Allocator) !AppBase { - var logger = try spawnLogger(); - // var logger: Logger = .noop; + const logger = try spawnLogger(); errdefer logger.deinit(); const metrics_registry = globalRegistry(); - logger.infof("metrics port: {d}", .{config.current.metrics_port}); + logger.info().logf("metrics port: {d}", .{config.current.metrics_port}); const metrics_thread = try spawnMetrics(gpa_allocator, config.current.metrics_port); errdefer metrics_thread.detach(); @@ -1154,8 +1159,8 @@ fn initGossip( sockets: []const struct { tag: SocketTag, port: u16 }, ) !GossipService { const gossip_port: u16 = config.current.gossip.port; - logger.infof("gossip host: {any}", .{gossip_host_ip}); - logger.infof("gossip port: {d}", .{gossip_port}); + logger.info().logf("gossip host: {any}", .{gossip_host_ip}); + logger.info().logf("gossip port: {d}", .{gossip_port}); // setup contact info const my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key); @@ -1182,8 +1187,8 @@ fn startGossip( extra_sockets: []const struct { tag: SocketTag, port: u16 }, ) !struct { *GossipService, sig.utils.service_manager.ServiceManager } { const gossip_port = config.current.gossip.port; - app_base.logger.infof("gossip host: {any}", .{app_base.my_ip}); - app_base.logger.infof("gossip port: {d}", .{gossip_port}); + app_base.logger.info().logf("gossip host: {any}", .{app_base.my_ip}); + app_base.logger.info().logf("gossip port: {d}", .{gossip_port}); // setup contact info const my_pubkey = Pubkey.fromPublicKey(&app_base.my_keypair.public_key); @@ -1241,7 +1246,7 @@ fn getMyDataFromIpEcho( if (my_ip_from_entrypoint == null) my_ip_from_entrypoint = response.address; if (response.shred_version) |shred_version| { var addr_str = entrypoint.toString(); - logger.infof( + logger.info().logf( "shred version: {} - from entrypoint ip echo: {s}", .{ shred_version.value, addr_str[0][0..addr_str[1]] }, ); @@ -1249,11 +1254,11 @@ fn getMyDataFromIpEcho( } } else |_| {} } else { - logger.warn("could not get a shred version from an entrypoint"); + logger.warn().log("could not get a shred version from an entrypoint"); break :loop 0; }; const my_ip = try (config.current.gossip.getHost() orelse (my_ip_from_entrypoint orelse IpAddr.newIpv4(127, 0, 0, 1))); - logger.infof("my ip: {}", .{my_ip}); + logger.info().logf("my ip: {}", .{my_ip}); return .{ .shred_version = my_shred_version, .ip = my_ip, @@ -1262,17 +1267,20 @@ fn getMyDataFromIpEcho( fn resolveSocketAddr(entrypoint: []const u8, logger: Logger) !SocketAddr { const domain_port_sep = std.mem.indexOfScalar(u8, entrypoint, ':') orelse { - logger.field("entrypoint", entrypoint).err("entrypoint port missing"); + logger.err() + .field("entrypoint", entrypoint) + .log("entrypoint port missing"); + return error.EntrypointPortMissing; }; const domain_str = entrypoint[0..domain_port_sep]; if (domain_str.len == 0) { - logger.errf("'{s}': entrypoint domain not valid", .{entrypoint}); + logger.err().logf("'{s}': entrypoint domain not valid", .{entrypoint}); return error.EntrypointDomainNotValid; } // parse port from string const port = std.fmt.parseInt(u16, entrypoint[domain_port_sep + 1 ..], 10) catch { - logger.errf("'{s}': entrypoint port not valid", .{entrypoint}); + logger.err().logf("'{s}': entrypoint port not valid", .{entrypoint}); return error.EntrypointPortNotValid; }; @@ -1281,7 +1289,7 @@ fn resolveSocketAddr(entrypoint: []const u8, logger: Logger) !SocketAddr { defer addr_list.deinit(); if (addr_list.addrs.len == 0) { - 
logger.errf("'{s}': entrypoint resolve dns failed (no records found)", .{entrypoint}); + logger.err().logf("'{s}': entrypoint resolve dns failed (no records found)", .{entrypoint}); return error.EntrypointDnsResolutionFailure; } @@ -1306,7 +1314,7 @@ fn getEntrypoints(logger: Logger) !std.ArrayList(SocketAddr) { if (try config.current.gossip.getNetwork()) |cluster| { for (cluster.entrypoints()) |entrypoint| { - logger.infof("adding predefined entrypoint: {s}", .{entrypoint}); + logger.info().logf("adding predefined entrypoint: {s}", .{entrypoint}); const socket_addr = try resolveSocketAddr(entrypoint, .noop); try entrypoints.append(socket_addr); } @@ -1324,15 +1332,18 @@ fn getEntrypoints(logger: Logger) !std.ArrayList(SocketAddr) { } // log entrypoints - logger.infof("entrypoints: {any}", .{entrypoints.items}); + logger.info().logf("entrypoints: {any}", .{entrypoints.items}); return entrypoints; } fn spawnLogger() !Logger { - var logger = Logger.init(gpa_allocator, config.current.log_level); - logger.spawn(); - return logger; + var std_logger = ChannelPrintLogger.init(.{ + .allocator = gpa_allocator, + .max_level = config.current.log_level, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + return std_logger.logger(); } const LoadedSnapshot = struct { @@ -1383,11 +1394,11 @@ fn loadSnapshot( }); result.snapshot_fields = all_snapshot_fields; - logger.infof("full snapshot: {s}", .{ + logger.info().logf("full snapshot: {s}", .{ sig.utils.fmt.tryRealPath(snapshot_dir, snapshot_files.full_snapshot.snapshotNameStr().constSlice()), }); if (snapshot_files.incremental_snapshot) |inc_snap| { - logger.infof("incremental snapshot: {s}", .{ + logger.info().logf("incremental snapshot: {s}", .{ sig.utils.fmt.tryRealPath(snapshot_dir, inc_snap.snapshotNameStr().constSlice()), }); } @@ -1402,7 +1413,7 @@ fn loadSnapshot( break :blk cli_n_threads_snapshot_load; } }; - logger.infof("n_threads_snapshot_load: {d}", .{n_threads_snapshot_load}); + logger.info().logf("n_threads_snapshot_load: {d}", .{n_threads_snapshot_load}); result.accounts_db = try AccountsDB.init( allocator, @@ -1429,23 +1440,23 @@ fn loadSnapshot( const bank_fields = &snapshot_fields.bank_fields; // this should exist before we start to unpack - logger.infof("reading genesis...", .{}); + logger.info().log("reading genesis..."); result.genesis_config = readGenesisConfig(allocator, genesis_file_path) catch |err| { if (err == error.GenesisNotFound) { - logger.errf("genesis config not found - expecting {s} to exist", .{genesis_file_path}); + logger.err().logf("genesis config not found - expecting {s} to exist", .{genesis_file_path}); } return err; }; errdefer result.genesis_config.deinit(allocator); - logger.infof("validating bank...", .{}); + logger.info().log("validating bank..."); result.bank = Bank.init(&result.accounts_db, bank_fields); try Bank.validateBankFields(result.bank.bank_fields, &result.genesis_config); // validate the status cache result.status_cache = readStatusCache(allocator, snapshot_dir) catch |err| { if (err == error.StatusCacheNotFound) { - logger.errf("status-cache.bin not found - expecting {s}/snapshots/status-cache to exist", .{snapshot_dir_str}); + logger.err().logf("status-cache.bin not found - expecting {s}/snapshots/status-cache to exist", .{snapshot_dir_str}); } return err; }; @@ -1455,7 +1466,7 @@ fn loadSnapshot( defer slot_history.deinit(result.accounts_db.allocator); try result.status_cache.validate(allocator, bank_fields.slot, &slot_history); - logger.infof("accounts-db setup done...", .{}); + 
logger.info().log("accounts-db setup done..."); return result; } @@ -1481,7 +1492,7 @@ fn readStatusCache(allocator: Allocator, snapshot_dir: std.fs.Dir) !StatusCache /// entrypoint to download snapshot fn downloadSnapshot() !void { - var logger = try spawnLogger(); + const logger = try spawnLogger(); defer logger.deinit(); var counter = std.atomic.Value(usize).init(0); @@ -1600,7 +1611,7 @@ fn getOrDownloadSnapshots( }; if (snapshot_files.incremental_snapshot == null) { - logger.infof("no incremental snapshot found", .{}); + logger.info().log("no incremental snapshot found"); } // if this exists, we wont look for a .tar.zstd @@ -1625,19 +1636,19 @@ fn getOrDownloadSnapshots( const dir_size = (try accounts_dir.stat()).size; if (dir_size <= 100) { should_unpack_snapshot = true; - logger.infof("empty accounts/ directory found, will unpack snapshot...", .{}); + logger.info().log("empty accounts/ directory found, will unpack snapshot..."); } else { - logger.infof("accounts/ directory found, will not unpack snapshot...", .{}); + logger.info().log("accounts/ directory found, will not unpack snapshot..."); } } var timer = try std.time.Timer.start(); if (should_unpack_snapshot) { - logger.infof("unpacking snapshots...", .{}); + logger.info().log("unpacking snapshots..."); // if accounts/ doesnt exist then we unpack the found snapshots // TODO: delete old accounts/ dir if it exists timer.reset(); - logger.infof("unpacking {s}...", .{snapshot_files.full_snapshot.snapshotNameStr().constSlice()}); + logger.info().logf("unpacking {s}...", .{snapshot_files.full_snapshot.snapshotNameStr().constSlice()}); { const archive_file = try snapshot_dir.openFile(snapshot_files.full_snapshot.snapshotNameStr().constSlice(), .{}); defer archive_file.close(); @@ -1650,12 +1661,12 @@ fn getOrDownloadSnapshots( true, ); } - logger.infof("unpacked snapshot in {s}", .{std.fmt.fmtDuration(timer.read())}); + logger.info().logf("unpacked snapshot in {s}", .{std.fmt.fmtDuration(timer.read())}); // TODO: can probs do this in parallel with full snapshot if (snapshot_files.incremental_snapshot) |incremental_snapshot| { timer.reset(); - logger.infof("unpacking {s}...", .{incremental_snapshot.snapshotNameStr().constSlice()}); + logger.info().logf("unpacking {s}...", .{incremental_snapshot.snapshotNameStr().constSlice()}); const archive_file = try snapshot_dir.openFile(incremental_snapshot.snapshotNameStr().constSlice(), .{}); defer archive_file.close(); @@ -1668,16 +1679,16 @@ fn getOrDownloadSnapshots( n_threads_snapshot_unpack, false, ); - logger.infof("unpacked snapshot in {s}", .{std.fmt.fmtDuration(timer.read())}); + logger.info().logf("unpacked snapshot in {s}", .{std.fmt.fmtDuration(timer.read())}); } } else { - logger.infof("not unpacking snapshot...", .{}); + logger.info().log("not unpacking snapshot..."); } timer.reset(); - logger.infof("reading snapshot metadata...", .{}); + logger.info().log("reading snapshot metadata..."); const snapshots = try AllSnapshotFields.fromFiles(allocator, logger, snapshot_dir, snapshot_files); - logger.infof("read snapshot metdata in {s}", .{std.fmt.fmtDuration(timer.read())}); + logger.info().logf("read snapshot metdata in {s}", .{std.fmt.fmtDuration(timer.read())}); return .{ snapshots, snapshot_files }; } diff --git a/src/cmd/helpers.zig b/src/cmd/helpers.zig index 91a6b8b4d..89f98791a 100644 --- a/src/cmd/helpers.zig +++ b/src/cmd/helpers.zig @@ -29,7 +29,7 @@ pub fn getOrInitIdentity(allocator: std.mem.Allocator, logger: Logger) !Keypair // create ~/.sig dir const dir = try 
std.mem.concat(allocator, u8, &[_][]const u8{ home_dir, IDENTITY_KEYPAIR_DIR }); std.fs.makeDirAbsolute(dir) catch { - logger.debugf("sig directory already exists...", .{}); + logger.debug().log("sig directory already exists..."); }; // create new keypair diff --git a/src/core/hard_forks.zig b/src/core/hard_forks.zig index 91b66d1a9..8a171d2e7 100644 --- a/src/core/hard_forks.zig +++ b/src/core/hard_forks.zig @@ -63,10 +63,12 @@ pub const HardForks = struct { test "core.hard_forks: test hardforks" { const Logger = @import("../trace/log.zig").Logger; + const TestingLogger = @import("../trace/log.zig").DirectPrintLogger; const testing_alloc = std.testing.allocator; - var logger = Logger.init(testing_alloc, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + + var test_logger = TestingLogger.init(testing_alloc, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var hard_forks = HardForks.default(testing_alloc); defer hard_forks.deinit(); @@ -82,13 +84,13 @@ test "core.hard_forks: test hardforks" { const hash_data_one = hard_forks.get_hash_data(9, 0); try expect(hash_data_one == null); - logger.debugf("hash_data_one: {any}", .{hash_data_one}); + logger.debug().logf("hash_data_one: {any}", .{hash_data_one}); var hash_data_two = hard_forks.get_hash_data(10, 0); try expect(hash_data_two != null); try expect(std.mem.eql(u8, &hash_data_two.?, &[8]u8{ 1, 0, 0, 0, 0, 0, 0, 0 })); - logger.debugf("hard_forks_two: {any}", .{hash_data_two}); + logger.debug().logf("hard_forks_two: {any}", .{hash_data_two}); try expect(eql(u8, &hard_forks.get_hash_data(19, 0).?, &[8]u8{ 1, 0, 0, 0, 0, 0, 0, 0 })); try expect(eql(u8, &hard_forks.get_hash_data(20, 0).?, &[8]u8{ 2, 0, 0, 0, 0, 0, 0, 0 })); diff --git a/src/core/shred.zig b/src/core/shred.zig index 2aa55b8cb..ac5577bb5 100644 --- a/src/core/shred.zig +++ b/src/core/shred.zig @@ -38,33 +38,36 @@ pub const ShredVersion = struct { test "core.shred: test ShredVersion" { const Logger = @import("../trace/log.zig").Logger; + const DirectPrintLogger = @import("../trace/log.zig").DirectPrintLogger; + var hash = Hash{ .data = [_]u8{ 180, 194, 54, 239, 216, 26, 164, 170, 3, 72, 104, 87, 32, 189, 12, 254, 9, 103, 99, 155, 117, 158, 241, 0, 95, 128, 64, 174, 42, 158, 205, 26 } }; const version = ShredVersion.versionFromHash(&hash); try std.testing.expect(version == 44810); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); const testing_alloc = std.testing.allocator; + var test_logger = DirectPrintLogger.init(testing_alloc, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); + const shred_version_one = ShredVersion.computeShredVersion(Hash.default(), null); try std.testing.expect(shred_version_one == 1); - logger.debugf("shred_version_one: {}", .{shred_version_one}); + logger.debug().logf("shred_version_one: {}", .{shred_version_one}); var hard_forks = HardForks.default(testing_alloc); defer _ = hard_forks.deinit(); const shred_version_two = ShredVersion.computeShredVersion(Hash.default(), hard_forks); try std.testing.expect(shred_version_two == 1); - logger.debugf("shred_version_two: {}", .{shred_version_two}); + logger.debug().logf("shred_version_two: {}", .{shred_version_two}); try hard_forks.register(1); const shred_version_three = ShredVersion.computeShredVersion(Hash.default(), hard_forks); try std.testing.expect(shred_version_three == 55551); - logger.debugf("shred_version_three: {}", .{shred_version_three}); + 
logger.debug().logf("shred_version_three: {}", .{shred_version_three}); try hard_forks.register(1); const shred_version_four = ShredVersion.computeShredVersion(Hash.default(), hard_forks); try std.testing.expect(shred_version_four == 46353); - logger.debugf("shred_version_three: {}", .{shred_version_four}); + logger.debug().logf("shred_version_three: {}", .{shred_version_four}); } diff --git a/src/fuzz.zig b/src/fuzz.zig index 1b3ea350a..a3db069b8 100644 --- a/src/fuzz.zig +++ b/src/fuzz.zig @@ -6,7 +6,8 @@ const accountsdb_fuzz = sig.accounts_db.fuzz; const gossip_fuzz_service = sig.gossip.fuzz_service; const gossip_fuzz_table = sig.gossip.fuzz_table; const accountsdb_snapshot_fuzz = sig.accounts_db.fuzz_snapshot; -const logger = sig.trace.log; +const StandardErrLogger = sig.trace.ChannelPrintLogger; +const Level = sig.trace.Level; const spawnMetrics = sig.prometheus.spawnMetrics; @@ -27,12 +28,19 @@ pub fn main() !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - logger.default_logger.* = logger.Logger.init(std.heap.c_allocator, .debug); + var std_logger = StandardErrLogger.init(.{ + .allocator = std.heap.c_allocator, + .max_level = Level.debug, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + defer std_logger.deinit(); + + const logger = std_logger.logger(); var cli_args = try std.process.argsWithAllocator(allocator); defer cli_args.deinit(); - logger.default_logger.infof("metrics port: {d}", .{config.current.metrics_port}); + logger.info().logf("metrics port: {d}", .{config.current.metrics_port}); const metrics_thread = try spawnMetrics( // TODO: use the GPA here, the server is just leaking because we're losing the handle // to it and never deiniting. diff --git a/src/geyser/main.zig b/src/geyser/main.zig index 6e4e840ce..c2ce1f7bf 100644 --- a/src/geyser/main.zig +++ b/src/geyser/main.zig @@ -153,40 +153,45 @@ pub fn csvDump() !void { std.heap.c_allocator; defer _ = gpa.deinit(); - var logger = sig.trace.Logger.init(allocator, .info); - defer logger.deinit(); - logger.spawn(); + var std_logger = sig.trace.ChannelPrintLogger.init(.{ + .allocator = std.heap.c_allocator, + .max_level = sig.trace.Level.debug, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + defer std_logger.deinit(); + + const logger = std_logger.logger(); const metrics_thread = try sig.prometheus.spawnMetrics(allocator, 12355); metrics_thread.detach(); - logger.info("spawing metrics thread on port 12355"); + logger.info().log("spawing metrics thread on port 12355"); const pipe_path = config.pipe_path; - logger.infof("using pipe path: {s}", .{pipe_path}); + logger.info().logf("using pipe path: {s}", .{pipe_path}); // owner filters var maybe_owner_pubkeys = try getOwnerFilters(allocator); defer if (maybe_owner_pubkeys) |*owners| owners.deinit(); if (maybe_owner_pubkeys) |owner_pubkeys| { - logger.infof("owner filters: {s}", .{owner_pubkeys.keys()}); + logger.info().logf("owner filters: {s}", .{owner_pubkeys.keys()}); } else { - logger.info("owner filters: none"); + logger.info().log("owner filters: none"); } // account filters var maybe_account_pubkeys = try getAccountFilters(allocator); defer if (maybe_account_pubkeys) |*accounts| accounts.deinit(); if (maybe_account_pubkeys) |account_pubkeys| { - logger.infof("account filters: {s}", .{account_pubkeys.keys()}); + logger.info().logf("account filters: {s}", .{account_pubkeys.keys()}); } else { - logger.info("account filters: none"); + logger.info().log("account filters: none"); } // csv file to dump to const 
dump_csv_path = sig.VALIDATOR_DIR ++ "accounts.csv"; const csv_file = try std.fs.cwd().createFile(dump_csv_path, .{}); defer csv_file.close(); - logger.infof("dumping to csv: {s}", .{dump_csv_path}); + logger.info().logf("dumping to csv: {s}", .{dump_csv_path}); // setup reader var exit = std.atomic.Value(bool).init(false); @@ -227,7 +232,7 @@ pub fn csvDump() !void { // NOTE: since accounts-db isnt hooked up to the rest to the validator (svm, consensus, etc.) // valid account state is only from snapshots. we can safely exit here because no new accounts // are expected. - logger.infof("recv end of snapshot loading signal", .{}); + logger.info().log("recv end of snapshot loading signal"); exit.store(true, .monotonic); break; }, diff --git a/src/gossip/dump_service.zig b/src/gossip/dump_service.zig index 129ce032c..b02e35afb 100644 --- a/src/gossip/dump_service.zig +++ b/src/gossip/dump_service.zig @@ -89,6 +89,6 @@ pub const GossipDumpService = struct { // output results try file.writeAll("message_type,pubkey,hash,wallclock,gossip_addr,shred_version\n"); try file.writeAll(data.buf[0..data.buf_len]); - self.logger.infof("gossip table size at {}s: {}", .{ now -| start_time, data.table_len }); + self.logger.info().logf("gossip table size at {}s: {}", .{ now -| start_time, data.table_len }); } }; diff --git a/src/gossip/fuzz_service.zig b/src/gossip/fuzz_service.zig index 875008bab..bd35e079f 100644 --- a/src/gossip/fuzz_service.zig +++ b/src/gossip/fuzz_service.zig @@ -269,7 +269,6 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { const client_pubkey = Pubkey.fromPublicKey(&client_keypair.public_key); var client_contact_info = ContactInfo.init(allocator, client_pubkey, 0, 19); try client_contact_info.setSocket(.gossip, client_address); - var gossip_service_client = try GossipService.init( allocator, allocator, @@ -286,7 +285,6 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { .dump = false, }, }); - // this is used to respond to pings var gossip_service_fuzzer = try GossipService.init( allocator, diff --git a/src/gossip/fuzz_table.zig b/src/gossip/fuzz_table.zig index 127988dd8..ace80f0b0 100644 --- a/src/gossip/fuzz_table.zig +++ b/src/gossip/fuzz_table.zig @@ -15,6 +15,8 @@ const GossipKey = sig.gossip.data.GossipKey; const Signature = sig.core.Signature; const ThreadPool = sig.sync.thread_pool.ThreadPool; const Duration = sig.time.Duration; +const StandardErrLogger = sig.trace.ChannelPrintLogger; +const Level = sig.trace.Level; const TRIM_INTERVAL = Duration.fromSecs(2); const MAX_N_THREADS = 2; @@ -34,9 +36,14 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { const allocator = gpa.allocator(); defer _ = gpa.deinit(); - const logger = Logger.init(allocator, .debug); - defer logger.deinit(); - logger.spawn(); + var std_logger = StandardErrLogger.init(.{ + .allocator = allocator, + .max_level = Level.debug, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + defer std_logger.deinit(); + + const logger = std_logger.logger(); var prng = std.rand.DefaultPrng.init(seed); const rand = prng.random(); @@ -88,7 +95,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { while (true) { if (maybe_max_actions) |max_actions| { if (total_action_count >= max_actions) { - logger.infof("reached max actions: {}", .{max_actions}); + logger.info().logf("reached max actions: {}", .{max_actions}); break; } } @@ -118,7 +125,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { signed_data.wallclockPtr().* = now; // ! 
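Reviewer note: three logger flavors recur across this diff: ChannelPrintLogger (channel-backed, async; long-running binaries and fuzz drivers), DirectPrintLogger (synchronous; tests), and `.noop` (tests that want silence). A minimal sketch of the test-side pattern, condensed from the hard_forks/shred test hunks above -- the init signature and Logger.TEST_DEFAULT_LEVEL are copied from those hunks, the log calls are illustrative:

    const std = @import("std");
    const Logger = @import("../trace/log.zig").Logger;
    const DirectPrintLogger = @import("../trace/log.zig").DirectPrintLogger;

    test "example: scoped logging in a test" {
        // Synchronous logger: no spawn()/deinit() lifecycle to manage,
        // which is why the test hunks drop the old `logger.spawn()` call.
        var test_logger = DirectPrintLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL);
        const logger = test_logger.logger();

        logger.info().log("plain message"); // unformatted entry
        logger.debug().logf("value: {d}", .{42}); // formatted entry
    }
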
- logger.debugf("putting pubkey: {}", .{pubkey}); + logger.debug().logf("putting pubkey: {}", .{pubkey}); const result = try gossip_table.insert(signed_data, now); std.debug.assert(result.wasInserted()); @@ -142,10 +149,10 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { const should_overwrite = rand.boolean(); if (should_overwrite) { - logger.debugf("overwriting pubkey: {}", .{pubkey}); + logger.debug().logf("overwriting pubkey: {}", .{pubkey}); signed_data.wallclockPtr().* = now; } else { - logger.debugf("writing old pubkey: {}", .{pubkey}); + logger.debug().logf("writing old pubkey: {}", .{pubkey}); const other_insertion_time = insertion_times.items[index]; signed_data.wallclockPtr().* = other_insertion_time -| rand.intRangeAtMost(u64, 10, 100); } @@ -157,7 +164,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { std.debug.assert(!should_overwrite); } if (result == .IgnoredDuplicateValue) { - logger.debugf("duplicate value: {}", .{pubkey}); + logger.debug().logf("duplicate value: {}", .{pubkey}); } if (!should_overwrite and did_insert) { @@ -182,22 +189,22 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { const search_key = keys.items[index]; errdefer { - logger.errf("pubkey failed: {} with key: {}", .{ pubkey, search_key }); + logger.err().logf("pubkey failed: {} with key: {}", .{ pubkey, search_key }); } const versioned_data = gossip_table.get(search_key) orelse { - logger.errf("failed to get pubkey: {}", .{search_key}); + logger.err().logf("failed to get pubkey: {}", .{search_key}); return error.PubkeyNotFound; }; if (!versioned_data.value.signature.eql(&signatures.get(pubkey).?)) { - logger.errf("signature mismatch: {}", .{pubkey}); + logger.err().logf("signature mismatch: {}", .{pubkey}); return error.SignatureMismatch; } // via direct method if (gossip_table.getThreadSafeContactInfo(pubkey) == null) { - logger.errf("failed to get contact info: {}", .{pubkey}); + logger.err().logf("failed to get contact info: {}", .{pubkey}); return error.ContactInfoNotFound; } @@ -209,7 +216,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { } } else false; if (!found) { - logger.errf("failed to find pubkey: {}", .{pubkey}); + logger.err().logf("failed to find pubkey: {}", .{pubkey}); return error.ContactInfoNotFound; } @@ -227,14 +234,14 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { const pubkeys_droppped_count = try gossip_table.attemptTrim(now, max_pubkey_capacity); if (pubkeys_droppped_count == 0) continue; - logger.infof("op(trim): table size: {} -> {}", .{ size, gossip_table.len() }); + logger.info().logf("op(trim): table size: {} -> {}", .{ size, gossip_table.len() }); } else { // NOTE: not completely accurate, but good enough const middle_index = insertion_times.items.len / 2; const middle_insert_time = insertion_times.items[middle_index]; _ = try gossip_table.removeOldLabels(middle_insert_time, 0); - logger.infof("op(remove-old-labels): table size: {} -> {}", .{ size, gossip_table.len() }); + logger.info().logf("op(remove-old-labels): table size: {} -> {}", .{ size, gossip_table.len() }); } // reset the pubkey list @@ -260,13 +267,13 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { } } - logger.infof("put: {}, get: {}", .{ put_count, get_count }); + logger.info().logf("put: {}, get: {}", .{ put_count, get_count }); put_count = 0; get_count = 0; if (maybe_max_actions) |max_actions| { const percent_int = (total_action_count * 100) / max_actions; - logger.infof("total actions: {} / {} ({}%)", .{ 
total_action_count, max_actions, percent_int }); + logger.info().logf("total actions: {} / {} ({}%)", .{ total_action_count, max_actions, percent_int }); } } } diff --git a/src/gossip/service.zig b/src/gossip/service.zig index 34baae5ea..b838858c5 100644 --- a/src/gossip/service.zig +++ b/src/gossip/service.zig @@ -200,7 +200,7 @@ pub const GossipService = struct { .max_threads = @intCast(n_threads), .stack_size = 2 * 1024 * 1024, }); - logger.debugf("using n_threads in gossip: {}", .{n_threads}); + logger.debug().logf("using n_threads in gossip: {}", .{n_threads}); var gossip_table = try GossipTable.init(gossip_value_allocator, thread_pool); errdefer gossip_table.deinit(); @@ -420,18 +420,18 @@ pub const GossipService = struct { packet.data[0..packet.size], bincode.Params.standard, ) catch |e| { - self.logger.errf("gossip: packet_verify: failed to deserialize: {s}", .{@errorName(e)}); + self.logger.err().logf("gossip: packet_verify: failed to deserialize: {s}", .{@errorName(e)}); return; }; message.sanitize() catch |e| { - self.logger.errf("gossip: packet_verify: failed to sanitize: {s}", .{@errorName(e)}); + self.logger.err().logf("gossip: packet_verify: failed to sanitize: {s}", .{@errorName(e)}); bincode.free(self.gossip_value_allocator, message); return; }; message.verifySignature() catch |e| { - self.logger.errf( + self.logger.err().logf( "gossip: packet_verify: failed to verify signature from {}: {s}", .{ packet.addr, @errorName(e) }, ); @@ -454,7 +454,7 @@ pub const GossipService = struct { defer { // trigger the next service in the chain to close self.counter.store(idx + 1, .release); - self.logger.debugf("verifyPackets loop closed", .{}); + self.logger.debug().log("verifyPackets loop closed"); } const tasks = try VerifyMessageTask.init(self.allocator, VERIFY_PACKET_PARALLEL_TASKS); @@ -486,7 +486,7 @@ pub const GossipService = struct { const task_ptr = &tasks[acquired_task_idx]; task_ptr.entry.packet = packet; - task_ptr.result catch |err| self.logger.errf("VerifyMessageTask encountered error: {s}", .{@errorName(err)}); + task_ptr.result catch |err| self.logger.err().logf("VerifyMessageTask encountered error: {s}", .{@errorName(err)}); const batch = Batch.from(&task_ptr.task); self.thread_pool.schedule(batch); @@ -495,7 +495,7 @@ pub const GossipService = struct { for (tasks) |*task| { task.blockUntilCompletion(); - task.result catch |err| self.logger.errf("VerifyMessageTask encountered error: {s}", .{@errorName(err)}); + task.result catch |err| self.logger.err().logf("VerifyMessageTask encountered error: {s}", .{@errorName(err)}); } } @@ -532,7 +532,7 @@ pub const GossipService = struct { defer { // even if we fail, trigger the next thing self.counter.store(idx + 1, .release); - self.logger.debugf("processMessages loop closed", .{}); + self.logger.debug().log("processMessages loop closed"); } // we batch messages bc: @@ -701,7 +701,7 @@ pub const GossipService = struct { if (push_messages.items.len > 0) { var x_timer = try sig.time.Timer.start(); self.handleBatchPushMessages(&push_messages) catch |err| { - self.logger.errf("handleBatchPushMessages failed: {}", .{err}); + self.logger.err().logf("handleBatchPushMessages failed: {}", .{err}); }; const elapsed = x_timer.read().asMillis(); self.stats.handle_batch_push_time.observe(@floatFromInt(elapsed)); @@ -721,7 +721,7 @@ pub const GossipService = struct { if (pull_requests.items.len > 0) { var x_timer = try sig.time.Timer.start(); self.handleBatchPullRequest(pull_requests, seed + msg_count) catch |err| { - 
self.logger.errf("handleBatchPullRequest failed: {}", .{err}); + self.logger.err().logf("handleBatchPullRequest failed: {}", .{err}); }; const elapsed = x_timer.read().asMillis(); self.stats.handle_batch_pull_req_time.observe(@floatFromInt(elapsed)); @@ -732,7 +732,7 @@ pub const GossipService = struct { if (pull_responses.items.len > 0) { var x_timer = try sig.time.Timer.start(); self.handleBatchPullResponses(pull_responses.items) catch |err| { - self.logger.errf("handleBatchPullResponses failed: {}", .{err}); + self.logger.err().logf("handleBatchPullResponses failed: {}", .{err}); }; const elapsed = x_timer.read().asMillis(); self.stats.handle_batch_pull_resp_time.observe(@floatFromInt(elapsed)); @@ -743,7 +743,7 @@ pub const GossipService = struct { if (ping_messages.items.len > 0) { var x_timer = try sig.time.Timer.start(); self.handleBatchPingMessages(&ping_messages) catch |err| { - self.logger.errf("handleBatchPingMessages failed: {}", .{err}); + self.logger.err().logf("handleBatchPingMessages failed: {}", .{err}); }; const elapsed = x_timer.read().asMillis(); self.stats.handle_batch_ping_time.observe(@floatFromInt(elapsed)); @@ -790,7 +790,7 @@ pub const GossipService = struct { var x_timer = sig.time.Timer.start() catch unreachable; const now = getWallclockMs(); const n_pubkeys_dropped = gossip_table.attemptTrim(now, UNIQUE_PUBKEY_CAPACITY) catch |err| err_blk: { - self.logger.warnf("gossip_table.attemptTrim failed: {s}", .{@errorName(err)}); + self.logger.warn().logf("gossip_table.attemptTrim failed: {s}", .{@errorName(err)}); break :err_blk 0; }; const elapsed = x_timer.read().asMillis(); @@ -808,7 +808,7 @@ pub const GossipService = struct { fn buildMessages(self: *Self, seed: u64, idx: usize) !void { defer { self.counter.store(idx + 1, .release); - self.logger.infof("buildMessages loop closed", .{}); + self.logger.info().log("buildMessages loop closed"); } var loop_timer = try sig.time.Timer.start(); @@ -836,7 +836,7 @@ pub const GossipService = struct { pull_request.MAX_BLOOM_SIZE, now, ) catch |e| { - self.logger.errf("failed to generate pull requests: {any}", .{e}); + self.logger.err().logf("failed to generate pull requests: {any}", .{e}); break :pull_blk; }; for (packets.items) |packet| { @@ -848,7 +848,7 @@ pub const GossipService = struct { // new push msgs self.drainPushQueueToGossipTable(getWallclockMs()); const maybe_push_packets = self.buildPushMessages(&push_cursor) catch |e| blk: { - self.logger.errf("failed to generate push messages: {any}", .{e}); + self.logger.err().logf("failed to generate push messages: {any}", .{e}); break :blk null; }; if (maybe_push_packets) |push_packets| { @@ -1411,10 +1411,10 @@ pub const GossipService = struct { const endpoint_str = try endpointToString(self.allocator, ping_message.from_endpoint); defer endpoint_str.deinit(); - self.logger + self.logger.debug() .field("from_endpoint", endpoint_str.items) .field("from_pubkey", ping_message.ping.from.string().slice()) - .debug("gossip: recv ping"); + .log("gossip: recv ping"); try self.packet_outgoing_channel.send(packet); self.stats.pong_messages_sent.add(1); @@ -1707,9 +1707,10 @@ pub const GossipService = struct { const msg = GossipMessage{ .PruneMessage = .{ self.my_pubkey, prune_data } }; self.logger + .debug() .field("n_pruned_origins", prune_size) .field("to_addr", from_pubkey.string().slice()) - .debug("gossip: send prune_message"); + .log("gossip: send prune_message"); var packet = Packet.default(); const written_slice = bincode.writeToSlice(&packet.data, msg, bincode.Params{}) 
catch unreachable; @@ -1785,7 +1786,7 @@ pub const GossipService = struct { if (entrypoint.info) |info| { if (info.shred_version != 0) { var addr_str = entrypoint.addr.toString(); - self.logger.infof( + self.logger.info().logf( "shred version: {} - from entrypoint contact info: {s}", .{ info.shred_version, addr_str[0][0..addr_str[1]] }, ); @@ -2092,7 +2093,7 @@ pub const GossipStats = struct { .prune_messages_sent = self.prune_messages_sent.get(), }; - logging_fields.logger.infof( + logging_fields.logger.info().logf( "gossip: recv {}: {} ping, {} pong, {} push, {} pull request, {} pull response, {} prune", .{ current_stats.gossip_packets_received_total - logging_fields.last_logged_snapshot.gossip_packets_received_total, @@ -2104,7 +2105,7 @@ pub const GossipStats = struct { current_stats.prune_messages_recv - logging_fields.last_logged_snapshot.prune_messages_recv, }, ); - logging_fields.logger.infof( + logging_fields.logger.info().logf( "gossip: sent: {} ping, {} pong, {} push, {} pull request, {} pull response, {} prune", .{ current_stats.ping_messages_sent - logging_fields.last_logged_snapshot.ping_messages_sent, @@ -2201,6 +2202,8 @@ pub fn chunkValuesIntoPacketIndexes( return packet_indexs; } +const TestingLogger = @import("../trace/log.zig").DirectPrintLogger; + test "handle pong messages" { const allocator = std.testing.allocator; @@ -2277,9 +2280,12 @@ test "build messages startup and shutdown" { const my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key); const contact_info = try localhostTestContactInfo(my_pubkey); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init( + std.testing.allocator, + Logger.TEST_DEFAULT_LEVEL, + ); + + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( @@ -2336,9 +2342,9 @@ test "handling prune messages" { const my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key); const contact_info = try localhostTestContactInfo(my_pubkey); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( @@ -2413,9 +2419,9 @@ test "handling pull responses" { var my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key); const contact_info = try localhostTestContactInfo(my_pubkey); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( @@ -2560,10 +2566,9 @@ test "handle pull request" { var contact_info = try localhostTestContactInfo(my_pubkey); contact_info.shred_version = 99; - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( allocator, @@ -2669,9 +2674,9 @@ test "test build prune messages and handle push messages" { const my_pubkey = 
Pubkey.fromPublicKey(&my_keypair.public_key); const contact_info = try localhostTestContactInfo(my_pubkey); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( @@ -2748,9 +2753,9 @@ test "build pull requests" { const my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key); const contact_info = try localhostTestContactInfo(my_pubkey); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( @@ -2806,9 +2811,9 @@ test "test build push messages" { const my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key); const contact_info = try localhostTestContactInfo(my_pubkey); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( @@ -2885,9 +2890,9 @@ test "test large push messages" { const my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key); const contact_info = try localhostTestContactInfo(my_pubkey); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( @@ -2945,8 +2950,6 @@ test "test packet verification" { const contact_info = try localhostTestContactInfo(id); // noop for this case because this tests error failed verification - const logger: Logger = .noop; - var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( allocator, @@ -2955,7 +2958,7 @@ test "test packet verification" { keypair, null, &counter, - logger, + .noop, ); defer gossip_service.deinit(); @@ -3063,9 +3066,9 @@ test "process contact info push packet" { const my_pubkey = Pubkey.fromPublicKey(&my_keypair.public_key); const contact_info = try localhostTestContactInfo(my_pubkey); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var counter = Atomic(usize).init(0); var gossip_service = try GossipService.init( @@ -3158,9 +3161,9 @@ test "init, exit, and deinit" { var counter = Atomic(usize).init(0); - var logger = Logger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); - logger.spawn(); + var test_logger = TestingLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); var gossip_service = try GossipService.init( std.testing.allocator, @@ -3239,11 +3242,11 @@ pub const BenchmarkGossipServiceGeneral = struct { var contact_info = ContactInfo.init(allocator, pubkey, 0, 19); try contact_info.setSocket(.gossip, address); - // var logger = 
Logger.init(allocator, .debug); + // const logger = Logger.init(allocator, .debug); // defer logger.deinit(); // logger.spawn(); - const logger: Logger = .noop; + const logger = .noop; // process incoming packets/messsages var counter = Atomic(usize).init(0); @@ -3354,11 +3357,11 @@ pub const BenchmarkGossipServicePullRequests = struct { var contact_info = ContactInfo.init(allocator, pubkey, 0, 19); try contact_info.setSocket(.gossip, address); - // var logger = Logger.init(allocator, .debug); + // const logger = Logger.init(allocator, .debug); // defer logger.deinit(); // logger.spawn(); - const logger: Logger = .noop; + const logger = .noop; // process incoming packets/messsages var counter = Atomic(usize).init(0); diff --git a/src/ledger/cleanup_service.zig b/src/ledger/cleanup_service.zig index f79aba9d0..8dfe8edc3 100644 --- a/src/ledger/cleanup_service.zig +++ b/src/ledger/cleanup_service.zig @@ -40,7 +40,7 @@ pub fn run( ) !void { var last_purge_slot: Slot = 0; - logger.info("Starting blockstore cleanup service"); + logger.info().log("Starting blockstore cleanup service"); while (!exit.load(.acquire)) { last_purge_slot = try cleanBlockstore( logger, @@ -93,7 +93,7 @@ pub fn cleanBlockstore( // NOTE: this will clean everything past the lowest slot in the blockstore const root: Slot = try blockstore_reader.lowestSlot(); const result = try findSlotsToClean(blockstore_reader, root, max_ledger_shreds); - logger.infof("findSlotsToClean result: {any}", .{result}); + logger.info().logf("findSlotsToClean result: {any}", .{result}); if (result.should_clean) { const slot, var lock = lowest_cleanup_slot.writeWithLock(); @@ -101,9 +101,9 @@ pub fn cleanBlockstore( slot.* = result.highest_slot_to_purge; const did_purge = try purgeSlots(db, 0, result.highest_slot_to_purge); if (did_purge) { - logger.info("Purged slots..."); + logger.info().log("Purged slots..."); } else { - logger.info("No slots purged"); + logger.info().log("No slots purged"); } // // TODO: Is this needed, it updates the OldestSlot data structure in // // agave which is owned and used by the blockstore database backend. diff --git a/src/ledger/database.zig b/src/ledger/database.zig index 14b23a00e..0a8cb4330 100644 --- a/src/ledger/database.zig +++ b/src/ledger/database.zig @@ -1,6 +1,7 @@ const std = @import("std"); const sig = @import("../sig.zig"); const blockstore = @import("lib.zig"); +const log = @import("../trace/log.zig"); const Allocator = std.mem.Allocator; @@ -294,7 +295,7 @@ fn tests(comptime Impl: fn ([]const ColumnFamily) type) type { }; const DB = Database(Impl(&.{ cf1, cf2 })); - const logger = sig.trace.TestLogger.default.logger(); + const logger = .noop; return struct { pub fn basic() !void { diff --git a/src/ledger/insert_shred.zig b/src/ledger/insert_shred.zig index 30d16ffa5..884c50cf9 100644 --- a/src/ledger/insert_shred.zig +++ b/src/ledger/insert_shred.zig @@ -72,6 +72,10 @@ pub const ShredInserter = struct { }; } + pub fn deinit(self: *Self) void { + self.logger.deinit(); + } + pub const InsertShredsResult = struct { completed_data_set_infos: ArrayList(CompletedDataSetInfo), duplicate_shreds: ArrayList(PossibleDuplicateShred), @@ -518,7 +522,7 @@ pub const ShredInserter = struct { .shred2 = shred.fields.payload, }) catch |e| { // TODO: only log a database error? 
- self.logger.errf( + self.logger.err().logf( "Unable to store conflicting erasure meta duplicate proof for: {} {any} {}", .{ slot, erasure_set_id, e }, ); @@ -531,7 +535,7 @@ pub const ShredInserter = struct { }, }); } else { - self.logger.errf(&newlinesToSpaces( + self.logger.err().logf(&newlinesToSpaces( \\Unable to find the conflicting code shred that set {any}. \\This should only happen in extreme cases where blockstore cleanup has \\caught up to the root. Skipping the erasure meta duplicate shred check @@ -539,8 +543,8 @@ pub const ShredInserter = struct { } } // TODO (agave): This is a potential slashing condition - self.logger.warn("Received multiple erasure configs for the same erasure set!!!"); - self.logger.warnf(&newlinesToSpaces( + self.logger.warn().log("Received multiple erasure configs for the same erasure set!!!"); + self.logger.warn().logf(&newlinesToSpaces( \\Slot: {}, shred index: {}, erasure_set: {any}, is_duplicate: {}, \\stored config: {any}, new shred: {any} ), .{ @@ -613,7 +617,7 @@ pub const ShredInserter = struct { // just purge all shreds > the new last index slot, but because replay may have already // replayed entries past the newly detected "last" shred, then mark the slot as dead // and wait for replay to dump and repair the correct version. - self.logger.warnf( + self.logger.warn().logf( "Received *last* shred index {} less than previous shred index {}, and slot {} is not full, marking slot dead", .{ shred_index, slot_meta.received, slot }, ); @@ -726,7 +730,7 @@ pub const ShredInserter = struct { // FIXME: leak - decide how to free shred const maybe_shred = try shred_store.get(shred_id); const ending_shred = if (maybe_shred) |s| s else { - self.logger.errf(&newlinesToSpaces( + self.logger.err().logf(&newlinesToSpaces( \\Last received data shred {any} indicated by slot meta \ \\{any} is missing from blockstore. This should only happen in \ \\extreme cases where blockstore cleanup has caught up to the root. \ @@ -740,7 +744,7 @@ pub const ShredInserter = struct { }; self.db.put(schema.duplicate_slots, slot, dupe) catch |e| { // TODO: only log a database error? 
- self.logger.errf("failed to store duplicate slot: {}", .{e}); + self.logger.err().logf("failed to store duplicate slot: {}", .{e}); }; // FIXME data ownership try duplicate_shreds.append(.{ .LastIndexConflict = .{ @@ -750,7 +754,7 @@ pub const ShredInserter = struct { } const leader_pubkey = slotLeader(leader_schedule, slot); - self.logger.errf( + self.logger.err().logf( "Leader {any}, slot {}: received shred_index {} < slot.received {}, shred_source: {any}", .{ leader_pubkey, slot, shred_index_u32, slot_meta.received, shred_source }, ); @@ -908,7 +912,7 @@ pub const ShredInserter = struct { self.submitRecoveryMetrics(index.slot, erasure_meta, true, "complete", shreds.items.len); return shreds; } else |e| { - self.logger.errf("shred recovery error: {}", .{e}); + self.logger.err().logf("shred recovery error: {}", .{e}); self.submitRecoveryMetrics(index.slot, erasure_meta, true, "incomplete", 0); return std.ArrayList(Shred).init(self.allocator); } @@ -940,7 +944,7 @@ pub const ShredInserter = struct { recovered: usize, ) void { const start, const end = erasure_meta.dataShredsIndices(); - self.logger.debugf( + self.logger.debug().logf( \\datapoint: blockstore-erasure \\ slot: {[slot]} \\ start_index: {[start_index]} @@ -1114,6 +1118,8 @@ pub const BlockstoreInsertionMetrics = struct { const test_shreds = @import("test_shreds.zig"); const comptimePrint = std.fmt.comptimePrint; const TestState = ledger.tests.TestState("insert_shred"); +const DirectPrintLogger = @import("../trace/log.zig").DirectPrintLogger; +const Logger = @import("../trace/log.zig").Logger; fn assertOk(result: anytype) void { std.debug.assert(if (result) |_| true else |_| false); @@ -1127,11 +1133,13 @@ const ShredInserterTestState = struct { inserter: ShredInserter, pub fn init(allocator_: std.mem.Allocator, comptime test_name: []const u8) !ShredInserterTestState { - return initWithLogger(allocator_, test_name, sig.trace.TestLogger.default.logger()); + var test_logger = DirectPrintLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + const logger = test_logger.logger(); + return initWithLogger(allocator_, test_name, logger); } fn initWithLogger(allocator_: std.mem.Allocator, comptime test_name: []const u8, logger: sig.trace.Logger) !ShredInserterTestState { - const state = try TestState.init(allocator_, test_name); + const state = try TestState.init(allocator_, test_name, logger); const inserter = try ShredInserter.init( state.allocator, logger, @@ -1183,6 +1191,7 @@ const ShredInserterTestState = struct { pub fn deinit(self: *@This()) void { self.state.deinit(); + self.inserter.deinit(); } }; diff --git a/src/ledger/insert_shreds_working_state.zig b/src/ledger/insert_shreds_working_state.zig index ec2db69c7..db90556d7 100644 --- a/src/ledger/insert_shreds_working_state.zig +++ b/src/ledger/insert_shreds_working_state.zig @@ -414,7 +414,7 @@ pub const WorkingShredStore = struct { shred else if (index.contains(shred_index)) blk: { const shred = try self.db.getBytes(cf, .{ slot, @intCast(id.index) }) orelse { - self.logger.errf(&newlinesToSpaces( + self.logger.err().logf(&newlinesToSpaces( \\Unable to read the {s} with slot {}, index {} for shred \\recovery. The shred is marked present in the slot's index, \\but the shred could not be found in the column. 
diff --git a/src/ledger/merkle_root_checks.zig b/src/ledger/merkle_root_checks.zig index f3c3a1c9a..4b517283b 100644 --- a/src/ledger/merkle_root_checks.zig +++ b/src/ledger/merkle_root_checks.zig @@ -49,7 +49,7 @@ pub fn checkMerkleRootConsistency( return true; } - logger.warnf(&newlinesToSpaces( + logger.warn().logf(&newlinesToSpaces( \\Received conflicting merkle roots for slot: {}, erasure_set: {any} original merkle \\root meta {any} vs conflicting merkle root {any} shred index {} type {any}. Reporting \\as duplicate @@ -77,7 +77,7 @@ pub fn checkMerkleRootConsistency( }, }); } else { - logger.errf(&newlinesToSpaces( + logger.err().logf(&newlinesToSpaces( \\Shred {any} indiciated by merkle root meta {any} is \\missing from blockstore. This should only happen in extreme cases where \\blockstore cleanup has caught up to the root. Skipping the merkle root @@ -116,7 +116,7 @@ pub fn checkForwardChainedMerkleRootConsistency( // If a shred from the next fec set has already been inserted, check the chaining const next_fec_set_index = if (erasure_meta.nextFecSetIndex()) |n| n else { - logger.errf( + logger.err().logf( "Invalid erasure meta, unable to compute next fec set index {any}", .{erasure_meta}, ); @@ -139,7 +139,7 @@ pub fn checkForwardChainedMerkleRootConsistency( const next_shred = if (try shred_store.get(next_shred_id)) |ns| ns else { - logger.errf(&newlinesToSpaces( + logger.err().logf(&newlinesToSpaces( \\Shred {any} indicated by merkle root meta {any} \ \\is missing from blockstore. This should only happen in extreme cases where \ \\blockstore cleanup has caught up to the root. Skipping the forward chained \ @@ -151,7 +151,7 @@ pub fn checkForwardChainedMerkleRootConsistency( const chained_merkle_root = shred_mod.layout.getChainedMerkleRoot(next_shred); if (!checkChaining(merkle_root, chained_merkle_root)) { - logger.warnf(&newlinesToSpaces( + logger.warn().logf(&newlinesToSpaces( \\Received conflicting chained merkle roots for slot: {}, shred \ \\{any} type {any} has merkle root {any}, however next fec set \ \\shred {any} type {any} chains to merkle root \ @@ -229,7 +229,7 @@ pub fn checkBackwardsChainedMerkleRootConsistency( }; const prev_shred = if (try shred_store.get(prev_shred_id)) |ps| ps else { - logger.warnf(&newlinesToSpaces( + logger.warn().logf(&newlinesToSpaces( \\Shred {any} indicated by the erasure meta {any} \ \\is missing from blockstore. This can happen if you have recently upgraded \ \\from a version < v1.18.13, or if blockstore cleanup has caught up to the root. \ @@ -241,7 +241,7 @@ pub fn checkBackwardsChainedMerkleRootConsistency( const chained_merkle_root = shred.chainedMerkleRoot() catch null; if (!checkChaining(merkle_root, chained_merkle_root)) { - logger.warnf(&newlinesToSpaces( + logger.warn().logf(&newlinesToSpaces( \\Received conflicting chained merkle roots for slot: {}, shred {any} type {any} \ \\chains to merkle root {any}, however previous fec set code \ \\shred {any} has merkle root {any}. Reporting as duplicate
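The multiline log calls in this file route Zig multiline string literals through newlinesToSpaces, so a message authored across several \\ lines is emitted as a single logfmt-friendly line. The helper itself is not part of this diff; inferred from its call sites, it is a comptime transform along these lines (a hypothetical reconstruction, not the repository's definition):

    /// Assumed shape: returns a copy of `fmt` with each newline replaced by a
    /// space, sized at comptime so callers can pass `&newlinesToSpaces(...)`
    /// where a comptime format string is expected.
    fn newlinesToSpaces(comptime fmt: []const u8) [fmt.len]u8 {
        var result: [fmt.len]u8 = undefined;
        for (fmt, 0..) |byte, i| {
            result[i] = if (byte == '\n') ' ' else byte;
        }
        return result;
    }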
diff --git a/src/ledger/reader.zig b/src/ledger/reader.zig index ddf3fd768..4df9719e5 100644 --- a/src/ledger/reader.zig +++ b/src/ledger/reader.zig @@ -451,12 +451,12 @@ pub const BlockstoreReader = struct { allow_dead_slots: bool, ) !VersionedConfirmedBlockWithEntries { var slot_meta: SlotMeta = try self.db.get(self.allocator, schema.slot_meta, slot) orelse { - self.logger.debugf("getCompleteBlockWithEntries failed for slot {} (missing SlotMeta)", .{slot}); + self.logger.debug().logf("getCompleteBlockWithEntries failed for slot {} (missing SlotMeta)", .{slot}); return error.SlotUnavailable; }; defer slot_meta.deinit(); if (!slot_meta.isFull()) { - self.logger.debugf("getCompleteBlockWithEntries failed for slot {} (slot not full)", .{slot}); + self.logger.debug().logf("getCompleteBlockWithEntries failed for slot {} (slot not full)", .{slot}); return error.SlotUnavailable; } @@ -466,7 +466,7 @@ pub const BlockstoreReader = struct { slot_entries.deinit(); } if (slot_entries.items.len == 0) { - self.logger.debugf("getCompleteBlockWithEntries failed for slot {} (missing slot entries)", .{slot}); + self.logger.debug().logf("getCompleteBlockWithEntries failed for slot {} (missing slot entries)", .{slot}); return error.SlotUnavailable; } @@ -512,7 +512,7 @@ pub const BlockstoreReader = struct { } for (slot_transactions.items) |transaction| { transaction.sanitize() catch |err| { - self.logger.warnf( + self.logger.warn().logf( "getCompleteeBlockWithEntries sanitize failed: {any}, slot: {any}, {any}", .{ err, slot, transaction }, ); @@ -717,7 +717,7 @@ pub const BlockstoreReader = struct { for (entry.transactions.items) |transaction| { // NOTE perf: redundant calls to sanitize every time this is called if (transaction.sanitize()) |_| {} else |err| { - self.logger.warnf( + self.logger.warn().logf( "BlockstoreReader.findTransactionInSlot sanitize failed: {any}, slot: {}, {any}", .{ err, slot, transaction }, ); @@ -1128,7 +1128,7 @@ pub const BlockstoreReader = struct { var lcs = self.lowest_cleanup_slot.read(); defer lcs.unlock(); if (slot > lcs.get().*) { - self.logger.errf( + self.logger.err().logf( // TODO write a function to clean up newlines for cases like this \\Shred with slot: {}, index: {}, consumed: {}, completed_indexes: {any} \\must exist if shred index was included in a range: {} {} @@ -1145,7 +1145,7 @@ pub const BlockstoreReader = struct { return error.CorruptedBlockstore; } } - self.logger.errf("Missing shred for slot {}, index {}", .{ slot, index }); + self.logger.err().logf("Missing shred for slot {}, index {}", .{ slot, index }); return error.InvalidShredData; } } @@ -1170,13 +1170,13 @@ pub const BlockstoreReader = struct { // self.logger.tracef("{any} data shreds in last FEC set", data_shreds.items.len); const bytes = shredder.deshred(allocator, range_shreds) catch |e| { - self.logger.errf("failed to deshred entries buffer from shreds: {}", .{e}); + self.logger.err().logf("failed to deshred entries buffer from shreds: {}", .{e}); return e; }; defer bytes.deinit(); const these_entries = sig.bincode .readFromSlice(allocator, []Entry, bytes.items, .{}) catch |e| { - self.logger.errf("failed to deserialize entries from shreds: {}", .{e}); + self.logger.err().logf("failed to deserialize entries from shreds: {}", .{e}); return e; }; defer allocator.free(these_entries); @@ -1324,7 +1324,7 @@ pub const BlockstoreReader = struct { if (shred.retransmitterSignature()) |signature| { shred_layout.setRetransmitterSignature(other.items, signature) catch |err| { -
self.logger.errf("set retransmitter signature failed: {any}", .{err}); + self.logger.err().logf("set retransmitter signature failed: {any}", .{err}); }; } else |_| { // TODO: agave does nothing here. is that correct? diff --git a/src/ledger/rocksdb.zig b/src/ledger/rocksdb.zig index 4c34f0759..73bab9af2 100644 --- a/src/ledger/rocksdb.zig +++ b/src/ledger/rocksdb.zig @@ -334,7 +334,7 @@ pub fn RocksDB(comptime column_families: []const ColumnFamily) type { fn callRocks(logger: Logger, comptime func: anytype, args: anytype) ReturnType(@TypeOf(func)) { var err_str: ?rocks.Data = null; return @call(.auto, func, args ++ .{&err_str}) catch |e| { - logger.errf("{} - {s}", .{ e, err_str.? }); + logger.err().logf("{} - {s}", .{ e, err_str.? }); return e; }; } diff --git a/src/ledger/tests.zig b/src/ledger/tests.zig index 387c2ff60..033db3df3 100644 --- a/src/ledger/tests.zig +++ b/src/ledger/tests.zig @@ -10,13 +10,13 @@ const BlockstoreDB = ledger.BlockstoreDB; const Entry = sig.core.Entry; const Shred = ledger.shred.Shred; const Slot = sig.core.Slot; +const DirectPrintLogger = sig.trace.DirectPrintLogger; +const Logger = sig.trace.Logger; const SlotMeta = ledger.meta.SlotMeta; const VersionedTransactionWithStatusMeta = ledger.reader.VersionedTransactionWithStatusMeta; - const comptimePrint = std.fmt.comptimePrint; const schema = ledger.schema.schema; -const test_logger = sig.trace.TestLogger.default.logger(); test "put/get data consistency for merkle root" { var rng = std.Random.DefaultPrng.init(100); @@ -50,7 +50,11 @@ test "put/get data consistency for merkle root" { // Analogous to [test_get_rooted_block](https://github.com/anza-xyz/agave/blob/a72f981370c3f566fc1becf024f3178da041547a/ledger/src/blockstore.rs#L8271) test "insert shreds and transaction statuses then get blocks" { - var state = try State.init(std.testing.allocator, "insert shreds and transaction statuses then get blocks"); + var test_logger = DirectPrintLogger.init(std.testing.allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); + + var state = try State.init(std.testing.allocator, "insert shreds and transaction statuses then get blocks", logger); defer state.deinit(); const allocator = state.allocator; @@ -350,10 +354,11 @@ pub fn TestState(scope: []const u8) type { lowest_cleanup_slot: sig.sync.RwMux(Slot), max_root: std.atomic.Value(Slot), allocator: std.mem.Allocator, + logger: sig.trace.Logger, const Self = @This(); - pub fn init(allocator: std.mem.Allocator, comptime test_name: []const u8) !*Self { + pub fn init(allocator: std.mem.Allocator, comptime test_name: []const u8, logger: sig.trace.Logger) !*Self { const self = try allocator.create(Self); self.* = .{ .allocator = allocator, @@ -361,18 +366,19 @@ pub fn TestState(scope: []const u8) type { .registry = sig.prometheus.Registry(.{}).init(allocator), .lowest_cleanup_slot = sig.sync.RwMux(Slot).init(0), .max_root = std.atomic.Value(Slot).init(0), + .logger = logger, }; return self; } pub fn shredInserter(self: *Self) !ledger.ShredInserter { - return ledger.ShredInserter.init(self.allocator, test_logger, &self.registry, self.db); + return ledger.ShredInserter.init(self.allocator, self.logger, &self.registry, self.db); } pub fn writer(self: *Self) !ledger.BlockstoreWriter { return try ledger.BlockstoreWriter.init( self.allocator, - test_logger, + self.logger, self.db, &self.registry, &self.lowest_cleanup_slot, @@ -383,7 +389,7 @@ pub fn TestState(scope: []const u8) type { pub fn reader(self: *Self) !ledger.BlockstoreReader { return try 
ledger.BlockstoreReader.init( self.allocator, - test_logger, + self.logger, self.db, &self.registry, &self.lowest_cleanup_slot, @@ -410,7 +416,7 @@ pub fn TestDB(scope: []const u8) type { pub fn initCustom(allocator: Allocator, comptime test_name: []const u8) !BlockstoreDB { const path = comptimePrint("{s}/{s}/{s}", .{ dir, scope, test_name }); try sig.ledger.tests.freshDir(path); - return try BlockstoreDB.open(allocator, test_logger, path); + return try BlockstoreDB.open(allocator, .noop, path); } }; } diff --git a/src/ledger/writer.zig b/src/ledger/writer.zig index 4d5ad10a6..2fa212963 100644 --- a/src/ledger/writer.zig +++ b/src/ledger/writer.zig @@ -220,7 +220,7 @@ pub const BlockstoreWriter = struct { const find_missing_roots_us = find_missing_roots_timer.read().asMicros(); var fix_roots_timer = try Timer.start(); if (roots_to_fix.items.len != 0) { - self.logger.infof("{} slots to be rooted", .{roots_to_fix.items.len}); + self.logger.info().logf("{} slots to be rooted", .{roots_to_fix.items.len}); const chunk_size = 100; const num_chunks = (roots_to_fix.items.len - 1) / chunk_size + 1; for (0..num_chunks) |chunk_index| { @@ -234,7 +234,7 @@ pub const BlockstoreWriter = struct { try self.setRoots(chunk); } } else { - self.logger.debugf("No missing roots found in range {} to {}", .{ start_root, end_slot }); + self.logger.debug().logf("No missing roots found in range {} to {}", .{ start_root, end_slot }); } const fix_roots_us = fix_roots_timer.read().asMicros(); const num_roots_fixed = roots_to_fix.items.len; @@ -266,7 +266,7 @@ pub const BlockstoreWriter = struct { if (root_slot_meta.isConnected()) { return; } - self.logger.infof("Marking slot {} and any full children slots as connected", .{root}); + self.logger.info().logf("Marking slot {} and any full children slots as connected", .{root}); var write_batch = try self.db.initWriteBatch(); defer write_batch.deinit(); @@ -286,7 +286,7 @@ pub const BlockstoreWriter = struct { while (i < next_slots.items.len) : (i += 1) { const slot = next_slots.items[i]; var slot_meta: SlotMeta = try self.db.get(self.allocator, schema.slot_meta, slot) orelse { - self.logger.errf("Slot {} is a child but has no SlotMeta in blockstore", .{slot}); + self.logger.err().logf("Slot {} is a child but has no SlotMeta in blockstore", .{slot}); return error.CorruptedBlockstore; }; defer slot_meta.deinit(); diff --git a/src/main.zig b/src/main.zig index fb7b3b6c8..11c86a000 100644 --- a/src/main.zig +++ b/src/main.zig @@ -1,5 +1,4 @@ const cmd = @import("cmd/cmd.zig"); -const logger = @import("./trace/log.zig"); const std = @import("std"); // We set this so that std.log knows not to log .debug level messages @@ -10,13 +9,5 @@ pub const std_options: std.Options = .{ }; pub fn main() !void { - var gpa = std.heap.GeneralPurposeAllocator(.{}){}; - defer _ = gpa.deinit(); - const allocator = gpa.allocator(); - - var our_logger = logger.Logger.init(allocator, .debug); - defer our_logger.deinit(); - - logger.default_logger.* = our_logger; try cmd.run(); } diff --git a/src/net/echo.zig b/src/net/echo.zig index 62bc94394..a9ff89c7f 100644 --- a/src/net/echo.zig +++ b/src/net/echo.zig @@ -3,7 +3,6 @@ const builtin = @import("builtin"); const net = @import("net.zig"); const ShredVersion = @import("../core/shred.zig").ShredVersion; const SocketAddr = @import("net.zig").SocketAddr; -const logger = @import("../trace/log.zig").default_logger; const Atomic = std.atomic.Value; const assert = std.debug.assert; const testing = std.testing; @@ -99,13 +98,13 @@ pub fn 
handleEchoRequest(req: *httpz.Request, res: *httpz.Response) !void { defer ip_echo_server_message.deinit(); std.debug.print("ip echo server message received: {}\n", .{ip_echo_server_message}); - // logger.debugf("ip echo server message: {any}", .{ip_echo_server_message.value}); + // logger.debug().logf("ip echo server message: {any}", .{ip_echo_server_message.value}); // convert a u32 to Ipv4 const socket_addr = SocketAddr.fromIpV4Address(res.conn.address); std.json.stringify(IpEchoServerResponse.init(net.IpAddr{ .ipv4 = socket_addr.V4.ip }), .{}, res.writer()) catch |err| { - // logger.errf("could not json stringify IpEchoServerResponse: {any}", .{err}); + // logger.err().logf("could not json stringify IpEchoServerResponse: {any}", .{err}); std.debug.print("could not json stringify ip echo server response message: {}\n", .{err}); }; } diff --git a/src/net/socket_utils.zig b/src/net/socket_utils.zig index 79d992cc2..86f0b1f2f 100644 --- a/src/net/socket_utils.zig +++ b/src/net/socket_utils.zig @@ -22,11 +22,11 @@ pub fn readSocket( idx: if (needs_exit_order) usize else void, ) !void { defer { - logger.infof("leaving with: {}, {}, {}", .{ incoming_channel.len(), counter.load(.acquire), idx }); + logger.info().logf("leaving with: {}, {}, {}", .{ incoming_channel.len(), counter.load(.acquire), idx }); if (needs_exit_order) { counter.store(idx + 1, .release); } - logger.infof("readSocket loop closed", .{}); + logger.info().log("readSocket loop closed"); } // NOTE: we set to non-blocking to periodically check if we should exit @@ -61,7 +61,7 @@ pub fn sendSocket( // exit the next service in the chain counter.store(idx + 1, .release); } - logger.debugf("sendSocket loop closed", .{}); + logger.debug().log("sendSocket loop closed"); } const exit_condition = if (needs_exit_order) idx else true; @@ -70,7 +70,7 @@ pub fn sendSocket( { while (outgoing_channel.receive()) |p| { const bytes_sent = socket.sendTo(p.addr, p.data[0..p.size]) catch |e| { - logger.debugf("send_socket error: {s}", .{@errorName(e)}); + logger.debug().logf("send_socket error: {s}", .{@errorName(e)}); continue; }; std.debug.assert(bytes_sent == p.size); diff --git a/src/rpc/client.zig b/src/rpc/client.zig index 7bb8a0f6e..fafd428c4 100644 --- a/src/rpc/client.zig +++ b/src/rpc/client.zig @@ -265,7 +265,7 @@ pub const Client = struct { for (0..self.max_retries + 1) |curr_retries| { const result = self.fetchRequest(payload, &response.bytes) catch |fetch_error| { - self.logger.warnf("HTTP client error, attempting reinitialisation: error={any}", .{fetch_error}); + self.logger.warn().logf("HTTP client error, attempting reinitialisation: error={any}", .{fetch_error}); if (curr_retries == self.max_retries) return fetch_error; response.bytes.clearRetainingCapacity(); self.restartHttpClient(); @@ -273,7 +273,7 @@ pub const Client = struct { }; if (result.status != std.http.Status.ok) { - self.logger.warnf("HTTP request failed ({d}/{d}): {}", .{ curr_retries, self.max_retries, result.status }); + self.logger.warn().logf("HTTP request failed ({d}/{d}): {}", .{ curr_retries, self.max_retries, result.status }); if (curr_retries == self.max_retries) return error.HttpRequestFailed; response.bytes.clearRetainingCapacity(); continue; @@ -283,7 +283,7 @@ pub const Client = struct { } response.parse() catch |err| { - self.logger.errf("Failed to parse response: error={} request_payload={s} response={s}", .{ err, payload, response.bytes.items }); + self.logger.err().logf("Failed to parse response: error={} request_payload={s} response={s}", .{ err, 
payload, response.bytes.items }); return err; }; diff --git a/src/shred_collector/repair_service.zig b/src/shred_collector/repair_service.zig index 495829193..5085280b4 100644 --- a/src/shred_collector/repair_service.zig +++ b/src/shred_collector/repair_service.zig @@ -103,11 +103,11 @@ pub const RepairService = struct { if (self.sendNecessaryRepairs()) |_| { if (waiting_for_peers) { waiting_for_peers = false; - self.logger.infof("Acquired some repair peers.", .{}); + self.logger.info().logf("Acquired some repair peers.", .{}); } } else |e| switch (e) { error.NoRepairPeers => if (!waiting_for_peers) { - self.logger.infof("Waiting for repair peers...", .{}); + self.logger.info().logf("Waiting for repair peers...", .{}); waiting_for_peers = true; }, else => return e, @@ -141,7 +141,7 @@ pub const RepairService = struct { // TODO less often if (addressed_requests.items.len > 0) { - self.logger.debugf( + self.logger.debug().logf( "sent {} repair requests", .{addressed_requests.items.len}, ); @@ -424,6 +424,7 @@ test "RepairService sends repair request to gossip peer" { const allocator = std.testing.allocator; var rand = std.rand.DefaultPrng.init(4328095); var random = rand.random(); + const TestLogger = sig.trace.DirectPrintLogger; // my details const keypair = try KeyPair.create(null); @@ -431,8 +432,9 @@ test "RepairService sends repair request to gossip peer" { const wallclock = 100; var gossip = try GossipTable.init(allocator, undefined); defer gossip.deinit(); - var logger = Logger.init(allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); + var test_logger = TestLogger.init(allocator, Logger.TEST_DEFAULT_LEVEL); + + const logger = test_logger.logger(); // connectivity const repair_port = random.intRangeAtMost(u16, 1000, std.math.maxInt(u16)); @@ -509,8 +511,6 @@ test "RepairPeerProvider selects correct peers" { const my_shred_version = Atomic(u16).init(random.int(u16)); var gossip = try GossipTable.init(allocator, undefined); defer gossip.deinit(); - var logger = Logger.init(allocator, Logger.TEST_DEFAULT_LEVEL); - defer logger.deinit(); // peers const peer_generator = TestPeerGenerator{ diff --git a/src/shred_collector/shred_processor.zig b/src/shred_collector/shred_processor.zig index e0f0002cb..0be342185 100644 --- a/src/shred_collector/shred_processor.zig +++ b/src/shred_collector/shred_processor.zig @@ -47,7 +47,7 @@ pub fn runShredProcessor( &is_repaired, &error_context, ) catch |e| { - logger.errf( + logger.err().logf( "failed to process verified shred {?}.{?}: {}", .{ error_context.slot, error_context.index, e }, ); diff --git a/src/shred_collector/shred_receiver.zig b/src/shred_collector/shred_receiver.zig index 063fa5999..d08eec469 100644 --- a/src/shred_collector/shred_receiver.zig +++ b/src/shred_collector/shred_receiver.zig @@ -41,8 +41,8 @@ pub const ShredReceiver = struct { /// Run threads to listen/send over socket and handle all incoming packets. /// Returns when exit is set to true. 
pub fn run(self: *Self) !void { - defer self.logger.err("exiting shred receiver"); - errdefer self.logger.err("error in shred receiver"); + defer self.logger.err().log("exiting shred receiver"); + errdefer self.logger.err().log("error in shred receiver"); var response_sender = try SocketThread .initSender(self.allocator, self.logger, self.repair_socket, self.exit); diff --git a/src/shred_collector/shred_tracker.zig b/src/shred_collector/shred_tracker.zig index ce7817aaa..b0438d765 100644 --- a/src/shred_collector/shred_tracker.zig +++ b/src/shred_collector/shred_tracker.zig @@ -65,7 +65,7 @@ pub const BasicShredTracker = struct { const monitored_slot = try self.observeSlot(slot); if (!monitored_slot.is_complete) { monitored_slot.is_complete = true; - self.logger.infof("skipping slot: {}", .{slot}); + self.logger.info().logf("skipping slot: {}", .{slot}); self.max_slot_processed = @max(self.max_slot_processed, slot); } } @@ -81,7 +81,7 @@ pub const BasicShredTracker = struct { const monitored_slot = try self.observeSlot(slot); const new = monitored_slot.record(shred_index); - if (new) self.logger.debugf("new slot: {}", .{slot}); + if (new) self.logger.debug().logf("new slot: {}", .{slot}); self.max_slot_processed = @max(self.max_slot_processed, slot); } @@ -122,9 +122,9 @@ pub const BasicShredTracker = struct { } if (!found_an_incomplete_slot) { if (slot % 20 == 0) { - self.logger.infof("shred tracker: received all shreds up to slot {}", .{slot}); + self.logger.info().logf("shred tracker: received all shreds up to slot {}", .{slot}); } else { - self.logger.debugf("shred tracker: received all shreds up to slot {}", .{slot}); + self.logger.debug().logf("shred tracker: received all shreds up to slot {}", .{slot}); } self.current_bottom_slot = @max(self.current_bottom_slot, slot + 1); monitored_slot.* = .{}; diff --git a/src/tests.zig b/src/tests.zig index 5128f5cb3..bd5f7565b 100644 --- a/src/tests.zig +++ b/src/tests.zig @@ -1,17 +1,9 @@ const std = @import("std"); const sig = @import("sig.zig"); -const logger = @import("./trace/log.zig"); // TODO: there is *no* guarantee that this is the first test to be ran. // we will need to rework this to not use a global logger. test { - logger.default_logger.* = logger.Logger.init( - // NOTE: we're going to ignore the leaks - // here since they're never going to be cleaned up. - std.heap.c_allocator, - .debug, - ); - std.testing.log_level = std.log.Level.err; refAllDeclsRecursive(sig, 2); } diff --git a/src/time/estimate.zig b/src/time/estimate.zig index 47cca46e9..95c62b6ca 100644 --- a/src/time/estimate.zig +++ b/src/time/estimate.zig @@ -1,6 +1,6 @@ const std = @import("std"); const sig = @import("../sig.zig"); -const Logger = @import("../trace/log.zig").Logger; +const Logger = sig.trace.Logger; // TODO: change to writer interface when logger has improved pub fn printTimeEstimate( @@ -15,7 +15,9 @@ pub fn printTimeEstimate( if (i == 0 or total == 0) return; if (i > total) { if (other_info) |info| { - logger.infof("{s} [{s}]: {d}/{d} (?%) (est: ? elp: {s})", .{ + logger + .info() + .logf("{s} [{s}]: {d}/{d} (?%) (est: ? elp: {s})", .{ name, info, i, @@ -23,7 +25,7 @@ pub fn printTimeEstimate( timer.read(), }); } else { - logger.infof("{s}: {d}/{d} (?%) (est: ? elp: {s})", .{ + logger.info().logf("{s}: {d}/{d} (?%) (est: ? 
elp: {s})", .{ name, i, total, @@ -41,7 +43,7 @@ pub fn printTimeEstimate( const ns_left = ns_per_vec * left; if (other_info) |info| { - logger.infof("{s} [{s}]: {d}/{d} ({d}%) (est: {s} elp: {s})", .{ + logger.info().logf("{s} [{s}]: {d}/{d} ({d}%) (est: {s} elp: {s})", .{ name, info, i, @@ -51,7 +53,7 @@ pub fn printTimeEstimate( timer.read(), }); } else { - logger.infof("{s}: {d}/{d} ({d}%) (est: {s} elp: {s})", .{ + logger.info().logf("{s}: {d}/{d} ({d}%) (est: {s} elp: {s})", .{ name, i, total, diff --git a/src/time/time.zig b/src/time/time.zig index ee2804da8..9c0a97e60 100644 --- a/src/time/time.zig +++ b/src/time/time.zig @@ -551,7 +551,7 @@ pub const Duration = struct { return self.ns == other.ns; } - pub fn format(self: @This(), comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) error{OutOfMemory}!void { + pub fn format(self: @This(), comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { return writer.print("{s}", .{std.fmt.fmtDuration(self.ns)}) catch unreachable; } }; @@ -571,7 +571,7 @@ pub const Instant = struct { return Duration.fromNanos(self.inner.since(earlier.inner)); } - pub fn format(self: @This(), comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) error{OutOfMemory}!void { + pub fn format(self: @This(), comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) !void { return try writer.print("{s}", .{std.fmt.fmtDuration(self.ns)}); } }; diff --git a/src/trace/entry.zig b/src/trace/entry.zig index 88a431c73..35fde12ef 100644 --- a/src/trace/entry.zig +++ b/src/trace/entry.zig @@ -1,217 +1,245 @@ const std = @import("std"); -const time = @import("../time/time.zig"); -const Field = @import("field.zig").Field; -const Level = @import("level.zig").Level; const logfmt = @import("logfmt.zig"); -const Logger = @import("./log.zig").Logger; +const Level = @import("level.zig").Level; const Channel = @import("../sync/channel.zig").Channel; -const testing = std.testing; const AtomicBool = std.atomic.Value(bool); pub const Entry = union(enum) { - standard: *StandardEntry, + channel_print: ChannelPrintEntry, + direct_print: DirectPrintEntry, noop, const Self = @This(); - pub fn init(allocator: std.mem.Allocator, channel: *Channel(*StandardEntry), max_level: Level) Self { - return .{ .standard = StandardEntry.init(allocator, channel, max_level) }; + pub fn init(allocator: std.mem.Allocator, channel: *Channel(logfmt.LogMsg), log_level: Level) Self { + return .{ .channel_print = ChannelPrintEntry.init(allocator, channel, log_level) }; } pub fn deinit(self: Self) void { switch (self) { - .standard => |entry| { - entry.deinit(); - }, .noop => {}, + .channel_print => |impl| impl.deinit(), + .direct_print => |impl| impl.deinit(), } } pub fn field(self: Self, name: []const u8, value: anytype) Self { switch (self) { - .standard => |entry| { - _ = entry.field(name, value); - return self; - }, .noop => { return self; }, - } - } - - pub fn debugf(self: Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.debug, fmt, args); - } - - pub fn errf(self: Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.err, fmt, args); - } - - pub fn warnf(self: Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.warn, fmt, args); - } - - pub fn infof(self: Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.info, fmt, args); - } - - pub fn info(self: Self, msg: []const u8) void { - self.log(.info, msg); - } - - pub fn debug(self: Self, msg: []const u8) void { - self.log(.debug, msg); - } - - pub 
fn err(self: Self, msg: []const u8) void { - self.log(.err, msg); - } - - pub fn warn(self: Self, msg: []const u8) void { - self.log(.warn, msg); - } - - pub fn logf(self: Self, level: Level, comptime fmt: []const u8, args: anytype) void { - switch (self) { - .standard => |entry| { - entry.logf(level, fmt, args); + .channel_print => |entry| { + var log_entry = entry; + _ = log_entry.field(name, value); + return Entry{ .channel_print = log_entry }; }, - .noop => {}, - } - } - - pub fn log(self: Self, level: Level, msg: []const u8) void { - switch (self) { - .standard => |entry| { - entry.log(level, msg); + .direct_print => |entry| { + var log_entry = entry; + _ = log_entry.field(name, value); + return Entry{ .direct_print = log_entry }; }, - .noop => {}, - } } - pub fn format( - self: *const Self, - fmt: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) !void { + pub fn log(self: Self, comptime msg: []const u8) void { switch (self) { - .standard => |entry| { - try entry.format(fmt, options, writer); - }, .noop => {}, + .channel_print => |impl| { + var logger = impl; + logger.log(msg); + }, + .direct_print => |impl| { + var logger = impl; + logger.log(msg); + }, } } - pub fn custom_format(self: *const Self, formatter: anytype, writer: anytype) !void { + pub fn logf(self: Self, comptime fmt: []const u8, args: anytype) void { switch (self) { - .standard => |entry| { - try formatter(entry, writer); - }, .noop => {}, + .channel_print => |impl| { + var logger = impl; + logger.logf(fmt, args); + }, + .direct_print => |impl| { + var logger = impl; + logger.logf(fmt, args); + }, } } };
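With the union in place, the call-site surface is: a level method on Logger yields an Entry, fields chain by value, and log/logf consume the entry. Roughly (illustrative only; assumes a Logger obtained from one of the logger types in src/trace/log.zig):

    const sig = @import("../sig.zig");

    fn example(logger: sig.trace.Logger, slot: u64) void {
        // Plain and formatted messages at a chosen level.
        logger.info().log("service started");
        logger.warn().logf("slot {} is lagging", .{slot});

        // Structured fields, chained ahead of the final log call.
        logger.debug()
            .field("slot", slot)
            .field("source", "example")
            .log("processed slot");
    }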
-pub const StandardEntry = struct { - /// Log levels more granular than this will not be logged. - max_level: Level, - /// Level to log this message as. - level: Level, +pub const ChannelPrintEntry = struct { allocator: std.mem.Allocator, - fields: std.ArrayList(Field), - time: time.DateTime, - message: std.ArrayList(u8), - channel: *Channel(*StandardEntry), - + scope: ?[]const u8, + log_level: Level, + fields: std.ArrayList(u8), + exit_sig: std.atomic.Value(bool), + channel: *Channel(logfmt.LogMsg), const Self = @This(); - pub fn init(allocator: std.mem.Allocator, channel: *Channel(*StandardEntry), max_level: Level) *Self { - const self = allocator.create(Self) catch @panic("could not allocate.Create Entry"); - self.* = Self{ + pub fn init( + allocator: std.mem.Allocator, + scope: ?[]const u8, + channel: *Channel(logfmt.LogMsg), + log_level: Level, + ) Self { + return .{ .allocator = allocator, - .fields = std.ArrayList(Field).init(allocator), - .max_level = max_level, - .level = Level.debug, + .scope = scope, + .exit_sig = AtomicBool.init(false), + .log_level = log_level, + .fields = std.ArrayList(u8).init(allocator), .channel = channel, - .time = time.DateTime.epoch_unix, - .message = std.ArrayList(u8).init(allocator), }; - return self; } pub fn deinit(self: *Self) void { - for (self.fields.items) |*f| { - f.deinit(self.allocator); - } self.fields.deinit(); - self.message.deinit(); - self.allocator.destroy(self); } pub fn field(self: *Self, name: []const u8, value: anytype) *Self { - self.fields.append(Field.init(self.allocator, name, value)) catch @panic("could not append Field"); + const min_capacity = self.fields.items.len + logfmt.countField(name, value); + self.fields.ensureTotalCapacity(min_capacity) catch return self; + logfmt.fmtField(self.fields.writer(), name, value); return self; } - pub fn logf(self: *Self, level: Level, comptime fmt: []const u8, args: anytype) void { - if (@intFromEnum(self.max_level) < @intFromEnum(level)) { + pub fn log(self: *Self, comptime msg: []const u8) void { + const log_msg = logfmt.LogMsg{ + .level = self.log_level, + .maybe_scope = self.scope, + .maybe_msg = msg, + .maybe_fields = self.fields.toOwnedSlice() catch |err| { + std.debug.print("Processing fields failed with err: {any}", .{err}); + self.deinit(); + return; + }, + .maybe_fmt = null, + }; + + self.channel.send(log_msg) catch |err| { + std.debug.print("Send msg through channel failed with err: {any}", .{err}); self.deinit(); return; - } - var message = std.ArrayList(u8).initCapacity(self.allocator, fmt.len * 2) catch @panic("could not initCapacity for message"); - std.fmt.format(message.writer(), fmt, args) catch @panic("could not format"); - self.message = message; - self.time = time.DateTime.now(); - self.level = level; - self.channel.send(self) catch @panic("could not send to channel"); + }; } - pub fn log(self: *Self, level: Level, msg: []const u8) void { - if (@intFromEnum(self.max_level) < @intFromEnum(level)) { + pub fn logf(self: *Self, comptime fmt: []const u8, args: anytype) void { + // Get memory for formatting message. + const msg_buf = self.allocBuf(std.fmt.count(fmt, args)) catch |err| { + std.debug.print("allocBuf failed with err: {any}", .{err}); + self.deinit(); + return; + }; + var fmt_message = std.io.fixedBufferStream(msg_buf); + // Format message. + logfmt.fmtMsg(fmt_message.writer(), fmt, args); + + const log_msg = logfmt.LogMsg{ + .level = self.log_level, + .maybe_scope = self.scope, + .maybe_msg = null, + .maybe_fields = self.fields.toOwnedSlice() catch |err| { + std.debug.print("Processing fields failed with err: {any}", .{err}); + self.deinit(); + return; + }, + .maybe_fmt = fmt_message.getWritten(), + }; + + self.channel.send(log_msg) catch |err| { + std.debug.print("Send msg through channel failed with err: {any}", .{err}); self.deinit(); return; + }; + } + + // Utility function for allocating memory from RecycleFBA for part of the log message. + fn allocBuf(self: *const Self, size: u64) ![]u8 { + const buf = blk: while (true) { + const buf = self.allocator.alloc(u8, size) catch { + std.time.sleep(std.time.ns_per_ms); + if (self.exit_sig.load(.unordered)) { + return error.MemoryBlockedWithExitSignaled; + } + continue; + }; + break :blk buf; + }; + errdefer { + self.allocator.free(buf); } - var message = std.ArrayList(u8).initCapacity(self.allocator, msg.len) catch @panic("could not initCapacity for message"); - message.appendSlice(msg[0..]) catch @panic("could not appendSlice for message"); - self.message = message; - self.time = time.DateTime.now(); - self.level = level; - self.channel.send(self) catch @panic("could not send to channel"); + return buf; } +}; + +/// An instance of `Entry` that logs directly to std.debug.print, instead of sending to channel.
+pub const DirectPrintEntry = struct { + const builtin = @import("builtin"); + + log_level: Level, + scope: ?[]const u8, + allocator: std.mem.Allocator, + log_msg: std.ArrayList(u8), - pub fn format( - self: *const Self, - _: []const u8, - _: std.fmt.FormatOptions, - writer: anytype, - ) !void { - // default formatting style - try logfmt.formatter(self, writer); + const Self = @This(); + + pub fn init( + allocator: std.mem.Allocator, + scope: ?[]const u8, + log_level: Level, + ) Self { + return .{ + .allocator = allocator, + .scope = scope, + .log_level = log_level, + .log_msg = std.ArrayList(u8).init(allocator), + }; } - pub fn custom_format(self: *const Self, formatter: anytype, writer: anytype) !void { - try formatter(self, writer); + pub fn deinit(self: *Self) void { + self.log_msg.deinit(); } -}; -const A = enum(u8) { - some_enum_variant, -}; + pub fn field(self: *Self, name: []const u8, value: anytype) *Self { + logfmt.fmtField(self.log_msg.writer(), name, value); + return self; + } -test "trace.entry: should info log correctly" { - var logger = Logger.init(testing.allocator, Level.info); - defer logger.deinit(); - var entry = StandardEntry.init(testing.allocator, logger.standard.channel, .debug); - defer entry.deinit(); - - const anull: ?u8 = null; - - entry - .field("some_val", true) - .field("enum_field", A.some_enum_variant) - .field("name", "a-mod") - .field("elapsed", @as(i48, 135133340042)) - .field("possible_value", anull) - .logf(.info, "hello, {s}", .{"world!"}); -} + pub fn log(self: *Self, comptime msg: []const u8) void { + defer self.deinit(); + const log_msg = logfmt.LogMsg{ + .level = self.log_level, + .maybe_scope = self.scope, + .maybe_msg = msg, + .maybe_fields = null, + .maybe_fmt = null, + }; + + const writer = self.log_msg.writer(); + logfmt.writeLog(writer, log_msg) catch @panic("Failed to write log"); + std.debug.print("{s}", .{self.log_msg.items}); + } + + pub fn logf(self: *Self, comptime fmt: []const u8, args: anytype) void { + defer self.deinit(); + // Format message. 
+ var fmt_msg = std.ArrayList(u8).initCapacity(self.allocator, 256) catch @panic("could not initCapacity for message"); + defer fmt_msg.deinit(); + logfmt.fmtMsg(fmt_msg.writer(), fmt, args); + + const log_msg = logfmt.LogMsg{ + .level = self.log_level, + .maybe_scope = self.scope, + .maybe_msg = null, + .maybe_fields = null, + .maybe_fmt = fmt_msg.items, + }; + + const writer = self.log_msg.writer(); + logfmt.writeLog(writer, log_msg) catch @panic("Failed to write log"); + std.debug.print("{s}", .{self.log_msg.items}); + } +}; diff --git a/src/trace/field.zig b/src/trace/field.zig deleted file mode 100644 index 3050b6007..000000000 --- a/src/trace/field.zig +++ /dev/null @@ -1,155 +0,0 @@ -const std = @import("std"); -const time = @import("../time/time.zig"); -const Allocator = std.mem.Allocator; - -pub const Field = struct { - name: []const u8, - value: Value, - - const Self = @This(); - - pub fn init(allocator: std.mem.Allocator, name: []const u8, value: anytype) Self { - return Self{ - .name = name, - .value = Value.init(allocator, value), - }; - } - - pub fn deinit(self: *Self, allocator: std.mem.Allocator) void { - self.value.deinit(allocator); - } - - pub fn custom_format(self: *const Self, writer: anytype) void { - std.fmt.format(writer, "{s}=", .{self.name}) catch @panic("could not format"); - self.value.format_as_str(writer); - std.fmt.format(writer, " ", .{}) catch @panic("could not format"); - } -}; - -pub const Value = union(enum(u8)) { - null: void, - string: []const u8, - bool: bool, - float: f64, - int: i64, - uint: u64, - enumm: [:0]const u8, - - const Self = @This(); - - pub fn init(allocator: std.mem.Allocator, val: anytype) Self { - return valToValue(allocator, val); - } - - fn format_as_str(self: *const Self, writer: anytype) void { - switch (self.*) { - .string => |str| { - std.fmt.format(writer, "\"{s}\"", .{str}) catch unreachable; - }, - .bool => |b| { - if (b) { - std.fmt.format(writer, "true", .{}) catch unreachable; - } else { - std.fmt.format(writer, "false", .{}) catch unreachable; - } - }, - .uint => |u| { - std.fmt.format(writer, "{d}", .{u}) catch unreachable; - }, - .int => |i| { - std.fmt.format(writer, "{d}", .{i}) catch unreachable; - }, - .float => |f| { - std.fmt.format(writer, "{d}", .{f}) catch unreachable; - }, - .null => { - std.fmt.format(writer, "null", .{}) catch unreachable; - }, - .enumm => |e| { - std.fmt.format(writer, "{s}", .{e}) catch unreachable; - }, - } - } - - pub fn deinit(self: *Self, allocator: std.mem.Allocator) void { - switch (self.*) { - Value.string => |str| { - allocator.free(str); - }, - else => {}, - } - } -}; - -fn valToValue(allocator: Allocator, val: anytype) Value { - switch (@typeInfo(@TypeOf(val))) { - .Enum => |_| { - return .{ .enumm = @tagName(val) }; - }, - .EnumLiteral => { - return .{ .enumm = @tagName(val) }; - }, - .Bool => return .{ .bool = val }, - .Optional => |info| { - _ = info; - if (val) |v| { - return valToValue(allocator, v); - } - return Value.null; - }, - .Pointer => |info| { - switch (info.size) { - .One => { - const inner_child_type = switch (@typeInfo(info.child)) { - .Array => |a| a.child, - else => unreachable, - }; - const inner_child_len = switch (@typeInfo(info.child)) { - .Array => |a| a.len, - else => unreachable, - }; - if (inner_child_type == u8) { - const str = allocator.alloc(u8, inner_child_len) catch unreachable; - @memcpy(str, val[0..]); - return .{ .string = str }; - } else { - @compileError("┓\n|\n|--> Invalid field type: can only create value for []u8, not type '" ++ 
@typeName(@TypeOf(val)) ++ "'\n\n"); - } - }, - .Slice => { - if (@typeInfo(info.child).Int.bits == 8) { - const str = allocator.alloc(u8, val.len) catch unreachable; - @memcpy(str, val[0..]); - return .{ .string = str }; - } else { - @compileError("┓\n|\n|--> Invalid field type: can only create value for []u8, not type '" ++ @typeName(@TypeOf(val)) ++ "'\n\n"); - } - }, - else => {}, - } - }, - .Float => |info| { - _ = info; - return .{ .float = @as(f64, val) }; - }, - .Int => |info| { - switch (info.signedness) { - .unsigned => { - return .{ .uint = @as(u64, val) }; - }, - .signed => { - return .{ .int = @as(i64, val) }; - }, - } - }, - .ComptimeInt => { - return .{ .int = @as(i64, val) }; - }, - .ComptimeFloat => { - return .{ .float = @as(f64, val) }; - }, - else => {}, - } - - @compileError("┓\n|\n|--> Invalid field type: cannot add field of type'" ++ @typeName(@TypeOf(val)) ++ "' to log entry\n\n"); -} diff --git a/src/trace/level.zig b/src/trace/level.zig index 7e6361947..3a52f9b0a 100644 --- a/src/trace/level.zig +++ b/src/trace/level.zig @@ -9,6 +9,8 @@ pub const Level = enum { info, /// Debug: messages only useful for debugging. debug, + /// Trace: fine-grained messages that track execution flow. + trace, /// Returns a string literal of the given level in full text form. pub fn asText(self: Level) []const u8 { @@ -17,6 +19,7 @@ pub const Level = enum { .warn => "warning", .info => "info", .debug => "debug", + .trace => "trace", }; } }; diff --git a/src/trace/lib.zig b/src/trace/lib.zig index 925ee3181..0850e3d1d 100644 --- a/src/trace/lib.zig +++ b/src/trace/lib.zig @@ -1,9 +1,9 @@ -pub const entry = @import("entry.zig"); -pub const field = @import("field.zig"); -pub const level = @import("level.zig"); pub const log = @import("log.zig"); +pub const level = @import("level.zig"); pub const logfmt = @import("logfmt.zig"); +pub const entry = @import("entry.zig"); pub const Logger = log.Logger; pub const Level = level.Level; -pub const TestLogger = log.TestLogger; +pub const DirectPrintLogger = log.DirectPrintLogger; +pub const ChannelPrintLogger = log.ChannelPrintLogger; diff --git a/src/trace/log.zig b/src/trace/log.zig index 2c32256ea..4136f7a91 100644 --- a/src/trace/log.zig +++ b/src/trace/log.zig @@ -1,250 +1,248 @@ const std = @import("std"); -const entry = @import("entry.zig"); const Level = @import("level.zig").Level; +const Entry = @import("entry.zig").Entry; +const ChannelPrintEntry = @import("entry.zig").ChannelPrintEntry; +const DirectPrintEntry = @import("entry.zig").DirectPrintEntry; const logfmt = @import("logfmt.zig"); -const Entry = entry.Entry; -const StandardEntry = entry.StandardEntry; -const testing = std.testing; -const AtomicBool = std.atomic.Value(bool); +const sig = @import("../sig.zig"); +// TODO Improve import. const Channel = @import("../sync/channel.zig").Channel; +const Allocator = std.mem.Allocator; +const AtomicBool = std.atomic.Value(bool); +const RecycleFBA = sig.utils.allocators.RecycleFBA; -const INITIAL_ENTRIES_CHANNEL_SIZE: usize = 1024; - -pub const default_logger: *Logger = &global; -var global: Logger = .{ .standard = undefined }; +pub const Config = struct { + max_level: Level = Level.debug, + allocator: std.mem.Allocator, + /// Maximum memory that logger can use. 
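+    /// Required by ChannelPrintLogger: its `init` returns error.MaxBufferNotSet when this is null,
+    /// and the RecycleFBA-backed `log_allocator` is capped at this size.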
+    max_buffer: ?u64 = null,
+};

-pub const Logger = union(enum) {
-    standard: *StandardErrLogger,
-    test_logger: TestLogger,
+const LogKind = enum {
+    channel_print,
+    direct_print,
     noop,
+};

-    const Self = @This();
-
-    pub fn init(allocator: std.mem.Allocator, max_level: Level) Self {
-        return .{ .standard = StandardErrLogger.init(allocator, max_level) };
-    }
-
-    pub fn spawn(self: Self) void {
-        switch (self) {
-            .standard => |logger| {
-                logger.spawn();
-            },
-            .noop, .test_logger => {},
-        }
-    }
-
-    pub fn deinit(self: Self) void {
-        switch (self) {
-            .standard => |logger| {
-                logger.deinit();
-            },
-            .noop, .test_logger => {},
+/// A ScopedLogger is backed by one of:
+/// - a ChannelPrintLogger
+/// - a DirectPrintLogger
+/// or is a noop.
+pub fn ScopedLogger(comptime scope: ?[]const u8) type {
+    return union(LogKind) {
+        channel_print: *ChannelPrintLogger,
+        direct_print: *DirectPrintLogger,
+        noop: void,
+
+        const Self = @This();
+
+        /// Can be used in tests to minimize the amount of logging during tests.
+        pub const TEST_DEFAULT_LEVEL: Level = .warn;
+
+        pub fn unscoped(self: Self) Logger {
+            return switch (self) {
+                .channel_print => |logger| .{ .channel_print = logger },
+                .direct_print => |logger| .{ .direct_print = logger },
+                .noop => .noop,
+            };
         }
-    }

-    pub fn field(self: Self, name: []const u8, value: anytype) Entry {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                return logger.field(name, value);
-            },
-            .noop => {
-                return .noop;
-            },
+        pub fn withScope(self: Self, comptime new_scope: []const u8) ScopedLogger(new_scope) {
+            return switch (self) {
+                .channel_print => |logger| .{ .channel_print = logger },
+                .direct_print => |logger| .{ .direct_print = logger },
+                .noop => .noop,
+            };
         }
-    }

-    pub fn infof(self: Self, comptime fmt: []const u8, args: anytype) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.infof(fmt, args);
-            },
-            .noop => {},
+        pub fn deinit(self: *const Self) void {
+            switch (self.*) {
+                .channel_print => |logger| logger.deinit(),
+                .direct_print, .noop => {},
+            }
         }
-    }

-    pub fn debugf(self: Self, comptime fmt: []const u8, args: anytype) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.debugf(fmt, args);
-            },
-            .noop => {},
+        pub fn err(self: *const Self) Entry {
+            return switch (self.*) {
+                .noop => .noop,
+                inline else => |impl| impl.err(scope),
+            };
         }
-    }

-    pub fn warnf(self: Self, comptime fmt: []const u8, args: anytype) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.warnf(fmt, args);
-            },
-            .noop => {},
+        pub fn warn(self: *const Self) Entry {
+            return switch (self.*) {
+                .noop => .noop,
+                inline else => |impl| impl.warn(scope),
+            };
         }
-    }

-    pub fn errf(self: Self, comptime fmt: []const u8, args: anytype) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.errf(fmt, args);
-            },
-            .noop => {},
+        pub fn info(self: *const Self) Entry {
+            return switch (self.*) {
+                .noop => .noop,
+                inline else => |impl| impl.info(scope),
+            };
         }
-    }

-    pub fn logf(self: Self, level: Level, comptime fmt: []const u8, args: anytype) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.logf(level, fmt, args);
-            },
-            .noop => {},
+        pub fn debug(self: *const Self) Entry {
+            return switch (self.*) {
+                .noop => .noop,
+                inline else => |impl| impl.debug(scope),
+            };
         }
-    }

-    pub fn info(self: Self, msg: []const u8) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.info(msg);
-            },
-            .noop => {},
+        pub fn trace(self: *const Self) Entry {
+            return switch (self.*) {
+                .noop => .noop,
+                inline else => |impl| impl.trace(scope),
+            };
         }
-    }

-    pub fn debug(self: Self, msg: []const u8) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.debug(msg);
-            },
-            .noop => {},
-        }
-    }
-
-    pub fn warn(self: Self, msg: []const u8) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.warn(msg);
-            },
-            .noop => {},
-        }
-    }
-
-    pub fn err(self: Self, msg: []const u8) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.err(msg);
-            },
-            .noop => {},
+        pub fn log(self: Self, level: Level, comptime message: []const u8) void {
+            switch (self) {
+                .noop => {},
+                inline else => |*impl| impl.*.log(scope, level, message),
+            }
         }
-    }

-    pub fn log(self: Self, level: Level, msg: []const u8) void {
-        switch (self) {
-            inline .standard, .test_logger => |logger| {
-                logger.log(level, msg);
-            },
-            .noop => {},
+        pub fn logf(self: Self, level: Level, comptime fmt: []const u8, args: anytype) void {
+            switch (self) {
+                .noop => {},
+                inline else => |*impl| impl.*.logf(scope, level, fmt, args),
+            }
         }
-    }
+    };
+}

-    /// Can be used in tests to minimize the amount of logging during tests.
-    pub const TEST_DEFAULT_LEVEL: Level = .warn;
-};
+pub const Logger = ScopedLogger(null);

-pub const StandardErrLogger = struct {
-    allocator: std.mem.Allocator,
-    /// Messages more granular than this will not be logged
+/// A logger implementation that formats messages on the calling thread and
+/// sends them over a channel to a dedicated thread, which prints them to stderr.
+pub const ChannelPrintLogger = struct {
     max_level: Level,
-    exit_sig: AtomicBool,
+    exit: std.atomic.Value(bool),
+    allocator: Allocator,
+    log_allocator: Allocator,
+    log_allocator_state: *RecycleFBA(.{}),
+    max_buffer: u64,
+    channel: *Channel(logfmt.LogMsg),
     handle: ?std.Thread,
-    channel: *Channel(*StandardEntry),

     const Self = @This();

-    pub fn init(allocator: std.mem.Allocator, max_level: Level) *Self {
-        const self = allocator.create(Self) catch @panic("could not allocator.create Logger");
+    pub fn init(config: Config) !*Self {
+        const max_buffer = config.max_buffer orelse return error.MaxBufferNotSet;
+        const recycle_fba = try config.allocator.create(RecycleFBA(.{}));
+        recycle_fba.* = try RecycleFBA(.{}).init(config.allocator, max_buffer);
+        const self = try config.allocator.create(Self);
         self.* = .{
-            .allocator = allocator,
-            .max_level = max_level,
-            .exit_sig = AtomicBool.init(false),
+            .allocator = config.allocator,
+            .log_allocator = recycle_fba.allocator(),
+            .log_allocator_state = recycle_fba,
+            .max_buffer = max_buffer,
+            .exit = AtomicBool.init(false),
+            .max_level = config.max_level,
             .handle = null,
-            .channel = Channel(*StandardEntry).create(allocator) catch
-                @panic("could not allocate StandardEntry channel"),
+            .channel = Channel(logfmt.LogMsg).create(config.allocator) catch
+                @panic("could not allocate LogMsg channel"),
         };
+        self.handle = try std.Thread.spawn(.{}, run, .{self});
         return self;
     }

-    pub fn spawn(self: *Self) void {
-        self.handle = std.Thread.spawn(.{}, StandardErrLogger.run, .{self}) catch @panic("could not spawn Logger");
-    }
-
     pub fn deinit(self: *Self) void {
-        if (self.handle) |handle| {
-            self.exit_sig.store(true, .seq_cst);
+        if (self.handle) |*handle| {
+            // Give the worker thread a moment to drain queued messages before signaling exit.
+            std.time.sleep(std.time.ns_per_ms * 5);
+            self.exit.store(true, .seq_cst);
             handle.join();
         }
         self.channel.deinit();
+        self.log_allocator_state.deinit();
         self.allocator.destroy(self.channel);
+        self.allocator.destroy(self.log_allocator_state);
         self.allocator.destroy(self);
     }

-    fn run(self: *Self)
void { - const sink = StdErrSink{}; - - while (!self.exit_sig.load(.seq_cst)) { - std.time.sleep(std.time.ns_per_ms * 5); - - while (self.channel.receive()) |e| { - sink.consumeEntry(e); - e.deinit(); - } - } - } - - pub fn field(self: *Self, name: []const u8, value: anytype) Entry { - var e = Entry.init(self.allocator, self.channel, self.max_level); - return e.field(name, value); + pub fn logger(self: *Self) Logger { + return .{ .channel_print = self }; } - pub fn info(self: *Self, msg: []const u8) void { - self.log(.info, msg); + pub fn scopedLogger(self: *Self, comptime new_scope: anytype) ScopedLogger(new_scope) { + return .{ .channel_print = self }; } - pub fn debug(self: *Self, msg: []const u8) void { - self.log(.debug, msg); - } - - pub fn warn(self: *Self, msg: []const u8) void { - self.log(.warn, msg); + pub fn run(self: *Self) void { + while (!self.exit.load(.acquire)) { + while (self.channel.receive()) |message| { + { + const writer = std.io.getStdErr().writer(); + std.debug.lockStdErr(); + defer std.debug.unlockStdErr(); + logfmt.writeLog(writer, message) catch {}; + } + if (message.maybe_fields) |fields| { + self.log_allocator.free(fields); + } + if (message.maybe_fmt) |fmt_msg| { + self.log_allocator.free(fmt_msg); + } + } + } } - pub fn err(self: *Self, msg: []const u8) void { - self.log(.err, msg); + pub fn err(self: *Self, comptime maybe_scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.err)) { + return Entry{ .channel_print = ChannelPrintEntry.init(self.log_allocator, maybe_scope, self.channel, Level.err) }; + } + return .noop; } - pub fn infof(self: *Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.info, fmt, args); + pub fn warn(self: *Self, comptime maybe_scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.warn)) { + return Entry{ .channel_print = ChannelPrintEntry.init(self.log_allocator, maybe_scope, self.channel, Level.warn) }; + } + return .noop; } - pub fn debugf(self: *Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.debug, fmt, args); + pub fn info(self: *Self, comptime maybe_scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.info)) { + return Entry{ .channel_print = ChannelPrintEntry.init(self.log_allocator, maybe_scope, self.channel, Level.info) }; + } + return .noop; } - pub fn warnf(self: *Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.warn, fmt, args); + pub fn debug(self: *Self, comptime maybe_scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.debug)) { + return Entry{ .channel_print = ChannelPrintEntry.init(self.log_allocator, maybe_scope, self.channel, Level.debug) }; + } + return .noop; } - pub fn errf(self: *Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.err, fmt, args); + pub fn trace(self: *Self, comptime maybe_scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.trace)) { + return Entry{ .channel_print = ChannelPrintEntry.init(self.log_allocator, maybe_scope, self.channel, Level.trace) }; + } + return .noop; } - pub fn log(self: *Self, level: Level, msg: []const u8) void { + pub fn log(self: *Self, comptime scope: ?[]const u8, level: Level, comptime message: []const u8) void { if (@intFromEnum(self.max_level) >= @intFromEnum(level)) { - var e = Entry.init(self.allocator, self.channel, self.max_level); - e.log(level, msg); + switch (level) { + .err => self.err(scope).log(message), + .warn => 
self.warn(scope).log(message), + .info => self.info(scope).log(message), + .debug => self.debug(scope).log(message), + .trace => self.trace(scope).log(message), + } } } - pub fn logf(self: *Self, level: Level, comptime fmt: []const u8, args: anytype) void { + pub fn logf(self: *Self, comptime scope: ?[]const u8, level: Level, comptime fmt: []const u8, args: anytype) void { if (@intFromEnum(self.max_level) >= @intFromEnum(level)) { - var e = Entry.init(self.allocator, self.channel, self.max_level); - e.logf(level, fmt, args); + switch (level) { + .err => self.err(scope).logf(fmt, args), + .warn => self.warn(scope).logf(fmt, args), + .info => self.info(scope).logf(fmt, args), + .debug => self.debug(scope).logf(fmt, args), + .trace => self.trace(scope).logf(fmt, args), + } } } }; @@ -252,115 +250,227 @@ pub const StandardErrLogger = struct { /// Directly prints instead of running in a separate thread. This handles issues during tests /// where some log messages never get logged because the logger is deinitialized before the /// logging thread picks up the log message. -pub const TestLogger = struct { - max_level: Level = .warn, +pub const DirectPrintLogger = struct { + const builtin = @import("builtin"); + max_level: Level, + allocator: Allocator, const Self = @This(); - pub const default = TestLogger{}; - - pub fn logger(self: Self) Logger { - return .{ .test_logger = self }; - } - - pub fn field(_: Self, _: []const u8, _: anytype) Entry { - @panic("`Logger.field` not supported"); - } - - pub fn info(self: Self, msg: []const u8) void { - self.log(.info, msg); + pub fn init(allocator: std.mem.Allocator, max_level: Level) Self { + return .{ + .max_level = max_level, + .allocator = allocator, + }; } - pub fn debug(self: Self, msg: []const u8) void { - self.log(.debug, msg); + pub fn logger(self: *Self) Logger { + return .{ .direct_print = self }; } - pub fn warn(self: Self, msg: []const u8) void { - self.log(.warn, msg); + pub fn scopedLogger(self: *Self, comptime new_scope: anytype) ScopedLogger(new_scope) { + return .{ .direct_print = self }; } - pub fn err(self: Self, msg: []const u8) void { - self.log(.err, msg); + pub fn err(self: *Self, comptime scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.err)) { + return Entry{ .direct_print = DirectPrintEntry.init(self.allocator, scope, Level.err) }; + } + return .noop; } - pub fn infof(self: Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.info, fmt, args); + pub fn warn(self: *Self, comptime scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.warn)) { + return Entry{ .direct_print = DirectPrintEntry.init(self.allocator, scope, Level.warn) }; + } + return .noop; } - pub fn debugf(self: Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.debug, fmt, args); + pub fn info(self: *Self, comptime scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.info)) { + return Entry{ .direct_print = DirectPrintEntry.init(self.allocator, scope, Level.info) }; + } + return .noop; } - pub fn warnf(self: Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.warn, fmt, args); + pub fn debug(self: *Self, comptime scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.debug)) { + return Entry{ .direct_print = DirectPrintEntry.init(self.allocator, scope, Level.debug) }; + } + return .noop; } - pub fn errf(self: Self, comptime fmt: []const u8, args: anytype) void { - self.logf(.err, fmt, args); + 
pub fn trace(self: *Self, comptime scope: ?[]const u8) Entry { + if (@intFromEnum(self.max_level) >= @intFromEnum(Level.trace)) { + return Entry{ .direct_print = DirectPrintEntry.init(self.allocator, scope, Level.trace) }; + } + return .noop; } - pub fn log(self: Self, level: Level, msg: []const u8) void { + pub fn log(self: *Self, comptime scope: ?[]const u8, level: Level, comptime message: []const u8) void { if (@intFromEnum(self.max_level) >= @intFromEnum(level)) { - std.debug.print("{s}\n", .{msg}); + switch (level) { + .err => self.err(scope).log(message), + .warn => self.warn(scope).log(message), + .info => self.info(scope).log(message), + .debug => self.debug(scope).log(message), + .trace => self.trace(scope).log(message), + } } } - pub fn logf(self: Self, level: Level, comptime fmt: []const u8, args: anytype) void { + pub fn logf(self: *Self, comptime scope: ?[]const u8, level: Level, comptime fmt: []const u8, args: anytype) void { if (@intFromEnum(self.max_level) >= @intFromEnum(level)) { - std.debug.print(fmt ++ "\n", args); + switch (level) { + .err => self.err(scope).logf(fmt, args), + .warn => self.warn(scope).logf(fmt, args), + .info => self.info(scope).logf(fmt, args), + .debug => self.debug(scope).logf(fmt, args), + .trace => self.trace(scope).logf(fmt, args), + } } } }; -pub const StdErrSink = struct { - const Self = @This(); +test "direct" { + const allocator = std.testing.allocator; + const std_logger = ChannelPrintLogger.init(.{ + .allocator = allocator, + .max_level = Level.err, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + defer std_logger.deinit(); + + const logger = std_logger.logger(); + logger.log(.warn, "warn"); + logger.log(.info, "info"); + logger.log(.debug, "debug"); + + logger.logf(.warn, "{s}", .{"warn"}); + logger.logf(.info, "{s}", .{"info"}); + logger.logf(.debug, "{s}", .{"debug"}); +} - pub fn consumeEntry(_: Self, e: *StandardEntry) void { - const std_err_writer = std.io.getStdErr().writer(); - std.debug.lockStdErr(); - defer std.debug.unlockStdErr(); +test "trace_ngswitch" { + const StuffChild = struct { + const StuffChild = @This(); + logger: ScopedLogger(@typeName(StuffChild)), - logfmt.formatter(e, std_err_writer) catch unreachable; - } -}; + pub fn init(logger: *const Logger) StuffChild { + return .{ .logger = logger.withScope(@typeName(StuffChild)) }; + } + + pub fn doStuffDetails(self: *StuffChild) void { + self.logger.info().log("doing stuff child"); + } + }; + + const Stuff = struct { + const Stuff = @This(); + logger: ScopedLogger(@typeName(Stuff)), + + pub fn init(logger: Logger) Stuff { + return .{ .logger = logger.withScope(@typeName(Stuff)) }; + } + + pub fn doStuff(self: *Stuff) void { + self.logger.info().log("doing stuff parent"); + const logger = self.logger.unscoped(); + var child = StuffChild.init(&logger); + child.doStuffDetails(); + } + }; + + const allocator = std.testing.allocator; + + const std_logger = ChannelPrintLogger.init(.{ + .allocator = allocator, + .max_level = Level.warn, + .max_buffer = 1 << 30, + }) catch @panic("Logger init failed"); + defer std_logger.deinit(); + + const logger = std_logger.logger(); + + // Below logs out the following: + // trace_ng.log.test.trace_ng: scope switch.Stuff] time=2024-09-11T06:49:02Z level=info doing stuff parent + // [trace_ng.log.test.trace_ng: scope switch.StuffChild] time=2024-09-11T06:49:02Z level=info doing stuff child + // time=2024-09-11T06:49:02Z level=info Log from main + var stuff = Stuff.init(logger); + stuff.doStuff(); + // Log using the concrete 
instance also works.
+    std_logger.info(null).log("Log from main");
+}
+
+test "reclaim" {
+    const allocator = std.testing.allocator;
+
+    var std_logger = ChannelPrintLogger.init(.{
+        .allocator = allocator,
+        .max_level = Level.warn,
+        .max_buffer = 4048,
+    }) catch @panic("Logger init failed");

-test "trace.logger: works" {
-    var logger: Logger = .noop; // uncomment below to run visual test
-    // var logger = Logger.init(testing.allocator, .info);
-    logger.spawn();
-    defer logger.deinit();
+    defer std_logger.deinit();

-    logger.field("elapsed", 4245).debugf("request with id {s} succeeded", .{"abcd1234"});
+    const logger = std_logger.logger();

-    logger.field("kind", .some_enum_kind).infof("operation was done", .{});
-    logger.field("authorized", false).warnf("api call received at {d} not authorized", .{10004});
-    logger.field("error", "IOError").errf("api call received at {d} broke the system!", .{10005});
+    // Ensure memory can be continuously requested from recycle_fba without getting stuck.
+    for (0..25) |_| {
+        logger.info()
+            .field("f_agent", "Firefox")
+            .field("f_version", 2.0)
+            .log("Logging with logWithFields");
+    }
+}
+
+test "level" {
+    const allocator = std.testing.allocator;

-    std.time.sleep(std.time.ns_per_ms * 100);
+    var std_logger = ChannelPrintLogger.init(.{
+        .allocator = allocator,
+        .max_level = Level.err,
+        .max_buffer = 1 << 30,
+    }) catch @panic("Logger init failed");

-    logger.field("elapsed", 4245).debug("request with id succeeded");
-    logger.field("kind", .some_enum_kind).info("operation was done");
-    logger.field("authorized", false).warn("api call received at not authorized");
-    logger.field("error", "IOError").err("api call received broke the system!");
+    defer std_logger.deinit();

-    const s: []const u8 = "t12312";
+    const logger = std_logger.logger();
+
+    // None of the following should log, since they are more verbose than the configured max_level.
     logger
-        .field("tmp1", 123)
-        .field("tmp2", 456)
-        .field("tmp2", s)
-        .info("new push message");
+        .warn()
+        .log("Logging with log");

-    std.time.sleep(std.time.ns_per_ms * 100);
+    logger
+        .info()
+        .logf(
+            "{s}",
+            .{"Logging with logf"},
+        );
+
+    logger.info()
+        .field("f_agent", "Firefox")
+        .field("f_version", "2.0")
+        .field("f_version", "3.0")
+        .log("Logging with logWithFields");
+
+    logger.trace()
+        .field("f_agent", "Firefox")
+        .field("f_version", 120)
+        .field("f_local", "en")
+        .field("f_stock", "nvidia")
+        .logf("{s}", .{"Logging with logfWithFields"});
 }

-test "trace.logger: Logger is noop when configured as such" {
-    var logger: Logger = .noop;
-    defer logger.deinit();
-    logger.spawn();
+test "test_logger" {
+    // TODO Replace this with a logger that is configurable with a writer
+    // That way, the logger can be configured to write to a file, stdout or an array list.
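+    // For now this exercises DirectPrintLogger, which prints synchronously via
+    // std.debug.print, so no sleeps or channel draining are needed here.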
+    const allocator = std.testing.allocator;
+
+    var test_logger = DirectPrintLogger.init(allocator, Level.warn);

-    logger.info("should not log");
-    logger.field("key", "value").info("not logging");
-    logger.err("should not log also");
+    const logger = test_logger.logger();

-    std.time.sleep(std.time.ms_per_s * 1);
+    logger.log(.info, "Logging with log");
 }
diff --git a/src/trace/logfmt.zig b/src/trace/logfmt.zig
index 4188f5b59..1b5f2be39 100644
--- a/src/trace/logfmt.zig
+++ b/src/trace/logfmt.zig
@@ -1,16 +1,68 @@
 const std = @import("std");
-const Entry = @import("./entry.zig").StandardEntry;
+const time = @import("../time/time.zig");
+const Level = @import("level.zig").Level;
+const sig = @import("../sig.zig");
+
+pub const LogMsg = struct {
+    level: Level,
+    maybe_scope: ?[]const u8 = null,
+    maybe_msg: ?[]const u8 = null,
+    maybe_fields: ?[]const u8 = null,
+    maybe_fmt: ?[]const u8 = null,
+};
+
+pub fn fmtMsg(writer: anytype, comptime maybe_fmt: ?[]const u8, args: anytype) void {
+    if (maybe_fmt) |fmt| {
+        std.fmt.format(writer, fmt, args) catch @panic("could not format");
+    }
+}
+
+pub fn fmtField(writer: anytype, key: []const u8, value: anytype) void {
+    std.fmt.format(writer, fieldFmtString(@TypeOf(value)), .{ key, value }) catch return;
+}
+
+/// Return the number of bytes needed to write the field.
+pub fn countField(key: []const u8, value: anytype) u64 {
+    return std.fmt.count(fieldFmtString(@TypeOf(value)), .{ key, value });
+}
+
+/// Return the format string for the type when used as a value in a field.
+fn fieldFmtString(comptime Value: type) []const u8 {
+    return switch (@typeInfo(Value)) {
+        // Assume arrays and slices of u8 are strings.
+        .Pointer => |ptr| if (ptr.size == .One)
+            fieldFmtString(ptr.child)
+        else if (ptr.size == .Slice and ptr.child == u8)
+            "{s}={s} "
+        else
+            "{s}={any} ",
+        .Array => |arr| if (arr.child == u8) "{s}={s} " else "{s}={any} ",
+        .Int, .ComptimeInt, .Float, .ComptimeFloat => "{s}={} ",
+        else => "{s}={any} ",
+    };
+}
+
+pub fn writeLog(writer: anytype, message: LogMsg) !void {
+    if (message.maybe_scope) |scope| {
+        try std.fmt.format(writer, "[{s}] ", .{scope});
+    }

-pub fn formatter(e: *const Entry, writer: anytype) !void {
     // format time as ISO8601
     const utc_format = "YYYY-MM-DDTHH:mm:ss";
+    const now = time.DateTime.now();
     try std.fmt.format(writer, "time=", .{});
-    try e.time.format(utc_format, .{}, writer);
+    try now.format(utc_format, .{}, writer);
     try std.fmt.format(writer, "Z ", .{});
-    try std.fmt.format(writer, "level={s} ", .{e.level.asText()});
+    try std.fmt.format(writer, "level={s} ", .{message.level.asText()});

-    for (e.fields.items) |f| {
-        f.custom_format(writer);
+    if (message.maybe_fields) |kv| {
+        try std.fmt.format(writer, "{s}", .{kv});
+    }
+
+    if (message.maybe_msg) |msg| {
+        try std.fmt.format(writer, "{s}\n", .{msg});
+    }
+    if (message.maybe_fmt) |fmt| {
+        try std.fmt.format(writer, "{s}\n", .{fmt});
     }
-    try std.fmt.format(writer, " {s}\n", .{e.message.items});
 }
diff --git a/src/transaction_sender/leader_info.zig b/src/transaction_sender/leader_info.zig
index 455afc5d4..2f8c100a7 100644
--- a/src/transaction_sender/leader_info.zig
+++ b/src/transaction_sender/leader_info.zig
@@ -95,7 +95,7 @@ pub const LeaderInfo = struct {
             self.leader_schedule_cache.epoch_schedule.getEpochAndSlotIndex(slot);

         const leader_schedule = self.getLeaderSchedule(slot) catch |e| {
-            self.logger.errf("Error getting leader schedule via rpc for slot {}: {}", .{ slot, e });
+            self.logger.err().logf("Error getting leader schedule via rpc for slot {}: {}", .{
slot, e }); return e; }; diff --git a/src/transaction_sender/service.zig b/src/transaction_sender/service.zig index 05aa49286..bd0dab7c1 100644 --- a/src/transaction_sender/service.zig +++ b/src/transaction_sender/service.zig @@ -144,7 +144,7 @@ pub const Service = struct { last_batch_sent = Instant.now(); self.transaction_pool.addTransactions(transaction_batch.values()) catch { - self.logger.warn("Transaction pool is full, dropping transactions"); + self.logger.warn().log("Transaction pool is full, dropping transactions"); }; transaction_batch.clearRetainingCapacity(); @@ -281,7 +281,7 @@ pub const Service = struct { /// Sends transactions to the next N leaders TPU addresses fn sendTransactions(self: *Service, transactions: []const TransactionInfo, leader_addresses: std.ArrayList(SocketAddr)) !void { if (leader_addresses.items.len == 0) { - self.logger.warn("No leader addresses found"); + self.logger.warn().log("No leader addresses found"); return; } @@ -365,7 +365,7 @@ pub const Stats = struct { } pub fn log(self: *const Stats, logger: Logger) void { - logger.infof("transaction-sender: {} received, {} pending, {} rooted, {} failed, {} expired, {} exceeded_retries", .{ + logger.info().logf("transaction-sender: {} received, {} pending, {} rooted, {} failed, {} expired, {} exceeded_retries", .{ self.transactions_received_count.get(), self.transactions_pending.get(), self.transactions_rooted_count.get(), diff --git a/src/utils/allocators.zig b/src/utils/allocators.zig index 88b8ab657..a3a582768 100644 --- a/src/utils/allocators.zig +++ b/src/utils/allocators.zig @@ -367,7 +367,7 @@ pub const DiskMemoryAllocator = struct { } fn logFailure(self: Self, err: anyerror, file_name: []const u8) void { - self.logger.errf("Disk Memory Allocator error: {s}, filepath: {s}", .{ + self.logger.err().logf("Disk Memory Allocator error: {s}, filepath: {s}", .{ @errorName(err), sig.utils.fmt.tryRealPath(self.dir, file_name), }); } diff --git a/src/utils/service.zig b/src/utils/service.zig index 6cf8d0783..be3a66c2b 100644 --- a/src/utils/service.zig +++ b/src/utils/service.zig @@ -8,7 +8,6 @@ const ArrayListUnmanaged = std.ArrayListUnmanaged; const Atomic = std.atomic.Value; const Lazy = sig.utils.lazy.Lazy; -const Level = sig.trace.Level; const Logger = sig.trace.Logger; /// High level manager for long-running threads and the state @@ -147,11 +146,11 @@ pub const ServiceManager = struct { /// 2. Wait for threads to exit. /// 3. Deinit the shared state from those threads. 
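+    /// Progress is reported through `self.logger` at info level.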
pub fn deinit(self: *Self) void { - self.logger.infof("Cleaning up: {s}", .{self.name}); + self.logger.info().logf("Cleaning up: {s}", .{self.name}); self.join(); self.defers.deinit(); self.arena.deinit(); - self.logger.infof("Finished cleaning up: {s}", .{self.name}); + self.logger.info().logf("Finished cleaning up: {s}", .{self.name}); } }; @@ -200,7 +199,7 @@ pub fn runService( "thread {d}", .{std.Thread.getCurrentId()}, ); - logger.infof("Starting {s}", .{name}); + logger.info().logf("Starting {s}", .{name}); var timer = try std.time.Timer.start(); var last_iteration: u64 = 0; var num_oks: u64 = 0; @@ -214,25 +213,23 @@ pub fn runService( // identify result if (result) |_| num_oks += 1 else |_| num_errors += 1; - const handler, const num_events, const event_name, const level = if (result) |_| - .{ config.return_handler, num_oks, "return", Level.info } + const handler, const num_events, const event_name, const level_logger = if (result) |_| + .{ config.return_handler, num_oks, "return", logger.info() } else |_| - .{ config.error_handler, num_errors, "error", Level.err }; + .{ config.error_handler, num_errors, "error", logger.warn() }; // handle result if (handler.log_return) { - logger.logf(level, "{s} has {s}ed: {any}", .{ name, event_name, result }); + level_logger.logf("{s} has {s}ed: {any}", .{ name, event_name, result }); } if (handler.max_iterations) |max| if (num_events >= max) { if (handler.set_exit_on_completion) { - if (handler.log_exit) logger.logf( - level, + if (handler.log_exit) level_logger.logf( "Signaling exit due to {} {s}s from {s}", .{ num_events, event_name, name }, ); exit.store(true, .release); - } else if (handler.log_exit) logger.logf( - level, + } else if (handler.log_exit) level_logger.logf( "Exiting {s} due to {} {s}s", .{ name, num_events, event_name }, ); @@ -247,7 +244,7 @@ pub fn runService( )); } - logger.infof("Exiting {s} because the exit signal was received.", .{name}); + logger.info().logf("Exiting {s} because the exit signal was received.", .{name}); } /// Defer actions until later. diff --git a/src/utils/tar.zig b/src/utils/tar.zig index 7c282f59a..e46306096 100644 --- a/src/utils/tar.zig +++ b/src/utils/tar.zig @@ -78,7 +78,9 @@ pub fn parallelUntarToFileSystem( thread_pool.deinit(); } - logger.infof("using {d} threads to unpack snapshot", .{n_threads}); + logger + .info() + .logf("using {d} threads to unpack snapshot", .{n_threads}); const tasks = try UnTarTask.init(allocator, n_threads); defer allocator.free(tasks); @@ -148,7 +150,7 @@ pub fn parallelUntarToFileSystem( try reader.skipBytes(pad_len, .{}); const task_ptr = &tasks[UnTarTask.awaitAndAcquireFirstAvailableTask(tasks, 0)]; - task_ptr.result catch |err| logger.errf("UnTarTask encountered error: {s}", .{@errorName(err)}); + task_ptr.result catch |err| logger.err().logf("UnTarTask encountered error: {s}", .{@errorName(err)}); task_ptr.entry = .{ .allocator = allocator, .contents = contents, @@ -173,7 +175,7 @@ pub fn parallelUntarToFileSystem( // wait for all tasks for (tasks) |*task| { task.blockUntilCompletion(); - task.result catch |err| logger.errf("UnTarTask encountered error: {s}", .{@errorName(err)}); + task.result catch |err| logger.err().logf("UnTarTask encountered error: {s}", .{@errorName(err)}); } }
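For reviewers, a minimal end-to-end sketch of the API this diff introduces (not part of the patch). It assumes the `sig` module layout from src/trace/lib.zig; the import name and all values below are illustrative only:

    const std = @import("std");
    const sig = @import("sig"); // assumed module name
    const ChannelPrintLogger = sig.trace.ChannelPrintLogger;
    const Level = sig.trace.Level;

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();

        // Messages are formatted on the calling thread, sent over a channel,
        // and printed to stderr by the thread spawned in init().
        const std_logger = try ChannelPrintLogger.init(.{
            .allocator = gpa.allocator(),
            .max_level = Level.debug,
            .max_buffer = 1 << 20, // required: init fails with error.MaxBufferNotSet if null
        });
        defer std_logger.deinit();

        const logger = std_logger.logger();

        // Old API: logger.infof("loaded {d} files", .{n});
        // New API: select a level first, then log/logf, optionally with fields.
        logger.info().log("starting up");
        logger.debug().logf("loaded {d} files", .{42});
        logger.warn()
            .field("slot", @as(u64, 100))
            .field("path", "accounts")
            .log("slow account load");

        // Scoped loggers prefix each message with [scope].
        const scoped = logger.withScope("accounts_db");
        scoped.err().logf("failed to open {s}", .{"1.2"});
    }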