diff --git a/src/accountsdb/db.zig b/src/accountsdb/db.zig index b678c1899..6e414945b 100644 --- a/src/accountsdb/db.zig +++ b/src/accountsdb/db.zig @@ -216,15 +216,16 @@ pub const AccountsDB = struct { const reference_allocator: std.mem.Allocator // = blk: { if (config.use_disk_index) { - var index_bin_dir = try snapshot_dir.makeOpenPath("index/bin", .{}); - defer index_bin_dir.close(); + var index_bin_dir = try snapshot_dir.makeOpenPath("index", .{}); + errdefer index_bin_dir.close(); - const disk_file_suffix = try index_bin_dir.realpathAlloc(allocator, "."); - errdefer allocator.free(disk_file_suffix); - logger.infof("using disk index in {s}", .{disk_file_suffix}); + logger.infof("using disk index in {s}", .{sig.utils.fmt.tryRealPath(index_bin_dir, ".")}); const ptr = try allocator.create(DiskMemoryAllocator); - ptr.* = DiskMemoryAllocator.init(disk_file_suffix); + ptr.* = .{ + .dir = index_bin_dir, + .logger = logger, + }; break :blk .{ ptr, ptr.allocator() }; } else { @@ -233,7 +234,7 @@ pub const AccountsDB = struct { } }; errdefer if (maybe_disk_allocator_ptr) |ptr| { - ptr.deinit(allocator); + ptr.dir.close(); allocator.destroy(ptr); }; @@ -266,19 +267,10 @@ pub const AccountsDB = struct { }; } - pub fn deinit( - self: *Self, - delete_index_files: bool, - ) void { + pub fn deinit(self: *Self) void { self.account_index.deinit(true); if (self.disk_allocator_ptr) |ptr| { - // note: we dont always deinit the allocator so we keep the index files - // because they are expensive to generate - if (delete_index_files) { - ptr.deinit(self.allocator); - } else { - self.allocator.free(ptr.filepath); - } + ptr.dir.close(); self.allocator.destroy(ptr); } @@ -3229,7 +3221,7 @@ test "testWriteSnapshot" { .number_of_index_bins = ACCOUNT_INDEX_BINS, .use_disk_index = false, }, null); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); try testWriteSnapshotFull(&accounts_db, snap_files.full_snapshot.slot, snap_files.full_snapshot.hash); try 
testWriteSnapshotIncremental(&accounts_db, snap_files.incremental_snapshot.?.slot, snap_files.incremental_snapshot.?.hash); @@ -3288,7 +3280,7 @@ fn loadTestAccountsDB(allocator: std.mem.Allocator, use_disk: bool, n_threads: u .number_of_index_bins = 4, .use_disk_index = use_disk, }, null); - errdefer accounts_db.deinit(true); + errdefer accounts_db.deinit(); _ = try accounts_db.loadFromSnapshot(snapshot.accounts_db_fields, n_threads, allocator, 1_500); @@ -3358,7 +3350,7 @@ test "geyser stream on load" { geyser_writer, ); defer { - accounts_db.deinit(true); + accounts_db.deinit(); snapshots.deinit(allocator); } @@ -3378,7 +3370,7 @@ test "write and read an account" { var accounts_db, var snapshots = try loadTestAccountsDB(std.testing.allocator, false, 1); defer { - accounts_db.deinit(true); + accounts_db.deinit(); snapshots.deinit(allocator); } @@ -3415,7 +3407,7 @@ test "load and validate from test snapshot using disk index" { var accounts_db, var snapshots = try loadTestAccountsDB(std.testing.allocator, false, 1); defer { - accounts_db.deinit(true); + accounts_db.deinit(); snapshots.deinit(allocator); } @@ -3432,7 +3424,7 @@ test "load and validate from test snapshot parallel" { var accounts_db, var snapshots = try loadTestAccountsDB(std.testing.allocator, false, 2); defer { - accounts_db.deinit(true); + accounts_db.deinit(); snapshots.deinit(allocator); } @@ -3449,7 +3441,7 @@ test "load and validate from test snapshot" { var accounts_db, var snapshots = try loadTestAccountsDB(std.testing.allocator, false, 1); defer { - accounts_db.deinit(true); + accounts_db.deinit(); snapshots.deinit(allocator); } @@ -3466,7 +3458,7 @@ test "load clock sysvar" { var accounts_db, var snapshots = try loadTestAccountsDB(std.testing.allocator, false, 1); defer { - accounts_db.deinit(true); + accounts_db.deinit(); snapshots.deinit(allocator); } @@ -3486,7 +3478,7 @@ test "load other sysvars" { var accounts_db, var snapshots = try loadTestAccountsDB(std.testing.allocator, false, 
1); defer { - accounts_db.deinit(true); + accounts_db.deinit(); snapshots.deinit(allocator); } @@ -3513,7 +3505,7 @@ test "flushing slots works" { .number_of_index_bins = 4, .use_disk_index = false, }, null); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); var random = std.rand.DefaultPrng.init(19); const rng = random.random(); @@ -3564,7 +3556,7 @@ test "purge accounts in cache works" { .number_of_index_bins = 4, .use_disk_index = false, }, null); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); var random = std.rand.DefaultPrng.init(19); const rng = random.random(); @@ -3621,7 +3613,7 @@ test "clean to shrink account file works with zero-lamports" { .number_of_index_bins = 4, .use_disk_index = false, }, null); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); var random = std.rand.DefaultPrng.init(19); const rng = random.random(); @@ -3697,7 +3689,7 @@ test "clean to shrink account file works" { .number_of_index_bins = 4, .use_disk_index = false, }, null); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); var random = std.rand.DefaultPrng.init(19); const rng = random.random(); @@ -3765,7 +3757,7 @@ test "full clean account file works" { .number_of_index_bins = 4, .use_disk_index = false, }, null); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); var random = std.rand.DefaultPrng.init(19); const rng = random.random(); @@ -3850,7 +3842,7 @@ test "shrink account file works" { .number_of_index_bins = 4, .use_disk_index = false, }, null); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); var random = std.rand.DefaultPrng.init(19); const rng = random.random(); @@ -4051,7 +4043,7 @@ pub const BenchmarkAccountsDBSnapshotLoad = struct { .number_of_index_bins = 32, .use_disk_index = bench_args.use_disk, }, null); - defer accounts_db.deinit(false); + defer accounts_db.deinit(); var accounts_dir = try std.fs.cwd().openDir(accounts_path, .{ .iterate = true }); defer accounts_dir.close(); 
@@ -4235,7 +4227,7 @@ pub const BenchmarkAccountsDB = struct { .number_of_index_bins = ACCOUNT_INDEX_BINS, .use_disk_index = bench_args.index == .disk, }, null); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); var random = std.Random.DefaultPrng.init(19); const rng = random.random(); diff --git a/src/accountsdb/fuzz.zig b/src/accountsdb/fuzz.zig index 21f139a63..565e05313 100644 --- a/src/accountsdb/fuzz.zig +++ b/src/accountsdb/fuzz.zig @@ -99,7 +99,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { }, null, ); - defer accounts_db.deinit(true); + defer accounts_db.deinit(); const exit = try gpa.create(std.atomic.Value(bool)); defer gpa.destroy(exit); @@ -326,7 +326,7 @@ pub fn run(seed: u64, args: *std.process.ArgIterator) !void { defer snapshot_fields.deinit(allocator); var alt_accounts_db = try AccountsDB.init(std.heap.page_allocator, .noop, alternative_snapshot_dir, accounts_db.config, null); - defer alt_accounts_db.deinit(true); + defer alt_accounts_db.deinit(); _ = try alt_accounts_db.loadWithDefaults(&snapshot_fields, 1, true, 1_500); const maybe_inc_slot = if (snapshot_files.incremental_snapshot) |inc| inc.slot else null; diff --git a/src/accountsdb/readme.md b/src/accountsdb/readme.md index 7233c58c4..468be0db0 100644 --- a/src/accountsdb/readme.md +++ b/src/accountsdb/readme.md @@ -174,12 +174,17 @@ to support disk-based account references, we created a general purpose disk allocator which creates memory from mmap-ing files stored on disk. ```zig -// files are created using `data/test-data/tmp_{i}` format where `i` is -// incremented by one for each alloc call. -var allocator = try DiskMemoryAllocator.init("data/test-data/tmp"); -defer allocator.deinit(null); +// files are created using `data/test-data/bin_{i}` format where `i` is +// incremented by one for each new allocation. 
+var dma_dir = try std.fs.cwd().makeOpenPath("data/test-data", .{}); +defer dma_dir.close(); +var dma_state: DiskMemoryAllocator = .{ .dir = dma_dir, .logger = .noop }; +const dma = dma_state.allocator(); ``` +Unlike a simpler page allocator, it stores certain metadata after the user-facing +buffer which tracks the associated file, and the true mmap'd size to allow for resizes. + ### background-threads we also run background threads in the `runManagerLoop` method which does the following: diff --git a/src/cmd/cmd.zig b/src/cmd/cmd.zig index 34c5bd1f1..73a0c57ca 100644 --- a/src/cmd/cmd.zig +++ b/src/cmd/cmd.zig @@ -1346,7 +1346,7 @@ const LoadedSnapshot = struct { self.genesis_config.deinit(self.allocator); self.status_cache.deinit(self.allocator); self.snapshot_fields.deinit(self.allocator); - self.accounts_db.deinit(false); // keep index files on disk + self.accounts_db.deinit(); self.allocator.destroy(self); } }; @@ -1409,7 +1409,7 @@ fn loadSnapshot( }, geyser_writer, ); - errdefer result.accounts_db.deinit(false); + errdefer result.accounts_db.deinit(); var snapshot_fields = try result.accounts_db.loadWithDefaults( &all_snapshot_fields, diff --git a/src/utils/allocators.zig b/src/utils/allocators.zig index 725893416..966ed43b9 100644 --- a/src/utils/allocators.zig +++ b/src/utils/allocators.zig @@ -138,36 +138,39 @@ pub fn RecycleFBA(config: struct { /// thread safe disk memory allocator pub const DiskMemoryAllocator = struct { - filepath: []const u8, - count: std.atomic.Value(u64) = std.atomic.Value(u64).init(0), - + dir: std.fs.Dir, + logger: sig.trace.Logger, + /// The amount of memory mmap'd for a particular allocation will be `file_size * mmap_ratio`. + /// See `alignedFileSize` and its usages to understand the relationship between an allocation + /// size, and the size of a file, and by extension, how this then relates to the allocated + /// address space.
+ mmap_ratio: MmapRatio = 1, + count: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), const Self = @This(); - pub fn init(filepath: []const u8) Self { - return Self{ - .filepath = filepath, - }; + pub const MmapRatio = u16; + + /// Metadata stored at the end of each allocation. + const Metadata = extern struct { + file_index: u32, + mmap_size: usize align(4), + }; + + /// Returns the aligned size with enough space for `size` and `Metadata` at the end. + inline fn alignedFileSize(size: usize) usize { + return std.mem.alignForward(usize, size + @sizeOf(Metadata), std.mem.page_size); } - /// deletes all allocated files + optionally frees the filepath with the allocator - pub fn deinit(self: *Self, str_allocator: ?std.mem.Allocator) void { - // delete all files - var buf: [1024]u8 = undefined; - for (0..self.count.load(.acquire)) |i| { - // this should never fail since we know the file exists in alloc() - const filepath = std.fmt - .bufPrint(&buf, "{s}_{d}", .{ self.filepath, i }) catch unreachable; - std.fs.cwd().deleteFile(filepath) catch |err| { - std.debug.print("Disk Memory Allocator deinit: error: {}\n", .{err}); - }; - } - if (str_allocator) |a| { - a.free(self.filepath); - } + inline fn alignedMmapSize( + /// Must be `= alignedFileSize(size)`. + aligned_file_size: usize, + mmap_ratio: MmapRatio, + ) usize { + return aligned_file_size *| mmap_ratio; } - pub fn allocator(self: *Self) std.mem.Allocator { - return std.mem.Allocator{ + pub inline fn allocator(self: *Self) std.mem.Allocator { + return .{ .ptr = self, .vtable = &.{ .alloc = alloc, @@ -177,167 +180,140 @@ pub const DiskMemoryAllocator = struct { }; } - /// creates a new file with size aligned to page_size and returns a pointer to it - pub fn alloc(ctx: *anyopaque, n: usize, log2_align: u8, return_address: usize) ?[*]u8 { - _ = log2_align; + /// creates a new file with size aligned to page_size and returns a pointer to it. 
+ /// + /// mmaps at least enough memory to the file for `size`, the metadata, and optionally + /// more based on the `mmap_ratio` field, in order to accommodate potential growth + /// from `resize` calls. + fn alloc(ctx: *anyopaque, size: usize, log2_align: u8, return_address: usize) ?[*]u8 { _ = return_address; const self: *Self = @ptrCast(@alignCast(ctx)); + std.debug.assert(self.mmap_ratio != 0); - const count = self.count.fetchAdd(1, .monotonic); + const alignment = @as(usize, 1) << @intCast(log2_align); + std.debug.assert(alignment <= std.mem.page_size); // the allocator interface shouldn't allow this (aside from the *Raw methods). - var buf: [1024]u8 = undefined; - const filepath = std.fmt.bufPrint(&buf, "{s}_{d}", .{ self.filepath, count }) catch |err| { - std.debug.print("Disk Memory Allocator error: {}\n", .{err}); - return null; - }; + const file_aligned_size = alignedFileSize(size); + const aligned_mmap_size = alignedMmapSize(file_aligned_size, self.mmap_ratio); + + const file_index = self.count.fetchAdd(1, .monotonic); + const file_name_bounded = fileNameBounded(file_index); + const file_name = file_name_bounded.constSlice(); - var file = std.fs.cwd().createFile(filepath, .{ .read = true }) catch |err| { - std.debug.print("Disk Memory Allocator error: {} filepath: {s}\n", .{ err, filepath }); + const file = self.dir.createFile(file_name, .{ .read = true, .truncate = true }) catch |err| { + self.logFailure(err, file_name); return null; }; defer file.close(); - const aligned_size = std.mem.alignForward(usize, n, std.mem.page_size); - const file_size = (file.stat() catch |err| { - std.debug.print("Disk Memory Allocator error: {}\n", .{err}); + // resize the file + file.setEndPos(file_aligned_size) catch |err| { + self.logFailure(err, file_name); return null; - }).size; - - if (file_size < aligned_size) { - // resize the file - file.seekTo(aligned_size - 1) catch |err| { - std.debug.print("Disk Memory Allocator error: {}\n", .{err}); - return null; - }; - 
_ = file.write(&[_]u8{1}) catch |err| { - std.debug.print("Disk Memory Allocator error: {}\n", .{err}); - return null; - }; - file.seekTo(0) catch |err| { - std.debug.print("Disk Memory Allocator error: {}\n", .{err}); - return null; - }; - } + }; - const memory = std.posix.mmap( + const full_alloc = std.posix.mmap( null, - aligned_size, + aligned_mmap_size, std.posix.PROT.READ | std.posix.PROT.WRITE, std.posix.MAP{ .TYPE = .SHARED }, file.handle, 0, ) catch |err| { - std.debug.print("Disk Memory Allocator error: {}\n", .{err}); + self.logFailure(err, file_name); return null; }; - return memory.ptr; - } - - /// unmaps the memory (file still exists and is removed on deinit()) - pub fn free(_: *anyopaque, buf: []u8, log2_align: u8, return_address: usize) void { - _ = log2_align; - _ = return_address; - // TODO: build a mapping from ptr to file so we can delete the corresponding file on free - const buf_aligned_len = std.mem.alignForward(usize, buf.len, std.mem.page_size); - std.posix.munmap(@alignCast(buf.ptr[0..buf_aligned_len])); + std.debug.assert(size <= file_aligned_size - @sizeOf(Metadata)); // sanity check + std.mem.bytesAsValue(Metadata, full_alloc[size..][0..@sizeOf(Metadata)]).* = .{ + .file_index = file_index, + .mmap_size = aligned_mmap_size, + }; + return full_alloc.ptr; } - /// not supported rn + /// Resizes the allocation within the bounds of the mmap'd address space if possible. 
fn resize( - _: *anyopaque, - buf_unaligned: []u8, - log2_buf_align: u8, + ctx: *anyopaque, + buf: []u8, + log2_align: u8, new_size: usize, return_address: usize, ) bool { - // not supported - _ = buf_unaligned; - _ = log2_buf_align; - _ = new_size; _ = return_address; - return false; - } -}; + const self: *Self = @ptrCast(@alignCast(ctx)); + std.debug.assert(self.mmap_ratio != 0); -test "recycle allocator" { - const backing_allocator = std.testing.allocator; - var allocator = try RecycleFBA(.{}).init(backing_allocator, 1024); - defer allocator.deinit(); + const alignment = @as(usize, 1) << @intCast(log2_align); + std.debug.assert(alignment <= std.mem.page_size); // the allocator interface shouldn't allow this (aside from the *Raw methods). - // alloc a slice of 100 bytes - const bytes = try allocator.allocator().alloc(u8, 100); - const ptr = bytes.ptr; - // free the slice - allocator.allocator().free(bytes); + const old_file_aligned_size = alignedFileSize(buf.len); + const new_file_aligned_size = alignedFileSize(new_size); - // realloc should be the same (ie, recycled data) - const bytes2 = try allocator.allocator().alloc(u8, 100); - try std.testing.expectEqual(ptr, bytes2.ptr); - allocator.allocator().free(bytes2); + if (new_file_aligned_size == old_file_aligned_size) { + // NOTE(review): the backing file keeps its size, but `free` and later + // `resize` calls locate the metadata at the *current* user length, which + // becomes `new_size` once we return true — so relocate it from `buf.len`. + const ptr: [*]align(std.mem.page_size) u8 = @alignCast(buf.ptr); + const meta_copy: Metadata = @bitCast(ptr[buf.len..][0..@sizeOf(Metadata)].*); + std.mem.bytesAsValue(Metadata, ptr[new_size..][0..@sizeOf(Metadata)]).* = meta_copy; + return true; + } - // same result with smaller slice - const bytes3 = try allocator.allocator().alloc(u8, 50); - try std.testing.expectEqual(ptr, bytes3.ptr); - allocator.allocator().free(bytes3); + const buf_ptr: [*]align(std.mem.page_size) u8 = @alignCast(buf.ptr); + const metadata: Metadata = @bitCast(buf_ptr[buf.len..][0..@sizeOf(Metadata)].*); - // diff result with larger slice - const bytes4 = try allocator.allocator().alloc(u8, 200); - try std.testing.expect(ptr != bytes4.ptr); - allocator.allocator().free(bytes4); -} + if (new_file_aligned_size > metadata.mmap_size) { + return false; + } -test "disk allocator on hashmaps" { - var allocator =
DiskMemoryAllocator.init(sig.TEST_DATA_DIR ++ "tmp"); - defer allocator.deinit(null); + const file_name_bounded = fileNameBounded(metadata.file_index); + const file_name = file_name_bounded.constSlice(); - var refs = std.AutoHashMap(u8, u8).init(allocator.allocator()); - try refs.ensureTotalCapacity(100); + const file = self.dir.openFile(file_name, .{ .mode = .read_write }) catch |err| { + self.logFailure(err, file_name); + return false; + }; + defer file.close(); - try refs.put(10, 19); + file.setEndPos(new_file_aligned_size) catch return false; - const r = refs.get(10) orelse return error.Unreachable; - try std.testing.expectEqual(19, r); -} + std.debug.assert(new_size <= new_file_aligned_size - @sizeOf(Metadata)); // sanity check + std.mem.bytesAsValue(Metadata, buf_ptr[new_size..][0..@sizeOf(Metadata)]).* = metadata; -test "disk allocator on arraylists" { - var allocator = DiskMemoryAllocator.init(sig.TEST_DATA_DIR ++ "tmp"); + return true; + } - var disk_account_refs = try std.ArrayList(u8).initCapacity( - allocator.allocator(), - 1, - ); - defer disk_account_refs.deinit(); + /// unmaps the memory and deletes the associated file. + fn free(ctx: *anyopaque, buf: []u8, log2_align: u8, return_address: usize) void { + _ = return_address; + const self: *Self = @ptrCast(@alignCast(ctx)); + std.debug.assert(self.mmap_ratio != 0); + std.debug.assert(buf.len != 0); // should be ensured by the allocator interface - disk_account_refs.appendAssumeCapacity(19); + const alignment = @as(usize, 1) << @intCast(log2_align); + std.debug.assert(alignment <= std.mem.page_size); // the allocator interface shouldn't allow this (aside from the *Raw methods). 
- try std.testing.expectEqual(19, disk_account_refs.items[0]); + const file_aligned_size = alignedFileSize(buf.len); + const mmap_aligned_size = alignedMmapSize(file_aligned_size, self.mmap_ratio); - // this will lead to another allocation - try disk_account_refs.append(21); + const buf_ptr: [*]align(std.mem.page_size) u8 = @alignCast(buf.ptr); + const metadata: Metadata = @bitCast(buf_ptr[buf.len..][0..@sizeOf(Metadata)].*); - try std.testing.expectEqual(19, disk_account_refs.items[0]); - try std.testing.expectEqual(21, disk_account_refs.items[1]); + const file_name_bounded = fileNameBounded(metadata.file_index); + const file_name = file_name_bounded.constSlice(); - // these should exist - try std.fs.cwd().access(sig.TEST_DATA_DIR ++ "tmp_0", .{}); - try std.fs.cwd().access(sig.TEST_DATA_DIR ++ "tmp_1", .{}); + std.posix.munmap(buf_ptr[0..mmap_aligned_size]); + self.dir.deleteFile(file_name) catch |err| { + self.logFailure(err, file_name); + }; + } - // this should delete them - allocator.deinit(null); + fn logFailure(self: Self, err: anyerror, file_name: []const u8) void { + self.logger.errf("Disk Memory Allocator error: {s}, filepath: {s}", .{ + @errorName(err), sig.utils.fmt.tryRealPath(self.dir, file_name), + }); + } - // these should no longer exist - var did_error = false; - std.fs.cwd().access(sig.TEST_DATA_DIR ++ "tmp_0", .{}) catch { - did_error = true; - }; - try std.testing.expect(did_error); - did_error = false; - std.fs.cwd().access(sig.TEST_DATA_DIR ++ "tmp_1", .{}) catch { - did_error = true; - }; - try std.testing.expect(did_error); -} + const file_name_max_len = sig.utils.fmt.boundedLenValue("bin_{d}", .{std.math.maxInt(u32)}); + inline fn fileNameBounded(file_index: u32) std.BoundedArray(u8, file_name_max_len) { + return sig.utils.fmt.boundedFmt("bin_{d}", .{file_index}); + } +}; /// Namespace housing the different components for the stateless failing allocator. /// This allows easily importing everything related therein. 
@@ -400,3 +376,139 @@ pub const failing = struct { }; } }; + +test "recycle allocator" { + const backing_allocator = std.testing.allocator; + var allocator = try RecycleFBA(.{}).init(backing_allocator, 1024); + defer allocator.deinit(); + + // alloc a slice of 100 bytes + const bytes = try allocator.allocator().alloc(u8, 100); + const ptr = bytes.ptr; + // free the slice + allocator.allocator().free(bytes); + + // realloc should be the same (ie, recycled data) + const bytes2 = try allocator.allocator().alloc(u8, 100); + try std.testing.expectEqual(ptr, bytes2.ptr); + allocator.allocator().free(bytes2); + + // same result with smaller slice + const bytes3 = try allocator.allocator().alloc(u8, 50); + try std.testing.expectEqual(ptr, bytes3.ptr); + allocator.allocator().free(bytes3); + + // diff result with larger slice + const bytes4 = try allocator.allocator().alloc(u8, 200); + try std.testing.expect(ptr != bytes4.ptr); + allocator.allocator().free(bytes4); +} + +test "disk allocator stdlib test" { + var tmp_dir_root = std.testing.tmpDir(.{}); + defer tmp_dir_root.cleanup(); + const tmp_dir = tmp_dir_root.dir; + + for ([_]DiskMemoryAllocator.MmapRatio{ + 1, 2, 3, 4, 5, + 10, 11, 12, 13, 14, + 15, 20, 21, 22, 23, + 24, 25, 30, 31, 32, + 33, 34, 35, 40, 41, + 42, 43, 44, 45, 50, + 51, 52, 53, 54, 55, + }) |ratio| { + var dma_state: DiskMemoryAllocator = .{ + .dir = tmp_dir, + .logger = .noop, + .mmap_ratio = ratio, + }; + const dma = dma_state.allocator(); + + try std.heap.testAllocator(dma); + try std.heap.testAllocatorAligned(dma); + try std.heap.testAllocatorLargeAlignment(dma); + try std.heap.testAllocatorAlignedShrink(dma); + } +} + +test "disk allocator on hashmaps" { + var tmp_dir_root = std.testing.tmpDir(.{}); + defer tmp_dir_root.cleanup(); + const tmp_dir = tmp_dir_root.dir; + + var dma_state: DiskMemoryAllocator = .{ + .dir = tmp_dir, + .logger = .noop, + }; + const dma = dma_state.allocator(); + + var refs = std.AutoHashMap(u8, u8).init(dma); + defer 
refs.deinit(); + + try refs.ensureTotalCapacity(100); + + try refs.put(10, 19); + + const r = refs.get(10) orelse return error.Unreachable; + try std.testing.expectEqual(19, r); +} + +test "disk allocator on arraylists" { + var tmp_dir_root = std.testing.tmpDir(.{}); + defer tmp_dir_root.cleanup(); + const tmp_dir = tmp_dir_root.dir; + + var dma_state: DiskMemoryAllocator = .{ + .dir = tmp_dir, + .logger = .noop, + }; + const dma = dma_state.allocator(); + + { + try std.testing.expectError(error.FileNotFound, tmp_dir.access("bin_0", .{})); // this should not exist + + var disk_account_refs = try std.ArrayList(u8).initCapacity(dma, 1); + defer disk_account_refs.deinit(); + + disk_account_refs.appendAssumeCapacity(19); + + try std.testing.expectEqual(19, disk_account_refs.items[0]); + + try disk_account_refs.append(21); + + try std.testing.expectEqual(19, disk_account_refs.items[0]); + try std.testing.expectEqual(21, disk_account_refs.items[1]); + + try tmp_dir.access("bin_0", .{}); // this should exist + try std.testing.expectError(error.FileNotFound, tmp_dir.access("bin_1", .{})); // this should not exist + + const array_ptr = try dma.create([4096]u8); + defer dma.destroy(array_ptr); + @memset(array_ptr, 0); + + try tmp_dir.access("bin_1", .{}); // this should now exist + } + + try std.testing.expectError(error.FileNotFound, tmp_dir.access("bin_0", .{})); + try std.testing.expectError(error.FileNotFound, tmp_dir.access("bin_1", .{})); +} + +test "disk allocator large realloc" { + var tmp_dir_root = std.testing.tmpDir(.{}); + defer tmp_dir_root.cleanup(); + const tmp_dir = tmp_dir_root.dir; + + var dma_state: DiskMemoryAllocator = .{ + .dir = tmp_dir, + .logger = .noop, + }; + const dma = dma_state.allocator(); + + var page1 = try dma.alloc(u8, std.mem.page_size); + defer dma.free(page1); + + page1 = try dma.realloc(page1, std.mem.page_size * 15); + + page1[page1.len - 1] = 10; +}