Skip to content

Commit

Permalink
refactor(ledger): move allocator to first parameter of BlockstoreDB.get
Browse files · Browse the repository at this point in the history
  • Loading branch information
dnut committed Sep 25, 2024
1 parent bda0e6a commit f59c210
Show file tree
Hide file tree
Showing 7 changed files with 64 additions and 64 deletions.
26 changes: 13 additions & 13 deletions src/ledger/database.zig
Original file line number Diff line number Diff line change
Expand Up @@ -75,11 +75,11 @@ pub fn Database(comptime Impl: type) type {
// this will need some changes to bincode.
pub fn get(
self: *Self,
allocator: Allocator,
comptime cf: ColumnFamily,
key: cf.Key,
allocator: Allocator,
) anyerror!?cf.Value {
return try self.impl.get(cf, key, allocator);
return try self.impl.get(allocator, cf, key);
}

/// Returns a reference to the serialized bytes.
Expand Down Expand Up @@ -304,17 +304,17 @@ fn tests(comptime Impl: fn ([]const ColumnFamily) type) type {
defer db.deinit();

try db.put(cf1, 123, .{ .hello = 345 });
const got = try db.get(cf1, 123, allocator);
const got = try db.get(allocator, cf1, 123);
try std.testing.expect(345 == got.?.hello);
const not = try db.get(cf2, 123, allocator);
const not = try db.get(allocator, cf2, 123);
try std.testing.expect(null == not);
const wrong_was_deleted = try db.delete(cf2, 123);
_ = wrong_was_deleted;
// try std.testing.expect(!wrong_was_deleted); // FIXME
const was_deleted = try db.delete(cf1, 123);
_ = was_deleted;
// try std.testing.expect(was_deleted);
const not_now = try db.get(cf1, 123, allocator);
const not_now = try db.get(allocator, cf1, 123);
try std.testing.expect(null == not_now);
}

Expand All @@ -336,17 +336,17 @@ fn tests(comptime Impl: fn ([]const ColumnFamily) type) type {
try batch.put(cf2, 133, .{ .world = 555 });
try batch.put(cf2, 133, .{ .world = 666 });

try std.testing.expectEqual(Value1{ .hello = 99 }, try db.get(cf1, 0, allocator));
try std.testing.expectEqual(null, try db.get(cf1, 123, allocator));
try std.testing.expectEqual(null, try db.get(cf2, 321, allocator));
try std.testing.expectEqual(null, try db.get(cf2, 333, allocator));
try std.testing.expectEqual(Value1{ .hello = 99 }, try db.get(allocator, cf1, 0));
try std.testing.expectEqual(null, try db.get(allocator, cf1, 123));
try std.testing.expectEqual(null, try db.get(allocator, cf2, 321));
try std.testing.expectEqual(null, try db.get(allocator, cf2, 333));

try db.commit(batch);

try std.testing.expectEqual(null, try db.get(cf1, 0, allocator));
try std.testing.expectEqual(Value1{ .hello = 100 }, try db.get(cf1, 123, allocator));
try std.testing.expectEqual(null, try db.get(cf2, 321, allocator));
try std.testing.expectEqual(Value2{ .world = 666 }, try db.get(cf2, 133, allocator));
try std.testing.expectEqual(null, try db.get(allocator, cf1, 0));
try std.testing.expectEqual(Value1{ .hello = 100 }, try db.get(allocator, cf1, 123));
try std.testing.expectEqual(null, try db.get(allocator, cf2, 321));
try std.testing.expectEqual(Value2{ .world = 666 }, try db.get(allocator, cf2, 133));
}

pub fn @"iterator forward"() !void {
Expand Down
2 changes: 1 addition & 1 deletion src/ledger/hashmap_db.zig
Original file line number Diff line number Diff line change
Expand Up @@ -73,9 +73,9 @@ pub fn SharedHashMapDB(comptime column_families: []const ColumnFamily) type {

pub fn get(
self: *Self,
allocator: Allocator,
comptime cf: ColumnFamily,
key: cf.Key,
allocator: Allocator,
) anyerror!?cf.Value {
const key_bytes = try key_serializer.serializeAlloc(self.allocator, key);
defer self.allocator.free(key_bytes);
Expand Down
32 changes: 16 additions & 16 deletions src/ledger/insert_shred.zig
Original file line number Diff line number Diff line change
Expand Up @@ -449,7 +449,7 @@ pub const ShredInserter = struct {
const erasure_set_id = shred.fields.common.erasureSetId();
// TODO: redundant get or put pattern
if (!merkle_root_metas.contains(erasure_set_id)) {
if (try self.db.get(schema.merkle_root_meta, erasure_set_id, self.allocator)) |meta_| {
if (try self.db.get(self.allocator, schema.merkle_root_meta, erasure_set_id)) |meta_| {
try merkle_root_metas.put(erasure_set_id, .{ .clean = meta_ });
}
}
Expand Down Expand Up @@ -487,7 +487,7 @@ pub const ShredInserter = struct {
// TODO: redundant get or put pattern
const erasure_meta_entry = try erasure_metas.getOrPut(erasure_set_id);
if (!erasure_meta_entry.found_existing) {
if (try self.db.get(schema.erasure_meta, erasure_set_id, self.allocator)) |meta_| {
if (try self.db.get(self.allocator, schema.erasure_meta, erasure_set_id)) |meta_| {
erasure_meta_entry.value_ptr.* = .{ .clean = meta_ };
} else {
erasure_meta_entry.value_ptr.* = .{
Expand Down Expand Up @@ -651,7 +651,7 @@ pub const ShredInserter = struct {
const erasure_set_id = shred.fields.common.erasureSetId();
// TODO: redundant get or put pattern
if (!merkle_root_metas.contains(erasure_set_id)) {
if (try self.db.get(schema.merkle_root_meta, erasure_set_id, self.allocator)) |meta_| {
if (try self.db.get(self.allocator, schema.merkle_root_meta, erasure_set_id)) |meta_| {
try merkle_root_metas.put(erasure_set_id, .{ .clean = meta_ });
}
}
Expand Down Expand Up @@ -726,7 +726,7 @@ pub const ShredInserter = struct {

// TODO: redundant get or put pattern
if (!erasure_metas.contains(erasure_set_id)) {
if (try self.db.get(schema.erasure_meta, erasure_set_id, self.allocator)) |meta_| {
if (try self.db.get(self.allocator, schema.erasure_meta, erasure_set_id)) |meta_| {
try erasure_metas.put(erasure_set_id, .{ .clean = meta_ });
}
}
Expand All @@ -746,7 +746,7 @@ pub const ShredInserter = struct {
var timer = try Timer.start();
const entry = try working_set.getOrPut(slot);
if (!entry.found_existing) {
if (try self.db.get(schema.index, slot, self.allocator)) |item| {
if (try self.db.get(self.allocator, schema.index, slot)) |item| {
entry.value_ptr.* = .{ .index = item };
} else {
entry.value_ptr.* = IndexMetaWorkingSetEntry.init(allocator, slot);
Expand All @@ -766,7 +766,7 @@ pub const ShredInserter = struct {
// TODO: redundant get or put pattern
const entry = try working_set.getOrPut(slot);
if (!entry.found_existing) {
if (try self.db.get(schema.slot_meta, slot, self.allocator)) |backup| {
if (try self.db.get(self.allocator, schema.slot_meta, slot)) |backup| {
var slot_meta: SlotMeta = try backup.clone(self.allocator);
// If parent_slot == None, then this is one of the orphans inserted
// during the chaining process, see the function find_slot_meta_in_cached_state()
Expand Down Expand Up @@ -1263,7 +1263,7 @@ pub const ShredInserter = struct {
if (entry.found_existing) {
return entry.value_ptr;
}
entry.value_ptr.* = if (try self.db.get(schema.slot_meta, slot, self.allocator)) |m|
entry.value_ptr.* = if (try self.db.get(self.allocator, schema.slot_meta, slot)) |m|
m
else
SlotMeta.init(self.allocator, slot, null);
Expand Down Expand Up @@ -1345,7 +1345,7 @@ pub const ShredInserter = struct {
const next_erasure_set = ErasureSetId{ .slot = slot, .fec_set_index = next_fec_set_index };
const next_merkle_root_meta = if (merkle_root_metas.get(next_erasure_set)) |nes|
nes.asRef().*
else if (try self.db.get(schema.merkle_root_meta, next_erasure_set, self.allocator)) |nes|
else if (try self.db.get(self.allocator, schema.merkle_root_meta, next_erasure_set)) |nes|
nes
else
// No shred from the next fec set has been received
Expand Down Expand Up @@ -2025,7 +2025,7 @@ test "chaining basic" {
// insert slot 1
_ = try state.insertShredBytes(slots[1]);
{
var slot_meta: SlotMeta = (try state.db.get(schema.slot_meta, 1, state.allocator())).?;
var slot_meta: SlotMeta = (try state.db.get(state.allocator(), schema.slot_meta, 1)).?;
defer slot_meta.deinit();
try std.testing.expectEqualSlices(u64, &.{}, slot_meta.next_slots.items);
try std.testing.expect(!slot_meta.isConnected());
Expand All @@ -2036,15 +2036,15 @@ test "chaining basic" {
// insert slot 2
_ = try state.insertShredBytes(slots[2]);
{
var slot_meta: SlotMeta = (try state.db.get(schema.slot_meta, 1, state.allocator())).?;
var slot_meta: SlotMeta = (try state.db.get(state.allocator(), schema.slot_meta, 1)).?;
defer slot_meta.deinit();
try std.testing.expectEqualSlices(u64, &.{2}, slot_meta.next_slots.items);
try std.testing.expect(!slot_meta.isConnected()); // since 0 is not yet inserted
try std.testing.expectEqual(0, slot_meta.parent_slot);
try std.testing.expectEqual(shreds_per_slot - 1, slot_meta.last_index);
}
{
var slot_meta: SlotMeta = (try state.db.get(schema.slot_meta, 2, state.allocator())).?;
var slot_meta: SlotMeta = (try state.db.get(state.allocator(), schema.slot_meta, 2)).?;
defer slot_meta.deinit();
try std.testing.expectEqualSlices(u64, &.{}, slot_meta.next_slots.items);
try std.testing.expect(!slot_meta.isConnected()); // since 0 is not yet inserted
Expand All @@ -2055,23 +2055,23 @@ test "chaining basic" {
// insert slot 0
_ = try state.insertShredBytes(slots[0]);
{
var slot_meta: SlotMeta = (try state.db.get(schema.slot_meta, 0, state.allocator())).?;
var slot_meta: SlotMeta = (try state.db.get(state.allocator(), schema.slot_meta, 0)).?;
defer slot_meta.deinit();
try std.testing.expectEqualSlices(u64, &.{1}, slot_meta.next_slots.items);
try std.testing.expect(slot_meta.isConnected());
try std.testing.expectEqual(0, slot_meta.parent_slot);
try std.testing.expectEqual(shreds_per_slot - 1, slot_meta.last_index);
}
{
var slot_meta: SlotMeta = (try state.db.get(schema.slot_meta, 1, state.allocator())).?;
var slot_meta: SlotMeta = (try state.db.get(state.allocator(), schema.slot_meta, 1)).?;
defer slot_meta.deinit();
try std.testing.expectEqualSlices(u64, &.{2}, slot_meta.next_slots.items);
try std.testing.expect(slot_meta.isConnected());
try std.testing.expectEqual(0, slot_meta.parent_slot);
try std.testing.expectEqual(shreds_per_slot - 1, slot_meta.last_index);
}
{
var slot_meta: SlotMeta = (try state.db.get(schema.slot_meta, 2, state.allocator())).?;
var slot_meta: SlotMeta = (try state.db.get(state.allocator(), schema.slot_meta, 2)).?;
defer slot_meta.deinit();
try std.testing.expectEqualSlices(u64, &.{}, slot_meta.next_slots.items);
try std.testing.expect(slot_meta.isConnected());
Expand Down Expand Up @@ -2157,9 +2157,9 @@ test "merkle root metas coding" {
const original_erasure_set_id = shreds[0].commonHeader().erasureSetId();
const original_meta_from_map = merkle_root_metas.get(original_erasure_set_id).?.asRef();
const original_meta_from_db = (try state.db.get(
state.allocator(),
schema.merkle_root_meta,
original_erasure_set_id,
state.allocator(),
)).?;
inline for (.{ original_meta_from_map, original_meta_from_db }) |original_meta| {
try std.testing.expectEqual(
Expand Down Expand Up @@ -2192,9 +2192,9 @@ test "merkle root metas coding" {
const original_erasure_set_id = shreds[0].commonHeader().erasureSetId();
const original_meta_from_map = merkle_root_metas.get(original_erasure_set_id).?.asRef();
const original_meta_from_db = (try state.db.get(
state.allocator(),
schema.merkle_root_meta,
original_erasure_set_id,
state.allocator(),
)).?;
inline for (.{ original_meta_from_map, original_meta_from_db }) |original_meta| {
try std.testing.expectEqual(
Expand Down
Loading

0 comments on commit f59c210

Please sign in to comment.