Make shrinkFn optional #4739

Closed
wants to merge 6 commits into from
lib/std/heap.zig: 63 changes (30 additions, 33 deletions)
```diff
@@ -524,8 +524,8 @@ pub const ArenaAllocator = struct {
     pub fn init(child_allocator: *Allocator) ArenaAllocator {
         return ArenaAllocator{
             .allocator = Allocator{
-                .reallocFn = realloc,
-                .shrinkFn = shrink,
+                .reallocFn = if (child_allocator.canReclaimMemory()) realloc else reallocPassthrough,
+                .shrinkFn = null,
             },
             .child_allocator = child_allocator,
             .buffer_list = std.SinglyLinkedList([]u8).init(),
@@ -584,18 +584,14 @@ pub const ArenaAllocator = struct {
     }

     fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
-        if (new_size <= old_mem.len and new_align <= new_size) {
-            // We can't do anything with the memory, so tell the client to keep it.
-            return error.OutOfMemory;
-        } else {
-            const result = try alloc(allocator, new_size, new_align);
-            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
-            return result;
-        }
+        const result = try alloc(allocator, new_size, new_align);
+        @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
+        return result;
     }

-    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
-        return old_mem[0..new_size];
+    fn reallocPassthrough(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+        const self = @fieldParentPtr(ArenaAllocator, "allocator", allocator);
+        return self.child_allocator.reallocFn(self.child_allocator, old_mem, old_align, new_size, new_align);
     }
 };

@@ -608,7 +604,7 @@ pub const FixedBufferAllocator = struct {
         return FixedBufferAllocator{
             .allocator = Allocator{
                 .reallocFn = realloc,
-                .shrinkFn = shrink,
+                .shrinkFn = null,
             },
             .buffer = buffer,
             .end_index = 0,
@@ -617,6 +613,7 @@

     fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
         const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator);
+
         const addr = @ptrToInt(self.buffer.ptr) + self.end_index;
         const adjusted_addr = mem.alignForward(addr, alignment);
         const adjusted_index = self.end_index + (adjusted_addr - addr);
@@ -642,20 +639,13 @@
             const result = self.buffer[start_index..new_end_index];
             self.end_index = new_end_index;
             return result;
-        } else if (new_size <= old_mem.len and new_align <= old_align) {
-            // We can't do anything with the memory, so tell the client to keep it.
-            return error.OutOfMemory;
         } else {
             const result = try alloc(allocator, new_size, new_align);
             @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
             return result;
         }
     }

-    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
-        return old_mem[0..new_size];
-    }
-
     pub fn reset(self: *FixedBufferAllocator) void {
         self.end_index = 0;
     }
@@ -675,7 +665,7 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
         return ThreadSafeFixedBufferAllocator{
             .allocator = Allocator{
                 .reallocFn = realloc,
-                .shrinkFn = shrink,
+                .shrinkFn = null,
             },
             .buffer = buffer,
             .end_index = 0,
@@ -684,6 +674,7 @@

     fn alloc(allocator: *Allocator, n: usize, alignment: u29) ![]u8 {
         const self = @fieldParentPtr(ThreadSafeFixedBufferAllocator, "allocator", allocator);
+
         var end_index = @atomicLoad(usize, &self.end_index, builtin.AtomicOrder.SeqCst);
         while (true) {
             const addr = @ptrToInt(self.buffer.ptr) + end_index;
@@ -698,18 +689,9 @@
     }

     fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
-        if (new_size <= old_mem.len and new_align <= old_align) {
-            // We can't do anything useful with the memory, tell the client to keep it.
-            return error.OutOfMemory;
-        } else {
-            const result = try alloc(allocator, new_size, new_align);
-            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
-            return result;
-        }
-    }
-
-    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
-        return old_mem[0..new_size];
+        const result = try alloc(allocator, new_size, new_align);
+        @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
+        return result;
     }

     pub fn reset(self: *ThreadSafeFixedBufferAllocator) void {
@@ -890,6 +872,21 @@ test "ArenaAllocator" {
     try testAllocatorAlignedShrink(&arena_allocator.allocator);
 }

+test "ArenaAllocator passthrough" {
+    var arena_allocator = ArenaAllocator.init(page_allocator);
+    defer arena_allocator.deinit();
+
+    var passthrough_allocator = ArenaAllocator.init(&arena_allocator.allocator);
+    defer passthrough_allocator.deinit();
+
+    try testAllocator(&passthrough_allocator.allocator);
+    try testAllocatorAligned(&passthrough_allocator.allocator, 16);
+    try testAllocatorLargeAlignment(&passthrough_allocator.allocator);
+    try testAllocatorAlignedShrink(&passthrough_allocator.allocator);
+
+    testing.expect(passthrough_allocator.buffer_list.first == null);
+}
+
 var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
 test "FixedBufferAllocator" {
     var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
```
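For context on the `ArenaAllocator` change: when the child allocator cannot reclaim memory, the arena now installs `reallocPassthrough` and never builds up a buffer list of its own. A minimal usage sketch (hypothetical test code, not part of this diff, assuming the Zig 0.5-era `*Allocator` interface and this PR's `canReclaimMemory`):

```zig
const std = @import("std");

test "arena on top of a non-reclaiming allocator" {
    // The outer arena requests buffers from the page allocator and
    // reclaims them all at once in deinit().
    var outer = std.heap.ArenaAllocator.init(std.heap.page_allocator);
    defer outer.deinit();

    // An arena never reclaims memory early, so outer.allocator has a
    // null shrinkFn. The inner arena therefore installs
    // reallocPassthrough and forwards every request to the child
    // instead of buffering a second time.
    var inner = std.heap.ArenaAllocator.init(&outer.allocator);
    defer inner.deinit();

    var list = std.ArrayList(u8).init(&inner.allocator);
    defer list.deinit();
    try list.appendSlice("hello");

    // The inner arena never created a buffer of its own.
    std.testing.expect(inner.buffer_list.first == null);
}
```

This is the same behavior the new "ArenaAllocator passthrough" test above exercises: the inner arena's buffer list stays empty because every request goes straight to the outer one.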
lib/std/heap/logging_allocator.zig: 6 changes (5 additions, 1 deletion)
```diff
@@ -41,7 +41,11 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {

         fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
-            const result = self.parent_allocator.shrinkFn(self.parent_allocator, old_mem, old_align, new_size, new_align);
+            const result = if (self.parent_allocator.shrinkFn) |shrinkFn|
+                shrinkFn(self.parent_allocator, old_mem, old_align, new_size, new_align)
+            else
+                self.parent_allocator.shrink(old_mem, new_size);
+
             if (new_size == 0) {
                 self.out_stream.print("free of {} bytes success!\n", .{old_mem.len}) catch {};
             } else {
```
lib/std/mem.zig: 34 changes (30 additions, 4 deletions)
```diff
@@ -43,6 +43,7 @@ pub const Allocator = struct {
         /// `reallocFn` or `shrinkFn`.
         /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count`
         /// is guaranteed to be >= 1.
+        /// If `shrinkFn` is `null` then it is guaranteed that `old_mem.len == 0`.
         old_mem: []u8,
         /// If `old_mem.len == 0` then this is `undefined`, otherwise:
         /// Guaranteed to be the same as what was returned from most recent call to
@@ -60,7 +61,12 @@
     ) Error![]u8,

     /// This function deallocates memory. It must succeed.
-    shrinkFn: fn (
+    /// If this function is null, it means the allocator implementation cannot
+    /// reclaim memory. The shrink functions of the Allocator interface will
+    /// still work; they will trivially return the old memory with adjusted
+    /// length. In this case, `reallocFn` with a smaller `new_byte_count` will
+    /// always return `error.OutOfMemory`.
+    shrinkFn: ?fn (
         self: *Allocator,
         /// Guaranteed to be the same as what was returned from most recent call to
         /// `reallocFn` or `shrinkFn`.
@@ -88,8 +94,9 @@ pub const Allocator = struct {
     pub fn destroy(self: *Allocator, ptr: var) void {
         const T = @TypeOf(ptr).Child;
         if (@sizeOf(T) == 0) return;
+        const shrinkFn = self.shrinkFn orelse return;
         const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
-        const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
+        const shrink_result = shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1);
         assert(shrink_result.len == 0);
     }

@@ -186,6 +193,10 @@
             self.free(old_mem);
             return @as([*]align(new_alignment) T, undefined)[0..0];
         }
+        if (!self.canReclaimMemory() and new_n <= old_mem.len and new_alignment <= Slice.alignment) {
+            // Cannot reclaim memory; tell the client to keep it.
+            return error.OutOfMemory;
+        }

         const old_byte_slice = mem.sliceAsBytes(old_mem);
         const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory;
@@ -222,6 +233,7 @@
     ) []align(new_alignment) @typeInfo(@TypeOf(old_mem)).Pointer.child {
         const Slice = @typeInfo(@TypeOf(old_mem)).Pointer;
         const T = Slice.child;
+        const shrinkFn = self.shrinkFn orelse return old_mem[0..new_n];

         if (new_n == 0) {
             self.free(old_mem);
@@ -237,23 +249,37 @@

         const old_byte_slice = mem.sliceAsBytes(old_mem);
         @memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count);
-        const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
+        const byte_slice = shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment);
         assert(byte_slice.len == byte_count);
         return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice));
     }

     /// Free an array allocated with `alloc`. To free a single item,
     /// see `destroy`.
     pub fn free(self: *Allocator, memory: var) void {
+        const shrinkFn = self.shrinkFn orelse return;
         const Slice = @typeInfo(@TypeOf(memory)).Pointer;
         const bytes = mem.sliceAsBytes(memory);
         const bytes_len = bytes.len + if (Slice.sentinel != null) @sizeOf(Slice.child) else 0;
         if (bytes_len == 0) return;
         const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr));
         @memset(non_const_ptr, undefined, bytes_len);
-        const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes_len], Slice.alignment, 0, 1);
+        const shrink_result = shrinkFn(self, non_const_ptr[0..bytes_len], Slice.alignment, 0, 1);
         assert(shrink_result.len == 0);
     }
+
+    /// If this returns `false`, it means that the allocator implementation
+    /// will only ever increase memory usage. In this case, `free` and `shrink`
+    /// are no-ops and will not make the freed bytes available for use.
+    /// It also means using `realloc` to resize downwards will always result
+    /// in `error.OutOfMemory`.
+    /// When creating an arena allocator on top of a backing allocator, it is
+    /// best practice to check if the backing allocator can reclaim memory.
+    /// If it cannot, then the backing allocator should be used directly, to
+    /// avoid pointless overhead.
+    pub fn canReclaimMemory(self: *Allocator) bool {
+        return self.shrinkFn != null;
+    }
 };

 /// Copy all of source into dest at position 0.
```
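The `canReclaimMemory` doc comment above recommends checking the backing allocator before layering an arena on top of it. A minimal sketch of that best practice, assuming this PR's API; the helper names `runWithScratch` and `doWork` are hypothetical, for illustration only:

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

fn doWork(allocator: *Allocator) !void {
    const buf = try allocator.alloc(u8, 256);
    // When the allocator cannot reclaim memory, this free is a no-op.
    defer allocator.free(buf);
}

fn runWithScratch(backing: *Allocator) !void {
    if (backing.canReclaimMemory()) {
        // The arena batches all frees into a single deinit() and can
        // actually win something here.
        var arena = std.heap.ArenaAllocator.init(backing);
        defer arena.deinit();
        try doWork(&arena.allocator);
    } else {
        // The backing allocator only ever grows; wrapping it in an
        // arena would add bookkeeping for no benefit, so use it directly.
        try doWork(backing);
    }
}
```

Note that with this change `ArenaAllocator.init` already degrades to a passthrough when the child cannot reclaim memory, so the explicit check mainly saves the extra layer of indirection.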
lib/std/testing/failing_allocator.zig: 6 changes (5 additions, 1 deletion)
```diff
@@ -72,7 +72,11 @@ pub const FailingAllocator = struct {

     fn shrink(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
         const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
-        const r = self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
+        const r = if (self.internal_allocator.shrinkFn) |shrinkFn|
+            shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align)
+        else
+            self.internal_allocator.shrink(old_mem, new_size);
+
         self.freed_bytes += old_mem.len - r.len;
         if (new_size == 0)
             self.deallocations += 1;
```
lib/std/testing/leak_count_allocator.zig: 5 changes (4 additions, 1 deletion)
```diff
@@ -38,7 +38,10 @@ pub const LeakCountAllocator = struct {
             }
             self.count -= 1;
         }
-        return self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
+        return if (self.internal_allocator.shrinkFn) |shrinkFn|
+            shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align)
+        else
+            self.internal_allocator.shrink(old_mem, new_size);
     }

     pub fn validate(self: LeakCountAllocator) !void {
```
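All three wrappers patched above (logging, failing, and leak-counting) use the same dispatch when forwarding a raw shrink: call the parent's `shrinkFn` if it exists, otherwise fall back to the interface-level `shrink`, which trivially returns the adjusted slice when `shrinkFn` is `null`. A generic sketch of that pattern (`forwardShrink` is a hypothetical helper, not part of this diff):

```zig
const std = @import("std");
const Allocator = std.mem.Allocator;

// Raw-shrink forwarding for any wrapping allocator. The raw shrinkFn
// contract is "must succeed", and both branches uphold it.
fn forwardShrink(
    parent: *Allocator,
    old_mem: []u8,
    old_align: u29,
    new_size: usize,
    new_align: u29,
) []u8 {
    return if (parent.shrinkFn) |shrinkFn|
        // The parent can reclaim memory: forward the raw call unchanged.
        shrinkFn(parent, old_mem, old_align, new_size, new_align)
    else
        // The parent cannot reclaim: Allocator.shrink handles the null
        // case by returning old_mem[0..new_size] without reclaiming.
        parent.shrink(old_mem, new_size);
}
```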