diff --git a/lib/std/heap.zig b/lib/std/heap.zig index e4a409b21871..c85613b27f0b 100644 --- a/lib/std/heap.zig +++ b/lib/std/heap.zig @@ -16,21 +16,24 @@ const Allocator = mem.Allocator; pub const c_allocator = &c_allocator_state; var c_allocator_state = Allocator{ - .reallocFn = cRealloc, - .shrinkFn = cShrink, + .allocFn = cAlloc, + .freeFn = cFree, + // We won't be able to implement resize for the C heap without calling + // realloc which would cause gratuitous amounts of unnecessary copying + // without the caller knowing. I think the best solution here is to + // add a special case in Allocator.realloc for the C heap so that we can + // still use C's realloc when possible. + .resizeFn = null, }; -fn cRealloc(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - assert(new_align <= @alignOf(c_longdouble)); - const old_ptr = if (old_mem.len == 0) null else @ptrCast(*c_void, old_mem.ptr); - const buf = c.realloc(old_ptr, new_size) orelse return error.OutOfMemory; - return @ptrCast([*]u8, buf)[0..new_size]; +fn cAlloc(self: *Allocator, len: usize, alignment: u29) Allocator.Error![]u8 { + assert(alignment <= @alignOf(c_longdouble)); + const buf = c.malloc(len) orelse return error.OutOfMemory; + return @ptrCast([*]u8, buf)[0..len]; } -fn cShrink(self: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - const old_ptr = @ptrCast(*c_void, old_mem.ptr); - const buf = c.realloc(old_ptr, new_size) orelse return old_mem[0..new_size]; - return @ptrCast([*]u8, buf)[0..new_size]; +fn cFree(self: *Allocator, buf: []u8) void { + c.free(buf.ptr); } /// This allocator makes a syscall directly for every allocation and free. 
@@ -43,12 +46,15 @@ else &page_allocator_state; var page_allocator_state = Allocator{ - .reallocFn = PageAllocator.realloc, - .shrinkFn = PageAllocator.shrink, + .allocFn = PageAllocator.alloc, + .freeFn = PageAllocator.free, + .resizeFn = PageAllocator.resize, }; var wasm_page_allocator_state = Allocator{ - .reallocFn = WasmPageAllocator.realloc, - .shrinkFn = WasmPageAllocator.shrink, + .allocFn = WasmPageAllocator.alloc, + .freeFn = WasmPageAllocator.free, + // TODO: implement resize + .resizeFn = null, }; pub const direct_allocator = @compileError("deprecated; use std.heap.page_allocator"); @@ -141,111 +147,48 @@ const PageAllocator = struct { return @intToPtr([*]u8, aligned_addr)[0..n]; } - fn shrink(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - const old_mem = @alignCast(mem.page_size, old_mem_unaligned); + fn free(allocator: *Allocator, buf: []u8) void { + const buf_aligned = @alignCast(mem.page_size, buf); if (builtin.os.tag == .windows) { const w = os.windows; - if (new_size == 0) { - // From the docs: - // "If the dwFreeType parameter is MEM_RELEASE, this parameter - // must be 0 (zero). The function frees the entire region that - // is reserved in the initial allocation call to VirtualAlloc." - // So we can only use MEM_RELEASE when actually releasing the - // whole allocation. - w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE); - } else { - const base_addr = @ptrToInt(old_mem.ptr); - const old_addr_end = base_addr + old_mem.len; - const new_addr_end = base_addr + new_size; - const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size); - if (old_addr_end > new_addr_end_rounded) { - // For shrinking that is not releasing, we will only - // decommit the pages not needed anymore. 
- w.VirtualFree( - @intToPtr(*c_void, new_addr_end_rounded), - old_addr_end - new_addr_end_rounded, - w.MEM_DECOMMIT, - ); - } - } - return old_mem[0..new_size]; - } - const base_addr = @ptrToInt(old_mem.ptr); - const old_addr_end = base_addr + old_mem.len; - const new_addr_end = base_addr + new_size; - const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size); - if (old_addr_end > new_addr_end_rounded) { - const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded); - os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]); + // From the docs: + // "If the dwFreeType parameter is MEM_RELEASE, this parameter + // must be 0 (zero). The function frees the entire region that + // is reserved in the initial allocation call to VirtualAlloc." + // So we can only use MEM_RELEASE when actually releasing the + // whole allocation. + w.VirtualFree(buf_aligned.ptr, 0, w.MEM_RELEASE); + } else { + os.munmap(buf_aligned); } - return old_mem[0..new_size]; } - fn realloc(allocator: *Allocator, old_mem_unaligned: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - const old_mem = @alignCast(mem.page_size, old_mem_unaligned); - if (builtin.os.tag == .windows) { - if (old_mem.len == 0) { - return alloc(allocator, new_size, new_align); - } - - if (new_size <= old_mem.len and new_align <= old_align) { - return shrink(allocator, old_mem, old_align, new_size, new_align); - } - - const w = os.windows; - const base_addr = @ptrToInt(old_mem.ptr); - - if (new_align > old_align and base_addr & (new_align - 1) != 0) { - // Current allocation doesn't satisfy the new alignment. - // For now we'll do a new one no matter what, but maybe - // there is something smarter to do instead. 
- const result = try alloc(allocator, new_size, new_align); - assert(old_mem.len != 0); - @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len)); - w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE); - - return result; - } - - const old_addr_end = base_addr + old_mem.len; - const old_addr_end_rounded = mem.alignForward(old_addr_end, mem.page_size); - const new_addr_end = base_addr + new_size; - const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size); - if (new_addr_end_rounded == old_addr_end_rounded) { - // The reallocation fits in the already allocated pages. - return @ptrCast([*]u8, old_mem.ptr)[0..new_size]; - } - assert(new_addr_end_rounded > old_addr_end_rounded); - - // We need to commit new pages. - const additional_size = new_addr_end - old_addr_end_rounded; - const realloc_addr = w.kernel32.VirtualAlloc( - @intToPtr(*c_void, old_addr_end_rounded), - additional_size, - w.MEM_COMMIT | w.MEM_RESERVE, - w.PAGE_READWRITE, - ) orelse { - // Committing new pages at the end of the existing allocation - // failed, we need to try a new one. 
- const new_alloc_mem = try alloc(allocator, new_size, new_align); - @memcpy(new_alloc_mem.ptr, old_mem.ptr, old_mem.len); - w.VirtualFree(old_mem.ptr, 0, w.MEM_RELEASE); - - return new_alloc_mem; - }; - - assert(@ptrToInt(realloc_addr) == old_addr_end_rounded); - return @ptrCast([*]u8, old_mem.ptr)[0..new_size]; + fn resize(allocator: *Allocator, buf_unaligned: []u8, new_size: usize) error{OutOfMemory}!void { + assert(new_size != 0); + if (new_size > buf_unaligned.len) { + return error.OutOfMemory; } - if (new_size <= old_mem.len and new_align <= old_align) { - return shrink(allocator, old_mem, old_align, new_size, new_align); + + const buf_aligned = @alignCast(mem.page_size, buf_unaligned); + const old_addr_end = @ptrToInt(buf_aligned.ptr) + buf_aligned.len; + const new_addr_end = @ptrToInt(buf_aligned.ptr) + new_size; + const new_addr_end_rounded = mem.alignForward(new_addr_end, mem.page_size); + if (old_addr_end <= new_addr_end_rounded) { + return error.OutOfMemory; } - const result = try alloc(allocator, new_size, new_align); - if (old_mem.len != 0) { - @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len)); - os.munmap(old_mem); + if (builtin.os.tag == .windows) { + const w = os.windows; + // For shrinking that is not releasing, we will only + // decommit the pages not needed anymore. + w.VirtualFree( + @intToPtr(*c_void, new_addr_end_rounded), + old_addr_end - new_addr_end_rounded, + w.MEM_DECOMMIT, + ); + } else { + const ptr = @intToPtr([*]align(mem.page_size) u8, new_addr_end_rounded); + os.munmap(ptr[0 .. old_addr_end - new_addr_end_rounded]); } - return result; } }; @@ -364,6 +307,11 @@ const WasmPageAllocator = struct { return @intCast(usize, prev_page_count); } + fn free(allocator: *Allocator, buf: []u8) void { + // TODO: can this memory be freed? 
+ } + + // TODO: replace realloc and shrink with a resize function pub fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) Allocator.Error![]u8 { if (new_align > std.mem.page_size) { return error.OutOfMemory; @@ -422,8 +370,10 @@ pub const HeapAllocator = switch (builtin.os.tag) { pub fn init() HeapAllocator { return HeapAllocator{ .allocator = Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .freeFn = free, + // TODO: implement resize + .resizeFn = null, }, .heap_handle = null, }; @@ -456,6 +406,13 @@ pub const HeapAllocator = switch (builtin.os.tag) { return @intToPtr([*]u8, adjusted_addr)[0..n]; } + fn free(allocator: *Allocator, buf: []u8) void { + const self = @fieldParentPtr(HeapAllocator, "allocator", allocator); + os.windows.HeapFree(self.heap_handle.?, 0, buf.ptr); + } + + + // TODO: replace shrink and realloc with resize function fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { return realloc(allocator, old_mem, old_align, new_size, new_align) catch { const old_adjusted_addr = @ptrToInt(old_mem.ptr); @@ -524,8 +481,9 @@ pub const ArenaAllocator = struct { pub fn init(child_allocator: *Allocator) ArenaAllocator { return ArenaAllocator{ .allocator = Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .freeFn = free, + .resizeFn = null, }, .child_allocator = child_allocator, .buffer_list = std.SinglyLinkedList([]u8).init(), @@ -583,20 +541,7 @@ pub const ArenaAllocator = struct { } } - fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - if (new_size <= old_mem.len and new_align <= new_size) { - // We can't do anything with the memory, so tell the client to keep it. 
- return error.OutOfMemory; - } else { - const result = try alloc(allocator, new_size, new_align); - @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len)); - return result; - } - } - - fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { - return old_mem[0..new_size]; - } + fn free(allocator: *Allocator, buf: []u8) void { } }; pub const FixedBufferAllocator = struct { @@ -607,8 +552,9 @@ pub const FixedBufferAllocator = struct { pub fn init(buffer: []u8) FixedBufferAllocator { return FixedBufferAllocator{ .allocator = Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .freeFn = free, + .resizeFn = resize, }, .buffer = buffer, .end_index = 0, @@ -630,30 +576,22 @@ pub const FixedBufferAllocator = struct { return result; } - fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { + fn free(allocator: *Allocator, buf: []u8) void { } + + fn resize(allocator: *Allocator, buf: []u8, new_size: usize) error{OutOfMemory}!void { const self = @fieldParentPtr(FixedBufferAllocator, "allocator", allocator); - assert(old_mem.len <= self.end_index); - if (old_mem.ptr == self.buffer.ptr + self.end_index - old_mem.len and - mem.alignForward(@ptrToInt(old_mem.ptr), new_align) == @ptrToInt(old_mem.ptr)) - { - const start_index = self.end_index - old_mem.len; - const new_end_index = start_index + new_size; - if (new_end_index > self.buffer.len) return error.OutOfMemory; - const result = self.buffer[start_index..new_end_index]; - self.end_index = new_end_index; - return result; - } else if (new_size <= old_mem.len and new_align <= old_align) { - // We can't do anything with the memory, so tell the client to keep it. 
+        const buf_end = buf.ptr + buf.len;
+        const allocated_end = self.buffer.ptr + self.end_index;
+        assert(@ptrToInt(buf_end) <= @ptrToInt(allocated_end));
+        if (buf_end != allocated_end) {
             return error.OutOfMemory;
-        } else {
-            const result = try alloc(allocator, new_size, new_align);
-            @memcpy(result.ptr, old_mem.ptr, std.math.min(old_mem.len, result.len));
-            return result;
         }
-    }

-    fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
-        return old_mem[0..new_size];
+        const start_index = self.end_index - buf.len;
+        const new_end_index = start_index + new_size;
+        if (new_end_index > self.buffer.len) return error.OutOfMemory;
+        // in-place resize: only the tracked end_index needs to move
+        self.end_index = new_end_index;
     }

     pub fn reset(self: *FixedBufferAllocator) void {
@@ -674,8 +612,10 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
         pub fn init(buffer: []u8) ThreadSafeFixedBufferAllocator {
             return ThreadSafeFixedBufferAllocator{
                 .allocator = Allocator{
-                    .reallocFn = realloc,
-                    .shrinkFn = shrink,
+                    .allocFn = alloc,
+                    .freeFn = free,
+                    // TODO: implement resize
+                    .resizeFn = null,
                 },
                 .buffer = buffer,
                 .end_index = 0,
@@ -697,6 +637,8 @@ pub const ThreadSafeFixedBufferAllocator = blk: {
             }
         }

+        fn free(allocator: *Allocator, buf: []u8) void { }
+
         fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
             if (new_size <= old_mem.len and new_align <= old_align) {
                 // We can't do anything useful with the memory, tell the client to keep it.
@@ -725,8 +667,9 @@ pub fn stackFallback(comptime size: usize, fallback_allocator: *Allocator) Stack
         .fallback_allocator = fallback_allocator,
         .fixed_buffer_allocator = undefined,
         .allocator = Allocator{
-            .reallocFn = StackFallbackAllocator(size).realloc,
-            .shrinkFn = StackFallbackAllocator(size).shrink,
+            .allocFn = StackFallbackAllocator(size).alloc,
+            .freeFn = StackFallbackAllocator(size).free,
+            .resizeFn = StackFallbackAllocator(size).resize,
         },
     };
 }
@@ -745,58 +688,31 @@ pub fn StackFallbackAllocator(comptime size: usize) type {
             return &self.allocator;
         }

-        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+        fn isInsideBuffer(self: *Self, ptr: [*]u8) bool {
+            return @ptrToInt(ptr) >= @ptrToInt(self.buffer.ptr) and
+                @ptrToInt(ptr) < @ptrToInt(self.buffer.ptr) + self.buffer.len;
+        }
+
+        fn alloc(allocator: *Allocator, len: usize, alignment: u29) error{OutOfMemory}![]u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
-            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
-                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
-            if (in_buffer) {
-                return FixedBufferAllocator.realloc(
-                    &self.fixed_buffer_allocator.allocator,
-                    old_mem,
-                    old_align,
-                    new_size,
-                    new_align,
-                ) catch {
-                    const result = try self.fallback_allocator.reallocFn(
-                        self.fallback_allocator,
-                        &[0]u8{},
-                        undefined,
-                        new_size,
-                        new_align,
-                    );
-                    mem.copy(u8, result, old_mem);
-                    return result;
-                };
+            return FixedBufferAllocator.alloc(&self.fixed_buffer_allocator.allocator, len, alignment) catch
+                return self.fallback_allocator.allocFn(self.fallback_allocator, len, alignment);
+        }
+
+        fn free(allocator: *Allocator, buf: []u8) void {
+            const self = @fieldParentPtr(Self, "allocator", allocator);
+            if (self.isInsideBuffer(buf.ptr)) {
+                return FixedBufferAllocator.free(&self.fixed_buffer_allocator.allocator, buf);
             }
-            return self.fallback_allocator.reallocFn(
-                self.fallback_allocator,
-                old_mem,
-                old_align,
-                new_size,
-                new_align,
-            );
+            return self.fallback_allocator.freeFn(self.fallback_allocator, buf);
         }

-        fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+        fn resize(allocator: *Allocator, buf: []u8, new_len: usize) error{OutOfMemory}!void {
             const self = @fieldParentPtr(Self, "allocator", allocator);
-            const in_buffer = @ptrToInt(old_mem.ptr) >= @ptrToInt(&self.buffer) and
-                @ptrToInt(old_mem.ptr) < @ptrToInt(&self.buffer) + self.buffer.len;
-            if (in_buffer) {
-                return FixedBufferAllocator.shrink(
-                    &self.fixed_buffer_allocator.allocator,
-                    old_mem,
-                    old_align,
-                    new_size,
-                    new_align,
-                );
+            if (self.isInsideBuffer(buf.ptr)) {
+                return self.fixed_buffer_allocator.allocator.resize(buf, new_len);
             }
-            return self.fallback_allocator.shrinkFn(
-                self.fallback_allocator,
-                old_mem,
-                old_align,
-                new_size,
-                new_align,
-            );
+            return self.fallback_allocator.resize(buf, new_len);
         }
     };
 }
diff --git a/lib/std/heap/logging_allocator.zig b/lib/std/heap/logging_allocator.zig
index 0d15986a76a9..bf59a4fd354c 100644
--- a/lib/std/heap/logging_allocator.zig
+++ b/lib/std/heap/logging_allocator.zig
@@ -15,22 +15,19 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type {
         pub fn init(parent_allocator: *Allocator, out_stream: OutStreamType) Self {
             return Self{
                 .allocator = Allocator{
-                    .reallocFn = realloc,
-                    .shrinkFn = shrink,
+                    .allocFn = alloc,
+                    .freeFn = free,
+                    .resizeFn = resize,
                 },
                 .parent_allocator = parent_allocator,
                 .out_stream = out_stream,
             };
         }

-        fn realloc(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+        fn alloc(allocator: *std.mem.Allocator, len: usize, alignment: u29) error{OutOfMemory}![]u8 {
             const self = @fieldParentPtr(Self, "allocator", allocator);
-            if (old_mem.len == 0) {
-                self.out_stream.print("allocation of {} ", .{new_size}) catch {};
-            } else {
-                self.out_stream.print("resize from {} to {} ", .{ old_mem.len, new_size }) catch {};
-            }
-            const result =
self.parent_allocator.reallocFn(self.parent_allocator, old_mem, old_align, new_size, new_align); + self.out_stream.print("allocation of {} ", .{len}) catch {}; + const result = self.parent_allocator.allocFn(self.parent_allocator, len, alignment); if (result) |buff| { self.out_stream.print("success!\n", .{}) catch {}; } else |err| { @@ -39,13 +36,20 @@ pub fn LoggingAllocator(comptime OutStreamType: type) type { return result; } - fn shrink(allocator: *Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 { + fn free(allocator: *std.mem.Allocator, buf: []u8) void { + const self = @fieldParentPtr(Self, "allocator", allocator); + self.parent_allocator.freeFn(self.parent_allocator, buf); + self.out_stream.print("free of {} bytes success!\n", .{buf.len}) catch {}; + } + + fn resize(allocator: *std.mem.Allocator, buf: []u8, new_len: usize) error{OutOfMemory}!void { const self = @fieldParentPtr(Self, "allocator", allocator); - const result = self.parent_allocator.shrinkFn(self.parent_allocator, old_mem, old_align, new_size, new_align); - if (new_size == 0) { - self.out_stream.print("free of {} bytes success!\n", .{old_mem.len}) catch {}; - } else { - self.out_stream.print("shrink from {} bytes to {} bytes success!\n", .{ old_mem.len, new_size }) catch {}; + self.out_stream.print("resize from {} to {} ", .{ buf.len, new_len }) catch {}; + const result = self.parent_allocator.resize(buf, new_len); + if (result) |buff| { + self.out_stream.print("success!\n", .{}) catch {}; + } else |err| { + self.out_stream.print("failure!\n", .{}) catch {}; } return result; } diff --git a/lib/std/mem.zig b/lib/std/mem.zig index a21626e3a1c5..857d189c07c6 100644 --- a/lib/std/mem.zig +++ b/lib/std/mem.zig @@ -16,6 +16,20 @@ pub const page_size = switch (builtin.arch) { pub const Allocator = struct { pub const Error = error{OutOfMemory}; + /// Allocate memory. 
+ allocFn: fn (self: *Allocator, len: usize, alignment: u29) Error![]u8, + + /// Free memory returned by allocFn, this function must succeed. + freeFn: fn (self: *Allocator, mem: []u8) void, + + /// Resizes memory in-place returned by allocFn. This function is optional. + resizeFn: ?fn (self: *Allocator, mem: []u8, new_len: usize) Error!void, + + pub fn resize(self: *Allocator, buf: []u8, new_len: usize) Error!void { + if (self.resizeFn) |resizeFn| return resizeFn(self, buf, new_len); + return Error.OutOfMemory; + } + /// Realloc is used to modify the size or alignment of an existing allocation, /// as well as to provide the allocator with an opportunity to move an allocation /// to a better location. @@ -24,7 +38,7 @@ pub const Allocator = struct { /// When the size/alignment is less than or equal to the previous allocation, /// this function returns `error.OutOfMemory` when the allocator decides the client /// would be better off keeping the extra alignment/size. Clients will call - /// `shrinkFn` when they require the allocator to track a new alignment/size, + /// `shrinkMem` when they require the allocator to track a new alignment/size, /// and so this function should only return success when the allocator considers /// the reallocation desirable from the allocator's perspective. /// As an example, `std.ArrayList` tracks a "capacity", and therefore can handle @@ -37,16 +51,16 @@ pub const Allocator = struct { /// as `old_mem` was when `reallocFn` is called. The bytes of /// `return_value[old_mem.len..]` have undefined values. /// The returned slice must have its pointer aligned at least to `new_alignment` bytes. - reallocFn: fn ( + fn reallocMem( self: *Allocator, /// Guaranteed to be the same as what was returned from most recent call to - /// `reallocFn` or `shrinkFn`. + /// `reallocFn` or `shrinkMem`. /// If `old_mem.len == 0` then this is a new allocation and `new_byte_count` /// is guaranteed to be >= 1. 
old_mem: []u8, /// If `old_mem.len == 0` then this is `undefined`, otherwise: /// Guaranteed to be the same as what was returned from most recent call to - /// `reallocFn` or `shrinkFn`. + /// `reallocFn` or `shrinkMem`. /// Guaranteed to be >= 1. /// Guaranteed to be a power of 2. old_alignment: u29, @@ -57,23 +71,62 @@ pub const Allocator = struct { /// Guaranteed to be a power of 2. /// Returned slice's pointer must have this alignment. new_alignment: u29, - ) Error![]u8, + ) Error![]u8 { + if (old_mem.len == 0) + return self.allocFn(self, new_byte_count, new_alignment); + if (new_byte_count == 0) { + self.freeFn(self, old_mem); + return old_mem[0..0]; + } + if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) { + blk: { + self.resize(old_mem, new_byte_count) catch |e| switch (e) { + error.OutOfMemory => break :blk, + }; + return old_mem.ptr[0..new_byte_count]; + } + } + if (new_byte_count <= old_mem.len and new_alignment == old_alignment) { + return error.OutOfMemory; + } + return self.moveMem(old_mem, new_byte_count, new_alignment); + } + + fn moveMem(self: *Allocator, old_mem: []u8, new_len: usize, new_alignment: u29) Error![]u8 { + assert(old_mem.len > 0); + assert(new_len > 0); + const new_mem = try self.allocFn(self, new_len, new_alignment); + @memcpy(new_mem.ptr, old_mem.ptr, std.math.min(new_len, old_mem.len)); + self.freeFn(self, old_mem); + return new_mem; + } /// This function deallocates memory. It must succeed. - shrinkFn: fn ( + fn shrinkMem( self: *Allocator, /// Guaranteed to be the same as what was returned from most recent call to - /// `reallocFn` or `shrinkFn`. + /// `reallocFn` or `shrinkMem`. old_mem: []u8, /// Guaranteed to be the same as what was returned from most recent call to - /// `reallocFn` or `shrinkFn`. + /// `reallocFn` or `shrinkMem`. old_alignment: u29, /// Guaranteed to be less than or equal to `old_mem.len`. 
new_byte_count: usize, /// If `new_byte_count == 0` then this is `undefined`, otherwise: /// Guaranteed to be less than or equal to `old_alignment`. new_alignment: u29, - ) []u8, + ) []u8 { + if (new_byte_count == 0) { + self.freeFn(self, old_mem); + } else if (isAligned(@ptrToInt(old_mem.ptr), new_alignment)) { + resize(self, old_mem, new_byte_count) catch |e| switch (e) { + error.OutOfMemory => {}, // ignore the error, we made a best effort to shrink + }; + } + // TODO: not sure what to do here. if old_mem is not aligned to new_alignment, then there's + // no way to guarantee that we can return an aligned buffer of the requested size. + return old_mem[0..new_byte_count]; + } /// Returns a pointer to undefined memory. /// Call `destroy` with the result to free the memory. @@ -89,8 +142,7 @@ pub const Allocator = struct { const T = @TypeOf(ptr).Child; if (@sizeOf(T) == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr)); - const shrink_result = self.shrinkFn(self, non_const_ptr[0..@sizeOf(T)], @alignOf(T), 0, 1); - assert(shrink_result.len == 0); + self.freeFn(self, non_const_ptr[0..@sizeOf(T)]); } /// Allocates an array of `n` items of type `T` and sets all the @@ -136,7 +188,7 @@ pub const Allocator = struct { } const byte_count = math.mul(usize, @sizeOf(T), n) catch return Error.OutOfMemory; - const byte_slice = try self.reallocFn(self, &[0]u8{}, undefined, byte_count, a); + const byte_slice = try self.allocFn(self, byte_count, a); assert(byte_slice.len == byte_count); @memset(byte_slice.ptr, undefined, byte_slice.len); if (alignment == null) { @@ -190,7 +242,7 @@ pub const Allocator = struct { const old_byte_slice = mem.sliceAsBytes(old_mem); const byte_count = math.mul(usize, @sizeOf(T), new_n) catch return Error.OutOfMemory; // Note: can't set shrunk memory to undefined as memory shouldn't be modified on realloc failure - const byte_slice = try self.reallocFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment); + const byte_slice = 
try self.reallocMem(old_byte_slice, Slice.alignment, byte_count, new_alignment); assert(byte_slice.len == byte_count); if (new_n > old_mem.len) { @memset(byte_slice.ptr + old_byte_slice.len, undefined, byte_slice.len - old_byte_slice.len); @@ -237,7 +289,7 @@ pub const Allocator = struct { const old_byte_slice = mem.sliceAsBytes(old_mem); @memset(old_byte_slice.ptr + byte_count, undefined, old_byte_slice.len - byte_count); - const byte_slice = self.shrinkFn(self, old_byte_slice, Slice.alignment, byte_count, new_alignment); + const byte_slice = self.shrinkMem(old_byte_slice, Slice.alignment, byte_count, new_alignment); assert(byte_slice.len == byte_count); return mem.bytesAsSlice(T, @alignCast(new_alignment, byte_slice)); } @@ -251,7 +303,7 @@ pub const Allocator = struct { if (bytes_len == 0) return; const non_const_ptr = @intToPtr([*]u8, @ptrToInt(bytes.ptr)); @memset(non_const_ptr, undefined, bytes_len); - const shrink_result = self.shrinkFn(self, non_const_ptr[0..bytes_len], Slice.alignment, 0, 1); + const shrink_result = self.shrinkMem(non_const_ptr[0..bytes_len], Slice.alignment, 0, 1); assert(shrink_result.len == 0); } }; diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig index 081a29cd9757..ec9e6f5ce366 100644 --- a/lib/std/testing/failing_allocator.zig +++ b/lib/std/testing/failing_allocator.zig @@ -39,43 +39,39 @@ pub const FailingAllocator = struct { .allocations = 0, .deallocations = 0, .allocator = mem.Allocator{ - .reallocFn = realloc, - .shrinkFn = shrink, + .allocFn = alloc, + .freeFn = free, + .resizeFn = resize, }, }; } - fn realloc(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 { - const self = @fieldParentPtr(FailingAllocator, "allocator", allocator); + fn alloc(allocator: *std.mem.Allocator, len: usize, alignment: u29) error{OutOfMemory}![]u8 { + const self = @fieldParentPtr(@This(), "allocator", allocator); if (self.index == self.fail_index) { return 
error.OutOfMemory;
         }
-        const result = try self.internal_allocator.reallocFn(
-            self.internal_allocator,
-            old_mem,
-            old_align,
-            new_size,
-            new_align,
-        );
-        if (new_size < old_mem.len) {
-            self.freed_bytes += old_mem.len - new_size;
-            if (new_size == 0)
-                self.deallocations += 1;
-        } else if (new_size > old_mem.len) {
-            self.allocated_bytes += new_size - old_mem.len;
-            if (old_mem.len == 0)
-                self.allocations += 1;
-        }
+        const result = try self.internal_allocator.allocFn(self.internal_allocator, len, alignment);
+        self.allocated_bytes += len;
+        self.allocations += 1;
         self.index += 1;
         return result;
     }

-    fn shrink(allocator: *mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
-        const self = @fieldParentPtr(FailingAllocator, "allocator", allocator);
-        const r = self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
-        self.freed_bytes += old_mem.len - r.len;
-        if (new_size == 0)
-            self.deallocations += 1;
-        return r;
+    fn free(allocator: *std.mem.Allocator, buf: []u8) void {
+        const self = @fieldParentPtr(@This(), "allocator", allocator);
+        self.internal_allocator.freeFn(self.internal_allocator, buf);
+        self.freed_bytes += buf.len;
+        self.deallocations += 1;
+    }
+
+    fn resize(allocator: *std.mem.Allocator, buf: []u8, new_len: usize) error{OutOfMemory}!void {
+        const self = @fieldParentPtr(@This(), "allocator", allocator);
+        try self.internal_allocator.resize(buf, new_len);
+        if (new_len < buf.len) {
+            self.freed_bytes += buf.len - new_len;
+        } else {
+            self.allocated_bytes += new_len - buf.len;
+        }
     }
 };
diff --git a/lib/std/testing/leak_count_allocator.zig b/lib/std/testing/leak_count_allocator.zig
index 65244e529bcd..ea04d4db2976 100644
--- a/lib/std/testing/leak_count_allocator.zig
+++ b/lib/std/testing/leak_count_allocator.zig
@@ -14,31 +14,35 @@ pub const LeakCountAllocator = struct {
         return .{
             .count = 0,
             .allocator = .{
-                .reallocFn = realloc,
-                .shrinkFn = shrink,
+
.allocFn = alloc,
+                .freeFn = free,
+                // Can't forward `allocator.resizeFn` directly: it would be invoked with *this* wrapper's *Allocator as `self`, and the parent's impl would @fieldParentPtr the wrong struct.
+                //.resizeFn = allocator.resizeFn,
+                .resizeFn = resize,
             },
             .internal_allocator = allocator,
         };
     }

-    fn realloc(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) ![]u8 {
+    fn alloc(allocator: *std.mem.Allocator, len: usize, alignment: u29) error{OutOfMemory}![]u8 {
         const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
-        var data = try self.internal_allocator.reallocFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
-        if (old_mem.len == 0) {
-            self.count += 1;
-        }
+        const data = try self.internal_allocator.allocFn(self.internal_allocator, len, alignment);
+        self.count += 1;
         return data;
     }

-    fn shrink(allocator: *std.mem.Allocator, old_mem: []u8, old_align: u29, new_size: usize, new_align: u29) []u8 {
+    fn free(allocator: *std.mem.Allocator, buf: []u8) void {
         const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
-        if (new_size == 0) {
-            if (self.count == 0) {
-                std.debug.panic("error - too many calls to free, most likely double free", .{});
-            }
-            self.count -= 1;
+        if (self.count == 0) {
+            std.debug.panic("error - too many calls to free, most likely double free", .{});
         }
-        return self.internal_allocator.shrinkFn(self.internal_allocator, old_mem, old_align, new_size, new_align);
+        self.count -= 1;
+        return self.internal_allocator.freeFn(self.internal_allocator, buf);
+    }
+
+    fn resize(allocator: *std.mem.Allocator, buf: []u8, new_len: usize) error{OutOfMemory}!void {
+        const self = @fieldParentPtr(LeakCountAllocator, "allocator", allocator);
+        return self.internal_allocator.resize(buf, new_len);
     }

     pub fn validate(self: LeakCountAllocator) !void {