From 82cc7bdec502e75c2a48cbdaae4c97939846e71d Mon Sep 17 00:00:00 2001 From: bnuuydev Date: Thu, 16 Oct 2025 21:22:08 +0100 Subject: [PATCH 1/2] wip: upgrade to zig-master the main breakage here is caused by writergate. there's some weirdness going on to do with file paths and the build system too though, so that should be dealt with before merging. also the new writer and reader don't provide any error specificity, so we don't know why a read or a write failed now. this should also be considered: perhaps creating a custom interface is warranted? --- build.zig.zon | 10 +- src/BuildInterface.zig | 47 +++---- src/Parser.zig | 36 ++--- src/Tokenizer.zig | 4 +- src/components/FillData.zig | 6 +- src/components/fs/FatFileSystem.zig | 47 ++++--- src/components/fs/common.zig | 64 +++++---- src/components/part/GptPartitionTable.zig | 10 +- src/components/part/MbrPartitionTable.zig | 2 +- src/dim.zig | 157 +++++++++++----------- 10 files changed, 200 insertions(+), 183 deletions(-) diff --git a/build.zig.zon b/build.zig.zon index 3ceb020..56e0973 100644 --- a/build.zig.zon +++ b/build.zig.zon @@ -4,14 +4,14 @@ .fingerprint = 0x9947018c924eecb2, .dependencies = .{ .zfat = .{ - .url = "https://github.com/ZigEmbeddedGroup/zfat/archive/3ce06d43a4e04d387034dcae2f486b050701f321.tar.gz", - .hash = "zfat-0.0.0-AAAAAMYlcABdh06Mn9CNk8Ccy_3bBFgJr8wo4jKza1q-", + .url = "https://github.com/CascadeOS/zfat/archive/refs/heads/0.15.zip", + .hash = "zfat-0.16.0-SNNK9fKtTgASssfmCblZwRMLU4pndVtwxTNhYCegBOyA", }, .args = .{ - .url = "git+https://github.com/ikskuh/zig-args.git#9425b94c103a031777fdd272c555ce93a7dea581", - .hash = "args-0.0.0-CiLiqv_NAAC97fGpk9hS2K681jkiqPsWP6w3ucb_ctGH", + .url = "git+https://github.com/ikskuh/zig-args.git#8ae26b44a884ff20dca98ee84c098e8f8e94902f", + .hash = "args-0.0.0-CiLiqojRAACGzDRO7A9dw7kWSchNk29caJZkXuMCb0Cn", }, - }, + }, .paths = .{ "build.zig", "build.zig.zon", diff --git a/src/BuildInterface.zig b/src/BuildInterface.zig index 4d9ebda..10d559d 100644 --- a/src/BuildInterface.zig +++ b/src/BuildInterface.zig @@ -63,13 +63,13 @@ pub fn createDisk(dimmer: Interface, size: u64, content: Content) std.Build.Lazy } fn renderContent(wfs: *std.Build.Step.WriteFile, allocator: std.mem.Allocator, content: Content) struct { []const u8, ContentWriter.VariableMap } { - var code: std.ArrayList(u8) = .init(allocator); + var code = std.Io.Writer.Allocating.init(allocator); defer code.deinit(); var variables: ContentWriter.VariableMap = .init(allocator); var cw: ContentWriter = .{ - .code = code.writer(), + .code = &code.writer, .wfs = wfs, .vars = &variables, }; @@ -99,7 +99,7 @@ const ContentWriter = struct { pub const VariableMap = std.StringArrayHashMap(struct { std.Build.LazyPath, ContentWriter.UsageHint }); wfs: *std.Build.Step.WriteFile, - code: std.ArrayList(u8).Writer, + code: *std.Io.Writer, vars: *VariableMap, fn render(cw: ContentWriter, content: Content) !void { @@ -117,7 +117,7 @@ const ContentWriter = struct { }, .paste_file => |data| { - try cw.code.print("paste-file {}", .{cw.fmtLazyPath(data, .file)}); + try cw.code.print("paste-file {f}", .{cw.fmtLazyPath(data, .file)}); }, .mbr_part_table => |data| { @@ -176,7 +176,7 @@ const ContentWriter = struct { try cw.code.writeByte('\n'); if (part.name) |name| { - try cw.code.print(" name \"{}\"\n", .{std.zig.fmtEscapes(name)}); + try cw.code.print(" name \"{f}\"\n", .{std.zig.fmtString(name)}); } if (part.offset) |offset| { try cw.code.print(" offset {d}\n", .{offset}); @@ -198,7 +198,7 @@ const ContentWriter = struct { 
@tagName(data.format), }); if (data.label) |label| { - try cw.code.print(" label {}\n", .{ + try cw.code.print(" label {f}\n", .{ fmtPath(label), }); } @@ -213,29 +213,29 @@ const ContentWriter = struct { fn renderFileSystemTree(cw: ContentWriter, fs: FileSystem) !void { for (fs.items) |item| { switch (item) { - .empty_dir => |dir| try cw.code.print("mkdir {}\n", .{ + .empty_dir => |dir| try cw.code.print("mkdir {f}\n", .{ fmtPath(dir), }), - .copy_dir => |copy| try cw.code.print("copy-dir {} {}\n", .{ + .copy_dir => |copy| try cw.code.print("copy-dir {f} {f}\n", .{ fmtPath(copy.destination), cw.fmtLazyPath(copy.source, .directory), }), - .copy_file => |copy| try cw.code.print("copy-file {} {}\n", .{ + .copy_file => |copy| try cw.code.print("copy-file {f} {f}\n", .{ fmtPath(copy.destination), cw.fmtLazyPath(copy.source, .file), }), - .include_script => |script| try cw.code.print("!include {}\n", .{ + .include_script => |script| try cw.code.print("!include {f}\n", .{ cw.fmtLazyPath(script, .file), }), } } } - const PathFormatter = std.fmt.Formatter(formatPath); - const LazyPathFormatter = std.fmt.Formatter(formatLazyPath); + const PathFormatter = std.fmt.Alt([]const u8, formatPath); + const LazyPathFormatter = std.fmt.Alt(struct { ContentWriter, std.Build.LazyPath, UsageHint }, formatLazyPath); const UsageHint = enum { file, directory }; fn fmtLazyPath(cw: ContentWriter, path: std.Build.LazyPath, hint: UsageHint) LazyPathFormatter { @@ -248,13 +248,9 @@ const ContentWriter = struct { fn formatLazyPath( data: struct { ContentWriter, std.Build.LazyPath, UsageHint }, - comptime fmt: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, - ) !void { + writer: *std.Io.Writer, + ) error{WriteFailed}!void { const cw, const path, const hint = data; - _ = fmt; - _ = options; switch (path) { .cwd_relative, @@ -267,7 +263,7 @@ const ContentWriter = struct { std.debug.assert(std.fs.path.isAbsolute(full_path)); - try writer.print("{}", .{ + try writer.print("{f}", .{ fmtPath(full_path), }); }, @@ -278,7 +274,7 @@ const ContentWriter = struct { const var_id = cw.vars.count() + 1; const var_name = cw.wfs.step.owner.fmt("PATH{}", .{var_id}); - try cw.vars.put(var_name, .{ path, hint }); + cw.vars.put(var_name, .{ path, hint }) catch return error.WriteFailed; try writer.print("${s}", .{var_name}); }, @@ -287,13 +283,8 @@ const ContentWriter = struct { fn formatPath( path: []const u8, - comptime fmt: []const u8, - options: std.fmt.FormatOptions, - writer: anytype, + writer: *std.Io.Writer, ) !void { - _ = fmt; - _ = options; - const is_safe_word = for (path) |char| { switch (char) { 'A'...'Z', @@ -318,7 +309,7 @@ const ContentWriter = struct { if (c == '\\') { try writer.writeAll("/"); } else { - try writer.print("{}", .{std.zig.fmtEscapes(&[_]u8{c})}); + try writer.print("{f}", .{std.zig.fmtString(&[_]u8{c})}); } } @@ -419,7 +410,7 @@ pub const FatFs = struct { pub const FileSystemBuilder = struct { b: *std.Build, - list: std.ArrayListUnmanaged(FileSystem.Item), + list: std.ArrayList(FileSystem.Item), pub fn init(b: *std.Build) FileSystemBuilder { return FileSystemBuilder{ diff --git a/src/Parser.zig b/src/Parser.zig index ff7c179..64609b7 100644 --- a/src/Parser.zig +++ b/src/Parser.zig @@ -103,10 +103,12 @@ pub fn get_include_path(parser: Parser, allocator: std.mem.Allocator, rel_includ if (parser.file_stack.len == parser.max_include_depth) return error.MaxIncludeDepthReached; - const top_path = if (parser.file_stack.len > 0) - parser.file_stack[parser.file_stack.len - 1].path - else - ""; + // 
const top_path = if (parser.file_stack.len > 0) + // parser.file_stack[parser.file_stack.len - 1].path + // else + // ""; + + const top_path = ""; // TODO what the fuck, the actual issue here needs to be triaged. this workaround fixes things for me for now though. const abs_include_path = try std.fs.path.resolvePosix( allocator, @@ -208,10 +210,12 @@ fn resolve_value(parser: *Parser, token_type: TokenType, text: []const u8) ![]co if (!has_includes) return content_slice; - var unescaped: std.ArrayList(u8) = .init(parser.arena.allocator()); - defer unescaped.deinit(); + const allocator = parser.arena.allocator(); + + var unescaped = std.ArrayList(u8).empty; + defer unescaped.deinit(allocator); - try unescaped.ensureTotalCapacityPrecise(content_slice.len); + try unescaped.ensureTotalCapacityPrecise(allocator, content_slice.len); { var i: usize = 0; @@ -220,7 +224,7 @@ fn resolve_value(parser: *Parser, token_type: TokenType, text: []const u8) ![]co i += 1; if (c != '\\') { - try unescaped.append(c); + try unescaped.append(allocator, c); continue; } @@ -233,20 +237,20 @@ fn resolve_value(parser: *Parser, token_type: TokenType, text: []const u8) ![]co errdefer std.log.err("invalid escape sequence: \\{s}", .{[_]u8{esc_code}}); switch (esc_code) { - 'r' => try unescaped.append('\r'), - 'n' => try unescaped.append('\n'), - 't' => try unescaped.append('\t'), - '\\' => try unescaped.append('\\'), - '\"' => try unescaped.append('\"'), - '\'' => try unescaped.append('\''), - 'e' => try unescaped.append('\x1B'), + 'r' => try unescaped.append(allocator, '\r'), + 'n' => try unescaped.append(allocator, '\n'), + 't' => try unescaped.append(allocator, '\t'), + '\\' => try unescaped.append(allocator, '\\'), + '\"' => try unescaped.append(allocator, '\"'), + '\'' => try unescaped.append(allocator, '\''), + 'e' => try unescaped.append(allocator, '\x1B'), else => return error.InvalidEscapeSequence, } } } - return try unescaped.toOwnedSlice(); + return try unescaped.toOwnedSlice(allocator); }, .comment, .directive, .whitespace => unreachable, diff --git a/src/Tokenizer.zig b/src/Tokenizer.zig index cdac717..c8a0ab5 100644 --- a/src/Tokenizer.zig +++ b/src/Tokenizer.zig @@ -186,9 +186,9 @@ test Tokenizer { var offset: u32 = 0; for (seq) |expected| { const actual = (try tokenizer.next()) orelse return error.Unexpected; - errdefer std.debug.print("unexpected token: .{} \"{}\"\n", .{ + errdefer std.debug.print("unexpected token: .{f} \"{f}\"\n", .{ std.zig.fmtId(@tagName(actual.type)), - std.zig.fmtEscapes(tokenizer.source[actual.offset..][0..actual.len]), + std.zig.fmtString(tokenizer.source[actual.offset..][0..actual.len]), }); try std.testing.expectEqualStrings(expected.@"1", tokenizer.get_text(actual)); try std.testing.expectEqual(offset, actual.offset); diff --git a/src/components/FillData.zig b/src/components/FillData.zig index 82b8cb4..e5f0458 100644 --- a/src/components/FillData.zig +++ b/src/components/FillData.zig @@ -20,8 +20,10 @@ pub fn parse(ctx: dim.Context) !dim.Content { } fn render(self: *FillData, stream: *dim.BinaryStream) dim.Content.RenderError!void { - try stream.writer().writeByteNTimes( + var writer = stream.writer(); + writer.interface.splatByteAll( self.fill_value, stream.length, - ); + ) catch return error.Overflow; // TODO FIX we don't know actually why this failed. + // std.Io.Writer only returns error.WriteFailed. 
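+    //
+    // One hedged way to get the specificity back (sketch only, not wired up here): follow the
+    // pattern of the new std.fs.File.Writer, which records the concrete failure in an `err`
+    // field, e.g. give dim.BinaryStream.Writer an `err: ?BinaryStream.WriteError = null` field
+    // that write_some() fills in before returning error.WriteFailed, then inspect it here:
+    //
+    //     writer.interface.splatByteAll(self.fill_value, stream.length) catch {
+    //         return writer.err orelse error.IoError;
+    //     };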
} diff --git a/src/components/fs/FatFileSystem.zig b/src/components/fs/FatFileSystem.zig index 5b5de50..872b1d7 100644 --- a/src/components/fs/FatFileSystem.zig +++ b/src/components/fs/FatFileSystem.zig @@ -14,7 +14,7 @@ format_as: FatType, label: ?[]const u8 = null, fats: ?fatfs.FatTables = null, rootdir_size: ?c_uint = null, -ops: std.ArrayList(common.FsOperation), +ops: std.array_list.Managed(common.FsOperation), sector_align: ?c_uint = null, cluster_size: ?u32 = null, @@ -88,7 +88,7 @@ fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { if (stream.length < min_size) { // TODO(fqu): Report fatal erro! - std.log.err("cannot format {} bytes with {s}: min required size is {}", .{ + std.log.err("cannot format {f} bytes with {s}: min required size is {f}", .{ @as(dim.DiskSize, @enumFromInt(stream.length)), @tagName(self.format_as), @as(dim.DiskSize, @enumFromInt(min_size)), @@ -98,7 +98,7 @@ fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { if (stream.length > max_size) { // TODO(fqu): Report warning - std.log.warn("will not use all available space: available space is {}, but maximum size for {s} is {}", .{ + std.log.warn("will not use all available space: available space is {f}, but maximum size for {s} is {f}", .{ @as(dim.DiskSize, @enumFromInt(stream.length)), @tagName(self.format_as), @as(dim.DiskSize, @enumFromInt(min_size)), @@ -147,8 +147,8 @@ fn render(self: *FAT, stream: *dim.BinaryStream) dim.Content.RenderError!void { return error.IoError; } } else { - std.log.err("label \"{}\" is {} characters long, but only up to {} are permitted.", .{ - std.zig.fmtEscapes(label), + std.log.err("label \"{f}\" is {} characters long, but only up to {} are permitted.", .{ + std.zig.fmtString(label), label.len, max_label_len, }); @@ -212,7 +212,7 @@ const AtomicOps = struct { }; } - pub fn mkfile(ops: AtomicOps, path: []const u8, reader: anytype) dim.Content.RenderError!void { + pub fn mkfile(ops: AtomicOps, path: []const u8, reader: *std.Io.Reader) dim.Content.RenderError!void { _ = ops; var path_buffer: [max_path_len:0]u8 = undefined; @@ -244,19 +244,28 @@ const AtomicOps = struct { }; defer fs_file.close(); - var fifo: std.fifo.LinearFifo(u8, .{ .Static = 8192 }) = .init(); - fifo.pump( - reader, - fs_file.writer(), - ) catch |err| switch (@as(dim.FileHandle.ReadError || fatfs.File.ReadError.Error, err)) { - error.Overflow => return error.IoError, - error.ReadFileFailed => return error.IoError, - error.Timeout => @panic("implementation bug in fatfs glue"), - error.DiskErr => return error.IoError, - error.IntErr => return error.IoError, - error.Denied => @panic("implementation bug in fatfs glue"), - error.InvalidObject => @panic("implementation bug in fatfs glue"), - }; + var writer_buf: [8192]u8 = undefined; + var writer = fs_file.writer(&writer_buf); + + _ = reader.streamRemaining(&writer.interface) catch return error.IoError; + + writer.interface.flush() catch return error.IoError; + + // TODO we've lost a lot of error specificity due to the use of the new APIs + // See old code: + + // fifo.pump( + // reader, + // fs_file.writer(), + // ) catch |err| switch (@as(dim.FileHandle.ReadError || fatfs.File.ReadError.Error, err)) { + // error.Overflow => return error.IoError, + // error.ReadFileFailed => return error.IoError, + // error.Timeout => @panic("implementation bug in fatfs glue"), + // error.DiskErr => return error.IoError, + // error.IntErr => return error.IoError, + // error.Denied => @panic("implementation bug in fatfs glue"), + 
// error.InvalidObject => @panic("implementation bug in fatfs glue"), + // }; } }; diff --git a/src/components/fs/common.zig b/src/components/fs/common.zig index a3421e9..d3cb1e6 100644 --- a/src/components/fs/common.zig +++ b/src/components/fs/common.zig @@ -56,7 +56,9 @@ fn Executor(comptime T: type) type { }; defer handle.close(); - try exec.add_file(data.path, handle.reader()); + var reader_buf: [1024]u8 = undefined; + var reader = handle.reader(&reader_buf); + try exec.add_file(data.path, &reader.interface); }, .copy_dir => |data| { var iter_dir = data.source.open_dir() catch |err| switch (err) { @@ -91,7 +93,10 @@ fn Executor(comptime T: type) type { var file = try fname.open(); defer file.close(); - try exec.add_file(path, file.reader()); + var reader_buf: [1024]u8 = undefined; + var reader = file.reader(&reader_buf); + + try exec.add_file(path, &reader.interface); }, .directory => { @@ -117,14 +122,13 @@ fn Executor(comptime T: type) type { try data.contents.render(&bs); - var fbs: std.io.FixedBufferStream([]u8) = .{ .buffer = buffer, .pos = 0 }; - - try exec.add_file(data.path, fbs.reader()); + var reader = std.Io.Reader.fixed(buffer); + try exec.add_file(data.path, &reader); }, } } - fn add_file(exec: Exec, path: [:0]const u8, reader: anytype) !void { + fn add_file(exec: Exec, path: [:0]const u8, reader: *std.Io.Reader) !void { if (std.fs.path.dirnamePosix(path)) |dir| { try exec.recursive_mkdir(dir); } @@ -143,7 +147,7 @@ fn Executor(comptime T: type) type { try exec.inner_mkdir(path); } - fn inner_mkfile(exec: Exec, path: []const u8, reader: anytype) dim.Content.RenderError!void { + fn inner_mkfile(exec: Exec, path: []const u8, reader: *std.Io.Reader) dim.Content.RenderError!void { try exec.inner.mkfile(path, reader); } @@ -153,24 +157,26 @@ fn Executor(comptime T: type) type { fn walk_err(err: (std.fs.Dir.OpenError || std.mem.Allocator.Error)) dim.Content.RenderError { return switch (err) { - error.InvalidUtf8 => error.InvalidPath, - error.InvalidWtf8 => error.InvalidPath, - error.BadPathName => error.InvalidPath, + error.InvalidUtf8, + error.InvalidWtf8, + error.BadPathName, error.NameTooLong => error.InvalidPath, error.OutOfMemory => error.OutOfMemory, error.FileNotFound => error.FileNotFound, - error.DeviceBusy => error.IoError, - error.AccessDenied => error.IoError, - error.SystemResources => error.IoError, - error.NoDevice => error.IoError, - error.Unexpected => error.IoError, - error.NetworkNotFound => error.IoError, - error.SymLinkLoop => error.IoError, - error.ProcessFdQuotaExceeded => error.IoError, - error.SystemFdQuotaExceeded => error.IoError, - error.NotDir => error.IoError, + error.DeviceBusy, + error.AccessDenied, + error.SystemResources, + error.NoDevice, + error.Unexpected, + error.NetworkNotFound, + error.SymLinkLoop, + error.ProcessFdQuotaExceeded, + error.SystemFdQuotaExceeded, + error.NotDir, + error.ProcessNotFound, + error.PermissionDenied, => error.IoError, }; } }; @@ -185,23 +191,23 @@ fn parse_path(ctx: dim.Context) ![:0]const u8 { } if (!std.mem.startsWith(u8, path, "/")) { - try ctx.report_nonfatal_error("Path '{}' did not start with a \"/\"", .{ - std.zig.fmtEscapes(path), + try ctx.report_nonfatal_error("Path '{f}' did not start with a \"/\"", .{ + std.zig.fmtString(path), }); } for (path) |c| { if (c < 0x20 or c == 0x7F or c == '\\') { - try ctx.report_nonfatal_error("Path '{}' contains invalid character 0x{X:0>2}", .{ - std.zig.fmtEscapes(path), + try ctx.report_nonfatal_error("Path '{f}' contains invalid character 0x{X:0>2}", .{ + 
std.zig.fmtString(path), c, }); } } _ = std.unicode.Utf8View.init(path) catch |err| { - try ctx.report_nonfatal_error("Path '{}' is not a valid UTF-8 string: {s}", .{ - std.zig.fmtEscapes(path), + try ctx.report_nonfatal_error("Path '{f}' is not a valid UTF-8 string: {s}", .{ + std.zig.fmtString(path), @errorName(err), }); }; @@ -249,8 +255,8 @@ pub fn parse_ops(ctx: dim.Context, end_seq: []const u8, handler: anytype) !void } fn normalize(allocator: std.mem.Allocator, src_path: []const u8) ![:0]const u8 { - var list = std.ArrayList([]const u8).init(allocator); - defer list.deinit(); + var list: std.ArrayList([]const u8) = .empty; + defer list.deinit(allocator); var parts = std.mem.tokenizeAny(u8, src_path, "\\/"); @@ -263,7 +269,7 @@ fn normalize(allocator: std.mem.Allocator, src_path: []const u8) ![:0]const u8 { _ = list.pop(); } else { // this is an actual "descend" - try list.append(part); + try list.append(allocator, part); } } diff --git a/src/components/part/GptPartitionTable.zig b/src/components/part/GptPartitionTable.zig index e85007c..249c40f 100644 --- a/src/components/part/GptPartitionTable.zig +++ b/src/components/part/GptPartitionTable.zig @@ -17,7 +17,9 @@ pub fn parse(ctx: dim.Context) !dim.Content { .partitions = undefined, }; - var partitions = std.ArrayList(Partition).init(ctx.get_arena()); + const allocator = ctx.get_arena(); + + var partitions: std.ArrayList(Partition) = .empty; loop: while (true) { const kw = try ctx.parse_enum(enum { guid, @@ -34,7 +36,7 @@ pub fn parse(ctx: dim.Context) !dim.Content { pt.disk_id = Guid.parse(guid_str[0..36].*) catch |err| return ctx.report_fatal_error("Invalid disk GUID: {}", .{err}); }, - .part => (try partitions.addOne()).* = try parsePartition(ctx), + .part => (try partitions.addOne(allocator)).* = try parsePartition(ctx), .@"legacy-bootable" => pt.legacy_bootable = true, .endgpt => break :loop, } @@ -113,7 +115,7 @@ fn parsePartition(ctx: dim.Context) !Partition { const type_guid = known_types.get(type_name) orelse blk: { if (type_name.len == 36) if (Guid.parse(type_name[0..36].*)) |guid| break :blk guid else |_| {}; - return ctx.report_fatal_error("unknown partition type: `{}`", .{std.zig.fmtEscapes(type_name)}); + return ctx.report_fatal_error("unknown partition type: `{f}`", .{std.zig.fmtString(type_name)}); }; try updater.set(.type, type_guid); @@ -167,7 +169,7 @@ pub fn render(table: *PartTable, stream: *dim.BinaryStream) dim.Content.RenderEr for (table.partitions[0..], 0..) 
|partition, i| { @memset(&pe_block, 0); - const offset = partition.offset orelse 33 * block_size; + const offset = partition.offset orelse 34 * block_size; const size = partition.size orelse if (i == table.partitions.len - 1) ((max_partition_lba + 1) * block_size) - offset else diff --git a/src/components/part/MbrPartitionTable.zig b/src/components/part/MbrPartitionTable.zig index 66ceca8..9d21b69 100644 --- a/src/components/part/MbrPartitionTable.zig +++ b/src/components/part/MbrPartitionTable.zig @@ -115,7 +115,7 @@ fn parse_partition(ctx: dim.Context) !Partition { value else |_| known_partition_types.get(part_name) orelse blk: { - try ctx.report_nonfatal_error("unknown partition type '{}'", .{std.zig.fmtEscapes(part_name)}); + try ctx.report_nonfatal_error("unknown partition type '{f}'", .{std.zig.fmtString(part_name)}); break :blk 0x00; }; diff --git a/src/dim.zig b/src/dim.zig index 01438d8..fe3ae8a 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -89,8 +89,8 @@ pub fn main() !u8 { const val = pos[idx + 1 ..]; try var_map.put(gpa, key, val); } else { - std.debug.print("unexpected argument positional '{}'\n", .{ - std.zig.fmtEscapes(pos), + std.debug.print("unexpected argument positional '{f}'\n", .{ + std.zig.fmtString(pos), }); bad_args = true; } @@ -106,13 +106,17 @@ pub fn main() !u8 { var current_dir = try std.fs.cwd().openDir(".", .{}); defer current_dir.close(); - const script_source = try current_dir.readFileAlloc(gpa, script_path, max_script_size); + const script_source = try current_dir.readFileAlloc(script_path, gpa, .limited(max_script_size)); defer gpa.free(script_source); if (options.@"deps-file") |deps_file_path| { global_deps_file = try std.fs.cwd().createFile(deps_file_path, .{}); - try global_deps_file.?.writer().print( + var writer = global_deps_file.?.writerStreaming(&.{}); + // TODO would it be better to store the writer and just reuse it? that way we can utilise + // buffering and not have the risk of someone positional writes and breaking things. 
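+        //
+        // Rough sketch of that alternative (names are placeholders, and the buffer must outlive
+        // the writer): keep one buffered writer alongside global_deps_file and flush it on exit,
+        // instead of constructing an unbuffered one at each call site:
+        //
+        //     var deps_buf: [4096]u8 = undefined;
+        //     global_deps_writer = global_deps_file.?.writer(&deps_buf);
+        //     ...
+        //     try global_deps_writer.?.interface.print("{s}: {s}", .{ output_path, script_path });
+        //     ...
+        //     defer if (global_deps_writer) |*w| w.interface.flush() catch {};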
+ + try writer.interface.print( \\{s}: {s} , .{ output_path, @@ -252,7 +256,7 @@ pub const Context = struct { ); if (converted) |ok| return ok; - std.debug.print("detected invalid enum tag for {s}: \"{}\"\n", .{ @typeName(E), std.zig.fmtEscapes(tag_name) }); + std.debug.print("detected invalid enum tag for {s}: \"{f}\"\n", .{ @typeName(E), std.zig.fmtString(tag_name) }); std.debug.print("valid options are:\n", .{}); for (std.enums.values(E)) |val| { @@ -293,8 +297,8 @@ pub const Context = struct { } } - return ctx.report_fatal_error("unknown content type: '{}'", .{ - std.zig.fmtEscapes(content_type_str), + return ctx.report_fatal_error("unknown content type: '{f}'", .{ + std.zig.fmtString(content_type_str), }); } }; @@ -383,14 +387,14 @@ const Environment = struct { fn fetch_file(io: *const Parser.IO, allocator: std.mem.Allocator, path: []const u8) error{ FileNotFound, IoError, OutOfMemory, InvalidPath }![]const u8 { const env: *const Environment = @fieldParentPtr("io", io); - const contents = env.include_base.readFileAlloc(allocator, path, max_script_size) catch |err| switch (err) { + const contents = env.include_base.readFileAlloc(path, allocator, .limited(max_script_size)) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, error.FileNotFound => { const ctx = Context{ .env = @constCast(env) }; var buffer: [std.fs.max_path_bytes]u8 = undefined; - try ctx.report_nonfatal_error("failed to open file: \"{}/{}\"", .{ - std.zig.fmtEscapes(env.include_base.realpath(".", &buffer) catch return error.FileNotFound), - std.zig.fmtEscapes(path), + try ctx.report_nonfatal_error("failed to open file: \"{f}/{f}\"", .{ + std.zig.fmtString(env.include_base.realpath(".", &buffer) catch return error.FileNotFound), + std.zig.fmtString(path), }); return error.FileNotFound; }, @@ -470,9 +474,9 @@ pub const FileName = struct { const file = name.root_dir.openFile(name.rel_path, .{}) catch |err| switch (err) { error.FileNotFound => { var buffer: [std.fs.max_path_bytes]u8 = undefined; - std.log.err("failed to open \"{}/{}\": not found", .{ - std.zig.fmtEscapes(name.root_dir.realpath(".", &buffer) catch |e| @errorName(e)), - std.zig.fmtEscapes(name.rel_path), + std.log.err("failed to open \"{f}/{f}\": not found", .{ + std.zig.fmtString(name.root_dir.realpath(".", &buffer) catch |e| @errorName(e)), + std.zig.fmtString(name.rel_path), }); return error.FileNotFound; }, @@ -503,6 +507,8 @@ pub const FileName = struct { error.NotDir, error.FileLocksNotSupported, error.FileBusy, + error.ProcessNotFound, + error.PermissionDenied, => return error.IoError, }; @@ -515,9 +521,9 @@ pub const FileName = struct { const dir = name.root_dir.openDir(name.rel_path, .{ .iterate = true }) catch |err| switch (err) { error.FileNotFound => { var buffer: [std.fs.max_path_bytes]u8 = undefined; - std.log.err("failed to open \"{}/{}\": not found", .{ - std.zig.fmtEscapes(name.root_dir.realpath(".", &buffer) catch |e| @errorName(e)), - std.zig.fmtEscapes(name.rel_path), + std.log.err("failed to open \"{f}/{f}\": not found", .{ + std.zig.fmtString(name.root_dir.realpath(".", &buffer) catch |e| @errorName(e)), + std.zig.fmtString(name.rel_path), }); return error.FileNotFound; }, @@ -538,6 +544,8 @@ pub const FileName = struct { error.ProcessFdQuotaExceeded, error.SystemFdQuotaExceeded, error.NotDir, + error.ProcessNotFound, + error.PermissionDenied, => return error.IoError, }; @@ -597,19 +605,20 @@ pub const FileName = struct { var handle = try file.open(); defer handle.close(); - var fifo: std.fifo.LinearFifo(u8, .{ 
.Static = 8192 }) = .init(); + var reader_buf: [8192]u8 = undefined; + var reader = handle.reader(&reader_buf); - try fifo.pump( - handle.reader(), - stream.writer(), - ); + var writer = stream.writer(); + + _ = writer.interface.sendFileAll(&reader, .unlimited) catch |e| return switch (e) { + error.WriteFailed => error.IoError, // TODO this isn't great + else => |err| err, + }; } }; pub const FileHandle = struct { - pub const ReadError = error{ReadFileFailed}; - - pub const Reader = std.io.Reader(std.fs.File, ReadError, read_some); + pub const ReadError = std.Io.Reader.Error; file: std.fs.File, @@ -618,36 +627,15 @@ pub const FileHandle = struct { fd.* = undefined; } - pub fn reader(fd: FileHandle) Reader { - return .{ .context = fd.file }; - } - - fn read_some(file: std.fs.File, data: []u8) ReadError!usize { - return file.read(data) catch |err| switch (err) { - error.InputOutput, - error.AccessDenied, - error.BrokenPipe, - error.SystemResources, - error.OperationAborted, - error.LockViolation, - error.WouldBlock, - error.ConnectionResetByPeer, - error.ProcessNotFound, - error.Unexpected, - error.IsDir, - error.ConnectionTimedOut, - error.NotOpenForReading, - error.SocketNotConnected, - error.Canceled, - => return error.ReadFileFailed, - }; + pub fn reader(fd: FileHandle, buf: []u8) std.fs.File.Reader { + return fd.file.reader(buf); } }; pub const BinaryStream = struct { pub const WriteError = error{ Overflow, IoError }; + pub const WriterError = std.Io.Writer.Error; pub const ReadError = error{ Overflow, IoError }; - pub const Writer = std.io.Writer(*BinaryStream, WriteError, write_some); backing: Backing, @@ -707,26 +695,9 @@ pub const BinaryStream = struct { switch (bs.backing) { .buffer => |ptr| @memcpy(data, ptr[@intCast(offset)..][0..data.len]), .file => |state| { - state.file.seekTo(state.base + offset) catch return error.IoError; - state.file.reader().readNoEof(data) catch |err| switch (err) { - error.InputOutput, - error.AccessDenied, - error.BrokenPipe, - error.SystemResources, - error.OperationAborted, - error.LockViolation, - error.WouldBlock, - error.ConnectionResetByPeer, - error.ProcessNotFound, - error.Unexpected, - error.IsDir, - error.ConnectionTimedOut, - error.NotOpenForReading, - error.SocketNotConnected, - error.Canceled, - error.EndOfStream, - => return error.IoError, - }; + var reader = state.file.reader(&.{}); + reader.seekTo(state.base + offset) catch return error.IoError; + reader.interface.readSliceAll(data) catch return error.IoError; }, } } @@ -757,6 +728,8 @@ pub const BinaryStream = struct { error.ProcessNotFound, error.NoDevice, error.Unexpected, + error.PermissionDenied, + error.MessageTooBig, => return error.IoError, }; }, @@ -770,15 +743,48 @@ pub const BinaryStream = struct { } pub fn writer(bs: *BinaryStream) Writer { - return .{ .context = bs }; + return .{ + .interface = .{ + .vtable = &.{ + .drain = Writer.drain, + }, + .buffer = &.{}, + }, + .stream = bs, + }; } - fn write_some(stream: *BinaryStream, data: []const u8) WriteError!usize { + pub const Writer = struct { + interface: std.Io.Writer, + stream: *BinaryStream, + + pub fn drain(io_w: *std.Io.Writer, data: []const []const u8, splat: usize) std.Io.Writer.Error!usize { + const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w)); + + var written: usize = 0; + + for (data[0..data.len - 1]) |bytes| { + written += try w.stream.write_some(bytes); + } + + const pattern = data[data.len - 1]; + switch (pattern.len) { + 0 => {}, + else => for (0..splat) |_| { + written += try 
w.stream.write_some(pattern); + }, + } + + return written; + } + }; + + fn write_some(stream: *BinaryStream, data: []const u8) std.Io.Writer.Error!usize { const remaining_len = stream.length - stream.virtual_offset; const written_len: usize = @intCast(@min(remaining_len, data.len)); - try stream.write(stream.virtual_offset, data[0..written_len]); + stream.write(stream.virtual_offset, data[0..written_len]) catch return error.WriteFailed; stream.virtual_offset += written_len; return written_len; @@ -838,10 +844,7 @@ pub const DiskSize = enum(u64) { return @intFromEnum(ds); } - pub fn format(ds: DiskSize, fmt: []const u8, opt: std.fmt.FormatOptions, writer: anytype) !void { - _ = fmt; - _ = opt; - + pub fn format(ds: DiskSize, writer: *std.Io.Writer) std.Io.Writer.Error!void { const size = ds.size_in_bytes(); const div: u64, const unit: []const u8 = if (size > GiB) @@ -861,7 +864,7 @@ pub const DiskSize = enum(u64) { const scaled_value = (1000 * size) / div; var buf: [std.math.log2_int_ceil(u64, std.math.maxInt(u64))]u8 = undefined; - const divided = try std.fmt.bufPrint(&buf, "{d}", .{scaled_value}); + const divided = std.fmt.bufPrint(&buf, "{d}", .{scaled_value}) catch return error.WriteFailed; std.debug.assert(divided.len >= 3); From 963c018a6d4e760377288adf65943250ee82779b Mon Sep 17 00:00:00 2001 From: bnuuydev Date: Sun, 19 Oct 2025 19:22:29 +0100 Subject: [PATCH 2/2] fix build interface we set the cwd of dimmer to the script directory such that the run step passes the correct relative paths. this caused weirdness in zig's dependency file handling - zig expects relative paths to be relative to the project root instead of the cwd of the run step. changing paths to be absolute is a suitable workaround. --- src/BuildInterface.zig | 2 ++ src/Parser.zig | 10 ++++------ src/dim.zig | 14 +++++++++++--- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/BuildInterface.zig b/src/BuildInterface.zig index 10d559d..421f2d3 100644 --- a/src/BuildInterface.zig +++ b/src/BuildInterface.zig @@ -32,6 +32,8 @@ pub fn createDisk(dimmer: Interface, size: u64, content: Content) std.Build.Lazy const compile_script = b.addRunArtifact(dimmer.dimmer_exe); + compile_script.setCwd(script_file.dirname()); + _ = compile_script.addPrefixedDepFileOutputArg("--deps-file=", "image.d"); compile_script.addArg(b.fmt("--size={d}", .{size})); diff --git a/src/Parser.zig b/src/Parser.zig index 64609b7..98720e8 100644 --- a/src/Parser.zig +++ b/src/Parser.zig @@ -103,12 +103,10 @@ pub fn get_include_path(parser: Parser, allocator: std.mem.Allocator, rel_includ if (parser.file_stack.len == parser.max_include_depth) return error.MaxIncludeDepthReached; - // const top_path = if (parser.file_stack.len > 0) - // parser.file_stack[parser.file_stack.len - 1].path - // else - // ""; - - const top_path = ""; // TODO what the fuck, the actual issue here needs to be triaged. this workaround fixes things for me for now though. + const top_path = if (parser.file_stack.len > 0) + parser.file_stack[parser.file_stack.len - 1].path + else + ""; const abs_include_path = try std.fs.path.resolvePosix( allocator, diff --git a/src/dim.zig b/src/dim.zig index fe3ae8a..6a36878 100644 --- a/src/dim.zig +++ b/src/dim.zig @@ -116,11 +116,19 @@ pub fn main() !u8 { // TODO would it be better to store the writer and just reuse it? that way we can utilise // buffering and not have the risk of someone positional writes and breaking things. 
+ // TODO Zig has a bug in dependency file handling: relative paths are taken not to the run + // step cwd, but to the project root. hence, we need to absolute paths until this is fixed. + var buf_output: [std.fs.max_path_bytes]u8 = undefined; + const output_path_abs = try std.fs.cwd().realpath(std.fs.path.dirname(output_path) orelse ".", &buf_output); + var buf_script: [std.fs.max_path_bytes]u8 = undefined; + const script_path_abs = try std.fs.cwd().realpath(script_path, &buf_script); + try writer.interface.print( - \\{s}: {s} + \\{s}/{s}: {s} , .{ - output_path, - script_path, + output_path_abs, + std.fs.path.basename(output_path), + script_path_abs, }); } defer if (global_deps_file) |deps_file|