diff --git a/src/build_runner/build_runner.zig b/src/build_runner/build_runner.zig index edb3c328c..7338153b2 100644 --- a/src/build_runner/build_runner.zig +++ b/src/build_runner/build_runner.zig @@ -1,7 +1,7 @@ //! PLEASE READ THE FOLLOWING MESSAGE BEFORE EDITING THIS FILE: //! //! This build runner is targeting compatibility with the following Zig versions: -//! - 0.15.1 or later +//! - 0.16.0 or later //! //! Handling multiple Zig versions can be achieved with one of the following strategies: //! - use `@hasDecl` or `@hasField` (recommended) @@ -15,17 +15,25 @@ //! `zig build --build-runner /path/to/zls/src/build_runner/build_runner.zig` (if the cwd contains build.zig) //! -const root = @import("@build"); -const std = @import("std"); +const runner = @This(); const builtin = @import("builtin"); + +const std = @import("std"); +const Io = std.Io; const assert = std.debug.assert; +const fmt = std.fmt; const mem = std.mem; const process = std.process; -const ArrayListManaged = if (@hasDecl(std, "array_list")) std.array_list.Managed else std.ArrayList; -const ArrayList = if (@hasDecl(std, "array_list")) std.ArrayList else std.ArrayList; +const File = std.Io.File; const Step = std.Build.Step; +//const Watch = std.Build.Watch; +// Cut out for ZLS +//const WebServer = std.Build.WebServer; const Allocator = std.mem.Allocator; +const fatal = std.process.fatal; +const Writer = std.Io.Writer; +pub const root = @import("@build"); pub const dependencies = @import("@dependencies"); pub const std_options: std.Options = .{ @@ -33,28 +41,27 @@ pub const std_options: std.Options = .{ .http_disable_tls = true, }; -///! This is a modified build runner to extract information out of build.zig -///! Modified version of lib/build_runner.zig pub fn main(init: process.Init.Minimal) !void { - // Here we use an ArenaAllocator backed by a DirectAllocator because a build is a short-lived, - // one shot program. 
We don't need to waste time freeing memory and finding places to squish - // bytes into. So we free everything all at once at the very end. - var single_threaded_arena = std.heap.ArenaAllocator.init(std.heap.page_allocator); + // The build runner is often short-lived, but thanks to `--watch` and `--webui`, that's not + // always the case. So, we do need a true gpa for some things. + var debug_gpa_state: std.heap.DebugAllocator(.{}) = .init; + defer _ = debug_gpa_state.deinit(); + const gpa = debug_gpa_state.allocator(); + + // ...but we'll back our arena by `std.heap.page_allocator` for efficiency. + var single_threaded_arena: std.heap.ArenaAllocator = .init(std.heap.page_allocator); defer single_threaded_arena.deinit(); - - var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ - .child_allocator = single_threaded_arena.allocator(), - }; + var thread_safe_arena: std.heap.ThreadSafeAllocator = .{ .child_allocator = single_threaded_arena.allocator() }; const arena = thread_safe_arena.allocator(); const args = try init.args.toSlice(arena); - var threaded: std.Io.Threaded = .init(arena, .{ + var threaded: std.Io.Threaded = .init(gpa, .{ .environ = init.environ, .argv0 = .init(init.args), }); defer threaded.deinit(); - const io = threaded.ioBasic(); + const io = threaded.io(); // skip my own exe name var arg_idx: usize = 1; @@ -65,7 +72,7 @@ pub fn main(init: process.Init.Minimal) !void { const cache_root = nextArg(args, &arg_idx) orelse fatal("missing cache root directory path", .{}); const global_cache_root = nextArg(args, &arg_idx) orelse fatal("missing global cache root directory path", .{}); - const cwd: std.Io.Dir = .cwd(); + const cwd: Io.Dir = .cwd(); const zig_lib_directory: std.Build.Cache.Directory = .{ .path = zig_lib_dir, @@ -92,7 +99,7 @@ pub fn main(init: process.Init.Minimal) !void { .arena = arena, .cache = .{ .io = io, - .gpa = arena, + .gpa = gpa, .manifest_dir = try local_cache_directory.handle.createDirPathOpen(io, "h", .{}), .cwd = try 
process.getCwdAlloc(single_threaded_arena.allocator()), }, @@ -120,27 +127,50 @@ pub fn main(init: process.Init.Minimal) !void { dependencies.root_deps, ); - var targets = ArrayListManaged([]const u8).init(arena); - var debug_log_scopes = ArrayListManaged([]const u8).init(arena); + var targets = std.array_list.Managed([]const u8).init(arena); + var debug_log_scopes = std.array_list.Managed([]const u8).init(arena); var install_prefix: ?[]const u8 = null; - var dir_list: std.Build.DirList = .{}; + var dir_list = std.Build.DirList{}; + var error_style: ErrorStyle = .verbose; + var multiline_errors: MultilineErrors = .indent; + var summary: ?Summary = null; var max_rss: u64 = 0; var skip_oom_steps = false; - var seed: u32 = 0; + var test_timeout_ns: ?u64 = null; + var color: Color = .auto; + // Cut out for ZLS + //var help_menu = false; + //var steps_menu = false; var output_tmp_nonce: ?[16]u8 = null; - var debounce_interval_ms: u16 = 50; var watch = false; - var check_step_only = false; + var check_step_only = false; // Only for ZLS + // Cut out for ZLS + //var fuzz: ?std.Build.Fuzz.Mode = null; + var debounce_interval_ms: u16 = 50; + // Cut out for ZLS + //var webui_listen: ?Io.net.IpAddress = null; + + if (std.zig.EnvVar.ZIG_BUILD_ERROR_STYLE.get(&graph.environ_map)) |str| { + if (std.meta.stringToEnum(ErrorStyle, str)) |style| { + error_style = style; + } + } + + if (std.zig.EnvVar.ZIG_BUILD_MULTILINE_ERRORS.get(&graph.environ_map)) |str| { + if (std.meta.stringToEnum(MultilineErrors, str)) |style| { + multiline_errors = style; + } + } while (nextArg(args, &arg_idx)) |arg| { if (mem.startsWith(u8, arg, "-Z")) { - if (arg.len != 18) fatal("bad argument: '{s}'", .{arg}); + if (arg.len != 18) fatalWithHint("bad argument: '{s}'", .{arg}); output_tmp_nonce = arg[2..18].*; } else if (mem.startsWith(u8, arg, "-D")) { const option_contents = arg[2..]; if (option_contents.len == 0) - fatal("expected option name after '-D'", .{}); + fatalWithHint("expected option name after 
'-D'", .{}); if (mem.indexOfScalar(u8, option_contents, '=')) |name_end| { const option_name = option_contents[0..name_end]; const option_value = option_contents[name_end + 1 ..]; @@ -153,12 +183,12 @@ pub fn main(init: process.Init.Minimal) !void { } else if (mem.startsWith(u8, arg, "-")) { if (mem.eql(u8, arg, "--verbose")) { builder.verbose = true; - } else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { - fatal("argument '{s}' is not available", .{arg}); + //} else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) { + // help_menu = true; } else if (mem.eql(u8, arg, "-p") or mem.eql(u8, arg, "--prefix")) { install_prefix = nextArgOrFatal(args, &arg_idx); - } else if (mem.eql(u8, arg, "-l") or mem.eql(u8, arg, "--list-steps")) { - fatal("argument '{s}' is not available", .{arg}); + //} else if (mem.eql(u8, arg, "-l") or mem.eql(u8, arg, "--list-steps")) { + // steps_menu = true; } else if (mem.startsWith(u8, arg, "-fsys=")) { const name = arg["-fsys=".len..]; graph.system_library_options.put(arena, name, .user_enabled) catch @panic("OOM"); @@ -170,7 +200,7 @@ pub fn main(init: process.Init.Minimal) !void { } else if (mem.startsWith(u8, arg, "--release=")) { const text = arg["--release=".len..]; builder.release_mode = std.meta.stringToEnum(std.Build.ReleaseMode, text) orelse { - fatal("expected [off|any|fast|safe|small] in '{s}', found '{s}'", .{ + fatalWithHint("expected [off|any|fast|safe|small] in '{s}', found '{s}'", .{ arg, text, }); }; @@ -192,6 +222,41 @@ pub fn main(init: process.Init.Minimal) !void { }; } else if (mem.eql(u8, arg, "--skip-oom-steps")) { skip_oom_steps = true; + } else if (mem.eql(u8, arg, "--test-timeout")) { + const units: []const struct { []const u8, u64 } = &.{ + .{ "ns", 1 }, + .{ "nanosecond", 1 }, + .{ "us", std.time.ns_per_us }, + .{ "microsecond", std.time.ns_per_us }, + .{ "ms", std.time.ns_per_ms }, + .{ "millisecond", std.time.ns_per_ms }, + .{ "s", std.time.ns_per_s }, + .{ "second", std.time.ns_per_s }, + 
.{ "m", std.time.ns_per_min }, + .{ "minute", std.time.ns_per_min }, + .{ "h", std.time.ns_per_hour }, + .{ "hour", std.time.ns_per_hour }, + }; + const timeout_str = nextArgOrFatal(args, &arg_idx); + const num_end_idx = std.mem.findLastNone(u8, timeout_str, "abcdefghijklmnopqrstuvwxyz") orelse fatal( + "invalid timeout '{s}': expected unit (ns, us, ms, s, m, h)", + .{timeout_str}, + ); + const num_str = timeout_str[0 .. num_end_idx + 1]; + const unit_str = timeout_str[num_end_idx + 1 ..]; + const unit_factor: f64 = for (units) |unit_and_factor| { + if (std.mem.eql(u8, unit_str, unit_and_factor[0])) { + break @floatFromInt(unit_and_factor[1]); + } + } else fatal( + "invalid timeout '{s}': invalid unit '{s}' (expected ns, us, ms, s, m, h)", + .{ timeout_str, unit_str }, + ); + const num_parsed = std.fmt.parseFloat(f64, num_str) catch |err| fatal( + "invalid timeout '{s}': invalid number '{s}' ({t})", + .{ timeout_str, num_str, err }, + ); + test_timeout_ns = std.math.lossyCast(u64, unit_factor * num_parsed); } else if (mem.eql(u8, arg, "--search-prefix")) { const search_prefix = nextArgOrFatal(args, &arg_idx); builder.addSearchPrefix(search_prefix); @@ -199,46 +264,85 @@ pub fn main(init: process.Init.Minimal) !void { builder.libc_file = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--color")) { const next_arg = nextArg(args, &arg_idx) orelse - fatal("expected [auto|on|off] after '{s}'", .{arg}); - _ = next_arg; + fatalWithHint("expected [auto|on|off] after '{s}'", .{arg}); + color = std.meta.stringToEnum(Color, next_arg) orelse { + fatalWithHint("expected [auto|on|off] after '{s}', found '{s}'", .{ + arg, next_arg, + }); + }; + } else if (mem.eql(u8, arg, "--error-style")) { + const next_arg = nextArg(args, &arg_idx) orelse + fatalWithHint("expected style after '{s}'", .{arg}); + error_style = std.meta.stringToEnum(ErrorStyle, next_arg) orelse { + fatalWithHint("expected style after '{s}', found '{s}'", .{ arg, next_arg }); + }; + } else if 
(mem.eql(u8, arg, "--multiline-errors")) { + const next_arg = nextArg(args, &arg_idx) orelse + fatalWithHint("expected style after '{s}'", .{arg}); + multiline_errors = std.meta.stringToEnum(MultilineErrors, next_arg) orelse { + fatalWithHint("expected style after '{s}', found '{s}'", .{ arg, next_arg }); + }; } else if (mem.eql(u8, arg, "--summary")) { const next_arg = nextArg(args, &arg_idx) orelse - fatal("expected [all|new|failures|none] after '{s}'", .{arg}); - _ = next_arg; + fatalWithHint("expected [all|new|failures|line|none] after '{s}'", .{arg}); + summary = std.meta.stringToEnum(Summary, next_arg) orelse { + fatalWithHint("expected [all|new|failures|line|none] after '{s}', found '{s}'", .{ + arg, next_arg, + }); + }; } else if (mem.eql(u8, arg, "--seed")) { const next_arg = nextArg(args, &arg_idx) orelse - fatal("expected u32 after '{s}'", .{arg}); - seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| { + fatalWithHint("expected u32 after '{s}'", .{arg}); + graph.random_seed = std.fmt.parseUnsigned(u32, next_arg, 0) catch |err| { fatal("unable to parse seed '{s}' as unsigned 32-bit integer: {s}\n", .{ next_arg, @errorName(err), }); }; + } else if (mem.eql(u8, arg, "--build-id")) { + builder.build_id = .fast; + } else if (mem.startsWith(u8, arg, "--build-id=")) { + const style = arg["--build-id=".len..]; + builder.build_id = std.zig.BuildId.parse(style) catch |err| { + fatal("unable to parse --build-id style '{s}': {s}", .{ + style, @errorName(err), + }); + }; } else if (mem.eql(u8, arg, "--debounce")) { const next_arg = nextArg(args, &arg_idx) orelse - fatal("expected u16 after '{s}'", .{arg}); + fatalWithHint("expected u16 after '{s}'", .{arg}); debounce_interval_ms = std.fmt.parseUnsigned(u16, next_arg, 0) catch |err| { - fatal("unable to parse debounce interval '{s}' as unsigned 16-bit integer: {s}\n", .{ - next_arg, @errorName(err), + fatal("unable to parse debounce interval '{s}' as unsigned 16-bit integer: {t}\n", .{ + next_arg, err, }); }; 
+ // Cut out for ZLS + //} else if (mem.eql(u8, arg, "--webui")) { + // if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) }; + //} else if (mem.startsWith(u8, arg, "--webui=")) { + // const addr_str = arg["--webui=".len..]; + // if (std.mem.eql(u8, addr_str, "-")) fatal("web interface cannot listen on stdio", .{}); + // webui_listen = Io.net.IpAddress.parseLiteral(addr_str) catch |err| { + // fatal("invalid web UI address '{s}': {s}", .{ addr_str, @errorName(err) }); + // }; } else if (mem.eql(u8, arg, "--debug-log")) { const next_arg = nextArgOrFatal(args, &arg_idx); try debug_log_scopes.append(next_arg); } else if (mem.eql(u8, arg, "--debug-pkg-config")) { builder.debug_pkg_config = true; + } else if (mem.eql(u8, arg, "--debug-rt")) { + graph.debug_compiler_runtime_libs = true; } else if (mem.eql(u8, arg, "--debug-compile-errors")) { builder.debug_compile_errors = true; + } else if (mem.eql(u8, arg, "--debug-incremental")) { + builder.debug_incremental = true; } else if (mem.eql(u8, arg, "--system")) { // The usage text shows another argument after this parameter // but it is handled by the parent process. The build runner // only sees this flag. graph.system_package_mode = true; } else if (mem.eql(u8, arg, "--libc-runtimes") or mem.eql(u8, arg, "--glibc-runtimes")) { - if (@hasField(std.Build, "glibc_runtimes_dir")) { - builder.glibc_runtimes_dir = nextArgOrFatal(args, &arg_idx); - } else { - builder.libc_runtimes_dir = nextArgOrFatal(args, &arg_idx); - } + // --glibc-runtimes was the old name of the flag; kept for compatibility for now. 
+ builder.libc_runtimes_dir = nextArgOrFatal(args, &arg_idx); } else if (mem.eql(u8, arg, "--verbose-link")) { builder.verbose_link = true; } else if (mem.eql(u8, arg, "--verbose-air")) { @@ -247,7 +351,7 @@ pub fn main(init: process.Init.Minimal) !void { builder.verbose_llvm_ir = "-"; } else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) { builder.verbose_llvm_ir = arg["--verbose-llvm-ir=".len..]; - } else if (mem.eql(u8, arg, "--verbose-llvm-bc=")) { + } else if (mem.startsWith(u8, arg, "--verbose-llvm-bc=")) { builder.verbose_llvm_bc = arg["--verbose-llvm-bc=".len..]; } else if (mem.eql(u8, arg, "--verbose-cimport")) { builder.verbose_cimport = true; @@ -255,12 +359,51 @@ pub fn main(init: process.Init.Minimal) !void { builder.verbose_cc = true; } else if (mem.eql(u8, arg, "--verbose-llvm-cpu-features")) { builder.verbose_llvm_cpu_features = true; - } else if (mem.eql(u8, arg, "--prominent-compile-errors")) { - // prominent_compile_errors = true; } else if (mem.eql(u8, arg, "--watch")) { watch = true; } else if (mem.eql(u8, arg, "--check-only")) { // ZLS only check_step_only = true; + } else if (mem.eql(u8, arg, "--time-report")) { + graph.time_report = true; + // Cut out for ZLS + // if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) }; + //} else if (mem.eql(u8, arg, "--fuzz")) { + // fuzz = .{ .forever = undefined }; + // if (webui_listen == null) webui_listen = .{ .ip6 = .loopback(0) }; + //} else if (mem.startsWith(u8, arg, "--fuzz=")) { + // const value = arg["--fuzz=".len..]; + // if (value.len == 0) fatal("missing argument to --fuzz", .{}); + + // const unit: u8 = value[value.len - 1]; + // const digits = switch (unit) { + // '0'...'9' => value, + // 'K', 'M', 'G' => value[0 .. 
value.len - 1], + // else => fatal( + // "invalid argument to --fuzz, expected a positive number optionally suffixed by one of: [KMG]", + // .{}, + // ), + // }; + + // const amount = std.fmt.parseInt(u64, digits, 10) catch { + // fatal( + // "invalid argument to --fuzz, expected a positive number optionally suffixed by one of: [KMG]", + // .{}, + // ); + // }; + + // const normalized_amount = std.math.mul(u64, amount, switch (unit) { + // else => unreachable, + // '0'...'9' => 1, + // 'K' => 1000, + // 'M' => 1_000_000, + // 'G' => 1_000_000_000, + // }) catch fatal("fuzzing limit amount overflows u64", .{}); + + // fuzz = .{ + // .limit = .{ + // .amount = normalized_amount, + // }, + // }; } else if (mem.eql(u8, arg, "-fincremental")) { graph.incremental = true; } else if (mem.eql(u8, arg, "-fno-incremental")) { @@ -285,6 +428,10 @@ pub fn main(init: process.Init.Minimal) !void { builder.enable_darling = true; } else if (mem.eql(u8, arg, "-fno-darling")) { builder.enable_darling = false; + } else if (mem.eql(u8, arg, "-fallow-so-scripts")) { + graph.allow_so_scripts = true; + } else if (mem.eql(u8, arg, "-fno-allow-so-scripts")) { + graph.allow_so_scripts = false; } else if (mem.eql(u8, arg, "-freference-trace")) { builder.reference_trace = 256; } else if (mem.startsWith(u8, arg, "-freference-trace=")) { @@ -295,32 +442,39 @@ pub fn main(init: process.Init.Minimal) !void { }; } else if (mem.eql(u8, arg, "-fno-reference-trace")) { builder.reference_trace = null; - } else if (mem.startsWith(u8, arg, "-j")) { - const num = arg["-j".len..]; - const n_jobs = std.fmt.parseUnsigned(u32, num, 10) catch |err| { - std.debug.print("unable to parse jobs count '{s}': {s}", .{ - num, @errorName(err), - }); - process.exit(1); - }; - if (n_jobs < 1) { - std.debug.print("number of jobs must be at least 1\n", .{}); - process.exit(1); - } - threaded.setAsyncLimit(.limited(n_jobs)); + } else if (mem.cutPrefix(u8, arg, "-j")) |text| { + const n = std.fmt.parseUnsigned(u32, text, 10) 
catch |err| + fatal("unable to parse jobs count '{s}': {t}", .{ text, err }); + if (n < 1) fatal("number of jobs must be at least 1", .{}); + threaded.setAsyncLimit(.limited(n)); } else if (mem.eql(u8, arg, "--")) { builder.args = argsRest(args, arg_idx); break; } else { - fatal("unrecognized argument: '{s}'", .{arg}); + fatalWithHint("unrecognized argument: '{s}'", .{arg}); } } else { try targets.append(arg); } } + const NO_COLOR = std.zig.EnvVar.NO_COLOR.isSet(&graph.environ_map); + const CLICOLOR_FORCE = std.zig.EnvVar.CLICOLOR_FORCE.isSet(&graph.environ_map); + + graph.stderr_mode = switch (color) { + .auto => try .detect(io, .stderr(), NO_COLOR, CLICOLOR_FORCE), + .on => .escape_codes, + .off => .no_color, + }; + + //if (webui_listen != null) { + // if (watch) fatal("using '--webui' and '--watch' together is not yet supported; consider omitting '--watch' in favour of the web UI \"Rebuild\" button", .{}); + // if (builtin.single_threaded) fatal("'--webui' is not yet supported on single-threaded hosts", .{}); + //} + const main_progress_node = std.Progress.start(io, .{ .disable_printing = true, + //.disable_printing = (color == .off), }); defer main_progress_node.end(); @@ -334,24 +488,22 @@ pub fn main(init: process.Init.Minimal) !void { } if (graph.needed_lazy_dependencies.entries.len != 0) { - var buffer: ArrayList(u8) = .{}; + var buffer: std.ArrayList(u8) = .empty; for (graph.needed_lazy_dependencies.keys()) |k| { try buffer.appendSlice(arena, k); try buffer.append(arena, '\n'); } const s = std.fs.path.sep_str; const tmp_sub_path = "tmp" ++ s ++ (output_tmp_nonce orelse fatal("missing -Z arg", .{})); - local_cache_directory.handle.writeFile(io, .{ .sub_path = tmp_sub_path, .data = buffer.items, .flags = .{ .exclusive = true }, }) catch |err| { - fatal("unable to write configuration results to '{f}{s}': {}", .{ - local_cache_directory, tmp_sub_path, err, + fatal("unable to write configuration results to '{f}{s}': {s}", .{ + local_cache_directory, 
tmp_sub_path, @errorName(err), }); }; - process.exit(3); // Indicate configure phase failed with meaningful stdout. } @@ -361,36 +513,61 @@ pub fn main(init: process.Init.Minimal) !void { validateSystemLibraryOptions(builder); + //if (help_menu) { + // var w = initStdoutWriter(io); + // printUsage(builder, w) catch return stdout_writer_allocation.err.?; + // w.flush() catch return stdout_writer_allocation.err.?; + // return; + //} + + //if (steps_menu) { + // var w = initStdoutWriter(io); + // printSteps(builder, w) catch return stdout_writer_allocation.err.?; + // w.flush() catch return stdout_writer_allocation.err.?; + // return; + //} + var run: Run = .{ - .max_rss = max_rss, + .gpa = gpa, + + .available_rss = max_rss, .max_rss_is_default = false, .max_rss_mutex = .init, .skip_oom_steps = skip_oom_steps, - .memory_blocked_steps = .init(arena), - - .claimed_rss = 0, + .unit_test_timeout_ns = test_timeout_ns, .watch = watch, .cycle = 0, + //.web_server = undefined, // set after `prepare` + .memory_blocked_steps = .empty, + .step_stack = .empty, + + .error_style = error_style, + .multiline_errors = multiline_errors, + //.summary = summary orelse if (watch or webui_listen != null) .line else .failures, + .summary = summary orelse if (watch) .line else .failures, }; + defer { + run.memory_blocked_steps.deinit(gpa); + run.step_stack.deinit(gpa); + } - if (run.max_rss == 0) { - run.max_rss = process.totalSystemMemory() catch std.math.maxInt(u64); + if (run.available_rss == 0) { + run.available_rss = process.totalSystemMemory() catch std.math.maxInt(u64); run.max_rss_is_default = true; } if (!watch) { try extractBuildInformation( - arena, + gpa, builder, arena, + targets.items, main_progress_node, &run, - seed, ); return; } - var w = try Watch.init(io, graph.cache.cwd); const message_thread = try std.Thread.spawn(.{}, struct { @@ -411,46 +588,102 @@ pub fn main(init: process.Init.Minimal) !void { }.do, .{&w}); message_thread.detach(); - const gpa = arena; - - var 
step_stack = try stepNamesToStepStack(gpa, builder, targets.items, check_step_only); - if (step_stack.count() == 0) { - // This means that `enable_build_on_save == null` and the project contains no "check" step. - return; - } - - prepare(gpa, builder, &step_stack, &run, seed) catch |err| switch (err) { - error.UncleanExit => process.exit(1), - else => return err, + prepare(arena, builder, targets.items, &run, graph.random_seed, check_step_only) catch |err| switch (err) { + error.DependencyLoopDetected => { + // Perhaps in the future there could be an Advanced Options flag + // such as --debug-build-runner-leaks which would make this code + // return instead of calling exit. + _ = io.lockStderr(&.{}, graph.stderr_mode) catch {}; + process.exit(1); + }, + else => |e| return e, }; - rebuild: while (true) : (run.cycle += 1) { - runSteps( - gpa, + //const now = Io.Clock.Timestamp.now(io, .awake) catch |err| fatal("failed to collect timestamp: {t}", .{err}); + + //run.web_server = if (webui_listen) |listen_address| ws: { + // if (builtin.single_threaded) unreachable; // `fatal` above + // break :ws .init(.{ + // .gpa = gpa, + // .graph = &graph, + // .all_steps = run.step_stack.keys(), + // .root_prog_node = main_progress_node, + // .watch = watch, + // .listen_address = listen_address, + // .base_timestamp = now, + // }); + //} else null; + + //if (run.web_server) |*ws| { + // ws.start() catch |err| fatal("failed to start web server: {t}", .{err}); + //} + + rebuild: while (true) : (if (run.error_style.clearOnUpdate()) { + const stderr = try io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode); + defer io.unlockStderr(); + try stderr.file_writer.interface.writeAll("\x1B[2J\x1B[3J\x1B[H"); + }) { + run.cycle += 1; + //if (run.web_server) |*ws| ws.startBuild(); + + try runStepNames( builder, - &step_stack, + targets.items, main_progress_node, &run, - ) catch |err| switch (err) { - error.UncleanExit => process.exit(1), - else => return err, - }; + null, + ); - try 
w.update(gpa, step_stack.keys()); + //if (run.web_server) |*web_server| { + // if (fuzz) |mode| if (mode != .forever) fatal( + // "error: limited fuzzing is not implemented yet for --webui", + // .{}, + // ); + // + // web_server.finishBuild(.{ .fuzz = fuzz != null }); + //} + + //if (run.web_server) |*ws| { + // assert(!watch); // fatal error after CLI parsing + // while (true) switch (try ws.wait()) { + // .rebuild => { + // for (run.step_stack.keys()) |step| { + // step.state = .precheck_done; + // step.pending_deps = @intCast(step.dependencies.items.len); + // step.reset(gpa); + // } + // continue :rebuild; + // }, + // }; + //} + + // Comptime-known guard to prevent including the logic below when `!Watch.have_impl`. + //if (!Watch.have_impl) unreachable; + + try w.update(gpa, run.step_stack.keys()); // Wait until a file system notification arrives. Read all such events // until the buffer is empty. Then wait for a debounce interval, resetting // if any more events come in. After the debounce interval has passed, // trigger a rebuild on all steps with modified inputs, as well as their // recursive dependants. 
- var debounce_timeout: std.Io.Timeout = .none; - while (true) switch (try w.wait(gpa, debounce_timeout)) { + var caption_buf: [std.Progress.Node.max_name_len]u8 = undefined; + const caption = std.fmt.bufPrint(&caption_buf, "watching {d} directories, {d} processes", .{ + w.fs_watch.dir_count, countSubProcesses(run.step_stack.keys()), + }) catch &caption_buf; + var debouncing_node = main_progress_node.start(caption, 0); + var in_debounce = false; + while (true) switch (try w.wait(gpa, if (in_debounce) .{ .ms = debounce_interval_ms } else .none)) { .timeout => { - markFailedStepsDirty(gpa, step_stack.keys()); + assert(in_debounce); + debouncing_node.end(); + markFailedStepsDirty(gpa, run.step_stack.keys()); continue :rebuild; }, - .dirty => if (debounce_timeout == .none) { - debounce_timeout = .{ .duration = .{ .raw = .fromMilliseconds(debounce_interval_ms), .clock = .real } }; + .dirty => if (!in_debounce) { + in_debounce = true; + debouncing_node.end(); + debouncing_node = main_progress_node.start("Debouncing (Change Detected)", 0); }, .clean => {}, }; @@ -459,7 +692,7 @@ pub fn main(init: process.Init.Minimal) !void { fn markFailedStepsDirty(gpa: Allocator, all_steps: []const *Step) void { for (all_steps) |step| switch (step.state) { - .dependency_failure, .failure, .skipped => step.recursiveReset(gpa), + .dependency_failure, .failure, .skipped => _ = step.invalidateResult(gpa), else => continue, }; // Now that all dirty steps have been found, the remaining steps that @@ -470,6 +703,14 @@ fn markFailedStepsDirty(gpa: Allocator, all_steps: []const *Step) void { }; } +fn countSubProcesses(all_steps: []const *Step) usize { + var count: usize = 0; + for (all_steps) |s| { + count += @intFromBool(s.getZigProcess() != null); + } + return count; +} + /// A wrapper around `std.Build.Watch` that supports manually triggering recompilations. 
const Watch = struct { io: std.Io, @@ -502,15 +743,25 @@ const Watch = struct { w.manual_event.set(w.io); } - fn wait(w: *Watch, gpa: Allocator, timeout: std.Io.Timeout) !std.Build.Watch.WaitResult { + fn wait(w: *Watch, gpa: Allocator, timeout: std.Build.Watch.Timeout) !std.Build.Watch.WaitResult { if (@TypeOf(std.Build.Watch) != void and w.supports_fs_watch) { - return try w.fs_watch.wait(gpa, switch (timeout) { - .none => .none, - .duration => |d| .{ .ms = @intCast(d.raw.toMilliseconds()) }, - .deadline => unreachable, - }); + //return try w.fs_watch.wait(gpa, switch (timeout) { + // .none => .none, + // .duration => |d| .{ .ms = @intCast(d.raw.toMilliseconds()) }, + // .deadline => unreachable, + //}); + return try w.fs_watch.wait(gpa, timeout); } - waitTimeout(&w.manual_event, w.io, timeout) catch |err| switch (err) { + const io_timeout: std.Io.Timeout = switch (timeout) { + .none => .none, + .ms => |d| .{ + .duration = .{ + .clock = std.Io.Clock.real, + .raw = std.Io.Duration.fromMilliseconds(@intCast(d)), + }, + }, + }; + waitTimeout(&w.manual_event, w.io, io_timeout) catch |err| switch (err) { error.Canceled => unreachable, error.Timeout => return .timeout, }; @@ -537,32 +788,42 @@ const Watch = struct { fn markStepsDirty(gpa: Allocator, all_steps: []const *Step) void { for (all_steps) |step| switch (step.state) { .precheck_done => continue, - else => step.recursiveReset(gpa), + else => _ = step.invalidateResult(gpa), }; } }; const Run = struct { - max_rss: u64, + gpa: Allocator, + + available_rss: usize, max_rss_is_default: bool, - max_rss_mutex: std.Io.Mutex, + max_rss_mutex: Io.Mutex, skip_oom_steps: bool, - memory_blocked_steps: ArrayListManaged(*Step), - - claimed_rss: usize, - + unit_test_timeout_ns: ?u64, watch: bool, cycle: u32, + //web_server: if (!builtin.single_threaded) ?WebServer else ?noreturn, + /// Allocated into `gpa`. + memory_blocked_steps: std.ArrayList(*Step), + /// Allocated into `gpa`. 
+ step_stack: std.AutoArrayHashMapUnmanaged(*Step, void), + + error_style: ErrorStyle, + multiline_errors: MultilineErrors, + summary: Summary, }; -fn stepNamesToStepStack( - gpa: Allocator, +fn prepare( + arena: Allocator, b: *std.Build, step_names: []const []const u8, - check_step_only: bool, -) !std.AutoArrayHashMapUnmanaged(*Step, void) { - var step_stack: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; - errdefer step_stack.deinit(gpa); + run: *Run, + seed: u32, + check_step_only: bool, // ZLS Hack +) !void { + const gpa = run.gpa; + const step_stack = &run.step_stack; if (step_names.len == 0) { if (b.top_level_steps.get("check")) |tls| { @@ -582,31 +843,14 @@ fn stepNamesToStepStack( } } - return step_stack; -} - -fn prepare( - gpa: Allocator, - b: *std.Build, - step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), - run: *Run, - seed: u32, -) error{ OutOfMemory, UncleanExit }!void { - const starting_steps = try gpa.dupe(*Step, step_stack.keys()); - defer gpa.free(starting_steps); + const starting_steps = try arena.dupe(*Step, step_stack.keys()); var rng = std.Random.DefaultPrng.init(seed); const rand = rng.random(); rand.shuffle(*Step, starting_steps); for (starting_steps) |s| { - constructGraphAndCheckForDependencyLoop(b, s, step_stack, rand) catch |err| switch (err) { - error.DependencyLoopDetected => { - _ = b.graph.io.lockStderr(&.{}, b.graph.stderr_mode) catch {}; - process.exit(1); - }, - else => |e| return e, - }; + try constructGraphAndCheckForDependencyLoop(gpa, b, s, &run.step_stack, rand); } { @@ -614,12 +858,15 @@ fn prepare( var any_problems = false; for (step_stack.keys()) |s| { if (s.max_rss == 0) continue; - if (s.max_rss > run.max_rss) { + if (s.max_rss > run.available_rss) { if (run.skip_oom_steps) { s.state = .skipped_oom; + for (s.dependants.items) |dependant| { + dependant.pending_deps -= 1; + } } else { std.debug.print("{s}{s}: this step declares an upper bound of {d} bytes of memory, exceeding the available {d} bytes of memory\n", 
.{ - s.owner.dep_prefix, s.name, s.max_rss, run.max_rss, + s.owner.dep_prefix, s.name, s.max_rss, run.available_rss, }); any_problems = true; } @@ -633,43 +880,517 @@ fn prepare( } } -fn runSteps( - gpa: std.mem.Allocator, +fn runStepNames( b: *std.Build, - steps_stack: *const std.AutoArrayHashMapUnmanaged(*Step, void), + step_names: []const []const u8, parent_prog_node: std.Progress.Node, run: *Run, -) error{ OutOfMemory, UncleanExit, Canceled }!void { - const io = b.graph.io; - const steps = steps_stack.keys(); + fuzz: ?std.Build.Fuzz.Mode, +) !void { + const gpa = run.gpa; + const graph = b.graph; + const io = graph.io; + const step_stack = &run.step_stack; + _ = fuzz; // ZLS hack - var step_prog = parent_prog_node.start("steps", steps.len); - defer step_prog.end(); + { + // Collect the initial set of tasks (those with no outstanding dependencies) into a buffer, + // then spawn them. The buffer is so that we don't race with `makeStep` and end up thinking + // a step is initial when it actually became ready due to an earlier initial step. + var initial_set: std.ArrayList(*Step) = .empty; + defer initial_set.deinit(gpa); + try initial_set.ensureUnusedCapacity(gpa, step_stack.count()); + for (step_stack.keys()) |s| { + if (s.state == .precheck_done and s.pending_deps == 0) { + initial_set.appendAssumeCapacity(s); + } + } - var group: std.Io.Group = .init; - defer group.cancel(io); + const step_prog = parent_prog_node.start("steps", step_stack.count()); + defer step_prog.end(); + + var group: Io.Group = .init; + defer group.cancel(io); + // Start working on all of the initial steps... + for (initial_set.items) |s| try stepReady(&group, b, s, step_prog, run); + // ...and `makeStep` will trigger every other step when their last dependency finishes. 
+ try group.await(io); + + if (run.watch) { + for (initial_set.items) |step| { + const step_id: u32 = @intCast(run.step_stack.getIndex(step).?); + // missing fields: + // - result_error_msgs + // - result_stderr + serveWatchErrorBundle(b.graph.io, step_id, run.cycle, step.result_error_bundle) catch @panic("failed to send watch errors"); + } + } + } - // Here we spawn the initial set of tasks with a nice heuristic - - // dependency order. Each worker when it finishes a step will then - // check whether it should run any dependants. - for (steps) |step| { - if (step.state == .skipped_oom) continue; + assert(run.memory_blocked_steps.items.len == 0); + + var test_pass_count: usize = 0; + var test_skip_count: usize = 0; + var test_fail_count: usize = 0; + var test_crash_count: usize = 0; + var test_timeout_count: usize = 0; + + var test_count: usize = 0; + + var success_count: usize = 0; + var skipped_count: usize = 0; + var failure_count: usize = 0; + var pending_count: usize = 0; + var total_compile_errors: usize = 0; + + var cleanup_task = io.async(cleanTmpFiles, .{ io, step_stack.keys() }); + defer cleanup_task.await(io); + + for (step_stack.keys()) |s| { + test_pass_count += s.test_results.passCount(); + test_skip_count += s.test_results.skip_count; + test_fail_count += s.test_results.fail_count; + test_crash_count += s.test_results.crash_count; + test_timeout_count += s.test_results.timeout_count; + + test_count += s.test_results.test_count; + + switch (s.state) { + .precheck_unstarted => unreachable, + .precheck_started => unreachable, + .precheck_done => unreachable, + .dependency_failure => pending_count += 1, + .success => success_count += 1, + .skipped, .skipped_oom => skipped_count += 1, + .failure => { + failure_count += 1; + const compile_errors_len = s.result_error_bundle.errorMessageCount(); + if (compile_errors_len > 0) { + total_compile_errors += compile_errors_len; + } + }, + } + } + + //if (fuzz) |mode| blk: { + // switch (builtin.os.tag) { + // // 
Current implementation depends on two things that need to be ported to Windows: + // // * Memory-mapping to share data between the fuzzer and build runner. + // // * COFF/PE support added to `std.debug.Info` (it needs a batching API for resolving + // // many addresses to source locations). + // .windows => fatal("--fuzz not yet implemented for {t}", .{builtin.os.tag}), + // else => {}, + // } + // if (@bitSizeOf(usize) != 64) { + // // Current implementation depends on posix.mmap()'s second parameter, `length: usize`, + // // being compatible with file system's u64 return value. This is not the case + // // on 32-bit platforms. + // // Affects or affected by issues #5185, #22523, and #22464. + // fatal("--fuzz not yet implemented on {d}-bit platforms", .{@bitSizeOf(usize)}); + // } + + // switch (mode) { + // .forever => break :blk, + // .limit => {}, + // } + + // assert(mode == .limit); + // var f = std.Build.Fuzz.init( + // gpa, + // io, + // step_stack.keys(), + // parent_prog_node, + // mode, + // ) catch |err| fatal("failed to start fuzzer: {t}", .{err}); + // defer f.deinit(); + + // f.start(); + // try f.waitAndPrintReport(); + //} + + // Every test has a state + assert(test_pass_count + test_skip_count + test_fail_count + test_crash_count + test_timeout_count == test_count); + + if (failure_count == 0) { + std.Progress.setStatus(.success); + } else { + std.Progress.setStatus(.failure); + } + + summary: { + switch (run.summary) { + .all, .new, .line => {}, + .failures => if (failure_count == 0) break :summary, + .none => break :summary, + } + + const stderr = try io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode); + defer io.unlockStderr(); + const t = stderr.terminal(); + const w = &stderr.file_writer.interface; + + const total_count = success_count + failure_count + pending_count + skipped_count; + t.setColor(.cyan) catch {}; + t.setColor(.bold) catch {}; + w.writeAll("Build Summary: ") catch {}; + t.setColor(.reset) catch {}; + w.print("{d}/{d} 
steps succeeded", .{ success_count, total_count }) catch {}; + { + t.setColor(.dim) catch {}; + var first = true; + if (skipped_count > 0) { + w.print("{s}{d} skipped", .{ if (first) " (" else ", ", skipped_count }) catch {}; + first = false; + } + if (failure_count > 0) { + w.print("{s}{d} failed", .{ if (first) " (" else ", ", failure_count }) catch {}; + first = false; + } + if (!first) w.writeByte(')') catch {}; + t.setColor(.reset) catch {}; + } + + if (test_count > 0) { + w.print("; {d}/{d} tests passed", .{ test_pass_count, test_count }) catch {}; + t.setColor(.dim) catch {}; + var first = true; + if (test_skip_count > 0) { + w.print("{s}{d} skipped", .{ if (first) " (" else ", ", test_skip_count }) catch {}; + first = false; + } + if (test_fail_count > 0) { + w.print("{s}{d} failed", .{ if (first) " (" else ", ", test_fail_count }) catch {}; + first = false; + } + if (test_crash_count > 0) { + w.print("{s}{d} crashed", .{ if (first) " (" else ", ", test_crash_count }) catch {}; + first = false; + } + if (test_timeout_count > 0) { + w.print("{s}{d} timed out", .{ if (first) " (" else ", ", test_timeout_count }) catch {}; + first = false; + } + if (!first) w.writeByte(')') catch {}; + t.setColor(.reset) catch {}; + } + + w.writeAll("\n") catch {}; + + if (run.summary == .line) break :summary; + + // Print a fancy tree with build results. 
+ var step_stack_copy = try step_stack.clone(gpa); + defer step_stack_copy.deinit(gpa); + + var print_node: PrintNode = .{ .parent = null }; + if (step_names.len == 0) { + print_node.last = true; + printTreeStep(b, b.default_step, run, t, &print_node, &step_stack_copy) catch {}; + } else { + const last_index = if (run.summary == .all) b.top_level_steps.count() else blk: { + var i: usize = step_names.len; + while (i > 0) { + i -= 1; + const step = b.top_level_steps.get(step_names[i]).?.step; + const found = switch (run.summary) { + .all, .line, .none => unreachable, + .failures => step.state != .success, + .new => !step.result_cached, + }; + if (found) break :blk i; + } + break :blk b.top_level_steps.count(); + }; + for (step_names, 0..) |step_name, i| { + const tls = b.top_level_steps.get(step_name).?; + print_node.last = i + 1 == last_index; + printTreeStep(b, &tls.step, run, t, &print_node, &step_stack_copy) catch {}; + } + } + w.writeByte('\n') catch {}; + } + + // + //if (run.watch or run.web_server != null) return; + if (run.watch) return; + + // Perhaps in the future there could be an Advanced Options flag such as + // --debug-build-runner-leaks which would make this code return instead of + // calling exit. 
+ + const code: u8 = code: { + if (failure_count == 0) break :code 0; // success + if (run.error_style.verboseContext()) break :code 1; // failure; print build command + break :code 2; // failure; do not print build command + }; + _ = io.lockStderr(&.{}, graph.stderr_mode) catch {}; + process.exit(code); +} + +const PrintNode = struct { + parent: ?*PrintNode, + last: bool = false, +}; + +fn printPrefix(node: *PrintNode, stderr: Io.Terminal) !void { + const parent = node.parent orelse return; + const writer = stderr.writer; + if (parent.parent == null) return; + try printPrefix(parent, stderr); + if (parent.last) { + try writer.writeAll(" "); + } else { + try writer.writeAll(switch (stderr.mode) { + .escape_codes => "\x1B\x28\x30\x78\x1B\x28\x42 ", // │ + else => "| ", + }); + } +} + +fn printChildNodePrefix(stderr: Io.Terminal) !void { + try stderr.writer.writeAll(switch (stderr.mode) { + .escape_codes => "\x1B\x28\x30\x6d\x71\x1B\x28\x42 ", // └─ + else => "+- ", + }); +} - group.async(io, workerMakeOneStep, .{ - &group, gpa, b, steps_stack, step, step_prog, run, +fn printStepStatus(s: *Step, stderr: Io.Terminal, run: *const Run) !void { + const writer = stderr.writer; + switch (s.state) { + .precheck_unstarted => unreachable, + .precheck_started => unreachable, + .precheck_done => unreachable, + + .dependency_failure => { + try stderr.setColor(.dim); + try writer.writeAll(" transitive failure\n"); + try stderr.setColor(.reset); + }, + + .success => { + try stderr.setColor(.green); + if (s.result_cached) { + try writer.writeAll(" cached"); + } else if (s.test_results.test_count > 0) { + const pass_count = s.test_results.passCount(); + assert(s.test_results.test_count == pass_count + s.test_results.skip_count); + try writer.print(" {d} pass", .{pass_count}); + if (s.test_results.skip_count > 0) { + try stderr.setColor(.reset); + try writer.writeAll(", "); + try stderr.setColor(.yellow); + try writer.print("{d} skip", .{s.test_results.skip_count}); + } + try 
stderr.setColor(.reset); + try writer.print(" ({d} total)", .{s.test_results.test_count}); + } else { + try writer.writeAll(" success"); + } + try stderr.setColor(.reset); + if (s.result_duration_ns) |ns| { + try stderr.setColor(.dim); + if (ns >= std.time.ns_per_min) { + try writer.print(" {d}m", .{ns / std.time.ns_per_min}); + } else if (ns >= std.time.ns_per_s) { + try writer.print(" {d}s", .{ns / std.time.ns_per_s}); + } else if (ns >= std.time.ns_per_ms) { + try writer.print(" {d}ms", .{ns / std.time.ns_per_ms}); + } else if (ns >= std.time.ns_per_us) { + try writer.print(" {d}us", .{ns / std.time.ns_per_us}); + } else { + try writer.print(" {d}ns", .{ns}); + } + try stderr.setColor(.reset); + } + if (s.result_peak_rss != 0) { + const rss = s.result_peak_rss; + try stderr.setColor(.dim); + if (rss >= 1000_000_000) { + try writer.print(" MaxRSS:{d}G", .{rss / 1000_000_000}); + } else if (rss >= 1000_000) { + try writer.print(" MaxRSS:{d}M", .{rss / 1000_000}); + } else if (rss >= 1000) { + try writer.print(" MaxRSS:{d}K", .{rss / 1000}); + } else { + try writer.print(" MaxRSS:{d}B", .{rss}); + } + try stderr.setColor(.reset); + } + try writer.writeAll("\n"); + }, + .skipped => { + try stderr.setColor(.yellow); + try writer.writeAll(" skipped\n"); + try stderr.setColor(.reset); + }, + .skipped_oom => { + try stderr.setColor(.yellow); + try writer.writeAll(" skipped (not enough memory)"); + try stderr.setColor(.dim); + try writer.print(" upper bound of {d} exceeded runner limit ({d})\n", .{ s.max_rss, run.available_rss }); + try stderr.setColor(.reset); + }, + .failure => { + try printStepFailure(s, stderr, false); + try stderr.setColor(.reset); + }, + } +} + +fn printStepFailure(s: *Step, stderr: Io.Terminal, dim: bool) !void { + const w = stderr.writer; + if (s.result_error_bundle.errorMessageCount() > 0) { + try stderr.setColor(.red); + try w.print(" {d} errors\n", .{ + s.result_error_bundle.errorMessageCount(), }); + } else if (!s.test_results.isSuccess()) { 
+        // These first values include all of the test "statuses". Every test is either passed,
+        // skipped, failed, crashed, or timed out.
+        try stderr.setColor(.green);
+        try w.print(" {d} pass", .{s.test_results.passCount()});
+        try stderr.setColor(.reset);
+        if (dim) try stderr.setColor(.dim);
+        if (s.test_results.skip_count > 0) {
+            try w.writeAll(", ");
+            try stderr.setColor(.yellow);
+            try w.print("{d} skip", .{s.test_results.skip_count});
+            try stderr.setColor(.reset);
+            if (dim) try stderr.setColor(.dim);
+        }
+        if (s.test_results.fail_count > 0) {
+            try w.writeAll(", ");
+            try stderr.setColor(.red);
+            try w.print("{d} fail", .{s.test_results.fail_count});
+            try stderr.setColor(.reset);
+            if (dim) try stderr.setColor(.dim);
+        }
+        if (s.test_results.crash_count > 0) {
+            try w.writeAll(", ");
+            try stderr.setColor(.red);
+            try w.print("{d} crash", .{s.test_results.crash_count});
+            try stderr.setColor(.reset);
+            if (dim) try stderr.setColor(.dim);
+        }
+        if (s.test_results.timeout_count > 0) {
+            try w.writeAll(", ");
+            try stderr.setColor(.red);
+            try w.print("{d} timeout", .{s.test_results.timeout_count});
+            try stderr.setColor(.reset);
+            if (dim) try stderr.setColor(.dim);
+        }
+        try w.print(" ({d} total)", .{s.test_results.test_count});
+
+        // Memory leaks are intentionally written after the total, because it isn't a test *status*,
+        // but just a flag that any tests -- even passed ones -- can have. We also use a different
+        // separator, so it looks like:
+        //   2 pass, 1 skip, 2 fail (5 total); 2 leaks
+        if (s.test_results.leak_count > 0) {
+            try w.writeAll("; ");
+            try stderr.setColor(.red);
+            try w.print("{d} leaks", .{s.test_results.leak_count});
+            try stderr.setColor(.reset);
+            if (dim) try stderr.setColor(.dim);
+        }
+
+        // It's usually not helpful to know how many error logs there were because they tend to
+        // just come with other errors (e.g. crashes and leaks print stack traces, and clean
+        // failures print error traces). 
So only mention them if they're the only thing causing + // the failure. + const show_err_logs: bool = show: { + var alt_results = s.test_results; + alt_results.log_err_count = 0; + break :show alt_results.isSuccess(); + }; + if (show_err_logs) { + try w.writeAll("; "); + try stderr.setColor(.red); + try w.print("{d} error logs", .{s.test_results.log_err_count}); + try stderr.setColor(.reset); + if (dim) try stderr.setColor(.dim); + } + + try w.writeAll("\n"); + } else if (s.result_error_msgs.items.len > 0) { + try stderr.setColor(.red); + try w.writeAll(" failure\n"); + } else { + assert(s.result_stderr.len > 0); + try stderr.setColor(.red); + try w.writeAll(" w\n"); + } +} + +fn printTreeStep( + b: *std.Build, + s: *Step, + run: *const Run, + stderr: Io.Terminal, + parent_node: *PrintNode, + step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), +) !void { + const writer = stderr.writer; + const first = step_stack.swapRemove(s); + const summary = run.summary; + const skip = switch (summary) { + .none, .line => unreachable, + .all => false, + .new => s.result_cached, + .failures => s.state == .success, + }; + if (skip) return; + try printPrefix(parent_node, stderr); + + if (parent_node.parent != null) { + if (parent_node.last) { + try printChildNodePrefix(stderr); + } else { + try writer.writeAll(switch (stderr.mode) { + .escape_codes => "\x1B\x28\x30\x74\x71\x1B\x28\x42 ", // ├─ + else => "+- ", + }); + } } - try group.await(io); + if (!first) try stderr.setColor(.dim); - if (run.watch) { - for (steps) |step| { - const step_id: u32 = @intCast(steps_stack.getIndex(step).?); - // missing fields: - // - result_error_msgs - // - result_stderr - serveWatchErrorBundle(b.graph.io, step_id, run.cycle, step.result_error_bundle) catch @panic("failed to send watch errors"); + // dep_prefix omitted here because it is redundant with the tree. 
+ try writer.writeAll(s.name); + + if (first) { + try printStepStatus(s, stderr, run); + + const last_index = if (summary == .all) s.dependencies.items.len -| 1 else blk: { + var i: usize = s.dependencies.items.len; + while (i > 0) { + i -= 1; + + const step = s.dependencies.items[i]; + const found = switch (summary) { + .all, .line, .none => unreachable, + .failures => step.state != .success, + .new => !step.result_cached, + }; + if (found) break :blk i; + } + break :blk s.dependencies.items.len -| 1; + }; + for (s.dependencies.items, 0..) |dep, i| { + var print_node: PrintNode = .{ + .parent = parent_node, + .last = i == last_index, + }; + try printTreeStep(b, dep, run, stderr, &print_node, step_stack); } + } else { + if (s.dependencies.items.len == 0) { + try writer.writeAll(" (reused)\n"); + } else { + try writer.print(" (+{d} more reused dependencies)\n", .{ + s.dependencies.items.len, + }); + } + try stderr.setColor(.reset); } } @@ -680,176 +1401,455 @@ fn runSteps( /// Each step has its dependencies traversed in random order, this accomplishes /// two things: /// - `step_stack` will be in randomized-depth-first order, so the build runner -/// spawns steps in a random (but optimized) order +/// spawns initial steps in a random order /// - each step's `dependants` list is also filled in a random order, so that -/// when it finishes executing in `workerMakeOneStep`, it spawns next steps -/// to run in random order +/// when it finishes executing in `makeStep`, it spawns next steps to run in +/// random order fn constructGraphAndCheckForDependencyLoop( + gpa: Allocator, b: *std.Build, s: *Step, step_stack: *std.AutoArrayHashMapUnmanaged(*Step, void), rand: std.Random, -) error{ OutOfMemory, DependencyLoopDetected }!void { +) !void { switch (s.state) { - .precheck_started => return error.DependencyLoopDetected, + .precheck_started => { + std.debug.print("dependency loop detected:\n {s}\n", .{s.name}); + return error.DependencyLoopDetected; + }, 
.precheck_unstarted => { s.state = .precheck_started; - try step_stack.ensureUnusedCapacity(b.allocator, s.dependencies.items.len); + try step_stack.ensureUnusedCapacity(gpa, s.dependencies.items.len); // We dupe to avoid shuffling the steps in the summary, it depends // on s.dependencies' order. - const deps = b.allocator.dupe(*Step, s.dependencies.items) catch @panic("OOM"); + const deps = gpa.dupe(*Step, s.dependencies.items) catch @panic("OOM"); + defer gpa.free(deps); + rand.shuffle(*Step, deps); for (deps) |dep| { - try step_stack.put(b.allocator, dep, {}); + try step_stack.put(gpa, dep, {}); try dep.dependants.append(b.allocator, s); - try constructGraphAndCheckForDependencyLoop(b, dep, step_stack, rand); + constructGraphAndCheckForDependencyLoop(gpa, b, dep, step_stack, rand) catch |err| { + if (err == error.DependencyLoopDetected) { + std.debug.print(" {s}\n", .{s.name}); + } + return err; + }; } s.state = .precheck_done; + s.pending_deps = @intCast(s.dependencies.items.len); }, .precheck_done => {}, // These don't happen until we actually run the step graph. - .dependency_failure, - .running, - .success, - .failure, - .skipped, - .skipped_oom, - => {}, + .dependency_failure => unreachable, + .success => unreachable, + .failure => unreachable, + .skipped => unreachable, + .skipped_oom => unreachable, } } -fn workerMakeOneStep( - group: *std.Io.Group, - gpa: std.mem.Allocator, +/// Runs the "make" function of the single step `s`, updates its state, and then spawns newly-ready +/// dependant steps in `group`. If `s` makes an RSS claim (i.e. `s.max_rss != 0`), the caller must +/// have already subtracted this value from `run.available_rss`. This function will release the RSS +/// claim (i.e. add `s.max_rss` back into `run.available_rss`) and queue any viable memory-blocked +/// steps after "make" completes for `s`. 
+fn makeStep( + group: *Io.Group, b: *std.Build, - steps_stack: *const std.AutoArrayHashMapUnmanaged(*Step, void), s: *Step, - prog_node: std.Progress.Node, + root_prog_node: std.Progress.Node, run: *Run, -) void { - const io = b.graph.io; +) Io.Cancelable!void { + const graph = b.graph; + const io = graph.io; + const gpa = run.gpa; - // First, check the conditions for running this step. If they are not met, - // then we return without doing the step, relying on another worker to - // queue this step up again when dependencies are met. - for (s.dependencies.items) |dep| { - switch (@atomicLoad(Step.State, &dep.state, .seq_cst)) { - .success, .skipped => continue, - .failure, .dependency_failure, .skipped_oom => { - @atomicStore(Step.State, &s.state, .dependency_failure, .seq_cst); - return; - }, - .precheck_done, .running => { - // dependency is not finished yet. - return; - }, - .precheck_unstarted => unreachable, - .precheck_started => unreachable, - } - } + { + const step_prog_node = root_prog_node.start(s.name, 0); + defer step_prog_node.end(); - if (s.max_rss != 0) { - run.max_rss_mutex.lockUncancelable(io); - defer run.max_rss_mutex.unlock(io); + //if (run.web_server) |*ws| ws.updateStepStatus(s, .wip); - // Avoid running steps twice. - if (s.state != .precheck_done) { - // Another worker got the job. - return; - } + const new_state: Step.State = for (s.dependencies.items) |dep| { + switch (@atomicLoad(Step.State, &dep.state, .monotonic)) { + .precheck_unstarted => unreachable, + .precheck_started => unreachable, + .precheck_done => unreachable, - const new_claimed_rss = run.claimed_rss + s.max_rss; - if (new_claimed_rss > run.max_rss) { - // Running this step right now could possibly exceed the allotted RSS. - // Add this step to the queue of memory-blocked steps. 
- run.memory_blocked_steps.append(s) catch @panic("OOM"); - return; - } + .failure, + .dependency_failure, + .skipped_oom, + => break .dependency_failure, - run.claimed_rss = new_claimed_rss; - s.state = .running; - } else { - // Avoid running steps twice. - if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .seq_cst, .seq_cst) != null) { - // Another worker got the job. - return; + .success, .skipped => {}, + } + } else if (s.make(.{ + .progress_node = step_prog_node, + .watch = run.watch, + //.web_server = if (run.web_server) |*ws| ws else null, + .web_server = null, + .unit_test_timeout_ns = run.unit_test_timeout_ns, + .gpa = gpa, + })) state: { + break :state .success; + } else |err| switch (err) { + error.MakeFailed => .failure, + error.MakeSkipped => .skipped, + }; + + @atomicStore(Step.State, &s.state, new_state, .monotonic); + + switch (new_state) { + .precheck_unstarted => unreachable, + .precheck_started => unreachable, + .precheck_done => unreachable, + + .failure, + .dependency_failure, + .skipped_oom, + => { + //if (run.web_server) |*ws| ws.updateStepStatus(s, .failure); + std.Progress.setStatus(.failure_working); + }, + + .success, + .skipped, + => { + //if (run.web_server) |*ws| ws.updateStepStatus(s, .success); + }, } } - var sub_prog_node = prog_node.start(s.name, 0); - defer sub_prog_node.end(); - - const make_result = s.make(.{ - .progress_node = sub_prog_node, - .watch = true, - .web_server = null, - .unit_test_timeout_ns = null, - .gpa = gpa, - }); + // No matter the result, we want to display error/warning messages. 
+ //if (s.result_error_bundle.errorMessageCount() > 0 or + // s.result_error_msgs.items.len > 0 or + // s.result_stderr.len > 0) + //{ + // const stderr = try io.lockStderr(&stdio_buffer_allocation, graph.stderr_mode); + // defer io.unlockStderr(); + // printErrorMessages(gpa, s, .{}, stderr.terminal(), run.error_style, run.multiline_errors) catch {}; + //} if (run.watch) { - const step_id: u32 = @intCast(steps_stack.getIndex(s).?); + const step_id: u32 = @intCast(run.step_stack.getIndex(s).?); // missing fields: // - result_error_msgs // - result_stderr serveWatchErrorBundle(b.graph.io, step_id, run.cycle, s.result_error_bundle) catch @panic("failed to send watch errors"); } - handle_result: { - if (make_result) |_| { - @atomicStore(Step.State, &s.state, .success, .seq_cst); - } else |err| switch (err) { - error.MakeFailed => { - @atomicStore(Step.State, &s.state, .failure, .seq_cst); - break :handle_result; - }, - error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .seq_cst), + if (s.max_rss != 0) { + var dispatch_set: std.ArrayList(*Step) = .empty; + defer dispatch_set.deinit(gpa); + + // Release our RSS claim and kick off some blocked steps if possible. We use `dispatch_set` + // as a staging buffer to avoid recursing into `makeStep` while `run.max_rss_mutex` is held. + { + try run.max_rss_mutex.lock(io); + defer run.max_rss_mutex.unlock(io); + run.available_rss += s.max_rss; + dispatch_set.ensureUnusedCapacity(gpa, run.memory_blocked_steps.items.len) catch @panic("OOM"); + while (run.memory_blocked_steps.getLastOrNull()) |candidate| { + if (run.available_rss < candidate.max_rss) break; + assert(run.memory_blocked_steps.pop() == candidate); + dispatch_set.appendAssumeCapacity(candidate); + } } + for (dispatch_set.items) |candidate| { + group.async(io, makeStep, .{ group, b, candidate, root_prog_node, run }); + } + } - // Successful completion of a step, so we queue up its dependants as well. 
- for (s.dependants.items) |dep| { - group.async(io, workerMakeOneStep, .{ - group, gpa, b, steps_stack, dep, prog_node, run, - }); + for (s.dependants.items) |dependant| { + // `.acq_rel` synchronizes with itself to ensure all dependencies' final states are visible when this hits 0. + if (@atomicRmw(u32, &dependant.pending_deps, .Sub, 1, .acq_rel) == 1) { + try stepReady(group, b, dependant, root_prog_node, run); } } +} - // If this is a step that claims resources, we must now queue up other - // steps that are waiting for resources. +fn stepReady( + group: *Io.Group, + b: *std.Build, + s: *Step, + root_prog_node: std.Progress.Node, + run: *Run, +) !void { + const io = b.graph.io; if (s.max_rss != 0) { - run.max_rss_mutex.lockUncancelable(io); + try run.max_rss_mutex.lock(io); defer run.max_rss_mutex.unlock(io); - - // Give the memory back to the scheduler. - run.claimed_rss -= s.max_rss; - // Avoid kicking off too many tasks that we already know will not have - // enough resources. - var remaining = run.max_rss - run.claimed_rss; - var i: usize = 0; - var j: usize = 0; - while (j < run.memory_blocked_steps.items.len) : (j += 1) { - const dep = run.memory_blocked_steps.items[j]; - assert(dep.max_rss != 0); - if (dep.max_rss <= remaining) { - remaining -= dep.max_rss; - - group.async(io, workerMakeOneStep, .{ - group, gpa, b, steps_stack, dep, prog_node, run, - }); - } else { - run.memory_blocked_steps.items[i] = dep; - i += 1; - } + if (run.available_rss < s.max_rss) { + // Running this step right now could possibly exceed the allotted RSS. 
+ run.memory_blocked_steps.append(run.gpa, s) catch @panic("OOM"); + return; } - run.memory_blocked_steps.shrinkRetainingCapacity(i); + run.available_rss -= s.max_rss; } + group.async(io, makeStep, .{ group, b, s, root_prog_node, run }); } +//pub fn printErrorMessages( +// gpa: Allocator, +// failing_step: *Step, +// options: std.zig.ErrorBundle.RenderOptions, +// stderr: Io.Terminal, +// error_style: ErrorStyle, +// multiline_errors: MultilineErrors, +//) !void { +// const writer = stderr.writer; +// if (error_style.verboseContext()) { +// // Provide context for where these error messages are coming from by +// // printing the corresponding Step subtree. +// var step_stack: std.ArrayList(*Step) = .empty; +// defer step_stack.deinit(gpa); +// try step_stack.append(gpa, failing_step); +// while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) { +// try step_stack.append(gpa, step_stack.items[step_stack.items.len - 1].dependants.items[0]); +// } +// +// // Now, `step_stack` has the subtree that we want to print, in reverse order. +// try stderr.setColor(.dim); +// var indent: usize = 0; +// while (step_stack.pop()) |s| : (indent += 1) { +// if (indent > 0) { +// try writer.splatByteAll(' ', (indent - 1) * 3); +// try printChildNodePrefix(stderr); +// } +// +// try writer.writeAll(s.name); +// +// if (s == failing_step) { +// try printStepFailure(s, stderr, true); +// } else { +// try writer.writeAll("\n"); +// } +// } +// try stderr.setColor(.reset); +// } else { +// // Just print the failing step itself. 
+// try stderr.setColor(.dim); +// try writer.writeAll(failing_step.name); +// try printStepFailure(failing_step, stderr, true); +// try stderr.setColor(.reset); +// } +// +// if (failing_step.result_stderr.len > 0) { +// try writer.writeAll(failing_step.result_stderr); +// if (!mem.endsWith(u8, failing_step.result_stderr, "\n")) { +// try writer.writeAll("\n"); +// } +// } +// +// try failing_step.result_error_bundle.renderToTerminal(options, stderr); +// +// for (failing_step.result_error_msgs.items) |msg| { +// try stderr.setColor(.red); +// try writer.writeAll("error:"); +// try stderr.setColor(.reset); +// if (std.mem.indexOfScalar(u8, msg, '\n') == null) { +// try writer.print(" {s}\n", .{msg}); +// } else switch (multiline_errors) { +// .indent => { +// var it = std.mem.splitScalar(u8, msg, '\n'); +// try writer.print(" {s}\n", .{it.first()}); +// while (it.next()) |line| { +// try writer.print(" {s}\n", .{line}); +// } +// }, +// .newline => try writer.print("\n{s}\n", .{msg}), +// .none => try writer.print(" {s}\n", .{msg}), +// } +// } +// +// if (error_style.verboseContext()) { +// if (failing_step.result_failed_command) |cmd_str| { +// try stderr.setColor(.red); +// try writer.writeAll("failed command: "); +// try stderr.setColor(.reset); +// try writer.writeAll(cmd_str); +// try writer.writeByte('\n'); +// } +// } +// +// try writer.writeByte('\n'); +//} +// +//fn printSteps(builder: *std.Build, w: *Writer) !void { +// const arena = builder.graph.arena; +// for (builder.top_level_steps.values()) |top_level_step| { +// const name = if (&top_level_step.step == builder.default_step) +// try fmt.allocPrint(arena, "{s} (default)", .{top_level_step.step.name}) +// else +// top_level_step.step.name; +// try w.print(" {s:<28} {s}\n", .{ name, top_level_step.description }); +// } +//} +// +//fn printUsage(b: *std.Build, w: *Writer) !void { +// const arena = b.graph.arena; +// +// try w.print( +// \\Usage: {s} build [steps] [options] +// \\ +// \\Steps: +// \\ 
+// , .{b.graph.zig_exe}); +// try printSteps(b, w); +// try w.writeAll( +// \\ +// \\Project-Specific Options: +// \\ +// ); +// +// if (b.available_options_list.items.len == 0) { +// try w.print(" (none)\n", .{}); +// } else { +// for (b.available_options_list.items) |option| { +// const name = try fmt.allocPrint(arena, " -D{s}=[{t}]", .{ option.name, option.type_id }); +// try w.print("{s:<30} {s}\n", .{ name, option.description }); +// if (option.enum_options) |enum_options| { +// const padding = " " ** 33; +// try w.writeAll(padding ++ "Supported Values:\n"); +// for (enum_options) |enum_option| { +// try w.print(padding ++ " {s}\n", .{enum_option}); +// } +// } +// } +// } +// +// try w.writeAll( +// \\ +// \\System Integration Options: +// \\ --search-prefix [path] Add a path to look for binaries, libraries, headers +// \\ --sysroot [path] Set the system root directory (usually /) +// \\ --libc [file] Provide a file which specifies libc paths +// \\ +// \\ --system [pkgdir] Disable package fetching; enable all integrations +// \\ -fsys=[name] Enable a system integration +// \\ -fno-sys=[name] Disable a system integration +// \\ +// \\ Available System Integrations: Enabled: +// \\ +// ); +// if (b.graph.system_library_options.entries.len == 0) { +// try w.writeAll(" (none) -\n"); +// } else { +// for (b.graph.system_library_options.keys(), b.graph.system_library_options.values()) |k, v| { +// const status = switch (v) { +// .declared_enabled => "yes", +// .declared_disabled => "no", +// .user_enabled, .user_disabled => unreachable, // already emitted error +// }; +// try w.print(" {s:<43} {s}\n", .{ k, status }); +// } +// } +// +// try w.writeAll( +// \\ +// \\General Options: +// \\ -p, --prefix [path] Where to install files (default: zig-out) +// \\ --prefix-lib-dir [path] Where to install libraries +// \\ --prefix-exe-dir [path] Where to install executables +// \\ --prefix-include-dir [path] Where to install C header files +// \\ +// \\ --release[=mode] 
Request release mode, optionally specifying a +// \\ preferred optimization mode: fast, safe, small +// \\ +// \\ -fdarling, -fno-darling Integration with system-installed Darling to +// \\ execute macOS programs on Linux hosts +// \\ (default: no) +// \\ -fqemu, -fno-qemu Integration with system-installed QEMU to execute +// \\ foreign-architecture programs on Linux hosts +// \\ (default: no) +// \\ --libc-runtimes [path] Enhances QEMU integration by providing dynamic libc +// \\ (e.g. glibc or musl) built for multiple foreign +// \\ architectures, allowing execution of non-native +// \\ programs that link with libc. +// \\ -frosetta, -fno-rosetta Rely on Rosetta to execute x86_64 programs on +// \\ ARM64 macOS hosts. (default: no) +// \\ -fwasmtime, -fno-wasmtime Integration with system-installed wasmtime to +// \\ execute WASI binaries. (default: no) +// \\ -fwine, -fno-wine Integration with system-installed Wine to execute +// \\ Windows programs on Linux hosts. (default: no) +// \\ +// \\ -h, --help Print this help and exit +// \\ -l, --list-steps Print available steps +// \\ --verbose Print commands before executing them +// \\ --color [auto|off|on] Enable or disable colored error messages +// \\ --error-style [style] Control how build errors are printed +// \\ verbose (Default) Report errors with full context +// \\ minimal Report errors after summary, excluding context like command lines +// \\ verbose_clear Like 'verbose', but clear the terminal at the start of each update +// \\ minimal_clear Like 'minimal', but clear the terminal at the start of each update +// \\ --multiline-errors [style] Control how multi-line error messages are printed +// \\ indent (Default) Indent non-initial lines to align with initial line +// \\ newline Include a leading newline so that the error message is on its own lines +// \\ none Print as usual so the first line is misaligned +// \\ --summary [mode] Control the printing of the build summary +// \\ all Print the build 
summary in its entirety +// \\ new Omit cached steps +// \\ failures (Default if short-lived) Only print failed steps +// \\ line (Default if long-lived) Only print the single-line summary +// \\ none Do not print the build summary +// \\ -j Limit concurrent jobs (default is to use all CPU cores) +// \\ --maxrss Limit memory usage (default is to use available memory) +// \\ --skip-oom-steps Instead of failing, skip steps that would exceed --maxrss +// \\ --test-timeout Limit execution time of unit tests, terminating if exceeded. +// \\ The timeout must include a unit: ns, us, ms, s, m, h +// \\ --fetch[=mode] Fetch dependency tree (optionally choose laziness) and exit +// \\ needed (Default) Lazy dependencies are fetched as needed +// \\ all Lazy dependencies are always fetched +// \\ --watch Continuously rebuild when source files are modified +// \\ --debounce Delay before rebuilding after changed file detected +// \\ --webui[=ip] Enable the web interface on the given IP address +// \\ --fuzz[=limit] Continuously search for unit test failures with an optional +// \\ limit to the max number of iterations. The argument supports +// \\ an optional 'K', 'M', or 'G' suffix (e.g. '10K'). Implies +// \\ '--webui' when no limit is specified. 
+// \\ --time-report Force full rebuild and provide detailed information on +// \\ compilation time of Zig source code (implies '--webui') +// \\ -fincremental Enable incremental compilation +// \\ -fno-incremental Disable incremental compilation +// \\ +// \\Advanced Options: +// \\ -freference-trace[=num] How many lines of reference trace should be shown per compile error +// \\ -fno-reference-trace Disable reference trace +// \\ -fallow-so-scripts Allows .so files to be GNU ld scripts +// \\ -fno-allow-so-scripts (default) .so files must be ELF files +// \\ --build-file [file] Override path to build.zig +// \\ --cache-dir [path] Override path to local Zig cache directory +// \\ --global-cache-dir [path] Override path to global Zig cache directory +// \\ --zig-lib-dir [arg] Override path to Zig lib directory +// \\ --build-runner [file] Override path to build runner +// \\ --seed [integer] For shuffling dependency traversal order (default: random) +// \\ --build-id[=style] At a minor link-time expense, embeds a build ID in binaries +// \\ fast 8-byte non-cryptographic hash (COFF, ELF, WASM) +// \\ sha1, tree 20-byte cryptographic hash (ELF, WASM) +// \\ md5 16-byte cryptographic hash (ELF) +// \\ uuid 16-byte random UUID (ELF, WASM) +// \\ 0x[hexstring] Constant ID, maximum 32 bytes (ELF, WASM) +// \\ none (default) No build ID +// \\ --debug-log [scope] Enable debugging the compiler +// \\ --debug-pkg-config Fail if unknown pkg-config flags encountered +// \\ --debug-rt Debug compiler runtime libraries +// \\ --verbose-link Enable compiler debug output for linking +// \\ --verbose-air Enable compiler debug output for Zig AIR +// \\ --verbose-llvm-ir[=file] Enable compiler debug output for LLVM IR +// \\ --verbose-llvm-bc=[file] Enable compiler debug output for LLVM BC +// \\ --verbose-cimport Enable compiler debug output for C imports +// \\ --verbose-cc Enable compiler debug output for C compilation +// \\ --verbose-llvm-cpu-features Enable compiler debug 
output for LLVM CPU features +// \\ +// ); +//} + fn nextArg(args: []const [:0]const u8, idx: *usize) ?[:0]const u8 { if (idx.* >= args.len) return null; defer idx.* += 1; @@ -868,16 +1868,30 @@ fn argsRest(args: []const [:0]const u8, idx: usize) ?[]const [:0]const u8 { return args[idx..]; } -/// Perhaps in the future there could be an Advanced Options flag such as -/// --debug-build-runner-leaks which would make this function return instead of -/// calling exit. -fn cleanExit() void { - std.debug.lockStdErr(); - process.exit(0); -} +const Color = std.zig.Color; +const ErrorStyle = enum { + verbose, + minimal, + verbose_clear, + minimal_clear, + fn verboseContext(s: ErrorStyle) bool { + return switch (s) { + .verbose, .verbose_clear => true, + .minimal, .minimal_clear => false, + }; + } + fn clearOnUpdate(s: ErrorStyle) bool { + return switch (s) { + .verbose, .minimal => false, + .verbose_clear, .minimal_clear => true, + }; + } +}; +const MultilineErrors = enum { indent, newline, none }; +const Summary = enum { all, new, failures, line, none }; -fn fatal(comptime f: []const u8, args: anytype) noreturn { - std.debug.print(f ++ "\n", args); +fn fatalWithHint(comptime f: []const u8, args: anytype) noreturn { + std.debug.print(f ++ "\n access the help menu with 'zig build -h'\n", args); process.exit(1); } @@ -955,7 +1969,7 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void { step.dependOn(&other.step); }, - .config_header_step => |config_header| step.dependOn(&config_header.step), + .config_header_step => |other| step.dependOn(&other.step), }; for (mod.lib_paths.items) |lp| lp.addStepDependencies(step); for (mod.rpaths.items) |rpath| switch (rpath) { @@ -978,6 +1992,25 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void { } } +var stdio_buffer_allocation: [256]u8 = undefined; +var stdout_writer_allocation: Io.File.Writer = undefined; + +fn initStdoutWriter(io: Io) *Writer { + stdout_writer_allocation = 
Io.File.stdout().writerStreaming(io, &stdio_buffer_allocation); + return &stdout_writer_allocation.interface; +} + +fn cleanTmpFiles(io: Io, steps: []const *Step) void { + for (steps) |step| { + const wf = step.cast(std.Build.Step.WriteFile) orelse continue; + if (wf.mode != .tmp) continue; + const path = wf.generated_directory.path orelse continue; + Io.Dir.cwd().deleteTree(io, path) catch |err| { + std.log.warn("failed to delete {s}: {t}", .{ path, err }); + }; + } +} + // // // ZLS code @@ -987,6 +2020,8 @@ fn createModuleDependenciesForStep(step: *Step) Allocator.Error!void { const shared = @import("shared.zig"); const Transport = shared.Transport; const BuildConfig = shared.BuildConfig; +const ArrayListManaged = if (@hasDecl(std, "array_list")) std.array_list.Managed else std.ArrayList; +const ArrayList = if (@hasDecl(std, "array_list")) std.ArrayList else std.ArrayList; const Packages = struct { allocator: std.mem.Allocator, @@ -1036,9 +2071,9 @@ fn extractBuildInformation( gpa: Allocator, b: *std.Build, arena: Allocator, + step_names: []const []const u8, main_progress_node: std.Progress.Node, run: *Run, - seed: u32, ) !void { var steps = std.AutoArrayHashMapUnmanaged(*Step, void){}; defer steps.deinit(gpa); @@ -1169,8 +2204,9 @@ fn extractBuildInformation( } }; - var step_dependencies: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; - defer step_dependencies.deinit(gpa); + const step_dependencies = &run.step_stack; + // var step_dependencies: std.AutoArrayHashMapUnmanaged(*Step, void) = .{}; + // defer step_dependencies.deinit(gpa); // collect step dependencies { @@ -1194,22 +2230,28 @@ fn extractBuildInformation( // collect all dependencies of all found modules for (modules.keys()) |module| { - try helper.addModuleDependencies(gpa, &step_dependencies, module); + try helper.addModuleDependencies(gpa, step_dependencies, module); } } - prepare(gpa, b, &step_dependencies, run, seed) catch |err| switch (err) { - error.UncleanExit => process.exit(1), - else => 
return err, + prepare(gpa, b, step_names, run, b.graph.random_seed, false) catch |err| switch (err) { + error.DependencyLoopDetected => { + // Perhaps in the future there could be an Advanced Options flag + // such as --debug-build-runner-leaks which would make this code + // return instead of calling exit. + _ = b.graph.io.lockStderr(&.{}, b.graph.stderr_mode) catch {}; + process.exit(1); + }, + else => |e| return e, }; // run all steps that are dependencies - try runSteps( - gpa, + try runStepNames( b, - &step_dependencies, + step_names, main_progress_node, run, + null, ); var include_dirs: std.StringArrayHashMapUnmanaged(void) = .{};