diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 7c46fdf..ba30d67 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -91,7 +91,7 @@ jobs:
       - name: Install zig
         uses: goto-bus-stop/setup-zig@v2
         with:
-          version: 0.12.0-dev.1245+a07f288eb
+          version: 0.12.0-dev.1710+2bffd8101

       - name: test
         run: zig build test --summary all
diff --git a/build.zig b/build.zig
index cc2d276..39cd989 100644
--- a/build.zig
+++ b/build.zig
@@ -201,7 +201,7 @@ fn benchTargets(

     // Open the directory
     const c_dir_path = (comptime thisDir()) ++ "/src/bench";
-    var c_dir = try std.fs.openIterableDirAbsolute(c_dir_path, .{});
+    var c_dir = try std.fs.openDirAbsolute(c_dir_path, .{ .iterate = true });
     defer c_dir.close();

     // Go through and add each as a step
@@ -258,7 +258,7 @@ fn exampleTargets(

     // Open the directory
     const c_dir_path = (comptime thisDir()) ++ "/examples";
-    var c_dir = try std.fs.openIterableDirAbsolute(c_dir_path, .{});
+    var c_dir = try std.fs.openDirAbsolute(c_dir_path, .{ .iterate = true });
     defer c_dir.close();

     // Go through and add each as a step
diff --git a/flake.lock b/flake.lock
index 19cd84f..e6ae48a 100644
--- a/flake.lock
+++ b/flake.lock
@@ -126,11 +126,11 @@
         "nixpkgs": "nixpkgs_2"
       },
       "locked": {
-        "lastModified": 1700526194,
-        "narHash": "sha256-/7C9bzFG0Gq/tBAbSwC84Dg5TNPomCcxIJJQNj3Y2BI=",
+        "lastModified": 1700827703,
+        "narHash": "sha256-EfM3pRvtb5TrvxURhtI1gEKb/mSXHJx3A/12HOWKOyI=",
         "owner": "mitchellh",
         "repo": "zig-overlay",
-        "rev": "2877b025231e96196469ee8a5b80799027b42e03",
+        "rev": "2c9179e22a4759c7c88438a4a9eb0f5e3c00d3b0",
         "type": "github"
       },
       "original": {
diff --git a/src/ThreadPool.zig b/src/ThreadPool.zig
index 69fb964..920e440 100644
--- a/src/ThreadPool.zig
+++ b/src/ThreadPool.zig
@@ -38,7 +38,7 @@ const ThreadPool = @This();

 const std = @import("std");
 const assert = std.debug.assert;
-const Atomic = std.atomic.Atomic;
+const Atomic = std.atomic.Value;

 stack_size: u32,
 max_threads: u32,
@@ -192,7 +192,7 @@ noinline fn notifySlow(self: *ThreadPool, is_waking: bool) void {

         // Release barrier synchronizes with Acquire in wait()
         // to ensure pushes to run queues happen before observing a posted notification.
-        sync = @bitCast(self.sync.tryCompareAndSwap(
+        sync = @bitCast(self.sync.cmpxchgWeak(
            @bitCast(sync),
            @bitCast(new_sync),
            .Release,
@@ -235,7 +235,7 @@ noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool {

            // Acquire barrier synchronizes with notify()
            // to ensure that pushes to run queue are observed after wait() returns.
-            sync = @bitCast(self.sync.tryCompareAndSwap(
+            sync = @bitCast(self.sync.cmpxchgWeak(
                @bitCast(sync),
                @bitCast(new_sync),
                .Acquire,
@@ -252,7 +252,7 @@ noinline fn wait(self: *ThreadPool, _is_waking: bool) error{Shutdown}!bool {
            if (is_waking)
                new_sync.state = .pending;

-            sync = @bitCast(self.sync.tryCompareAndSwap(
+            sync = @bitCast(self.sync.cmpxchgWeak(
                @bitCast(sync),
                @bitCast(new_sync),
                .Monotonic,
@@ -282,7 +282,7 @@ pub noinline fn shutdown(self: *ThreadPool) void {
        new_sync.idle = 0;

        // Full barrier to synchronize with both wait() and notify()
-        sync = @bitCast(self.sync.tryCompareAndSwap(
+        sync = @bitCast(self.sync.cmpxchgWeak(
            @bitCast(sync),
            @bitCast(new_sync),
            .AcqRel,
@@ -301,7 +301,7 @@ fn register(noalias self: *ThreadPool, noalias thread: *Thread) void {
    var threads = self.threads.load(.Monotonic);
    while (true) {
        thread.next = threads;
-        threads = self.threads.tryCompareAndSwap(
+        threads = self.threads.cmpxchgWeak(
            threads,
            thread,
            .Release,
@@ -455,14 +455,14 @@ const Event = struct {
            // Acquire barrier to ensure operations before the shutdown() are seen after the wait().
            // Shutdown is rare so it's better to have an Acquire barrier here instead of on CAS failure + load which are common.
            if (state == SHUTDOWN) {
-                std.atomic.fence(.Acquire);
+                @fence(.Acquire);
                return;
            }

            // Consume a notification when it pops up.
            // Acquire barrier to ensure operations before the notify() appear after the wait().
            if (state == NOTIFIED) {
-                state = self.state.tryCompareAndSwap(
+                state = self.state.cmpxchgWeak(
                    state,
                    acquire_with,
                    .Acquire,
@@ -473,7 +473,7 @@ const Event = struct {

            // There is no notification to consume, we should wait on the event by ensuring its WAITING.
            if (state != WAITING) blk: {
-                state = self.state.tryCompareAndSwap(
+                state = self.state.cmpxchgWeak(
                    state,
                    WAITING,
                    .Monotonic,
@@ -556,7 +556,7 @@ const Node = struct {
                new_stack |= (stack & ~PTR_MASK);

                // Push to the stack with a release barrier for the consumer to see the proper list links.
-                stack = self.stack.tryCompareAndSwap(
+                stack = self.stack.cmpxchgWeak(
                    stack,
                    new_stack,
                    .Release,
@@ -582,7 +582,7 @@ const Node = struct {

            // Acquire barrier on getting the consumer to see cache/Node updates done by previous consumers
            // and to ensure our cache/Node updates in pop() happen after that of previous consumers.
-            stack = self.stack.tryCompareAndSwap(
+            stack = self.stack.cmpxchgWeak(
                stack,
                new_stack,
                .Acquire,
@@ -645,7 +645,7 @@ const Node = struct {

        fn push(noalias self: *Buffer, noalias list: *List) error{Overflow}!void {
            var head = self.head.load(.Monotonic);
-            var tail = self.tail.loadUnchecked(); // we're the only thread that can change this
+            var tail = self.tail.raw; // we're the only thread that can change this

            while (true) {
                var size = tail -% head;
@@ -677,22 +677,22 @@ const Node = struct {
                // Migrating half amortizes the cost of stealing while requiring future pops to still use the buffer.
                // Acquire barrier to ensure the linked list creation after the steal only happens after we succesfully steal.
                var migrate = size / 2;
-                head = self.head.tryCompareAndSwap(
+                head = self.head.cmpxchgWeak(
                    head,
                    head +% migrate,
                    .Acquire,
                    .Monotonic,
                ) orelse {
                    // Link the migrated Nodes together
-                    const first = self.array[head % capacity].loadUnchecked();
+                    const first = self.array[head % capacity].raw;
                    while (migrate > 0) : (migrate -= 1) {
-                        const prev = self.array[head % capacity].loadUnchecked();
+                        const prev = self.array[head % capacity].raw;
                        head +%= 1;
-                        prev.next = self.array[head % capacity].loadUnchecked();
+                        prev.next = self.array[head % capacity].raw;
                    }

                    // Append the list that was supposed to be pushed to the end of the migrated Nodes
-                    const last = self.array[(head -% 1) % capacity].loadUnchecked();
+                    const last = self.array[(head -% 1) % capacity].raw;
                    last.next = list.head;
                    list.tail.next = null;

@@ -705,7 +705,7 @@ const Node = struct {

        fn pop(self: *Buffer) ?*Node {
            var head = self.head.load(.Monotonic);
-            const tail = self.tail.loadUnchecked(); // we're the only thread that can change this
+            const tail = self.tail.raw; // we're the only thread that can change this

            while (true) {
                // Quick sanity check and return null when not empty
@@ -717,12 +717,12 @@ const Node = struct {

                // Dequeue with an acquire barrier to ensure any writes done to the Node
                // only happen after we succesfully claim it from the array.
-                head = self.head.tryCompareAndSwap(
+                head = self.head.cmpxchgWeak(
                    head,
                    head +% 1,
                    .Acquire,
                    .Monotonic,
-                ) orelse return self.array[head % capacity].loadUnchecked();
+                ) orelse return self.array[head % capacity].raw;
            }
        }

@@ -736,7 +736,7 @@ const Node = struct {
            defer queue.releaseConsumer(consumer);

            const head = self.head.load(.Monotonic);
-            const tail = self.tail.loadUnchecked(); // we're the only thread that can change this
+            const tail = self.tail.raw; // we're the only thread that can change this

            const size = tail -% head;
            assert(size <= capacity);
@@ -755,7 +755,7 @@ const Node = struct {
                const node = queue.pop(&consumer) orelse blk: {
                    if (pushed == 0) return null;
                    pushed -= 1;
-                    break :blk self.array[(tail +% pushed) % capacity].loadUnchecked();
+                    break :blk self.array[(tail +% pushed) % capacity].raw;
                };

                // Update the array tail with the nodes we pushed to it.
@@ -769,7 +769,7 @@ const Node = struct {

        fn steal(noalias self: *Buffer, noalias buffer: *Buffer) ?Stole {
            const head = self.head.load(.Monotonic);
-            const tail = self.tail.loadUnchecked(); // we're the only thread that can change this
+            const tail = self.tail.raw; // we're the only thread that can change this

            const size = tail -% head;
            assert(size <= capacity);
@@ -805,7 +805,7 @@ const Node = struct {
            // - an Acquire barrier to ensure that we only interact with the stolen Nodes after the steal was committed.
            // - a Release barrier to ensure that the Nodes are copied above prior to the committing of the steal
            //   because if they're copied after the steal, the could be getting rewritten by the target's push().
-            _ = buffer.head.compareAndSwap(
+            _ = buffer.head.cmpxchgStrong(
                buffer_head,
                buffer_head +% steal_size,
                .AcqRel,
@@ -813,7 +813,7 @@ const Node = struct {
            ) orelse {
                // Pop one from the nodes we stole as we'll be returning it
                const pushed = steal_size - 1;
-                const node = self.array[(tail +% pushed) % capacity].loadUnchecked();
+                const node = self.array[(tail +% pushed) % capacity].raw;

                // Update the array tail with the nodes we pushed to it.
                // Release barrier to synchronize with Acquire barrier in steal()'s to see the written array Nodes.
diff --git a/src/backend/iocp.zig b/src/backend/iocp.zig
index 2636cc6..682ef24 100644
--- a/src/backend/iocp.zig
+++ b/src/backend/iocp.zig
@@ -1272,7 +1272,7 @@ pub const Operation = union(OperationType) {
    },

    async_wait: struct {
-        wakeup: std.atomic.Atomic(bool) = .{ .value = false },
+        wakeup: std.atomic.Value(bool) = .{ .raw = false },
    },

    job_object: struct {
diff --git a/src/bench/async_pummel_1.zig b/src/bench/async_pummel_1.zig
index 017d5c6..052f156 100644
--- a/src/bench/async_pummel_1.zig
+++ b/src/bench/async_pummel_1.zig
@@ -30,7 +30,7 @@ pub fn run(comptime thread_count: comptime_int) !void {
    notifier = try xev.Async.init();
    defer notifier.deinit();

-    var userdata: ?*void = null;
+    const userdata: ?*void = null;
    var c: xev.Completion = undefined;
    notifier.wait(&loop, &c, void, userdata, &asyncCallback);

diff --git a/src/build/ScdocStep.zig b/src/build/ScdocStep.zig
index 6bd33b6..96ebde7 100644
--- a/src/build/ScdocStep.zig
+++ b/src/build/ScdocStep.zig
@@ -63,7 +63,7 @@ fn make(step: *std.build.Step, progress: *std.Progress.Node) !void {
    }

    // Find all our man pages which are in our src path ending with ".scd".
-    var dir = try fs.openIterableDirAbsolute(self.src_path, .{});
+    var dir = try fs.openDirAbsolute(self.src_path, .{ .iterate = true });
    defer dir.close();

    var iter = dir.iterate();
@@ -138,7 +138,7 @@ const InstallStep = struct {
        }

        // Find all our man pages which are in our src path ending with ".scd".
-        var dir = try fs.openIterableDirAbsolute(path, .{});
+        var dir = try fs.openDirAbsolute(path, .{ .iterate = true });
        defer dir.close();

        var iter = dir.iterate();
        while (try iter.next()) |*entry| {
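---

Migration notes. This diff tracks std renames between Zig 0.12.0-dev.1245 and 0.12.0-dev.1710: std.atomic.Atomic(T) became std.atomic.Value(T) and its init field changed from .value to .raw; tryCompareAndSwap became cmpxchgWeak and compareAndSwap became cmpxchgStrong (both still return null on success and the observed value on failure); loadUnchecked() was replaced by reading the .raw field directly; std.atomic.fence gave way to the @fence builtin; std.fs.openIterableDirAbsolute(path, .{}) was folded into std.fs.openDirAbsolute(path, .{ .iterate = true }); and the var -> const change in async_pummel_1.zig reflects the compiler's new error for local vars that are never mutated.

Below is a minimal sketch of the new API shapes as of 0.12.0-dev.1710, assuming the capitalized orderings (.Monotonic, .Release) still in use at that version. The names Flag, trySet, isSet, and listDir are hypothetical illustrations, not code from this repository:

    const std = @import("std");

    const Flag = struct {
        // Was: state: std.atomic.Atomic(u32) = .{ .value = 0 },
        state: std.atomic.Value(u32) = .{ .raw = 0 },

        // Attempt to set the flag; true if this caller won the race.
        fn trySet(self: *Flag) bool {
            var state = self.state.load(.Monotonic);
            while (state == 0) {
                // Was tryCompareAndSwap: cmpxchgWeak returns null on success,
                // or the observed value on (possibly spurious) failure, so it
                // belongs inside a retry loop like this one.
                state = self.state.cmpxchgWeak(state, 1, .Release, .Monotonic) orelse return true;
            }
            return false;
        }

        // Was: self.state.loadUnchecked(); plain field access replaces it
        // where the caller knows no other thread is writing concurrently.
        fn isSet(self: *Flag) bool {
            return self.state.raw == 1;
        }
    };

    // Was: std.fs.openIterableDirAbsolute(path, .{}); IterableDir was merged
    // into Dir, so iteration is now an opt-in flag on the ordinary open call.
    fn listDir(path: []const u8) !void {
        var dir = try std.fs.openDirAbsolute(path, .{ .iterate = true });
        defer dir.close();
        var iter = dir.iterate();
        while (try iter.next()) |entry| {
            std.debug.print("{s}\n", .{entry.name});
        }
    }

The weak/strong split mirrors the diff's own usage: every CAS inside a retry loop becomes cmpxchgWeak, while the single one-shot commit of a steal in ThreadPool.zig becomes cmpxchgStrong, since a spurious failure there would abort the steal rather than retry.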