diff --git a/doc/langref.html.in b/doc/langref.html.in index baba472e88..3a7892fd45 100644 --- a/doc/langref.html.in +++ b/doc/langref.html.in @@ -6728,17 +6728,8 @@ async fn func(y: *i32) void { This builtin function atomically dereferences a pointer and returns the value.
- {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, a float, - an integer whose bit count meets these requirements: -
-- TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe - we can remove this restriction + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, + an integer, or an enum.
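As a minimal sketch of what the relaxed rule permits (the enum and variable names here are illustrative, not part of the patch, and follow the 0.6-era test style used elsewhere in this diff):

```zig
const expect = @import("std").testing.expect;

test "@atomicLoad on bool, a non-power-of-two int, and an enum" {
    var flag: bool = true;
    expect(@atomicLoad(bool, &flag, .SeqCst));

    var small: u7 = 100; // widened to its ABI size by codegen, then truncated
    expect(@atomicLoad(u7, &small, .SeqCst) == 100);

    const E = enum { a, b };
    var e: E = .b;
    expect(@atomicLoad(E, &e, .SeqCst) == .b);
}
```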
{#header_close#} {#header_open|@atomicRmw#} @@ -6747,17 +6738,8 @@ async fn func(y: *i32) void { This builtin function atomically modifies memory and then returns the previous value.- {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, - or an integer whose bit count meets these requirements: -
-- TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe - we can remove this restriction + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, + an integer, or an enum.
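A hedged sketch of the new operand types for @atomicRmw; per the analysis changes further down in this diff, bool and enum operands are only accepted with .Xchg (the names below are illustrative):

```zig
const expect = @import("std").testing.expect;

test "@atomicRmw .Xchg on bool and enum" {
    var flag: bool = false;
    // bool and enum operands are limited to .Xchg
    expect(@atomicRmw(bool, &flag, .Xchg, true, .SeqCst) == false);
    expect(flag);

    const E = enum { a, b };
    var e: E = .a;
    expect(@atomicRmw(E, &e, .Xchg, .b, .SeqCst) == .a);
    expect(e == .b);
}
```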
Supported operations: @@ -6782,17 +6764,8 @@ async fn func(y: *i32) void { This builtin function atomically stores a value.
- {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}, a float, - an integer whose bit count meets these requirements: -
-- TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe - we can remove this restriction + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, + an integer, or an enum.
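For @atomicStore, a small illustrative sketch with a non-power-of-two integer width (the variable is hypothetical; the widening is handled by get_atomic_abi_type in the codegen changes below):

```zig
const expect = @import("std").testing.expect;

test "@atomicStore on a non-power-of-two integer" {
    var x: u15 = 0; // stored through a widened ABI-sized integer
    @atomicStore(u15, &x, 1234, .SeqCst);
    expect(@atomicLoad(u15, &x, .SeqCst) == 1234);
}
```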
{#header_close#} {#header_open|@bitCast#} @@ -7108,7 +7081,8 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v more efficiently in machine instructions.- {#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("builtin").AtomicOrder{#endsyntax#}. + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, + an integer, or an enum.
{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}
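An illustrative @cmpxchgStrong sketch with an enum operand, mirroring the State enum pattern that std.event.RwLock uses later in this diff (the State type here is hypothetical):

```zig
const expect = @import("std").testing.expect;

test "@cmpxchgStrong on an enum" {
    const State = enum(u8) { unlocked, locked };
    var s: State = .unlocked;
    // expected value matches: the swap succeeds and null is returned
    expect(@cmpxchgStrong(State, &s, .unlocked, .locked, .SeqCst, .SeqCst) == null);
    // expected value no longer matches: the current value is returned
    expect(@cmpxchgStrong(State, &s, .unlocked, .locked, .SeqCst, .SeqCst).? == .locked);
}
```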
{#see_also|Compile Variables|cmpxchgWeak#} @@ -7136,7 +7110,8 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.- {#syntax#}AtomicOrder{#endsyntax#} can be found with {#syntax#}@import("builtin").AtomicOrder{#endsyntax#}. + {#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float, + an integer, or an enum.
{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}
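A sketch of the usual retry loop around @cmpxchgWeak, modeled on the testCmpxchg case in the behavior tests below:

```zig
const expect = @import("std").testing.expect;

test "@cmpxchgWeak retry loop" {
    var x: i32 = 1234;
    // a weak exchange may fail spuriously, so retry until it succeeds
    while (@cmpxchgWeak(i32, &x, 1234, 5678, .SeqCst, .SeqCst)) |current| {
        expect(current == 1234);
    }
    expect(x == 5678);
}
```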
{#see_also|Compile Variables|cmpxchgStrong#} diff --git a/lib/std/atomic/int.zig b/lib/std/atomic/int.zig index 94985b914f..446059e7ef 100644 --- a/lib/std/atomic/int.zig +++ b/lib/std/atomic/int.zig @@ -1,6 +1,3 @@ -const builtin = @import("builtin"); -const AtomicOrder = builtin.AtomicOrder; - /// Thread-safe, lock-free integer pub fn Int(comptime T: type) type { return struct { @@ -14,16 +11,16 @@ pub fn Int(comptime T: type) type { /// Returns previous value pub fn incr(self: *Self) T { - return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, 1, AtomicOrder.SeqCst); + return @atomicRmw(T, &self.unprotected_value, .Add, 1, .SeqCst); } /// Returns previous value pub fn decr(self: *Self) T { - return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Sub, 1, AtomicOrder.SeqCst); + return @atomicRmw(T, &self.unprotected_value, .Sub, 1, .SeqCst); } pub fn get(self: *Self) T { - return @atomicLoad(T, &self.unprotected_value, AtomicOrder.SeqCst); + return @atomicLoad(T, &self.unprotected_value, .SeqCst); } pub fn set(self: *Self, new_value: T) void { @@ -31,11 +28,11 @@ pub fn Int(comptime T: type) type { } pub fn xchg(self: *Self, new_value: T) T { - return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Xchg, new_value, AtomicOrder.SeqCst); + return @atomicRmw(T, &self.unprotected_value, .Xchg, new_value, .SeqCst); } pub fn fetchAdd(self: *Self, op: T) T { - return @atomicRmw(T, &self.unprotected_value, builtin.AtomicRmwOp.Add, op, AtomicOrder.SeqCst); + return @atomicRmw(T, &self.unprotected_value, .Add, op, .SeqCst); } }; } diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig index 1a0f39587e..52e200e7a2 100644 --- a/lib/std/atomic/queue.zig +++ b/lib/std/atomic/queue.zig @@ -1,7 +1,5 @@ const std = @import("../std.zig"); const builtin = @import("builtin"); -const AtomicOrder = builtin.AtomicOrder; -const AtomicRmwOp = builtin.AtomicRmwOp; const assert = std.debug.assert; const expect = std.testing.expect; @@ -145,7 +143,7 @@ const Context = struct { put_sum: isize, get_sum: isize, get_count: usize, - puts_done: u8, // TODO make this a bool + puts_done: bool, }; // TODO add lazy evaluated build options and then put puts_per_thread behind @@ -169,7 +167,7 @@ test "std.atomic.Queue" { .queue = &queue, .put_sum = 0, .get_sum = 0, - .puts_done = 0, + .puts_done = false, .get_count = 0, }; @@ -182,7 +180,7 @@ test "std.atomic.Queue" { } } expect(!context.queue.isEmpty()); - context.puts_done = 1; + context.puts_done = true; { var i: usize = 0; while (i < put_thread_count) : (i += 1) { @@ -204,7 +202,7 @@ test "std.atomic.Queue" { for (putters) |t| t.wait(); - @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst); + @atomicStore(bool, &context.puts_done, true, .SeqCst); for (getters) |t| t.wait(); @@ -231,25 +229,25 @@ fn startPuts(ctx: *Context) u8 { std.time.sleep(1); // let the os scheduler be our fuzz const x = @bitCast(i32, r.random.scalar(u32)); const node = ctx.allocator.create(Queue(i32).Node) catch unreachable; - node.* = Queue(i32).Node{ + node.* = .{ .prev = undefined, .next = undefined, .data = x, }; ctx.queue.put(node); - _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst); + _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst); } return 0; } fn startGets(ctx: *Context) u8 { while (true) { - const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1; + const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst); while (ctx.queue.get()) |node| { std.time.sleep(1); // let the os 
scheduler be our fuzz - _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst); - _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst); + _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst); + _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst); } if (last) return 0; diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig index 0f67a257cc..092dce15b0 100644 --- a/lib/std/atomic/stack.zig +++ b/lib/std/atomic/stack.zig @@ -1,6 +1,5 @@ const assert = std.debug.assert; const builtin = @import("builtin"); -const AtomicOrder = builtin.AtomicOrder; const expect = std.testing.expect; /// Many reader, many writer, non-allocating, thread-safe @@ -11,7 +10,7 @@ pub fn Stack(comptime T: type) type { root: ?*Node, lock: @TypeOf(lock_init), - const lock_init = if (builtin.single_threaded) {} else @as(u8, 0); + const lock_init = if (builtin.single_threaded) {} else false; pub const Self = @This(); @@ -31,7 +30,7 @@ pub fn Stack(comptime T: type) type { /// being the first item in the stack, returns the other item that was there. pub fn pushFirst(self: *Self, node: *Node) ?*Node { node.next = null; - return @cmpxchgStrong(?*Node, &self.root, null, node, AtomicOrder.SeqCst, AtomicOrder.SeqCst); + return @cmpxchgStrong(?*Node, &self.root, null, node, .SeqCst, .SeqCst); } pub fn push(self: *Self, node: *Node) void { @@ -39,8 +38,8 @@ pub fn Stack(comptime T: type) type { node.next = self.root; self.root = node; } else { - while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} - defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); + while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {} + defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst)); node.next = self.root; self.root = node; @@ -53,8 +52,8 @@ pub fn Stack(comptime T: type) type { self.root = root.next; return root; } else { - while (@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst) != 0) {} - defer assert(@atomicRmw(u8, &self.lock, builtin.AtomicRmwOp.Xchg, 0, AtomicOrder.SeqCst) == 1); + while (@atomicRmw(bool, &self.lock, .Xchg, true, .SeqCst)) {} + defer assert(@atomicRmw(bool, &self.lock, .Xchg, false, .SeqCst)); const root = self.root orelse return null; self.root = root.next; @@ -63,7 +62,7 @@ pub fn Stack(comptime T: type) type { } pub fn isEmpty(self: *Self) bool { - return @atomicLoad(?*Node, &self.root, AtomicOrder.SeqCst) == null; + return @atomicLoad(?*Node, &self.root, .SeqCst) == null; } }; } @@ -75,7 +74,7 @@ const Context = struct { put_sum: isize, get_sum: isize, get_count: usize, - puts_done: u8, // TODO make this a bool + puts_done: bool, }; // TODO add lazy evaluated build options and then put puts_per_thread behind // some option such as: "AggressiveMultithreadedFuzzTest". 
In the AppVeyor @@ -98,7 +97,7 @@ test "std.atomic.stack" { .stack = &stack, .put_sum = 0, .get_sum = 0, - .puts_done = 0, + .puts_done = false, .get_count = 0, }; @@ -109,7 +108,7 @@ test "std.atomic.stack" { expect(startPuts(&context) == 0); } } - context.puts_done = 1; + context.puts_done = true; { var i: usize = 0; while (i < put_thread_count) : (i += 1) { @@ -128,7 +127,7 @@ test "std.atomic.stack" { for (putters) |t| t.wait(); - @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst); + @atomicStore(bool, &context.puts_done, true, .SeqCst); for (getters) |t| t.wait(); } @@ -158,19 +157,19 @@ fn startPuts(ctx: *Context) u8 { .data = x, }; ctx.stack.push(node); - _ = @atomicRmw(isize, &ctx.put_sum, builtin.AtomicRmwOp.Add, x, AtomicOrder.SeqCst); + _ = @atomicRmw(isize, &ctx.put_sum, .Add, x, .SeqCst); } return 0; } fn startGets(ctx: *Context) u8 { while (true) { - const last = @atomicLoad(u8, &ctx.puts_done, builtin.AtomicOrder.SeqCst) == 1; + const last = @atomicLoad(bool, &ctx.puts_done, .SeqCst); while (ctx.stack.pop()) |node| { std.time.sleep(1); // let the os scheduler be our fuzz - _ = @atomicRmw(isize, &ctx.get_sum, builtin.AtomicRmwOp.Add, node.data, builtin.AtomicOrder.SeqCst); - _ = @atomicRmw(usize, &ctx.get_count, builtin.AtomicRmwOp.Add, 1, builtin.AtomicOrder.SeqCst); + _ = @atomicRmw(isize, &ctx.get_sum, .Add, node.data, .SeqCst); + _ = @atomicRmw(usize, &ctx.get_count, .Add, 1, .SeqCst); } if (last) return 0; diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig index 3c5b48d047..83c77bcac5 100644 --- a/lib/std/event/channel.zig +++ b/lib/std/event/channel.zig @@ -14,8 +14,8 @@ pub fn Channel(comptime T: type) type { putters: std.atomic.Queue(PutNode), get_count: usize, put_count: usize, - dispatch_lock: u8, // TODO make this a bool - need_dispatch: u8, // TODO make this a bool + dispatch_lock: bool, + need_dispatch: bool, // simple fixed size ring buffer buffer_nodes: []T, @@ -62,8 +62,8 @@ pub fn Channel(comptime T: type) type { .buffer_len = 0, .buffer_nodes = buffer, .buffer_index = 0, - .dispatch_lock = 0, - .need_dispatch = 0, + .dispatch_lock = false, + .need_dispatch = false, .getters = std.atomic.Queue(GetNode).init(), .putters = std.atomic.Queue(PutNode).init(), .or_null_queue = std.atomic.Queue(*std.atomic.Queue(GetNode).Node).init(), @@ -165,15 +165,14 @@ pub fn Channel(comptime T: type) type { fn dispatch(self: *SelfChannel) void { // set the "need dispatch" flag - @atomicStore(u8, &self.need_dispatch, 1, .SeqCst); + @atomicStore(bool, &self.need_dispatch, true, .SeqCst); lock: while (true) { // set the lock flag - const prev_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 1, .SeqCst); - if (prev_lock != 0) return; + if (@atomicRmw(bool, &self.dispatch_lock, .Xchg, true, .SeqCst)) return; // clear the need_dispatch flag since we're about to do it - @atomicStore(u8, &self.need_dispatch, 0, .SeqCst); + @atomicStore(bool, &self.need_dispatch, false, .SeqCst); while (true) { one_dispatch: { @@ -250,14 +249,12 @@ pub fn Channel(comptime T: type) type { } // clear need-dispatch flag - const need_dispatch = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst); - if (need_dispatch != 0) continue; + if (@atomicRmw(bool, &self.need_dispatch, .Xchg, false, .SeqCst)) continue; - const my_lock = @atomicRmw(u8, &self.dispatch_lock, .Xchg, 0, .SeqCst); - assert(my_lock != 0); + assert(@atomicRmw(bool, &self.dispatch_lock, .Xchg, false, .SeqCst)); // we have to check again now that we unlocked - if (@atomicLoad(u8, &self.need_dispatch, .SeqCst) != 0) 
continue :lock; + if (@atomicLoad(bool, &self.need_dispatch, .SeqCst)) continue :lock; return; } diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig index 1bb51261d7..ff1f738c5e 100644 --- a/lib/std/event/lock.zig +++ b/lib/std/event/lock.zig @@ -11,9 +11,9 @@ const Loop = std.event.Loop; /// Allows only one actor to hold the lock. /// TODO: make this API also work in blocking I/O mode. pub const Lock = struct { - shared_bit: u8, // TODO make this a bool + shared: bool, queue: Queue, - queue_empty_bit: u8, // TODO make this a bool + queue_empty: bool, const Queue = std.atomic.Queue(anyframe); @@ -31,20 +31,19 @@ pub const Lock = struct { } // We need to release the lock. - @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst); - @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst); + @atomicStore(bool, &self.lock.queue_empty, true, .SeqCst); + @atomicStore(bool, &self.lock.shared, false, .SeqCst); // There might be a queue item. If we know the queue is empty, we can be done, // because the other actor will try to obtain the lock. // But if there's a queue item, we are the actor which must loop and attempt // to grab the lock again. - if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) { + if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) { return; } while (true) { - const old_bit = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 1, .SeqCst); - if (old_bit != 0) { + if (@atomicRmw(bool, &self.lock.shared, .Xchg, true, .SeqCst)) { // We did not obtain the lock. Great, the queue is someone else's problem. return; } @@ -56,11 +55,11 @@ pub const Lock = struct { } // Release the lock again. - @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst); - @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst); + @atomicStore(bool, &self.lock.queue_empty, true, .SeqCst); + @atomicStore(bool, &self.lock.shared, false, .SeqCst); // Find out if we can be done. - if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) { + if (@atomicLoad(bool, &self.lock.queue_empty, .SeqCst)) { return; } } @@ -69,24 +68,24 @@ pub const Lock = struct { pub fn init() Lock { return Lock{ - .shared_bit = 0, + .shared = false, .queue = Queue.init(), - .queue_empty_bit = 1, + .queue_empty = true, }; } pub fn initLocked() Lock { return Lock{ - .shared_bit = 1, + .shared = true, .queue = Queue.init(), - .queue_empty_bit = 1, + .queue_empty = true, }; } /// Must be called when not locked. Not thread safe. /// All calls to acquire() and release() must complete before calling deinit(). pub fn deinit(self: *Lock) void { - assert(self.shared_bit == 0); + assert(!self.shared); while (self.queue.get()) |node| resume node.data; } @@ -99,12 +98,11 @@ pub const Lock = struct { // At this point, we are in the queue, so we might have already been resumed. - // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor + // We set this bit so that later we can rely on the fact, that if queue_empty == true, some actor // will attempt to grab the lock. - @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst); + @atomicStore(bool, &self.queue_empty, false, .SeqCst); - const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst); - if (old_bit == 0) { + if (!@atomicRmw(bool, &self.shared, .Xchg, true, .SeqCst)) { if (self.queue.get()) |node| { // Whether this node is us or someone else, we tail resume it. 
resume node.data; diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig index f4b13d008b..425088063f 100644 --- a/lib/std/event/rwlock.zig +++ b/lib/std/event/rwlock.zig @@ -16,8 +16,8 @@ pub const RwLock = struct { shared_state: State, writer_queue: Queue, reader_queue: Queue, - writer_queue_empty_bit: u8, // TODO make this a bool - reader_queue_empty_bit: u8, // TODO make this a bool + writer_queue_empty: bool, + reader_queue_empty: bool, reader_lock_count: usize, const State = enum(u8) { @@ -40,7 +40,7 @@ pub const RwLock = struct { return; } - @atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst); + @atomicStore(bool, &self.lock.reader_queue_empty, true, .SeqCst); if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) { // Didn't unlock. Someone else's problem. return; @@ -62,7 +62,7 @@ pub const RwLock = struct { } // We need to release the write lock. Check if any readers are waiting to grab the lock. - if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) { + if (!@atomicLoad(bool, &self.lock.reader_queue_empty, .SeqCst)) { // Switch to a read lock. @atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst); while (self.lock.reader_queue.get()) |node| { @@ -71,7 +71,7 @@ pub const RwLock = struct { return; } - @atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst); + @atomicStore(bool, &self.lock.writer_queue_empty, true, .SeqCst); @atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst); self.lock.commonPostUnlock(); @@ -79,12 +79,12 @@ pub const RwLock = struct { }; pub fn init() RwLock { - return RwLock{ + return .{ .shared_state = .Unlocked, .writer_queue = Queue.init(), - .writer_queue_empty_bit = 1, + .writer_queue_empty = true, .reader_queue = Queue.init(), - .reader_queue_empty_bit = 1, + .reader_queue_empty = true, .reader_lock_count = 0, }; } @@ -111,9 +111,9 @@ pub const RwLock = struct { // At this point, we are in the reader_queue, so we might have already been resumed. - // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1, + // We set this bit so that later we can rely on the fact, that if reader_queue_empty == true, // some actor will attempt to grab the lock. - @atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst); + @atomicStore(bool, &self.reader_queue_empty, false, .SeqCst); // Here we don't care if we are the one to do the locking or if it was already locked for reading. const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true; @@ -142,9 +142,9 @@ pub const RwLock = struct { // At this point, we are in the writer_queue, so we might have already been resumed. - // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1, + // We set this bit so that later we can rely on the fact, that if writer_queue_empty == true, // some actor will attempt to grab the lock. - @atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst); + @atomicStore(bool, &self.writer_queue_empty, false, .SeqCst); // Here we must be the one to acquire the write lock. It cannot already be locked. if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) { @@ -165,7 +165,7 @@ pub const RwLock = struct { // obtain the lock. // But if there's a writer_queue item or a reader_queue item, // we are the actor which must loop and attempt to grab the lock again. 
- if (@atomicLoad(u8, &self.writer_queue_empty_bit, .SeqCst) == 0) { + if (!@atomicLoad(bool, &self.writer_queue_empty, .SeqCst)) { if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) != null) { // We did not obtain the lock. Great, the queues are someone else's problem. return; @@ -176,12 +176,12 @@ pub const RwLock = struct { return; } // Release the lock again. - @atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst); + @atomicStore(bool, &self.writer_queue_empty, true, .SeqCst); @atomicStore(State, &self.shared_state, .Unlocked, .SeqCst); continue; } - if (@atomicLoad(u8, &self.reader_queue_empty_bit, .SeqCst) == 0) { + if (!@atomicLoad(bool, &self.reader_queue_empty, .SeqCst)) { if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst) != null) { // We did not obtain the lock. Great, the queues are someone else's problem. return; @@ -195,7 +195,7 @@ pub const RwLock = struct { return; } // Release the lock again. - @atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst); + @atomicStore(bool, &self.reader_queue_empty, true, .SeqCst); if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) { // Didn't unlock. Someone else's problem. return; diff --git a/src/codegen.cpp b/src/codegen.cpp index e692c7b805..aba8a49032 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -5251,11 +5251,55 @@ static enum ZigLLVM_AtomicRMWBinOp to_ZigLLVMAtomicRMWBinOp(AtomicRmwOp op, bool zig_unreachable(); } +static LLVMTypeRef get_atomic_abi_type(CodeGen *g, IrInstGen *instruction) { + // If the operand type of an atomic operation is not a power of two sized + // we need to widen it before using it and then truncate the result. + + ir_assert(instruction->value->type->id == ZigTypeIdPointer, instruction); + ZigType *operand_type = instruction->value->type->data.pointer.child_type; + if (operand_type->id == ZigTypeIdInt || operand_type->id == ZigTypeIdEnum) { + if (operand_type->id == ZigTypeIdEnum) { + operand_type = operand_type->data.enumeration.tag_int_type; + } + auto bit_count = operand_type->data.integral.bit_count; + bool is_signed = operand_type->data.integral.is_signed; + + ir_assert(bit_count != 0, instruction); + if (bit_count == 1 || !is_power_of_2(bit_count)) { + return get_llvm_type(g, get_int_type(g, is_signed, operand_type->abi_size * 8)); + } else { + return nullptr; + } + } else if (operand_type->id == ZigTypeIdFloat) { + return nullptr; + } else if (operand_type->id == ZigTypeIdBool) { + return g->builtin_types.entry_u8->llvm_type; + } else { + ir_assert(get_codegen_ptr_type_bail(g, operand_type) != nullptr, instruction); + return nullptr; + } +} + static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, IrInstGenCmpxchg *instruction) { LLVMValueRef ptr_val = ir_llvm_value(g, instruction->ptr); LLVMValueRef cmp_val = ir_llvm_value(g, instruction->cmp_value); LLVMValueRef new_val = ir_llvm_value(g, instruction->new_value); + ZigType *operand_type = instruction->new_value->value->type; + LLVMTypeRef actual_abi_type = get_atomic_abi_type(g, instruction->ptr); + if (actual_abi_type != nullptr) { + // operand needs widening and truncating + ptr_val = LLVMBuildBitCast(g->builder, ptr_val, + LLVMPointerType(actual_abi_type, 0), ""); + if (operand_type->data.integral.is_signed) { + cmp_val = LLVMBuildSExt(g->builder, cmp_val, actual_abi_type, ""); + new_val = LLVMBuildSExt(g->builder, new_val, actual_abi_type, ""); + } else { + cmp_val = LLVMBuildZExt(g->builder, 
cmp_val, actual_abi_type, ""); + new_val = LLVMBuildZExt(g->builder, new_val, actual_abi_type, ""); + } + } + LLVMAtomicOrdering success_order = to_LLVMAtomicOrdering(instruction->success_order); LLVMAtomicOrdering failure_order = to_LLVMAtomicOrdering(instruction->failure_order); @@ -5268,6 +5312,9 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I if (!handle_is_ptr(g, optional_type)) { LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, ""); + if (actual_abi_type != nullptr) { + payload_val = LLVMBuildTrunc(g->builder, payload_val, get_llvm_type(g, operand_type), ""); + } LLVMValueRef success_bit = LLVMBuildExtractValue(g->builder, result_val, 1, ""); return LLVMBuildSelect(g->builder, success_bit, LLVMConstNull(get_llvm_type(g, child_type)), payload_val, ""); } @@ -5282,6 +5329,9 @@ static LLVMValueRef ir_render_cmpxchg(CodeGen *g, IrExecutableGen *executable, I ir_assert(type_has_bits(g, child_type), &instruction->base); LLVMValueRef payload_val = LLVMBuildExtractValue(g->builder, result_val, 0, ""); + if (actual_abi_type != nullptr) { + payload_val = LLVMBuildTrunc(g->builder, payload_val, get_llvm_type(g, operand_type), ""); + } LLVMValueRef val_ptr = LLVMBuildStructGEP(g->builder, result_loc, maybe_child_index, ""); gen_assign_raw(g, val_ptr, get_pointer_to_type(g, child_type, false), payload_val); @@ -5859,6 +5909,22 @@ static LLVMValueRef ir_render_atomic_rmw(CodeGen *g, IrExecutableGen *executable LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr); LLVMValueRef operand = ir_llvm_value(g, instruction->operand); + LLVMTypeRef actual_abi_type = get_atomic_abi_type(g, instruction->ptr); + if (actual_abi_type != nullptr) { + // operand needs widening and truncating + LLVMValueRef casted_ptr = LLVMBuildBitCast(g->builder, ptr, + LLVMPointerType(actual_abi_type, 0), ""); + LLVMValueRef casted_operand; + if (operand_type->data.integral.is_signed) { + casted_operand = LLVMBuildSExt(g->builder, operand, actual_abi_type, ""); + } else { + casted_operand = LLVMBuildZExt(g->builder, operand, actual_abi_type, ""); + } + LLVMValueRef uncasted_result = ZigLLVMBuildAtomicRMW(g->builder, op, casted_ptr, casted_operand, ordering, + g->is_single_threaded); + return LLVMBuildTrunc(g->builder, uncasted_result, get_llvm_type(g, operand_type), ""); + } + if (get_codegen_ptr_type_bail(g, operand_type) == nullptr) { return ZigLLVMBuildAtomicRMW(g->builder, op, ptr, operand, ordering, g->is_single_threaded); } @@ -5877,6 +5943,17 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutableGen *executabl { LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->ordering); LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr); + + ZigType *operand_type = instruction->ptr->value->type->data.pointer.child_type; + LLVMTypeRef actual_abi_type = get_atomic_abi_type(g, instruction->ptr); + if (actual_abi_type != nullptr) { + // operand needs widening and truncating + ptr = LLVMBuildBitCast(g->builder, ptr, + LLVMPointerType(actual_abi_type, 0), ""); + LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value->type, ""); + LLVMSetOrdering(load_inst, ordering); + return LLVMBuildTrunc(g->builder, load_inst, get_llvm_type(g, operand_type), ""); + } LLVMValueRef load_inst = gen_load(g, ptr, instruction->ptr->value->type, ""); LLVMSetOrdering(load_inst, ordering); return load_inst; @@ -5888,6 +5965,18 @@ static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutableGen *executab LLVMAtomicOrdering ordering = 
to_LLVMAtomicOrdering(instruction->ordering); LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr); LLVMValueRef value = ir_llvm_value(g, instruction->value); + + LLVMTypeRef actual_abi_type = get_atomic_abi_type(g, instruction->ptr); + if (actual_abi_type != nullptr) { + // operand needs widening + ptr = LLVMBuildBitCast(g->builder, ptr, + LLVMPointerType(actual_abi_type, 0), ""); + if (instruction->value->value->type->data.integral.is_signed) { + value = LLVMBuildSExt(g->builder, value, actual_abi_type, ""); + } else { + value = LLVMBuildZExt(g->builder, value, actual_abi_type, ""); + } + } LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value->type); LLVMSetOrdering(store_inst, ordering); return nullptr; diff --git a/src/ir.cpp b/src/ir.cpp index bb2dc75c64..b9875a7efe 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -25208,12 +25208,50 @@ static IrInstGen *ir_analyze_instruction_cmpxchg(IrAnalyze *ira, IrInstSrcCmpxch return ira->codegen->invalid_inst_gen; } - if (instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar && - instr_is_comptime(casted_cmp_value) && instr_is_comptime(casted_new_value)) { - zig_panic("TODO compile-time execution of cmpxchg"); + ZigType *result_type = get_optional_type(ira->codegen, operand_type); + + // special case zero bit types + switch (type_has_one_possible_value(ira->codegen, operand_type)) { + case OnePossibleValueInvalid: + return ira->codegen->invalid_inst_gen; + case OnePossibleValueYes: { + IrInstGen *result = ir_const(ira, &instruction->base.base, result_type); + set_optional_value_to_null(result->value); + return result; + } + case OnePossibleValueNo: + break; + } + + if (instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut != ConstPtrMutRuntimeVar && + instr_is_comptime(casted_cmp_value) && instr_is_comptime(casted_new_value)) { + ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad); + if (ptr_val == nullptr) + return ira->codegen->invalid_inst_gen; + + ZigValue *stored_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.base.source_node); + if (stored_val == nullptr) + return ira->codegen->invalid_inst_gen; + + ZigValue *expected_val = ir_resolve_const(ira, casted_cmp_value, UndefBad); + if (expected_val == nullptr) + return ira->codegen->invalid_inst_gen; + + ZigValue *new_val = ir_resolve_const(ira, casted_new_value, UndefBad); + if (new_val == nullptr) + return ira->codegen->invalid_inst_gen; + + bool eql = const_values_equal(ira->codegen, stored_val, expected_val); + IrInstGen *result = ir_const(ira, &instruction->base.base, result_type); + if (eql) { + copy_const_val(ira->codegen, stored_val, new_val); + set_optional_value_to_null(result->value); + } else { + set_optional_payload(result->value, stored_val); + } + return result; } - ZigType *result_type = get_optional_type(ira->codegen, operand_type); IrInstGen *result_loc; if (handle_is_ptr(ira->codegen, result_type)) { result_loc = ir_resolve_result(ira, &instruction->base.base, instruction->result_loc, @@ -28324,43 +28362,20 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) { if (type_is_invalid(operand_type)) return ira->codegen->builtin_types.entry_invalid; - if (operand_type->id == ZigTypeIdInt) { - if (operand_type->data.integral.bit_count < 8) { - ir_add_error(ira, &op->base, - buf_sprintf("expected integer type 8 bits or larger, found %" PRIu32 "-bit integer type", - operand_type->data.integral.bit_count)); - return 
ira->codegen->builtin_types.entry_invalid; + if (operand_type->id == ZigTypeIdInt || operand_type->id == ZigTypeIdEnum) { + ZigType *int_type; + if (operand_type->id == ZigTypeIdEnum) { + int_type = operand_type->data.enumeration.tag_int_type; + } else { + int_type = operand_type; } + auto bit_count = int_type->data.integral.bit_count; uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch); - if (operand_type->data.integral.bit_count > max_atomic_bits) { + + if (bit_count > max_atomic_bits) { ir_add_error(ira, &op->base, buf_sprintf("expected %" PRIu32 "-bit integer type or smaller, found %" PRIu32 "-bit integer type", - max_atomic_bits, operand_type->data.integral.bit_count)); - return ira->codegen->builtin_types.entry_invalid; - } - if (!is_power_of_2(operand_type->data.integral.bit_count)) { - ir_add_error(ira, &op->base, - buf_sprintf("%" PRIu32 "-bit integer type is not a power of 2", operand_type->data.integral.bit_count)); - return ira->codegen->builtin_types.entry_invalid; - } - } else if (operand_type->id == ZigTypeIdEnum) { - ZigType *int_type = operand_type->data.enumeration.tag_int_type; - if (int_type->data.integral.bit_count < 8) { - ir_add_error(ira, &op->base, - buf_sprintf("expected enum tag type 8 bits or larger, found %" PRIu32 "-bit tag type", - int_type->data.integral.bit_count)); - return ira->codegen->builtin_types.entry_invalid; - } - uint32_t max_atomic_bits = target_arch_largest_atomic_bits(ira->codegen->zig_target->arch); - if (int_type->data.integral.bit_count > max_atomic_bits) { - ir_add_error(ira, &op->base, - buf_sprintf("expected %" PRIu32 "-bit enum tag type or smaller, found %" PRIu32 "-bit tag type", - max_atomic_bits, int_type->data.integral.bit_count)); - return ira->codegen->builtin_types.entry_invalid; - } - if (!is_power_of_2(int_type->data.integral.bit_count)) { - ir_add_error(ira, &op->base, - buf_sprintf("%" PRIu32 "-bit enum tag type is not a power of 2", int_type->data.integral.bit_count)); + max_atomic_bits, bit_count)); return ira->codegen->builtin_types.entry_invalid; } } else if (operand_type->id == ZigTypeIdFloat) { @@ -28371,6 +28386,8 @@ static ZigType *ir_resolve_atomic_operand_type(IrAnalyze *ira, IrInstGen *op) { max_atomic_bits, (uint32_t) operand_type->data.floating.bit_count)); return ira->codegen->builtin_types.entry_invalid; } + } else if (operand_type->id == ZigTypeIdBool) { + // will be treated as u8 } else { Error err; ZigType *operand_ptr_type; @@ -28409,11 +28426,15 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto if (operand_type->id == ZigTypeIdEnum && op != AtomicRmwOp_xchg) { ir_add_error(ira, &instruction->op->base, - buf_sprintf("@atomicRmw on enum only works with .Xchg")); + buf_sprintf("@atomicRmw with enum only allowed with .Xchg")); + return ira->codegen->invalid_inst_gen; + } else if (operand_type->id == ZigTypeIdBool && op != AtomicRmwOp_xchg) { + ir_add_error(ira, &instruction->op->base, + buf_sprintf("@atomicRmw with bool only allowed with .Xchg")); return ira->codegen->invalid_inst_gen; } else if (operand_type->id == ZigTypeIdFloat && op > AtomicRmwOp_sub) { ir_add_error(ira, &instruction->op->base, - buf_sprintf("@atomicRmw with float only works with .Xchg, .Add and .Sub")); + buf_sprintf("@atomicRmw with float only allowed with .Xchg, .Add and .Sub")); return ira->codegen->invalid_inst_gen; } @@ -28434,14 +28455,103 @@ static IrInstGen *ir_analyze_instruction_atomic_rmw(IrAnalyze *ira, IrInstSrcAto return ira->codegen->invalid_inst_gen; } 
- if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar) - { - ir_add_error(ira, &instruction->base.base, - buf_sprintf("compiler bug: TODO compile-time execution of @atomicRmw")); - return ira->codegen->invalid_inst_gen; + // special case zero bit types + switch (type_has_one_possible_value(ira->codegen, operand_type)) { + case OnePossibleValueInvalid: + return ira->codegen->invalid_inst_gen; + case OnePossibleValueYes: + return ir_const_move(ira, &instruction->base.base, get_the_one_possible_value(ira->codegen, operand_type)); + case OnePossibleValueNo: + break; } - return ir_build_atomic_rmw_gen(ira, &instruction->base.base, casted_ptr, casted_operand, op, + IrInst *source_inst = &instruction->base.base; + if (instr_is_comptime(casted_operand) && instr_is_comptime(casted_ptr) && casted_ptr->value->data.x_ptr.mut == ConstPtrMutComptimeVar) { + ZigValue *ptr_val = ir_resolve_const(ira, casted_ptr, UndefBad); + if (ptr_val == nullptr) + return ira->codegen->invalid_inst_gen; + + ZigValue *op1_val = const_ptr_pointee(ira, ira->codegen, ptr_val, instruction->base.base.source_node); + if (op1_val == nullptr) + return ira->codegen->invalid_inst_gen; + + ZigValue *op2_val = ir_resolve_const(ira, casted_operand, UndefBad); + if (op2_val == nullptr) + return ira->codegen->invalid_inst_gen; + + IrInstGen *result = ir_const(ira, source_inst, operand_type); + copy_const_val(ira->codegen, result->value, op1_val); + if (op == AtomicRmwOp_xchg) { + copy_const_val(ira->codegen, op1_val, op2_val); + return result; + } + + if (operand_type->id == ZigTypeIdPointer || operand_type->id == ZigTypeIdOptional) { + ir_add_error(ira, &instruction->ordering->base, + buf_sprintf("TODO comptime @atomicRmw with pointers other than .Xchg")); + return ira->codegen->invalid_inst_gen; + } + + ErrorMsg *msg; + if (op == AtomicRmwOp_min || op == AtomicRmwOp_max) { + IrBinOp bin_op; + if (op == AtomicRmwOp_min) + // store op2 if op2 < op1 + bin_op = IrBinOpCmpGreaterThan; + else + // store op2 if op2 > op1 + bin_op = IrBinOpCmpLessThan; + + IrInstGen *dummy_value = ir_const(ira, source_inst, operand_type); + msg = ir_eval_bin_op_cmp_scalar(ira, source_inst, op1_val, bin_op, op2_val, dummy_value->value); + if (msg != nullptr) { + return ira->codegen->invalid_inst_gen; + } + if (dummy_value->value->data.x_bool) + copy_const_val(ira->codegen, op1_val, op2_val); + } else { + IrBinOp bin_op; + switch (op) { + case AtomicRmwOp_xchg: + case AtomicRmwOp_max: + case AtomicRmwOp_min: + zig_unreachable(); + case AtomicRmwOp_add: + if (operand_type->id == ZigTypeIdFloat) + bin_op = IrBinOpAdd; + else + bin_op = IrBinOpAddWrap; + break; + case AtomicRmwOp_sub: + if (operand_type->id == ZigTypeIdFloat) + bin_op = IrBinOpSub; + else + bin_op = IrBinOpSubWrap; + break; + case AtomicRmwOp_and: + case AtomicRmwOp_nand: + bin_op = IrBinOpBinAnd; + break; + case AtomicRmwOp_or: + bin_op = IrBinOpBinOr; + break; + case AtomicRmwOp_xor: + bin_op = IrBinOpBinXor; + break; + } + msg = ir_eval_math_op_scalar(ira, source_inst, operand_type, op1_val, bin_op, op2_val, op1_val); + if (msg != nullptr) { + return ira->codegen->invalid_inst_gen; + } + if (op == AtomicRmwOp_nand) { + bigint_not(&op1_val->data.x_bigint, &op1_val->data.x_bigint, + operand_type->data.integral.bit_count, operand_type->data.integral.is_signed); + } + } + return result; + } + + return ir_build_atomic_rmw_gen(ira, source_inst, casted_ptr, casted_operand, op, ordering, operand_type); } @@ -28513,6 
+28623,16 @@ static IrInstGen *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstSrcA return ira->codegen->invalid_inst_gen; } + // special case zero bit types + switch (type_has_one_possible_value(ira->codegen, operand_type)) { + case OnePossibleValueInvalid: + return ira->codegen->invalid_inst_gen; + case OnePossibleValueYes: + return ir_const_void(ira, &instruction->base.base); + case OnePossibleValueNo: + break; + } + if (instr_is_comptime(casted_value) && instr_is_comptime(casted_ptr)) { IrInstGen *result = ir_analyze_store_ptr(ira, &instruction->base.base, casted_ptr, value, false); result->value->type = ira->codegen->builtin_types.entry_void; diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 5078453332..f894a152a7 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -49,6 +49,15 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { "tmp.zig:5:9: error: resume in noasync scope", }); + cases.add("atomicrmw with bool op not .Xchg", + \\export fn entry() void { + \\ var x = false; + \\ _ = @atomicRmw(bool, &x, .Add, true, .SeqCst); + \\} + , &[_][]const u8{ + "tmp.zig:3:30: error: @atomicRmw with bool only allowed with .Xchg", + }); + cases.addTest("@TypeOf with no arguments", \\export fn entry() void { \\ _ = @TypeOf(); @@ -357,7 +366,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ _ = @atomicRmw(f32, &x, .And, 2, .SeqCst); \\} , &[_][]const u8{ - "tmp.zig:3:29: error: @atomicRmw with float only works with .Xchg, .Add and .Sub", + "tmp.zig:3:29: error: @atomicRmw with float only allowed with .Xchg, .Add and .Sub", }); cases.add("intToPtr with misaligned address", @@ -574,7 +583,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void { \\ _ = @atomicRmw(E, &x, .Add, .b, .SeqCst); \\} , &[_][]const u8{ - "tmp.zig:9:27: error: @atomicRmw on enum only works with .Xchg", + "tmp.zig:9:27: error: @atomicRmw with enum only allowed with .Xchg", }); cases.add("disallow coercion from non-null-terminated pointer to null-terminated pointer", diff --git a/test/stage1/behavior/atomics.zig b/test/stage1/behavior/atomics.zig index 0347f6f94a..3e6d0b3d0f 100644 --- a/test/stage1/behavior/atomics.zig +++ b/test/stage1/behavior/atomics.zig @@ -2,29 +2,32 @@ const std = @import("std"); const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; const builtin = @import("builtin"); -const AtomicRmwOp = builtin.AtomicRmwOp; -const AtomicOrder = builtin.AtomicOrder; test "cmpxchg" { + testCmpxchg(); + comptime testCmpxchg(); +} + +fn testCmpxchg() void { var x: i32 = 1234; - if (@cmpxchgWeak(i32, &x, 99, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| { + if (@cmpxchgWeak(i32, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| { expect(x1 == 1234); } else { @panic("cmpxchg should have failed"); } - while (@cmpxchgWeak(i32, &x, 1234, 5678, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| { + while (@cmpxchgWeak(i32, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| { expect(x1 == 1234); } expect(x == 5678); - expect(@cmpxchgStrong(i32, &x, 5678, 42, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null); + expect(@cmpxchgStrong(i32, &x, 5678, 42, .SeqCst, .SeqCst) == null); expect(x == 42); } test "fence" { var x: i32 = 1234; - @fence(AtomicOrder.SeqCst); + @fence(.SeqCst); x = 5678; } @@ -36,18 +39,18 @@ test "atomicrmw and atomicload" { } fn testAtomicRmw(ptr: *u8) void { - const prev_value = @atomicRmw(u8, ptr, AtomicRmwOp.Xchg, 42, AtomicOrder.SeqCst); + const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .SeqCst); expect(prev_value == 200); 
comptime { var x: i32 = 1234; const y: i32 = 12345; - expect(@atomicLoad(i32, &x, AtomicOrder.SeqCst) == 1234); - expect(@atomicLoad(i32, &y, AtomicOrder.SeqCst) == 12345); + expect(@atomicLoad(i32, &x, .SeqCst) == 1234); + expect(@atomicLoad(i32, &y, .SeqCst) == 12345); } } fn testAtomicLoad(ptr: *u8) void { - const x = @atomicLoad(u8, ptr, AtomicOrder.SeqCst); + const x = @atomicLoad(u8, ptr, .SeqCst); expect(x == 42); } @@ -56,18 +59,18 @@ test "cmpxchg with ptr" { var data2: i32 = 5678; var data3: i32 = 9101; var x: *i32 = &data1; - if (@cmpxchgWeak(*i32, &x, &data2, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| { + if (@cmpxchgWeak(*i32, &x, &data2, &data3, .SeqCst, .SeqCst)) |x1| { expect(x1 == &data1); } else { @panic("cmpxchg should have failed"); } - while (@cmpxchgWeak(*i32, &x, &data1, &data3, AtomicOrder.SeqCst, AtomicOrder.SeqCst)) |x1| { + while (@cmpxchgWeak(*i32, &x, &data1, &data3, .SeqCst, .SeqCst)) |x1| { expect(x1 == &data1); } expect(x == &data3); - expect(@cmpxchgStrong(*i32, &x, &data3, &data2, AtomicOrder.SeqCst, AtomicOrder.SeqCst) == null); + expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .SeqCst, .SeqCst) == null); expect(x == &data2); } @@ -146,9 +149,11 @@ fn testAtomicStore() void { } test "atomicrmw with floats" { + // TODO https://github.com/ziglang/zig/issues/4457 if (builtin.arch == .aarch64 or builtin.arch == .arm or builtin.arch == .riscv64) return error.SkipZigTest; testAtomicRmwFloat(); + comptime testAtomicRmwFloat(); } fn testAtomicRmwFloat() void { @@ -161,3 +166,55 @@ fn testAtomicRmwFloat() void { _ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst); expect(x == 4); } + +test "atomicrmw with ints" { + testAtomicRmwInt(); + comptime testAtomicRmwInt(); +} + +fn testAtomicRmwInt() void { + var x: u8 = 1; + var res = @atomicRmw(u8, &x, .Xchg, 3, .SeqCst); + expect(x == 3 and res == 1); + _ = @atomicRmw(u8, &x, .Add, 3, .SeqCst); + expect(x == 6); + _ = @atomicRmw(u8, &x, .Sub, 1, .SeqCst); + expect(x == 5); + _ = @atomicRmw(u8, &x, .And, 4, .SeqCst); + expect(x == 4); + _ = @atomicRmw(u8, &x, .Nand, 4, .SeqCst); + expect(x == 0xfb); + _ = @atomicRmw(u8, &x, .Or, 6, .SeqCst); + expect(x == 0xff); + _ = @atomicRmw(u8, &x, .Xor, 2, .SeqCst); + expect(x == 0xfd); + + // TODO https://github.com/ziglang/zig/issues/4724 + if (builtin.arch == .mipsel) return; + _ = @atomicRmw(u8, &x, .Max, 1, .SeqCst); + expect(x == 0xfd); + _ = @atomicRmw(u8, &x, .Min, 1, .SeqCst); + expect(x == 1); +} + + +test "atomics with different types" { + testAtomicsWithType(bool, true, false); + inline for (.{ u1, i5, u15 }) |T| { + var x: T = 0; + testAtomicsWithType(T, 0, 1); + } + testAtomicsWithType(u0, 0, 0); + testAtomicsWithType(i0, 0, 0); +} + +fn testAtomicsWithType(comptime T: type, a: T, b: T) void { + var x: T = b; + @atomicStore(T, &x, a, .SeqCst); + expect(x == a); + expect(@atomicLoad(T, &x, .SeqCst) == a); + expect(@atomicRmw(T, &x, .Xchg, b, .SeqCst) == a); + expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst) == null); + if (@sizeOf(T) != 0) + expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst).? == a); +}
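Beyond the new operand types, the ir.cpp changes above also evaluate @atomicRmw and @cmpxchgStrong/@cmpxchgWeak at compile time. A minimal sketch of that, assuming the same semantics the comptime behavior tests exercise (the values are illustrative):

```zig
const expect = @import("std").testing.expect;

test "atomic builtins at comptime" {
    comptime {
        var x: u8 = 1;
        // previous value is returned; x becomes 3
        expect(@atomicRmw(u8, &x, .Add, 2, .SeqCst) == 1);
        expect(x == 3);
        // expected value matches, so the exchange succeeds
        expect(@cmpxchgStrong(u8, &x, 3, 7, .SeqCst, .SeqCst) == null);
        expect(x == 7);
    }
}
```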