diff --git a/doc/langref.html.in b/doc/langref.html.in
index bca12c62d7..5d989c1bbd 100644
--- a/doc/langref.html.in
+++ b/doc/langref.html.in
@@ -6612,14 +6612,14 @@ async fn func(y: *i32) void {
         This builtin function atomically dereferences a pointer and returns the value.
-        {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
-        or an integer whose bit count meets these requirements:
+        {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#} or
+        an integer whose bit count meets these requirements:
         TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
         we can remove this restriction
@@ -6660,6 +6660,25 @@ async fn func(y: *i32) void {
+    {#header_open|@atomicStore#}
+      {#syntax#}@atomicStore(comptime T: type, ptr: *T, value: T, comptime ordering: builtin.AtomicOrder) void{#endsyntax#}
+      <p>
+        This builtin function atomically stores a value.
+      </p>
+      <p>
+        {#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#} or
+        an integer whose bit count meets these requirements:
+      </p>
+      <p>
+        TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
+        we can remove this restriction
+      </p>
+    {#header_close#}
     {#header_open|@bitCast#}
       {#syntax#}@bitCast(comptime DestType: type, value: var) DestType{#endsyntax#}
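A minimal usage sketch of the two builtins documented above; the test name and the flag variable are illustrative, not part of this patch:

    const testing = @import("std").testing;

    test "atomicStore and atomicLoad round trip" {
        var flag: u8 = 0; // stands in for a byte shared between threads
        @atomicStore(u8, &flag, 1, .SeqCst);
        testing.expect(@atomicLoad(u8, &flag, .SeqCst) == 1);
    }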
diff --git a/lib/std/atomic/queue.zig b/lib/std/atomic/queue.zig
index 173355eb3b..9d6b15ff4a 100644
--- a/lib/std/atomic/queue.zig
+++ b/lib/std/atomic/queue.zig
@@ -199,7 +199,7 @@ test "std.atomic.Queue" {
for (putters) |t|
t.wait();
- _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
for (getters) |t|
t.wait();
diff --git a/lib/std/atomic/stack.zig b/lib/std/atomic/stack.zig
index 664191eb77..4246e15985 100644
--- a/lib/std/atomic/stack.zig
+++ b/lib/std/atomic/stack.zig
@@ -128,7 +128,7 @@ test "std.atomic.stack" {
for (putters) |t|
t.wait();
- _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
for (getters) |t|
t.wait();
}
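The queue and stack tests only used Xchg to publish a value and discarded the previous one, so a plain atomic store is equivalent for their purposes. A hypothetical before/after pair (function names invented for illustration):

    fn setPutsDoneOld(flag: *u8) void {
        // Read-modify-write: atomically swaps in 1 and returns the old
        // value, which these tests threw away.
        _ = @atomicRmw(u8, flag, .Xchg, 1, .SeqCst);
    }

    fn setPutsDoneNew(flag: *u8) void {
        // Same store effect and ordering, without producing an old value.
        @atomicStore(u8, flag, 1, .SeqCst);
    }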
diff --git a/lib/std/event/channel.zig b/lib/std/event/channel.zig
index 2ea99d234d..ac5a65e1b0 100644
--- a/lib/std/event/channel.zig
+++ b/lib/std/event/channel.zig
@@ -161,7 +161,7 @@ pub fn Channel(comptime T: type) type {
fn dispatch(self: *SelfChannel) void {
// set the "need dispatch" flag
- _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 1, .SeqCst);
+ @atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
lock: while (true) {
// set the lock flag
@@ -169,7 +169,7 @@ pub fn Channel(comptime T: type) type {
if (prev_lock != 0) return;
// clear the need_dispatch flag since we're about to do it
- _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
+ @atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
while (true) {
one_dispatch: {
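dispatch() is a doorbell pattern: every caller rings need_dispatch, only the caller that wins the lock flag services the channel, and the winner clears the bell before working so a ring that arrives mid-service re-arms it. A condensed sketch, with an invented helper name and parameters:

    fn ringAndTryDispatch(need_dispatch: *u8, dispatch_lock: *u8) bool {
        // Ring the bell: some dispatcher must run after this point.
        @atomicStore(u8, need_dispatch, 1, .SeqCst);
        // Try to become that dispatcher.
        if (@atomicRmw(u8, dispatch_lock, .Xchg, 1, .SeqCst) != 0) {
            // Someone else holds the lock and will observe the bell.
            return false;
        }
        // Clear the bell before working so later rings are not lost.
        @atomicStore(u8, need_dispatch, 0, .SeqCst);
        return true; // the caller now dispatches, then drops the lock
    }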
diff --git a/lib/std/event/future.zig b/lib/std/event/future.zig
index 43593b348a..5261db990c 100644
--- a/lib/std/event/future.zig
+++ b/lib/std/event/future.zig
@@ -62,12 +62,12 @@ pub fn Future(comptime T: type) type {
pub async fn start(self: *Self) ?*T {
const state = @cmpxchgStrong(Available, &self.available, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return null;
switch (state) {
- 1 => {
+ .Started => {
const held = self.lock.acquire();
held.release();
return &self.data;
},
- 2 => return &self.data,
+ .Finished => return &self.data,
else => unreachable,
}
}
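The switch rewrite leans on @cmpxchgStrong's contract: it returns null when the swap succeeds and the value it actually observed when it fails, so the orelse branch is the success path and the failure path can switch on enum tags directly. A standalone sketch with an invented enum:

    const State = enum(u8) {
        NotStarted,
        Started,
        Finished,
    };

    // Returns null if this caller won the race to start the work,
    // otherwise the state some other caller already advanced to.
    fn tryClaim(state: *State) ?State {
        return @cmpxchgStrong(State, state, .NotStarted, .Started, .SeqCst, .SeqCst);
    }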
diff --git a/lib/std/event/lock.zig b/lib/std/event/lock.zig
index 576a09064f..a95c5bf7e2 100644
--- a/lib/std/event/lock.zig
+++ b/lib/std/event/lock.zig
@@ -31,8 +31,8 @@ pub const Lock = struct {
}
// We need to release the lock.
- _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
- _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+ @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+ @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
// There might be a queue item. If we know the queue is empty, we can be done,
// because the other actor will try to obtain the lock.
@@ -56,8 +56,8 @@ pub const Lock = struct {
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
- _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+ @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+ @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
// Find out if we can be done.
if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
@@ -101,7 +101,7 @@ pub const Lock = struct {
// We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
// will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst);
+ @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
if (old_bit == 0) {
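The two stores in the release paths above are deliberately ordered: queue_empty_bit goes up before shared_bit goes down, so an actor that enqueues (and clears queue_empty_bit) afterwards will also attempt to take the now-free lock, and the re-check of queue_empty_bit closes the remaining window. Condensed into a hypothetical helper:

    fn releaseAndRecheck(queue_empty_bit: *u8, shared_bit: *u8) bool {
        // Claim "queue looks empty" first ...
        @atomicStore(u8, queue_empty_bit, 1, .SeqCst);
        // ... then free the lock. An enqueuer racing in between clears
        // queue_empty_bit and then tries to grab shared_bit itself.
        @atomicStore(u8, shared_bit, 0, .SeqCst);
        // True means nothing was enqueued meanwhile and we are done.
        return @atomicLoad(u8, queue_empty_bit, .SeqCst) == 1;
    }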
diff --git a/lib/std/event/loop.zig b/lib/std/event/loop.zig
index 588cd3c8b5..8f01c19746 100644
--- a/lib/std/event/loop.zig
+++ b/lib/std/event/loop.zig
@@ -814,7 +814,7 @@ pub const Loop = struct {
_ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
},
.linux => {
- _ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+ @atomicStore(i32, &self.os_data.fs_queue_item, 1, AtomicOrder.SeqCst);
const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
switch (os.linux.getErrno(rc)) {
0 => {},
@@ -837,7 +837,7 @@ pub const Loop = struct {
fn posixFsRun(self: *Loop) void {
while (true) {
if (builtin.os == .linux) {
- _ = @atomicRmw(i32, &self.os_data.fs_queue_item, .Xchg, 0, .SeqCst);
+ @atomicStore(i32, &self.os_data.fs_queue_item, 0, .SeqCst);
}
while (self.os_data.fs_queue.get()) |node| {
switch (node.data.msg) {
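On Linux the loop publishes the queue state with an atomic store and only then wakes a futex waiter, so the woken thread is guaranteed to re-read the new value. A sketch of that wake side, reusing the futex wrapper already called in the diff (error handling elided, helper name invented):

    const linux = @import("std").os.linux;

    fn signalQueue(queue_item: *i32) void {
        // Publish first: a waiter woken below must observe the 1.
        @atomicStore(i32, queue_item, 1, .SeqCst);
        // Wake at most one thread blocked on this word.
        _ = linux.futex_wake(queue_item, linux.FUTEX_WAKE, 1);
    }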
diff --git a/lib/std/event/rwlock.zig b/lib/std/event/rwlock.zig
index 3a64b9df8c..ec4ab8f6d0 100644
--- a/lib/std/event/rwlock.zig
+++ b/lib/std/event/rwlock.zig
@@ -40,7 +40,7 @@ pub const RwLock = struct {
return;
}
- _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+ @atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
@@ -64,15 +64,15 @@ pub const RwLock = struct {
// We need to release the write lock. Check if any readers are waiting to grab the lock.
if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
// Switch to a read lock.
- _ = @atomicRmw(State, &self.lock.shared_state, .Xchg, .ReadLock, .SeqCst);
+ @atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
while (self.lock.reader_queue.get()) |node| {
global_event_loop.onNextTick(node);
}
return;
}
- _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
- _ = @atomicRmw(State, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst);
+ @atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
+ @atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
self.lock.commonPostUnlock();
}
@@ -113,7 +113,7 @@ pub const RwLock = struct {
// We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst);
+ @atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);
// Here we don't care if we are the one to do the locking or if it was already locked for reading.
const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
@@ -144,7 +144,7 @@ pub const RwLock = struct {
// We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
// some actor will attempt to grab the lock.
- _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst);
+ @atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);
// Here we must be the one to acquire the write lock. It cannot already be locked.
if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
@@ -176,8 +176,8 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
- _ = @atomicRmw(State, &self.shared_state, .Xchg, .Unlocked, .SeqCst);
+ @atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
+ @atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
continue;
}
@@ -195,7 +195,7 @@ pub const RwLock = struct {
return;
}
// Release the lock again.
- _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+ @atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
// Didn't unlock. Someone else's problem.
return;
diff --git a/lib/std/os/linux.zig b/lib/std/os/linux.zig
index 618a21f456..7e2f14021f 100644
--- a/lib/std/os/linux.zig
+++ b/lib/std/os/linux.zig
@@ -531,7 +531,7 @@ extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize {
const ptr = @intToPtr(?*const c_void, vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM));
// Note that we may not have a VDSO at all, update the stub address anyway
// so that clock_gettime will fall back on the good old (and slow) syscall
- _ = @cmpxchgStrong(?*const c_void, &vdso_clock_gettime, &init_vdso_clock_gettime, ptr, .Monotonic, .Monotonic);
+ @atomicStore(?*const c_void, &vdso_clock_gettime, ptr, .Monotonic);
// Call into the VDSO if available
if (ptr) |fn_ptr| {
const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
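The replaced cmpxchg was guarding a stub swap that needs no guard: every thread racing through init resolves the same VDSO address, so last-writer-wins with a plain monotonic store is equivalent and cheaper. The idea reduced to a sketch with invented names:

    // Hypothetical cached function-pointer slot, like the vdso stub.
    var cached_impl: ?*const c_void = null;

    fn installImpl(resolved: ?*const c_void) void {
        // All racers store the same resolved address, so any winner
        // leaves the slot correct; no compare-and-swap is required.
        @atomicStore(?*const c_void, &cached_impl, resolved, .Monotonic);
    }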
diff --git a/lib/std/spinlock.zig b/lib/std/spinlock.zig
index 3bed3d3891..bd811f709c 100644
--- a/lib/std/spinlock.zig
+++ b/lib/std/spinlock.zig
@@ -11,8 +11,7 @@ pub const SpinLock = struct {
spinlock: *SpinLock,
pub fn release(self: Held) void {
- // TODO: @atomicStore() https://github.com/ziglang/zig/issues/2995
- assert(@atomicRmw(u8, &self.spinlock.lock, .Xchg, 0, .Release) == 1);
+ @atomicStore(u8, &self.spinlock.lock, 0, .Release);
}
};
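With the builtin in place, the TODO in release() can go: unlocking is just a release-ordered store of 0. A minimal sketch of the full acquire/release pairing, assuming the single-u8 lock word std.SpinLock used at the time:

    const SpinLockSketch = struct {
        lock: u8, // 0 = unlocked, 1 = held

        fn acquire(self: *SpinLockSketch) void {
            // Spin until the exchange observes 0; .Acquire orders the
            // critical section after the winning exchange.
            while (@atomicRmw(u8, &self.lock, .Xchg, 1, .Acquire) != 0) {}
        }

        fn release(self: *SpinLockSketch) void {
            // .Release orders the critical section before the store,
            // pairing with the acquire exchange above.
            @atomicStore(u8, &self.lock, 0, .Release);
        }
    };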
diff --git a/src/all_types.hpp b/src/all_types.hpp
index 1464dfba59..25815ef64a 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -1700,6 +1700,7 @@ enum BuiltinFnId {
BuiltinFnIdErrorReturnTrace,
BuiltinFnIdAtomicRmw,
BuiltinFnIdAtomicLoad,
+ BuiltinFnIdAtomicStore,
BuiltinFnIdHasDecl,
BuiltinFnIdUnionInit,
BuiltinFnIdFrameAddress,
@@ -2569,6 +2570,7 @@ enum IrInstructionId {
IrInstructionIdErrorUnion,
IrInstructionIdAtomicRmw,
IrInstructionIdAtomicLoad,
+ IrInstructionIdAtomicStore,
IrInstructionIdSaveErrRetAddr,
IrInstructionIdAddImplicitReturnType,
IrInstructionIdErrSetCast,
@@ -3714,6 +3716,16 @@ struct IrInstructionAtomicLoad {
AtomicOrder resolved_ordering;
};
+struct IrInstructionAtomicStore {
+ IrInstruction base;
+
+ IrInstruction *operand_type;
+ IrInstruction *ptr;
+ IrInstruction *value;
+ IrInstruction *ordering;
+ AtomicOrder resolved_ordering;
+};
+
struct IrInstructionSaveErrRetAddr {
IrInstruction base;
};
diff --git a/src/codegen.cpp b/src/codegen.cpp
index 387c6120c2..a0666a3522 100644
--- a/src/codegen.cpp
+++ b/src/codegen.cpp
@@ -5655,6 +5655,17 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
return load_inst;
}
+static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutable *executable,
+ IrInstructionAtomicStore *instruction)
+{
+ LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering);
+ LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
+ LLVMValueRef value = ir_llvm_value(g, instruction->value);
+ LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value.type);
+ LLVMSetOrdering(store_inst, ordering);
+ return nullptr;
+}
+
static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) {
LLVMValueRef op = ir_llvm_value(g, instruction->op1);
assert(instruction->base.value.type->id == ZigTypeIdFloat);
@@ -6258,6 +6269,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
case IrInstructionIdAtomicLoad:
return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
+ case IrInstructionIdAtomicStore:
+ return ir_render_atomic_store(g, executable, (IrInstructionAtomicStore *)instruction);
case IrInstructionIdSaveErrRetAddr:
return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
case IrInstructionIdFloatOp:
@@ -8074,6 +8087,7 @@ static void define_builtin_fns(CodeGen *g) {
create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0);
create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5);
create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3);
+ create_builtin_fn(g, BuiltinFnIdAtomicStore, "atomicStore", 4);
create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2);
create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1);
create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2);
diff --git a/src/ir.cpp b/src/ir.cpp
index 676f69dea7..b6cc3cd4cb 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -1010,6 +1010,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) {
return IrInstructionIdAtomicLoad;
}
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicStore *) {
+ return IrInstructionIdAtomicStore;
+}
+
static constexpr IrInstructionId ir_instruction_id(IrInstructionSaveErrRetAddr *) {
return IrInstructionIdSaveErrRetAddr;
}
@@ -3188,6 +3192,25 @@ static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode
return &instruction->base;
}
+static IrInstruction *ir_build_atomic_store(IrBuilder *irb, Scope *scope, AstNode *source_node,
+ IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *value,
+ IrInstruction *ordering, AtomicOrder resolved_ordering)
+{
+ IrInstructionAtomicStore *instruction = ir_build_instruction