Merge pull request #3675 from Vexu/atomic-store
Add @atomicStore builtin
commit 8bae70454d
@@ -6612,14 +6612,14 @@ async fn func(y: *i32) void {
 This builtin function atomically dereferences a pointer and returns the value.
 </p>
 <p>
-{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#},
-or an integer whose bit count meets these requirements:
+{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}
+an integer whose bit count meets these requirements:
 </p>
 <ul>
 <li>At least 8</li>
 <li>At most the same as usize</li>
 <li>Power of 2</li>
-</ul>
+</ul> or an enum with a valid integer tag type.
 <p>
 TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
 we can remove this restriction
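As a quick illustration of the amended @atomicLoad documentation, here is a hedged sketch (not part of this commit) of atomically loading an enum value, in the style of the behavior tests further down; the Value enum and test name are assumptions for the example:

const expect = @import("std").testing.expect;

const Value = enum(u8) {
    a,
    b,
};

test "illustrative @atomicLoad on an enum" {
    var v = Value.a;
    expect(@atomicLoad(Value, &v, .SeqCst) == .a);
}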
@@ -6660,6 +6660,25 @@ async fn func(y: *i32) void {
 <li>{#syntax#}.Min{#endsyntax#} - stores the operand if it is smaller. Supports integers and floats.</li>
 </ul>
 {#header_close#}
+{#header_open|@atomicStore#}
+<pre>{#syntax#}@atomicStore(comptime T: type, ptr: *T, value: T, comptime ordering: builtin.AtomicOrder) void{#endsyntax#}</pre>
+<p>
+This builtin function atomically stores a value.
+</p>
+<p>
+{#syntax#}T{#endsyntax#} must be a pointer type, a {#syntax#}bool{#endsyntax#}
+an integer whose bit count meets these requirements:
+</p>
+<ul>
+<li>At least 8</li>
+<li>At most the same as usize</li>
+<li>Power of 2</li>
+</ul> or an enum with a valid integer tag type.
+<p>
+TODO right now bool is not accepted. Also I think we could make non powers of 2 work fine, maybe
+we can remove this restriction
+</p>
+{#header_close#}
 {#header_open|@bitCast#}
 <pre>{#syntax#}@bitCast(comptime DestType: type, value: var) DestType{#endsyntax#}</pre>
 <p>
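A short usage sketch of the newly documented builtin (not part of the diff; it assumes the signature shown above and std.testing.expect):

const expect = @import("std").testing.expect;

test "illustrative @atomicStore usage" {
    var flag: u8 = 0;
    // Release is a permitted store ordering; Acquire and AcqRel are rejected,
    // as the compile-error test added later in this commit demonstrates.
    @atomicStore(u8, &flag, 1, .Release);
    expect(@atomicLoad(u8, &flag, .Acquire) == 1);
}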
@@ -199,7 +199,7 @@ test "std.atomic.Queue" {
 for (putters) |t|
 t.wait();
-_ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+@atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
 for (getters) |t|
 t.wait();
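The standard-library changes below all make the same mechanical substitution: a store that was previously emulated with an exchange whose result was discarded becomes a direct atomic store. A hedged sketch of the pattern (illustrative only; the flag variable and test name are assumptions, not taken from the diff):

test "illustrative Xchg-to-atomicStore substitution" {
    var flag: u8 = 0;
    // Before: a read-modify-write used only for its side effect; the old value is discarded.
    _ = @atomicRmw(u8, &flag, .Xchg, 1, .SeqCst);
    // After: the new builtin expresses the store directly and returns void.
    @atomicStore(u8, &flag, 1, .SeqCst);
}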
@@ -128,7 +128,7 @@ test "std.atomic.stack" {
 for (putters) |t|
 t.wait();
-_ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+@atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
 for (getters) |t|
 t.wait();
 }
@@ -161,7 +161,7 @@ pub fn Channel(comptime T: type) type {
 fn dispatch(self: *SelfChannel) void {
 // set the "need dispatch" flag
-_ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 1, .SeqCst);
+@atomicStore(u8, &self.need_dispatch, 1, .SeqCst);
 lock: while (true) {
 // set the lock flag
@@ -169,7 +169,7 @@ pub fn Channel(comptime T: type) type {
 if (prev_lock != 0) return;
 // clear the need_dispatch flag since we're about to do it
-_ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
+@atomicStore(u8, &self.need_dispatch, 0, .SeqCst);
 while (true) {
 one_dispatch: {
@@ -62,12 +62,12 @@ pub fn Future(comptime T: type) type {
 pub async fn start(self: *Self) ?*T {
 const state = @cmpxchgStrong(Available, &self.available, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return null;
 switch (state) {
-1 => {
+.Started => {
 const held = self.lock.acquire();
 held.release();
 return &self.data;
 },
-2 => return &self.data,
+.Finished => return &self.data,
 else => unreachable,
 }
 }
@@ -31,8 +31,8 @@ pub const Lock = struct {
 }
 // We need to release the lock.
-_ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
-_ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+@atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+@atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
 // There might be a queue item. If we know the queue is empty, we can be done,
 // because the other actor will try to obtain the lock.
@@ -56,8 +56,8 @@ pub const Lock = struct {
 }
 // Release the lock again.
-_ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
-_ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+@atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+@atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);
 // Find out if we can be done.
 if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
@@ -101,7 +101,7 @@ pub const Lock = struct {
 // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
 // will attempt to grab the lock.
-_ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst);
+@atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);
 const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
 if (old_bit == 0) {
@@ -814,7 +814,7 @@ pub const Loop = struct {
 _ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
 },
 .linux => {
-_ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+@atomicStore(i32, &self.os_data.fs_queue_item, 1, AtomicOrder.SeqCst);
 const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
 switch (os.linux.getErrno(rc)) {
 0 => {},
@@ -837,7 +837,7 @@ pub const Loop = struct {
 fn posixFsRun(self: *Loop) void {
 while (true) {
 if (builtin.os == .linux) {
-_ = @atomicRmw(i32, &self.os_data.fs_queue_item, .Xchg, 0, .SeqCst);
+@atomicStore(i32, &self.os_data.fs_queue_item, 0, .SeqCst);
 }
 while (self.os_data.fs_queue.get()) |node| {
 switch (node.data.msg) {
@@ -40,7 +40,7 @@ pub const RwLock = struct {
 return;
 }
-_ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+@atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
 if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
 // Didn't unlock. Someone else's problem.
 return;
@@ -64,15 +64,15 @@ pub const RwLock = struct {
 // We need to release the write lock. Check if any readers are waiting to grab the lock.
 if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
 // Switch to a read lock.
-_ = @atomicRmw(State, &self.lock.shared_state, .Xchg, .ReadLock, .SeqCst);
+@atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
 while (self.lock.reader_queue.get()) |node| {
 global_event_loop.onNextTick(node);
 }
 return;
 }
-_ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
-_ = @atomicRmw(State, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst);
+@atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
+@atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);
 self.lock.commonPostUnlock();
 }
@@ -113,7 +113,7 @@ pub const RwLock = struct {
 // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
 // some actor will attempt to grab the lock.
-_ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst);
+@atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);
 // Here we don't care if we are the one to do the locking or if it was already locked for reading.
 const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
@@ -144,7 +144,7 @@ pub const RwLock = struct {
 // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
 // some actor will attempt to grab the lock.
-_ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst);
+@atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);
 // Here we must be the one to acquire the write lock. It cannot already be locked.
 if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
@@ -176,8 +176,8 @@ pub const RwLock = struct {
 return;
 }
 // Release the lock again.
-_ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
-_ = @atomicRmw(State, &self.shared_state, .Xchg, .Unlocked, .SeqCst);
+@atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
+@atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
 continue;
 }
@@ -195,7 +195,7 @@ pub const RwLock = struct {
 return;
 }
 // Release the lock again.
-_ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+@atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
 if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
 // Didn't unlock. Someone else's problem.
 return;
@@ -531,7 +531,7 @@ extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize {
 const ptr = @intToPtr(?*const c_void, vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM));
 // Note that we may not have a VDSO at all, update the stub address anyway
 // so that clock_gettime will fall back on the good old (and slow) syscall
-_ = @cmpxchgStrong(?*const c_void, &vdso_clock_gettime, &init_vdso_clock_gettime, ptr, .Monotonic, .Monotonic);
+@atomicStore(?*const c_void, &vdso_clock_gettime, ptr, .Monotonic);
 // Call into the VDSO if available
 if (ptr) |fn_ptr| {
 const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
@@ -11,8 +11,7 @@ pub const SpinLock = struct {
 spinlock: *SpinLock,
 pub fn release(self: Held) void {
-// TODO: @atomicStore() https://github.com/ziglang/zig/issues/2995
-assert(@atomicRmw(u8, &self.spinlock.lock, .Xchg, 0, .Release) == 1);
+@atomicStore(u8, &self.spinlock.lock, 0, .Release);
 }
 };
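For context, the release above pairs with an exchange-based acquire elsewhere in the file; a hedged sketch of the overall idiom (illustrative free functions, not the std implementation, operating on an assumed u8 lock byte):

fn acquire(lock: *u8) void {
    // Spin until the previous value was 0, i.e. this caller won the exchange.
    while (@atomicRmw(u8, lock, .Xchg, 1, .Acquire) != 0) {}
}

fn release(lock: *u8) void {
    // A plain atomic store with Release ordering publishes the critical section;
    // no read-modify-write result is needed, which is what the diff above relies on.
    @atomicStore(u8, lock, 0, .Release);
}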
@@ -1700,6 +1700,7 @@ enum BuiltinFnId {
 BuiltinFnIdErrorReturnTrace,
 BuiltinFnIdAtomicRmw,
 BuiltinFnIdAtomicLoad,
+BuiltinFnIdAtomicStore,
 BuiltinFnIdHasDecl,
 BuiltinFnIdUnionInit,
 BuiltinFnIdFrameAddress,
@@ -2569,6 +2570,7 @@ enum IrInstructionId {
 IrInstructionIdErrorUnion,
 IrInstructionIdAtomicRmw,
 IrInstructionIdAtomicLoad,
+IrInstructionIdAtomicStore,
 IrInstructionIdSaveErrRetAddr,
 IrInstructionIdAddImplicitReturnType,
 IrInstructionIdErrSetCast,
@@ -3714,6 +3716,16 @@ struct IrInstructionAtomicLoad {
 AtomicOrder resolved_ordering;
 };
+struct IrInstructionAtomicStore {
+IrInstruction base;
+
+IrInstruction *operand_type;
+IrInstruction *ptr;
+IrInstruction *value;
+IrInstruction *ordering;
+AtomicOrder resolved_ordering;
+};
+
 struct IrInstructionSaveErrRetAddr {
 IrInstruction base;
 };
@@ -5655,6 +5655,17 @@ static LLVMValueRef ir_render_atomic_load(CodeGen *g, IrExecutable *executable,
 return load_inst;
 }
+static LLVMValueRef ir_render_atomic_store(CodeGen *g, IrExecutable *executable,
+IrInstructionAtomicStore *instruction)
+{
+LLVMAtomicOrdering ordering = to_LLVMAtomicOrdering(instruction->resolved_ordering);
+LLVMValueRef ptr = ir_llvm_value(g, instruction->ptr);
+LLVMValueRef value = ir_llvm_value(g, instruction->value);
+LLVMValueRef store_inst = gen_store(g, value, ptr, instruction->ptr->value.type);
+LLVMSetOrdering(store_inst, ordering);
+return nullptr;
+}
+
 static LLVMValueRef ir_render_float_op(CodeGen *g, IrExecutable *executable, IrInstructionFloatOp *instruction) {
 LLVMValueRef op = ir_llvm_value(g, instruction->op1);
 assert(instruction->base.value.type->id == ZigTypeIdFloat);
@@ -6258,6 +6269,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable,
 return ir_render_atomic_rmw(g, executable, (IrInstructionAtomicRmw *)instruction);
 case IrInstructionIdAtomicLoad:
 return ir_render_atomic_load(g, executable, (IrInstructionAtomicLoad *)instruction);
+case IrInstructionIdAtomicStore:
+return ir_render_atomic_store(g, executable, (IrInstructionAtomicStore *)instruction);
 case IrInstructionIdSaveErrRetAddr:
 return ir_render_save_err_ret_addr(g, executable, (IrInstructionSaveErrRetAddr *)instruction);
 case IrInstructionIdFloatOp:
@@ -8074,6 +8087,7 @@ static void define_builtin_fns(CodeGen *g) {
 create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0);
 create_builtin_fn(g, BuiltinFnIdAtomicRmw, "atomicRmw", 5);
 create_builtin_fn(g, BuiltinFnIdAtomicLoad, "atomicLoad", 3);
+create_builtin_fn(g, BuiltinFnIdAtomicStore, "atomicStore", 4);
 create_builtin_fn(g, BuiltinFnIdErrSetCast, "errSetCast", 2);
 create_builtin_fn(g, BuiltinFnIdToBytes, "sliceToBytes", 1);
 create_builtin_fn(g, BuiltinFnIdFromBytes, "bytesToSlice", 2);
src/ir.cpp
@@ -1010,6 +1010,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicLoad *) {
 return IrInstructionIdAtomicLoad;
 }
+static constexpr IrInstructionId ir_instruction_id(IrInstructionAtomicStore *) {
+return IrInstructionIdAtomicStore;
+}
+
 static constexpr IrInstructionId ir_instruction_id(IrInstructionSaveErrRetAddr *) {
 return IrInstructionIdSaveErrRetAddr;
 }
@@ -3188,6 +3192,25 @@ static IrInstruction *ir_build_atomic_load(IrBuilder *irb, Scope *scope, AstNode
 return &instruction->base;
 }
+static IrInstruction *ir_build_atomic_store(IrBuilder *irb, Scope *scope, AstNode *source_node,
+IrInstruction *operand_type, IrInstruction *ptr, IrInstruction *value,
+IrInstruction *ordering, AtomicOrder resolved_ordering)
+{
+IrInstructionAtomicStore *instruction = ir_build_instruction<IrInstructionAtomicStore>(irb, scope, source_node);
+instruction->operand_type = operand_type;
+instruction->ptr = ptr;
+instruction->value = value;
+instruction->ordering = ordering;
+instruction->resolved_ordering = resolved_ordering;
+
+if (operand_type != nullptr) ir_ref_instruction(operand_type, irb->current_basic_block);
+ir_ref_instruction(ptr, irb->current_basic_block);
+ir_ref_instruction(value, irb->current_basic_block);
+if (ordering != nullptr) ir_ref_instruction(ordering, irb->current_basic_block);
+
+return &instruction->base;
+}
+
 static IrInstruction *ir_build_save_err_ret_addr(IrBuilder *irb, Scope *scope, AstNode *source_node) {
 IrInstructionSaveErrRetAddr *instruction = ir_build_instruction<IrInstructionSaveErrRetAddr>(irb, scope, source_node);
 return &instruction->base;
@@ -5732,6 +5755,33 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo
 AtomicOrderMonotonic);
 return ir_lval_wrap(irb, scope, inst, lval, result_loc);
 }
+case BuiltinFnIdAtomicStore:
+{
+AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
+IrInstruction *arg0_value = ir_gen_node(irb, arg0_node, scope);
+if (arg0_value == irb->codegen->invalid_instruction)
+return arg0_value;
+
+AstNode *arg1_node = node->data.fn_call_expr.params.at(1);
+IrInstruction *arg1_value = ir_gen_node(irb, arg1_node, scope);
+if (arg1_value == irb->codegen->invalid_instruction)
+return arg1_value;
+
+AstNode *arg2_node = node->data.fn_call_expr.params.at(2);
+IrInstruction *arg2_value = ir_gen_node(irb, arg2_node, scope);
+if (arg2_value == irb->codegen->invalid_instruction)
+return arg2_value;
+
+AstNode *arg3_node = node->data.fn_call_expr.params.at(3);
+IrInstruction *arg3_value = ir_gen_node(irb, arg3_node, scope);
+if (arg3_value == irb->codegen->invalid_instruction)
+return arg3_value;
+
+IrInstruction *inst = ir_build_atomic_store(irb, scope, node, arg0_value, arg1_value, arg2_value, arg3_value,
+// this value does not mean anything since we passed non-null values for other arg
+AtomicOrderMonotonic);
+return ir_lval_wrap(irb, scope, inst, lval, result_loc);
+}
 case BuiltinFnIdIntToEnum:
 {
 AstNode *arg0_node = node->data.fn_call_expr.params.at(0);
@@ -25848,6 +25898,56 @@ static IrInstruction *ir_analyze_instruction_atomic_load(IrAnalyze *ira, IrInstr
 return result;
 }
+static IrInstruction *ir_analyze_instruction_atomic_store(IrAnalyze *ira, IrInstructionAtomicStore *instruction) {
+ZigType *operand_type = ir_resolve_atomic_operand_type(ira, instruction->operand_type->child);
+if (type_is_invalid(operand_type))
+return ira->codegen->invalid_instruction;
+
+IrInstruction *ptr_inst = instruction->ptr->child;
+if (type_is_invalid(ptr_inst->value.type))
+return ira->codegen->invalid_instruction;
+
+ZigType *ptr_type = get_pointer_to_type(ira->codegen, operand_type, false);
+IrInstruction *casted_ptr = ir_implicit_cast(ira, ptr_inst, ptr_type);
+if (type_is_invalid(casted_ptr->value.type))
+return ira->codegen->invalid_instruction;
+
+IrInstruction *value = instruction->value->child;
+if (type_is_invalid(value->value.type))
+return ira->codegen->invalid_instruction;
+
+IrInstruction *casted_value = ir_implicit_cast(ira, value, operand_type);
+if (type_is_invalid(casted_value->value.type))
+return ira->codegen->invalid_instruction;
+
+AtomicOrder ordering;
+if (instruction->ordering == nullptr) {
+ordering = instruction->resolved_ordering;
+} else {
+if (!ir_resolve_atomic_order(ira, instruction->ordering->child, &ordering))
+return ira->codegen->invalid_instruction;
+}
+
+if (ordering == AtomicOrderAcquire || ordering == AtomicOrderAcqRel) {
+ir_assert(instruction->ordering != nullptr, &instruction->base);
+ir_add_error(ira, instruction->ordering,
+buf_sprintf("@atomicStore atomic ordering must not be Acquire or AcqRel"));
+return ira->codegen->invalid_instruction;
+}
+
+if (instr_is_comptime(casted_value) && instr_is_comptime(casted_ptr)) {
+IrInstruction *result = ir_analyze_store_ptr(ira, &instruction->base, casted_ptr, value, false);
+result->value.type = ira->codegen->builtin_types.entry_void;
+return result;
+}
+
+IrInstruction *result = ir_build_atomic_store(&ira->new_irb, instruction->base.scope,
+instruction->base.source_node, nullptr, casted_ptr, casted_value, nullptr, ordering);
+result->value.type = ira->codegen->builtin_types.entry_void;
+return result;
+}
+
 static IrInstruction *ir_analyze_instruction_save_err_ret_addr(IrAnalyze *ira, IrInstructionSaveErrRetAddr *instruction) {
 IrInstruction *result = ir_build_save_err_ret_addr(&ira->new_irb, instruction->base.scope,
 instruction->base.source_node);
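The ordering check above is the analyzer-side rule behind the compile-error test added further down: a store may use any atomic ordering except the acquire flavors. A hedged sketch of what is accepted and rejected (illustrative only, not taken from the diff):

test "illustrative @atomicStore orderings" {
    var x: u32 = 0;
    @atomicStore(u32, &x, 1, .Monotonic); // accepted
    @atomicStore(u32, &x, 1, .Release); // accepted
    @atomicStore(u32, &x, 1, .SeqCst); // accepted
    // @atomicStore(u32, &x, 1, .Acquire); // rejected: ordering must not be Acquire or AcqRel
    // @atomicStore(u32, &x, 1, .AcqRel); // rejected: ordering must not be Acquire or AcqRel
}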
@@ -26882,6 +26982,8 @@ static IrInstruction *ir_analyze_instruction_base(IrAnalyze *ira, IrInstruction
 return ir_analyze_instruction_atomic_rmw(ira, (IrInstructionAtomicRmw *)instruction);
 case IrInstructionIdAtomicLoad:
 return ir_analyze_instruction_atomic_load(ira, (IrInstructionAtomicLoad *)instruction);
+case IrInstructionIdAtomicStore:
+return ir_analyze_instruction_atomic_store(ira, (IrInstructionAtomicStore *)instruction);
 case IrInstructionIdSaveErrRetAddr:
 return ir_analyze_instruction_save_err_ret_addr(ira, (IrInstructionSaveErrRetAddr *)instruction);
 case IrInstructionIdAddImplicitReturnType:
@@ -27062,6 +27164,7 @@ bool ir_has_side_effects(IrInstruction *instruction) {
 case IrInstructionIdSaveErrRetAddr:
 case IrInstructionIdAddImplicitReturnType:
 case IrInstructionIdAtomicRmw:
+case IrInstructionIdAtomicStore:
 case IrInstructionIdCmpxchgGen:
 case IrInstructionIdCmpxchgSrc:
 case IrInstructionIdAssertZero:
@@ -324,6 +324,8 @@ const char* ir_instruction_type_str(IrInstructionId id) {
 return "AtomicRmw";
 case IrInstructionIdAtomicLoad:
 return "AtomicLoad";
+case IrInstructionIdAtomicStore:
+return "AtomicStore";
 case IrInstructionIdSaveErrRetAddr:
 return "SaveErrRetAddr";
 case IrInstructionIdAddImplicitReturnType:
@@ -1871,6 +1873,27 @@ static void ir_print_atomic_load(IrPrint *irp, IrInstructionAtomicLoad *instruct
 fprintf(irp->f, ")");
 }
+static void ir_print_atomic_store(IrPrint *irp, IrInstructionAtomicStore *instruction) {
+fprintf(irp->f, "@atomicStore(");
+if (instruction->operand_type != nullptr) {
+ir_print_other_instruction(irp, instruction->operand_type);
+} else {
+fprintf(irp->f, "[TODO print]");
+}
+fprintf(irp->f, ",");
+ir_print_other_instruction(irp, instruction->ptr);
+fprintf(irp->f, ",");
+ir_print_other_instruction(irp, instruction->value);
+fprintf(irp->f, ",");
+if (instruction->ordering != nullptr) {
+ir_print_other_instruction(irp, instruction->ordering);
+} else {
+fprintf(irp->f, "[TODO print]");
+}
+fprintf(irp->f, ")");
+}
+
 static void ir_print_save_err_ret_addr(IrPrint *irp, IrInstructionSaveErrRetAddr *instruction) {
 fprintf(irp->f, "@saveErrRetAddr()");
 }
@@ -2431,6 +2454,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction, bool
 case IrInstructionIdAtomicLoad:
 ir_print_atomic_load(irp, (IrInstructionAtomicLoad *)instruction);
 break;
+case IrInstructionIdAtomicStore:
+ir_print_atomic_store(irp, (IrInstructionAtomicStore *)instruction);
+break;
 case IrInstructionIdEnumToInt:
 ir_print_enum_to_int(irp, (IrInstructionEnumToInt *)instruction);
 break;
@@ -2,6 +2,16 @@ const tests = @import("tests.zig");
 const builtin = @import("builtin");
 pub fn addCases(cases: *tests.CompileErrorContext) void {
+cases.add(
+"atomic orderings of atomicStore Acquire or AcqRel",
+\\export fn entry() void {
+\\    var x: u32 = 0;
+\\    @atomicStore(u32, &x, 1, .Acquire);
+\\}
+,
+"tmp.zig:3:30: error: @atomicStore atomic ordering must not be Acquire or AcqRel",
+);
+
 cases.add(
 "missing const in slice with nested array type",
 \\const Geo3DTex2D = struct { vertices: [][2]f32 };
@@ -123,3 +123,24 @@ test "atomic load and rmw with enum" {
 expect(@atomicLoad(Value, &x, .SeqCst) != .a);
 expect(@atomicLoad(Value, &x, .SeqCst) != .b);
 }
+
+test "atomic store" {
+var x: u32 = 0;
+@atomicStore(u32, &x, 1, .SeqCst);
+expect(@atomicLoad(u32, &x, .SeqCst) == 1);
+@atomicStore(u32, &x, 12345678, .SeqCst);
+expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
+}
+
+test "atomic store comptime" {
+comptime testAtomicStore();
+testAtomicStore();
+}
+
+fn testAtomicStore() void {
+var x: u32 = 0;
+@atomicStore(u32, &x, 1, .SeqCst);
+expect(@atomicLoad(u32, &x, .SeqCst) == 1);
+@atomicStore(u32, &x, 12345678, .SeqCst);
+expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
+}