Mirror of https://github.com/ziglang/zig.git
use @atomicStore in std lib
commit f0c94d95dd
parent 110ef2e528
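This change replaces the old workaround of doing an @atomicRmw exchange and throwing away the result with the dedicated @atomicStore builtin, which performs an atomic store and returns nothing (the TODO removed from SpinLock.release below tracked this as https://github.com/ziglang/zig/issues/2995). As a rough sketch of the before/after idiom, assuming the 2019-era builtin signatures that appear in the diff (the variable name is illustrative, not part of the commit):

    const expect = @import("std").testing.expect;

    // Illustrative test, not part of this commit.
    test "store a flag atomically" {
        var flag: u8 = 0;

        // Old idiom: atomic exchange, previous value discarded.
        _ = @atomicRmw(u8, &flag, .Xchg, 1, .SeqCst);

        // New idiom: a plain atomic store; there is no result to ignore.
        @atomicStore(u8, &flag, 0, .SeqCst);
        expect(@atomicLoad(u8, &flag, .SeqCst) == 0);
    }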
@@ -199,7 +199,7 @@ test "std.atomic.Queue" {
     for (putters) |t|
         t.wait();
-    _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+    @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
     for (getters) |t|
         t.wait();
@@ -128,7 +128,7 @@ test "std.atomic.stack" {
     for (putters) |t|
         t.wait();
-    _ = @atomicRmw(u8, &context.puts_done, builtin.AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+    @atomicStore(u8, &context.puts_done, 1, AtomicOrder.SeqCst);
     for (getters) |t|
         t.wait();
 }
@@ -161,7 +161,7 @@ pub fn Channel(comptime T: type) type {
         fn dispatch(self: *SelfChannel) void {
             // set the "need dispatch" flag
-            _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 1, .SeqCst);
+            @atomicStore(u8, &self.need_dispatch, 1, .SeqCst);

             lock: while (true) {
                 // set the lock flag
@@ -169,7 +169,7 @@ pub fn Channel(comptime T: type) type {
                 if (prev_lock != 0) return;

                 // clear the need_dispatch flag since we're about to do it
-                _ = @atomicRmw(u8, &self.need_dispatch, .Xchg, 0, .SeqCst);
+                @atomicStore(u8, &self.need_dispatch, 0, .SeqCst);

                 while (true) {
                     one_dispatch: {
@@ -62,12 +62,12 @@ pub fn Future(comptime T: type) type {
         pub async fn start(self: *Self) ?*T {
             const state = @cmpxchgStrong(Available, &self.available, .NotStarted, .Started, .SeqCst, .SeqCst) orelse return null;
             switch (state) {
-                1 => {
+                .Started => {
                     const held = self.lock.acquire();
                     held.release();
                     return &self.data;
                 },
-                2 => return &self.data,
+                .Finished => return &self.data,
                 else => unreachable,
             }
         }
@@ -31,8 +31,8 @@ pub const Lock = struct {
             }

             // We need to release the lock.
-            _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+            @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);

             // There might be a queue item. If we know the queue is empty, we can be done,
             // because the other actor will try to obtain the lock.
@@ -56,8 +56,8 @@ pub const Lock = struct {
             }

             // Release the lock again.
-            _ = @atomicRmw(u8, &self.lock.queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(u8, &self.lock.shared_bit, .Xchg, 0, .SeqCst);
+            @atomicStore(u8, &self.lock.queue_empty_bit, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.shared_bit, 0, .SeqCst);

             // Find out if we can be done.
             if (@atomicLoad(u8, &self.lock.queue_empty_bit, .SeqCst) == 1) {
@@ -101,7 +101,7 @@ pub const Lock = struct {

         // We set this bit so that later we can rely on the fact, that if queue_empty_bit is 1, some actor
         // will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.queue_empty_bit, 0, .SeqCst);

         const old_bit = @atomicRmw(u8, &self.shared_bit, .Xchg, 1, .SeqCst);
         if (old_bit == 0) {
@@ -820,7 +820,7 @@ pub const Loop = struct {
                     _ = os.kevent(self.os_data.fs_kqfd, fs_kevs, empty_kevs, null) catch unreachable;
                 },
                 .linux => {
-                    _ = @atomicRmw(i32, &self.os_data.fs_queue_item, AtomicRmwOp.Xchg, 1, AtomicOrder.SeqCst);
+                    @atomicStore(i32, &self.os_data.fs_queue_item, 1, AtomicOrder.SeqCst);
                     const rc = os.linux.futex_wake(&self.os_data.fs_queue_item, os.linux.FUTEX_WAKE, 1);
                     switch (os.linux.getErrno(rc)) {
                         0 => {},
@@ -843,7 +843,7 @@ pub const Loop = struct {
     fn posixFsRun(self: *Loop) void {
         while (true) {
             if (builtin.os == .linux) {
-                _ = @atomicRmw(i32, &self.os_data.fs_queue_item, .Xchg, 0, .SeqCst);
+                @atomicStore(i32, &self.os_data.fs_queue_item, 0, .SeqCst);
             }
             while (self.os_data.fs_queue.get()) |node| {
                 switch (node.data.msg) {
@@ -40,7 +40,7 @@ pub const RwLock = struct {
                 return;
             }

-            _ = @atomicRmw(u8, &self.lock.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+            @atomicStore(u8, &self.lock.reader_queue_empty_bit, 1, .SeqCst);
             if (@cmpxchgStrong(State, &self.lock.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
                 // Didn't unlock. Someone else's problem.
                 return;
@@ -64,15 +64,15 @@ pub const RwLock = struct {
             // We need to release the write lock. Check if any readers are waiting to grab the lock.
             if (@atomicLoad(u8, &self.lock.reader_queue_empty_bit, .SeqCst) == 0) {
                 // Switch to a read lock.
-                _ = @atomicRmw(State, &self.lock.shared_state, .Xchg, .ReadLock, .SeqCst);
+                @atomicStore(State, &self.lock.shared_state, .ReadLock, .SeqCst);
                 while (self.lock.reader_queue.get()) |node| {
                     global_event_loop.onNextTick(node);
                 }
                 return;
             }

-            _ = @atomicRmw(u8, &self.lock.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(State, &self.lock.shared_state, .Xchg, State.Unlocked, .SeqCst);
+            @atomicStore(u8, &self.lock.writer_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(State, &self.lock.shared_state, .Unlocked, .SeqCst);

             self.lock.commonPostUnlock();
         }
@@ -113,7 +113,7 @@ pub const RwLock = struct {

         // We set this bit so that later we can rely on the fact, that if reader_queue_empty_bit is 1,
         // some actor will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.reader_queue_empty_bit, 0, .SeqCst);

         // Here we don't care if we are the one to do the locking or if it was already locked for reading.
         const have_read_lock = if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .ReadLock, .SeqCst, .SeqCst)) |old_state| old_state == .ReadLock else true;
@@ -144,7 +144,7 @@ pub const RwLock = struct {

         // We set this bit so that later we can rely on the fact, that if writer_queue_empty_bit is 1,
         // some actor will attempt to grab the lock.
-        _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 0, .SeqCst);
+        @atomicStore(u8, &self.writer_queue_empty_bit, 0, .SeqCst);

         // Here we must be the one to acquire the write lock. It cannot already be locked.
         if (@cmpxchgStrong(State, &self.shared_state, .Unlocked, .WriteLock, .SeqCst, .SeqCst) == null) {
@@ -176,8 +176,8 @@ pub const RwLock = struct {
                 return;
             }
             // Release the lock again.
-            _ = @atomicRmw(u8, &self.writer_queue_empty_bit, .Xchg, 1, .SeqCst);
-            _ = @atomicRmw(State, &self.shared_state, .Xchg, .Unlocked, .SeqCst);
+            @atomicStore(u8, &self.writer_queue_empty_bit, 1, .SeqCst);
+            @atomicStore(State, &self.shared_state, .Unlocked, .SeqCst);
             continue;
         }
@@ -195,7 +195,7 @@ pub const RwLock = struct {
                 return;
             }
             // Release the lock again.
-            _ = @atomicRmw(u8, &self.reader_queue_empty_bit, .Xchg, 1, .SeqCst);
+            @atomicStore(u8, &self.reader_queue_empty_bit, 1, .SeqCst);
             if (@cmpxchgStrong(State, &self.shared_state, .ReadLock, .Unlocked, .SeqCst, .SeqCst) != null) {
                 // Didn't unlock. Someone else's problem.
                 return;
@@ -531,7 +531,7 @@ extern fn init_vdso_clock_gettime(clk: i32, ts: *timespec) usize {
     const ptr = @intToPtr(?*const c_void, vdso.lookup(VDSO_CGT_VER, VDSO_CGT_SYM));
     // Note that we may not have a VDSO at all, update the stub address anyway
     // so that clock_gettime will fall back on the good old (and slow) syscall
-    _ = @cmpxchgStrong(?*const c_void, &vdso_clock_gettime, &init_vdso_clock_gettime, ptr, .Monotonic, .Monotonic);
+    @atomicStore(?*const c_void, &vdso_clock_gettime, ptr, .Monotonic);
     // Call into the VDSO if available
     if (ptr) |fn_ptr| {
         const f = @ptrCast(vdso_clock_gettime_ty, fn_ptr);
@@ -11,8 +11,7 @@ pub const SpinLock = struct {
         spinlock: *SpinLock,

         pub fn release(self: Held) void {
-            // TODO: @atomicStore() https://github.com/ziglang/zig/issues/2995
-            assert(@atomicRmw(u8, &self.spinlock.lock, .Xchg, 0, .Release) == 1);
+            @atomicStore(u8, &self.spinlock.lock, 0, .Release);
         }
     };

@@ -130,4 +130,4 @@ test "atomic store" {
     expect(@atomicLoad(u32, &x, .SeqCst) == 1);
     @atomicStore(u32, &x, 12345678, .SeqCst);
     expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
-}
+}