From 4909aa1da43d227ad85b2fe03a58ef1a8c12b769 Mon Sep 17 00:00:00 2001
From: Kenta Iwasaki
Date: Tue, 1 Jun 2021 18:35:13 +0900
Subject: [PATCH] os/bits: remove duplicate `sockaddr_storage` for dragonfly

---
 lib/std/atomic.zig            |  6 ++---
 lib/std/atomic/Atomic.zig     | 44 +++++++++++++++++------------------
 lib/std/mem.zig               |  4 ++--
 lib/std/os/bits/dragonfly.zig |  8 -------
 lib/std/target.zig            |  3 +--
 5 files changed, 28 insertions(+), 37 deletions(-)

diff --git a/lib/std/atomic.zig b/lib/std/atomic.zig
index 224b57d1d2..1944e5346b 100644
--- a/lib/std/atomic.zig
+++ b/lib/std/atomic.zig
@@ -19,7 +19,7 @@ test "std.atomic" {
     _ = @import("atomic/Atomic.zig");
 }
 
-pub fn fence(comptime ordering: Ordering) callconv(.Inline) void {
+pub inline fn fence(comptime ordering: Ordering) void {
     switch (ordering) {
         .Acquire, .Release, .AcqRel, .SeqCst => {
             @fence(ordering);
@@ -30,7 +30,7 @@ pub fn fence(comptime ordering: Ordering) callconv(.Inline) void {
     }
 }
 
-pub fn compilerFence(comptime ordering: Ordering) callconv(.Inline) void {
+pub inline fn compilerFence(comptime ordering: Ordering) void {
     switch (ordering) {
         .Acquire, .Release, .AcqRel, .SeqCst => asm volatile ("" ::: "memory"),
         else => @compileLog(ordering, " only applies to a given memory location"),
@@ -45,7 +45,7 @@ test "fence/compilerFence" {
 }
 
 /// Signals to the processor that the caller is inside a busy-wait spin-loop.
-pub fn spinLoopHint() callconv(.Inline) void {
+pub inline fn spinLoopHint() void {
     const hint_instruction = switch (target.cpu.arch) {
         // No-op instruction that can hint to save (or share with a hardware-thread) pipelining/power resources
         // https://software.intel.com/content/www/us/en/develop/articles/benefitting-power-and-performance-sleep-loops.html
diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig
index 5c3b865a6a..d137bc7552 100644
--- a/lib/std/atomic/Atomic.zig
+++ b/lib/std/atomic/Atomic.zig
@@ -48,38 +48,38 @@ pub fn Atomic(comptime T: type) type {
             };
         }
 
-        pub fn swap(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+        pub inline fn swap(self: *Self, value: T, comptime ordering: Ordering) T {
             return self.rmw(.Xchg, value, ordering);
         }
 
-        pub fn compareAndSwap(
+        pub inline fn compareAndSwap(
             self: *Self,
             compare: T,
             exchange: T,
             comptime success: Ordering,
             comptime failure: Ordering,
-        ) callconv(.Inline) ?T {
+        ) ?T {
            return self.cmpxchg(true, compare, exchange, success, failure);
         }
 
-        pub fn tryCompareAndSwap(
+        pub inline fn tryCompareAndSwap(
            self: *Self,
            compare: T,
            exchange: T,
            comptime success: Ordering,
            comptime failure: Ordering,
-        ) callconv(.Inline) ?T {
+        ) ?T {
            return self.cmpxchg(false, compare, exchange, success, failure);
        }
 
-        fn cmpxchg(
+        inline fn cmpxchg(
            self: *Self,
            comptime is_strong: bool,
            compare: T,
            exchange: T,
            comptime success: Ordering,
            comptime failure: Ordering,
-        ) callconv(.Inline) ?T {
+        ) ?T {
            if (success == .Unordered or failure == .Unordered) {
                @compileError(@tagName(Ordering.Unordered) ++ " is only allowed on atomic loads and stores");
            }
@@ -103,12 +103,12 @@ pub fn Atomic(comptime T: type) type {
             };
         }
 
-        fn rmw(
+        inline fn rmw(
             self: *Self,
             comptime op: std.builtin.AtomicRmwOp,
             value: T,
             comptime ordering: Ordering,
-        ) callconv(.Inline) T {
+        ) T {
             return @atomicRmw(T, &self.value, op, value, ordering);
         }
 
@@ -117,37 +117,37 @@ pub fn Atomic(comptime T: type) type {
         }
 
         pub usingnamespace exportWhen(std.meta.trait.isNumber(T), struct {
-            pub fn fetchAdd(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            pub inline fn fetchAdd(self: *Self, value: T, comptime ordering: Ordering) T {
                 return self.rmw(.Add, value, ordering);
             }
 
-            pub fn fetchSub(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            pub inline fn fetchSub(self: *Self, value: T, comptime ordering: Ordering) T {
                 return self.rmw(.Sub, value, ordering);
             }
 
-            pub fn fetchMin(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            pub inline fn fetchMin(self: *Self, value: T, comptime ordering: Ordering) T {
                 return self.rmw(.Min, value, ordering);
             }
 
-            pub fn fetchMax(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            pub inline fn fetchMax(self: *Self, value: T, comptime ordering: Ordering) T {
                 return self.rmw(.Max, value, ordering);
             }
         });
 
         pub usingnamespace exportWhen(std.meta.trait.isIntegral(T), struct {
-            pub fn fetchAnd(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            pub inline fn fetchAnd(self: *Self, value: T, comptime ordering: Ordering) T {
                 return self.rmw(.And, value, ordering);
             }
 
-            pub fn fetchNand(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            pub inline fn fetchNand(self: *Self, value: T, comptime ordering: Ordering) T {
                 return self.rmw(.Nand, value, ordering);
             }
 
-            pub fn fetchOr(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            pub inline fn fetchOr(self: *Self, value: T, comptime ordering: Ordering) T {
                 return self.rmw(.Or, value, ordering);
             }
 
-            pub fn fetchXor(self: *Self, value: T, comptime ordering: Ordering) callconv(.Inline) T {
+            pub inline fn fetchXor(self: *Self, value: T, comptime ordering: Ordering) T {
                 return self.rmw(.Xor, value, ordering);
             }
 
@@ -158,24 +158,24 @@ pub fn Atomic(comptime T: type) type {
                 Toggle,
             };
 
-            pub fn bitSet(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+            pub inline fn bitSet(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
                 return bitRmw(self, .Set, bit, ordering);
             }
 
-            pub fn bitReset(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+            pub inline fn bitReset(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
                 return bitRmw(self, .Reset, bit, ordering);
             }
 
-            pub fn bitToggle(self: *Self, bit: Bit, comptime ordering: Ordering) callconv(.Inline) u1 {
+            pub inline fn bitToggle(self: *Self, bit: Bit, comptime ordering: Ordering) u1 {
                 return bitRmw(self, .Toggle, bit, ordering);
             }
 
-            fn bitRmw(
+            inline fn bitRmw(
                 self: *Self,
                 comptime op: BitRmwOp,
                 bit: Bit,
                 comptime ordering: Ordering,
-            ) callconv(.Inline) u1 {
+            ) u1 {
                 // x86 supports dedicated bitwise instructions
                 if (comptime target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) {
                     const instruction = switch (op) {
diff --git a/lib/std/mem.zig b/lib/std/mem.zig
index 4cf879cf05..fd9af71e4b 100644
--- a/lib/std/mem.zig
+++ b/lib/std/mem.zig
@@ -1171,7 +1171,7 @@ test "mem.indexOf" {
 test "mem.indexOf multibyte" {
     {
         // make haystack and needle long enough to trigger boyer-moore-horspool algorithm
-        const haystack = [1]u16{0} ** 100 ++ [_]u16 { 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff };
+        const haystack = [1]u16{0} ** 100 ++ [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff };
         const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee };
 
         try testing.expectEqual(indexOfPos(u16, &haystack, 0, &needle), 100);
@@ -1184,7 +1184,7 @@ test "mem.indexOf multibyte" {
 
     {
         // make haystack and needle long enough to trigger boyer-moore-horspool algorithm
-        const haystack = [_]u16 { 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff } ++ [1]u16{0} ** 100;
+        const haystack = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee, 0x00ff } ++ [1]u16{0} ** 100;
         const needle = [_]u16{ 0xbbaa, 0xccbb, 0xddcc, 0xeedd, 0xffee };
 
         try testing.expectEqual(lastIndexOf(u16, &haystack, &needle), 0);
diff --git a/lib/std/os/bits/dragonfly.zig b/lib/std/os/bits/dragonfly.zig
index 88199aee7e..5c3ad305da 100644
--- a/lib/std/os/bits/dragonfly.zig
+++ b/lib/std/os/bits/dragonfly.zig
@@ -696,14 +696,6 @@ pub const in_port_t = u16;
 pub const sa_family_t = u8;
 pub const socklen_t = u32;
 
-pub const sockaddr_storage = extern struct {
-    ss_len: u8,
-    ss_family: sa_family_t,
-    __ss_pad1: [5]u8,
-    __ss_align: i64,
-    __ss_pad2: [112]u8,
-};
-
 pub const sockaddr_in = extern struct {
     len: u8 = @sizeOf(sockaddr_in),
     family: sa_family_t = AF_INET,
diff --git a/lib/std/target.zig b/lib/std/target.zig
index 692d29b1c7..9a2be37ee8 100644
--- a/lib/std/target.zig
+++ b/lib/std/target.zig
@@ -500,8 +500,7 @@ pub const Target = struct {
             .haiku,
             .windows,
             => return .gnu,
-            .uefi,
-            => return .msvc,
+            .uefi => return .msvc,
             .linux,
             .wasi,
             .emscripten,
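
Note on the mechanical part of this patch: `pub fn f(...) callconv(.Inline) T`
and `pub inline fn f(...) T` declare the same guaranteed-inline function; only
the spelling changes, so call sites are untouched. A minimal sketch of the
migrated std.atomic API in use (the test name and values are illustrative, not
from this patch, and it assumes `Atomic` is re-exported as `std.atomic.Atomic`):

    const std = @import("std");

    test "fetchAdd call sites are unchanged by the inline fn migration" {
        var counter = std.atomic.Atomic(u32).init(0);

        // fetchAdd returns the previous value; it now goes through the
        // `pub inline fn` declaration introduced by this patch.
        try std.testing.expectEqual(@as(u32, 0), counter.fetchAdd(1, .SeqCst));
        try std.testing.expectEqual(@as(u32, 1), counter.load(.SeqCst));

        // spinLoopHint (also migrated) is only a scheduling hint; it is
        // safe to call outside of a spin loop.
        std.atomic.spinLoopHint();
    }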