Mirror of https://github.com/ziglang/zig.git, synced 2026-02-20 16:24:51 +00:00
Merge pull request #18994 from ExpidusOS/feat/container-layout-rename-fields
std.builtin: make enum fields lowercase
Commit: 4f782d1e85
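In short, this change renames the enum fields of std.builtin (GlobalLinkage, AtomicOrder, FloatMode, ContainerLayout, and friends) from TitleCase to lowercase/snake_case, and replaces Step.Compile.Linkage with std.builtin.LinkMode. As a minimal before/after sketch of what callers now write (the variable and symbol names here are illustrative, not taken from the diff):

const std = @import("std");

var counter: u32 = 0;

fn readCounter() callconv(.C) u32 {
    // was: @atomicLoad(u32, &counter, .SeqCst)
    return @atomicLoad(u32, &counter, .seq_cst);
}

comptime {
    // was: .linkage = .Strong
    @export(readCounter, .{ .name = "readCounter", .linkage = .strong });
}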
@@ -855,7 +855,7 @@ fn addCMakeLibraryList(exe: *std.Build.Step.Compile, list: []const u8) void {
}

const CMakeConfig = struct {
- llvm_linkage: std.Build.Step.Compile.Linkage,
+ llvm_linkage: std.builtin.LinkMode,
cmake_binary_dir: []const u8,
cmake_prefix_path: []const u8,
cmake_static_library_prefix: []const u8,

@@ -1453,7 +1453,7 @@ export fn foo_strict(x: f64) f64 {
}

export fn foo_optimized(x: f64) f64 {
- @setFloatMode(.Optimized);
+ @setFloatMode(.optimized);
return x + big - big;
}
{#code_end#}

@@ -8356,7 +8356,7 @@ test "main" {
</p>
{#code_begin|obj|export_builtin#}
comptime {
- @export(internalName, .{ .name = "foo", .linkage = .Strong });
+ @export(internalName, .{ .name = "foo", .linkage = .strong });
}

fn internalName() callconv(.C) void {}

@@ -892,10 +892,10 @@ fn workerMakeOneStep(
// then we return without doing the step, relying on another worker to
// queue this step up again when dependencies are met.
for (s.dependencies.items) |dep| {
- switch (@atomicLoad(Step.State, &dep.state, .SeqCst)) {
+ switch (@atomicLoad(Step.State, &dep.state, .seq_cst)) {
.success, .skipped => continue,
.failure, .dependency_failure, .skipped_oom => {
- @atomicStore(Step.State, &s.state, .dependency_failure, .SeqCst);
+ @atomicStore(Step.State, &s.state, .dependency_failure, .seq_cst);
return;
},
.precheck_done, .running => {

@@ -929,7 +929,7 @@ fn workerMakeOneStep(
s.state = .running;
} else {
// Avoid running steps twice.
- if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .SeqCst, .SeqCst) != null) {
+ if (@cmpxchgStrong(Step.State, &s.state, .precheck_done, .running, .seq_cst, .seq_cst) != null) {
// Another worker got the job.
return;
}

@@ -956,13 +956,13 @@ fn workerMakeOneStep(

handle_result: {
if (make_result) |_| {
- @atomicStore(Step.State, &s.state, .success, .SeqCst);
+ @atomicStore(Step.State, &s.state, .success, .seq_cst);
} else |err| switch (err) {
error.MakeFailed => {
- @atomicStore(Step.State, &s.state, .failure, .SeqCst);
+ @atomicStore(Step.State, &s.state, .failure, .seq_cst);
break :handle_result;
},
- error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .SeqCst),
+ error.MakeSkipped => @atomicStore(Step.State, &s.state, .skipped, .seq_cst),
}

// Successful completion of a step, so we queue up its dependants as well.
lib/c.zig (20 changed lines)

@@ -26,7 +26,7 @@ const is_freestanding = switch (native_os) {

comptime {
if (is_freestanding and is_wasm and builtin.link_libc) {
- @export(wasm_start, .{ .name = "_start", .linkage = .Strong });
+ @export(wasm_start, .{ .name = "_start", .linkage = .strong });
}

if (native_os == .linux) {

@@ -34,16 +34,16 @@ comptime {
}

if (builtin.link_libc) {
- @export(strcmp, .{ .name = "strcmp", .linkage = .Strong });
- @export(strncmp, .{ .name = "strncmp", .linkage = .Strong });
- @export(strerror, .{ .name = "strerror", .linkage = .Strong });
- @export(strlen, .{ .name = "strlen", .linkage = .Strong });
- @export(strcpy, .{ .name = "strcpy", .linkage = .Strong });
- @export(strncpy, .{ .name = "strncpy", .linkage = .Strong });
- @export(strcat, .{ .name = "strcat", .linkage = .Strong });
- @export(strncat, .{ .name = "strncat", .linkage = .Strong });
+ @export(strcmp, .{ .name = "strcmp", .linkage = .strong });
+ @export(strncmp, .{ .name = "strncmp", .linkage = .strong });
+ @export(strerror, .{ .name = "strerror", .linkage = .strong });
+ @export(strlen, .{ .name = "strlen", .linkage = .strong });
+ @export(strcpy, .{ .name = "strcpy", .linkage = .strong });
+ @export(strncpy, .{ .name = "strncpy", .linkage = .strong });
+ @export(strcat, .{ .name = "strcat", .linkage = .strong });
+ @export(strncat, .{ .name = "strncat", .linkage = .strong });
} else if (is_msvc) {
- @export(_fltused, .{ .name = "_fltused", .linkage = .Strong });
+ @export(_fltused, .{ .name = "_fltused", .linkage = .strong });
}
}
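The other GlobalLinkage fields are renamed the same way (.Internal to .internal, .Weak to .weak, .LinkOnce to .link_once). A hedged sketch of the test-aware export pattern that compiler_rt uses further down in this diff; the function and symbol names here are made up for illustration:

const std = @import("std");
const builtin = @import("builtin");

// Weak in normal builds so another libc can override it; internal under `zig test`.
const my_linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .internal else .weak;

fn myMemset(dest: [*]u8, c: u8, len: usize) callconv(.C) [*]u8 {
    @memset(dest[0..len], c);
    return dest;
}

comptime {
    @export(myMemset, .{ .name = "memset", .linkage = my_linkage });
}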
lib/compiler/aro/aro/Attribute.zig (vendored, 2 changed lines)
@@ -653,7 +653,7 @@ pub const Arguments = blk: {

break :blk @Type(.{
.Union = .{
- .layout = .Auto,
+ .layout = .auto,
.tag_type = null,
.fields = &union_fields,
.decls = &.{},

@@ -74,7 +74,7 @@ const SpinlockTable = struct {
: "memory"
);
} else flag: {
- break :flag @atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .Acquire);
+ break :flag @atomicRmw(@TypeOf(self.v), &self.v, .Xchg, .Locked, .acquire);
};

switch (flag) {

@@ -91,7 +91,7 @@ const SpinlockTable = struct {
: "memory"
);
} else {
- @atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .Release);
+ @atomicStore(@TypeOf(self.v), &self.v, .Unlocked, .release);
}
}
};

@@ -172,7 +172,7 @@ inline fn atomic_load_N(comptime T: type, src: *T, model: i32) T {
defer sl.release();
return src.*;
} else {
- return @atomicLoad(T, src, .SeqCst);
+ return @atomicLoad(T, src, .seq_cst);
}
}

@@ -203,7 +203,7 @@ inline fn atomic_store_N(comptime T: type, dst: *T, value: T, model: i32) void {
defer sl.release();
dst.* = value;
} else {
- @atomicStore(T, dst, value, .SeqCst);
+ @atomicStore(T, dst, value, .seq_cst);
}
}

@@ -239,12 +239,12 @@ fn wideUpdate(comptime T: type, ptr: *T, val: T, update: anytype) T {

const mask = @as(WideAtomic, std.math.maxInt(T)) << inner_shift;

- var wide_old = @atomicLoad(WideAtomic, wide_ptr, .SeqCst);
+ var wide_old = @atomicLoad(WideAtomic, wide_ptr, .seq_cst);
while (true) {
const old = @as(T, @truncate((wide_old & mask) >> inner_shift));
const new = update(val, old);
const wide_new = wide_old & ~mask | (@as(WideAtomic, new) << inner_shift);
- if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .SeqCst, .SeqCst)) |new_wide_old| {
+ if (@cmpxchgWeak(WideAtomic, wide_ptr, wide_old, wide_new, .seq_cst, .seq_cst)) |new_wide_old| {
wide_old = new_wide_old;
} else {
return old;

@@ -270,7 +270,7 @@ inline fn atomic_exchange_N(comptime T: type, ptr: *T, val: T, model: i32) T {
};
return wideUpdate(T, ptr, val, Updater.update);
} else {
- return @atomicRmw(T, ptr, .Xchg, val, .SeqCst);
+ return @atomicRmw(T, ptr, .Xchg, val, .seq_cst);
}
}

@@ -315,7 +315,7 @@ inline fn atomic_compare_exchange_N(
expected.* = value;
return 0;
} else {
- if (@cmpxchgStrong(T, ptr, expected.*, desired, .SeqCst, .SeqCst)) |old_value| {
+ if (@cmpxchgStrong(T, ptr, expected.*, desired, .seq_cst, .seq_cst)) |old_value| {
expected.* = old_value;
return 0;
}

@@ -373,7 +373,7 @@ inline fn fetch_op_N(comptime T: type, comptime op: std.builtin.AtomicRmwOp, ptr
return wideUpdate(T, ptr, val, Updater.update);
}

- return @atomicRmw(T, ptr, op, val, .SeqCst);
+ return @atomicRmw(T, ptr, op, val, .seq_cst);
}

fn __atomic_fetch_add_1(ptr: *u8, val: u8, model: i32) callconv(.C) u8 {
@@ -162,7 +162,7 @@ fn clear_cache(start: usize, end: usize) callconv(.C) void {
}
}

- const linkage = if (builtin.is_test) std.builtin.GlobalLinkage.Internal else std.builtin.GlobalLinkage.Weak;
+ const linkage = if (builtin.is_test) std.builtin.GlobalLinkage.internal else std.builtin.GlobalLinkage.weak;

fn exportIt() void {
@export(clear_cache, .{ .name = "__clear_cache", .linkage = linkage });

@@ -2,12 +2,12 @@ const std = @import("std");
const builtin = @import("builtin");
const native_endian = builtin.cpu.arch.endian();

- pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+ pub const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .internal else .weak;
/// Determines the symbol's visibility to other objects.
/// For WebAssembly this allows the symbol to be resolved to other modules, but will not
/// export it to the host runtime.
pub const visibility: std.builtin.SymbolVisibility =
- if (builtin.target.isWasm() and linkage != .Internal) .hidden else .default;
+ if (builtin.target.isWasm() and linkage != .internal) .hidden else .default;
pub const want_aeabi = switch (builtin.abi) {
.eabi,
.eabihf,

@@ -1,7 +1,7 @@
const std = @import("std");
const testing = std.testing;
const builtin = @import("builtin");
- const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
+ const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .internal else .weak;
const panic = @import("common.zig").panic;

const have_availability_version_check = builtin.os.tag.isDarwin() and

@@ -8,8 +8,8 @@ const is_test = builtin.is_test;
const is_gnu = abi.isGnu();
const is_mingw = os_tag == .windows and is_gnu;

- const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Weak;
- const strong_linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .Internal else .Strong;
+ const linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .internal else .weak;
+ const strong_linkage: std.builtin.GlobalLinkage = if (builtin.is_test) .internal else .strong;
pub const panic = @import("common.zig").panic;

comptime {

@@ -645,7 +645,7 @@ pub const ExecutableOptions = struct {
version: ?std.SemanticVersion = null,
optimize: std.builtin.OptimizeMode = .Debug,
code_model: std.builtin.CodeModel = .default,
- linkage: ?Step.Compile.Linkage = null,
+ linkage: ?std.builtin.LinkMode = null,
max_rss: usize = 0,
link_libc: ?bool = null,
single_threaded: ?bool = null,

@@ -414,7 +414,7 @@ pub const LinkSystemLibraryOptions = struct {
needed: bool = false,
weak: bool = false,
use_pkg_config: SystemLib.UsePkgConfig = .yes,
- preferred_link_mode: std.builtin.LinkMode = .Dynamic,
+ preferred_link_mode: std.builtin.LinkMode = .dynamic,
search_strategy: SystemLib.SearchStrategy = .paths_first,
};

@@ -28,7 +28,7 @@ linker_script: ?LazyPath = null,
version_script: ?LazyPath = null,
out_filename: []const u8,
out_lib_filename: []const u8,
- linkage: ?Linkage = null,
+ linkage: ?std.builtin.LinkMode = null,
version: ?std.SemanticVersion,
kind: Kind,
major_only_filename: ?[]const u8,

@@ -223,7 +223,7 @@ pub const Options = struct {
name: []const u8,
root_module: Module.CreateOptions,
kind: Kind,
- linkage: ?Linkage = null,
+ linkage: ?std.builtin.LinkMode = null,
version: ?std.SemanticVersion = null,
max_rss: usize = 0,
filters: []const []const u8 = &.{},

@@ -246,8 +246,6 @@ pub const Kind = enum {
@"test",
};

- pub const Linkage = enum { dynamic, static };
-
pub fn create(owner: *std.Build, options: Options) *Compile {
const name = owner.dupe(options.name);
if (mem.indexOf(u8, name, "/") != null or mem.indexOf(u8, name, "\\") != null) {

@@ -283,10 +281,7 @@ pub fn create(owner: *std.Build, options: Options) *Compile {
.obj => .Obj,
.exe, .@"test" => .Exe,
},
- .link_mode = if (options.linkage) |some| @as(std.builtin.LinkMode, switch (some) {
- .dynamic => .Dynamic,
- .static => .Static,
- }) else null,
+ .link_mode = options.linkage,
.version = options.version,
}) catch @panic("OOM");
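Because Step.Compile.Linkage is folded into std.builtin.LinkMode here, build scripts pass the lowercase .static/.dynamic literals directly. A minimal build.zig sketch under that assumption; the project name and source path are illustrative:

const std = @import("std");

pub fn build(b: *std.Build) void {
    const exe = b.addExecutable(.{
        .name = "demo",
        .root_source_file = .{ .path = "src/main.zig" },
        .target = b.standardTargetOptions(.{}),
        .optimize = b.standardOptimizeOption(.{}),
        // was Step.Compile.Linkage; the field is now ?std.builtin.LinkMode
        .linkage = .dynamic,
    });
    b.installArtifact(exe);
}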
@@ -531,11 +526,11 @@ pub fn dependsOnSystemLibrary(self: *const Compile, name: []const u8) bool {
}

pub fn isDynamicLibrary(self: *const Compile) bool {
- return self.kind == .lib and self.linkage == Linkage.dynamic;
+ return self.kind == .lib and self.linkage == .dynamic;
}

pub fn isStaticLibrary(self: *const Compile) bool {
- return self.kind == .lib and self.linkage != Linkage.dynamic;
+ return self.kind == .lib and self.linkage != .dynamic;
}

pub fn producesPdbFile(self: *Compile) bool {

@@ -988,7 +983,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
var prev_has_cflags = false;
var prev_has_rcflags = false;
var prev_search_strategy: Module.SystemLib.SearchStrategy = .paths_first;
- var prev_preferred_link_mode: std.builtin.LinkMode = .Dynamic;
+ var prev_preferred_link_mode: std.builtin.LinkMode = .dynamic;
// Track the number of positional arguments so that a nice error can be
// emitted if there is nothing to link.
var total_linker_objects: usize = @intFromBool(self.root_module.root_source_file != null);

@@ -1053,16 +1048,16 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
{
switch (system_lib.search_strategy) {
.no_fallback => switch (system_lib.preferred_link_mode) {
- .Dynamic => try zig_args.append("-search_dylibs_only"),
- .Static => try zig_args.append("-search_static_only"),
+ .dynamic => try zig_args.append("-search_dylibs_only"),
+ .static => try zig_args.append("-search_static_only"),
},
.paths_first => switch (system_lib.preferred_link_mode) {
- .Dynamic => try zig_args.append("-search_paths_first"),
- .Static => try zig_args.append("-search_paths_first_static"),
+ .dynamic => try zig_args.append("-search_paths_first"),
+ .static => try zig_args.append("-search_paths_first_static"),
},
.mode_first => switch (system_lib.preferred_link_mode) {
- .Dynamic => try zig_args.append("-search_dylibs_first"),
- .Static => try zig_args.append("-search_static_first"),
+ .dynamic => try zig_args.append("-search_dylibs_first"),
+ .static => try zig_args.append("-search_static_first"),
},
}
prev_search_strategy = system_lib.search_strategy;

@@ -1138,7 +1133,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
try zig_args.append(full_path_lib);
total_linker_objects += 1;

- if (other.linkage == Linkage.dynamic and
+ if (other.linkage == .dynamic and
self.rootModuleTarget().os.tag != .windows)
{
if (fs.path.dirname(full_path_lib)) |dirname| {

@@ -55,7 +55,7 @@ pub const AddExecutableOptions = struct {
version: ?std.SemanticVersion = null,
target: ?std.Build.ResolvedTarget = null,
optimize: ?std.builtin.OptimizeMode = null,
- linkage: ?Step.Compile.Linkage = null,
+ linkage: ?std.builtin.LinkMode = null,
};

pub fn getOutput(self: *TranslateC) std.Build.LazyPath {

@@ -95,9 +95,9 @@ pub const Node = struct {
/// This is the same as calling `start` and then `end` on the returned `Node`. Thread-safe.
pub fn completeOne(self: *Node) void {
if (self.parent) |parent| {
- @atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+ @atomicStore(?*Node, &parent.recently_updated_child, self, .release);
}
- _ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .Monotonic);
+ _ = @atomicRmw(usize, &self.unprotected_completed_items, .Add, 1, .monotonic);
self.context.maybeRefresh();
}

@@ -108,7 +108,7 @@ pub const Node = struct {
{
self.context.update_mutex.lock();
defer self.context.update_mutex.unlock();
- _ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .Monotonic, .Monotonic);
+ _ = @cmpxchgStrong(?*Node, &parent.recently_updated_child, self, null, .monotonic, .monotonic);
}
parent.completeOne();
} else {

@@ -122,7 +122,7 @@ pub const Node = struct {
/// Tell the parent node that this node is actively being worked on. Thread-safe.
pub fn activate(self: *Node) void {
if (self.parent) |parent| {
- @atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+ @atomicStore(?*Node, &parent.recently_updated_child, self, .release);
self.context.maybeRefresh();
}
}

@@ -134,9 +134,9 @@ pub const Node = struct {
defer progress.update_mutex.unlock();
self.name = name;
if (self.parent) |parent| {
- @atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+ @atomicStore(?*Node, &parent.recently_updated_child, self, .release);
if (parent.parent) |grand_parent| {
- @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .Release);
+ @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release);
}
if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer);
}

@@ -149,9 +149,9 @@ pub const Node = struct {
defer progress.update_mutex.unlock();
self.unit = unit;
if (self.parent) |parent| {
- @atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+ @atomicStore(?*Node, &parent.recently_updated_child, self, .release);
if (parent.parent) |grand_parent| {
- @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .Release);
+ @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .release);
}
if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer);
}

@@ -159,12 +159,12 @@ pub const Node = struct {

/// Thread-safe. 0 means unknown.
pub fn setEstimatedTotalItems(self: *Node, count: usize) void {
- @atomicStore(usize, &self.unprotected_estimated_total_items, count, .Monotonic);
+ @atomicStore(usize, &self.unprotected_estimated_total_items, count, .monotonic);
}

/// Thread-safe.
pub fn setCompletedItems(self: *Node, completed_items: usize) void {
- @atomicStore(usize, &self.unprotected_completed_items, completed_items, .Monotonic);
+ @atomicStore(usize, &self.unprotected_completed_items, completed_items, .monotonic);
}
};
@@ -313,8 +313,8 @@ fn refreshWithHeldLock(self: *Progress) void {
self.bufWrite(&end, "... ", .{});
}
need_ellipse = false;
- const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
- const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
+ const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic);
+ const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic);
const current_item = completed_items + 1;
if (node.name.len != 0 or eti > 0) {
if (node.name.len != 0) {

@@ -331,7 +331,7 @@ fn refreshWithHeldLock(self: *Progress) void {
need_ellipse = false;
}
}
- maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .Acquire);
+ maybe_node = @atomicLoad(?*Node, &node.recently_updated_child, .acquire);
}
if (need_ellipse) {
self.bufWrite(&end, "... ", .{});

@@ -510,7 +510,7 @@ const WindowsThreadImpl = struct {

fn entryFn(raw_ptr: windows.PVOID) callconv(.C) windows.DWORD {
const self: *@This() = @ptrCast(@alignCast(raw_ptr));
- defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
+ defer switch (self.thread.completion.swap(.completed, .seq_cst)) {
.running => {},
.completed => unreachable,
.detached => self.thread.free(),

@@ -563,7 +563,7 @@ const WindowsThreadImpl = struct {

fn detach(self: Impl) void {
windows.CloseHandle(self.thread.thread_handle);
- switch (self.thread.completion.swap(.detached, .SeqCst)) {
+ switch (self.thread.completion.swap(.detached, .seq_cst)) {
.running => {},
.completed => self.thread.free(),
.detached => unreachable,

@@ -573,7 +573,7 @@ const WindowsThreadImpl = struct {
fn join(self: Impl) void {
windows.WaitForSingleObjectEx(self.thread.thread_handle, windows.INFINITE, false) catch unreachable;
windows.CloseHandle(self.thread.thread_handle);
- assert(self.thread.completion.load(.SeqCst) == .completed);
+ assert(self.thread.completion.load(.seq_cst) == .completed);
self.thread.free();
}
};

@@ -780,11 +780,11 @@ const WasiThreadImpl = struct {
}

fn getHandle(self: Impl) ThreadHandle {
- return self.thread.tid.load(.SeqCst);
+ return self.thread.tid.load(.seq_cst);
}

fn detach(self: Impl) void {
- switch (self.thread.state.swap(.detached, .SeqCst)) {
+ switch (self.thread.state.swap(.detached, .seq_cst)) {
.running => {},
.completed => self.join(),
.detached => unreachable,

@@ -801,7 +801,7 @@ const WasiThreadImpl = struct {

var spin: u8 = 10;
while (true) {
- const tid = self.thread.tid.load(.SeqCst);
+ const tid = self.thread.tid.load(.seq_cst);
if (tid == 0) {
break;
}

@@ -901,7 +901,7 @@ const WasiThreadImpl = struct {
if (tid < 0) {
return error.SystemResources;
}
- instance.thread.tid.store(tid, .SeqCst);
+ instance.thread.tid.store(tid, .seq_cst);

return .{ .thread = &instance.thread };
}

@@ -914,12 +914,12 @@ const WasiThreadImpl = struct {
}
__set_stack_pointer(arg.thread.memory.ptr + arg.stack_offset);
__wasm_init_tls(arg.thread.memory.ptr + arg.tls_offset);
- @atomicStore(u32, &WasiThreadImpl.tls_thread_id, @intCast(tid), .SeqCst);
+ @atomicStore(u32, &WasiThreadImpl.tls_thread_id, @intCast(tid), .seq_cst);

// Finished bootstrapping, call user's procedure.
arg.call_back(arg.raw_ptr);

- switch (arg.thread.state.swap(.completed, .SeqCst)) {
+ switch (arg.thread.state.swap(.completed, .seq_cst)) {
.running => {
// reset the Thread ID
asm volatile (

@@ -1191,7 +1191,7 @@ const LinuxThreadImpl = struct {

fn entryFn(raw_arg: usize) callconv(.C) u8 {
const self = @as(*@This(), @ptrFromInt(raw_arg));
- defer switch (self.thread.completion.swap(.completed, .SeqCst)) {
+ defer switch (self.thread.completion.swap(.completed, .seq_cst)) {
.running => {},
.completed => unreachable,
.detached => self.thread.freeAndExit(),

@@ -1311,7 +1311,7 @@ const LinuxThreadImpl = struct {
}

fn detach(self: Impl) void {
- switch (self.thread.completion.swap(.detached, .SeqCst)) {
+ switch (self.thread.completion.swap(.detached, .seq_cst)) {
.running => {},
.completed => self.join(),
.detached => unreachable,

@@ -1323,7 +1323,7 @@ const LinuxThreadImpl = struct {

var spin: u8 = 10;
while (true) {
- const tid = self.thread.child_tid.load(.SeqCst);
+ const tid = self.thread.child_tid.load(.seq_cst);
if (tid == 0) {
break;
}
@@ -163,7 +163,7 @@ const WindowsImpl = struct {

if (comptime builtin.mode == .Debug) {
// The internal state of the DebugMutex needs to be handled here as well.
- mutex.impl.locking_thread.store(0, .Unordered);
+ mutex.impl.locking_thread.store(0, .unordered);
}
const rc = os.windows.kernel32.SleepConditionVariableSRW(
&self.condition,

@@ -173,7 +173,7 @@ const WindowsImpl = struct {
);
if (comptime builtin.mode == .Debug) {
// The internal state of the DebugMutex needs to be handled here as well.
- mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .Unordered);
+ mutex.impl.locking_thread.store(std.Thread.getCurrentId(), .unordered);
}

// Return error.Timeout if we know the timeout elapsed correctly.

@@ -212,8 +212,8 @@ const FutexImpl = struct {
// - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed the state update + the epoch change)
//
// Acquire barrier to ensure the epoch load happens before the state load.
- var epoch = self.epoch.load(.Acquire);
- var state = self.state.fetchAdd(one_waiter, .Monotonic);
+ var epoch = self.epoch.load(.acquire);
+ var state = self.state.fetchAdd(one_waiter, .monotonic);
assert(state & waiter_mask != waiter_mask);
state += one_waiter;

@@ -231,30 +231,30 @@ const FutexImpl = struct {
// Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return.
while (state & signal_mask != 0) {
const new_state = state - one_waiter - one_signal;
- state = self.state.cmpxchgWeak(state, new_state, .Acquire, .Monotonic) orelse return;
+ state = self.state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return;
}

// Remove the waiter we added and officially return timed out.
const new_state = state - one_waiter;
- state = self.state.cmpxchgWeak(state, new_state, .Monotonic, .Monotonic) orelse return err;
+ state = self.state.cmpxchgWeak(state, new_state, .monotonic, .monotonic) orelse return err;
}
},
};

- epoch = self.epoch.load(.Acquire);
- state = self.state.load(.Monotonic);
+ epoch = self.epoch.load(.acquire);
+ state = self.state.load(.monotonic);

// Try to wake up by consuming a signal and decremented the waiter we added previously.
// Acquire barrier ensures code before the wake() which added the signal happens before we decrement it and return.
while (state & signal_mask != 0) {
const new_state = state - one_waiter - one_signal;
- state = self.state.cmpxchgWeak(state, new_state, .Acquire, .Monotonic) orelse return;
+ state = self.state.cmpxchgWeak(state, new_state, .acquire, .monotonic) orelse return;
}
}
}

fn wake(self: *Impl, comptime notify: Notify) void {
- var state = self.state.load(.Monotonic);
+ var state = self.state.load(.monotonic);
while (true) {
const waiters = (state & waiter_mask) / one_waiter;
const signals = (state & signal_mask) / one_signal;

@@ -275,7 +275,7 @@ const FutexImpl = struct {
// Reserve the amount of waiters to wake by incrementing the signals count.
// Release barrier ensures code before the wake() happens before the signal it posted and consumed by the wait() threads.
const new_state = state + (one_signal * to_wake);
- state = self.state.cmpxchgWeak(state, new_state, .Release, .Monotonic) orelse {
+ state = self.state.cmpxchgWeak(state, new_state, .release, .monotonic) orelse {
// Wake up the waiting threads we reserved above by changing the epoch value.
// NOTE: a waiting thread could miss a wake up if *exactly* ((1<<32)-1) wake()s happen between it observing the epoch and sleeping on it.
// This is very unlikely due to how many precise amount of Futex.wake() calls that would be between the waiting thread's potential preemption.

@@ -288,7 +288,7 @@ const FutexImpl = struct {
// - T1: s = LOAD(&state)
// - T2: UPDATE(&state, signal) + FUTEX_WAKE(&epoch)
// - T1: s & signals == 0 -> FUTEX_WAIT(&epoch, e) (missed both epoch change and state change)
- _ = self.epoch.fetchAdd(1, .Release);
+ _ = self.epoch.fetchAdd(1, .release);
Futex.wake(&self.epoch, to_wake);
return;
};

@@ -40,7 +40,7 @@ pub fn timedWait(ptr: *const atomic.Value(u32), expect: u32, timeout_ns: u64) er

// Avoid calling into the OS for no-op timeouts.
if (timeout_ns == 0) {
- if (ptr.load(.SeqCst) != expect) return;
+ if (ptr.load(.seq_cst) != expect) return;
return error.Timeout;
}

@@ -783,16 +783,16 @@ const PosixImpl = struct {
// - T1: bumps pending waiters (was reordered after the ptr == expect check)
// - T1: goes to sleep and misses both the ptr change and T2's wake up
//
- // SeqCst as Acquire barrier to ensure the announcement happens before the ptr check below.
- // SeqCst as shared modification order to form a happens-before edge with the fence(.SeqCst)+load() in wake().
- var pending = bucket.pending.fetchAdd(1, .SeqCst);
+ // seq_cst as Acquire barrier to ensure the announcement happens before the ptr check below.
+ // seq_cst as shared modification order to form a happens-before edge with the fence(.seq_cst)+load() in wake().
+ var pending = bucket.pending.fetchAdd(1, .seq_cst);
assert(pending < std.math.maxInt(usize));

// If the wait gets cancelled, remove the pending count we previously added.
// This is done outside the mutex lock to keep the critical section short in case of contention.
var cancelled = false;
defer if (cancelled) {
- pending = bucket.pending.fetchSub(1, .Monotonic);
+ pending = bucket.pending.fetchSub(1, .monotonic);
assert(pending > 0);
};

@@ -850,11 +850,11 @@ const PosixImpl = struct {
// but the RMW operation unconditionally marks the cache-line as modified for others causing unnecessary fetching/contention.
//
// Instead we opt to do a full-fence + load instead which avoids taking ownership of the cache-line.
- // fence(SeqCst) effectively converts the ptr update to SeqCst and the pending load to SeqCst: creating a Store-Load barrier.
+ // fence(seq_cst) effectively converts the ptr update to seq_cst and the pending load to seq_cst: creating a Store-Load barrier.
//
- // The pending count increment in wait() must also now use SeqCst for the update + this pending load
- // to be in the same modification order as our load isn't using Release/Acquire to guarantee it.
- bucket.pending.fence(.SeqCst);
+ // The pending count increment in wait() must also now use seq_cst for the update + this pending load
+ // to be in the same modification order as our load isn't using release/acquire to guarantee it.
+ bucket.pending.fence(.seq_cst);
if (bucket.pending.load(.Monotonic) == 0) {
return;
}

@@ -912,7 +912,7 @@ test "signaling" {
current: u32 = 0,

fn hit(self: *@This()) void {
- _ = self.value.fetchAdd(1, .Release);
+ _ = self.value.fetchAdd(1, .release);
Futex.wake(&self.value, 1);
}

@@ -921,7 +921,7 @@ test "signaling" {
// Wait for the value to change from hit()
var new_value: u32 = undefined;
while (true) {
- new_value = self.value.load(.Acquire);
+ new_value = self.value.load(.acquire);
if (new_value != self.current) break;
Futex.wait(&self.value, self.current);
}

@@ -968,7 +968,7 @@ test "broadcasting" {
fn wait(self: *@This()) !void {
// Decrement the counter.
// Release ensures stuff before this barrier.wait() happens before the last one.
- const count = self.count.fetchSub(1, .Release);
+ const count = self.count.fetchSub(1, .release);
try testing.expect(count <= num_threads);
try testing.expect(count > 0);

@@ -976,15 +976,15 @@ test "broadcasting" {
// Acquire for the last counter ensures stuff before previous barrier.wait()s happened before it.
// Release on futex update ensures stuff before all barrier.wait()'s happens before they all return.
if (count - 1 == 0) {
- _ = self.count.load(.Acquire); // TODO: could be fence(Acquire) if not for TSAN
- self.futex.store(1, .Release);
+ _ = self.count.load(.acquire); // TODO: could be fence(acquire) if not for TSAN
+ self.futex.store(1, .release);
Futex.wake(&self.futex, num_threads - 1);
return;
}

// Other threads wait until last counter wakes them up.
// Acquire on futex synchronizes with last barrier count to ensure stuff before all barrier.wait()'s happen before us.
- while (self.futex.load(.Acquire) == 0) {
+ while (self.futex.load(.acquire) == 0) {
Futex.wait(&self.futex, 0);
}
}
@@ -72,23 +72,23 @@ const DebugImpl = struct {
inline fn tryLock(self: *@This()) bool {
const locking = self.impl.tryLock();
if (locking) {
- self.locking_thread.store(Thread.getCurrentId(), .Unordered);
+ self.locking_thread.store(Thread.getCurrentId(), .unordered);
}
return locking;
}

inline fn lock(self: *@This()) void {
const current_id = Thread.getCurrentId();
- if (self.locking_thread.load(.Unordered) == current_id and current_id != 0) {
+ if (self.locking_thread.load(.unordered) == current_id and current_id != 0) {
@panic("Deadlock detected");
}
self.impl.lock();
- self.locking_thread.store(current_id, .Unordered);
+ self.locking_thread.store(current_id, .unordered);
}

inline fn unlock(self: *@This()) void {
- assert(self.locking_thread.load(.Unordered) == Thread.getCurrentId());
- self.locking_thread.store(0, .Unordered);
+ assert(self.locking_thread.load(.unordered) == Thread.getCurrentId());
+ self.locking_thread.store(0, .unordered);
self.impl.unlock();
}
};

@@ -167,12 +167,12 @@ const FutexImpl = struct {
// - `lock bts` is smaller instruction-wise which makes it better for inlining
if (comptime builtin.target.cpu.arch.isX86()) {
const locked_bit = @ctz(locked);
- return self.state.bitSet(locked_bit, .Acquire) == 0;
+ return self.state.bitSet(locked_bit, .acquire) == 0;
}

// Acquire barrier ensures grabbing the lock happens before the critical section
// and that the previous lock holder's critical section happens before we grab the lock.
- return self.state.cmpxchgWeak(unlocked, locked, .Acquire, .Monotonic) == null;
+ return self.state.cmpxchgWeak(unlocked, locked, .acquire, .monotonic) == null;
}

fn lockSlow(self: *@This()) void {

@@ -180,7 +180,7 @@ const FutexImpl = struct {

// Avoid doing an atomic swap below if we already know the state is contended.
// An atomic swap unconditionally stores which marks the cache-line as modified unnecessarily.
- if (self.state.load(.Monotonic) == contended) {
+ if (self.state.load(.monotonic) == contended) {
Futex.wait(&self.state, contended);
}

@@ -193,7 +193,7 @@ const FutexImpl = struct {
//
// Acquire barrier ensures grabbing the lock happens before the critical section
// and that the previous lock holder's critical section happens before we grab the lock.
- while (self.state.swap(contended, .Acquire) != unlocked) {
+ while (self.state.swap(contended, .acquire) != unlocked) {
Futex.wait(&self.state, contended);
}
}

@@ -206,7 +206,7 @@ const FutexImpl = struct {
//
// Release barrier ensures the critical section happens before we let go of the lock
// and that our critical section happens before the next lock holder grabs the lock.
- const state = self.state.swap(unlocked, .Release);
+ const state = self.state.swap(unlocked, .release);
assert(state != unlocked);

if (state == contended) {

@@ -96,7 +96,7 @@ const FutexImpl = struct {

fn isSet(self: *const Impl) bool {
// Acquire barrier ensures memory accesses before set() happen before we return true.
- return self.state.load(.Acquire) == is_set;
+ return self.state.load(.acquire) == is_set;
}

fn wait(self: *Impl, timeout: ?u64) error{Timeout}!void {

@@ -112,9 +112,9 @@ const FutexImpl = struct {
// Try to set the state from `unset` to `waiting` to indicate
// to the set() thread that others are blocked on the ResetEvent.
// We avoid using any strict barriers until the end when we know the ResetEvent is set.
- var state = self.state.load(.Monotonic);
+ var state = self.state.load(.monotonic);
if (state == unset) {
- state = self.state.cmpxchgStrong(state, waiting, .Monotonic, .Monotonic) orelse waiting;
+ state = self.state.cmpxchgStrong(state, waiting, .monotonic, .monotonic) orelse waiting;
}

// Wait until the ResetEvent is set since the state is waiting.

@@ -124,7 +124,7 @@ const FutexImpl = struct {
const wait_result = futex_deadline.wait(&self.state, waiting);

// Check if the ResetEvent was set before possibly reporting error.Timeout below.
- state = self.state.load(.Monotonic);
+ state = self.state.load(.monotonic);
if (state != waiting) {
break;
}

@@ -135,25 +135,25 @@ const FutexImpl = struct {

// Acquire barrier ensures memory accesses before set() happen before we return.
assert(state == is_set);
- self.state.fence(.Acquire);
+ self.state.fence(.acquire);
}

fn set(self: *Impl) void {
// Quick check if the ResetEvent is already set before doing the atomic swap below.
// set() could be getting called quite often and multiple threads calling swap() increases contention unnecessarily.
- if (self.state.load(.Monotonic) == is_set) {
+ if (self.state.load(.monotonic) == is_set) {
return;
}

// Mark the ResetEvent as set and unblock all waiters waiting on it if any.
// Release barrier ensures memory accesses before set() happen before the ResetEvent is observed to be "set".
- if (self.state.swap(is_set, .Release) == waiting) {
+ if (self.state.swap(is_set, .release) == waiting) {
Futex.wake(&self.state, std.math.maxInt(u32));
}
}

fn reset(self: *Impl) void {
- self.state.store(unset, .Monotonic);
+ self.state.store(unset, .monotonic);
}
};
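std.atomic.Value takes the same renamed orderings, so the usual publish/consume pairing is now spelled .release/.acquire. A small illustrative sketch, not taken from the diff:

const std = @import("std");

var payload: u32 = 0;
var ready = std.atomic.Value(bool).init(false);

fn publish(value: u32) void {
    payload = value;
    // .release (was .Release): writes above happen-before a matching .acquire load.
    ready.store(true, .release);
}

fn consume() ?u32 {
    // .acquire (was .Acquire): pairs with the .release store in publish().
    if (!ready.load(.acquire)) return null;
    return payload;
}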
@@ -254,7 +254,7 @@ test "broadcast" {
counter: std.atomic.Value(usize) = std.atomic.Value(usize).init(num_threads),

fn wait(self: *@This()) void {
- if (self.counter.fetchSub(1, .AcqRel) == 1) {
+ if (self.counter.fetchSub(1, .acq_rel) == 1) {
self.event.set();
}
}

@@ -179,9 +179,9 @@ pub const DefaultRwLock = struct {

pub fn tryLock(rwl: *DefaultRwLock) bool {
if (rwl.mutex.tryLock()) {
- const state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ const state = @atomicLoad(usize, &rwl.state, .seq_cst);
if (state & READER_MASK == 0) {
- _ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Or, IS_WRITING, .seq_cst);
return true;
}

@@ -192,34 +192,34 @@ pub const DefaultRwLock = struct {
}

pub fn lock(rwl: *DefaultRwLock) void {
- _ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, WRITER, .seq_cst);
rwl.mutex.lock();

- const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .SeqCst);
+ const state = @atomicRmw(usize, &rwl.state, .Add, IS_WRITING -% WRITER, .seq_cst);
if (state & READER_MASK != 0)
rwl.semaphore.wait();
}

pub fn unlock(rwl: *DefaultRwLock) void {
- _ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .And, ~IS_WRITING, .seq_cst);
rwl.mutex.unlock();
}

pub fn tryLockShared(rwl: *DefaultRwLock) bool {
- const state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ const state = @atomicLoad(usize, &rwl.state, .seq_cst);
if (state & (IS_WRITING | WRITER_MASK) == 0) {
_ = @cmpxchgStrong(
usize,
&rwl.state,
state,
state + READER,
- .SeqCst,
- .SeqCst,
+ .seq_cst,
+ .seq_cst,
) orelse return true;
}

if (rwl.mutex.tryLock()) {
- _ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
rwl.mutex.unlock();
return true;
}

@@ -228,25 +228,25 @@ pub const DefaultRwLock = struct {
}

pub fn lockShared(rwl: *DefaultRwLock) void {
- var state = @atomicLoad(usize, &rwl.state, .SeqCst);
+ var state = @atomicLoad(usize, &rwl.state, .seq_cst);
while (state & (IS_WRITING | WRITER_MASK) == 0) {
state = @cmpxchgWeak(
usize,
&rwl.state,
state,
state + READER,
- .SeqCst,
- .SeqCst,
+ .seq_cst,
+ .seq_cst,
) orelse return;
}

rwl.mutex.lock();
- _ = @atomicRmw(usize, &rwl.state, .Add, READER, .SeqCst);
+ _ = @atomicRmw(usize, &rwl.state, .Add, READER, .seq_cst);
rwl.mutex.unlock();
}

pub fn unlockShared(rwl: *DefaultRwLock) void {
- const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .SeqCst);
+ const state = @atomicRmw(usize, &rwl.state, .Sub, READER, .seq_cst);

if ((state & READER_MASK == READER) and (state & IS_WRITING != 0))
rwl.semaphore.post();

@@ -318,12 +318,12 @@ test "concurrent access" {
self.rwl.lockShared();
defer self.rwl.unlockShared();

- if (self.writes >= num_writes or self.reads.load(.Unordered) >= num_reads)
+ if (self.writes >= num_writes or self.reads.load(.unordered) >= num_reads)
break;

try self.check();

- _ = self.reads.fetchAdd(1, .Monotonic);
+ _ = self.reads.fetchAdd(1, .monotonic);
}
}
@@ -10,22 +10,22 @@ state: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
event: std.Thread.ResetEvent = .{},

pub fn start(self: *WaitGroup) void {
- const state = self.state.fetchAdd(one_pending, .Monotonic);
+ const state = self.state.fetchAdd(one_pending, .monotonic);
assert((state / one_pending) < (std.math.maxInt(usize) / one_pending));
}

pub fn finish(self: *WaitGroup) void {
- const state = self.state.fetchSub(one_pending, .Release);
+ const state = self.state.fetchSub(one_pending, .release);
assert((state / one_pending) > 0);

if (state == (one_pending | is_waiting)) {
- self.state.fence(.Acquire);
+ self.state.fence(.acquire);
self.event.set();
}
}

pub fn wait(self: *WaitGroup) void {
- const state = self.state.fetchAdd(is_waiting, .Acquire);
+ const state = self.state.fetchAdd(is_waiting, .acquire);
assert(state & is_waiting == 0);

if ((state / one_pending) > 0) {

@@ -34,12 +34,12 @@ pub fn wait(self: *WaitGroup) void {
}

pub fn reset(self: *WaitGroup) void {
- self.state.store(0, .Monotonic);
+ self.state.store(0, .monotonic);
self.event.reset();
}

pub fn isDone(wg: *WaitGroup) bool {
- const state = wg.state.load(.Acquire);
+ const state = wg.state.load(.acquire);
assert(state & is_waiting == 0);

return (state / one_pending) == 0;

@@ -23,10 +23,10 @@ pub fn Value(comptime T: type) type {

const addr: *anyopaque = self;
return switch (order) {
- .Unordered, .Monotonic => @compileError(@tagName(order) ++ " only applies to atomic loads and stores"),
- .Acquire => tsan.__tsan_acquire(addr),
- .Release => tsan.__tsan_release(addr),
- .AcqRel, .SeqCst => {
+ .unordered, .monotonic => @compileError(@tagName(order) ++ " only applies to atomic loads and stores"),
+ .acquire => tsan.__tsan_acquire(addr),
+ .release => tsan.__tsan_release(addr),
+ .acq_rel, .seq_cst => {
tsan.__tsan_acquire(addr);
tsan.__tsan_release(addr);
},

@@ -149,20 +149,20 @@ test Value {

fn ref(rc: *RefCount) void {
// No ordering necessary; just updating a counter.
- _ = rc.count.fetchAdd(1, .Monotonic);
+ _ = rc.count.fetchAdd(1, .monotonic);
}

fn unref(rc: *RefCount) void {
// Release ensures code before unref() happens-before the
// count is decremented as dropFn could be called by then.
- if (rc.count.fetchSub(1, .Release) == 1) {
- // Acquire ensures count decrement and code before
+ if (rc.count.fetchSub(1, .release) == 1) {
+ // acquire ensures count decrement and code before
// previous unrefs()s happens-before we call dropFn
// below.
// Another alternative is to use .AcqRel on the
// fetchSub count decrement but it's extra barrier in
// possibly hot path.
- rc.count.fence(.Acquire);
+ rc.count.fence(.acquire);
(rc.dropFn)(rc);
}
}
@@ -182,118 +182,118 @@ test Value {

test "Value.swap" {
var x = Value(usize).init(5);
- try testing.expectEqual(@as(usize, 5), x.swap(10, .SeqCst));
- try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 5), x.swap(10, .seq_cst));
+ try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));

const E = enum(usize) { a, b, c };
var y = Value(E).init(.c);
- try testing.expectEqual(E.c, y.swap(.a, .SeqCst));
- try testing.expectEqual(E.a, y.load(.SeqCst));
+ try testing.expectEqual(E.c, y.swap(.a, .seq_cst));
+ try testing.expectEqual(E.a, y.load(.seq_cst));

var z = Value(f32).init(5.0);
- try testing.expectEqual(@as(f32, 5.0), z.swap(10.0, .SeqCst));
- try testing.expectEqual(@as(f32, 10.0), z.load(.SeqCst));
+ try testing.expectEqual(@as(f32, 5.0), z.swap(10.0, .seq_cst));
+ try testing.expectEqual(@as(f32, 10.0), z.load(.seq_cst));

var a = Value(bool).init(false);
- try testing.expectEqual(false, a.swap(true, .SeqCst));
- try testing.expectEqual(true, a.load(.SeqCst));
+ try testing.expectEqual(false, a.swap(true, .seq_cst));
+ try testing.expectEqual(true, a.load(.seq_cst));

var b = Value(?*u8).init(null);
- try testing.expectEqual(@as(?*u8, null), b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), .SeqCst));
- try testing.expectEqual(@as(?*u8, @ptrFromInt(@alignOf(u8))), b.load(.SeqCst));
+ try testing.expectEqual(@as(?*u8, null), b.swap(@as(?*u8, @ptrFromInt(@alignOf(u8))), .seq_cst));
+ try testing.expectEqual(@as(?*u8, @ptrFromInt(@alignOf(u8))), b.load(.seq_cst));
}

test "Value.store" {
var x = Value(usize).init(5);
- x.store(10, .SeqCst);
- try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
+ x.store(10, .seq_cst);
+ try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));
}

test "Value.cmpxchgWeak" {
var x = Value(usize).init(0);

- try testing.expectEqual(@as(?usize, 0), x.cmpxchgWeak(1, 0, .SeqCst, .SeqCst));
- try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
+ try testing.expectEqual(@as(?usize, 0), x.cmpxchgWeak(1, 0, .seq_cst, .seq_cst));
+ try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));

- while (x.cmpxchgWeak(0, 1, .SeqCst, .SeqCst)) |_| {}
- try testing.expectEqual(@as(usize, 1), x.load(.SeqCst));
+ while (x.cmpxchgWeak(0, 1, .seq_cst, .seq_cst)) |_| {}
+ try testing.expectEqual(@as(usize, 1), x.load(.seq_cst));

- while (x.cmpxchgWeak(1, 0, .SeqCst, .SeqCst)) |_| {}
- try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
+ while (x.cmpxchgWeak(1, 0, .seq_cst, .seq_cst)) |_| {}
+ try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
}

test "Value.cmpxchgStrong" {
var x = Value(usize).init(0);
- try testing.expectEqual(@as(?usize, 0), x.cmpxchgStrong(1, 0, .SeqCst, .SeqCst));
- try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
- try testing.expectEqual(@as(?usize, null), x.cmpxchgStrong(0, 1, .SeqCst, .SeqCst));
- try testing.expectEqual(@as(usize, 1), x.load(.SeqCst));
- try testing.expectEqual(@as(?usize, null), x.cmpxchgStrong(1, 0, .SeqCst, .SeqCst));
- try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
+ try testing.expectEqual(@as(?usize, 0), x.cmpxchgStrong(1, 0, .seq_cst, .seq_cst));
+ try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
+ try testing.expectEqual(@as(?usize, null), x.cmpxchgStrong(0, 1, .seq_cst, .seq_cst));
+ try testing.expectEqual(@as(usize, 1), x.load(.seq_cst));
+ try testing.expectEqual(@as(?usize, null), x.cmpxchgStrong(1, 0, .seq_cst, .seq_cst));
+ try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
}

test "Value.fetchAdd" {
var x = Value(usize).init(5);
- try testing.expectEqual(@as(usize, 5), x.fetchAdd(5, .SeqCst));
- try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
- try testing.expectEqual(@as(usize, 10), x.fetchAdd(std.math.maxInt(usize), .SeqCst));
- try testing.expectEqual(@as(usize, 9), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 5), x.fetchAdd(5, .seq_cst));
+ try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));
+ try testing.expectEqual(@as(usize, 10), x.fetchAdd(std.math.maxInt(usize), .seq_cst));
+ try testing.expectEqual(@as(usize, 9), x.load(.seq_cst));
}

test "Value.fetchSub" {
var x = Value(usize).init(5);
- try testing.expectEqual(@as(usize, 5), x.fetchSub(5, .SeqCst));
- try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
- try testing.expectEqual(@as(usize, 0), x.fetchSub(1, .SeqCst));
- try testing.expectEqual(@as(usize, std.math.maxInt(usize)), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 5), x.fetchSub(5, .seq_cst));
+ try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
+ try testing.expectEqual(@as(usize, 0), x.fetchSub(1, .seq_cst));
+ try testing.expectEqual(@as(usize, std.math.maxInt(usize)), x.load(.seq_cst));
}

test "Value.fetchMin" {
var x = Value(usize).init(5);
- try testing.expectEqual(@as(usize, 5), x.fetchMin(0, .SeqCst));
- try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
- try testing.expectEqual(@as(usize, 0), x.fetchMin(10, .SeqCst));
- try testing.expectEqual(@as(usize, 0), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 5), x.fetchMin(0, .seq_cst));
+ try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
+ try testing.expectEqual(@as(usize, 0), x.fetchMin(10, .seq_cst));
+ try testing.expectEqual(@as(usize, 0), x.load(.seq_cst));
}

test "Value.fetchMax" {
var x = Value(usize).init(5);
- try testing.expectEqual(@as(usize, 5), x.fetchMax(10, .SeqCst));
- try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
- try testing.expectEqual(@as(usize, 10), x.fetchMax(5, .SeqCst));
- try testing.expectEqual(@as(usize, 10), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 5), x.fetchMax(10, .seq_cst));
+ try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));
+ try testing.expectEqual(@as(usize, 10), x.fetchMax(5, .seq_cst));
+ try testing.expectEqual(@as(usize, 10), x.load(.seq_cst));
}

test "Value.fetchAnd" {
var x = Value(usize).init(0b11);
- try testing.expectEqual(@as(usize, 0b11), x.fetchAnd(0b10, .SeqCst));
- try testing.expectEqual(@as(usize, 0b10), x.load(.SeqCst));
- try testing.expectEqual(@as(usize, 0b10), x.fetchAnd(0b00, .SeqCst));
- try testing.expectEqual(@as(usize, 0b00), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 0b11), x.fetchAnd(0b10, .seq_cst));
+ try testing.expectEqual(@as(usize, 0b10), x.load(.seq_cst));
+ try testing.expectEqual(@as(usize, 0b10), x.fetchAnd(0b00, .seq_cst));
+ try testing.expectEqual(@as(usize, 0b00), x.load(.seq_cst));
}

test "Value.fetchNand" {
var x = Value(usize).init(0b11);
- try testing.expectEqual(@as(usize, 0b11), x.fetchNand(0b10, .SeqCst));
- try testing.expectEqual(~@as(usize, 0b10), x.load(.SeqCst));
- try testing.expectEqual(~@as(usize, 0b10), x.fetchNand(0b00, .SeqCst));
- try testing.expectEqual(~@as(usize, 0b00), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 0b11), x.fetchNand(0b10, .seq_cst));
+ try testing.expectEqual(~@as(usize, 0b10), x.load(.seq_cst));
+ try testing.expectEqual(~@as(usize, 0b10), x.fetchNand(0b00, .seq_cst));
+ try testing.expectEqual(~@as(usize, 0b00), x.load(.seq_cst));
}

test "Value.fetchOr" {
var x = Value(usize).init(0b11);
- try testing.expectEqual(@as(usize, 0b11), x.fetchOr(0b100, .SeqCst));
- try testing.expectEqual(@as(usize, 0b111), x.load(.SeqCst));
- try testing.expectEqual(@as(usize, 0b111), x.fetchOr(0b010, .SeqCst));
- try testing.expectEqual(@as(usize, 0b111), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 0b11), x.fetchOr(0b100, .seq_cst));
+ try testing.expectEqual(@as(usize, 0b111), x.load(.seq_cst));
+ try testing.expectEqual(@as(usize, 0b111), x.fetchOr(0b010, .seq_cst));
+ try testing.expectEqual(@as(usize, 0b111), x.load(.seq_cst));
}

test "Value.fetchXor" {
var x = Value(usize).init(0b11);
- try testing.expectEqual(@as(usize, 0b11), x.fetchXor(0b10, .SeqCst));
- try testing.expectEqual(@as(usize, 0b01), x.load(.SeqCst));
- try testing.expectEqual(@as(usize, 0b01), x.fetchXor(0b01, .SeqCst));
- try testing.expectEqual(@as(usize, 0b00), x.load(.SeqCst));
+ try testing.expectEqual(@as(usize, 0b11), x.fetchXor(0b10, .seq_cst));
+ try testing.expectEqual(@as(usize, 0b01), x.load(.seq_cst));
+ try testing.expectEqual(@as(usize, 0b01), x.fetchXor(0b01, .seq_cst));
+ try testing.expectEqual(@as(usize, 0b00), x.load(.seq_cst));
}
test "Value.bitSet" {
|
||||
@ -304,19 +304,19 @@ test "Value.bitSet" {
|
||||
const mask = @as(usize, 1) << bit;
|
||||
|
||||
// setting the bit should change the bit
|
||||
try testing.expect(x.load(.SeqCst) & mask == 0);
|
||||
try testing.expectEqual(@as(u1, 0), x.bitSet(bit, .SeqCst));
|
||||
try testing.expect(x.load(.SeqCst) & mask != 0);
|
||||
try testing.expect(x.load(.seq_cst) & mask == 0);
|
||||
try testing.expectEqual(@as(u1, 0), x.bitSet(bit, .seq_cst));
|
||||
try testing.expect(x.load(.seq_cst) & mask != 0);
|
||||
|
||||
// setting it again shouldn't change the bit
|
||||
try testing.expectEqual(@as(u1, 1), x.bitSet(bit, .SeqCst));
|
||||
try testing.expect(x.load(.SeqCst) & mask != 0);
|
||||
try testing.expectEqual(@as(u1, 1), x.bitSet(bit, .seq_cst));
|
||||
try testing.expect(x.load(.seq_cst) & mask != 0);
|
||||
|
||||
// all the previous bits should have not changed (still be set)
|
||||
for (0..bit_index) |prev_bit_index| {
|
||||
const prev_bit = @as(std.math.Log2Int(usize), @intCast(prev_bit_index));
|
||||
const prev_mask = @as(usize, 1) << prev_bit;
|
||||
try testing.expect(x.load(.SeqCst) & prev_mask != 0);
|
||||
try testing.expect(x.load(.seq_cst) & prev_mask != 0);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -330,19 +330,19 @@ test "Value.bitReset" {
x.raw |= mask;

// unsetting the bit should change the bit
try testing.expect(x.load(.SeqCst) & mask != 0);
try testing.expectEqual(@as(u1, 1), x.bitReset(bit, .SeqCst));
try testing.expect(x.load(.SeqCst) & mask == 0);
try testing.expect(x.load(.seq_cst) & mask != 0);
try testing.expectEqual(@as(u1, 1), x.bitReset(bit, .seq_cst));
try testing.expect(x.load(.seq_cst) & mask == 0);

// unsetting it again shouldn't change the bit
try testing.expectEqual(@as(u1, 0), x.bitReset(bit, .SeqCst));
try testing.expect(x.load(.SeqCst) & mask == 0);
try testing.expectEqual(@as(u1, 0), x.bitReset(bit, .seq_cst));
try testing.expect(x.load(.seq_cst) & mask == 0);

// all the previous bits should have not changed (still be reset)
for (0..bit_index) |prev_bit_index| {
const prev_bit = @as(std.math.Log2Int(usize), @intCast(prev_bit_index));
const prev_mask = @as(usize, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
try testing.expect(x.load(.seq_cst) & prev_mask == 0);
}
}
}
@ -355,19 +355,19 @@ test "Value.bitToggle" {
const mask = @as(usize, 1) << bit;

// toggling the bit should change the bit
try testing.expect(x.load(.SeqCst) & mask == 0);
try testing.expectEqual(@as(u1, 0), x.bitToggle(bit, .SeqCst));
try testing.expect(x.load(.SeqCst) & mask != 0);
try testing.expect(x.load(.seq_cst) & mask == 0);
try testing.expectEqual(@as(u1, 0), x.bitToggle(bit, .seq_cst));
try testing.expect(x.load(.seq_cst) & mask != 0);

// toggling it again *should* change the bit
try testing.expectEqual(@as(u1, 1), x.bitToggle(bit, .SeqCst));
try testing.expect(x.load(.SeqCst) & mask == 0);
try testing.expectEqual(@as(u1, 1), x.bitToggle(bit, .seq_cst));
try testing.expect(x.load(.seq_cst) & mask == 0);

// all the previous bits should have not changed (still be toggled back)
for (0..bit_index) |prev_bit_index| {
const prev_bit = @as(std.math.Log2Int(usize), @intCast(prev_bit_index));
const prev_mask = @as(usize, 1) << prev_bit;
try testing.expect(x.load(.SeqCst) & prev_mask == 0);
try testing.expect(x.load(.seq_cst) & prev_mask == 0);
}
}
}

@ -64,10 +64,10 @@ pub const StackTrace = struct {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const GlobalLinkage = enum {
Internal,
Strong,
Weak,
LinkOnce,
internal,
strong,
weak,
link_once,
};

/// This data structure is used by the Zig language code generation and
@ -81,12 +81,12 @@ pub const SymbolVisibility = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const AtomicOrder = enum {
Unordered,
Monotonic,
Acquire,
Release,
AcqRel,
SeqCst,
unordered,
monotonic,
acquire,
release,
acq_rel,
seq_cst,
};
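
Illustrative aside, not part of the diff: with the renamed AtomicOrder fields, call sites spell the same orderings in lowercase. A minimal sketch (hypothetical test, assuming the post-rename std.builtin):

test "hypothetical: lowercase atomic orderings" {
    var counter: u32 = 0;
    _ = @atomicRmw(u32, &counter, .Add, 1, .seq_cst); // formerly .SeqCst
    const v = @atomicLoad(u32, &counter, .acquire); // formerly .Acquire
    @atomicStore(u32, &counter, v + 1, .release); // formerly .Release
}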

/// This data structure is used by the Zig language code generation and
@ -334,9 +334,9 @@ pub const Type = union(enum) {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const ContainerLayout = enum(u2) {
Auto,
Extern,
Packed,
auto,
@"extern",
@"packed",
};
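
Illustrative aside, not part of the diff: because extern and packed are keywords, the lowercase ContainerLayout tags are written as quoted identifiers at use sites. A minimal sketch (hypothetical type P, assuming the post-rename std.builtin):

const std = @import("std");

const P = packed struct { a: u4, b: u4 };

comptime {
    // A packed struct now reports .@"packed" rather than .Packed.
    std.debug.assert(@typeInfo(P).Struct.layout == .@"packed");
}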

/// This data structure is used by the Zig language code generation and
@ -353,7 +353,7 @@ pub const Type = union(enum) {
/// therefore must be kept in sync with the compiler implementation.
pub const Struct = struct {
layout: ContainerLayout,
/// Only valid if layout is .Packed
/// Only valid if layout is .@"packed"
backing_integer: ?type = null,
fields: []const StructField,
decls: []const Declaration,
@ -471,8 +471,8 @@ pub const Type = union(enum) {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const FloatMode = enum {
Strict,
Optimized,
strict,
optimized,
};

/// This data structure is used by the Zig language code generation and
@ -500,8 +500,8 @@ pub const OutputMode = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const LinkMode = enum {
Static,
Dynamic,
static,
dynamic,
};

/// This data structure is used by the Zig language code generation and
@ -659,7 +659,7 @@ pub const PrefetchOptions = struct {
/// therefore must be kept in sync with the compiler implementation.
pub const ExportOptions = struct {
name: []const u8,
linkage: GlobalLinkage = .Strong,
linkage: GlobalLinkage = .strong,
section: ?[]const u8 = null,
visibility: SymbolVisibility = .default,
};
@ -669,7 +669,7 @@ pub const ExportOptions = struct {
pub const ExternOptions = struct {
name: []const u8,
library_name: ?[]const u8 = null,
linkage: GlobalLinkage = .Strong,
linkage: GlobalLinkage = .strong,
is_thread_local: bool = false,
};

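Illustrative aside, not part of the diff: a minimal sketch of ExportOptions with the lowercase linkage tag (fooImpl and foo_export are hypothetical names):

fn fooImpl() callconv(.C) void {}

comptime {
    @export(fooImpl, .{ .name = "foo_export", .linkage = .strong }); // formerly .Strong
}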

@ -202,7 +202,7 @@ var dummy_execute_header: mach_hdr = undefined;
pub extern var _mh_execute_header: mach_hdr;
comptime {
if (builtin.target.isDarwin()) {
@export(dummy_execute_header, .{ .name = "_mh_execute_header", .linkage = .Weak });
@export(dummy_execute_header, .{ .name = "_mh_execute_header", .linkage = .weak });
}
}


@ -1420,7 +1420,7 @@ fn windowsMakeAsyncPipe(rd: *?windows.HANDLE, wr: *?windows.HANDLE, sattr: *cons
const pipe_path = std.fmt.bufPrintZ(
&tmp_buf,
"\\\\.\\pipe\\zig-childprocess-{d}-{d}",
.{ windows.kernel32.GetCurrentProcessId(), pipe_name_counter.fetchAdd(1, .Monotonic) },
.{ windows.kernel32.GetCurrentProcessId(), pipe_name_counter.fetchAdd(1, .monotonic) },
) catch unreachable;
const len = std.unicode.wtf8ToWtf16Le(&tmp_bufw, pipe_path) catch unreachable;
tmp_bufw[len] = 0;

@ -461,7 +461,7 @@ pub fn panicImpl(trace: ?*const std.builtin.StackTrace, first_trace_addr: ?usize
0 => {
panic_stage = 1;

_ = panicking.fetchAdd(1, .SeqCst);
_ = panicking.fetchAdd(1, .seq_cst);

// Make sure to release the mutex when done
{
@ -503,7 +503,7 @@ pub fn panicImpl(trace: ?*const std.builtin.StackTrace, first_trace_addr: ?usize

/// Must be called only after adding 1 to `panicking`. There are three callsites.
fn waitForOtherThreadToFinishPanicking() void {
if (panicking.fetchSub(1, .SeqCst) != 1) {
if (panicking.fetchSub(1, .seq_cst) != 1) {
// Another thread is panicking, wait for the last one to finish
// and call abort()
if (builtin.single_threaded) unreachable;
@ -2587,7 +2587,7 @@ fn handleSegfaultPosix(sig: i32, info: *const os.siginfo_t, ctx_ptr: ?*const any
nosuspend switch (panic_stage) {
0 => {
panic_stage = 1;
_ = panicking.fetchAdd(1, .SeqCst);
_ = panicking.fetchAdd(1, .seq_cst);

{
panic_mutex.lock();
@ -2663,7 +2663,7 @@ fn handleSegfaultWindowsExtra(
nosuspend switch (panic_stage) {
0 => {
panic_stage = 1;
_ = panicking.fetchAdd(1, .SeqCst);
_ = panicking.fetchAdd(1, .seq_cst);

{
panic_mutex.lock();

@ -8,7 +8,7 @@ const windows = std.os.windows;
|
||||
const system = std.os.system;
|
||||
|
||||
pub const DynLib = switch (builtin.os.tag) {
|
||||
.linux => if (!builtin.link_libc or builtin.abi == .musl and builtin.link_mode == .Static)
|
||||
.linux => if (!builtin.link_libc or builtin.abi == .musl and builtin.link_mode == .static)
|
||||
ElfDynLib
|
||||
else
|
||||
DlDynLib,
|
||||
@ -56,7 +56,7 @@ const RDebug = extern struct {
|
||||
/// TODO make it possible to reference this same external symbol 2x so we don't need this
|
||||
/// helper function.
|
||||
pub fn get_DYNAMIC() ?[*]elf.Dyn {
|
||||
return @extern([*]elf.Dyn, .{ .name = "_DYNAMIC", .linkage = .Weak });
|
||||
return @extern([*]elf.Dyn, .{ .name = "_DYNAMIC", .linkage = .weak });
|
||||
}
|
||||
|
||||
pub fn linkmap_iterator(phdrs: []elf.Phdr) !LinkMap.Iterator {
|
||||
|
||||
@ -22,7 +22,7 @@ pub fn EnumFieldStruct(comptime E: type, comptime Data: type, comptime field_def
|
||||
}};
|
||||
}
|
||||
return @Type(.{ .Struct = .{
|
||||
.layout = .Auto,
|
||||
.layout = .auto,
|
||||
.fields = fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
|
||||
@ -303,11 +303,11 @@ pub const HeapAllocator = switch (builtin.os.tag) {
|
||||
|
||||
const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
|
||||
const amt = n + ptr_align - 1 + @sizeOf(usize);
|
||||
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .SeqCst);
|
||||
const optional_heap_handle = @atomicLoad(?HeapHandle, &self.heap_handle, .seq_cst);
|
||||
const heap_handle = optional_heap_handle orelse blk: {
|
||||
const options = if (builtin.single_threaded) os.windows.HEAP_NO_SERIALIZE else 0;
|
||||
const hh = os.windows.kernel32.HeapCreate(options, amt, 0) orelse return null;
|
||||
const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .SeqCst, .SeqCst) orelse break :blk hh;
|
||||
const other_hh = @cmpxchgStrong(?HeapHandle, &self.heap_handle, null, hh, .seq_cst, .seq_cst) orelse break :blk hh;
|
||||
os.windows.HeapDestroy(hh);
|
||||
break :blk other_hh.?; // can't be null because of the cmpxchg
|
||||
};
|
||||
@ -482,13 +482,13 @@ pub const FixedBufferAllocator = struct {
|
||||
const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
|
||||
_ = ra;
|
||||
const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
|
||||
var end_index = @atomicLoad(usize, &self.end_index, .SeqCst);
|
||||
var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
|
||||
while (true) {
|
||||
const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
|
||||
const adjusted_index = end_index + adjust_off;
|
||||
const new_end_index = adjusted_index + n;
|
||||
if (new_end_index > self.buffer.len) return null;
|
||||
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .SeqCst, .SeqCst) orelse
|
||||
end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse
|
||||
return self.buffer[adjusted_index..new_end_index].ptr;
|
||||
}
|
||||
}
|
||||
|
||||
@ -30,7 +30,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
|
||||
return @ptrCast(addr);
|
||||
}
|
||||
|
||||
const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .Unordered);
|
||||
const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
|
||||
const slice = os.mmap(
|
||||
hint,
|
||||
aligned_len,
|
||||
@ -41,7 +41,7 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
|
||||
) catch return null;
|
||||
assert(mem.isAligned(@intFromPtr(slice.ptr), mem.page_size));
|
||||
const new_hint: [*]align(mem.page_size) u8 = @alignCast(slice.ptr + aligned_len);
|
||||
_ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .Monotonic, .Monotonic);
|
||||
_ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
|
||||
return slice.ptr;
|
||||
}
|
||||
|
||||
|
||||
@ -1642,7 +1642,7 @@ pub fn open(
|
||||
|
||||
const host = uri.host orelse return error.UriMissingHost;
|
||||
|
||||
if (protocol == .tls and @atomicLoad(bool, &client.next_https_rescan_certs, .Acquire)) {
|
||||
if (protocol == .tls and @atomicLoad(bool, &client.next_https_rescan_certs, .acquire)) {
|
||||
if (disable_tls) unreachable;
|
||||
|
||||
client.ca_bundle_mutex.lock();
|
||||
@ -1650,7 +1650,7 @@ pub fn open(
|
||||
|
||||
if (client.next_https_rescan_certs) {
|
||||
client.ca_bundle.rescan(client.allocator) catch return error.CertificateBundleLoadFailure;
|
||||
@atomicStore(bool, &client.next_https_rescan_certs, false, .Release);
|
||||
@atomicStore(bool, &client.next_https_rescan_certs, false, .release);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -688,7 +688,7 @@ pub fn PollFiles(comptime StreamEnum: type) type {
|
||||
};
|
||||
}
|
||||
return @Type(.{ .Struct = .{
|
||||
.layout = .Auto,
|
||||
.layout = .auto,
|
||||
.fields = &struct_fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
|
||||
@ -326,7 +326,7 @@ pub fn isBytes(self: Self, slice: []const u8) anyerror!bool {
|
||||
|
||||
pub fn readStruct(self: Self, comptime T: type) anyerror!T {
|
||||
// Only extern and packed structs have defined in-memory layout.
|
||||
comptime assert(@typeInfo(T).Struct.layout != .Auto);
|
||||
comptime assert(@typeInfo(T).Struct.layout != .auto);
|
||||
var res: [1]T = undefined;
|
||||
try self.readNoEof(mem.sliceAsBytes(res[0..]));
|
||||
return res[0];
|
||||
|
||||
@ -55,7 +55,7 @@ pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.built
|
||||
|
||||
pub fn writeStruct(self: Self, value: anytype) anyerror!void {
|
||||
// Only extern and packed structs have defined in-memory layout.
|
||||
comptime assert(@typeInfo(@TypeOf(value)).Struct.layout != .Auto);
|
||||
comptime assert(@typeInfo(@TypeOf(value)).Struct.layout != .auto);
|
||||
return self.writeAll(mem.asBytes(&value));
|
||||
}
|
||||
|
||||
|
||||
@ -238,7 +238,7 @@ pub fn zeroes(comptime T: type) T {
|
||||
},
|
||||
.Struct => |struct_info| {
|
||||
if (@sizeOf(T) == 0) return undefined;
|
||||
if (struct_info.layout == .Extern) {
|
||||
if (struct_info.layout == .@"extern") {
|
||||
var item: T = undefined;
|
||||
@memset(asBytes(&item), 0);
|
||||
return item;
|
||||
@ -284,7 +284,7 @@ pub fn zeroes(comptime T: type) T {
|
||||
return @splat(zeroes(info.child));
|
||||
},
|
||||
.Union => |info| {
|
||||
if (info.layout == .Extern) {
|
||||
if (info.layout == .@"extern") {
|
||||
var item: T = undefined;
|
||||
@memset(asBytes(&item), 0);
|
||||
return item;
|
||||
@ -429,7 +429,7 @@ pub fn zeroInit(comptime T: type, init: anytype) T {
|
||||
}
|
||||
}
|
||||
|
||||
var value: T = if (struct_info.layout == .Extern) zeroes(T) else undefined;
|
||||
var value: T = if (struct_info.layout == .@"extern") zeroes(T) else undefined;
|
||||
|
||||
inline for (struct_info.fields, 0..) |field, i| {
|
||||
if (field.is_comptime) {
|
||||
|
||||
@ -269,12 +269,12 @@ test containerLayout {
|
||||
a: u8,
|
||||
};
|
||||
|
||||
try testing.expect(containerLayout(S1) == .Auto);
|
||||
try testing.expect(containerLayout(S2) == .Packed);
|
||||
try testing.expect(containerLayout(S3) == .Extern);
|
||||
try testing.expect(containerLayout(U1) == .Auto);
|
||||
try testing.expect(containerLayout(U2) == .Packed);
|
||||
try testing.expect(containerLayout(U3) == .Extern);
|
||||
try testing.expect(containerLayout(S1) == .auto);
|
||||
try testing.expect(containerLayout(S2) == .@"packed");
|
||||
try testing.expect(containerLayout(S3) == .@"extern");
|
||||
try testing.expect(containerLayout(U1) == .auto);
|
||||
try testing.expect(containerLayout(U2) == .@"packed");
|
||||
try testing.expect(containerLayout(U3) == .@"extern");
|
||||
}
|
||||
|
||||
/// Instead of this function, prefer to use e.g. `@typeInfo(foo).Struct.decls`
|
||||
@ -1025,7 +1025,7 @@ fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type {
|
||||
return @Type(.{
|
||||
.Struct = .{
|
||||
.is_tuple = true,
|
||||
.layout = .Auto,
|
||||
.layout = .auto,
|
||||
.decls = &.{},
|
||||
.fields = &tuple_fields,
|
||||
},
|
||||
|
||||
@ -32,7 +32,7 @@ pub fn TrailerFlags(comptime Fields: type) type {
|
||||
}
|
||||
break :blk @Type(.{
|
||||
.Struct = .{
|
||||
.layout = .Auto,
|
||||
.layout = .auto,
|
||||
.fields = &fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
|
||||
@ -558,7 +558,7 @@ pub fn MultiArrayList(comptime T: type) type {
|
||||
.alignment = fields[i].alignment,
|
||||
};
|
||||
break :entry @Type(.{ .Struct = .{
|
||||
.layout = .Extern,
|
||||
.layout = .@"extern",
|
||||
.fields = &entry_fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
|
||||
@ -17,7 +17,7 @@ pub fn Once(comptime f: fn () void) type {
|
||||
/// first time.
|
||||
/// The invocations are thread-safe.
|
||||
pub fn call(self: *@This()) void {
|
||||
if (@atomicLoad(bool, &self.done, .Acquire))
|
||||
if (@atomicLoad(bool, &self.done, .acquire))
|
||||
return;
|
||||
|
||||
return self.callSlow();
|
||||
@ -32,7 +32,7 @@ pub fn Once(comptime f: fn () void) type {
|
||||
// The first thread to acquire the mutex gets to run the initializer
|
||||
if (!self.done) {
|
||||
f();
|
||||
@atomicStore(bool, &self.done, true, .Release);
|
||||
@atomicStore(bool, &self.done, true, .release);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@ -436,7 +436,7 @@ fn fchmodat1(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtEr
|
||||
fn fchmodat2(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtError!void {
|
||||
const path_c = try toPosixPath(path);
|
||||
const use_fchmodat2 = (builtin.os.isAtLeast(.linux, .{ .major = 6, .minor = 6, .patch = 0 }) orelse false) and
|
||||
has_fchmodat2_syscall.load(.Monotonic);
|
||||
has_fchmodat2_syscall.load(.monotonic);
|
||||
while (use_fchmodat2) {
|
||||
// Later on this should be changed to `system.fchmodat2`
|
||||
// when the musl/glibc add a wrapper.
|
||||
@ -458,7 +458,7 @@ fn fchmodat2(dirfd: fd_t, path: []const u8, mode: mode_t, flags: u32) FChmodAtEr
|
||||
.ROFS => return error.ReadOnlyFileSystem,
|
||||
|
||||
.NOSYS => { // Use fallback.
|
||||
has_fchmodat2_syscall.store(false, .Monotonic);
|
||||
has_fchmodat2_syscall.store(false, .monotonic);
|
||||
break;
|
||||
},
|
||||
else => |err| return unexpectedErrno(err),
|
||||
@ -729,7 +729,7 @@ pub fn abort() noreturn {
|
||||
const global = struct {
|
||||
var abort_entered: bool = false;
|
||||
};
|
||||
while (@cmpxchgWeak(bool, &global.abort_entered, false, true, .SeqCst, .SeqCst)) |_| {}
|
||||
while (@cmpxchgWeak(bool, &global.abort_entered, false, true, .seq_cst, .seq_cst)) |_| {}
|
||||
}
|
||||
|
||||
// Install default handler so that the tkill below will terminate.
|
||||
@ -6809,7 +6809,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
|
||||
if ((comptime builtin.os.isAtLeast(.freebsd, .{ .major = 13, .minor = 0, .patch = 0 }) orelse false) or
|
||||
((comptime builtin.os.isAtLeast(.linux, .{ .major = 4, .minor = 5, .patch = 0 }) orelse false and
|
||||
std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 })) and
|
||||
has_copy_file_range_syscall.load(.Monotonic)))
|
||||
has_copy_file_range_syscall.load(.monotonic)))
|
||||
{
|
||||
var off_in_copy: i64 = @bitCast(off_in);
|
||||
var off_out_copy: i64 = @bitCast(off_out);
|
||||
@ -6844,7 +6844,7 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
|
||||
.TXTBSY => return error.SwapFile,
|
||||
.XDEV => break, // support for cross-filesystem copy added in Linux 5.3, use fallback
|
||||
.NOSYS => { // syscall added in Linux 4.5, use fallback
|
||||
has_copy_file_range_syscall.store(false, .Monotonic);
|
||||
has_copy_file_range_syscall.store(false, .monotonic);
|
||||
break;
|
||||
},
|
||||
else => |err| return unexpectedErrno(err),
|
||||
|
||||
@ -394,7 +394,7 @@ const extern_getauxval = switch (builtin.zig_backend) {
|
||||
|
||||
comptime {
|
||||
if (extern_getauxval) {
|
||||
@export(getauxvalImpl, .{ .name = "getauxval", .linkage = .Weak });
|
||||
@export(getauxvalImpl, .{ .name = "getauxval", .linkage = .weak });
|
||||
}
|
||||
}
|
||||
|
||||
@ -1334,7 +1334,7 @@ const vdso_clock_gettime_ty = *align(1) const fn (i32, *timespec) callconv(.C) u
|
||||
|
||||
pub fn clock_gettime(clk_id: i32, tp: *timespec) usize {
|
||||
if (@hasDecl(VDSO, "CGT_SYM")) {
|
||||
const ptr = @atomicLoad(?*const anyopaque, &vdso_clock_gettime, .Unordered);
|
||||
const ptr = @atomicLoad(?*const anyopaque, &vdso_clock_gettime, .unordered);
|
||||
if (ptr) |fn_ptr| {
|
||||
const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr));
|
||||
const rc = f(clk_id, tp);
|
||||
@ -1351,7 +1351,7 @@ fn init_vdso_clock_gettime(clk: i32, ts: *timespec) callconv(.C) usize {
|
||||
const ptr = @as(?*const anyopaque, @ptrFromInt(vdso.lookup(VDSO.CGT_VER, VDSO.CGT_SYM)));
|
||||
// Note that we may not have a VDSO at all, update the stub address anyway
|
||||
// so that clock_gettime will fall back on the good old (and slow) syscall
|
||||
@atomicStore(?*const anyopaque, &vdso_clock_gettime, ptr, .Monotonic);
|
||||
@atomicStore(?*const anyopaque, &vdso_clock_gettime, ptr, .monotonic);
|
||||
// Call into the VDSO if available
|
||||
if (ptr) |fn_ptr| {
|
||||
const f = @as(vdso_clock_gettime_ty, @ptrCast(fn_ptr));
|
||||
|
||||
@ -133,7 +133,7 @@ pub fn deinit(self: *IoUring) void {
|
||||
/// alternative. In Zig, we have first-class error handling... so let's use it.
|
||||
/// Matches the implementation of io_uring_get_sqe() in liburing.
|
||||
pub fn get_sqe(self: *IoUring) !*linux.io_uring_sqe {
|
||||
const head = @atomicLoad(u32, self.sq.head, .Acquire);
|
||||
const head = @atomicLoad(u32, self.sq.head, .acquire);
|
||||
// Remember that these head and tail offsets wrap around every four billion operations.
|
||||
// We must therefore use wrapping addition and subtraction to avoid a runtime crash.
|
||||
const next = self.sq.sqe_tail +% 1;
|
||||
@ -222,7 +222,7 @@ pub fn flush_sq(self: *IoUring) u32 {
|
||||
self.sq.sqe_head +%= 1;
|
||||
}
|
||||
// Ensure that the kernel can actually see the SQE updates when it sees the tail update.
|
||||
@atomicStore(u32, self.sq.tail, tail, .Release);
|
||||
@atomicStore(u32, self.sq.tail, tail, .release);
|
||||
}
|
||||
return self.sq_ready();
|
||||
}
|
||||
@ -234,7 +234,7 @@ pub fn flush_sq(self: *IoUring) u32 {
|
||||
pub fn sq_ring_needs_enter(self: *IoUring, flags: *u32) bool {
|
||||
assert(flags.* == 0);
|
||||
if ((self.flags & linux.IORING_SETUP_SQPOLL) == 0) return true;
|
||||
if ((@atomicLoad(u32, self.sq.flags, .Unordered) & linux.IORING_SQ_NEED_WAKEUP) != 0) {
|
||||
if ((@atomicLoad(u32, self.sq.flags, .unordered) & linux.IORING_SQ_NEED_WAKEUP) != 0) {
|
||||
flags.* |= linux.IORING_ENTER_SQ_WAKEUP;
|
||||
return true;
|
||||
}
|
||||
@ -248,14 +248,14 @@ pub fn sq_ring_needs_enter(self: *IoUring, flags: *u32) bool {
|
||||
pub fn sq_ready(self: *IoUring) u32 {
|
||||
// Always use the shared ring state (i.e. head and not sqe_head) to avoid going out of sync,
|
||||
// see https://github.com/axboe/liburing/issues/92.
|
||||
return self.sq.sqe_tail -% @atomicLoad(u32, self.sq.head, .Acquire);
|
||||
return self.sq.sqe_tail -% @atomicLoad(u32, self.sq.head, .acquire);
|
||||
}
|
||||
|
||||
/// Returns the number of CQEs in the completion queue, i.e. its length.
|
||||
/// These are CQEs that the application is yet to consume.
|
||||
/// Matches the implementation of io_uring_cq_ready in liburing.
|
||||
pub fn cq_ready(self: *IoUring) u32 {
|
||||
return @atomicLoad(u32, self.cq.tail, .Acquire) -% self.cq.head.*;
|
||||
return @atomicLoad(u32, self.cq.tail, .acquire) -% self.cq.head.*;
|
||||
}
|
||||
|
||||
/// Copies as many CQEs as are ready, and that can fit into the destination `cqes` slice.
|
||||
@ -313,7 +313,7 @@ pub fn copy_cqe(ring: *IoUring) !linux.io_uring_cqe {
|
||||
|
||||
/// Matches the implementation of cq_ring_needs_flush() in liburing.
|
||||
pub fn cq_ring_needs_flush(self: *IoUring) bool {
|
||||
return (@atomicLoad(u32, self.sq.flags, .Unordered) & linux.IORING_SQ_CQ_OVERFLOW) != 0;
|
||||
return (@atomicLoad(u32, self.sq.flags, .unordered) & linux.IORING_SQ_CQ_OVERFLOW) != 0;
|
||||
}
|
||||
|
||||
/// For advanced use cases only that implement custom completion queue methods.
|
||||
@ -331,7 +331,7 @@ pub fn cqe_seen(self: *IoUring, cqe: *linux.io_uring_cqe) void {
|
||||
pub fn cq_advance(self: *IoUring, count: u32) void {
|
||||
if (count > 0) {
|
||||
// Ensure the kernel only sees the new head value after the CQEs have been read.
|
||||
@atomicStore(u32, self.cq.head, self.cq.head.* +% count, .Release);
|
||||
@atomicStore(u32, self.cq.head, self.cq.head.* +% count, .release);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -425,7 +425,7 @@ fn start1() u8 {
|
||||
}
|
||||
|
||||
fn start2(ctx: *i32) u8 {
|
||||
_ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.SeqCst);
|
||||
_ = @atomicRmw(i32, ctx, AtomicRmwOp.Add, 1, AtomicOrder.seq_cst);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
@ -50,7 +50,7 @@ comptime {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if (builtin.output_mode == .Lib and builtin.link_mode == .Dynamic) {
|
||||
if (builtin.output_mode == .Lib and builtin.link_mode == .dynamic) {
|
||||
if (native_os == .windows and !@hasDecl(root, "_DllMainCRTStartup")) {
|
||||
@export(_DllMainCRTStartup, .{ .name = "_DllMainCRTStartup" });
|
||||
}
|
||||
|
||||
@ -155,9 +155,9 @@ pub fn binNameAlloc(allocator: Allocator, options: BinNameOptions) error{OutOfMe
|
||||
.coff => switch (options.output_mode) {
|
||||
.Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, t.exeFileExt() }),
|
||||
.Lib => {
|
||||
const suffix = switch (options.link_mode orelse .Static) {
|
||||
.Static => ".lib",
|
||||
.Dynamic => ".dll",
|
||||
const suffix = switch (options.link_mode orelse .static) {
|
||||
.static => ".lib",
|
||||
.dynamic => ".dll",
|
||||
};
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, suffix });
|
||||
},
|
||||
@ -166,11 +166,11 @@ pub fn binNameAlloc(allocator: Allocator, options: BinNameOptions) error{OutOfMe
|
||||
.elf => switch (options.output_mode) {
|
||||
.Exe => return allocator.dupe(u8, root_name),
|
||||
.Lib => {
|
||||
switch (options.link_mode orelse .Static) {
|
||||
.Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
|
||||
switch (options.link_mode orelse .static) {
|
||||
.static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
|
||||
t.libPrefix(), root_name,
|
||||
}),
|
||||
.Dynamic => {
|
||||
.dynamic => {
|
||||
if (options.version) |ver| {
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}.so.{d}.{d}.{d}", .{
|
||||
t.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
|
||||
@ -188,11 +188,11 @@ pub fn binNameAlloc(allocator: Allocator, options: BinNameOptions) error{OutOfMe
|
||||
.macho => switch (options.output_mode) {
|
||||
.Exe => return allocator.dupe(u8, root_name),
|
||||
.Lib => {
|
||||
switch (options.link_mode orelse .Static) {
|
||||
.Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
|
||||
switch (options.link_mode orelse .static) {
|
||||
.static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
|
||||
t.libPrefix(), root_name,
|
||||
}),
|
||||
.Dynamic => {
|
||||
.dynamic => {
|
||||
if (options.version) |ver| {
|
||||
return std.fmt.allocPrint(allocator, "{s}{s}.{d}.{d}.{d}.dylib", .{
|
||||
t.libPrefix(), root_name, ver.major, ver.minor, ver.patch,
|
||||
@ -210,11 +210,11 @@ pub fn binNameAlloc(allocator: Allocator, options: BinNameOptions) error{OutOfMe
|
||||
.wasm => switch (options.output_mode) {
|
||||
.Exe => return std.fmt.allocPrint(allocator, "{s}{s}", .{ root_name, t.exeFileExt() }),
|
||||
.Lib => {
|
||||
switch (options.link_mode orelse .Static) {
|
||||
.Static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
|
||||
switch (options.link_mode orelse .static) {
|
||||
.static => return std.fmt.allocPrint(allocator, "{s}{s}.a", .{
|
||||
t.libPrefix(), root_name,
|
||||
}),
|
||||
.Dynamic => return std.fmt.allocPrint(allocator, "{s}.wasm", .{root_name}),
|
||||
.dynamic => return std.fmt.allocPrint(allocator, "{s}.wasm", .{root_name}),
|
||||
}
|
||||
},
|
||||
.Obj => return std.fmt.allocPrint(allocator, "{s}.o", .{root_name}),
|
||||
|
||||
@ -175,7 +175,7 @@ pub fn generate(gpa: Allocator, tree: Ast) Allocator.Error!Zir {
|
||||
&gen_scope.base,
|
||||
0,
|
||||
tree.containerDeclRoot(),
|
||||
.Auto,
|
||||
.auto,
|
||||
0,
|
||||
)) |struct_decl_ref| {
|
||||
assert(struct_decl_ref.toIndex().? == .main_struct_inst);
|
||||
@ -4907,7 +4907,7 @@ fn structDeclInner(
|
||||
var backing_int_body_len: usize = 0;
|
||||
const backing_int_ref: Zir.Inst.Ref = blk: {
|
||||
if (backing_int_node != 0) {
|
||||
if (layout != .Packed) {
|
||||
if (layout != .@"packed") {
|
||||
return astgen.failNode(backing_int_node, "non-packed struct does not support backing integer type", .{});
|
||||
} else {
|
||||
const backing_int_ref = try typeExpr(&block_scope, &namespace.base, backing_int_node);
|
||||
@ -4958,9 +4958,9 @@ fn structDeclInner(
|
||||
} else false;
|
||||
|
||||
if (is_tuple) switch (layout) {
|
||||
.Auto => {},
|
||||
.Extern => return astgen.failNode(node, "extern tuples are not supported", .{}),
|
||||
.Packed => return astgen.failNode(node, "packed tuples are not supported", .{}),
|
||||
.auto => {},
|
||||
.@"extern" => return astgen.failNode(node, "extern tuples are not supported", .{}),
|
||||
.@"packed" => return astgen.failNode(node, "packed tuples are not supported", .{}),
|
||||
};
|
||||
|
||||
if (is_tuple) for (container_decl.ast.members) |member_node| {
|
||||
@ -5055,9 +5055,9 @@ fn structDeclInner(
|
||||
|
||||
if (is_comptime) {
|
||||
switch (layout) {
|
||||
.Packed => return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be marked comptime", .{}),
|
||||
.Extern => return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{}),
|
||||
.Auto => any_comptime_fields = true,
|
||||
.@"packed" => return astgen.failTok(member.comptime_token.?, "packed struct fields cannot be marked comptime", .{}),
|
||||
.@"extern" => return astgen.failTok(member.comptime_token.?, "extern struct fields cannot be marked comptime", .{}),
|
||||
.auto => any_comptime_fields = true,
|
||||
}
|
||||
} else {
|
||||
known_non_opv = known_non_opv or
|
||||
@ -5082,7 +5082,7 @@ fn structDeclInner(
|
||||
}
|
||||
|
||||
if (have_align) {
|
||||
if (layout == .Packed) {
|
||||
if (layout == .@"packed") {
|
||||
try astgen.appendErrorNode(member.ast.align_expr, "unable to override alignment of packed struct fields", .{});
|
||||
}
|
||||
any_aligned_fields = true;
|
||||
@ -5229,12 +5229,11 @@ fn unionDeclInner(
|
||||
const decl_count = try astgen.scanDecls(&namespace, members);
|
||||
const field_count: u32 = @intCast(members.len - decl_count);
|
||||
|
||||
if (layout != .Auto and (auto_enum_tok != null or arg_node != 0)) {
|
||||
const layout_str = if (layout == .Extern) "extern" else "packed";
|
||||
if (layout != .auto and (auto_enum_tok != null or arg_node != 0)) {
|
||||
if (arg_node != 0) {
|
||||
return astgen.failNode(arg_node, "{s} union does not support enum tag type", .{layout_str});
|
||||
return astgen.failNode(arg_node, "{s} union does not support enum tag type", .{@tagName(layout)});
|
||||
} else {
|
||||
return astgen.failTok(auto_enum_tok.?, "{s} union does not support enum tag type", .{layout_str});
|
||||
return astgen.failTok(auto_enum_tok.?, "{s} union does not support enum tag type", .{@tagName(layout)});
|
||||
}
|
||||
}
|
||||
|
||||
@ -5429,21 +5428,21 @@ fn containerDecl(
|
||||
|
||||
switch (token_tags[container_decl.ast.main_token]) {
|
||||
.keyword_struct => {
|
||||
const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) {
|
||||
.keyword_packed => std.builtin.Type.ContainerLayout.Packed,
|
||||
.keyword_extern => std.builtin.Type.ContainerLayout.Extern,
|
||||
const layout: std.builtin.Type.ContainerLayout = if (container_decl.layout_token) |t| switch (token_tags[t]) {
|
||||
.keyword_packed => .@"packed",
|
||||
.keyword_extern => .@"extern",
|
||||
else => unreachable,
|
||||
} else std.builtin.Type.ContainerLayout.Auto;
|
||||
} else .auto;
|
||||
|
||||
const result = try structDeclInner(gz, scope, node, container_decl, layout, container_decl.ast.arg);
|
||||
return rvalue(gz, ri, result, node);
|
||||
},
|
||||
.keyword_union => {
|
||||
const layout = if (container_decl.layout_token) |t| switch (token_tags[t]) {
|
||||
.keyword_packed => std.builtin.Type.ContainerLayout.Packed,
|
||||
.keyword_extern => std.builtin.Type.ContainerLayout.Extern,
|
||||
const layout: std.builtin.Type.ContainerLayout = if (container_decl.layout_token) |t| switch (token_tags[t]) {
|
||||
.keyword_packed => .@"packed",
|
||||
.keyword_extern => .@"extern",
|
||||
else => unreachable,
|
||||
} else std.builtin.Type.ContainerLayout.Auto;
|
||||
} else .auto;
|
||||
|
||||
const result = try unionDeclInner(gz, scope, node, container_decl.ast.members, layout, container_decl.ast.arg, container_decl.ast.enum_token);
|
||||
return rvalue(gz, ri, result, node);
|
||||
@ -8588,7 +8587,7 @@ fn numberLiteral(gz: *GenZir, ri: ResultInfo, node: Ast.Node.Index, source_node:
|
||||
.positive => unsigned_float_number,
|
||||
};
|
||||
// If the value fits into a f64 without losing any precision, store it that way.
|
||||
@setFloatMode(.Strict);
|
||||
@setFloatMode(.strict);
|
||||
const smaller_float: f64 = @floatCast(float_number);
|
||||
const bigger_again: f128 = smaller_float;
|
||||
if (bigger_again == float_number) {
|
||||
|
||||
@ -250,18 +250,18 @@ fn bswap(x: anytype) @TypeOf(x) {
|
||||
.Enum => return @as(T, @enumFromInt(@byteSwap(@intFromEnum(x)))),
|
||||
.Int => return @byteSwap(x),
|
||||
.Struct => |info| switch (info.layout) {
|
||||
.Extern => {
|
||||
.@"extern" => {
|
||||
var result: T = undefined;
|
||||
inline for (info.fields) |field| {
|
||||
@field(result, field.name) = bswap(@field(x, field.name));
|
||||
}
|
||||
return result;
|
||||
},
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const I = info.backing_integer.?;
|
||||
return @as(T, @bitCast(@byteSwap(@as(I, @bitCast(x)))));
|
||||
},
|
||||
.Auto => @compileError("auto layout struct"),
|
||||
.auto => @compileError("auto layout struct"),
|
||||
},
|
||||
else => @compileError("bswap on type " ++ @typeName(T)),
|
||||
}
|
||||
|
||||
@ -1201,7 +1201,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
|
||||
const output_mode = options.config.output_mode;
|
||||
const is_dyn_lib = switch (output_mode) {
|
||||
.Obj, .Exe => false,
|
||||
.Lib => options.config.link_mode == .Dynamic,
|
||||
.Lib => options.config.link_mode == .dynamic,
|
||||
};
|
||||
const is_exe_or_dyn_lib = switch (output_mode) {
|
||||
.Obj => false,
|
||||
@ -1806,8 +1806,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
|
||||
.{ .musl_crt_file = .scrt1_o },
|
||||
.{ .musl_crt_file = .rcrt1_o },
|
||||
switch (comp.config.link_mode) {
|
||||
.Static => .{ .musl_crt_file = .libc_a },
|
||||
.Dynamic => .{ .musl_crt_file = .libc_so },
|
||||
.static => .{ .musl_crt_file = .libc_a },
|
||||
.dynamic => .{ .musl_crt_file = .libc_so },
|
||||
},
|
||||
});
|
||||
}
|
||||
@ -6087,7 +6087,7 @@ pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const
|
||||
fn wantBuildLibCFromSource(comp: Compilation) bool {
|
||||
const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
|
||||
.Obj => false,
|
||||
.Lib => comp.config.link_mode == .Dynamic,
|
||||
.Lib => comp.config.link_mode == .dynamic,
|
||||
.Exe => true,
|
||||
};
|
||||
const ofmt = comp.root_mod.resolved_target.result.ofmt;
|
||||
@ -6116,7 +6116,7 @@ fn wantBuildMinGWFromSource(comp: Compilation) bool {
|
||||
fn wantBuildLibUnwindFromSource(comp: *Compilation) bool {
|
||||
const is_exe_or_dyn_lib = switch (comp.config.output_mode) {
|
||||
.Obj => false,
|
||||
.Lib => comp.config.link_mode == .Dynamic,
|
||||
.Lib => comp.config.link_mode == .dynamic,
|
||||
.Exe => true,
|
||||
};
|
||||
const ofmt = comp.root_mod.resolved_target.result.ofmt;
|
||||
@ -6310,7 +6310,7 @@ fn buildOutputFromZig(
|
||||
|
||||
const config = try Config.resolve(.{
|
||||
.output_mode = output_mode,
|
||||
.link_mode = .Static,
|
||||
.link_mode = .static,
|
||||
.resolved_target = comp.root_mod.resolved_target,
|
||||
.is_test = false,
|
||||
.have_zcu = true,
|
||||
|
||||
@ -348,26 +348,26 @@ pub fn resolve(options: Options) ResolveError!Config {
|
||||
const link_mode = b: {
|
||||
const explicitly_exe_or_dyn_lib = switch (options.output_mode) {
|
||||
.Obj => false,
|
||||
.Lib => (options.link_mode orelse .Static) == .Dynamic,
|
||||
.Lib => (options.link_mode orelse .static) == .dynamic,
|
||||
.Exe => true,
|
||||
};
|
||||
|
||||
if (target_util.cannotDynamicLink(target)) {
|
||||
if (options.link_mode == .Dynamic) return error.TargetCannotDynamicLink;
|
||||
break :b .Static;
|
||||
if (options.link_mode == .dynamic) return error.TargetCannotDynamicLink;
|
||||
break :b .static;
|
||||
}
|
||||
if (explicitly_exe_or_dyn_lib and link_libc and
|
||||
(target.isGnuLibC() or target_util.osRequiresLibC(target)))
|
||||
{
|
||||
if (options.link_mode == .Static) return error.LibCRequiresDynamicLinking;
|
||||
break :b .Dynamic;
|
||||
if (options.link_mode == .static) return error.LibCRequiresDynamicLinking;
|
||||
break :b .dynamic;
|
||||
}
|
||||
// When creating a executable that links to system libraries, we
|
||||
// require dynamic linking, but we must not link static libraries
|
||||
// or object files dynamically!
|
||||
if (options.any_dyn_libs and options.output_mode == .Exe) {
|
||||
if (options.link_mode == .Static) return error.SharedLibrariesRequireDynamicLinking;
|
||||
break :b .Dynamic;
|
||||
if (options.link_mode == .static) return error.SharedLibrariesRequireDynamicLinking;
|
||||
break :b .dynamic;
|
||||
}
|
||||
|
||||
if (options.link_mode) |link_mode| break :b link_mode;
|
||||
@ -377,16 +377,16 @@ pub fn resolve(options: Options) ResolveError!Config {
|
||||
{
|
||||
// If targeting the system's native ABI and the system's libc is
|
||||
// musl, link dynamically by default.
|
||||
break :b .Dynamic;
|
||||
break :b .dynamic;
|
||||
}
|
||||
|
||||
// Static is generally a better default. Fight me.
|
||||
break :b .Static;
|
||||
break :b .static;
|
||||
};
|
||||
|
||||
const import_memory = options.import_memory orelse (options.output_mode == .Obj);
|
||||
const export_memory = b: {
|
||||
if (link_mode == .Dynamic) {
|
||||
if (link_mode == .dynamic) {
|
||||
if (options.export_memory == true) return error.ExportMemoryAndDynamicIncompatible;
|
||||
break :b false;
|
||||
}
|
||||
@ -397,7 +397,7 @@ pub fn resolve(options: Options) ResolveError!Config {
|
||||
const pie: bool = b: {
|
||||
switch (options.output_mode) {
|
||||
.Obj, .Exe => {},
|
||||
.Lib => if (link_mode == .Dynamic) {
|
||||
.Lib => if (link_mode == .dynamic) {
|
||||
if (options.pie == true) return error.DynamicLibraryPrecludesPie;
|
||||
break :b false;
|
||||
},
|
||||
@ -467,7 +467,7 @@ pub fn resolve(options: Options) ResolveError!Config {
|
||||
if (rdynamic) break :b true;
|
||||
break :b switch (options.output_mode) {
|
||||
.Obj, .Exe => false,
|
||||
.Lib => link_mode == .Dynamic,
|
||||
.Lib => link_mode == .dynamic,
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
@ -2025,15 +2025,15 @@ pub const LoadedStructType = struct {
|
||||
/// complicated logic.
|
||||
pub fn knownNonOpv(s: @This(), ip: *InternPool) bool {
|
||||
return switch (s.layout) {
|
||||
.Packed => false,
|
||||
.Auto, .Extern => s.flagsPtr(ip).known_non_opv,
|
||||
.@"packed" => false,
|
||||
.auto, .@"extern" => s.flagsPtr(ip).known_non_opv,
|
||||
};
|
||||
}
|
||||
|
||||
/// The returned pointer expires with any addition to the `InternPool`.
|
||||
/// Asserts the struct is not packed.
|
||||
pub fn flagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStruct.Flags {
|
||||
assert(self.layout != .Packed);
|
||||
assert(self.layout != .@"packed");
|
||||
const flags_field_index = std.meta.fieldIndex(Tag.TypeStruct, "flags").?;
|
||||
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
|
||||
}
|
||||
@ -2041,13 +2041,13 @@ pub const LoadedStructType = struct {
|
||||
/// The returned pointer expires with any addition to the `InternPool`.
|
||||
/// Asserts that the struct is packed.
|
||||
pub fn packedFlagsPtr(self: @This(), ip: *const InternPool) *Tag.TypeStructPacked.Flags {
|
||||
assert(self.layout == .Packed);
|
||||
assert(self.layout == .@"packed");
|
||||
const flags_field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "flags").?;
|
||||
return @ptrCast(&ip.extra.items[self.extra_index + flags_field_index]);
|
||||
}
|
||||
|
||||
pub fn assumeRuntimeBitsIfFieldTypesWip(s: @This(), ip: *InternPool) bool {
|
||||
if (s.layout == .Packed) return false;
|
||||
if (s.layout == .@"packed") return false;
|
||||
const flags_ptr = s.flagsPtr(ip);
|
||||
if (flags_ptr.field_types_wip) {
|
||||
flags_ptr.assumed_runtime_bits = true;
|
||||
@ -2057,7 +2057,7 @@ pub const LoadedStructType = struct {
|
||||
}
|
||||
|
||||
pub fn setTypesWip(s: @This(), ip: *InternPool) bool {
|
||||
if (s.layout == .Packed) return false;
|
||||
if (s.layout == .@"packed") return false;
|
||||
const flags_ptr = s.flagsPtr(ip);
|
||||
if (flags_ptr.field_types_wip) return true;
|
||||
flags_ptr.field_types_wip = true;
|
||||
@ -2065,12 +2065,12 @@ pub const LoadedStructType = struct {
|
||||
}
|
||||
|
||||
pub fn clearTypesWip(s: @This(), ip: *InternPool) void {
|
||||
if (s.layout == .Packed) return;
|
||||
if (s.layout == .@"packed") return;
|
||||
s.flagsPtr(ip).field_types_wip = false;
|
||||
}
|
||||
|
||||
pub fn setLayoutWip(s: @This(), ip: *InternPool) bool {
|
||||
if (s.layout == .Packed) return false;
|
||||
if (s.layout == .@"packed") return false;
|
||||
const flags_ptr = s.flagsPtr(ip);
|
||||
if (flags_ptr.layout_wip) return true;
|
||||
flags_ptr.layout_wip = true;
|
||||
@ -2078,12 +2078,12 @@ pub const LoadedStructType = struct {
|
||||
}
|
||||
|
||||
pub fn clearLayoutWip(s: @This(), ip: *InternPool) void {
|
||||
if (s.layout == .Packed) return;
|
||||
if (s.layout == .@"packed") return;
|
||||
s.flagsPtr(ip).layout_wip = false;
|
||||
}
|
||||
|
||||
pub fn setAlignmentWip(s: @This(), ip: *InternPool) bool {
|
||||
if (s.layout == .Packed) return false;
|
||||
if (s.layout == .@"packed") return false;
|
||||
const flags_ptr = s.flagsPtr(ip);
|
||||
if (flags_ptr.alignment_wip) return true;
|
||||
flags_ptr.alignment_wip = true;
|
||||
@ -2091,19 +2091,19 @@ pub const LoadedStructType = struct {
|
||||
}
|
||||
|
||||
pub fn clearAlignmentWip(s: @This(), ip: *InternPool) void {
|
||||
if (s.layout == .Packed) return;
|
||||
if (s.layout == .@"packed") return;
|
||||
s.flagsPtr(ip).alignment_wip = false;
|
||||
}
|
||||
|
||||
pub fn setInitsWip(s: @This(), ip: *InternPool) bool {
|
||||
switch (s.layout) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const flag = &s.packedFlagsPtr(ip).field_inits_wip;
|
||||
if (flag.*) return true;
|
||||
flag.* = true;
|
||||
return false;
|
||||
},
|
||||
.Auto, .Extern => {
|
||||
.auto, .@"extern" => {
|
||||
const flag = &s.flagsPtr(ip).field_inits_wip;
|
||||
if (flag.*) return true;
|
||||
flag.* = true;
|
||||
@ -2114,13 +2114,13 @@ pub const LoadedStructType = struct {
|
||||
|
||||
pub fn clearInitsWip(s: @This(), ip: *InternPool) void {
|
||||
switch (s.layout) {
|
||||
.Packed => s.packedFlagsPtr(ip).field_inits_wip = false,
|
||||
.Auto, .Extern => s.flagsPtr(ip).field_inits_wip = false,
|
||||
.@"packed" => s.packedFlagsPtr(ip).field_inits_wip = false,
|
||||
.auto, .@"extern" => s.flagsPtr(ip).field_inits_wip = false,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn setFullyResolved(s: @This(), ip: *InternPool) bool {
|
||||
if (s.layout == .Packed) return true;
|
||||
if (s.layout == .@"packed") return true;
|
||||
const flags_ptr = s.flagsPtr(ip);
|
||||
if (flags_ptr.fully_resolved) return true;
|
||||
flags_ptr.fully_resolved = true;
|
||||
@ -2134,7 +2134,7 @@ pub const LoadedStructType = struct {
|
||||
/// The returned pointer expires with any addition to the `InternPool`.
|
||||
/// Asserts the struct is not packed.
|
||||
pub fn size(self: @This(), ip: *InternPool) *u32 {
|
||||
assert(self.layout != .Packed);
|
||||
assert(self.layout != .@"packed");
|
||||
const size_field_index = std.meta.fieldIndex(Tag.TypeStruct, "size").?;
|
||||
return @ptrCast(&ip.extra.items[self.extra_index + size_field_index]);
|
||||
}
|
||||
@ -2144,14 +2144,14 @@ pub const LoadedStructType = struct {
|
||||
/// set to `none` until the layout is resolved.
|
||||
/// Asserts the struct is packed.
|
||||
pub fn backingIntType(s: @This(), ip: *const InternPool) *Index {
|
||||
assert(s.layout == .Packed);
|
||||
assert(s.layout == .@"packed");
|
||||
const field_index = std.meta.fieldIndex(Tag.TypeStructPacked, "backing_int_ty").?;
|
||||
return @ptrCast(&ip.extra.items[s.extra_index + field_index]);
|
||||
}
|
||||
|
||||
/// Asserts the struct is not packed.
|
||||
pub fn setZirIndex(s: @This(), ip: *InternPool, new_zir_index: TrackedInst.Index.Optional) void {
|
||||
assert(s.layout != .Packed);
|
||||
assert(s.layout != .@"packed");
|
||||
const field_index = std.meta.fieldIndex(Tag.TypeStruct, "zir_index").?;
|
||||
ip.extra.items[s.extra_index + field_index] = @intFromEnum(new_zir_index);
|
||||
}
|
||||
@ -2163,31 +2163,31 @@ pub const LoadedStructType = struct {
|
||||
|
||||
pub fn haveFieldInits(s: @This(), ip: *const InternPool) bool {
|
||||
return switch (s.layout) {
|
||||
.Packed => s.packedFlagsPtr(ip).inits_resolved,
|
||||
.Auto, .Extern => s.flagsPtr(ip).inits_resolved,
|
||||
.@"packed" => s.packedFlagsPtr(ip).inits_resolved,
|
||||
.auto, .@"extern" => s.flagsPtr(ip).inits_resolved,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn setHaveFieldInits(s: @This(), ip: *InternPool) void {
|
||||
switch (s.layout) {
|
||||
.Packed => s.packedFlagsPtr(ip).inits_resolved = true,
|
||||
.Auto, .Extern => s.flagsPtr(ip).inits_resolved = true,
|
||||
.@"packed" => s.packedFlagsPtr(ip).inits_resolved = true,
|
||||
.auto, .@"extern" => s.flagsPtr(ip).inits_resolved = true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn haveLayout(s: @This(), ip: *InternPool) bool {
|
||||
return switch (s.layout) {
|
||||
.Packed => s.backingIntType(ip).* != .none,
|
||||
.Auto, .Extern => s.flagsPtr(ip).layout_resolved,
|
||||
.@"packed" => s.backingIntType(ip).* != .none,
|
||||
.auto, .@"extern" => s.flagsPtr(ip).layout_resolved,
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isTuple(s: @This(), ip: *InternPool) bool {
|
||||
return s.layout != .Packed and s.flagsPtr(ip).is_tuple;
|
||||
return s.layout != .@"packed" and s.flagsPtr(ip).is_tuple;
|
||||
}
|
||||
|
||||
pub fn hasReorderedFields(s: @This()) bool {
|
||||
return s.layout == .Auto;
|
||||
return s.layout == .auto;
|
||||
}
|
||||
|
||||
pub const RuntimeOrderIterator = struct {
|
||||
@ -2221,7 +2221,7 @@ pub const LoadedStructType = struct {
|
||||
/// May or may not include zero-bit fields.
|
||||
/// Asserts the struct is not packed.
|
||||
pub fn iterateRuntimeOrder(s: @This(), ip: *InternPool) RuntimeOrderIterator {
|
||||
assert(s.layout != .Packed);
|
||||
assert(s.layout != .@"packed");
|
||||
return .{
|
||||
.ip = ip,
|
||||
.field_index = 0,
|
||||
@ -2239,7 +2239,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
|
||||
.decl = .none,
|
||||
.namespace = .none,
|
||||
.zir_index = .none,
|
||||
.layout = .Auto,
|
||||
.layout = .auto,
|
||||
.field_names = .{ .start = 0, .len = 0 },
|
||||
.field_types = .{ .start = 0, .len = 0 },
|
||||
.field_inits = .{ .start = 0, .len = 0 },
|
||||
@ -2314,7 +2314,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
|
||||
.decl = extra.data.decl.toOptional(),
|
||||
.namespace = namespace,
|
||||
.zir_index = extra.data.zir_index.toOptional(),
|
||||
.layout = if (extra.data.flags.is_extern) .Extern else .Auto,
|
||||
.layout = if (extra.data.flags.is_extern) .@"extern" else .auto,
|
||||
.field_names = names,
|
||||
.field_types = field_types,
|
||||
.field_inits = inits,
|
||||
@ -2367,7 +2367,7 @@ pub fn loadStructType(ip: *const InternPool, index: Index) LoadedStructType {
|
||||
.decl = extra.data.decl.toOptional(),
|
||||
.namespace = extra.data.namespace,
|
||||
.zir_index = extra.data.zir_index.toOptional(),
|
||||
.layout = .Packed,
|
||||
.layout = .@"packed",
|
||||
.field_names = field_names,
|
||||
.field_types = field_types,
|
||||
.field_inits = field_inits,
|
||||
@ -4455,7 +4455,6 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
|
||||
} else .{ .start = 0, .len = 0 } },
|
||||
} };
|
||||
} },
|
||||
|
||||
.type_struct_anon => .{ .anon_struct_type = extraTypeStructAnon(ip, data) },
|
||||
.type_tuple_anon => .{ .anon_struct_type = extraTypeTupleAnon(ip, data) },
|
||||
.type_union => .{ .union_type = ns: {
|
||||
@ -6009,9 +6008,9 @@ pub fn getStructType(
|
||||
};
|
||||
|
||||
const is_extern = switch (ini.layout) {
|
||||
.Auto => false,
|
||||
.Extern => true,
|
||||
.Packed => {
|
||||
.auto => false,
|
||||
.@"extern" => true,
|
||||
.@"packed" => {
|
||||
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Tag.TypeStructPacked).Struct.fields.len +
|
||||
// TODO: fmt bug
|
||||
// zig fmt: off
|
||||
@ -6140,7 +6139,7 @@ pub fn getStructType(
|
||||
if (ini.any_comptime_fields) {
|
||||
ip.extra.appendNTimesAssumeCapacity(0, comptime_elements_len);
|
||||
}
|
||||
if (ini.layout == .Auto) {
|
||||
if (ini.layout == .auto) {
|
||||
ip.extra.appendNTimesAssumeCapacity(@intFromEnum(LoadedStructType.RuntimeOrder.unresolved), ini.fields_len);
|
||||
}
|
||||
ip.extra.appendNTimesAssumeCapacity(std.math.maxInt(u32), ini.fields_len);
|
||||
|
||||
@ -279,7 +279,7 @@ pub const Export = struct {
|
||||
|
||||
pub const Options = struct {
|
||||
name: InternPool.NullTerminatedString,
|
||||
linkage: std.builtin.GlobalLinkage = .Strong,
|
||||
linkage: std.builtin.GlobalLinkage = .strong,
|
||||
section: InternPool.OptionalNullTerminatedString = .none,
|
||||
visibility: std.builtin.SymbolVisibility = .default,
|
||||
};
|
||||
@ -3310,7 +3310,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
|
||||
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
|
||||
assert(!small.has_captures_len);
|
||||
assert(!small.has_backing_int);
|
||||
assert(small.layout == .Auto);
|
||||
assert(small.layout == .auto);
|
||||
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
|
||||
const fields_len = if (small.has_fields_len) blk: {
|
||||
const fields_len = file.zir.extra[extra_index];
|
||||
@ -3327,7 +3327,7 @@ fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespa
|
||||
|
||||
const tracked_inst = try ip.trackZir(gpa, file, .main_struct_inst);
|
||||
const wip_ty = switch (try ip.getStructType(gpa, .{
|
||||
.layout = .Auto,
|
||||
.layout = .auto,
|
||||
.fields_len = fields_len,
|
||||
.known_non_opv = small.known_non_opv,
|
||||
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
|
||||
@ -5969,7 +5969,7 @@ pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
|
||||
|
||||
pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
|
||||
const s = mod.typeToStruct(ty) orelse return null;
|
||||
if (s.layout != .Packed) return null;
|
||||
if (s.layout != .@"packed") return null;
|
||||
return s;
|
||||
}
|
||||
|
||||
@ -6185,18 +6185,18 @@ pub fn structFieldAlignment(
|
||||
field_ty: Type,
|
||||
layout: std.builtin.Type.ContainerLayout,
|
||||
) Alignment {
|
||||
assert(layout != .Packed);
|
||||
assert(layout != .@"packed");
|
||||
if (explicit_alignment != .none) return explicit_alignment;
|
||||
switch (layout) {
|
||||
.Packed => unreachable,
|
||||
.Auto => {
|
||||
.@"packed" => unreachable,
|
||||
.auto => {
|
||||
if (mod.getTarget().ofmt == .c) {
|
||||
return structFieldAlignmentExtern(mod, field_ty);
|
||||
} else {
|
||||
return field_ty.abiAlignment(mod);
|
||||
}
|
||||
},
|
||||
.Extern => return structFieldAlignmentExtern(mod, field_ty),
|
||||
.@"extern" => return structFieldAlignmentExtern(mod, field_ty),
|
||||
}
|
||||
}
|
||||
|
||||
@ -6224,7 +6224,7 @@ pub fn structPackedFieldBitOffset(
|
||||
field_index: u32,
|
||||
) u16 {
|
||||
const ip = &mod.intern_pool;
|
||||
assert(struct_type.layout == .Packed);
|
||||
assert(struct_type.layout == .@"packed");
|
||||
assert(struct_type.haveLayout(ip));
|
||||
var bit_sum: u64 = 0;
|
||||
for (0..struct_type.field_types.len) |i| {
|
||||
|
||||
@ -178,7 +178,7 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
|
||||
return error.PieRequiresPic;
|
||||
break :b true;
|
||||
}
|
||||
if (options.global.link_mode == .Dynamic) {
|
||||
if (options.global.link_mode == .dynamic) {
|
||||
if (options.inherited.pic == false)
|
||||
return error.DynamicLinkingRequiresPic;
|
||||
break :b true;
|
||||
|
||||
234 src/Sema.zig
@ -357,7 +357,7 @@ pub const Block = struct {
|
||||
want_safety: ?bool = null,
|
||||
|
||||
/// What mode to generate float operations in, set by @setFloatMode
|
||||
float_mode: std.builtin.FloatMode = .Strict,
|
||||
float_mode: std.builtin.FloatMode = .strict,
|
||||
|
||||
c_import_buf: ?*std.ArrayList(u8) = null,
|
||||
|
||||
@ -686,7 +686,7 @@ pub const Block = struct {
|
||||
const sema = block.sema;
|
||||
const mod = sema.mod;
|
||||
return block.addInst(.{
|
||||
.tag = if (block.float_mode == .Optimized) .cmp_vector_optimized else .cmp_vector,
|
||||
.tag = if (block.float_mode == .optimized) .cmp_vector_optimized else .cmp_vector,
|
||||
.data = .{ .ty_pl = .{
|
||||
.ty = Air.internedToRef((try mod.vectorType(.{
|
||||
.len = sema.typeOf(lhs).vectorLen(mod),
|
||||
@ -1020,10 +1020,10 @@ fn analyzeBodyInner(
|
||||
.field_call => try sema.zirCall(block, inst, .field),
|
||||
.cmp_lt => try sema.zirCmp(block, inst, .lt),
|
||||
.cmp_lte => try sema.zirCmp(block, inst, .lte),
|
||||
.cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .Optimized)),
|
||||
.cmp_eq => try sema.zirCmpEq(block, inst, .eq, Air.Inst.Tag.fromCmpOp(.eq, block.float_mode == .optimized)),
|
||||
.cmp_gte => try sema.zirCmp(block, inst, .gte),
|
||||
.cmp_gt => try sema.zirCmp(block, inst, .gt),
|
||||
.cmp_neq => try sema.zirCmpEq(block, inst, .neq, Air.Inst.Tag.fromCmpOp(.neq, block.float_mode == .Optimized)),
|
||||
.cmp_neq => try sema.zirCmpEq(block, inst, .neq, Air.Inst.Tag.fromCmpOp(.neq, block.float_mode == .optimized)),
|
||||
.decl_ref => try sema.zirDeclRef(block, inst),
|
||||
.decl_val => try sema.zirDeclVal(block, inst),
|
||||
.load => try sema.zirLoad(block, inst),
|
||||
@ -3236,7 +3236,7 @@ fn zirUnionDecl(
|
||||
.status = .none,
|
||||
.runtime_tag = if (small.has_tag_type or small.auto_enum_tag)
|
||||
.tagged
|
||||
else if (small.layout != .Auto)
|
||||
else if (small.layout != .auto)
|
||||
.none
|
||||
else switch (block.wantSafety()) {
|
||||
true => .safety,
|
||||
@ -6274,7 +6274,7 @@ fn zirExportValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
.needed_comptime_reason = "export target must be comptime-known",
});
const options = try sema.resolveExportOptions(block, options_src, extra.options);
if (options.linkage == .Internal)
if (options.linkage == .internal)
return;
if (operand.val.getFunction(mod)) |function| {
const decl_index = function.owner_decl;
@ -6301,7 +6301,7 @@ pub fn analyzeExport(
const gpa = sema.gpa;
const mod = sema.mod;

if (options.linkage == .Internal)
if (options.linkage == .internal)
return;

try mod.ensureDeclAnalyzed(exported_decl_index);
@ -6450,8 +6450,8 @@ fn zirFence(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) Co
.needed_comptime_reason = "atomic order of @fence must be comptime-known",
});

if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.Acquire)) {
return sema.fail(block, order_src, "atomic ordering must be Acquire or stricter", .{});
if (@intFromEnum(order) < @intFromEnum(std.builtin.AtomicOrder.acquire)) {
return sema.fail(block, order_src, "atomic ordering must be acquire or stricter", .{});
}

_ = try block.addInst(.{
@ -10264,7 +10264,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff_unsigned, dest_range, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
@ -10281,7 +10281,7 @@ fn intCast(
const ok = if (is_vector) ok: {
const is_in_range = try block.addCmpVector(diff, dest_max, .lte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
@ -10303,7 +10303,7 @@ fn intCast(
const zero_inst = Air.internedToRef(zero_val.toIntern());
const is_in_range = try block.addCmpVector(operand, zero_inst, .gte);
const all_in_range = try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = is_in_range,
.operation = .And,
@ -10380,7 +10380,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(block, msg);
},
.Struct, .Union => if (dest_ty.containerLayout(mod) == .Auto) {
.Struct, .Union => if (dest_ty.containerLayout(mod) == .auto) {
const container = switch (dest_ty.zigTypeTag(mod)) {
.Struct => "struct",
.Union => "union",
@ -10443,7 +10443,7 @@ fn zirBitcast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
};
return sema.failWithOwnedErrorMsg(block, msg);
},
.Struct, .Union => if (operand_ty.containerLayout(mod) == .Auto) {
.Struct, .Union => if (operand_ty.containerLayout(mod) == .auto) {
const container = switch (operand_ty.zigTypeTag(mod)) {
.Struct => "struct",
.Union => "union",
@ -12530,7 +12530,7 @@ fn analyzeSwitchRuntimeBlock(
cases_extra.appendSliceAssumeCapacity(@ptrCast(case_block.instructions.items));
} else {
for (items) |item| {
const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, item);
const cmp_ok = try case_block.addBinOp(if (case_block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, item);
if (any_ok != .none) {
any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok);
} else {
@ -12549,12 +12549,12 @@ fn analyzeSwitchRuntimeBlock(

// operand >= first and operand <= last
const range_first_ok = try case_block.addBinOp(
if (case_block.float_mode == .Optimized) .cmp_gte_optimized else .cmp_gte,
if (case_block.float_mode == .optimized) .cmp_gte_optimized else .cmp_gte,
operand,
item_first,
);
const range_last_ok = try case_block.addBinOp(
if (case_block.float_mode == .Optimized) .cmp_lte_optimized else .cmp_lte,
if (case_block.float_mode == .optimized) .cmp_lte_optimized else .cmp_lte,
operand,
item_last,
);
@ -13904,7 +13904,7 @@ fn zirShl(
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
const any_ov_bit = if (lhs_ty.zigTypeTag(mod) == .Vector)
try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = ov_bit,
.operation = .Or,
@ -14044,7 +14044,7 @@ fn zirShr(
const ok = if (rhs_ty.zigTypeTag(mod) == .Vector) ok: {
const eql = try block.addCmpVector(lhs, back, .eq);
break :ok try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = eql,
.operation = .And,
@ -14811,7 +14811,7 @@ fn zirNegate(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
return Air.internedToRef((try rhs_val.floatNeg(rhs_ty, sema.arena, mod)).toIntern());
}
try sema.requireRuntimeBlock(block, src, null);
return block.addUnOp(if (block.float_mode == .Optimized) .neg_optimized else .neg, rhs);
return block.addUnOp(if (block.float_mode == .optimized) .neg_optimized else .neg, rhs);
}

const lhs = Air.internedToRef((try sema.splat(rhs_ty, try mod.intValue(rhs_scalar_ty, 0))).toIntern());
@ -15018,8 +15018,8 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
}
break :blk Air.Inst.Tag.div_trunc;
} else switch (block.float_mode) {
.Optimized => Air.Inst.Tag.div_float_optimized,
.Strict => Air.Inst.Tag.div_float,
.optimized => Air.Inst.Tag.div_float_optimized,
.strict => Air.Inst.Tag.div_float,
};
return block.addBinOp(air_tag, casted_lhs, casted_rhs);
}
@ -15142,8 +15142,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const eql = try block.addCmpVector(result, floored, .eq);
break :ok try block.addInst(.{
.tag = switch (block.float_mode) {
.Strict => .reduce,
.Optimized => .reduce_optimized,
.strict => .reduce,
.optimized => .reduce_optimized,
},
.data = .{ .reduce = .{
.operand = eql,
@ -15152,8 +15152,8 @@ fn zirDivExact(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
});
} else {
const is_in_range = try block.addBinOp(switch (block.float_mode) {
.Strict => .cmp_eq,
.Optimized => .cmp_eq_optimized,
.strict => .cmp_eq,
.optimized => .cmp_eq_optimized,
}, result, floored);
break :ok is_in_range;
}
@ -15503,7 +15503,7 @@ fn addDivByZeroSafety(
is_int: bool,
) CompileError!void {
// Strict IEEE floats have well-defined division by zero.
if (!is_int and block.float_mode == .Strict) return;
if (!is_int and block.float_mode == .strict) return;

// If rhs was comptime-known to be zero a compile error would have been
// emitted above.
@ -15535,8 +15535,8 @@ fn addDivByZeroSafety(
fn airTag(block: *Block, is_int: bool, normal: Air.Inst.Tag, optimized: Air.Inst.Tag) Air.Inst.Tag {
if (is_int) return normal;
return switch (block.float_mode) {
.Strict => normal,
.Optimized => optimized,
.strict => normal,
.optimized => optimized,
};
}

@ -16228,7 +16228,7 @@ fn analyzeArithmetic(
return casted_lhs;
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .add_optimized else .add;
const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .add_optimized else .add;
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (is_int) {
@ -16330,7 +16330,7 @@ fn analyzeArithmetic(
return casted_lhs;
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .sub_optimized else .sub;
const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .sub_optimized else .sub;
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef(mod)) {
if (is_int) {
@ -16448,7 +16448,7 @@ fn analyzeArithmetic(
}
}
}
const air_tag: Air.Inst.Tag = if (block.float_mode == .Optimized) .mul_optimized else .mul;
const air_tag: Air.Inst.Tag = if (block.float_mode == .optimized) .mul_optimized else .mul;
if (maybe_rhs_val) |rhs_val| {
if (rhs_val.isUndef(mod)) {
if (is_int) {
@ -16625,7 +16625,7 @@ fn analyzeArithmetic(
const ov_bit = try sema.tupleFieldValByIndex(block, src, op_ov, 1, op_ov_tuple_ty);
const any_ov_bit = if (resolved_type.zigTypeTag(mod) == .Vector)
try block.addInst(.{
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
.data = .{ .reduce = .{
.operand = ov_bit,
.operation = .Or,
@ -17168,7 +17168,7 @@ fn cmpSelf(
if (resolved_type.zigTypeTag(mod) == .Vector) {
return block.addCmpVector(casted_lhs, casted_rhs, op);
}
const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized);
const tag = Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized);
return block.addBinOp(tag, casted_lhs, casted_rhs);
}

@ -18131,8 +18131,8 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
};

const alignment = switch (layout) {
.Auto, .Extern => try sema.unionFieldAlignment(union_obj, @intCast(i)),
.Packed => .none,
.auto, .@"extern" => try sema.unionFieldAlignment(union_obj, @intCast(i)),
.@"packed" => .none,
};

const field_ty = union_obj.field_types.get(ip)[i];
@ -18350,7 +18350,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const opt_default_val = if (field_init == .none) null else Value.fromInterned(field_init);
const default_val_ptr = try sema.optRefValue(opt_default_val);
const alignment = switch (struct_type.layout) {
.Packed => .none,
.@"packed" => .none,
else => try sema.structFieldAlignment(
struct_type.fieldAlign(ip, i),
field_ty,
@ -19906,7 +19906,7 @@ fn zirStructInit(
var field_i: u32 = 0;
var extra_index = extra.end;

const is_packed = resolved_ty.containerLayout(mod) == .Packed;
const is_packed = resolved_ty.containerLayout(mod) == .@"packed";
while (field_i < extra.data.fields_len) : (field_i += 1) {
const item = sema.code.extraData(Zir.Inst.StructInit.Item, extra_index);
extra_index = item.end;
@ -21302,7 +21302,7 @@ fn zirReify(
return sema.fail(block, src, "reified structs must have no decls", .{});
}

if (layout != .Packed and !backing_integer_val.isNull(mod)) {
if (layout != .@"packed" and !backing_integer_val.isNull(mod)) {
return sema.fail(block, src, "non-packed struct does not support backing integer type", .{});
}

@ -21665,7 +21665,7 @@ fn reifyUnion(
.status = .none,
.runtime_tag = if (opt_tag_type_val.optionalValue(mod) != null)
.tagged
else if (layout != .Auto)
else if (layout != .auto)
.none
else switch (block.wantSafety()) {
true => .safety,
@ -21804,7 +21804,7 @@ fn reifyUnion(
break :msg msg;
});
}
if (layout == .Extern and !try sema.validateExternType(field_ty, .union_field)) {
if (layout == .@"extern" and !try sema.validateExternType(field_ty, .union_field)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "extern unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
@ -21815,7 +21815,7 @@ fn reifyUnion(
try sema.addDeclaredHereNote(msg, field_ty);
break :msg msg;
});
} else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
} else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
return sema.failWithOwnedErrorMsg(block, msg: {
const msg = try sema.errMsg(block, src, "packed unions cannot contain fields of type '{}'", .{field_ty.fmt(mod)});
errdefer msg.destroy(gpa);
@ -21938,9 +21938,9 @@ fn reifyStruct(
errdefer wip_ty.cancel(ip);

if (is_tuple) switch (layout) {
.Extern => return sema.fail(block, src, "extern tuples are not supported", .{}),
.Packed => return sema.fail(block, src, "packed tuples are not supported", .{}),
.Auto => {},
.@"extern" => return sema.fail(block, src, "extern tuples are not supported", .{}),
.@"packed" => return sema.fail(block, src, "packed tuples are not supported", .{}),
.auto => {},
};

const new_decl_index = try sema.createAnonymousDeclTypeNamed(block, src, .{
@ -21990,11 +21990,11 @@ fn reifyStruct(

const byte_align = try field_alignment_val.toUnsignedIntAdvanced(sema);
if (byte_align == 0) {
if (layout != .Packed) {
if (layout != .@"packed") {
struct_type.field_aligns.get(ip)[field_idx] = .none;
}
} else {
if (layout == .Packed) return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
if (layout == .@"packed") return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
if (!math.isPowerOfTwo(byte_align)) return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
struct_type.field_aligns.get(ip)[field_idx] = Alignment.fromNonzeroByteUnits(byte_align);
}
@ -22004,9 +22004,9 @@ fn reifyStruct(
|
||||
if (field_is_comptime) {
|
||||
assert(any_comptime_fields);
|
||||
switch (layout) {
|
||||
.Extern => return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}),
|
||||
.Packed => return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}),
|
||||
.Auto => struct_type.setFieldComptime(ip, field_idx),
|
||||
.@"extern" => return sema.fail(block, src, "extern struct fields cannot be marked comptime", .{}),
|
||||
.@"packed" => return sema.fail(block, src, "packed struct fields cannot be marked comptime", .{}),
|
||||
.auto => struct_type.setFieldComptime(ip, field_idx),
|
||||
}
|
||||
}
|
||||
|
||||
@ -22047,7 +22047,7 @@ fn reifyStruct(
|
||||
break :msg msg;
|
||||
});
|
||||
}
|
||||
if (layout == .Extern and !try sema.validateExternType(field_ty, .struct_field)) {
|
||||
if (layout == .@"extern" and !try sema.validateExternType(field_ty, .struct_field)) {
|
||||
return sema.failWithOwnedErrorMsg(block, msg: {
|
||||
const msg = try sema.errMsg(block, src, "extern structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
|
||||
errdefer msg.destroy(gpa);
|
||||
@ -22058,7 +22058,7 @@ fn reifyStruct(
|
||||
try sema.addDeclaredHereNote(msg, field_ty);
|
||||
break :msg msg;
|
||||
});
|
||||
} else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
|
||||
} else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
|
||||
return sema.failWithOwnedErrorMsg(block, msg: {
|
||||
const msg = try sema.errMsg(block, src, "packed structs cannot contain fields of type '{}'", .{field_ty.fmt(sema.mod)});
|
||||
errdefer msg.destroy(gpa);
|
||||
@ -22072,7 +22072,7 @@ fn reifyStruct(
|
||||
}
|
||||
}
|
||||
|
||||
if (layout == .Packed) {
|
||||
if (layout == .@"packed") {
|
||||
var fields_bit_sum: u64 = 0;
|
||||
for (0..struct_type.field_types.len) |field_idx| {
|
||||
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_idx]);
|
||||
@ -22226,7 +22226,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
|
||||
if (dest_scalar_ty.intInfo(mod).bits == 0) {
|
||||
if (!is_vector) {
|
||||
if (block.wantSafety()) {
|
||||
const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try mod.floatValue(operand_ty, 0.0)).toIntern()));
|
||||
const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, operand, Air.internedToRef((try mod.floatValue(operand_ty, 0.0)).toIntern()));
|
||||
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
|
||||
}
|
||||
return Air.internedToRef((try mod.intValue(dest_ty, 0)).toIntern());
|
||||
@ -22236,7 +22236,7 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
|
||||
for (0..len) |i| {
|
||||
const idx_ref = try mod.intRef(Type.usize, i);
|
||||
const elem_ref = try block.addBinOp(.array_elem_val, operand, idx_ref);
|
||||
const ok = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 0.0)).toIntern()));
|
||||
const ok = try block.addBinOp(if (block.float_mode == .optimized) .cmp_eq_optimized else .cmp_eq, elem_ref, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 0.0)).toIntern()));
|
||||
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
|
||||
}
|
||||
}
|
||||
@ -22246,12 +22246,12 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
|
||||
} }));
|
||||
}
|
||||
if (!is_vector) {
|
||||
const result = try block.addTyOp(if (block.float_mode == .Optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand);
|
||||
const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_ty, operand);
|
||||
if (block.wantSafety()) {
|
||||
const back = try block.addTyOp(.float_from_int, operand_ty, result);
|
||||
const diff = try block.addBinOp(.sub, operand, back);
|
||||
const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_ty, 1.0)).toIntern()));
|
||||
const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_ty, -1.0)).toIntern()));
|
||||
const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_ty, 1.0)).toIntern()));
|
||||
const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_ty, -1.0)).toIntern()));
|
||||
const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
|
||||
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
|
||||
}
|
||||
@ -22262,12 +22262,12 @@ fn zirIntFromFloat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileErro
|
||||
for (new_elems, 0..) |*new_elem, i| {
|
||||
const idx_ref = try mod.intRef(Type.usize, i);
|
||||
const old_elem = try block.addBinOp(.array_elem_val, operand, idx_ref);
|
||||
const result = try block.addTyOp(if (block.float_mode == .Optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem);
|
||||
const result = try block.addTyOp(if (block.float_mode == .optimized) .int_from_float_optimized else .int_from_float, dest_scalar_ty, old_elem);
|
||||
if (block.wantSafety()) {
|
||||
const back = try block.addTyOp(.float_from_int, operand_scalar_ty, result);
|
||||
const diff = try block.addBinOp(.sub, old_elem, back);
|
||||
const ok_pos = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 1.0)).toIntern()));
|
||||
const ok_neg = try block.addBinOp(if (block.float_mode == .Optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, -1.0)).toIntern()));
|
||||
const ok_pos = try block.addBinOp(if (block.float_mode == .optimized) .cmp_lt_optimized else .cmp_lt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, 1.0)).toIntern()));
|
||||
const ok_neg = try block.addBinOp(if (block.float_mode == .optimized) .cmp_gt_optimized else .cmp_gt, diff, Air.internedToRef((try mod.floatValue(operand_scalar_ty, -1.0)).toIntern()));
|
||||
const ok = try block.addBinOp(.bool_and, ok_pos, ok_neg);
|
||||
try sema.addSafetyCheck(block, src, ok, .integer_part_out_of_bounds);
|
||||
}
|
||||
@ -23311,7 +23311,7 @@ fn bitOffsetOf(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!u6
|
||||
}
|
||||
|
||||
switch (ty.containerLayout(mod)) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
var bit_sum: u64 = 0;
|
||||
const struct_type = ip.loadStructType(ty.toIntern());
|
||||
for (0..struct_type.field_types.len) |i| {
|
||||
@ -23802,7 +23802,7 @@ fn resolveExportOptions(
|
||||
return sema.fail(block, name_src, "exported symbol name cannot be empty", .{});
|
||||
}
|
||||
|
||||
if (visibility != .default and linkage == .Internal) {
|
||||
if (visibility != .default and linkage == .internal) {
|
||||
return sema.fail(block, visibility_src, "symbol '{s}' exported with internal linkage has non-default visibility {s}", .{
|
||||
name, @tagName(visibility),
|
||||
});
|
||||
@ -23894,17 +23894,17 @@ fn zirCmpxchg(
|
||||
.needed_comptime_reason = "atomic order of cmpxchg failure must be comptime-known",
|
||||
});
|
||||
|
||||
if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
|
||||
return sema.fail(block, success_order_src, "success atomic ordering must be Monotonic or stricter", .{});
|
||||
if (@intFromEnum(success_order) < @intFromEnum(std.builtin.AtomicOrder.monotonic)) {
|
||||
return sema.fail(block, success_order_src, "success atomic ordering must be monotonic or stricter", .{});
|
||||
}
|
||||
if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.Monotonic)) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must be Monotonic or stricter", .{});
|
||||
if (@intFromEnum(failure_order) < @intFromEnum(std.builtin.AtomicOrder.monotonic)) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must be monotonic or stricter", .{});
|
||||
}
|
||||
if (@intFromEnum(failure_order) > @intFromEnum(success_order)) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must be no stricter than success", .{});
|
||||
}
|
||||
if (failure_order == .Release or failure_order == .AcqRel) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must not be Release or AcqRel", .{});
|
||||
if (failure_order == .release or failure_order == .acq_rel) {
|
||||
return sema.fail(block, failure_order_src, "failure atomic ordering must not be release or acq_rel", .{});
|
||||
}
|
||||
|
||||
const result_ty = try mod.optionalType(elem_ty.toIntern());
|
||||
@ -24042,7 +24042,7 @@ fn zirReduce(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
|
||||
|
||||
try sema.requireRuntimeBlock(block, inst_data.src(), operand_src);
|
||||
return block.addInst(.{
|
||||
.tag = if (block.float_mode == .Optimized) .reduce_optimized else .reduce,
|
||||
.tag = if (block.float_mode == .optimized) .reduce_optimized else .reduce,
|
||||
.data = .{ .reduce = .{
|
||||
.operand = operand,
|
||||
.operation = operation,
|
||||
@ -24346,11 +24346,11 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
|
||||
});
|
||||
|
||||
switch (order) {
|
||||
.Release, .AcqRel => {
|
||||
.release, .acq_rel => {
|
||||
return sema.fail(
|
||||
block,
|
||||
order_src,
|
||||
"@atomicLoad atomic ordering must not be Release or AcqRel",
|
||||
"@atomicLoad atomic ordering must not be release or acq_rel",
|
||||
.{},
|
||||
);
|
||||
},
|
||||
@ -24412,8 +24412,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
|
||||
.needed_comptime_reason = "atomic order of @atomicRmW must be comptime-known",
|
||||
});
|
||||
|
||||
if (order == .Unordered) {
|
||||
return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be Unordered", .{});
|
||||
if (order == .unordered) {
|
||||
return sema.fail(block, order_src, "@atomicRmw atomic ordering must not be unordered", .{});
|
||||
}
|
||||
|
||||
// special case zero bit types
|
||||
@ -24482,18 +24482,18 @@ fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
|
||||
});
|
||||
|
||||
const air_tag: Air.Inst.Tag = switch (order) {
|
||||
.Acquire, .AcqRel => {
|
||||
.acquire, .acq_rel => {
|
||||
return sema.fail(
|
||||
block,
|
||||
order_src,
|
||||
"@atomicStore atomic ordering must not be Acquire or AcqRel",
|
||||
"@atomicStore atomic ordering must not be acquire or acq_rel",
|
||||
.{},
|
||||
);
|
||||
},
|
||||
.Unordered => .atomic_store_unordered,
|
||||
.Monotonic => .atomic_store_monotonic,
|
||||
.Release => .atomic_store_release,
|
||||
.SeqCst => .atomic_store_seq_cst,
|
||||
.unordered => .atomic_store_unordered,
|
||||
.monotonic => .atomic_store_monotonic,
|
||||
.release => .atomic_store_release,
|
||||
.seq_cst => .atomic_store_seq_cst,
|
||||
};
|
||||
|
||||
return sema.storePtr2(block, src, ptr, ptr_src, operand, operand_src, air_tag);
|
||||
@ -24710,7 +24710,7 @@ fn zirFieldParentPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
|
||||
},
|
||||
};
|
||||
|
||||
if (parent_ty.containerLayout(mod) == .Packed) {
|
||||
if (parent_ty.containerLayout(mod) == .@"packed") {
|
||||
return sema.fail(block, src, "TODO handle packed structs/unions with @fieldParentPtr", .{});
|
||||
} else {
|
||||
ptr_ty_data.flags.alignment = blk: {
|
||||
@ -25888,7 +25888,7 @@ fn resolveExternOptions(
|
||||
) CompileError!struct {
|
||||
name: InternPool.NullTerminatedString,
|
||||
library_name: InternPool.OptionalNullTerminatedString = .none,
|
||||
linkage: std.builtin.GlobalLinkage = .Strong,
|
||||
linkage: std.builtin.GlobalLinkage = .strong,
|
||||
is_thread_local: bool = false,
|
||||
} {
|
||||
const mod = sema.mod;
|
||||
@ -25938,7 +25938,7 @@ fn resolveExternOptions(
|
||||
return sema.fail(block, name_src, "extern symbol name cannot be empty", .{});
|
||||
}
|
||||
|
||||
if (linkage != .Weak and linkage != .Strong) {
|
||||
if (linkage != .weak and linkage != .strong) {
|
||||
return sema.fail(block, linkage_src, "extern symbol must use strong or weak linkage", .{});
|
||||
}
|
||||
|
||||
@ -25984,7 +25984,7 @@ fn zirBuiltinExtern(
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
if (options.linkage == .Weak and !ty.ptrAllowsZero(mod)) {
|
||||
if (options.linkage == .weak and !ty.ptrAllowsZero(mod)) {
|
||||
ty = try mod.optionalType(ty.toIntern());
|
||||
}
|
||||
const ptr_info = ty.ptrInfo(mod);
|
||||
@ -26010,7 +26010,7 @@ fn zirBuiltinExtern(
|
||||
.is_extern = true,
|
||||
.is_const = ptr_info.flags.is_const,
|
||||
.is_threadlocal = options.is_thread_local,
|
||||
.is_weak_linkage = options.linkage == .Weak,
|
||||
.is_weak_linkage = options.linkage == .weak,
|
||||
} }),
|
||||
),
|
||||
}, options.name);
|
||||
@ -26328,15 +26328,15 @@ fn validateExternType(
|
||||
return sema.validateExternType(ty.intTagType(mod), position);
|
||||
},
|
||||
.Struct, .Union => switch (ty.containerLayout(mod)) {
|
||||
.Extern => return true,
|
||||
.Packed => {
|
||||
.@"extern" => return true,
|
||||
.@"packed" => {
|
||||
const bit_size = try ty.bitSizeAdvanced(mod, sema);
|
||||
switch (bit_size) {
|
||||
0, 8, 16, 32, 64, 128 => return true,
|
||||
else => return false,
|
||||
}
|
||||
},
|
||||
.Auto => return !(try sema.typeHasRuntimeBits(ty)),
|
||||
.auto => return !(try sema.typeHasRuntimeBits(ty)),
|
||||
},
|
||||
.Array => {
|
||||
if (position == .ret_ty or position == .param_ty) return false;
|
||||
@ -26456,7 +26456,7 @@ fn validatePackedType(sema: *Sema, ty: Type) !bool {
|
||||
.Enum,
|
||||
=> return true,
|
||||
.Pointer => return !ty.isSlice(mod) and !try sema.typeRequiresComptime(ty),
|
||||
.Struct, .Union => return ty.containerLayout(mod) == .Packed,
|
||||
.Struct, .Union => return ty.containerLayout(mod) == .@"packed",
|
||||
}
|
||||
}
|
||||
|
||||
@ -27596,7 +27596,7 @@ fn structFieldPtrByIndex(
|
||||
else
|
||||
try sema.typeAbiAlignment(Type.fromInterned(struct_ptr_ty_info.child));
|
||||
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
comptime assert(Type.packed_struct_layout_version == 2);
|
||||
|
||||
var running_bits: u16 = 0;
|
||||
@ -27641,7 +27641,7 @@ fn structFieldPtrByIndex(
|
||||
ptr_ty_data.packed_offset = .{ .host_size = 0, .bit_offset = 0 };
|
||||
}
|
||||
}
|
||||
} else if (struct_type.layout == .Extern) {
|
||||
} else if (struct_type.layout == .@"extern") {
|
||||
// For extern structs, field alignment might be bigger than type's
|
||||
// natural alignment. Eg, in `extern struct { x: u32, y: u16 }` the
|
||||
// second field is aligned as u32.
|
||||
@ -27846,7 +27846,7 @@ fn unionFieldPtr(
|
||||
.is_const = union_ptr_info.flags.is_const,
|
||||
.is_volatile = union_ptr_info.flags.is_volatile,
|
||||
.address_space = union_ptr_info.flags.address_space,
|
||||
.alignment = if (union_obj.getLayout(ip) == .Auto) blk: {
|
||||
.alignment = if (union_obj.getLayout(ip) == .auto) blk: {
|
||||
const union_align = if (union_ptr_info.flags.alignment != .none)
|
||||
union_ptr_info.flags.alignment
|
||||
else
|
||||
@ -27875,7 +27875,7 @@ fn unionFieldPtr(
|
||||
|
||||
if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| ct: {
|
||||
switch (union_obj.getLayout(ip)) {
|
||||
.Auto => if (!initializing) {
|
||||
.auto => if (!initializing) {
|
||||
const union_val = (try sema.pointerDeref(block, src, union_ptr_val, union_ptr_ty)) orelse
|
||||
break :ct;
|
||||
if (union_val.isUndef(mod)) {
|
||||
@ -27899,7 +27899,7 @@ fn unionFieldPtr(
|
||||
return sema.failWithOwnedErrorMsg(block, msg);
|
||||
}
|
||||
},
|
||||
.Packed, .Extern => {},
|
||||
.@"packed", .@"extern" => {},
|
||||
}
|
||||
return Air.internedToRef((try mod.intern(.{ .ptr = .{
|
||||
.ty = ptr_field_ty.toIntern(),
|
||||
@ -27911,7 +27911,7 @@ fn unionFieldPtr(
|
||||
}
|
||||
|
||||
try sema.requireRuntimeBlock(block, src, null);
|
||||
if (!initializing and union_obj.getLayout(ip) == .Auto and block.wantSafety() and
|
||||
if (!initializing and union_obj.getLayout(ip) == .auto and block.wantSafety() and
|
||||
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
|
||||
{
|
||||
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
|
||||
@ -27954,7 +27954,7 @@ fn unionFieldVal(
|
||||
const field_tag = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
|
||||
const tag_matches = un.tag == field_tag.toIntern();
|
||||
switch (union_obj.getLayout(ip)) {
|
||||
.Auto => {
|
||||
.auto => {
|
||||
if (tag_matches) {
|
||||
return Air.internedToRef(un.val);
|
||||
} else {
|
||||
@ -27971,7 +27971,7 @@ fn unionFieldVal(
|
||||
return sema.failWithOwnedErrorMsg(block, msg);
|
||||
}
|
||||
},
|
||||
.Packed, .Extern => |layout| {
|
||||
.@"packed", .@"extern" => |layout| {
|
||||
if (tag_matches) {
|
||||
return Air.internedToRef(un.val);
|
||||
} else {
|
||||
@ -27989,7 +27989,7 @@ fn unionFieldVal(
|
||||
}
|
||||
|
||||
try sema.requireRuntimeBlock(block, src, null);
|
||||
if (union_obj.getLayout(ip) == .Auto and block.wantSafety() and
|
||||
if (union_obj.getLayout(ip) == .auto and block.wantSafety() and
|
||||
union_ty.unionTagTypeSafety(mod) != null and union_obj.field_types.len > 1)
|
||||
{
|
||||
const wanted_tag_val = try mod.enumValueFieldIndex(Type.fromInterned(union_obj.enum_tag_ty), enum_field_index);
|
||||
@ -30961,7 +30961,7 @@ fn beginComptimePtrMutation(
|
||||
|
||||
const tag_type = base_child_ty.unionTagTypeHypothetical(mod);
|
||||
const hypothetical_tag = try mod.enumValueFieldIndex(tag_type, field_index);
|
||||
if (layout == .Auto or (payload.tag != null and hypothetical_tag.eql(payload.tag.?, tag_type, mod))) {
|
||||
if (layout == .auto or (payload.tag != null and hypothetical_tag.eql(payload.tag.?, tag_type, mod))) {
|
||||
// We need to set the active field of the union.
|
||||
payload.tag = hypothetical_tag;
|
||||
|
||||
@ -30988,7 +30988,7 @@ fn beginComptimePtrMutation(
|
||||
.pointee = .{ .reinterpret = .{
|
||||
.val_ptr = val_ptr,
|
||||
.byte_offset = 0,
|
||||
.write_packed = layout == .Packed,
|
||||
.write_packed = layout == .@"packed",
|
||||
} },
|
||||
.ty = parent.ty,
|
||||
};
|
||||
@ -31395,7 +31395,7 @@ fn beginComptimePtrLoad(
|
||||
|
||||
if (container_ty.hasWellDefinedLayout(mod)) {
|
||||
const struct_obj = mod.typeToStruct(container_ty);
|
||||
if (struct_obj != null and struct_obj.?.layout == .Packed) {
|
||||
if (struct_obj != null and struct_obj.?.layout == .@"packed") {
|
||||
// packed structs are not byte addressable
|
||||
deref.parent = null;
|
||||
} else if (deref.parent) |*parent| {
|
||||
@ -31551,7 +31551,7 @@ fn bitCastUnionFieldVal(
|
||||
|
||||
// Reading a larger value means we need to reinterpret from undefined bytes.
|
||||
const offset = switch (layout) {
|
||||
.Extern => offset: {
|
||||
.@"extern" => offset: {
|
||||
if (field_size > old_size) @memset(buffer[old_size..], 0xaa);
|
||||
val.writeToMemory(old_ty, mod, buffer) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
@ -31561,7 +31561,7 @@ fn bitCastUnionFieldVal(
|
||||
};
|
||||
break :offset 0;
|
||||
},
|
||||
.Packed => offset: {
|
||||
.@"packed" => offset: {
|
||||
if (field_size > old_size) {
|
||||
const min_size = @max(old_size, 1);
|
||||
switch (endian) {
|
||||
@ -31577,7 +31577,7 @@ fn bitCastUnionFieldVal(
|
||||
|
||||
break :offset if (endian == .big) buffer.len - field_size else 0;
|
||||
},
|
||||
.Auto => unreachable,
|
||||
.auto => unreachable,
|
||||
};
|
||||
|
||||
return Value.readFromMemory(field_ty, mod, buffer[offset..], sema.arena) catch |err| switch (err) {
|
||||
@ -33506,7 +33506,7 @@ fn cmpNumeric(
|
||||
};
|
||||
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
|
||||
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
|
||||
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
|
||||
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized), casted_lhs, casted_rhs);
|
||||
}
|
||||
// For mixed unsigned integer sizes, implicit cast both operands to the larger integer.
|
||||
// For mixed signed and unsigned integers, implicit cast both operands to a signed
|
||||
@ -33651,7 +33651,7 @@ fn cmpNumeric(
|
||||
const casted_lhs = try sema.coerce(block, dest_ty, lhs, lhs_src);
|
||||
const casted_rhs = try sema.coerce(block, dest_ty, rhs, rhs_src);
|
||||
|
||||
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .Optimized), casted_lhs, casted_rhs);
|
||||
return block.addBinOp(Air.Inst.Tag.fromCmpOp(op, block.float_mode == .optimized), casted_lhs, casted_rhs);
|
||||
}
|
||||
|
||||
/// Asserts that LHS value is an int or comptime int and not undefined, and
|
||||
@ -35608,7 +35608,7 @@ pub fn resolveStructAlignment(
|
||||
const target = mod.getTarget();
|
||||
|
||||
assert(struct_type.flagsPtr(ip).alignment == .none);
|
||||
assert(struct_type.layout != .Packed);
|
||||
assert(struct_type.layout != .@"packed");
|
||||
|
||||
if (struct_type.flagsPtr(ip).field_types_wip) {
|
||||
// We'll guess "pointer-aligned", if the struct has an
|
||||
@ -35661,7 +35661,7 @@ fn resolveStructLayout(sema: *Sema, ty: Type) CompileError!void {
|
||||
|
||||
try sema.resolveTypeFields(ty);
|
||||
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
try semaBackingIntType(mod, struct_type);
|
||||
return;
|
||||
}
|
||||
@ -36625,11 +36625,11 @@ fn semaStructFields(
|
||||
const fields_len, const small, var extra_index = structZirInfo(zir, zir_index);
|
||||
|
||||
if (fields_len == 0) switch (struct_type.layout) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
try semaBackingIntType(mod, struct_type);
|
||||
return;
|
||||
},
|
||||
.Auto, .Extern => {
|
||||
.auto, .@"extern" => {
|
||||
struct_type.size(ip).* = 0;
|
||||
struct_type.flagsPtr(ip).layout_resolved = true;
|
||||
return;
|
||||
@ -36810,7 +36810,7 @@ fn semaStructFields(
|
||||
return sema.failWithOwnedErrorMsg(&block_scope, msg);
|
||||
}
|
||||
switch (struct_type.layout) {
|
||||
.Extern => if (!try sema.validateExternType(field_ty, .struct_field)) {
|
||||
.@"extern" => if (!try sema.validateExternType(field_ty, .struct_field)) {
|
||||
const msg = msg: {
|
||||
const ty_src = mod.fieldSrcLoc(decl_index, .{
|
||||
.index = field_i,
|
||||
@ -36826,7 +36826,7 @@ fn semaStructFields(
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(&block_scope, msg);
|
||||
},
|
||||
.Packed => if (!try sema.validatePackedType(field_ty)) {
|
||||
.@"packed" => if (!try sema.validatePackedType(field_ty)) {
|
||||
const msg = msg: {
|
||||
const ty_src = mod.fieldSrcLoc(decl_index, .{
|
||||
.index = field_i,
|
||||
@ -37350,7 +37350,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded
|
||||
return sema.failWithOwnedErrorMsg(&block_scope, msg);
|
||||
}
|
||||
const layout = union_type.getLayout(ip);
|
||||
if (layout == .Extern and
|
||||
if (layout == .@"extern" and
|
||||
!try sema.validateExternType(field_ty, .union_field))
|
||||
{
|
||||
const msg = msg: {
|
||||
@ -37367,7 +37367,7 @@ fn semaUnionFields(mod: *Module, arena: Allocator, union_type: InternPool.Loaded
|
||||
break :msg msg;
|
||||
};
|
||||
return sema.failWithOwnedErrorMsg(&block_scope, msg);
|
||||
} else if (layout == .Packed and !try sema.validatePackedType(field_ty)) {
|
||||
} else if (layout == .@"packed" and !try sema.validatePackedType(field_ty)) {
|
||||
const msg = msg: {
|
||||
const ty_src = mod.fieldSrcLoc(union_type.decl, .{
|
||||
.index = field_i,
|
||||
@ -38286,9 +38286,9 @@ fn structFieldAlignment(
|
||||
return explicit_alignment;
|
||||
const mod = sema.mod;
|
||||
switch (layout) {
|
||||
.Packed => return .none,
|
||||
.Auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty),
|
||||
.Extern => {},
|
||||
.@"packed" => return .none,
|
||||
.auto => if (mod.getTarget().ofmt != .c) return sema.typeAbiAlignment(field_ty),
|
||||
.@"extern" => {},
|
||||
}
|
||||
// extern
|
||||
const ty_abi_align = try sema.typeAbiAlignment(field_ty);
|
||||
|
||||
@ -676,8 +676,8 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
|
||||
.Struct => {
|
||||
const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
|
||||
switch (struct_type.layout) {
|
||||
.Auto => return error.IllDefinedMemoryLayout,
|
||||
.Extern => for (0..struct_type.field_types.len) |i| {
|
||||
.auto => return error.IllDefinedMemoryLayout,
|
||||
.@"extern" => for (0..struct_type.field_types.len) |i| {
|
||||
const off: usize = @intCast(ty.structFieldOffset(i, mod));
|
||||
const field_val = switch (val.ip_index) {
|
||||
.none => switch (val.tag()) {
|
||||
@ -701,7 +701,7 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
|
||||
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
|
||||
try writeToMemory(field_val, field_ty, mod, buffer[off..]);
|
||||
},
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
|
||||
},
|
||||
@ -724,8 +724,8 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
|
||||
bigint.writeTwosComplement(buffer[0..byte_count], endian);
|
||||
},
|
||||
.Union => switch (ty.containerLayout(mod)) {
|
||||
.Auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already
|
||||
.Extern => {
|
||||
.auto => return error.IllDefinedMemoryLayout, // Sema is supposed to have emitted a compile error already
|
||||
.@"extern" => {
|
||||
if (val.unionTag(mod)) |union_tag| {
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
|
||||
@ -739,7 +739,7 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
|
||||
return writeToMemory(val.unionValue(mod), backing_ty, mod, buffer[0..byte_count]);
|
||||
}
|
||||
},
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const backing_ty = try ty.unionBackingType(mod);
|
||||
const byte_count: usize = @intCast(backing_ty.abiSize(mod));
|
||||
return writeToPackedMemory(val, ty, mod, buffer[0..byte_count], 0);
|
||||
@ -841,7 +841,7 @@ pub fn writeToPackedMemory(
|
||||
const struct_type = ip.loadStructType(ty.toIntern());
|
||||
// Sema is supposed to have emitted a compile error already in the case of Auto,
|
||||
// and Extern is handled in non-packed writeToMemory.
|
||||
assert(struct_type.layout == .Packed);
|
||||
assert(struct_type.layout == .@"packed");
|
||||
var bits: u16 = 0;
|
||||
for (0..struct_type.field_types.len) |i| {
|
||||
const field_val = switch (val.ip_index) {
|
||||
@ -866,8 +866,8 @@ pub fn writeToPackedMemory(
|
||||
.Union => {
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
switch (union_obj.getLayout(ip)) {
|
||||
.Auto, .Extern => unreachable, // Handled in non-packed writeToMemory
|
||||
.Packed => {
|
||||
.auto, .@"extern" => unreachable, // Handled in non-packed writeToMemory
|
||||
.@"packed" => {
|
||||
if (val.unionTag(mod)) |union_tag| {
|
||||
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
|
||||
const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
|
||||
@ -991,8 +991,8 @@ pub fn readFromMemory(
|
||||
.Struct => {
|
||||
const struct_type = mod.typeToStruct(ty).?;
|
||||
switch (struct_type.layout) {
|
||||
.Auto => unreachable, // Sema is supposed to have emitted a compile error already
|
||||
.Extern => {
|
||||
.auto => unreachable, // Sema is supposed to have emitted a compile error already
|
||||
.@"extern" => {
|
||||
const field_types = struct_type.field_types;
|
||||
const field_vals = try arena.alloc(InternPool.Index, field_types.len);
|
||||
for (field_vals, 0..) |*field_val, i| {
|
||||
@ -1006,7 +1006,7 @@ pub fn readFromMemory(
|
||||
.storage = .{ .elems = field_vals },
|
||||
} })));
|
||||
},
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
|
||||
},
|
||||
@ -1025,8 +1025,8 @@ pub fn readFromMemory(
|
||||
} })));
|
||||
},
|
||||
.Union => switch (ty.containerLayout(mod)) {
|
||||
.Auto => return error.IllDefinedMemoryLayout,
|
||||
.Extern => {
|
||||
.auto => return error.IllDefinedMemoryLayout,
|
||||
.@"extern" => {
|
||||
const union_size = ty.abiSize(mod);
|
||||
const array_ty = try mod.arrayType(.{ .len = union_size, .child = .u8_type });
|
||||
const val = try (try readFromMemory(array_ty, mod, buffer, arena)).intern(array_ty, mod);
|
||||
@ -1036,7 +1036,7 @@ pub fn readFromMemory(
|
||||
.val = val,
|
||||
} })));
|
||||
},
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const byte_count = (@as(usize, @intCast(ty.bitSize(mod))) + 7) / 8;
|
||||
return readFromPackedMemory(ty, mod, buffer[0..byte_count], 0, arena);
|
||||
},
|
||||
@ -1177,8 +1177,8 @@ pub fn readFromPackedMemory(
|
||||
} })));
|
||||
},
|
||||
.Union => switch (ty.containerLayout(mod)) {
|
||||
.Auto, .Extern => unreachable, // Handled by non-packed readFromMemory
|
||||
.Packed => {
|
||||
.auto, .@"extern" => unreachable, // Handled by non-packed readFromMemory
|
||||
.@"packed" => {
|
||||
const backing_ty = try ty.unionBackingType(mod);
|
||||
const val = (try readFromPackedMemory(backing_ty, mod, buffer, bit_offset, arena)).toIntern();
|
||||
return Value.fromInterned((try mod.intern(.{ .un = .{
|
||||
@ -4064,7 +4064,7 @@ fn dbHelper(self: *Value, tag_to_payload_map: *map: {
|
||||
.alignment = 0,
|
||||
};
|
||||
break :map @Type(.{ .Struct = .{
|
||||
.layout = .Extern,
|
||||
.layout = .@"extern",
|
||||
.fields = &fields,
|
||||
.decls = &.{},
|
||||
.is_tuple = false,
|
||||
|
||||
@ -815,10 +815,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
.call_never_tail => try self.airCall(inst, .never_tail),
|
||||
.call_never_inline => try self.airCall(inst, .never_inline),
|
||||
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .Release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
|
||||
|
||||
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
|
||||
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
|
||||
|
||||
@ -21,7 +21,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
|
||||
var maybe_float_bits: ?u16 = null;
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
if (ty.containerLayout(mod) == .Packed) return .byval;
|
||||
if (ty.containerLayout(mod) == .@"packed") return .byval;
|
||||
const float_count = countFloats(ty, mod, &maybe_float_bits);
|
||||
if (float_count <= sret_float_count) return .{ .float_array = float_count };
|
||||
|
||||
@ -31,7 +31,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
|
||||
return .integer;
|
||||
},
|
||||
.Union => {
|
||||
if (ty.containerLayout(mod) == .Packed) return .byval;
|
||||
if (ty.containerLayout(mod) == .@"packed") return .byval;
|
||||
const float_count = countFloats(ty, mod, &maybe_float_bits);
|
||||
if (float_count <= sret_float_count) return .{ .float_array = float_count };
|
||||
|
||||
|
||||
@ -801,10 +801,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
.call_never_tail => try self.airCall(inst, .never_tail),
|
||||
.call_never_inline => try self.airCall(inst, .never_inline),
|
||||
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .Release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
|
||||
|
||||
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
|
||||
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
|
||||
|
||||
@ -33,7 +33,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
const bit_size = ty.bitSize(mod);
|
||||
if (ty.containerLayout(mod) == .Packed) {
|
||||
if (ty.containerLayout(mod) == .@"packed") {
|
||||
if (bit_size > 64) return .memory;
|
||||
return .byval;
|
||||
}
|
||||
@ -56,7 +56,7 @@ pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
|
||||
.Union => {
|
||||
const bit_size = ty.bitSize(mod);
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
if (union_obj.getLayout(ip) == .Packed) {
|
||||
if (union_obj.getLayout(ip) == .@"packed") {
|
||||
if (bit_size > 64) return .memory;
|
||||
return .byval;
|
||||
}
|
||||
|
||||
@ -634,10 +634,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
.call_never_tail => try self.airCall(inst, .never_tail),
|
||||
.call_never_inline => try self.airCall(inst, .never_inline),
|
||||
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .Release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
|
||||
|
||||
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
|
||||
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
|
||||
|
||||
@ -15,7 +15,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
const bit_size = ty.bitSize(mod);
|
||||
if (ty.containerLayout(mod) == .Packed) {
|
||||
if (ty.containerLayout(mod) == .@"packed") {
|
||||
if (bit_size > max_byval_size) return .memory;
|
||||
return .byval;
|
||||
}
|
||||
@ -44,7 +44,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
|
||||
},
|
||||
.Union => {
|
||||
const bit_size = ty.bitSize(mod);
|
||||
if (ty.containerLayout(mod) == .Packed) {
|
||||
if (ty.containerLayout(mod) == .@"packed") {
|
||||
if (bit_size > max_byval_size) return .memory;
|
||||
return .byval;
|
||||
}
|
||||
|
||||
@ -1018,7 +1018,7 @@ fn typeToValtype(ty: Type, mod: *Module) wasm.Valtype {
|
||||
.unrolled => wasm.Valtype.i32,
|
||||
},
|
||||
.Union => switch (ty.containerLayout(mod)) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const int_ty = mod.intType(.unsigned, @as(u16, @intCast(ty.bitSize(mod)))) catch @panic("out of memory");
|
||||
return typeToValtype(int_ty, mod);
|
||||
},
|
||||
@ -1737,7 +1737,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
|
||||
=> return ty.hasRuntimeBitsIgnoreComptime(mod),
|
||||
.Union => {
|
||||
if (mod.typeToUnion(ty)) |union_obj| {
|
||||
if (union_obj.getLayout(ip) == .Packed) {
|
||||
if (union_obj.getLayout(ip) == .@"packed") {
|
||||
return ty.abiSize(mod) > 8;
|
||||
}
|
||||
}
|
||||
@ -3097,7 +3097,7 @@ fn lowerParentPtr(func: *CodeGen, ptr_val: Value, offset: u32) InnerError!WValue
|
||||
break :blk parent_ty.structFieldOffset(field_index, mod);
|
||||
},
|
||||
.Union => switch (parent_ty.containerLayout(mod)) {
|
||||
.Packed => 0,
|
||||
.@"packed" => 0,
|
||||
else => blk: {
|
||||
const layout: Module.UnionLayout = parent_ty.unionGetLayout(mod);
|
||||
if (layout.payload_size == 0) break :blk 0;
|
||||
@ -3358,7 +3358,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
|
||||
const struct_type = ip.loadStructType(ty.toIntern());
|
||||
// non-packed structs are not handled in this function because they
|
||||
// are by-ref types.
|
||||
assert(struct_type.layout == .Packed);
|
||||
assert(struct_type.layout == .@"packed");
|
||||
var buf: [8]u8 = .{0} ** 8; // zero the buffer so we do not read 0xaa as integer
|
||||
val.writeToPackedMemory(ty, mod, &buf, 0) catch unreachable;
|
||||
const backing_int_ty = Type.fromInterned(struct_type.backingIntType(ip).*);
|
||||
@ -3890,7 +3890,7 @@ fn structFieldPtr(
|
||||
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
|
||||
|
||||
const offset = switch (struct_ty.containerLayout(mod)) {
|
||||
.Packed => switch (struct_ty.zigTypeTag(mod)) {
|
||||
.@"packed" => switch (struct_ty.zigTypeTag(mod)) {
|
||||
.Struct => offset: {
|
||||
if (result_ty.ptrInfo(mod).packed_offset.host_size != 0) {
|
||||
break :offset @as(u32, 0);
|
||||
@ -3928,7 +3928,7 @@ fn airStructFieldVal(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) return func.finishAir(inst, .none, &.{struct_field.struct_operand});
|
||||
|
||||
const result = switch (struct_ty.containerLayout(mod)) {
|
||||
.Packed => switch (struct_ty.zigTypeTag(mod)) {
|
||||
.@"packed" => switch (struct_ty.zigTypeTag(mod)) {
|
||||
.Struct => result: {
|
||||
const packed_struct = mod.typeToPackedStruct(struct_ty).?;
|
||||
const offset = mod.structPackedFieldBitOffset(packed_struct, field_index);
|
||||
@ -5321,7 +5321,7 @@ fn airAggregateInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
|
||||
break :result_value result;
|
||||
},
|
||||
.Struct => switch (result_ty.containerLayout(mod)) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
if (isByRef(result_ty, mod)) {
|
||||
return func.fail("TODO: airAggregateInit for packed structs larger than 64 bits", .{});
|
||||
}
|
||||
|
||||
@ -29,7 +29,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
|
||||
switch (ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
const struct_type = mod.typeToStruct(ty).?;
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
if (ty.bitSize(mod) <= 64) return direct;
|
||||
return .{ .direct, .direct };
|
||||
}
|
||||
@ -70,7 +70,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
|
||||
},
|
||||
.Union => {
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
if (union_obj.getLayout(ip) == .Packed) {
|
||||
if (union_obj.getLayout(ip) == .@"packed") {
|
||||
if (ty.bitSize(mod) <= 64) return direct;
|
||||
return .{ .direct, .direct };
|
||||
}
|
||||
@ -113,7 +113,7 @@ pub fn scalarType(ty: Type, mod: *Module) Type {
|
||||
},
|
||||
.Union => {
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
if (union_obj.getLayout(ip) != .Packed) {
|
||||
if (union_obj.getLayout(ip) != .@"packed") {
|
||||
const layout = mod.getUnionLayout(union_obj);
|
||||
if (layout.payload_size == 0 and layout.tag_size != 0) {
|
||||
return scalarType(ty.unionTagTypeSafety(mod).?, mod);
|
||||
|
||||
@ -2111,10 +2111,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
|
||||
.call_never_tail => try self.airCall(inst, .never_tail),
|
||||
.call_never_inline => try self.airCall(inst, .never_inline),
|
||||
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .Unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .Monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .Release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .SeqCst),
|
||||
.atomic_store_unordered => try self.airAtomicStore(inst, .unordered),
|
||||
.atomic_store_monotonic => try self.airAtomicStore(inst, .monotonic),
|
||||
.atomic_store_release => try self.airAtomicStore(inst, .release),
|
||||
.atomic_store_seq_cst => try self.airAtomicStore(inst, .seq_cst),
|
||||
|
||||
.struct_field_ptr_index_0 => try self.airStructFieldPtrIndex(inst, 0),
|
||||
.struct_field_ptr_index_1 => try self.airStructFieldPtrIndex(inst, 1),
|
||||
@ -7962,8 +7962,8 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
const src_mcv = try self.resolveInst(operand);
|
||||
const field_off: u32 = switch (container_ty.containerLayout(mod)) {
|
||||
.Auto, .Extern => @intCast(container_ty.structFieldOffset(index, mod) * 8),
|
||||
.Packed => if (mod.typeToStruct(container_ty)) |struct_type|
|
||||
.auto, .@"extern" => @intCast(container_ty.structFieldOffset(index, mod) * 8),
|
||||
.@"packed" => if (mod.typeToStruct(container_ty)) |struct_type|
|
||||
mod.structPackedFieldBitOffset(struct_type, index)
|
||||
else
|
||||
0,
|
||||
@ -11977,9 +11977,9 @@ fn airFrameAddress(self: *Self, inst: Air.Inst.Index) !void {
|
||||
fn airFence(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const order = self.air.instructions.items(.data)[@intFromEnum(inst)].fence;
|
||||
switch (order) {
|
||||
.Unordered, .Monotonic => unreachable,
|
||||
.Acquire, .Release, .AcqRel => {},
|
||||
.SeqCst => try self.asmOpOnly(.{ ._, .mfence }),
|
||||
.unordered, .monotonic => unreachable,
|
||||
.acquire, .release, .acq_rel => {},
|
||||
.seq_cst => try self.asmOpOnly(.{ ._, .mfence }),
|
||||
}
|
||||
self.finishAirBookkeeping();
|
||||
}
|
||||
@ -15747,9 +15747,9 @@ fn atomicOp(
|
||||
.Xor => .xor,
|
||||
else => unreachable,
|
||||
} else switch (order) {
|
||||
.Unordered, .Monotonic, .Release, .AcqRel => .mov,
|
||||
.Acquire => unreachable,
|
||||
.SeqCst => .xchg,
|
||||
.unordered, .monotonic, .release, .acq_rel => .mov,
|
||||
.acquire => unreachable,
|
||||
.seq_cst => .xchg,
|
||||
};
|
||||
|
||||
const dst_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
||||
@ -17979,7 +17979,7 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
|
||||
switch (result_ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
const frame_index = try self.allocFrameIndex(FrameAlloc.initSpill(result_ty, mod));
|
||||
if (result_ty.containerLayout(mod) == .Packed) {
|
||||
if (result_ty.containerLayout(mod) == .@"packed") {
|
||||
const struct_type = mod.typeToStruct(result_ty).?;
|
||||
try self.genInlineMemset(
|
||||
.{ .lea_frame = .{ .index = frame_index } },
|
||||
|
||||
@ -110,7 +110,7 @@ pub fn emitMir(emit: *Emit) Error!void {
|
||||
const is_obj_or_static_lib = switch (emit.lower.output_mode) {
|
||||
.Exe => false,
|
||||
.Obj => true,
|
||||
.Lib => emit.lower.link_mode == .Static,
|
||||
.Lib => emit.lower.link_mode == .static,
|
||||
};
|
||||
const atom = elf_file.symbol(data.atom_index).atom(elf_file).?;
|
||||
const sym_index = elf_file.zigObjectPtr().?.symbol(data.sym_index);
|
||||
@ -158,7 +158,7 @@ pub fn emitMir(emit: *Emit) Error!void {
|
||||
const is_obj_or_static_lib = switch (emit.lower.output_mode) {
|
||||
.Exe => false,
|
||||
.Obj => true,
|
||||
.Lib => emit.lower.link_mode == .Static,
|
||||
.Lib => emit.lower.link_mode == .static,
|
||||
};
|
||||
const atom = macho_file.getSymbol(data.atom_index).getAtom(macho_file).?;
|
||||
const sym_index = macho_file.getZigObject().?.symbols.items[data.sym_index];
|
||||
|
||||
@ -329,7 +329,7 @@ fn emit(lower: *Lower, prefix: Prefix, mnemonic: Mnemonic, ops: []const Operand)
|
||||
const is_obj_or_static_lib = switch (lower.output_mode) {
|
||||
.Exe => false,
|
||||
.Obj => true,
|
||||
.Lib => lower.link_mode == .Static,
|
||||
.Lib => lower.link_mode == .static,
|
||||
};
|
||||
|
||||
const emit_prefix = prefix;
|
||||
|
||||
@ -42,7 +42,7 @@ pub fn classifyWindows(ty: Type, mod: *Module) Class {
|
||||
1, 2, 4, 8 => return .integer,
|
||||
else => switch (ty.zigTypeTag(mod)) {
|
||||
.Int => return .win_i128,
|
||||
.Struct, .Union => if (ty.containerLayout(mod) == .Packed) {
|
||||
.Struct, .Union => if (ty.containerLayout(mod) == .@"packed") {
|
||||
return .win_i128;
|
||||
} else {
|
||||
return .memory;
|
||||
@ -238,7 +238,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
|
||||
// separately.".
|
||||
const struct_type = mod.typeToStruct(ty).?;
|
||||
const ty_size = ty.abiSize(mod);
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
assert(ty_size <= 16);
|
||||
result[0] = .integer;
|
||||
if (ty_size > 8) result[1] = .integer;
|
||||
@ -356,7 +356,7 @@ pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
|
||||
// separately.".
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
const ty_size = mod.unionAbiSize(union_obj);
|
||||
if (union_obj.getLayout(ip) == .Packed) {
|
||||
if (union_obj.getLayout(ip) == .@"packed") {
|
||||
assert(ty_size <= 16);
|
||||
result[0] = .integer;
|
||||
if (ty_size > 8) result[1] = .integer;
|
||||
|
||||
@ -513,7 +513,7 @@ pub fn generateSymbol(
|
||||
.struct_type => {
|
||||
const struct_type = ip.loadStructType(typed_value.ty.toIntern());
|
||||
switch (struct_type.layout) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
|
||||
return error.Overflow;
|
||||
const current_pos = code.items.len;
|
||||
@ -550,7 +550,7 @@ pub fn generateSymbol(
|
||||
bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
|
||||
}
|
||||
},
|
||||
.Auto, .Extern => {
|
||||
.auto, .@"extern" => {
|
||||
const struct_begin = code.items.len;
|
||||
const field_types = struct_type.field_types.get(ip);
|
||||
const offsets = struct_type.offsets.get(ip);
|
||||
@ -736,11 +736,11 @@ fn lowerParentPtr(
|
||||
.anon_struct_type,
|
||||
.union_type,
|
||||
=> switch (Type.fromInterned(base_ty).containerLayout(mod)) {
|
||||
.Auto, .Extern => @intCast(Type.fromInterned(base_ty).structFieldOffset(
|
||||
.auto, .@"extern" => @intCast(Type.fromInterned(base_ty).structFieldOffset(
|
||||
@intCast(field.index),
|
||||
mod,
|
||||
)),
|
||||
.Packed => if (mod.typeToStruct(Type.fromInterned(base_ty))) |struct_obj|
|
||||
.@"packed" => if (mod.typeToStruct(Type.fromInterned(base_ty))) |struct_obj|
|
||||
if (Type.fromInterned(ptr.ty).ptrInfo(mod).packed_offset.host_size == 0)
|
||||
@divExact(Type.fromInterned(base_ptr_ty).ptrInfo(mod)
|
||||
.packed_offset.bit_offset + mod.structPackedFieldBitOffset(
|
||||
|
||||
@ -890,7 +890,7 @@ pub const DeclGen = struct {
|
||||
return writer.writeAll(" }");
|
||||
},
|
||||
.Struct => switch (ty.containerLayout(mod)) {
|
||||
.Auto, .Extern => {
|
||||
.auto, .@"extern" => {
|
||||
if (!location.isInitializer()) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderType(writer, ty);
|
||||
@ -912,7 +912,7 @@ pub const DeclGen = struct {
|
||||
|
||||
return writer.writeByte('}');
|
||||
},
|
||||
.Packed => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef, .Other)}),
|
||||
.@"packed" => return writer.print("{x}", .{try dg.fmtIntLiteral(ty, Value.undef, .Other)}),
|
||||
},
|
||||
.Union => {
|
||||
if (!location.isInitializer()) {
|
||||
@ -1379,7 +1379,7 @@ pub const DeclGen = struct {
|
||||
.struct_type => {
|
||||
const struct_type = ip.loadStructType(ty.toIntern());
|
||||
switch (struct_type.layout) {
|
||||
.Auto, .Extern => {
|
||||
.auto, .@"extern" => {
|
||||
if (!location.isInitializer()) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderType(writer, ty);
|
||||
@ -1408,7 +1408,7 @@ pub const DeclGen = struct {
|
||||
}
|
||||
try writer.writeByte('}');
|
||||
},
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const int_info = ty.intInfo(mod);
|
||||
|
||||
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
|
||||
@ -1517,7 +1517,7 @@ pub const DeclGen = struct {
|
||||
if (un.tag == .none) {
|
||||
const backing_ty = try ty.unionBackingType(mod);
|
||||
switch (union_obj.getLayout(ip)) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
if (!location.isInitializer()) {
|
||||
try writer.writeByte('(');
|
||||
try dg.renderType(writer, backing_ty);
|
||||
@ -1525,7 +1525,7 @@ pub const DeclGen = struct {
|
||||
}
|
||||
try dg.renderValue(writer, backing_ty, Value.fromInterned(un.val), initializer_type);
|
||||
},
|
||||
.Extern => {
|
||||
.@"extern" => {
|
||||
if (location == .StaticInitializer) {
|
||||
return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{});
|
||||
}
|
||||
@ -1551,7 +1551,7 @@ pub const DeclGen = struct {
|
||||
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
|
||||
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
|
||||
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
|
||||
if (union_obj.getLayout(ip) == .Packed) {
|
||||
if (union_obj.getLayout(ip) == .@"packed") {
|
||||
if (field_ty.hasRuntimeBits(mod)) {
|
||||
if (field_ty.isPtrAtRuntime(mod)) {
|
||||
try writer.writeByte('(');
|
||||
@ -1999,7 +1999,7 @@ pub const DeclGen = struct {
|
||||
try fwd.writeAll(if (is_global) "zig_extern " else "static ");
|
||||
const maybe_exports = dg.module.decl_exports.get(decl_index);
|
||||
const export_weak_linkage = if (maybe_exports) |exports|
|
||||
exports.items[0].opts.linkage == .Weak
|
||||
exports.items[0].opts.linkage == .weak
|
||||
else
|
||||
false;
|
||||
if (variable.is_weak_linkage or export_weak_linkage) try fwd.writeAll("zig_weak_linkage ");
|
||||
@ -2689,7 +2689,7 @@ fn genExports(o: *Object) !void {
|
||||
const is_variable_const = switch (ip.indexToKey(tv.val.toIntern())) {
|
||||
.func => return for (exports.items[1..], 1..) |@"export", i| {
|
||||
try fwd.writeAll("zig_extern ");
|
||||
if (@"export".opts.linkage == .Weak) try fwd.writeAll("zig_weak_linkage_fn ");
|
||||
if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage_fn ");
|
||||
try o.dg.renderFunctionSignature(
|
||||
fwd,
|
||||
decl_index,
|
||||
@ -2707,7 +2707,7 @@ fn genExports(o: *Object) !void {
|
||||
};
|
||||
for (exports.items[1..]) |@"export"| {
|
||||
try fwd.writeAll("zig_extern ");
|
||||
if (@"export".opts.linkage == .Weak) try fwd.writeAll("zig_weak_linkage ");
|
||||
if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage ");
|
||||
const export_name = ip.stringToSlice(@"export".opts.name);
|
||||
try o.dg.renderTypeAndName(
|
||||
fwd,
|
||||
@ -2842,7 +2842,7 @@ pub fn genFunc(f: *Function) !void {
|
||||
try fwd_decl_writer.writeAll(if (is_global) "zig_extern " else "static ");
|
||||
|
||||
if (mod.decl_exports.get(decl_index)) |exports|
|
||||
if (exports.items[0].opts.linkage == .Weak) try fwd_decl_writer.writeAll("zig_weak_linkage_fn ");
|
||||
if (exports.items[0].opts.linkage == .weak) try fwd_decl_writer.writeAll("zig_weak_linkage_fn ");
|
||||
try o.dg.renderFunctionSignature(fwd_decl_writer, decl_index, .forward, .{ .export_index = 0 });
|
||||
try fwd_decl_writer.writeAll(";\n");
|
||||
try genExports(o);
|
||||
@ -3278,10 +3278,10 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
|
||||
|
||||
.int_from_ptr => try airIntFromPtr(f, inst),
|
||||
|
||||
.atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.Unordered)),
|
||||
.atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.Monotonic)),
|
||||
.atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.Release)),
|
||||
.atomic_store_seq_cst => try airAtomicStore(f, inst, toMemoryOrder(.SeqCst)),
|
||||
.atomic_store_unordered => try airAtomicStore(f, inst, toMemoryOrder(.unordered)),
|
||||
.atomic_store_monotonic => try airAtomicStore(f, inst, toMemoryOrder(.monotonic)),
|
||||
.atomic_store_release => try airAtomicStore(f, inst, toMemoryOrder(.release)),
|
||||
.atomic_store_seq_cst => try airAtomicStore(f, inst, toMemoryOrder(.seq_cst)),
|
||||
|
||||
.struct_field_ptr_index_0 => try airStructFieldPtrIndex(f, inst, 0),
|
||||
.struct_field_ptr_index_1 => try airStructFieldPtrIndex(f, inst, 1),
|
||||
@ -5497,7 +5497,7 @@ fn fieldLocation(
|
||||
.Union => {
|
||||
const union_obj = mod.typeToUnion(container_ty).?;
|
||||
return switch (union_obj.getLayout(ip)) {
|
||||
.Auto, .Extern => {
|
||||
.auto, .@"extern" => {
|
||||
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod))
|
||||
return if (container_ty.unionTagTypeSafety(mod) != null and
|
||||
@ -5511,7 +5511,7 @@ fn fieldLocation(
|
||||
else
|
||||
.{ .identifier = ip.stringToSlice(field_name) } };
|
||||
},
|
||||
.Packed => .begin,
|
||||
.@"packed" => .begin,
|
||||
};
|
||||
},
|
||||
.Pointer => switch (container_ty.ptrSize(mod)) {
|
||||
@ -5671,11 +5671,11 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
|
||||
const field_name: CValue = switch (mod.intern_pool.indexToKey(struct_ty.ip_index)) {
|
||||
.struct_type => switch (struct_ty.containerLayout(mod)) {
|
||||
.Auto, .Extern => if (struct_ty.isSimpleTuple(mod))
|
||||
.auto, .@"extern" => if (struct_ty.isSimpleTuple(mod))
|
||||
.{ .field = extra.field_index }
|
||||
else
|
||||
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const struct_type = mod.typeToStruct(struct_ty).?;
|
||||
const int_info = struct_ty.intInfo(mod);
|
||||
|
||||
@ -5740,7 +5740,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
|
||||
.union_type => field_name: {
|
||||
const union_obj = ip.loadUnionType(struct_ty.toIntern());
|
||||
if (union_obj.flagsPtr(ip).layout == .Packed) {
|
||||
if (union_obj.flagsPtr(ip).layout == .@"packed") {
|
||||
const operand_lval = if (struct_byval == .constant) blk: {
|
||||
const operand_local = try f.allocLocal(inst, struct_ty);
|
||||
try f.writeCValue(writer, operand_local, .Other);
|
||||
@ -7081,7 +7081,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
}
|
||||
},
|
||||
.Struct => switch (inst_ty.containerLayout(mod)) {
|
||||
.Auto, .Extern => for (resolved_elements, 0..) |element, field_index| {
|
||||
.auto, .@"extern" => for (resolved_elements, 0..) |element, field_index| {
|
||||
if (inst_ty.structFieldIsComptime(field_index, mod)) continue;
|
||||
const field_ty = inst_ty.structFieldType(field_index, mod);
|
||||
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
|
||||
@ -7095,7 +7095,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
try f.writeCValue(writer, element, .Other);
|
||||
try a.end(f, writer);
|
||||
},
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
try f.writeCValue(writer, local, .Other);
|
||||
try writer.writeAll(" = ");
|
||||
const int_info = inst_ty.intInfo(mod);
|
||||
@ -7181,7 +7181,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
|
||||
|
||||
const writer = f.object.writer();
|
||||
const local = try f.allocLocal(inst, union_ty);
|
||||
if (union_obj.getLayout(ip) == .Packed) {
|
||||
if (union_obj.getLayout(ip) == .@"packed") {
|
||||
try f.writeCValue(writer, local, .Other);
|
||||
try writer.writeAll(" = ");
|
||||
try f.writeCValue(writer, payload, .Initializer);
|
||||
@ -7482,11 +7482,11 @@ fn airCVaCopy(f: *Function, inst: Air.Inst.Index) !CValue {
fn toMemoryOrder(order: std.builtin.AtomicOrder) [:0]const u8 {
return switch (order) {
// Note: unordered is actually even less atomic than relaxed
.Unordered, .Monotonic => "zig_memory_order_relaxed",
.Acquire => "zig_memory_order_acquire",
.Release => "zig_memory_order_release",
.AcqRel => "zig_memory_order_acq_rel",
.SeqCst => "zig_memory_order_seq_cst",
.unordered, .monotonic => "zig_memory_order_relaxed",
.acquire => "zig_memory_order_acquire",
.release => "zig_memory_order_release",
.acq_rel => "zig_memory_order_acq_rel",
.seq_cst => "zig_memory_order_seq_cst",
};
}
|
||||
|
||||
|
||||
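The toMemoryOrder hunk above maps the renamed std.builtin.AtomicOrder fields to C memory orders. For reference, a minimal sketch of what user-facing code looks like after this rename (illustrative names, not part of this diff):

var ready: bool = false;

fn publish() void {
    // lowercase ordering names after this change
    @atomicStore(bool, &ready, true, .seq_cst);
}

fn poll() bool {
    return @atomicLoad(bool, &ready, .seq_cst);
}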
@ -1495,7 +1495,7 @@ pub const CType = extern union {
|
||||
}
|
||||
},
|
||||
|
||||
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
|
||||
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .@"packed") {
|
||||
if (mod.typeToPackedStruct(ty)) |packed_struct| {
|
||||
try self.initType(Type.fromInterned(packed_struct.backingIntType(ip).*), kind, lookup);
|
||||
} else {
|
||||
|
||||
@ -1278,7 +1278,7 @@ pub const Object = struct {
|
||||
|
||||
const reloc_mode: llvm.RelocMode = if (pic)
|
||||
.PIC
|
||||
else if (self.module.comp.config.link_mode == .Dynamic)
|
||||
else if (self.module.comp.config.link_mode == .dynamic)
|
||||
llvm.RelocMode.DynamicNoPIC
|
||||
else
|
||||
.Static;
|
||||
@ -1873,10 +1873,10 @@ pub const Object = struct {
|
||||
if (comp.config.dll_export_fns)
|
||||
global_index.setDllStorageClass(.dllexport, &o.builder);
|
||||
global_index.setLinkage(switch (exports[0].opts.linkage) {
|
||||
.Internal => unreachable,
|
||||
.Strong => .external,
|
||||
.Weak => .weak_odr,
|
||||
.LinkOnce => .linkonce_odr,
|
||||
.internal => unreachable,
|
||||
.strong => .external,
|
||||
.weak => .weak_odr,
|
||||
.link_once => .linkonce_odr,
|
||||
}, &o.builder);
|
||||
global_index.setVisibility(switch (exports[0].opts.visibility) {
|
||||
.default => .default,
|
||||
@ -3327,7 +3327,7 @@ pub const Object = struct {
|
||||
|
||||
const struct_type = ip.loadStructType(t.toIntern());
|
||||
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
|
||||
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
|
||||
return int_ty;
|
||||
@ -3477,7 +3477,7 @@ pub const Object = struct {
|
||||
const union_obj = ip.loadUnionType(t.toIntern());
|
||||
const layout = mod.getUnionLayout(union_obj);
|
||||
|
||||
if (union_obj.flagsPtr(ip).layout == .Packed) {
|
||||
if (union_obj.flagsPtr(ip).layout == .@"packed") {
|
||||
const int_ty = try o.builder.intType(@intCast(t.bitSize(mod)));
|
||||
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
|
||||
return int_ty;
|
||||
@ -4038,7 +4038,7 @@ pub const Object = struct {
|
||||
const struct_type = ip.loadStructType(ty.toIntern());
|
||||
assert(struct_type.haveLayout(ip));
|
||||
const struct_ty = try o.lowerType(ty);
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
comptime assert(Type.packed_struct_layout_version == 2);
|
||||
var running_int = try o.builder.intConst(struct_ty, 0);
|
||||
var running_bits: u16 = 0;
|
||||
@ -4154,7 +4154,7 @@ pub const Object = struct {
|
||||
const payload = if (un.tag != .none) p: {
|
||||
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
|
||||
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
|
||||
if (container_layout == .Packed) {
|
||||
if (container_layout == .@"packed") {
|
||||
if (!field_ty.hasRuntimeBits(mod)) return o.builder.intConst(union_ty, 0);
|
||||
const small_int_val = try o.builder.castConst(
|
||||
if (field_ty.isPtrAtRuntime(mod)) .ptrtoint else .bitcast,
|
||||
@ -4190,7 +4190,7 @@ pub const Object = struct {
|
||||
} else p: {
|
||||
assert(layout.tag_size == 0);
|
||||
const union_val = try o.lowerValue(un.val);
|
||||
if (container_layout == .Packed) {
|
||||
if (container_layout == .@"packed") {
|
||||
const bitcast_val = try o.builder.castConst(
|
||||
.bitcast,
|
||||
union_val,
|
||||
@ -4324,7 +4324,7 @@ pub const Object = struct {
|
||||
const field_index: u32 = @intCast(field_ptr.index);
|
||||
switch (parent_ty.zigTypeTag(mod)) {
|
||||
.Union => {
|
||||
if (parent_ty.containerLayout(mod) == .Packed) {
|
||||
if (parent_ty.containerLayout(mod) == .@"packed") {
|
||||
return parent_ptr;
|
||||
}
|
||||
|
||||
@ -6531,7 +6531,7 @@ pub const FuncGen = struct {
|
||||
assert(!isByRef(field_ty, mod));
|
||||
switch (struct_ty.zigTypeTag(mod)) {
|
||||
.Struct => switch (struct_ty.containerLayout(mod)) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const struct_type = mod.typeToStruct(struct_ty).?;
|
||||
const bit_offset = mod.structPackedFieldBitOffset(struct_type, field_index);
|
||||
const containing_int = struct_llvm_val;
|
||||
@ -6558,7 +6558,7 @@ pub const FuncGen = struct {
|
||||
},
|
||||
},
|
||||
.Union => {
|
||||
assert(struct_ty.containerLayout(mod) == .Packed);
|
||||
assert(struct_ty.containerLayout(mod) == .@"packed");
|
||||
const containing_int = struct_llvm_val;
|
||||
const elem_llvm_ty = try o.lowerType(field_ty);
|
||||
if (field_ty.zigTypeTag(mod) == .Float or field_ty.zigTypeTag(mod) == .Vector) {
|
||||
@ -6581,7 +6581,7 @@ pub const FuncGen = struct {
|
||||
switch (struct_ty.zigTypeTag(mod)) {
|
||||
.Struct => {
|
||||
const layout = struct_ty.containerLayout(mod);
|
||||
assert(layout != .Packed);
|
||||
assert(layout != .@"packed");
|
||||
const struct_llvm_ty = try o.lowerType(struct_ty);
|
||||
const llvm_field_index = o.llvmFieldIndex(struct_ty, field_index).?;
|
||||
const field_ptr =
|
||||
@ -9995,7 +9995,7 @@ pub const FuncGen = struct {
|
||||
return running_int;
|
||||
}
|
||||
|
||||
assert(result_ty.containerLayout(mod) != .Packed);
|
||||
assert(result_ty.containerLayout(mod) != .@"packed");
|
||||
|
||||
if (isByRef(result_ty, mod)) {
|
||||
// TODO in debug builds init to undef so that the padding will be 0xaa
|
||||
@ -10080,7 +10080,7 @@ pub const FuncGen = struct {
|
||||
const layout = union_ty.unionGetLayout(mod);
|
||||
const union_obj = mod.typeToUnion(union_ty).?;
|
||||
|
||||
if (union_obj.getLayout(ip) == .Packed) {
|
||||
if (union_obj.getLayout(ip) == .@"packed") {
|
||||
const big_bits = union_ty.bitSize(mod);
|
||||
const int_llvm_ty = try o.builder.intType(@intCast(big_bits));
|
||||
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
|
||||
@ -10420,7 +10420,7 @@ pub const FuncGen = struct {
|
||||
const struct_ty = struct_ptr_ty.childType(mod);
|
||||
switch (struct_ty.zigTypeTag(mod)) {
|
||||
.Struct => switch (struct_ty.containerLayout(mod)) {
|
||||
.Packed => {
|
||||
.@"packed" => {
|
||||
const result_ty = self.typeOfIndex(inst);
|
||||
const result_ty_info = result_ty.ptrInfo(mod);
|
||||
const struct_ptr_ty_info = struct_ptr_ty.ptrInfo(mod);
|
||||
@ -10462,7 +10462,7 @@ pub const FuncGen = struct {
|
||||
},
|
||||
.Union => {
|
||||
const layout = struct_ty.unionGetLayout(mod);
|
||||
if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .Packed) return struct_ptr;
|
||||
if (layout.payload_size == 0 or struct_ty.containerLayout(mod) == .@"packed") return struct_ptr;
|
||||
const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
|
||||
const union_llvm_ty = try o.lowerType(struct_ty);
|
||||
return self.wip.gepStruct(union_llvm_ty, struct_ptr, payload_index, "");
|
||||
@ -10801,12 +10801,12 @@ pub const FuncGen = struct {
|
||||
|
||||
fn toLlvmAtomicOrdering(atomic_order: std.builtin.AtomicOrder) Builder.AtomicOrdering {
|
||||
return switch (atomic_order) {
|
||||
.Unordered => .unordered,
|
||||
.Monotonic => .monotonic,
|
||||
.Acquire => .acquire,
|
||||
.Release => .release,
|
||||
.AcqRel => .acq_rel,
|
||||
.SeqCst => .seq_cst,
|
||||
.unordered => .unordered,
|
||||
.monotonic => .monotonic,
|
||||
.acquire => .acquire,
|
||||
.release => .release,
|
||||
.acq_rel => .acq_rel,
|
||||
.seq_cst => .seq_cst,
|
||||
};
|
||||
}
|
||||
|
||||
@ -11572,7 +11572,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
|
||||
};
|
||||
|
||||
// Packed structs are represented to LLVM as integers.
|
||||
if (struct_type.layout == .Packed) return false;
|
||||
if (struct_type.layout == .@"packed") return false;
|
||||
|
||||
const field_types = struct_type.field_types.get(ip);
|
||||
var it = struct_type.iterateRuntimeOrder(ip);
|
||||
@ -11586,7 +11586,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
|
||||
return false;
|
||||
},
|
||||
.Union => switch (ty.containerLayout(mod)) {
|
||||
.Packed => return false,
|
||||
.@"packed" => return false,
|
||||
else => return ty.hasRuntimeBits(mod),
|
||||
},
|
||||
.ErrorUnion => {
|
||||
@ -11624,8 +11624,8 @@ fn isScalar(mod: *Module, ty: Type) bool {
|
||||
.Vector,
|
||||
=> true,
|
||||
|
||||
.Struct => ty.containerLayout(mod) == .Packed,
|
||||
.Union => ty.containerLayout(mod) == .Packed,
|
||||
.Struct => ty.containerLayout(mod) == .@"packed",
|
||||
.Union => ty.containerLayout(mod) == .@"packed",
|
||||
else => false,
|
||||
};
|
||||
}
|
||||
|
||||
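The isByRef and isScalar hunks above depend on packed containers lowering to their backing integer ("Packed structs are represented to LLVM as integers"). A minimal illustration of that property, with a hypothetical Flags type that is not part of this diff:

const std = @import("std");

const Flags = packed struct { a: bool, b: bool, _pad: u6 = 0 };

test "packed struct is bit-for-bit its backing integer" {
    const f = Flags{ .a = true, .b = false };
    const bits: u8 = @bitCast(f); // u8 is the backing integer of this 8-bit packed struct
    try std.testing.expectEqual(@as(u8, 1), bits); // only bit 0 (field a) is set
}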
@ -8398,7 +8398,7 @@ pub const Metadata = enum(u32) {
fmt_str = fmt_str ++ ")\n";

var fmt_args: @Type(.{ .Struct = .{
.layout = .Auto,
.layout = .auto,
.fields = &fields,
.decls = &.{},
.is_tuple = false,

@ -415,8 +415,8 @@ fn BufType(comptime T: type, comptime min_len: usize) type {
.Enum => |info| info.tag_type,
.Bool => u1,
.Struct => |info| switch (info.layout) {
.Auto, .Extern => @compileError("Unsupported type: " ++ @typeName(T)),
.Packed => std.meta.Int(.unsigned, @bitSizeOf(T)),
.auto, .@"extern" => @compileError("Unsupported type: " ++ @typeName(T)),
.@"packed" => std.meta.Int(.unsigned, @bitSizeOf(T)),
},
else => @compileError("Unsupported type: " ++ @typeName(T)),
})));
|
||||
|
||||
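Because packed and extern are keywords, the renamed std.builtin.Type.ContainerLayout fields must be written as quoted identifiers, as the BufType hunk above shows. A hedged sketch of downstream code after the rename (the function name is illustrative, not from this diff):

fn isPackedContainer(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        // layout fields are lowercase; keyword names need @"..." quoting
        .Struct => |info| info.layout == .@"packed",
        .Union => |info| info.layout == .@"packed",
        else => false,
    };
}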
@ -979,7 +979,7 @@ const DeclGen = struct {
|
||||
},
|
||||
.struct_type => {
|
||||
const struct_type = mod.typeToStruct(ty).?;
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
return self.todo("packed struct constants", .{});
|
||||
}
|
||||
|
||||
@ -1275,7 +1275,7 @@ const DeclGen = struct {
|
||||
const ip = &mod.intern_pool;
|
||||
const union_obj = mod.typeToUnion(ty).?;
|
||||
|
||||
if (union_obj.getLayout(ip) == .Packed) {
|
||||
if (union_obj.getLayout(ip) == .@"packed") {
|
||||
return self.todo("packed union types", .{});
|
||||
}
|
||||
|
||||
@ -1532,7 +1532,7 @@ const DeclGen = struct {
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
return try self.resolveType(Type.fromInterned(struct_type.backingIntType(ip).*), .direct);
|
||||
}
|
||||
|
||||
@ -3904,7 +3904,7 @@ const DeclGen = struct {
|
||||
const union_ty = mod.typeToUnion(ty).?;
|
||||
const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
|
||||
|
||||
if (union_ty.getLayout(ip) == .Packed) {
|
||||
if (union_ty.getLayout(ip) == .@"packed") {
|
||||
unreachable; // TODO
|
||||
}
|
||||
|
||||
@ -3984,11 +3984,11 @@ const DeclGen = struct {
|
||||
|
||||
switch (object_ty.zigTypeTag(mod)) {
|
||||
.Struct => switch (object_ty.containerLayout(mod)) {
|
||||
.Packed => unreachable, // TODO
|
||||
.@"packed" => unreachable, // TODO
|
||||
else => return try self.extractField(field_ty, object_id, field_index),
|
||||
},
|
||||
.Union => switch (object_ty.containerLayout(mod)) {
|
||||
.Packed => unreachable, // TODO
|
||||
.@"packed" => unreachable, // TODO
|
||||
else => {
|
||||
// Store, ptr-elem-ptr, pointer-cast, load
|
||||
const layout = self.unionLayout(object_ty);
|
||||
@ -4058,13 +4058,13 @@ const DeclGen = struct {
|
||||
const object_ty = object_ptr_ty.childType(mod);
|
||||
switch (object_ty.zigTypeTag(mod)) {
|
||||
.Struct => switch (object_ty.containerLayout(mod)) {
|
||||
.Packed => unreachable, // TODO
|
||||
.@"packed" => unreachable, // TODO
|
||||
else => {
|
||||
return try self.accessChain(result_ty_ref, object_ptr, &.{field_index});
|
||||
},
|
||||
},
|
||||
.Union => switch (object_ty.containerLayout(mod)) {
|
||||
.Packed => unreachable, // TODO
|
||||
.@"packed" => unreachable, // TODO
|
||||
else => {
|
||||
const layout = self.unionLayout(object_ty);
|
||||
if (!layout.has_payload) {
|
||||
|
||||
@ -154,7 +154,7 @@ pub fn writeOperand(section: *Section, comptime Operand: type, operand: Operand)
|
||||
}
|
||||
},
|
||||
.Struct => |info| {
|
||||
if (info.layout == .Packed) {
|
||||
if (info.layout == .@"packed") {
|
||||
section.writeWord(@as(Word, @bitCast(operand)));
|
||||
} else {
|
||||
section.writeExtendedMask(Operand, operand);
|
||||
@ -288,7 +288,7 @@ fn operandSize(comptime Operand: type, operand: Operand) usize {
|
||||
}
|
||||
break :blk total;
|
||||
},
|
||||
.Struct => |info| if (info.layout == .Packed) 1 else extendedMaskSize(Operand, operand),
|
||||
.Struct => |info| if (info.layout == .@"packed") 1 else extendedMaskSize(Operand, operand),
|
||||
.Union => extendedUnionSize(Operand, operand),
|
||||
else => unreachable,
|
||||
},
|
||||
|
||||
@ -376,7 +376,7 @@ const PanicSwitch = struct {
|
||||
};
|
||||
state.* = new_state;
|
||||
|
||||
_ = panicking.fetchAdd(1, .SeqCst);
|
||||
_ = panicking.fetchAdd(1, .seq_cst);
|
||||
|
||||
state.recover_stage = .release_ref_count;
|
||||
|
||||
@ -458,7 +458,7 @@ const PanicSwitch = struct {
|
||||
noinline fn releaseRefCount(state: *volatile PanicState) noreturn {
|
||||
state.recover_stage = .abort;
|
||||
|
||||
if (panicking.fetchSub(1, .SeqCst) != 1) {
|
||||
if (panicking.fetchSub(1, .seq_cst) != 1) {
|
||||
// Another thread is panicking, wait for the last one to finish
|
||||
// and call abort()
|
||||
|
||||
|
||||
@ -1084,7 +1084,7 @@ fn buildSharedLib(
|
||||
const strip = comp.compilerRtStrip();
|
||||
const config = try Compilation.Config.resolve(.{
|
||||
.output_mode = .Lib,
|
||||
.link_mode = .Dynamic,
|
||||
.link_mode = .dynamic,
|
||||
.resolved_target = comp.root_mod.resolved_target,
|
||||
.is_test = false,
|
||||
.have_zcu = false,
|
||||
|
||||
@ -115,7 +115,7 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) !void {
|
||||
|
||||
const root_name = "c++";
|
||||
const output_mode = .Lib;
|
||||
const link_mode = .Static;
|
||||
const link_mode = .static;
|
||||
const target = comp.root_mod.resolved_target.result;
|
||||
const basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = root_name,
|
||||
@ -327,7 +327,7 @@ pub fn buildLibCXXABI(comp: *Compilation, prog_node: *std.Progress.Node) !void {
|
||||
|
||||
const root_name = "c++abi";
|
||||
const output_mode = .Lib;
|
||||
const link_mode = .Static;
|
||||
const link_mode = .static;
|
||||
const target = comp.root_mod.resolved_target.result;
|
||||
const basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = root_name,
|
||||
|
||||
@ -27,7 +27,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!v
|
||||
|
||||
const root_name = "tsan";
|
||||
const output_mode = .Lib;
|
||||
const link_mode = .Static;
|
||||
const link_mode = .static;
|
||||
const target = comp.getTarget();
|
||||
const basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = root_name,
|
||||
|
||||
@ -62,7 +62,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: *std.Progress.Node) !void {
|
||||
});
|
||||
|
||||
const root_name = "unwind";
|
||||
const link_mode = .Static;
|
||||
const link_mode = .static;
|
||||
const target = comp.root_mod.resolved_target.result;
|
||||
const basename = try std.zig.binNameAlloc(arena, .{
|
||||
.root_name = root_name,
|
||||
|
||||
12
src/link.zig
@ -287,8 +287,8 @@ pub const File = struct {
|
||||
switch (output_mode) {
|
||||
.Obj => return,
|
||||
.Lib => switch (link_mode) {
|
||||
.Static => return,
|
||||
.Dynamic => {},
|
||||
.static => return,
|
||||
.dynamic => {},
|
||||
},
|
||||
.Exe => {},
|
||||
}
|
||||
@ -582,7 +582,7 @@ pub const File = struct {
|
||||
const use_lld = build_options.have_llvm and comp.config.use_lld;
|
||||
const output_mode = comp.config.output_mode;
|
||||
const link_mode = comp.config.link_mode;
|
||||
if (use_lld and output_mode == .Lib and link_mode == .Static) {
|
||||
if (use_lld and output_mode == .Lib and link_mode == .static) {
|
||||
return base.linkAsArchive(arena, prog_node);
|
||||
}
|
||||
switch (base.tag) {
|
||||
@ -957,8 +957,8 @@ pub const File = struct {
|
||||
const executable_mode = if (builtin.target.os.tag == .windows) 0 else 0o777;
|
||||
switch (effectiveOutputMode(use_lld, output_mode)) {
|
||||
.Lib => return switch (link_mode) {
|
||||
.Dynamic => executable_mode,
|
||||
.Static => fs.File.default_mode,
|
||||
.dynamic => executable_mode,
|
||||
.static => fs.File.default_mode,
|
||||
},
|
||||
.Exe => return executable_mode,
|
||||
.Obj => return fs.File.default_mode,
|
||||
@ -966,7 +966,7 @@ pub const File = struct {
|
||||
}
|
||||
|
||||
pub fn isStatic(self: File) bool {
|
||||
return self.comp.config.link_mode == .Static;
|
||||
return self.comp.config.link_mode == .static;
|
||||
}
|
||||
|
||||
pub fn isObject(self: File) bool {
|
||||
|
||||
@ -1599,11 +1599,11 @@ pub fn updateExports(
|
||||
}
|
||||
}
|
||||
|
||||
if (exp.opts.linkage == .LinkOnce) {
|
||||
if (exp.opts.linkage == .link_once) {
|
||||
try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
|
||||
gpa,
|
||||
exp.getSrcLoc(mod),
|
||||
"Unimplemented: GlobalLinkage.LinkOnce",
|
||||
"Unimplemented: GlobalLinkage.link_once",
|
||||
.{},
|
||||
));
|
||||
continue;
|
||||
@ -1633,11 +1633,11 @@ pub fn updateExports(
|
||||
sym.type = atom.getSymbol(self).type;
|
||||
|
||||
switch (exp.opts.linkage) {
|
||||
.Strong => {
|
||||
.strong => {
|
||||
sym.storage_class = .EXTERNAL;
|
||||
},
|
||||
.Internal => @panic("TODO Internal"),
|
||||
.Weak => @panic("TODO WeakExternal"),
|
||||
.internal => @panic("TODO Internal"),
|
||||
.weak => @panic("TODO WeakExternal"),
|
||||
else => unreachable,
|
||||
}
|
||||
|
||||
@ -2275,7 +2275,7 @@ fn writeHeader(self: *Coff) !void {
|
||||
.p32 => flags.@"32BIT_MACHINE" = 1,
|
||||
.p64 => flags.LARGE_ADDRESS_AWARE = 1,
|
||||
}
|
||||
if (self.base.comp.config.output_mode == .Lib and self.base.comp.config.link_mode == .Dynamic) {
|
||||
if (self.base.comp.config.output_mode == .Lib and self.base.comp.config.link_mode == .dynamic) {
|
||||
flags.DLL = 1;
|
||||
}
|
||||
|
||||
|
||||
@ -45,7 +45,7 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node)
|
||||
defer sub_prog_node.end();
|
||||
|
||||
const is_lib = comp.config.output_mode == .Lib;
|
||||
const is_dyn_lib = comp.config.link_mode == .Dynamic and is_lib;
|
||||
const is_dyn_lib = comp.config.link_mode == .dynamic and is_lib;
|
||||
const is_exe_or_dyn_lib = is_dyn_lib or comp.config.output_mode == .Exe;
|
||||
const link_in_crt = comp.config.link_libc and is_exe_or_dyn_lib;
|
||||
const target = comp.root_mod.resolved_target.result;
|
||||
@ -411,16 +411,16 @@ pub fn linkWithLLD(self: *Coff, arena: Allocator, prog_node: *std.Progress.Node)
|
||||
try argv.append(try comp.get_libc_crt_file(arena, "mingwex.lib"));
|
||||
} else {
|
||||
const lib_str = switch (comp.config.link_mode) {
|
||||
.Dynamic => "",
|
||||
.Static => "lib",
|
||||
.dynamic => "",
|
||||
.static => "lib",
|
||||
};
|
||||
const d_str = switch (optimize_mode) {
|
||||
.Debug => "d",
|
||||
else => "",
|
||||
};
|
||||
switch (comp.config.link_mode) {
|
||||
.Static => try argv.append(try allocPrint(arena, "libcmt{s}.lib", .{d_str})),
|
||||
.Dynamic => try argv.append(try allocPrint(arena, "msvcrt{s}.lib", .{d_str})),
|
||||
.static => try argv.append(try allocPrint(arena, "libcmt{s}.lib", .{d_str})),
|
||||
.dynamic => try argv.append(try allocPrint(arena, "msvcrt{s}.lib", .{d_str})),
|
||||
}
|
||||
|
||||
try argv.append(try allocPrint(arena, "{s}vcruntime{s}.lib", .{ lib_str, d_str }));
|
||||
|
||||
@ -317,7 +317,7 @@ pub const DeclState = struct {
|
||||
try ty.print(dbg_info_buffer.writer(), mod);
|
||||
try dbg_info_buffer.append(0);
|
||||
|
||||
if (struct_type.layout == .Packed) {
|
||||
if (struct_type.layout == .@"packed") {
|
||||
log.debug("TODO implement .debug_info for packed structs", .{});
|
||||
break :blk;
|
||||
}
|
||||
|
||||
@ -262,7 +262,7 @@ pub fn createEmpty(
|
||||
.sparc64 => 0x2000,
|
||||
else => 0x1000,
|
||||
};
|
||||
const is_dyn_lib = output_mode == .Lib and link_mode == .Dynamic;
|
||||
const is_dyn_lib = output_mode == .Lib and link_mode == .dynamic;
|
||||
const default_sym_version: elf.Elf64_Versym = if (is_dyn_lib or comp.config.rdynamic)
|
||||
elf.VER_NDX_GLOBAL
|
||||
else
|
||||
@ -349,7 +349,7 @@ pub fn createEmpty(
|
||||
}
|
||||
|
||||
const is_obj = output_mode == .Obj;
|
||||
const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .Static);
|
||||
const is_obj_or_ar = is_obj or (output_mode == .Lib and link_mode == .static);
|
||||
|
||||
// What path should this ELF linker code output to?
|
||||
// If using LLD to link, this code should produce an object file so that it
|
||||
@ -1180,10 +1180,10 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
|
||||
|
||||
success: {
|
||||
if (!self.base.isStatic()) {
|
||||
if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .Dynamic))
|
||||
if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .dynamic))
|
||||
break :success;
|
||||
}
|
||||
if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .Static))
|
||||
if (try self.accessLibPath(arena, &test_path, &checked_paths, lc.crt_dir.?, lib_name, .static))
|
||||
break :success;
|
||||
|
||||
try self.reportMissingLibraryError(
|
||||
@ -1211,8 +1211,8 @@ pub fn flushModule(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node)
|
||||
});
|
||||
} else if (target.isMusl()) {
|
||||
const path = try comp.get_libc_crt_file(arena, switch (link_mode) {
|
||||
.Static => "libc.a",
|
||||
.Dynamic => "libc.so",
|
||||
.static => "libc.a",
|
||||
.dynamic => "libc.so",
|
||||
});
|
||||
try system_libs.append(.{ .path = path });
|
||||
} else {
|
||||
@ -1628,7 +1628,7 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
|
||||
// libc dep
|
||||
if (comp.config.link_libc) {
|
||||
if (self.base.comp.libc_installation != null) {
|
||||
const needs_grouping = link_mode == .Static;
|
||||
const needs_grouping = link_mode == .static;
|
||||
if (needs_grouping) try argv.append("--start-group");
|
||||
try argv.appendSlice(target_util.libcFullLinkFlags(target));
|
||||
if (needs_grouping) try argv.append("--end-group");
|
||||
@ -1642,8 +1642,8 @@ fn dumpArgv(self: *Elf, comp: *Compilation) !void {
|
||||
try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
|
||||
} else if (target.isMusl()) {
|
||||
try argv.append(try comp.get_libc_crt_file(arena, switch (link_mode) {
|
||||
.Static => "libc.a",
|
||||
.Dynamic => "libc.so",
|
||||
.static => "libc.a",
|
||||
.dynamic => "libc.so",
|
||||
}));
|
||||
}
|
||||
}
|
||||
@ -1797,10 +1797,10 @@ fn parseLdScript(self: *Elf, lib: SystemLib) ParseError!void {
|
||||
// Maybe we should hoist search-strategy all the way here?
|
||||
for (self.lib_dirs) |lib_dir| {
|
||||
if (!self.base.isStatic()) {
|
||||
if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .Dynamic))
|
||||
if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .dynamic))
|
||||
break :success;
|
||||
}
|
||||
if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .Static))
|
||||
if (try self.accessLibPath(arena, &test_path, &checked_paths, lib_dir, lib_name, .static))
|
||||
break :success;
|
||||
}
|
||||
} else {
|
||||
@ -1858,8 +1858,8 @@ fn accessLibPath(
|
||||
test_path.clearRetainingCapacity();
|
||||
const prefix = if (link_mode != null) "lib" else "";
|
||||
const suffix = if (link_mode) |mode| switch (mode) {
|
||||
.Static => target.staticLibSuffix(),
|
||||
.Dynamic => target.dynamicLibSuffix(),
|
||||
.static => target.staticLibSuffix(),
|
||||
.dynamic => target.dynamicLibSuffix(),
|
||||
} else "";
|
||||
try test_path.writer().print("{s}" ++ sep ++ "{s}{s}{s}", .{
|
||||
lib_dir_path,
|
||||
@ -2150,10 +2150,10 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
|
||||
const is_obj = output_mode == .Obj;
|
||||
const is_lib = output_mode == .Lib;
|
||||
const link_mode = comp.config.link_mode;
|
||||
const is_dyn_lib = link_mode == .Dynamic and is_lib;
|
||||
const is_dyn_lib = link_mode == .dynamic and is_lib;
|
||||
const is_exe_or_dyn_lib = is_dyn_lib or output_mode == .Exe;
|
||||
const have_dynamic_linker = comp.config.link_libc and
|
||||
link_mode == .Dynamic and is_exe_or_dyn_lib;
|
||||
link_mode == .dynamic and is_exe_or_dyn_lib;
|
||||
const target = comp.root_mod.resolved_target.result;
|
||||
const compiler_rt_path: ?[]const u8 = blk: {
|
||||
if (comp.compiler_rt_lib) |x| break :blk x.full_object_path;
|
||||
@ -2463,7 +2463,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
|
||||
try argv.append(arg);
|
||||
}
|
||||
|
||||
if (link_mode == .Static) {
|
||||
if (link_mode == .static) {
|
||||
if (target.cpu.arch.isArmOrThumb()) {
|
||||
try argv.append("-Bstatic");
|
||||
} else {
|
||||
@ -2647,7 +2647,7 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
|
||||
comp.link_error_flags.missing_libc = false;
|
||||
if (comp.config.link_libc) {
|
||||
if (comp.libc_installation != null) {
|
||||
const needs_grouping = link_mode == .Static;
|
||||
const needs_grouping = link_mode == .static;
|
||||
if (needs_grouping) try argv.append("--start-group");
|
||||
try argv.appendSlice(target_util.libcFullLinkFlags(target));
|
||||
if (needs_grouping) try argv.append("--end-group");
|
||||
@ -2661,8 +2661,8 @@ fn linkWithLLD(self: *Elf, arena: Allocator, prog_node: *std.Progress.Node) !voi
|
||||
try argv.append(try comp.get_libc_crt_file(arena, "libc_nonshared.a"));
|
||||
} else if (target.isMusl()) {
|
||||
try argv.append(try comp.get_libc_crt_file(arena, switch (link_mode) {
|
||||
.Static => "libc.a",
|
||||
.Dynamic => "libc.so",
|
||||
.static => "libc.a",
|
||||
.dynamic => "libc.so",
|
||||
}));
|
||||
} else {
|
||||
comp.link_error_flags.missing_libc = true;
|
||||
@ -2928,8 +2928,8 @@ pub fn writeElfHeader(self: *Elf) !void {
|
||||
.Exe => if (comp.config.pie) .DYN else .EXEC,
|
||||
.Obj => .REL,
|
||||
.Lib => switch (link_mode) {
|
||||
.Static => @as(elf.ET, .REL),
|
||||
.Dynamic => .DYN,
|
||||
.static => @as(elf.ET, .REL),
|
||||
.dynamic => .DYN,
|
||||
},
|
||||
};
|
||||
mem.writeInt(u16, hdr_buf[index..][0..2], @intFromEnum(elf_type), endian);
|
||||
@ -3216,7 +3216,7 @@ fn allocateLinkerDefinedSymbols(self: *Elf) void {
|
||||
|
||||
// __rela_iplt_start, __rela_iplt_end
|
||||
if (self.rela_dyn_section_index) |shndx| blk: {
|
||||
if (link_mode != .Static or comp.config.pie) break :blk;
|
||||
if (link_mode != .static or comp.config.pie) break :blk;
|
||||
const shdr = &self.shdrs.items[shndx];
|
||||
const end_addr = shdr.sh_addr + shdr.sh_size;
|
||||
const start_addr = end_addr - self.calcNumIRelativeRelocs() * @sizeOf(elf.Elf64_Rela);
|
||||
@ -5061,12 +5061,12 @@ const CsuObjects = struct {
|
||||
} = switch (comp.config.output_mode) {
|
||||
.Obj => return CsuObjects{},
|
||||
.Lib => switch (comp.config.link_mode) {
|
||||
.Dynamic => .dynamic_lib,
|
||||
.Static => return CsuObjects{},
|
||||
.dynamic => .dynamic_lib,
|
||||
.static => return CsuObjects{},
|
||||
},
|
||||
.Exe => switch (comp.config.link_mode) {
|
||||
.Dynamic => if (comp.config.pie) .dynamic_pie else .dynamic_exe,
|
||||
.Static => if (comp.config.pie) .static_pie else .static_exe,
|
||||
.dynamic => if (comp.config.pie) .dynamic_pie else .dynamic_exe,
|
||||
.static => if (comp.config.pie) .static_pie else .static_exe,
|
||||
},
|
||||
};
|
||||
|
||||
|
||||
@ -1436,10 +1436,10 @@ pub fn updateExports(
|
||||
}
|
||||
}
|
||||
const stb_bits: u8 = switch (exp.opts.linkage) {
|
||||
.Internal => elf.STB_LOCAL,
|
||||
.Strong => elf.STB_GLOBAL,
|
||||
.Weak => elf.STB_WEAK,
|
||||
.LinkOnce => {
|
||||
.internal => elf.STB_LOCAL,
|
||||
.strong => elf.STB_GLOBAL,
|
||||
.weak => elf.STB_WEAK,
|
||||
.link_once => {
|
||||
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
|
||||
mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create(
|
||||
gpa,
|
||||
|
||||
@ -1216,11 +1216,11 @@ pub fn updateExports(
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if (exp.opts.linkage == .LinkOnce) {
|
||||
if (exp.opts.linkage == .link_once) {
|
||||
try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create(
|
||||
gpa,
|
||||
exp.getSrcLoc(mod),
|
||||
"Unimplemented: GlobalLinkage.LinkOnce",
|
||||
"Unimplemented: GlobalLinkage.link_once",
|
||||
.{},
|
||||
));
|
||||
continue;
|
||||
@ -1242,12 +1242,12 @@ pub fn updateExports(
|
||||
self.symtab.items(.atom)[global_nlist_index] = self.symtab.items(.atom)[nlist_idx];
|
||||
|
||||
switch (exp.opts.linkage) {
|
||||
.Internal => {
|
||||
.internal => {
|
||||
// Symbol should be hidden, or in MachO lingo, private extern.
|
||||
global_nlist.n_type |= macho.N_PEXT;
|
||||
},
|
||||
.Strong => {},
|
||||
.Weak => {
|
||||
.strong => {},
|
||||
.weak => {
|
||||
// Weak linkage is specified as part of n_desc field.
|
||||
// Symbol's n_type is like for a symbol with strong linkage.
|
||||
global_nlist.n_desc |= macho.N_WEAK_DEF;
|
||||
|
||||
@ -222,7 +222,7 @@ pub fn writeDylibLC(ctx: WriteDylibLCCtx, writer: anytype) !void {
|
||||
pub fn writeDylibIdLC(macho_file: *MachO, writer: anytype) !void {
|
||||
const comp = macho_file.base.comp;
|
||||
const gpa = comp.gpa;
|
||||
assert(comp.config.output_mode == .Lib and comp.config.link_mode == .Dynamic);
|
||||
assert(comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic);
|
||||
const emit = macho_file.base.emit;
|
||||
const install_name = macho_file.install_name orelse
|
||||
try emit.directory.join(gpa, &.{emit.sub_path});
|
||||
|
||||
@ -2518,7 +2518,7 @@ pub fn flushModule(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node)
|
||||
// When the target os is WASI, we allow linking with WASI-LIBC
|
||||
if (target.os.tag == .wasi) {
|
||||
const is_exe_or_dyn_lib = output_mode == .Exe or
|
||||
(output_mode == .Lib and link_mode == .Dynamic);
|
||||
(output_mode == .Lib and link_mode == .dynamic);
|
||||
if (is_exe_or_dyn_lib) {
|
||||
for (comp.wasi_emulated_libs) |crt_file| {
|
||||
try positionals.append(try comp.get_libc_crt_file(
|
||||
@ -3549,7 +3549,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) !vo
|
||||
try argv.append("--allow-undefined");
|
||||
}
|
||||
|
||||
if (comp.config.output_mode == .Lib and comp.config.link_mode == .Dynamic) {
|
||||
if (comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic) {
|
||||
try argv.append("--shared");
|
||||
}
|
||||
if (comp.config.pie) {
|
||||
@ -3569,7 +3569,7 @@ fn linkWithLLD(wasm: *Wasm, arena: Allocator, prog_node: *std.Progress.Node) !vo
|
||||
|
||||
if (target.os.tag == .wasi) {
|
||||
const is_exe_or_dyn_lib = comp.config.output_mode == .Exe or
|
||||
(comp.config.output_mode == .Lib and comp.config.link_mode == .Dynamic);
|
||||
(comp.config.output_mode == .Lib and comp.config.link_mode == .dynamic);
|
||||
if (is_exe_or_dyn_lib) {
|
||||
for (comp.wasi_emulated_libs) |crt_file| {
|
||||
try argv.append(try comp.get_libc_crt_file(
|
||||
|
||||
@ -896,14 +896,14 @@ pub fn updateExports(
|
||||
sym.name = export_name;
|
||||
|
||||
switch (exp.opts.linkage) {
|
||||
.Internal => {
|
||||
.internal => {
|
||||
sym.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
|
||||
},
|
||||
.Weak => {
|
||||
.weak => {
|
||||
sym.setFlag(.WASM_SYM_BINDING_WEAK);
|
||||
},
|
||||
.Strong => {}, // symbols are strong by default
|
||||
.LinkOnce => {
|
||||
.strong => {}, // symbols are strong by default
|
||||
.link_once => {
|
||||
try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
|
||||
gpa,
|
||||
decl.srcLoc(mod),
|
||||
|
||||
70
src/main.zig
@ -755,8 +755,8 @@ const SystemLib = struct {
fn fallbackMode(this: SystemLib) std.builtin.LinkMode {
assert(this.search_strategy != .no_fallback);
return switch (this.preferred_mode) {
.Dynamic => .Static,
.Static => .Dynamic,
.dynamic => .static,
.static => .dynamic,
};
}
};
|
||||
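With std.builtin.LinkMode fields lowercased, switches like fallbackMode above read the same wherever a link mode is mapped to something else. A minimal sketch using the std.Target suffix helpers that already appear elsewhere in this diff (the function name is illustrative, not from this commit):

const std = @import("std");

fn libSuffix(target: std.Target, mode: std.builtin.LinkMode) []const u8 {
    return switch (mode) {
        .static => target.staticLibSuffix(), // e.g. ".a" / ".lib"
        .dynamic => target.dynamicLibSuffix(), // e.g. ".so" / ".dylib" / ".dll"
    };
}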
@ -892,7 +892,7 @@ fn buildOutputType(
|
||||
var entitlements: ?[]const u8 = null;
|
||||
var pagezero_size: ?u64 = null;
|
||||
var lib_search_strategy: SystemLib.SearchStrategy = .paths_first;
|
||||
var lib_preferred_mode: std.builtin.LinkMode = .Dynamic;
|
||||
var lib_preferred_mode: std.builtin.LinkMode = .dynamic;
|
||||
var headerpad_size: ?u32 = null;
|
||||
var headerpad_max_install_names: bool = false;
|
||||
var dead_strip_dylibs: bool = false;
|
||||
@ -1166,22 +1166,22 @@ fn buildOutputType(
|
||||
};
|
||||
} else if (mem.eql(u8, arg, "-search_paths_first")) {
|
||||
lib_search_strategy = .paths_first;
|
||||
lib_preferred_mode = .Dynamic;
|
||||
lib_preferred_mode = .dynamic;
|
||||
} else if (mem.eql(u8, arg, "-search_paths_first_static")) {
|
||||
lib_search_strategy = .paths_first;
|
||||
lib_preferred_mode = .Static;
|
||||
lib_preferred_mode = .static;
|
||||
} else if (mem.eql(u8, arg, "-search_dylibs_first")) {
|
||||
lib_search_strategy = .mode_first;
|
||||
lib_preferred_mode = .Dynamic;
|
||||
lib_preferred_mode = .dynamic;
|
||||
} else if (mem.eql(u8, arg, "-search_static_first")) {
|
||||
lib_search_strategy = .mode_first;
|
||||
lib_preferred_mode = .Static;
|
||||
lib_preferred_mode = .static;
|
||||
} else if (mem.eql(u8, arg, "-search_dylibs_only")) {
|
||||
lib_search_strategy = .no_fallback;
|
||||
lib_preferred_mode = .Dynamic;
|
||||
lib_preferred_mode = .dynamic;
|
||||
} else if (mem.eql(u8, arg, "-search_static_only")) {
|
||||
lib_search_strategy = .no_fallback;
|
||||
lib_preferred_mode = .Static;
|
||||
lib_preferred_mode = .static;
|
||||
} else if (mem.eql(u8, arg, "-headerpad")) {
|
||||
const next_arg = args_iter.nextOrFatal();
|
||||
headerpad_size = std.fmt.parseUnsigned(u32, eatIntPrefix(next_arg, 16), 16) catch |err| {
|
||||
@ -1478,12 +1478,12 @@ fn buildOutputType(
|
||||
emit_implib = .no;
|
||||
emit_implib_arg_provided = true;
|
||||
} else if (mem.eql(u8, arg, "-dynamic")) {
|
||||
create_module.opts.link_mode = .Dynamic;
|
||||
lib_preferred_mode = .Dynamic;
|
||||
create_module.opts.link_mode = .dynamic;
|
||||
lib_preferred_mode = .dynamic;
|
||||
lib_search_strategy = .mode_first;
|
||||
} else if (mem.eql(u8, arg, "-static")) {
|
||||
create_module.opts.link_mode = .Static;
|
||||
lib_preferred_mode = .Static;
|
||||
create_module.opts.link_mode = .static;
|
||||
lib_preferred_mode = .static;
|
||||
lib_search_strategy = .no_fallback;
|
||||
} else if (mem.eql(u8, arg, "-fdll-export-fns")) {
|
||||
create_module.opts.dll_export_fns = true;
|
||||
@ -1904,7 +1904,7 @@ fn buildOutputType(
|
||||
},
|
||||
.nostdlib_cpp => create_module.opts.ensure_libcpp_on_non_freestanding = false,
|
||||
.shared => {
|
||||
create_module.opts.link_mode = .Dynamic;
|
||||
create_module.opts.link_mode = .dynamic;
|
||||
is_shared_lib = true;
|
||||
},
|
||||
.rdynamic => create_module.opts.rdynamic = true,
|
||||
@ -1961,20 +1961,20 @@ fn buildOutputType(
|
||||
mem.eql(u8, linker_arg, "-call_shared"))
|
||||
{
|
||||
lib_search_strategy = .no_fallback;
|
||||
lib_preferred_mode = .Dynamic;
|
||||
lib_preferred_mode = .dynamic;
|
||||
} else if (mem.eql(u8, linker_arg, "-Bstatic") or
|
||||
mem.eql(u8, linker_arg, "-dn") or
|
||||
mem.eql(u8, linker_arg, "-non_shared") or
|
||||
mem.eql(u8, linker_arg, "-static"))
|
||||
{
|
||||
lib_search_strategy = .no_fallback;
|
||||
lib_preferred_mode = .Static;
|
||||
lib_preferred_mode = .static;
|
||||
} else if (mem.eql(u8, linker_arg, "-search_paths_first")) {
|
||||
lib_search_strategy = .paths_first;
|
||||
lib_preferred_mode = .Dynamic;
|
||||
lib_preferred_mode = .dynamic;
|
||||
} else if (mem.eql(u8, linker_arg, "-search_dylibs_first")) {
|
||||
lib_search_strategy = .mode_first;
|
||||
lib_preferred_mode = .Dynamic;
|
||||
lib_preferred_mode = .dynamic;
|
||||
} else {
|
||||
try linker_args.append(linker_arg);
|
||||
}
|
||||
@ -3033,7 +3033,7 @@ fn buildOutputType(
|
||||
|
||||
const is_exe_or_dyn_lib = switch (create_module.resolved_options.output_mode) {
|
||||
.Obj => false,
|
||||
.Lib => create_module.resolved_options.link_mode == .Dynamic,
|
||||
.Lib => create_module.resolved_options.link_mode == .dynamic,
|
||||
.Exe => true,
|
||||
};
|
||||
// Note that cmake when targeting Windows will try to execute
|
||||
@ -3770,8 +3770,8 @@ fn createModule(
|
||||
)) {
|
||||
const path = try arena.dupe(u8, test_path.items);
|
||||
switch (info.preferred_mode) {
|
||||
.Static => try create_module.link_objects.append(arena, .{ .path = path }),
|
||||
.Dynamic => try create_module.resolved_system_libs.append(arena, .{
|
||||
.static => try create_module.link_objects.append(arena, .{ .path = path }),
|
||||
.dynamic => try create_module.resolved_system_libs.append(arena, .{
|
||||
.name = lib_name,
|
||||
.lib = .{
|
||||
.needed = info.needed,
|
||||
@ -3804,8 +3804,8 @@ fn createModule(
|
||||
)) {
|
||||
const path = try arena.dupe(u8, test_path.items);
|
||||
switch (info.fallbackMode()) {
|
||||
.Static => try create_module.link_objects.append(arena, .{ .path = path }),
|
||||
.Dynamic => try create_module.resolved_system_libs.append(arena, .{
|
||||
.static => try create_module.link_objects.append(arena, .{ .path = path }),
|
||||
.dynamic => try create_module.resolved_system_libs.append(arena, .{
|
||||
.name = lib_name,
|
||||
.lib = .{
|
||||
.needed = info.needed,
|
||||
@ -3838,8 +3838,8 @@ fn createModule(
|
||||
)) {
|
||||
const path = try arena.dupe(u8, test_path.items);
|
||||
switch (info.preferred_mode) {
|
||||
.Static => try create_module.link_objects.append(arena, .{ .path = path }),
|
||||
.Dynamic => try create_module.resolved_system_libs.append(arena, .{
|
||||
.static => try create_module.link_objects.append(arena, .{ .path = path }),
|
||||
.dynamic => try create_module.resolved_system_libs.append(arena, .{
|
||||
.name = lib_name,
|
||||
.lib = .{
|
||||
.needed = info.needed,
|
||||
@ -3862,8 +3862,8 @@ fn createModule(
|
||||
)) {
|
||||
const path = try arena.dupe(u8, test_path.items);
|
||||
switch (info.fallbackMode()) {
|
||||
.Static => try create_module.link_objects.append(arena, .{ .path = path }),
|
||||
.Dynamic => try create_module.resolved_system_libs.append(arena, .{
|
||||
.static => try create_module.link_objects.append(arena, .{ .path = path }),
|
||||
.dynamic => try create_module.resolved_system_libs.append(arena, .{
|
||||
.name = lib_name,
|
||||
.lib = .{
|
||||
.needed = info.needed,
|
||||
@ -4145,8 +4145,8 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th
|
||||
buf.appendSlice("... ") catch {};
|
||||
}
|
||||
need_ellipse = false;
|
||||
const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .Monotonic);
|
||||
const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .Monotonic);
|
||||
const eti = @atomicLoad(usize, &node.unprotected_estimated_total_items, .monotonic);
|
||||
const completed_items = @atomicLoad(usize, &node.unprotected_completed_items, .monotonic);
|
||||
const current_item = completed_items + 1;
|
||||
if (node.name.len != 0 or eti > 0) {
|
||||
if (node.name.len != 0) {
|
||||
@ -4163,7 +4163,7 @@ fn progressThread(progress: *std.Progress, server: *const Server, reset: *std.Th
|
||||
need_ellipse = false;
|
||||
}
|
||||
}
|
||||
maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .Acquire);
|
||||
maybe_node = @atomicLoad(?*std.Progress.Node, &node.recently_updated_child, .acquire);
|
||||
}
|
||||
}
|
||||
|
||||
@ -6842,7 +6842,7 @@ fn accessLibPath(
|
||||
) !bool {
|
||||
const sep = fs.path.sep_str;
|
||||
|
||||
if (target.isDarwin() and link_mode == .Dynamic) tbd: {
|
||||
if (target.isDarwin() and link_mode == .dynamic) tbd: {
|
||||
// Prefer .tbd over .dylib.
|
||||
test_path.clearRetainingCapacity();
|
||||
try test_path.writer().print("{s}" ++ sep ++ "lib{s}.tbd", .{ lib_dir_path, lib_name });
|
||||
@ -6863,8 +6863,8 @@ fn accessLibPath(
|
||||
target.libPrefix(),
|
||||
lib_name,
|
||||
switch (link_mode) {
|
||||
.Static => target.staticLibSuffix(),
|
||||
.Dynamic => target.dynamicLibSuffix(),
|
||||
.static => target.staticLibSuffix(),
|
||||
.dynamic => target.dynamicLibSuffix(),
|
||||
},
|
||||
});
|
||||
try checked_paths.writer().print("\n {s}", .{test_path.items});
|
||||
@ -6879,7 +6879,7 @@ fn accessLibPath(
|
||||
|
||||
// In the case of Darwin, the main check will be .dylib, so here we
|
||||
// additionally check for .so files.
|
||||
if (target.isDarwin() and link_mode == .Dynamic) so: {
|
||||
if (target.isDarwin() and link_mode == .dynamic) so: {
|
||||
test_path.clearRetainingCapacity();
|
||||
try test_path.writer().print("{s}" ++ sep ++ "lib{s}.so", .{ lib_dir_path, lib_name });
|
||||
try checked_paths.writer().print("\n {s}", .{test_path.items});
|
||||
@ -6894,7 +6894,7 @@ fn accessLibPath(
|
||||
|
||||
// In the case of MinGW, the main check will be .lib but we also need to
|
||||
// look for `libfoo.a`.
|
||||
if (target.isMinGW() and link_mode == .Static) mingw: {
|
||||
if (target.isMinGW() and link_mode == .static) mingw: {
|
||||
test_path.clearRetainingCapacity();
|
||||
try test_path.writer().print("{s}" ++ sep ++ "lib{s}.a", .{
|
||||
lib_dir_path, lib_name,
|
||||
|
||||
@ -207,7 +207,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr
|
||||
const strip = comp.compilerRtStrip();
|
||||
const config = try Compilation.Config.resolve(.{
|
||||
.output_mode = .Lib,
|
||||
.link_mode = .Dynamic,
|
||||
.link_mode = .dynamic,
|
||||
.resolved_target = comp.root_mod.resolved_target,
|
||||
.is_test = false,
|
||||
.have_zcu = false,
|
||||
|
||||
@ -303,10 +303,10 @@ const Writer = struct {
.fence => try w.writeFence(s, inst),
.atomic_load => try w.writeAtomicLoad(s, inst),
.prefetch => try w.writePrefetch(s, inst),
.atomic_store_unordered => try w.writeAtomicStore(s, inst, .Unordered),
.atomic_store_monotonic => try w.writeAtomicStore(s, inst, .Monotonic),
.atomic_store_release => try w.writeAtomicStore(s, inst, .Release),
.atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .SeqCst),
.atomic_store_unordered => try w.writeAtomicStore(s, inst, .unordered),
.atomic_store_monotonic => try w.writeAtomicStore(s, inst, .monotonic),
.atomic_store_release => try w.writeAtomicStore(s, inst, .release),
.atomic_store_seq_cst => try w.writeAtomicStore(s, inst, .seq_cst),
.atomic_rmw => try w.writeAtomicRmw(s, inst),
.field_parent_ptr => try w.writeFieldParentPtr(s, inst),
.wasm_memory_size => try w.writeWasmMemorySize(s, inst),

@ -1440,7 +1440,7 @@ const Writer = struct {
if (small.has_backing_int) {
const backing_int_body_len = self.code.extra[extra_index];
extra_index += 1;
try stream.writeAll("Packed(");
try stream.writeAll("packed(");
if (backing_int_body_len == 0) {
const backing_int_ref: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
38 src/type.zig
@ -741,12 +741,12 @@ pub const Type = struct {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// Struct with no fields have a well-defined layout of no bits.
return struct_type.layout != .Auto or struct_type.field_types.len == 0;
return struct_type.layout != .auto or struct_type.field_types.len == 0;
},
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
return switch (union_type.flagsPtr(ip).runtime_tag) {
.none, .safety => union_type.flagsPtr(ip).layout != .Auto,
.none, .safety => union_type.flagsPtr(ip).layout != .auto,
.tagged => false,
};
},
@ -1027,7 +1027,7 @@ pub const Type = struct {
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) {
if (struct_type.layout == .@"packed") {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (struct_type.backingIntType(ip).* == .none) return .{
@ -1407,7 +1407,7 @@ pub const Type = struct {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => switch (struct_type.layout) {
.Packed => {
.@"packed" => {
if (struct_type.backingIntType(ip).* == .none) return .{
.val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
@ -1415,7 +1415,7 @@ pub const Type = struct {
} }))),
};
},
.Auto, .Extern => {
.auto, .@"extern" => {
if (!struct_type.haveLayout(ip)) return .{
.val = Value.fromInterned((try mod.intern(.{ .int = .{
.ty = .comptime_int_type,
@ -1427,10 +1427,10 @@ pub const Type = struct {
.eager => {},
}
switch (struct_type.layout) {
.Packed => return .{
.@"packed" => return .{
.scalar = Type.fromInterned(struct_type.backingIntType(ip).*).abiSize(mod),
},
.Auto, .Extern => {
.auto, .@"extern" => {
assert(struct_type.haveLayout(ip));
return .{ .scalar = struct_type.size(ip).* };
},
@ -1656,7 +1656,7 @@ pub const Type = struct {
},
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
const is_packed = struct_type.layout == .Packed;
const is_packed = struct_type.layout == .@"packed";
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
if (is_packed) try sema.resolveTypeLayout(ty);
@ -1674,7 +1674,7 @@ pub const Type = struct {

.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
const is_packed = ty.containerLayout(mod) == .Packed;
const is_packed = ty.containerLayout(mod) == .@"packed";
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
if (is_packed) try sema.resolveTypeLayout(ty);
@ -1987,9 +1987,9 @@ pub const Type = struct {
/// Asserts the type is either an extern or packed union.
pub fn unionBackingType(ty: Type, mod: *Module) !Type {
return switch (ty.containerLayout(mod)) {
.Extern => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }),
.Packed => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))),
.Auto => unreachable,
.@"extern" => try mod.arrayType(.{ .len = ty.abiSize(mod), .child = .u8_type }),
.@"packed" => try mod.intType(.unsigned, @intCast(ty.bitSize(mod))),
.auto => unreachable,
};
}

@ -2003,7 +2003,7 @@ pub const Type = struct {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).layout,
.anon_struct_type => .Auto,
.anon_struct_type => .auto,
.union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout,
else => unreachable,
};
@ -2177,7 +2177,7 @@ pub const Type = struct {
pub fn isAbiInt(ty: Type, mod: *Module) bool {
return switch (ty.zigTypeTag(mod)) {
.Int, .Enum, .ErrorSet => true,
.Struct => ty.containerLayout(mod) == .Packed,
.Struct => ty.containerLayout(mod) == .@"packed",
else => false,
};
}
@ -2690,7 +2690,7 @@ pub const Type = struct {
const struct_type = ip.loadStructType(ty.toIntern());
// packed structs cannot be comptime-only because they have a well-defined
// memory layout and every field has a well-defined bit pattern.
if (struct_type.layout == .Packed)
if (struct_type.layout == .@"packed")
return false;

// A struct with no fields is not comptime-only.
@ -3051,7 +3051,7 @@ pub const Type = struct {
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.layout != .Packed);
assert(struct_type.layout != .@"packed");
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
return mod.structFieldAlignment(explicit_align, field_ty, struct_type.layout);
@ -3132,7 +3132,7 @@ pub const Type = struct {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
assert(struct_type.layout != .Packed);
assert(struct_type.layout != .@"packed");
return struct_type.offsets.get(ip)[index];
},

@ -3208,7 +3208,7 @@ pub const Type = struct {
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) return false;
if (struct_type.layout == .@"packed") return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
},
@ -3230,7 +3230,7 @@ pub const Type = struct {
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) return false;
if (struct_type.layout == .@"packed") return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
},
@ -22,18 +22,18 @@ test "cmpxchg" {

fn testCmpxchg() !void {
var x: i32 = 1234;
if (@cmpxchgWeak(i32, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
if (@cmpxchgWeak(i32, &x, 99, 5678, .seq_cst, .seq_cst)) |x1| {
try expect(x1 == 1234);
} else {
@panic("cmpxchg should have failed");
}

while (@cmpxchgWeak(i32, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| {
while (@cmpxchgWeak(i32, &x, 1234, 5678, .seq_cst, .seq_cst)) |x1| {
try expect(x1 == 1234);
}
try expect(x == 5678);

try expect(@cmpxchgStrong(i32, &x, 5678, 42, .SeqCst, .SeqCst) == null);
try expect(@cmpxchgStrong(i32, &x, 5678, 42, .seq_cst, .seq_cst) == null);
try expect(x == 42);
}

@ -43,7 +43,7 @@ test "fence" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

var x: i32 = 1234;
@fence(.SeqCst);
@fence(.seq_cst);
x = 5678;
}

@ -60,18 +60,18 @@ test "atomicrmw and atomicload" {
}

fn testAtomicRmw(ptr: *u8) !void {
const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .SeqCst);
const prev_value = @atomicRmw(u8, ptr, .Xchg, 42, .seq_cst);
try expect(prev_value == 200);
comptime {
var x: i32 = 1234;
const y: i32 = 12345;
try expect(@atomicLoad(i32, &x, .SeqCst) == 1234);
try expect(@atomicLoad(i32, &y, .SeqCst) == 12345);
try expect(@atomicLoad(i32, &x, .seq_cst) == 1234);
try expect(@atomicLoad(i32, &y, .seq_cst) == 12345);
}
}

fn testAtomicLoad(ptr: *u8) !void {
const x = @atomicLoad(u8, ptr, .SeqCst);
const x = @atomicLoad(u8, ptr, .seq_cst);
try expect(x == 42);
}

@ -85,18 +85,18 @@ test "cmpxchg with ptr" {
var data2: i32 = 5678;
var data3: i32 = 9101;
var x: *i32 = &data1;
if (@cmpxchgWeak(*i32, &x, &data2, &data3, .SeqCst, .SeqCst)) |x1| {
if (@cmpxchgWeak(*i32, &x, &data2, &data3, .seq_cst, .seq_cst)) |x1| {
try expect(x1 == &data1);
} else {
@panic("cmpxchg should have failed");
}

while (@cmpxchgWeak(*i32, &x, &data1, &data3, .SeqCst, .SeqCst)) |x1| {
while (@cmpxchgWeak(*i32, &x, &data1, &data3, .seq_cst, .seq_cst)) |x1| {
try expect(x1 == &data1);
}
try expect(x == &data3);

try expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .SeqCst, .SeqCst) == null);
try expect(@cmpxchgStrong(*i32, &x, &data3, &data2, .seq_cst, .seq_cst) == null);
try expect(x == &data2);
}

@ -108,7 +108,7 @@ test "cmpxchg with ignored result" {

var x: i32 = 1234;

_ = @cmpxchgStrong(i32, &x, 1234, 5678, .Monotonic, .Monotonic);
_ = @cmpxchgStrong(i32, &x, 1234, 5678, .monotonic, .monotonic);

try expect(5678 == x);
}
@ -127,18 +127,18 @@ test "128-bit cmpxchg" {

fn test_u128_cmpxchg() !void {
var x: u128 align(16) = 1234;
if (@cmpxchgWeak(u128, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
if (@cmpxchgWeak(u128, &x, 99, 5678, .seq_cst, .seq_cst)) |x1| {
try expect(x1 == 1234);
} else {
@panic("cmpxchg should have failed");
}

while (@cmpxchgWeak(u128, &x, 1234, 5678, .SeqCst, .SeqCst)) |x1| {
while (@cmpxchgWeak(u128, &x, 1234, 5678, .seq_cst, .seq_cst)) |x1| {
try expect(x1 == 1234);
}
try expect(x == 5678);

try expect(@cmpxchgStrong(u128, &x, 5678, 42, .SeqCst, .SeqCst) == null);
try expect(@cmpxchgStrong(u128, &x, 5678, 42, .seq_cst, .seq_cst) == null);
try expect(x == 42);
}

@ -155,7 +155,7 @@ test "cmpxchg on a global variable" {
return error.SkipZigTest;
}

_ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .Acquire, .Monotonic);
_ = @cmpxchgWeak(u32, &a_global_variable, 1234, 42, .acquire, .monotonic);
try expect(a_global_variable == 42);
}

@ -168,12 +168,12 @@ test "atomic load and rmw with enum" {
const Value = enum(u8) { a, b, c };
var x = Value.a;

try expect(@atomicLoad(Value, &x, .SeqCst) != .b);
try expect(@atomicLoad(Value, &x, .seq_cst) != .b);

_ = @atomicRmw(Value, &x, .Xchg, .c, .SeqCst);
try expect(@atomicLoad(Value, &x, .SeqCst) == .c);
try expect(@atomicLoad(Value, &x, .SeqCst) != .a);
try expect(@atomicLoad(Value, &x, .SeqCst) != .b);
_ = @atomicRmw(Value, &x, .Xchg, .c, .seq_cst);
try expect(@atomicLoad(Value, &x, .seq_cst) == .c);
try expect(@atomicLoad(Value, &x, .seq_cst) != .a);
try expect(@atomicLoad(Value, &x, .seq_cst) != .b);
}

test "atomic store" {
@ -183,10 +183,10 @@ test "atomic store" {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

var x: u32 = 0;
@atomicStore(u32, &x, 1, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
@atomicStore(u32, &x, 12345678, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
@atomicStore(u32, &x, 1, .seq_cst);
try expect(@atomicLoad(u32, &x, .seq_cst) == 1);
@atomicStore(u32, &x, 12345678, .seq_cst);
try expect(@atomicLoad(u32, &x, .seq_cst) == 12345678);
}

test "atomic store comptime" {
@ -201,10 +201,10 @@ test "atomic store comptime" {

fn testAtomicStore() !void {
var x: u32 = 0;
@atomicStore(u32, &x, 1, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 1);
@atomicStore(u32, &x, 12345678, .SeqCst);
try expect(@atomicLoad(u32, &x, .SeqCst) == 12345678);
@atomicStore(u32, &x, 1, .seq_cst);
try expect(@atomicLoad(u32, &x, .seq_cst) == 1);
@atomicStore(u32, &x, 12345678, .seq_cst);
try expect(@atomicLoad(u32, &x, .seq_cst) == 12345678);
}

test "atomicrmw with floats" {
@ -224,15 +224,15 @@ test "atomicrmw with floats" {
fn testAtomicRmwFloat() !void {
var x: f32 = 0;
try expect(x == 0);
_ = @atomicRmw(f32, &x, .Xchg, 1, .SeqCst);
_ = @atomicRmw(f32, &x, .Xchg, 1, .seq_cst);
try expect(x == 1);
_ = @atomicRmw(f32, &x, .Add, 5, .SeqCst);
_ = @atomicRmw(f32, &x, .Add, 5, .seq_cst);
try expect(x == 6);
_ = @atomicRmw(f32, &x, .Sub, 2, .SeqCst);
_ = @atomicRmw(f32, &x, .Sub, 2, .seq_cst);
try expect(x == 4);
_ = @atomicRmw(f32, &x, .Max, 13, .SeqCst);
_ = @atomicRmw(f32, &x, .Max, 13, .seq_cst);
try expect(x == 13);
_ = @atomicRmw(f32, &x, .Min, 42, .SeqCst);
_ = @atomicRmw(f32, &x, .Min, 42, .seq_cst);
try expect(x == 13);
}
@ -266,46 +266,46 @@ fn testAtomicRmwInt(comptime signedness: std.builtin.Signedness, comptime N: usi
const int = std.meta.Int(signedness, N);

var x: int = 1;
var res = @atomicRmw(int, &x, .Xchg, 3, .SeqCst);
var res = @atomicRmw(int, &x, .Xchg, 3, .seq_cst);
try expect(x == 3 and res == 1);

res = @atomicRmw(int, &x, .Add, 3, .SeqCst);
res = @atomicRmw(int, &x, .Add, 3, .seq_cst);
var y: int = 3;
try expect(res == y);
y = y + 3;
try expect(x == y);

res = @atomicRmw(int, &x, .Sub, 1, .SeqCst);
res = @atomicRmw(int, &x, .Sub, 1, .seq_cst);
try expect(res == y);
y = y - 1;
try expect(x == y);

res = @atomicRmw(int, &x, .And, 4, .SeqCst);
res = @atomicRmw(int, &x, .And, 4, .seq_cst);
try expect(res == y);
y = y & 4;
try expect(x == y);

res = @atomicRmw(int, &x, .Nand, 4, .SeqCst);
res = @atomicRmw(int, &x, .Nand, 4, .seq_cst);
try expect(res == y);
y = ~(y & 4);
try expect(x == y);

res = @atomicRmw(int, &x, .Or, 6, .SeqCst);
res = @atomicRmw(int, &x, .Or, 6, .seq_cst);
try expect(res == y);
y = y | 6;
try expect(x == y);

res = @atomicRmw(int, &x, .Xor, 2, .SeqCst);
res = @atomicRmw(int, &x, .Xor, 2, .seq_cst);
try expect(res == y);
y = y ^ 2;
try expect(x == y);

res = @atomicRmw(int, &x, .Max, 1, .SeqCst);
res = @atomicRmw(int, &x, .Max, 1, .seq_cst);
try expect(res == y);
y = @max(y, 1);
try expect(x == y);

res = @atomicRmw(int, &x, .Min, 1, .SeqCst);
res = @atomicRmw(int, &x, .Min, 1, .seq_cst);
try expect(res == y);
y = @min(y, 1);
try expect(x == y);
@ -333,53 +333,53 @@ fn testAtomicRmwInt128(comptime signedness: std.builtin.Signedness) !void {
const replacement: int = 0x00000000_00000005_00000000_00000003;

var x: int align(16) = initial;
var res = @atomicRmw(int, &x, .Xchg, replacement, .SeqCst);
var res = @atomicRmw(int, &x, .Xchg, replacement, .seq_cst);
try expect(x == replacement and res == initial);

var operator: int = 0x00000001_00000000_20000000_00000000;
res = @atomicRmw(int, &x, .Add, operator, .SeqCst);
res = @atomicRmw(int, &x, .Add, operator, .seq_cst);
var y: int = replacement;
try expect(res == y);
y = y + operator;
try expect(x == y);

operator = 0x00000000_10000000_00000000_20000000;
res = @atomicRmw(int, &x, .Sub, operator, .SeqCst);
res = @atomicRmw(int, &x, .Sub, operator, .seq_cst);
try expect(res == y);
y = y - operator;
try expect(x == y);

operator = 0x12345678_87654321_12345678_87654321;
res = @atomicRmw(int, &x, .And, operator, .SeqCst);
res = @atomicRmw(int, &x, .And, operator, .seq_cst);
try expect(res == y);
y = y & operator;
try expect(x == y);

operator = 0x00000000_10000000_00000000_20000000;
res = @atomicRmw(int, &x, .Nand, operator, .SeqCst);
res = @atomicRmw(int, &x, .Nand, operator, .seq_cst);
try expect(res == y);
y = ~(y & operator);
try expect(x == y);

operator = 0x12340000_56780000_67890000_98760000;
res = @atomicRmw(int, &x, .Or, operator, .SeqCst);
res = @atomicRmw(int, &x, .Or, operator, .seq_cst);
try expect(res == y);
y = y | operator;
try expect(x == y);

operator = 0x0a0b0c0d_0e0f0102_03040506_0708090a;
res = @atomicRmw(int, &x, .Xor, operator, .SeqCst);
res = @atomicRmw(int, &x, .Xor, operator, .seq_cst);
try expect(res == y);
y = y ^ operator;
try expect(x == y);

operator = 0x00000000_10000000_00000000_20000000;
res = @atomicRmw(int, &x, .Max, operator, .SeqCst);
res = @atomicRmw(int, &x, .Max, operator, .seq_cst);
try expect(res == y);
y = @max(y, operator);
try expect(x == y);

res = @atomicRmw(int, &x, .Min, operator, .SeqCst);
res = @atomicRmw(int, &x, .Min, operator, .seq_cst);
try expect(res == y);
y = @min(y, operator);
try expect(x == y);
@ -405,13 +405,13 @@ test "atomics with different types" {

fn testAtomicsWithType(comptime T: type, a: T, b: T) !void {
var x: T = b;
@atomicStore(T, &x, a, .SeqCst);
@atomicStore(T, &x, a, .seq_cst);
try expect(x == a);
try expect(@atomicLoad(T, &x, .SeqCst) == a);
try expect(@atomicRmw(T, &x, .Xchg, b, .SeqCst) == a);
try expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst) == null);
try expect(@atomicLoad(T, &x, .seq_cst) == a);
try expect(@atomicRmw(T, &x, .Xchg, b, .seq_cst) == a);
try expect(@cmpxchgStrong(T, &x, b, a, .seq_cst, .seq_cst) == null);
if (@sizeOf(T) != 0)
try expect(@cmpxchgStrong(T, &x, b, a, .SeqCst, .SeqCst).? == a);
try expect(@cmpxchgStrong(T, &x, b, a, .seq_cst, .seq_cst).? == a);
}

test "return @atomicStore, using it as a void value" {
@ -425,12 +425,12 @@ test "return @atomicStore, using it as a void value" {
value: usize,

pub fn store(self: *A, value: usize) void {
return @atomicStore(usize, &self.value, value, .Unordered);
return @atomicStore(usize, &self.value, value, .unordered);
}

pub fn store2(self: *A, value: usize) void {
return switch (value) {
else => @atomicStore(usize, &self.value, value, .Unordered),
else => @atomicStore(usize, &self.value, value, .unordered),
};
}
};
@ -14,10 +14,10 @@ test {
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;

var val: u8 = undefined;
try testing.expectEqual({}, @atomicStore(u8, &val, 0, .Unordered));
try testing.expectEqual({}, @atomicStore(u8, &val, 0, .unordered));
try testing.expectEqual(void, @TypeOf(@breakpoint()));
try testing.expectEqual({}, @export(x, .{ .name = "x" }));
try testing.expectEqual({}, @fence(.Acquire));
try testing.expectEqual({}, @fence(.acquire));
try testing.expectEqual({}, @memcpy(@as([*]u8, @ptrFromInt(1))[0..0], @as([*]u8, @ptrFromInt(1))[0..0]));
try testing.expectEqual({}, @memset(@as([*]u8, @ptrFromInt(1))[0..0], undefined));
try testing.expectEqual(noreturn, @TypeOf(if (true) @panic("") else {}));
@ -25,6 +25,6 @@ test {
try testing.expectEqual({}, @setAlignStack(16));
try testing.expectEqual({}, @setCold(true));
try testing.expectEqual({}, @setEvalBranchQuota(0));
try testing.expectEqual({}, @setFloatMode(.Optimized));
try testing.expectEqual({}, @setFloatMode(.optimized));
try testing.expectEqual({}, @setRuntimeSafety(true));
}
@ -25,7 +25,7 @@ test "exporting with internal linkage" {
const S = struct {
fn foo() callconv(.C) void {}
comptime {
@export(foo, .{ .name = "exporting_with_internal_linkage_foo", .linkage = .Internal });
@export(foo, .{ .name = "exporting_with_internal_linkage_foo", .linkage = .internal });
}
};
S.foo();
@ -41,7 +41,7 @@ test "exporting using field access" {
const x: u32 = 5;
};
comptime {
@export(Inner.x, .{ .name = "foo", .linkage = .Internal });
@export(Inner.x, .{ .name = "foo", .linkage = .internal });
}
};
@ -1511,7 +1511,7 @@ test "eval @setFloatMode at compile-time" {
}

fn fnWithFloatMode() f32 {
@setFloatMode(std.builtin.FloatMode.Strict);
@setFloatMode(std.builtin.FloatMode.strict);
return 1234.0;
}
@ -135,7 +135,7 @@ test "array-like initializer for tuple types" {
const T = @Type(.{
.Struct = .{
.is_tuple = true,
.layout = .Auto,
.layout = .auto,
.decls = &.{},
.fields = &.{
.{
@ -323,7 +323,7 @@ test "zero sized struct in tuple handled correctly" {
data: @Type(.{
.Struct = .{
.is_tuple = true,
.layout = .Auto,
.layout = .auto,
.decls = &.{},
.fields = &.{.{
.name = "0",
@ -471,7 +471,7 @@ test "coerce anon tuple to tuple" {

test "empty tuple type" {
const S = @Type(.{ .Struct = .{
.layout = .Auto,
.layout = .auto,
.fields = &.{},
.decls = &.{},
.is_tuple = true,
Some files were not shown because too many files have changed in this diff.