Coff: implement threadlocal variables

Jacob Young 2025-10-08 16:08:05 -04:00 committed by Andrew Kelley
parent b2bc6073c8
commit 2e31077fe0
13 changed files with 953 additions and 553 deletions

@ -249,55 +249,6 @@ pub const OptionalHeader = extern struct {
pub const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
pub const DirectoryEntry = enum(u16) {
/// Export Directory
EXPORT = 0,
/// Import Directory
IMPORT = 1,
/// Resource Directory
RESOURCE = 2,
/// Exception Directory
EXCEPTION = 3,
/// Security Directory
SECURITY = 4,
/// Base Relocation Table
BASERELOC = 5,
/// Debug Directory
DEBUG = 6,
/// Architecture Specific Data
ARCHITECTURE = 7,
/// RVA of GP
GLOBALPTR = 8,
/// TLS Directory
TLS = 9,
/// Load Configuration Directory
LOAD_CONFIG = 10,
/// Bound Import Directory in headers
BOUND_IMPORT = 11,
/// Import Address Table
IAT = 12,
/// Delay Load Import Descriptors
DELAY_IMPORT = 13,
/// COM Runtime descriptor
COM_DESCRIPTOR = 14,
_,
};
pub const ImageDataDirectory = extern struct {
virtual_address: u32,
size: u32,
@ -1054,9 +1005,9 @@ pub const Coff = struct {
assert(self.is_image);
const data_dirs = self.getDataDirectories();
if (@intFromEnum(DirectoryEntry.DEBUG) >= data_dirs.len) return null;
if (@intFromEnum(IMAGE.DIRECTORY_ENTRY.DEBUG) >= data_dirs.len) return null;
const debug_dir = data_dirs[@intFromEnum(DirectoryEntry.DEBUG)];
const debug_dir = data_dirs[@intFromEnum(IMAGE.DIRECTORY_ENTRY.DEBUG)];
var reader: std.Io.Reader = .fixed(self.data);
if (self.is_loaded) {
@ -1400,6 +1351,44 @@ pub const Relocation = extern struct {
};
pub const IMAGE = struct {
pub const DIRECTORY_ENTRY = enum(u32) {
/// Export Directory
EXPORT = 0,
/// Import Directory
IMPORT = 1,
/// Resource Directory
RESOURCE = 2,
/// Exception Directory
EXCEPTION = 3,
/// Security Directory
SECURITY = 4,
/// Base Relocation Table
BASERELOC = 5,
/// Debug Directory
DEBUG = 6,
/// Architecture Specific Data
ARCHITECTURE = 7,
/// RVA of GP
GLOBALPTR = 8,
/// TLS Directory
TLS = 9,
/// Load Configuration Directory
LOAD_CONFIG = 10,
/// Bound Import Directory in headers
BOUND_IMPORT = 11,
/// Import Address Table
IAT = 12,
/// Delay Load Import Descriptors
DELAY_IMPORT = 13,
/// COM Runtime descriptor
COM_DESCRIPTOR = 14,
/// must be zero
RESERVED = 15,
_,
pub const len = @typeInfo(IMAGE.DIRECTORY_ENTRY).@"enum".fields.len;
};
pub const FILE = struct {
/// Machine Types
/// The Machine field has one of the following values, which specify the CPU type.
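Annotation: the top-level DirectoryEntry enum removed in the first hunk reappears above as the namespaced IMAGE.DIRECTORY_ENTRY, now with the reserved sixteenth slot and a len constant. A minimal sketch of looking up the new TLS directory with these names, mirroring the DEBUG lookup shown earlier in this file; getTlsDirectory is a hypothetical helper, not part of this commit, and assumes getDataDirectories and ImageDataDirectory keep the shapes shown here:

const std = @import("std");
const coff = std.coff;

// Hypothetical helper: fetch the TLS data directory from a parsed image, or null if absent.
fn getTlsDirectory(image: *const coff.Coff) ?coff.ImageDataDirectory {
    const data_dirs = image.getDataDirectories();
    if (@intFromEnum(coff.IMAGE.DIRECTORY_ENTRY.TLS) >= data_dirs.len) return null;
    const tls_dir = data_dirs[@intFromEnum(coff.IMAGE.DIRECTORY_ENTRY.TLS)];
    if (tls_dir.size == 0) return null;
    return tls_dir;
}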

@ -468,10 +468,6 @@ const use_trap_panic = switch (builtin.zig_backend) {
.stage2_wasm,
.stage2_x86,
=> true,
.stage2_x86_64 => switch (builtin.target.ofmt) {
.elf, .macho => false,
else => true,
},
else => false,
};
@ -484,22 +480,6 @@ pub fn defaultPanic(
if (use_trap_panic) @trap();
switch (builtin.zig_backend) {
.stage2_aarch64,
.stage2_arm,
.stage2_powerpc,
.stage2_riscv64,
.stage2_spirv,
.stage2_wasm,
.stage2_x86,
=> @trap(),
.stage2_x86_64 => switch (builtin.target.ofmt) {
.elf, .macho => {},
else => @trap(),
},
else => {},
}
switch (builtin.os.tag) {
.freestanding, .other => {
@trap();

@ -28,6 +28,8 @@ test isNan {
}
test isSignalNan {
if (builtin.zig_backend == .stage2_x86_64 and builtin.object_format == .coff and builtin.abi != .gnu) return error.SkipZigTest;
inline for ([_]type{ f16, f32, f64, f80, f128, c_longdouble }) |T| {
// TODO: Signalling NaN values get converted to quiet NaN values in
// some cases where they shouldn't such that this can fail.

@ -120,12 +120,6 @@ const Value = extern struct {
}
pub fn format(value: Value, writer: *std.Io.Writer) std.Io.Writer.Error!void {
// Work around x86_64 backend limitation.
if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) {
try writer.writeAll("(unknown)");
return;
}
switch (value.td.kind) {
.integer => {
if (value.td.isSigned()) {
@ -624,10 +618,11 @@ fn exportHandler(
handler: anytype,
comptime sym_name: []const u8,
) void {
// Work around x86_64 backend limitation.
const linkage = if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) .internal else .weak;
const N = "__ubsan_handle_" ++ sym_name;
@export(handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
@export(handler, .{
.name = "__ubsan_handle_" ++ sym_name,
.linkage = .weak,
.visibility = .hidden,
});
}
fn exportHandlerWithAbort(
@ -635,16 +630,16 @@ fn exportHandlerWithAbort(
abort_handler: anytype,
comptime sym_name: []const u8,
) void {
// Work around x86_64 backend limitation.
const linkage = if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .windows) .internal else .weak;
{
const N = "__ubsan_handle_" ++ sym_name;
@export(handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
}
{
const N = "__ubsan_handle_" ++ sym_name ++ "_abort";
@export(abort_handler, .{ .name = N, .linkage = linkage, .visibility = if (linkage == .internal) .default else .hidden });
}
@export(handler, .{
.name = "__ubsan_handle_" ++ sym_name,
.linkage = .weak,
.visibility = .hidden,
});
@export(abort_handler, .{
.name = "__ubsan_handle_" ++ sym_name ++ "_abort",
.linkage = .weak,
.visibility = .hidden,
});
}
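Annotation: with the x86_64/Windows workaround removed, both helpers now unconditionally emit weak, hidden exports. A hypothetical sketch of what a call site produces; the handler body and the "example" suffix are placeholders, not symbols defined by this file:

fn exampleHandler() callconv(.c) void {}

comptime {
    // Emits a weak, hidden export named "__ubsan_handle_example".
    exportHandler(&exampleHandler, "example");
}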
const can_build_ubsan = switch (builtin.zig_backend) {

@ -1985,7 +1985,7 @@ pub fn create(gpa: Allocator, arena: Allocator, diag: *CreateDiagnostic, options
switch (target_util.zigBackend(target, use_llvm)) {
else => {},
.stage2_aarch64, .stage2_x86_64 => if (target.ofmt == .coff) {
break :s if (is_exe_or_dyn_lib) .dyn_lib else .zcu;
break :s if (is_exe_or_dyn_lib and build_options.have_llvm) .dyn_lib else .zcu;
},
}
if (options.config.use_new_linker) break :s .zcu;

@ -173685,7 +173685,7 @@ fn genBody(cg: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
const ty_nav = air_datas[@intFromEnum(inst)].ty_nav;
const nav = ip.getNav(ty_nav.nav);
const is_threadlocal = zcu.comp.config.any_non_single_threaded and nav.isThreadlocal(ip);
if (is_threadlocal) if (cg.mod.pic) {
if (is_threadlocal) if (cg.target.ofmt == .coff or cg.mod.pic) {
try cg.spillRegisters(&.{ .rdi, .rax });
} else {
try cg.spillRegisters(&.{.rax});

@ -386,6 +386,82 @@ pub fn emitMir(emit: *Emit) Error!void {
}, emit.lower.target), &.{});
},
else => unreachable,
} else if (emit.bin_file.cast(.coff2)) |coff| {
switch (emit.lower.target.cpu.arch) {
else => unreachable,
.x86 => {
try emit.encodeInst(try .new(.none, .mov, &.{
.{ .reg = .eax },
.{ .mem = .initSib(.qword, .{
.base = .{ .reg = .fs },
.disp = 4 * 11,
}) },
}, emit.lower.target), &.{});
try emit.encodeInst(try .new(.none, .mov, &.{
.{ .reg = .edi },
.{ .mem = .initSib(.dword, .{}) },
}, emit.lower.target), &.{.{
.op_index = 1,
.target = .{
.index = @intFromEnum(
try coff.globalSymbol("__tls_index", null),
),
.is_extern = false,
.type = .symbol,
},
}});
try emit.encodeInst(try .new(.none, .mov, &.{
.{ .reg = .eax },
.{ .mem = .initSib(.dword, .{
.base = .{ .reg = .eax },
.scale_index = .{ .index = .edi, .scale = 4 },
}) },
}, emit.lower.target), &.{});
try emit.encodeInst(try .new(.none, lowered_inst.encoding.mnemonic, &.{
lowered_inst.ops[0],
.{ .mem = .initSib(lowered_inst.ops[1].mem.sib.ptr_size, .{
.base = .{ .reg = .eax },
.disp = std.math.minInt(i32),
}) },
}, emit.lower.target), reloc_info);
},
.x86_64 => {
try emit.encodeInst(try .new(.none, .mov, &.{
.{ .reg = .rax },
.{ .mem = .initSib(.qword, .{
.base = .{ .reg = .gs },
.disp = 8 * 11,
}) },
}, emit.lower.target), &.{});
try emit.encodeInst(try .new(.none, .mov, &.{
.{ .reg = .edi },
.{ .mem = .initRip(.dword, 0) },
}, emit.lower.target), &.{.{
.op_index = 1,
.target = .{
.index = @intFromEnum(
try coff.globalSymbol("_tls_index", null),
),
.is_extern = false,
.type = .symbol,
},
}});
try emit.encodeInst(try .new(.none, .mov, &.{
.{ .reg = .rax },
.{ .mem = .initSib(.qword, .{
.base = .{ .reg = .rax },
.scale_index = .{ .index = .rdi, .scale = 8 },
}) },
}, emit.lower.target), &.{});
try emit.encodeInst(try .new(.none, lowered_inst.encoding.mnemonic, &.{
lowered_inst.ops[0],
.{ .mem = .initSib(lowered_inst.ops[1].mem.sib.ptr_size, .{
.base = .{ .reg = .rax },
.disp = std.math.minInt(i32),
}) },
}, emit.lower.target), reloc_info);
},
}
} else return emit.fail("TODO implement relocs for {s}", .{
@tagName(emit.bin_file.tag),
});
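Annotation: the new coff2 branch above lowers a threadlocal access into the standard Windows TLS sequence, which is also why the earlier CodeGen.zig hunk spills rdi and rax before such instructions: load the TLS array pointer from the TEB (gs:[0x58] on x86_64, fs:[0x2C] on x86), index it by the module's _tls_index (__tls_index on x86), then address the variable within that block through a section-relative relocation that later replaces the minInt(i32) placeholder displacement. A conceptual sketch of what the emitted x86_64 sequence computes; tlsVariableAddress is a hypothetical helper, only meaningful on x86_64-windows, and is not code from this commit:

const std = @import("std");

extern var _tls_index: u32; // this module's slot in the per-thread TLS array

// Emitted sequence being modeled:
//   mov rax, gs:[0x58]            ; TEB.ThreadLocalStoragePointer
//   mov edi, [_tls_index]         ; module slot
//   mov rax, [rax + rdi*8]        ; base of this module's TLS block
//   mov dst, [rax + secrel_off]   ; secrel_off patched via IMAGE_REL_AMD64_SECREL
fn tlsVariableAddress(secrel_off: usize) [*]u8 {
    const tls_array: [*]const usize = @ptrFromInt(asm ("movq %%gs:0x58, %[ret]"
        : [ret] "=r" (-> usize),
    ));
    return @ptrFromInt(tls_array[_tls_index] + secrel_off);
}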
@ -870,7 +946,13 @@ fn encodeInst(emit: *Emit, lowered_inst: Instruction, reloc_info: []const RelocI
.symbolnum = @intCast(reloc.target.index),
},
});
} else return emit.fail("TODO implement {s} reloc for {s}", .{
} else if (emit.bin_file.cast(.coff2)) |coff| try coff.addReloc(
@enumFromInt(emit.atom_index),
end_offset - 4,
@enumFromInt(reloc.target.index),
reloc.off,
.{ .AMD64 = .SECREL },
) else return emit.fail("TODO implement {s} reloc for {s}", .{
@tagName(reloc.target.type), @tagName(emit.bin_file.tag),
}),
};

File diff suppressed because it is too large.

@ -546,7 +546,7 @@ fn initHeaders(
break :phndx phnum;
} else undefined;
const expected_nodes_len = 15;
const expected_nodes_len = 5 + phnum * 2;
try elf.nodes.ensureTotalCapacity(gpa, expected_nodes_len);
try elf.phdrs.resize(gpa, phnum);
elf.nodes.appendAssumeCapacity(.file);
@ -808,25 +808,6 @@ fn initHeaders(
Symbol.Index.shstrtab.node(elf).slice(&elf.mf)[0] = 0;
Symbol.Index.strtab.node(elf).slice(&elf.mf)[0] = 0;
if (maybe_interp) |interp| {
try elf.nodes.ensureUnusedCapacity(gpa, 1);
const interp_ni = try elf.mf.addLastChildNode(gpa, Node.Known.rodata, .{
.size = interp.len + 1,
.moved = true,
.resized = true,
});
elf.nodes.appendAssumeCapacity(.{ .segment = interp_phndx });
elf.phdrs.items[interp_phndx] = interp_ni;
const sec_interp_si = try elf.addSection(interp_ni, .{
.name = ".interp",
.size = @intCast(interp.len + 1),
.flags = .{ .ALLOC = true },
});
const sec_interp = sec_interp_si.node(elf).slice(&elf.mf);
@memcpy(sec_interp[0..interp.len], interp);
sec_interp[interp.len] = 0;
}
assert(try elf.addSection(Node.Known.rodata, .{
.name = ".rodata",
.flags = .{ .ALLOC = true },
@ -857,6 +838,25 @@ fn initHeaders(
.addralign = elf.mf.flags.block_size,
}) == .tdata);
}
if (maybe_interp) |interp| {
try elf.nodes.ensureUnusedCapacity(gpa, 1);
const interp_ni = try elf.mf.addLastChildNode(gpa, Node.Known.rodata, .{
.size = interp.len + 1,
.moved = true,
.resized = true,
});
elf.nodes.appendAssumeCapacity(.{ .segment = interp_phndx });
elf.phdrs.items[interp_phndx] = interp_ni;
const sec_interp_si = try elf.addSection(interp_ni, .{
.name = ".interp",
.size = @intCast(interp.len + 1),
.flags = .{ .ALLOC = true },
});
const sec_interp = sec_interp_si.node(elf).slice(&elf.mf);
@memcpy(sec_interp[0..interp.len], interp);
sec_interp[interp.len] = 0;
}
assert(elf.nodes.len == expected_nodes_len);
}
@ -1072,6 +1072,32 @@ fn navType(
},
};
}
fn navSection(
elf: *Elf,
ip: *const InternPool,
nav_fr: @FieldType(@FieldType(InternPool.Nav, "status"), "fully_resolved"),
) Symbol.Index {
if (nav_fr.@"linksection".toSlice(ip)) |@"linksection"| {
if (std.mem.eql(u8, @"linksection", ".rodata") or
std.mem.startsWith(u8, @"linksection", ".rodata.")) return .rodata;
if (std.mem.eql(u8, @"linksection", ".text") or
std.mem.startsWith(u8, @"linksection", ".text.")) return .text;
if (std.mem.eql(u8, @"linksection", ".data") or
std.mem.startsWith(u8, @"linksection", ".data.")) return .data;
if (std.mem.eql(u8, @"linksection", ".tdata") or
std.mem.startsWith(u8, @"linksection", ".tdata.")) return .tdata;
}
return switch (navType(
ip,
.{ .fully_resolved = nav_fr },
elf.base.comp.config.any_non_single_threaded,
)) {
else => unreachable,
.FUNC => .text,
.OBJECT => .data,
.TLS => .tdata,
};
}
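Annotation: navSection first honors an explicit linksection, accepting either the exact section name or a ".<name>."-prefixed variant, and otherwise falls back to the nav's type: functions land in .text, ordinary data in .data, and threadlocal data in .tdata. Hypothetical user-level declarations (assuming a multi-threaded build; not from this commit) and where this mapping would place them:

threadlocal var counter: u32 = 0; // TLS, no linksection -> .tdata
var state: u32 linksection(".data.state") = 0; // prefix match -> .data
export fn tick() void { // function -> .text
    counter +%= 1;
}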
fn navMapIndex(elf: *Elf, zcu: *Zcu, nav_index: InternPool.Nav.Index) !Node.NavMapIndex {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
@ -1312,18 +1338,16 @@ pub fn updateNav(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
};
}
fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) !void {
const comp = elf.base.comp;
const zcu = pt.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const nav = ip.getNav(nav_index);
const nav_val = nav.status.fully_resolved.val;
const nav_init, const is_threadlocal = switch (ip.indexToKey(nav_val)) {
else => .{ nav_val, false },
.variable => |variable| .{ variable.init, variable.is_threadlocal },
.@"extern" => return,
.func => .{ .none, false },
const nav_init = switch (ip.indexToKey(nav_val)) {
else => nav_val,
.variable => |variable| variable.init,
.@"extern", .func => .none,
};
if (nav_init == .none or !Type.fromInterned(ip.typeOf(nav_init)).hasRuntimeBits(zcu)) return;
@ -1334,8 +1358,7 @@ fn updateNavInner(elf: *Elf, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index)
switch (sym.ni) {
.none => {
try elf.nodes.ensureUnusedCapacity(gpa, 1);
const sec_si: Symbol.Index =
if (is_threadlocal and comp.config.any_non_single_threaded) .tdata else .data;
const sec_si = elf.navSection(ip, nav.status.fully_resolved);
const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{
.alignment = pt.navAlignment(nav_index).toStdMem(),
.moved = true,
@ -1452,9 +1475,10 @@ fn updateFuncInner(
switch (sym.ni) {
.none => {
try elf.nodes.ensureUnusedCapacity(gpa, 1);
const sec_si = elf.navSection(ip, nav.status.fully_resolved);
const mod = zcu.navFileScope(func.owner_nav).mod.?;
const target = &mod.resolved_target.result;
const ni = try elf.mf.addLastChildNode(gpa, Symbol.Index.text.node(elf), .{
const ni = try elf.mf.addLastChildNode(gpa, sec_si.node(elf), .{
.alignment = switch (nav.status.fully_resolved.alignment) {
.none => switch (mod.optimize_mode) {
.Debug,
@ -1471,7 +1495,7 @@ fn updateFuncInner(
sym.ni = ni;
switch (elf.symPtr(si)) {
inline else => |sym_ptr, class| sym_ptr.shndx =
@field(elf.symPtr(.text), @tagName(class)).shndx,
@field(elf.symPtr(sec_si), @tagName(class)).shndx,
}
},
else => si.deleteLocationRelocs(elf),

@ -14,6 +14,8 @@ updates: std.ArrayList(Node.Index),
update_prog_node: std.Progress.Node,
writers: std.SinglyLinkedList,
pub const growth_factor = 4;
pub const Error = std.posix.MMapError ||
std.posix.MRemapError ||
std.fs.File.SetEndPosError ||
@ -64,6 +66,7 @@ pub fn init(file: std.fs.File, gpa: std.mem.Allocator) !MappedFile {
assert(try mf.addNode(gpa, .{
.add_node = .{
.size = size,
.alignment = mf.flags.block_size,
.fixed = true,
},
}) == Node.Index.root);
@ -153,20 +156,24 @@ pub const Node = extern struct {
return ni.get(mf).parent;
}
pub const ChildIterator = struct {
mf: *const MappedFile,
ni: Node.Index,
pub fn next(it: *ChildIterator) ?Node.Index {
const ni = it.ni;
if (ni == .none) return null;
it.ni = ni.get(it.mf).next;
return ni;
}
};
pub fn children(ni: Node.Index, mf: *const MappedFile) ChildIterator {
pub fn ChildIterator(comptime direction: enum { prev, next }) type {
return struct {
mf: *const MappedFile,
ni: Node.Index,
pub fn next(it: *@This()) ?Node.Index {
const ni = it.ni;
if (ni == .none) return null;
it.ni = @field(ni.get(it.mf), @tagName(direction));
return ni;
}
};
}
pub fn children(ni: Node.Index, mf: *const MappedFile) ChildIterator(.next) {
return .{ .mf = mf, .ni = ni.get(mf).first };
}
pub fn reverseChildren(ni: Node.Index, mf: *const MappedFile) ChildIterator(.prev) {
return .{ .mf = mf, .ni = ni.get(mf).last };
}
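Annotation: the forward-only iterator is generalized into ChildIterator(direction), so children walks first/next while the new reverseChildren walks last/prev behind the same next() interface. A usage sketch, assuming an existing mf: *const MappedFile and a parent node index parent_ni already in hand:

var forward_it = parent_ni.children(mf);
while (forward_it.next()) |child_ni| {
    _ = child_ni; // visit children front to back
}
var reverse_it = parent_ni.reverseChildren(mf);
while (reverse_it.next()) |child_ni| {
    _ = child_ni; // visit children back to front
}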
pub fn childrenMoved(ni: Node.Index, gpa: std.mem.Allocator, mf: *MappedFile) !void {
var child_ni = ni.get(mf).last;
@ -274,7 +281,8 @@ pub const Node = extern struct {
if (set_has_content) parent_node.flags.has_content = true;
if (parent_ni == .none) break;
parent_ni = parent_node.parent;
offset += parent_ni.location(mf).resolve(mf)[0];
const parent_offset, _ = parent_ni.location(mf).resolve(mf);
offset += parent_offset;
}
return .{ .offset = offset, .size = size };
}
@ -428,7 +436,7 @@ pub const Node = extern struct {
const total_capacity = interface.end + unused_capacity;
if (interface.buffer.len >= total_capacity) return;
const w: *Writer = @fieldParentPtr("interface", interface);
w.ni.resize(w.mf, w.gpa, total_capacity +| total_capacity / 2) catch |err| {
w.ni.resize(w.mf, w.gpa, total_capacity +| total_capacity / growth_factor) catch |err| {
w.err = err;
return error.WriteFailed;
};
@ -487,7 +495,8 @@ fn addNode(mf: *MappedFile, gpa: std.mem.Allocator, opts: struct {
free_node.flags.moved = false;
free_node.flags.resized = false;
}
if (offset > opts.parent.location(mf).resolve(mf)[1]) try opts.parent.resize(mf, gpa, offset);
_, const parent_size = opts.parent.location(mf).resolve(mf);
if (offset > parent_size) try opts.parent.resize(mf, gpa, offset);
try free_ni.resize(mf, gpa, opts.add_node.size);
}
if (opts.add_node.moved) free_ni.movedAssumeCapacity(mf);
@ -522,6 +531,27 @@ pub fn addOnlyChildNode(
return ni;
}
pub fn addFirstChildNode(
mf: *MappedFile,
gpa: std.mem.Allocator,
parent_ni: Node.Index,
opts: AddNodeOptions,
) !Node.Index {
try mf.nodes.ensureUnusedCapacity(gpa, 1);
const parent = parent_ni.get(mf);
const ni = try mf.addNode(gpa, .{
.parent = parent_ni,
.next = parent.first,
.add_node = opts,
});
switch (parent.first) {
.none => parent.last = ni,
else => |first_ni| first_ni.get(mf).prev = ni,
}
parent.first = ni;
return ni;
}
pub fn addLastChildNode(
mf: *MappedFile,
gpa: std.mem.Allocator,
@ -577,7 +607,7 @@ pub fn addNodeAfter(
fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested_size: u64) !void {
const node = ni.get(mf);
var old_offset, const old_size = node.location().resolve(mf);
const old_offset, const old_size = node.location().resolve(mf);
const new_size = node.flags.alignment.forward(@intCast(requested_size));
// Resize the entire file
if (ni == Node.Index.root) {
@ -587,169 +617,238 @@ fn resizeNode(mf: *MappedFile, gpa: std.mem.Allocator, ni: Node.Index, requested
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
}
while (true) {
const parent = node.parent.get(mf);
_, const old_parent_size = parent.location().resolve(mf);
const trailing_end = switch (node.next) {
.none => parent.location().resolve(mf)[1],
else => |next_ni| next_ni.location(mf).resolve(mf)[0],
};
assert(old_offset + old_size <= trailing_end);
// Expand the node into available trailing free space
if (old_offset + new_size <= trailing_end) {
try mf.ensureCapacityForSetLocation(gpa);
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
}
// Ask the filesystem driver to insert an extent into the file without copying any data
if (is_linux and !mf.flags.fallocate_insert_range_unsupported and
node.flags.alignment.order(mf.flags.block_size).compare(.gte))
insert_range: {
const last_offset, const last_size = parent.last.location(mf).resolve(mf);
const last_end = last_offset + last_size;
assert(last_end <= old_parent_size);
const range_size =
node.flags.alignment.forward(@intCast(requested_size +| requested_size / 2)) - old_size;
const new_parent_size = last_end + range_size;
if (new_parent_size > old_parent_size) {
try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / 2);
continue;
}
const range_file_offset = ni.fileLocation(mf, false).offset + old_size;
while (true) switch (linux.E.init(linux.fallocate(
const parent = node.parent.get(mf);
_, var old_parent_size = parent.location().resolve(mf);
const trailing_end = trailing_end: switch (node.next) {
.none => old_parent_size,
else => |next_ni| {
const next_offset, _ = next_ni.location(mf).resolve(mf);
break :trailing_end next_offset;
},
};
assert(old_offset + old_size <= trailing_end);
if (old_offset + new_size <= trailing_end) {
// Expand the node into trailing free space
try mf.ensureCapacityForSetLocation(gpa);
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
}
if (is_linux and !mf.flags.fallocate_insert_range_unsupported and
node.flags.alignment.order(mf.flags.block_size).compare(.gte))
insert_range: {
// Ask the filesystem driver to insert extents into the file without copying any data
const last_offset, const last_size = parent.last.location(mf).resolve(mf);
const last_end = last_offset + last_size;
assert(last_end <= old_parent_size);
const range_file_offset = ni.fileLocation(mf, false).offset + old_size;
const range_size = node.flags.alignment.forward(
@intCast(requested_size +| requested_size / growth_factor),
) - old_size;
_, const file_size = Node.Index.root.location(mf).resolve(mf);
while (true) switch (linux.E.init(switch (std.math.order(range_file_offset, file_size)) {
.lt => linux.fallocate(
mf.file.handle,
linux.FALLOC.FL_INSERT_RANGE,
@intCast(range_file_offset),
@intCast(range_size),
))) {
.SUCCESS => {
var enclosing_ni = ni;
while (true) {
try mf.ensureCapacityForSetLocation(gpa);
const enclosing = enclosing_ni.get(mf);
const enclosing_offset, const old_enclosing_size =
enclosing.location().resolve(mf);
const new_enclosing_size = old_enclosing_size + range_size;
enclosing_ni.setLocationAssumeCapacity(mf, enclosing_offset, new_enclosing_size);
if (enclosing_ni == Node.Index.root) {
assert(enclosing_offset == 0);
try mf.ensureTotalCapacity(@intCast(new_enclosing_size));
break;
}
var after_ni = enclosing.next;
while (after_ni != .none) {
try mf.ensureCapacityForSetLocation(gpa);
const after = after_ni.get(mf);
const after_offset, const after_size = after.location().resolve(mf);
after_ni.setLocationAssumeCapacity(
mf,
range_size + after_offset,
after_size,
);
after_ni = after.next;
}
enclosing_ni = enclosing.parent;
}
return;
},
.INTR => continue,
.BADF, .FBIG, .INVAL => unreachable,
.IO => return error.InputOutput,
.NODEV => return error.NotFile,
.NOSPC => return error.NoSpaceLeft,
.NOSYS, .OPNOTSUPP => {
mf.flags.fallocate_insert_range_unsupported = true;
break :insert_range;
},
.PERM => return error.PermissionDenied,
.SPIPE => return error.Unseekable,
.TXTBSY => return error.FileBusy,
else => |e| return std.posix.unexpectedErrno(e),
};
}
switch (node.next) {
.none => {
// As this is the last node, we simply need more space in the parent
const new_parent_size = old_offset + new_size;
try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / 2);
},
else => |*next_ni_ptr| switch (node.flags.fixed) {
false => {
// Make space at the end of the parent for this floating node
const last = parent.last.get(mf);
const last_offset, const last_size = last.location().resolve(mf);
const new_offset = node.flags.alignment.forward(@intCast(last_offset + last_size));
const new_parent_size = new_offset + new_size;
if (new_parent_size > old_parent_size) {
try mf.resizeNode(
gpa,
node.parent,
new_parent_size +| new_parent_size / 2,
);
continue;
}
const next_ni = next_ni_ptr.*;
next_ni.get(mf).prev = node.prev;
switch (node.prev) {
.none => parent.first = next_ni,
else => |prev_ni| prev_ni.get(mf).next = next_ni,
}
last.next = ni;
node.prev = parent.last;
next_ni_ptr.* = .none;
parent.last = ni;
if (node.flags.has_content) {
const parent_file_offset = node.parent.fileLocation(mf, false).offset;
try mf.moveRange(
parent_file_offset + old_offset,
parent_file_offset + new_offset,
old_size,
);
}
old_offset = new_offset;
},
true => {
// Move the next floating node to make space for this fixed node
const next_ni = next_ni_ptr.*;
const next = next_ni.get(mf);
assert(!next.flags.fixed);
const next_offset, const next_size = next.location().resolve(mf);
const last = parent.last.get(mf);
const last_offset, const last_size = last.location().resolve(mf);
const new_offset = next.flags.alignment.forward(@intCast(
@max(old_offset + new_size, last_offset + last_size),
));
const new_parent_size = new_offset + next_size;
if (new_parent_size > old_parent_size) {
try mf.resizeNode(
gpa,
node.parent,
new_parent_size +| new_parent_size / 2,
);
continue;
}
),
.eq => linux.ftruncate(mf.file.handle, @intCast(range_file_offset + range_size)),
.gt => unreachable,
})) {
.SUCCESS => {
var enclosing_ni = ni;
while (true) {
try mf.ensureCapacityForSetLocation(gpa);
next.prev = parent.last;
parent.last = next_ni;
last.next = next_ni;
next_ni_ptr.* = next.next;
switch (next.next) {
.none => {},
else => |next_next_ni| next_next_ni.get(mf).prev = ni,
const enclosing = enclosing_ni.get(mf);
const enclosing_offset, const old_enclosing_size =
enclosing.location().resolve(mf);
const new_enclosing_size = old_enclosing_size + range_size;
enclosing_ni.setLocationAssumeCapacity(mf, enclosing_offset, new_enclosing_size);
if (enclosing_ni == Node.Index.root) {
assert(enclosing_offset == 0);
try mf.ensureTotalCapacity(@intCast(new_enclosing_size));
break;
}
next.next = .none;
if (node.flags.has_content) {
const parent_file_offset = node.parent.fileLocation(mf, false).offset;
try mf.moveRange(
parent_file_offset + next_offset,
parent_file_offset + new_offset,
next_size,
var after_ni = enclosing.next;
while (after_ni != .none) {
try mf.ensureCapacityForSetLocation(gpa);
const after = after_ni.get(mf);
const after_offset, const after_size = after.location().resolve(mf);
after_ni.setLocationAssumeCapacity(
mf,
range_size + after_offset,
after_size,
);
after_ni = after.next;
}
next_ni.setLocationAssumeCapacity(mf, new_offset, next_size);
},
enclosing_ni = enclosing.parent;
}
return;
},
.INTR => continue,
.BADF, .FBIG, .INVAL => unreachable,
.IO => return error.InputOutput,
.NODEV => return error.NotFile,
.NOSPC => return error.NoSpaceLeft,
.NOSYS, .OPNOTSUPP => {
mf.flags.fallocate_insert_range_unsupported = true;
break :insert_range;
},
.PERM => return error.PermissionDenied,
.SPIPE => return error.Unseekable,
.TXTBSY => return error.FileBusy,
else => |e| return std.posix.unexpectedErrno(e),
};
}
if (node.next == .none) {
// As this is the last node, we simply need more space in the parent
const new_parent_size = old_offset + new_size;
try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / growth_factor);
try mf.ensureCapacityForSetLocation(gpa);
ni.setLocationAssumeCapacity(mf, old_offset, new_size);
return;
}
if (!node.flags.fixed) {
// Make space at the end of the parent for this floating node
const last = parent.last.get(mf);
const last_offset, const last_size = last.location().resolve(mf);
const new_offset = node.flags.alignment.forward(@intCast(last_offset + last_size));
const new_parent_size = new_offset + new_size;
if (new_parent_size > old_parent_size)
try mf.resizeNode(gpa, node.parent, new_parent_size +| new_parent_size / growth_factor);
try mf.ensureCapacityForSetLocation(gpa);
const next_ni = node.next;
next_ni.get(mf).prev = node.prev;
switch (node.prev) {
.none => parent.first = next_ni,
else => |prev_ni| prev_ni.get(mf).next = next_ni,
}
last.next = ni;
node.prev = parent.last;
node.next = .none;
parent.last = ni;
if (node.flags.has_content) {
const parent_file_offset = node.parent.fileLocation(mf, false).offset;
try mf.moveRange(
parent_file_offset + old_offset,
parent_file_offset + new_offset,
old_size,
);
}
ni.setLocationAssumeCapacity(mf, new_offset, new_size);
return;
}
// Search for the first floating node following this fixed node
var last_fixed_ni = ni;
var first_floating_ni = node.next;
var shift = new_size - old_size;
var direction: enum { forward, reverse } = .forward;
while (true) {
assert(last_fixed_ni != .none);
const last_fixed = last_fixed_ni.get(mf);
assert(last_fixed.flags.fixed);
const old_last_fixed_offset, const last_fixed_size = last_fixed.location().resolve(mf);
const new_last_fixed_offset = old_last_fixed_offset + shift;
make_space: switch (first_floating_ni) {
else => {
const first_floating = first_floating_ni.get(mf);
const old_first_floating_offset, const first_floating_size =
first_floating.location().resolve(mf);
assert(old_last_fixed_offset + last_fixed_size <= old_first_floating_offset);
if (new_last_fixed_offset + last_fixed_size <= old_first_floating_offset)
break :make_space;
assert(direction == .forward);
if (first_floating.flags.fixed) {
shift = first_floating.flags.alignment.forward(@intCast(
@max(shift, first_floating_size),
));
// Not enough space, try the next node
last_fixed_ni = first_floating_ni;
first_floating_ni = first_floating.next;
continue;
}
// Move the found floating node to make space for preceding fixed nodes
const last = parent.last.get(mf);
const last_offset, const last_size = last.location().resolve(mf);
const new_first_floating_offset = first_floating.flags.alignment.forward(
@intCast(@max(new_last_fixed_offset + last_fixed_size, last_offset + last_size)),
);
const new_parent_size = new_first_floating_offset + first_floating_size;
if (new_parent_size > old_parent_size) {
try mf.resizeNode(
gpa,
node.parent,
new_parent_size +| new_parent_size / growth_factor,
);
_, old_parent_size = parent.location().resolve(mf);
}
try mf.ensureCapacityForSetLocation(gpa);
if (parent.last != first_floating_ni) {
first_floating.prev = parent.last;
parent.last = first_floating_ni;
last.next = first_floating_ni;
last_fixed.next = first_floating.next;
switch (first_floating.next) {
.none => {},
else => |next_ni| next_ni.get(mf).prev = last_fixed_ni,
}
first_floating.next = .none;
}
if (first_floating.flags.has_content) {
const parent_file_offset =
node.parent.fileLocation(mf, false).offset;
try mf.moveRange(
parent_file_offset + old_first_floating_offset,
parent_file_offset + new_first_floating_offset,
first_floating_size,
);
}
first_floating_ni.setLocationAssumeCapacity(
mf,
new_first_floating_offset,
first_floating_size,
);
// Continue the search after the just-moved floating node
first_floating_ni = last_fixed.next;
continue;
},
.none => {
assert(direction == .forward);
const new_parent_size = new_last_fixed_offset + last_fixed_size;
if (new_parent_size > old_parent_size) {
try mf.resizeNode(
gpa,
node.parent,
new_parent_size +| new_parent_size / growth_factor,
);
_, old_parent_size = parent.location().resolve(mf);
}
},
}
try mf.ensureCapacityForSetLocation(gpa);
if (last_fixed_ni == ni) {
// The original fixed node now has enough space
last_fixed_ni.setLocationAssumeCapacity(
mf,
old_last_fixed_offset,
last_fixed_size + shift,
);
return;
}
// Move a fixed node into trailing free space
if (last_fixed.flags.has_content) {
const parent_file_offset = node.parent.fileLocation(mf, false).offset;
try mf.moveRange(
parent_file_offset + old_last_fixed_offset,
parent_file_offset + new_last_fixed_offset,
last_fixed_size,
);
}
last_fixed_ni.setLocationAssumeCapacity(mf, new_last_fixed_offset, last_fixed_size);
// Retry the previous nodes now that there is enough space
first_floating_ni = last_fixed_ni;
last_fixed_ni = last_fixed.prev;
direction = .reverse;
}
}
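Annotation: when a node outgrows its trailing free space and its alignment is at least the mapped file's block size, the rewritten resizeNode above first asks the kernel to make room: FALLOC.FL_INSERT_RANGE shifts later extents in place when the insertion point is inside the file, and a plain ftruncate suffices when it is exactly at the end of the file. A minimal standalone sketch of that insert-range call, assuming a Linux fd and a block-aligned offset and len; insertRange is a hypothetical helper, not part of this commit:

const std = @import("std");
const linux = std.os.linux;

// Sketch: insert a zero-filled, block-aligned hole at `offset`, shifting later data.
fn insertRange(fd: std.posix.fd_t, offset: u64, len: u64) !void {
    while (true) switch (linux.E.init(linux.fallocate(
        fd,
        linux.FALLOC.FL_INSERT_RANGE,
        @intCast(offset),
        @intCast(len),
    ))) {
        .SUCCESS => return,
        .INTR => continue,
        .NOSYS, .OPNOTSUPP => return error.Unsupported, // caller falls back to copying
        .NOSPC => return error.NoSpaceLeft,
        else => |e| return std.posix.unexpectedErrno(e),
    };
}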
@ -843,7 +942,7 @@ fn ensureCapacityForSetLocation(mf: *MappedFile, gpa: std.mem.Allocator) !void {
pub fn ensureTotalCapacity(mf: *MappedFile, new_capacity: usize) !void {
if (mf.contents.len >= new_capacity) return;
try mf.ensureTotalCapacityPrecise(new_capacity +| new_capacity / 2);
try mf.ensureTotalCapacityPrecise(new_capacity +| new_capacity / growth_factor);
}
pub fn ensureTotalCapacityPrecise(mf: *MappedFile, new_capacity: usize) !void {

@ -389,10 +389,7 @@ pub fn canBuildLibUbsanRt(target: *const std.Target) enum { no, yes, llvm_only,
}
return switch (zigBackend(target, false)) {
.stage2_wasm => .llvm_lld_only,
.stage2_x86_64 => switch (target.ofmt) {
.elf, .macho => .yes,
else => .llvm_only,
},
.stage2_x86_64 => .yes,
else => .llvm_only,
};
}
@ -776,10 +773,9 @@ pub fn supportsTailCall(target: *const std.Target, backend: std.builtin.Compiler
}
pub fn supportsThreads(target: *const std.Target, backend: std.builtin.CompilerBackend) bool {
_ = target;
return switch (backend) {
.stage2_aarch64 => false,
.stage2_powerpc => true,
.stage2_x86_64 => target.ofmt == .macho or target.ofmt == .elf,
else => true,
};
}

@ -7,7 +7,6 @@ test "thread local variable" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64 and builtin.os.tag == .macos) {
@ -27,7 +26,6 @@ test "pointer to thread local array" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
const s = "Hello world";
@memcpy(buffer[0..s.len], s);
@ -41,9 +39,8 @@ test "reference a global threadlocal variable" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt == .coff) return error.SkipZigTest; // TODO
_ = nrfx_uart_rx(&g_uart0);
try nrfx_uart_rx(&g_uart0);
}
const nrfx_uart_t = extern struct {
@ -51,11 +48,12 @@ const nrfx_uart_t = extern struct {
drv_inst_idx: u8,
};
pub fn nrfx_uart_rx(p_instance: [*c]const nrfx_uart_t) void {
_ = p_instance;
pub fn nrfx_uart_rx(p_instance: [*c]const nrfx_uart_t) !void {
try expect(p_instance.*.p_reg == 0);
try expect(p_instance.*.drv_inst_idx == 0xab);
}
threadlocal var g_uart0 = nrfx_uart_t{
.p_reg = 0,
.drv_inst_idx = 0,
.drv_inst_idx = 0xab,
};

@ -2291,24 +2291,12 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
if (options.skip_single_threaded and test_target.single_threaded == true)
continue;
// TODO get compiler-rt tests passing for self-hosted backends.
if (((target.cpu.arch != .x86_64 and target.cpu.arch != .aarch64) or target.ofmt == .coff) and
test_target.use_llvm == false and mem.eql(u8, options.name, "compiler-rt"))
continue;
// TODO get zigc tests passing for other self-hosted backends.
if (target.cpu.arch != .x86_64 and
test_target.use_llvm == false and mem.eql(u8, options.name, "zigc"))
continue;
// TODO get std lib tests passing for other self-hosted backends.
if ((target.cpu.arch != .x86_64 or target.os.tag != .linux) and
test_target.use_llvm == false and mem.eql(u8, options.name, "std"))
continue;
if (target.cpu.arch != .x86_64 and
test_target.use_llvm == false and mem.eql(u8, options.name, "c-import"))
continue;
if (!would_use_llvm and target.cpu.arch == .aarch64) {
// TODO get std tests passing for the aarch64 self-hosted backend.
if (mem.eql(u8, options.name, "std")) continue;
// TODO get zigc tests passing for the aarch64 self-hosted backend.
if (mem.eql(u8, options.name, "zigc")) continue;
}
const want_this_mode = for (options.optimize_modes) |m| {
if (m == test_target.optimize_mode) break true;
@ -2362,7 +2350,7 @@ fn addOneModuleTest(
const single_threaded_suffix = if (test_target.single_threaded == true) "-single" else "";
const backend_suffix = if (test_target.use_llvm == true)
"-llvm"
else if (target.ofmt == std.Target.ObjectFormat.c)
else if (target.ofmt == .c)
"-cbe"
else if (test_target.use_llvm == false)
"-selfhosted"
@ -2389,7 +2377,7 @@ fn addOneModuleTest(
use_pic,
});
if (target.ofmt == std.Target.ObjectFormat.c) {
if (target.ofmt == .c) {
var altered_query = test_target.target;
altered_query.ofmt = null;