Merge pull request #15162 from jacobly0/x86_64-start
x86_64: get enough things working to enable full `start.zig` logic
This commit is contained in: commit 5ea6e78943
@@ -125,36 +125,44 @@ pub extern fn clone(func: CloneFn, stack: usize, flags: u32, arg: usize, ptid: *
pub fn restore() callconv(.Naked) void {
switch (@import("builtin").zig_backend) {
.stage2_c => return asm volatile (
.stage2_c => asm volatile (
\\ movl %[number], %%eax
\\ int $0x80
\\ ret
:
: [number] "i" (@enumToInt(SYS.sigreturn)),
: "memory"
),
else => return asm volatile ("int $0x80"
else => asm volatile (
\\ int $0x80
\\ ret
:
: [number] "{eax}" (@enumToInt(SYS.sigreturn)),
: "memory"
),
}
unreachable;
}

pub fn restore_rt() callconv(.Naked) void {
switch (@import("builtin").zig_backend) {
.stage2_c => return asm volatile (
.stage2_c => asm volatile (
\\ movl %[number], %%eax
\\ int $0x80
\\ ret
:
: [number] "i" (@enumToInt(SYS.rt_sigreturn)),
: "memory"
),
else => return asm volatile ("int $0x80"
else => asm volatile (
\\ int $0x80
\\ ret
:
: [number] "{eax}" (@enumToInt(SYS.rt_sigreturn)),
: "memory"
),
}
unreachable;
}

pub const O = struct {
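The two hunks above share one fix: under `callconv(.Naked)` no function epilogue is generated, so a `return` cannot be relied on to produce a `ret` instruction. The new code therefore encodes `ret` (or `retq` on x86_64 below) in the assembly itself and ends the function with `unreachable`. A minimal sketch of the pattern, assuming a Zig version of this era (hypothetical stub, not part of this PR):

    // A naked function must return via an explicit `ret` in the asm body;
    // control never falls out of the asm block, hence the `unreachable`.
    fn syscallStub() callconv(.Naked) void {
        asm volatile (
            \\ int $0x80
            \\ ret
            :
            : [number] "{eax}" (@enumToInt(SYS.sigreturn)),
            : "memory"
        );
        unreachable;
    }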
@@ -109,7 +109,7 @@ pub const restore = restore_rt;

pub fn restore_rt() callconv(.Naked) void {
switch (@import("builtin").zig_backend) {
.stage2_c => return asm volatile (
.stage2_c => asm volatile (
\\ movl %[number], %%eax
\\ syscall
\\ retq
@@ -117,12 +117,15 @@ pub fn restore_rt() callconv(.Naked) void {
: [number] "i" (@enumToInt(SYS.rt_sigreturn)),
: "rcx", "r11", "memory"
),
else => return asm volatile ("syscall"
else => asm volatile (
\\ syscall
\\ retq
:
: [number] "{rax}" (@enumToInt(SYS.rt_sigreturn)),
: "rcx", "r11", "memory"
),
}
unreachable;
}

pub const mode_t = usize;
@@ -19,7 +19,7 @@ const start_sym_name = if (native_arch.isMIPS()) "__start" else "_start";
// self-hosted is capable enough to handle all of the real start.zig logic.
pub const simplified_logic =
builtin.zig_backend == .stage2_wasm or
builtin.zig_backend == .stage2_x86_64 or
(builtin.zig_backend == .stage2_x86_64 and (builtin.link_libc or builtin.os.tag == .plan9)) or
builtin.zig_backend == .stage2_x86 or
builtin.zig_backend == .stage2_aarch64 or
builtin.zig_backend == .stage2_arm or
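The rewritten clause narrows the x86_64 fallback: the self-hosted x86_64 backend now uses the simplified start logic only when libc is linked or the target is Plan 9, so ordinary x86_64 builds compile the full `start.zig` logic, which is what the commit title refers to. A sketch of how the gate resolves at comptime (illustrative, not from the source):

    const builtin = @import("builtin");

    // For a plain x86_64 self-hosted build without libc, both operands of
    // `and` are comptime-known, the clause folds to false, and the full
    // start.zig path is compiled in.
    const x86_64_simplified = builtin.zig_backend == .stage2_x86_64 and
        (builtin.link_libc or builtin.os.tag == .plan9);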
@@ -555,7 +555,7 @@ pub const Decl = struct {
_,

pub fn init(oi: ?Index) OptionalIndex {
return oi orelse .none;
return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
}

pub fn unwrap(oi: OptionalIndex) ?Index {
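`Index` and `OptionalIndex` are distinct enums, so the old `return oi orelse .none;` mixed the two types; the new body converts explicitly through the shared integer representation. A reduced sketch of the conversion, with hypothetical enum definitions (the `none` sentinel value is an assumption):

    const std = @import("std");

    const Index = enum(u32) { _ };
    const OptionalIndex = enum(u32) {
        none = std.math.maxInt(u32), // assumed sentinel, for illustration
        _,

        // Reinterpret the backing integer of ?Index as an OptionalIndex.
        pub fn init(oi: ?Index) OptionalIndex {
            return @intToEnum(OptionalIndex, @enumToInt(oi orelse return .none));
        }
    };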
@@ -28,10 +28,11 @@ const Type = @import("../../type.zig").Type;
const TypedValue = @import("../../TypedValue.zig");
const Value = @import("../../value.zig").Value;

const bits = @import("bits.zig");
const abi = @import("abi.zig");
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const bits = @import("bits.zig");
const encoder = @import("encoder.zig");
const errUnionErrorOffset = codegen.errUnionErrorOffset;
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;

const Condition = bits.Condition;
const Immediate = bits.Immediate;
@@ -2527,7 +2528,7 @@ fn airErrUnionPayloadPtrSet(self: *Self, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);

const pl_off = @intCast(i32, errUnionErrorOffset(pl_ty, self.target.*));
const pl_off = @intCast(i32, errUnionPayloadOffset(pl_ty, self.target.*));
const dst_abi_size = @intCast(u32, dst_ty.abiSize(self.target.*));
try self.asmRegisterMemory(
.lea,
@@ -3650,10 +3651,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.dead => unreachable,
.eflags => unreachable,
.register_overflow => unreachable,
.immediate => |imm| {
try self.setRegOrMem(value_ty, .{ .memory = imm }, value);
},
.stack_offset => {
.immediate, .stack_offset => {
const reg = try self.copyToTmpRegister(ptr_ty, ptr);
try self.store(.{ .register = reg }, value, ptr_ty, value_ty);
},
@@ -3668,52 +3666,67 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
.none => unreachable,
.dead => unreachable,
.unreach => unreachable,
.eflags => |cc| {
try self.asmSetccMemory(Memory.sib(
Memory.PtrSize.fromSize(abi_size),
.{ .base = reg.to64() },
), cc);
.eflags => |cc| try self.asmSetccMemory(
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg.to64() }),
cc,
),
.undef => if (self.wantSafety()) switch (abi_size) {
1 => try self.store(ptr, .{ .immediate = 0xaa }, ptr_ty, value_ty),
2 => try self.store(ptr, .{ .immediate = 0xaaaa }, ptr_ty, value_ty),
4 => try self.store(ptr, .{ .immediate = 0xaaaaaaaa }, ptr_ty, value_ty),
8 => try self.store(ptr, .{ .immediate = 0xaaaaaaaaaaaaaaaa }, ptr_ty, value_ty),
else => try self.genInlineMemset(
ptr,
.{ .immediate = 0xaa },
.{ .immediate = abi_size },
.{},
),
},
.undef => {
if (!self.wantSafety()) return; // The already existing value will do just fine.
switch (abi_size) {
1 => try self.store(ptr, .{ .immediate = 0xaa }, ptr_ty, value_ty),
2 => try self.store(ptr, .{ .immediate = 0xaaaa }, ptr_ty, value_ty),
4 => try self.store(ptr, .{ .immediate = 0xaaaaaaaa }, ptr_ty, value_ty),
8 => try self.store(ptr, .{ .immediate = 0xaaaaaaaaaaaaaaaa }, ptr_ty, value_ty),
else => try self.genInlineMemset(ptr, .{ .immediate = 0xaa }, .{ .immediate = abi_size }, .{}),
}
},
.immediate => |imm| {
switch (abi_size) {
1, 2, 4 => {
const immediate = if (value_ty.isSignedInt())
Immediate.s(@intCast(i32, @bitCast(i64, imm)))
else
Immediate.u(@truncate(u32, imm));
try self.asmMemoryImmediate(.mov, Memory.sib(
Memory.PtrSize.fromSize(abi_size),
.{ .base = reg.to64() },
), immediate);
},
8 => {
// TODO: optimization: if the imm is only using the lower
// 4 bytes and can be sign extended we can use a normal mov
// with indirect addressing (mov [reg64], imm32).

// movabs does not support indirect register addressing
// so we need an extra register and an extra mov.
const tmp_reg = try self.copyToTmpRegister(value_ty, value);
return self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
},
else => {
return self.fail("TODO implement set pointee with immediate of ABI size {d}", .{abi_size});
},
}
},
.register => |src_reg| {
try self.genInlineMemcpyRegisterRegister(value_ty, reg, src_reg, 0);
.immediate => |imm| switch (self.regBitSize(value_ty)) {
8 => try self.asmMemoryImmediate(
.mov,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg.to64() }),
if (math.cast(i8, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u8, imm)),
),
16 => try self.asmMemoryImmediate(
.mov,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg.to64() }),
if (math.cast(i16, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u16, imm)),
),
32 => try self.asmMemoryImmediate(
.mov,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg.to64() }),
if (math.cast(i32, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u32, imm)),
),
64 => if (math.cast(i32, @bitCast(i64, imm))) |small|
try self.asmMemoryImmediate(
.mov,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg.to64() }),
Immediate.s(small),
)
else
try self.asmMemoryRegister(
.mov,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = reg.to64() }),
registerAlias(try self.copyToTmpRegister(value_ty, value), abi_size),
),
else => unreachable,
},
.register => |src_reg| try self.genInlineMemcpyRegisterRegister(
value_ty,
reg,
src_reg,
0,
),
.register_overflow => |ro| {
const ro_reg_lock = self.register_manager.lockReg(ro.reg);
defer if (ro_reg_lock) |lock| self.register_manager.unlockReg(lock);
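The TODO comments in this hunk describe the constraint behind the 8-byte case: a `mov` to memory accepts at most a sign-extended 32-bit immediate, and the 64-bit `movabs` form only writes to a register, so a full 64-bit constant must be staged through a temporary register. In AT&T syntax the two shapes are roughly (illustrative only):

    # fits in a sign-extended imm32: single instruction
    movq $-1, (%rax)

    # does not fit: materialize in a scratch register first
    movabsq $0x123456789abcdef0, %rcx
    movq %rcx, (%rax)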
@@ -3732,23 +3745,18 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
-@intCast(i32, overflow_bit_offset),
);
},
.linker_load,
.memory,
.stack_offset,
=> {
if (abi_size <= 8) {
const tmp_reg = try self.copyToTmpRegister(value_ty, value);
return self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
}

try self.genInlineMemcpy(.{ .stack_offset = 0 }, value, .{ .immediate = abi_size }, .{
.source_stack_base = .rbp,
.dest_stack_base = reg.to64(),
});
},
.linker_load, .memory, .stack_offset => if (abi_size <= 8) {
const tmp_reg = try self.copyToTmpRegister(value_ty, value);
try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
} else try self.genInlineMemcpy(
.{ .stack_offset = 0 },
value,
.{ .immediate = abi_size },
.{ .source_stack_base = .rbp, .dest_stack_base = reg.to64() },
),
.ptr_stack_offset => {
const tmp_reg = try self.copyToTmpRegister(value_ty, value);
return self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
try self.store(ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
},
}
},
@@ -3764,8 +3772,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
defer self.register_manager.unlockReg(addr_reg_lock);

try self.loadMemPtrIntoRegister(addr_reg, ptr_ty, ptr);

// To get the actual address of the value we want to modify we have to go through the GOT
// Load the pointer, which is stored in memory
try self.asmRegisterMemory(
.mov,
addr_reg.to64(),
@@ -3773,62 +3780,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type
);

const new_ptr = MCValue{ .register = addr_reg.to64() };

switch (value) {
.immediate => |imm| {
if (abi_size > 8) {
return self.fail("TODO saving imm to memory for abi_size {}", .{abi_size});
}

if (abi_size == 8) {
// TODO
const top_bits: u32 = @intCast(u32, imm >> 32);
const can_extend = if (value_ty.isUnsignedInt())
(top_bits == 0) and (imm & 0x8000_0000) == 0
else
top_bits == 0xffff_ffff;

if (!can_extend) {
return self.fail("TODO imm64 would get incorrectly sign extended", .{});
}
}
try self.asmMemoryImmediate(
.mov,
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = addr_reg.to64() }),
Immediate.u(@intCast(u32, imm)),
);
},
.register => {
return self.store(new_ptr, value, ptr_ty, value_ty);
},
.linker_load, .memory => {
if (abi_size <= 8) {
const tmp_reg = try self.register_manager.allocReg(null, gp);
const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
defer self.register_manager.unlockReg(tmp_reg_lock);

try self.loadMemPtrIntoRegister(tmp_reg, value_ty, value);
try self.asmRegisterMemory(
.mov,
tmp_reg,
Memory.sib(.qword, .{ .base = tmp_reg }),
);

return self.store(new_ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
}

try self.genInlineMemcpy(new_ptr, value, .{ .immediate = abi_size }, .{});
},
.stack_offset => {
if (abi_size <= 8) {
const tmp_reg = try self.copyToTmpRegister(value_ty, value);
return self.store(new_ptr, .{ .register = tmp_reg }, ptr_ty, value_ty);
}

try self.genInlineMemcpy(new_ptr, value, .{ .immediate = abi_size }, .{});
},
else => return self.fail("TODO implement storing {} to MCValue.memory", .{value}),
}
try self.store(new_ptr, value, ptr_ty, value_ty);
},
}
}
@@ -4886,41 +4838,39 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s
registerAlias(src_reg, abi_size),
),
},
.immediate => |imm| {
switch (self.regBitSize(ty)) {
8 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
if (math.cast(i8, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u8, imm)),
),
16 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
if (math.cast(i16, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u16, imm)),
),
32 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
if (math.cast(i32, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u32, imm)),
),
64 => if (math.cast(i32, @bitCast(i64, imm))) |small|
try self.asmRegisterImmediate(mir_tag, dst_alias, Immediate.s(small))
.immediate => |imm| switch (self.regBitSize(ty)) {
8 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
if (math.cast(i8, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
try self.asmRegisterRegister(mir_tag, dst_alias, registerAlias(
try self.copyToTmpRegister(ty, src_mcv),
abi_size,
)),
else => unreachable,
}
Immediate.u(@intCast(u8, imm)),
),
16 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
if (math.cast(i16, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u16, imm)),
),
32 => try self.asmRegisterImmediate(
mir_tag,
dst_alias,
if (math.cast(i32, @bitCast(i64, imm))) |small|
Immediate.s(small)
else
Immediate.u(@intCast(u32, imm)),
),
64 => if (math.cast(i32, @bitCast(i64, imm))) |small|
try self.asmRegisterImmediate(mir_tag, dst_alias, Immediate.s(small))
else
try self.asmRegisterRegister(mir_tag, dst_alias, registerAlias(
try self.copyToTmpRegister(ty, src_mcv),
abi_size,
)),
else => unreachable,
},
.memory, .linker_load, .eflags => {
assert(abi_size <= 8);
@@ -4930,13 +4880,11 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, ty: Type, dst_mcv: MCValue, s
const reg = try self.copyToTmpRegister(ty, src_mcv);
return self.genBinOpMir(mir_tag, ty, dst_mcv, .{ .register = reg });
},
.stack_offset => |off| {
try self.asmRegisterMemory(
mir_tag,
registerAlias(dst_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
);
},
.stack_offset => |off| try self.asmRegisterMemory(
mir_tag,
registerAlias(dst_reg, abi_size),
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .rbp, .disp = -off }),
),
}
},
.memory, .linker_load, .stack_offset => {
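This hunk applies the same encoding rule as the `store` rewrite earlier: try `math.cast` to the signed form of the operand width, and fall back to an unsigned encoding, or for 64-bit operands to a temporary register, when the value does not fit. A standalone sketch of the 32-bit case, assuming a simplified `Immediate` (illustrative, with era-appropriate builtins):

    const std = @import("std");
    const math = std.math;

    const Immediate = union(enum) { signed: i32, unsigned: u32 };

    // Prefer the sign-extended encoding when the bit pattern fits in i32.
    fn pick32(imm: u64) Immediate {
        return if (math.cast(i32, @bitCast(i64, imm))) |small|
            .{ .signed = small }
        else
            .{ .unsigned = @truncate(u32, imm) };
    }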
@@ -5654,7 +5602,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {

const rhs_mcv = try self.resolveInst(bin_op.rhs);
const rhs_lock = switch (rhs_mcv) {
.register => |reg| self.register_manager.lockRegAssumeUnused(reg),
.register => |reg| self.register_manager.lockReg(reg),
else => null,
};
defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
@@ -5702,9 +5650,62 @@ fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {

fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
_ = operand;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airCmpLtErrorsLen for {}", .{self.target.cpu.arch});
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const addr_reg = try self.register_manager.allocReg(null, gp);
const addr_lock = self.register_manager.lockRegAssumeUnused(addr_reg);
defer self.register_manager.unlockReg(addr_lock);

if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const atom_index = try elf_file.getOrCreateAtomForLazySymbol(
.{ .kind = .const_data, .ty = Type.anyerror },
4, // dword alignment
);
const got_addr = elf_file.getAtom(atom_index).getOffsetTableAddress(elf_file);
try self.asmRegisterMemory(.mov, addr_reg.to64(), Memory.sib(.qword, .{
.base = .ds,
.disp = @intCast(i32, got_addr),
}));
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const atom_index = try coff_file.getOrCreateAtomForLazySymbol(
.{ .kind = .const_data, .ty = Type.anyerror },
4, // dword alignment
);
const sym_index = coff_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.usize, addr_reg, .{ .linker_load = .{
.type = .got,
.sym_index = sym_index,
} });
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const atom_index = try macho_file.getOrCreateAtomForLazySymbol(
.{ .kind = .const_data, .ty = Type.anyerror },
4, // dword alignment
);
const sym_index = macho_file.getAtom(atom_index).getSymbolIndex().?;
try self.genSetReg(Type.usize, addr_reg, .{ .linker_load = .{
.type = .got,
.sym_index = sym_index,
} });
} else {
return self.fail("TODO implement airErrorName for x86_64 {s}", .{@tagName(self.bin_file.tag)});
}

try self.spillEflagsIfOccupied();
self.eflags_inst = inst;

const op_ty = self.air.typeOf(un_op);
const op_abi_size = @intCast(u32, op_ty.abiSize(self.target.*));
const op_mcv = try self.resolveInst(un_op);
const dst_reg = switch (op_mcv) {
.register => |reg| reg,
else => try self.copyToTmpRegister(op_ty, op_mcv),
};
try self.asmRegisterMemory(
.cmp,
registerAlias(dst_reg, op_abi_size),
Memory.sib(Memory.PtrSize.fromSize(op_abi_size), .{ .base = addr_reg }),
);
break :result .{ .eflags = .b };
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
}
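The lowering loads the address of the lazily emitted `anyerror` data, whose first `u32` is the total error count after the `generateLazySymbol` change later in this diff, compares the operand against it, and hands back the `.b` (unsigned below) flag as the result. Roughly (illustrative AT&T sketch, not the emitted MIR):

    # addr_reg <- pointer to the lazy const_data blob (via GOT or linker_load)
    mov got_entry, %rax
    # operand < errors_len, consumed as the "below" condition
    cmp (%rax), %edx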
@@ -6184,7 +6185,28 @@ fn airLoop(self: *Self, inst: Air.Inst.Index) !void {
const loop = self.air.extraData(Air.Block, ty_pl.payload);
const body = self.air.extra[loop.end..][0..loop.data.body_len];
const jmp_target = @intCast(u32, self.mir_instructions.len);
try self.genBody(body);

{
try self.branch_stack.append(.{});
errdefer _ = self.branch_stack.pop();

try self.genBody(body);
}

var branch = self.branch_stack.pop();
defer branch.deinit(self.gpa);

log.debug("airLoop: %{d}", .{inst});
log.debug("Upper branches:", .{});
for (self.branch_stack.items) |bs| {
log.debug("{}", .{bs.fmtDebug()});
}
log.debug("Loop branch: {}", .{branch.fmtDebug()});

var dummy_branch = Branch{};
defer dummy_branch.deinit(self.gpa);
try self.canonicaliseBranches(true, &dummy_branch, &branch, true, false);

_ = try self.asmJmpReloc(jmp_target);
return self.finishAirBookkeeping();
}
@@ -6570,7 +6592,8 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
return self.fail("unrecognized constraint: '{s}'", .{constraint});
args.putAssumeCapacity(name, mcv);
switch (mcv) {
.register => |reg| _ = self.register_manager.lockRegAssumeUnused(reg),
.register => |reg| _ = if (RegisterManager.indexOfRegIntoTracked(reg)) |_|
self.register_manager.lockRegAssumeUnused(reg),
else => {},
}
if (output == .none) result = mcv;
@@ -6609,70 +6632,139 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
}

const asm_source = mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len];
var line_it = mem.tokenize(u8, asm_source, "\n\r");
var line_it = mem.tokenize(u8, asm_source, "\n\r;");
while (line_it.next()) |line| {
var mnem_it = mem.tokenize(u8, line, " \t");
const mnem = mnem_it.next() orelse continue;
if (mem.startsWith(u8, mnem, "#")) continue;
var arg_it = mem.tokenize(u8, mnem_it.rest(), ", ");
if (std.ascii.eqlIgnoreCase(mnem, "syscall")) {
if (arg_it.next()) |trailing| if (!mem.startsWith(u8, trailing, "#"))
return self.fail("Too many operands: '{s}'", .{line});
try self.asmOpOnly(.syscall);
} else if (std.ascii.eqlIgnoreCase(mnem, "push")) {
const src = arg_it.next() orelse
return self.fail("Not enough operands: '{s}'", .{line});
if (arg_it.next()) |trailing| if (!mem.startsWith(u8, trailing, "#"))
return self.fail("Too many operands: '{s}'", .{line});
if (mem.startsWith(u8, src, "$")) {
const imm = std.fmt.parseInt(u32, src["$".len..], 0) catch
return self.fail("Invalid immediate: '{s}'", .{src});
try self.asmImmediate(.push, Immediate.u(imm));
} else if (mem.startsWith(u8, src, "%%")) {
const reg = parseRegName(src["%%".len..]) orelse
return self.fail("Invalid register: '{s}'", .{src});
try self.asmRegister(.push, reg);
} else return self.fail("Unsupported operand: '{s}'", .{src});
} else if (std.ascii.eqlIgnoreCase(mnem, "pop")) {
const dst = arg_it.next() orelse
return self.fail("Not enough operands: '{s}'", .{line});
if (arg_it.next()) |trailing| if (!mem.startsWith(u8, trailing, "#"))
return self.fail("Too many operands: '{s}'", .{line});
if (mem.startsWith(u8, dst, "%%")) {
const reg = parseRegName(dst["%%".len..]) orelse
return self.fail("Invalid register: '{s}'", .{dst});
try self.asmRegister(.pop, reg);
} else return self.fail("Unsupported operand: '{s}'", .{dst});
} else if (std.ascii.eqlIgnoreCase(mnem, "movq")) {
const src = arg_it.next() orelse
return self.fail("Not enough operands: '{s}'", .{line});
const dst = arg_it.next() orelse
return self.fail("Not enough operands: '{s}'", .{line});
if (arg_it.next()) |trailing| if (!mem.startsWith(u8, trailing, "#"))
return self.fail("Too many operands: '{s}'", .{line});
if (mem.startsWith(u8, src, "%%")) {
const colon = mem.indexOfScalarPos(u8, src, "%%".len + 2, ':');
const src_reg = parseRegName(src["%%".len .. colon orelse src.len]) orelse
return self.fail("Invalid register: '{s}'", .{src});
const mnem_str = mnem_it.next() orelse continue;
if (mem.startsWith(u8, mnem_str, "#")) continue;

const mnem_size: ?Memory.PtrSize = if (mem.endsWith(u8, mnem_str, "b"))
.byte
else if (mem.endsWith(u8, mnem_str, "w"))
.word
else if (mem.endsWith(u8, mnem_str, "l"))
.dword
else if (mem.endsWith(u8, mnem_str, "q"))
.qword
else
null;
const mnem = std.meta.stringToEnum(Mir.Inst.Tag, mnem_str) orelse
(if (mnem_size) |_|
std.meta.stringToEnum(Mir.Inst.Tag, mnem_str[0 .. mnem_str.len - 1])
else
null) orelse return self.fail("Invalid mnemonic: '{s}'", .{mnem_str});

var op_it = mem.tokenize(u8, mnem_it.rest(), ",");
var ops = [1]encoder.Instruction.Operand{.none} ** 4;
for (&ops) |*op| {
const op_str = mem.trim(u8, op_it.next() orelse break, " \t");
if (mem.startsWith(u8, op_str, "#")) break;
if (mem.startsWith(u8, op_str, "%%")) {
const colon = mem.indexOfScalarPos(u8, op_str, "%%".len + 2, ':');
const reg = parseRegName(op_str["%%".len .. colon orelse op_str.len]) orelse
return self.fail("Invalid register: '{s}'", .{op_str});
if (colon) |colon_pos| {
const src_disp = std.fmt.parseInt(i32, src[colon_pos + 1 ..], 0) catch
return self.fail("Invalid immediate: '{s}'", .{src});
if (mem.startsWith(u8, dst, "%[") and mem.endsWith(u8, dst, "]")) {
switch (args.get(dst["%[".len .. dst.len - "]".len]) orelse
return self.fail("no matching constraint for: '{s}'", .{dst})) {
.register => |dst_reg| try self.asmRegisterMemory(
.mov,
dst_reg,
Memory.sib(.qword, .{ .base = src_reg, .disp = src_disp }),
),
else => return self.fail("Invalid constraint: '{s}'", .{dst}),
}
} else return self.fail("Unsupported operand: '{s}'", .{dst});
} else return self.fail("Unsupported operand: '{s}'", .{src});
}
} else {
return self.fail("Unsupported instruction: '{s}'", .{mnem});
}
const disp = std.fmt.parseInt(i32, op_str[colon_pos + 1 ..], 0) catch
return self.fail("Invalid displacement: '{s}'", .{op_str});
op.* = .{ .mem = Memory.sib(
mnem_size orelse return self.fail("Unknown size: '{s}'", .{op_str}),
.{ .base = reg, .disp = disp },
) };
} else {
if (mnem_size) |size| if (reg.bitSize() != size.bitSize())
return self.fail("Invalid register size: '{s}'", .{op_str});
op.* = .{ .reg = reg };
}
} else if (mem.startsWith(u8, op_str, "%[") and mem.endsWith(u8, op_str, "]")) {
switch (args.get(op_str["%[".len .. op_str.len - "]".len]) orelse
return self.fail("No matching constraint: '{s}'", .{op_str})) {
.register => |reg| op.* = .{ .reg = reg },
else => return self.fail("Invalid constraint: '{s}'", .{op_str}),
}
} else if (mem.startsWith(u8, op_str, "$")) {
if (std.fmt.parseInt(i32, op_str["$".len..], 0)) |s| {
if (mnem_size) |size| {
const max = @as(u64, std.math.maxInt(u64)) >>
@intCast(u6, 64 - (size.bitSize() - 1));
if ((if (s < 0) ~s else s) > max)
return self.fail("Invalid immediate size: '{s}'", .{op_str});
}
op.* = .{ .imm = Immediate.s(s) };
} else |_| if (std.fmt.parseInt(u64, op_str["$".len..], 0)) |u| {
if (mnem_size) |size| {
const max = @as(u64, std.math.maxInt(u64)) >>
@intCast(u6, 64 - size.bitSize());
if (u > max)
return self.fail("Invalid immediate size: '{s}'", .{op_str});
}
op.* = .{ .imm = Immediate.u(u) };
} else |_| return self.fail("Invalid immediate: '{s}'", .{op_str});
} else return self.fail("Invalid operand: '{s}'", .{op_str});
} else if (op_it.next()) |op_str| return self.fail("Extra operand: '{s}'", .{op_str});

(switch (ops[0]) {
.none => self.asmOpOnly(mnem),
.reg => |reg0| switch (ops[1]) {
.none => self.asmRegister(mnem, reg0),
.reg => |reg1| switch (ops[2]) {
.none => self.asmRegisterRegister(mnem, reg1, reg0),
.reg => |reg2| switch (ops[3]) {
.none => self.asmRegisterRegisterRegister(mnem, reg2, reg1, reg0),
else => error.InvalidInstruction,
},
.mem => |mem2| switch (ops[3]) {
.none => self.asmMemoryRegisterRegister(mnem, mem2, reg1, reg0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
.mem => |mem1| switch (ops[2]) {
.none => self.asmMemoryRegister(mnem, mem1, reg0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
.mem => |mem0| switch (ops[1]) {
.none => self.asmMemory(mnem, mem0),
.reg => |reg1| switch (ops[2]) {
.none => self.asmRegisterMemory(mnem, reg1, mem0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
.imm => |imm0| switch (ops[1]) {
.none => self.asmImmediate(mnem, imm0),
.reg => |reg1| switch (ops[2]) {
.none => self.asmRegisterImmediate(mnem, reg1, imm0),
.reg => |reg2| switch (ops[3]) {
.none => self.asmRegisterRegisterImmediate(mnem, reg2, reg1, imm0),
else => error.InvalidInstruction,
},
.mem => |mem2| switch (ops[3]) {
.none => self.asmMemoryRegisterImmediate(mnem, mem2, reg1, imm0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
.mem => |mem1| switch (ops[2]) {
.none => self.asmMemoryImmediate(mnem, mem1, imm0),
else => error.InvalidInstruction,
},
else => error.InvalidInstruction,
},
}) catch |err| switch (err) {
error.InvalidInstruction => return self.fail(
"Invalid instruction: '{s} {s} {s} {s} {s}'",
.{
@tagName(mnem),
@tagName(ops[0]),
@tagName(ops[1]),
@tagName(ops[2]),
@tagName(ops[3]),
},
),
else => |e| return e,
};
}
}
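With this generic parser, the backend is no longer limited to the previously special-cased `syscall`/`push`/`pop`/`movq` forms: any mnemonic that maps to a `Mir.Inst.Tag`, optionally carrying an AT&T size suffix, can take register (`%%reg`), register-indirect (`%%reg:disp`), constraint (`%[name]`), and immediate (`$imm`) operands. A hypothetical call site the new parser accepts but the old one rejected:

    // Illustrative only: suffixed mnemonics, displacements, and immediates
    // are all handled by the generic operand parser.
    const top = asm volatile (
        \\ pushq $0
        \\ movl $42, %%eax
        \\ movq %%rsp:8, %[ret]
        \\ popq %%rcx
        : [ret] "=r" (-> u64),
        :
        : "rcx", "memory"
    );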
@@ -7988,12 +8080,12 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
try self.asmRegisterMemory(.mov, start_reg.to32(), Memory.sib(.dword, .{
.base = addr_reg.to64(),
.scale_index = .{ .scale = 4, .index = err_reg.to64() },
.disp = 0,
.disp = 4,
}));
try self.asmRegisterMemory(.mov, end_reg.to32(), Memory.sib(.dword, .{
.base = addr_reg.to64(),
.scale_index = .{ .scale = 4, .index = err_reg.to64() },
.disp = 4,
.disp = 8,
}));
try self.asmRegisterRegister(.sub, end_reg.to32(), start_reg.to32());
try self.asmRegisterMemory(.lea, start_reg.to64(), Memory.sib(.byte, .{
@@ -124,13 +124,17 @@ pub fn generateLazySymbol(

if (lazy_sym.kind == .const_data and lazy_sym.ty.isAnyError()) {
const err_names = mod.error_name_list.items;
try code.resize(err_names.len * 4);
for (err_names, 0..) |err_name, index| {
mem.writeInt(u32, code.items[index * 4 ..][0..4], @intCast(u32, code.items.len), endian);
mem.writeInt(u32, try code.addManyAsArray(4), @intCast(u32, err_names.len), endian);
var offset = code.items.len;
try code.resize((1 + err_names.len + 1) * 4);
for (err_names) |err_name| {
mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian);
offset += 4;
try code.ensureUnusedCapacity(err_name.len + 1);
code.appendSliceAssumeCapacity(err_name);
code.appendAssumeCapacity(0);
}
mem.writeInt(u32, code.items[offset..][0..4], @intCast(u32, code.items.len), endian);
return Result.ok;
} else return .{ .fail = try ErrorMsg.create(
bin_file.allocator,
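The new blob layout is why `airErrorName` above shifts its displacements from 0/4 to 4/8: everything after the new leading count moves over by one `u32`. For `len` errors the generated `const_data` now reads:

    u32            error count (len)
    u32 * len      byte offset of each error name within the blob
    u32            end offset (so name i's length is offset[i + 1] - offset[i])
    bytes          the names themselves, null-terminated, back to back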
26 src/link.zig
@@ -1106,23 +1106,21 @@ pub const File = struct {
};

pub const LazySymbol = struct {
kind: enum { code, const_data },
pub const Kind = enum { code, const_data };

kind: Kind,
ty: Type,

pub const Context = struct {
mod: *Module,
pub fn initDecl(kind: Kind, decl: Module.Decl.OptionalIndex, mod: *Module) LazySymbol {
return .{ .kind = kind, .ty = if (decl.unwrap()) |decl_index|
mod.declPtr(decl_index).val.castTag(.ty).?.data
else
Type.anyerror };
}

pub fn hash(ctx: @This(), sym: LazySymbol) u32 {
var hasher = std.hash.Wyhash.init(0);
std.hash.autoHash(&hasher, sym.kind);
sym.ty.hashWithHasher(&hasher, ctx.mod);
return @truncate(u32, hasher.final());
}

pub fn eql(ctx: @This(), lhs: LazySymbol, rhs: LazySymbol, _: usize) bool {
return lhs.kind == rhs.kind and lhs.ty.eql(rhs.ty, ctx.mod);
}
};
pub fn getDecl(self: LazySymbol) Module.Decl.OptionalIndex {
return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull());
}
};

pub const C = @import("link/C.zig");
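The `Context` with `hash`/`eql` disappears because the lazy-symbol tables below are now keyed by owner decl rather than by the symbol itself; `initDecl` and `getDecl` convert between the two views. A small usage sketch, assuming `decl: Module.Decl.OptionalIndex` and `mod: *Module` in scope:

    // Rebuild the .code lazy symbol for a decl, then recover the table key.
    const sym = link.File.LazySymbol.initDecl(.code, decl, mod);
    const key = sym.getDecl();
    // key round-trips to decl; for .none, sym.ty falls back to Type.anyerror.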
@@ -145,16 +145,11 @@ const Section = struct {
free_list: std.ArrayListUnmanaged(Atom.Index) = .{},
};

const LazySymbolTable = std.ArrayHashMapUnmanaged(
link.File.LazySymbol,
LazySymbolMetadata,
link.File.LazySymbol.Context,
true,
);
const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, LazySymbolMetadata);

const LazySymbolMetadata = struct {
atom: Atom.Index,
section: u16,
text_atom: ?Atom.Index = null,
rdata_atom: ?Atom.Index = null,
alignment: u32,
};
@@ -1176,10 +1171,28 @@ pub fn updateDecl(self: *Coff, module: *Module, decl_index: Module.Decl.Index) !
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}

fn updateLazySymbol(
fn updateLazySymbol(self: *Coff, decl: Module.Decl.OptionalIndex, metadata: LazySymbolMetadata) !void {
const mod = self.base.options.module.?;
if (metadata.text_atom) |atom| try self.updateLazySymbolAtom(
link.File.LazySymbol.initDecl(.code, decl, mod),
atom,
self.text_section_index.?,
metadata.alignment,
);
if (metadata.rdata_atom) |atom| try self.updateLazySymbolAtom(
link.File.LazySymbol.initDecl(.const_data, decl, mod),
atom,
self.rdata_section_index.?,
metadata.alignment,
);
}

fn updateLazySymbolAtom(
self: *Coff,
lazy_sym: link.File.LazySymbol,
lazy_metadata: LazySymbolMetadata,
sym: link.File.LazySymbol,
atom_index: Atom.Index,
section_index: u16,
required_alignment: u32,
) !void {
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
@@ -1188,16 +1201,15 @@ fn updateLazySymbol(
defer code_buffer.deinit();

const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(lazy_sym.kind),
lazy_sym.ty.fmt(mod),
@tagName(sym.kind),
sym.ty.fmt(mod),
});
defer gpa.free(name);

const atom_index = lazy_metadata.atom;
const atom = self.getAtomPtr(atom_index);
const local_sym_index = atom.getSymbolIndex().?;

const src = if (lazy_sym.ty.getOwnerDeclOrNull()) |owner_decl|
const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl|
mod.declPtr(owner_decl).srcLoc()
else
Module.SrcLoc{
@@ -1205,14 +1217,9 @@ fn updateLazySymbol(
.parent_decl_node = undefined,
.lazy = .unneeded,
};
const res = try codegen.generateLazySymbol(
&self.base,
src,
lazy_sym,
&code_buffer,
.none,
.{ .parent_atom_index = local_sym_index },
);
const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{
.parent_atom_index = local_sym_index,
});
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
@@ -1221,11 +1228,10 @@ fn updateLazySymbol(
},
};

const required_alignment = lazy_metadata.alignment;
const code_len = @intCast(u32, code.len);
const symbol = atom.getSymbolPtr(self);
try self.setSymbolName(symbol, name);
symbol.section_number = @intToEnum(coff.SectionNumber, lazy_metadata.section + 1);
symbol.section_number = @intToEnum(coff.SectionNumber, section_index + 1);
symbol.type = .{ .complex_type = .NULL, .base_type = .NULL };

const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
@@ -1250,24 +1256,18 @@ fn updateLazySymbol(

pub fn getOrCreateAtomForLazySymbol(
self: *Coff,
lazy_sym: link.File.LazySymbol,
sym: link.File.LazySymbol,
alignment: u32,
) !Atom.Index {
const gop = try self.lazy_syms.getOrPutContext(self.base.allocator, lazy_sym, .{
.mod = self.base.options.module.?,
});
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl());
errdefer _ = self.lazy_syms.pop();
if (!gop.found_existing) {
gop.value_ptr.* = .{
.atom = try self.createAtom(),
.section = switch (lazy_sym.kind) {
.code => self.text_section_index.?,
.const_data => self.rdata_section_index.?,
},
.alignment = alignment,
};
}
return gop.value_ptr.atom;
if (!gop.found_existing) gop.value_ptr.* = .{ .alignment = alignment };
const atom = switch (sym.kind) {
.code => &gop.value_ptr.text_atom,
.const_data => &gop.value_ptr.rdata_atom,
};
if (atom.* == null) atom.* = try self.createAtom();
return atom.*.?;
}

pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.Index {
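Since the table key is now the owner decl, one entry can own up to two atoms, one per symbol kind, created lazily on first request. The shape of the lookup, in sketch form (mirrors the code above, names assumed):

    const slot = switch (sym.kind) {
        .code => &gop.value_ptr.text_atom,
        .const_data => &gop.value_ptr.rdata_atom,
    };
    if (slot.* == null) slot.* = try self.createAtom();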
@@ -1600,17 +1600,13 @@ pub fn flushModule(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Nod
sub_prog_node.activate();
defer sub_prog_node.end();

{
var lazy_it = self.lazy_syms.iterator();
while (lazy_it.next()) |lazy_entry| {
self.updateLazySymbol(
lazy_entry.key_ptr.*,
lazy_entry.value_ptr.*,
) catch |err| switch (err) {
error.CodegenFail => return error.FlushFailure,
else => |e| return e,
};
}
// Most lazy symbols can be updated when the corresponding decl is,
// so we only have to worry about the one without an associated decl.
if (self.lazy_syms.get(.none)) |metadata| {
self.updateLazySymbol(.none, metadata) catch |err| switch (err) {
error.CodegenFail => return error.FlushFailure,
else => |e| return e,
};
}

const gpa = self.base.allocator;
@@ -2489,6 +2485,7 @@ const Module = @import("../Module.zig");
const Object = @import("Coff/Object.zig");
const Relocation = @import("Coff/Relocation.zig");
const StringTable = @import("strtab.zig").StringTable;
const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");

pub const base_tag: link.File.Tag = .coff;
201 src/link/Elf.zig
@@ -64,8 +64,8 @@ const Section = struct {
};

const LazySymbolMetadata = struct {
atom: Atom.Index,
shdr: u16,
text_atom: ?Atom.Index = null,
rodata_atom: ?Atom.Index = null,
alignment: u32,
};
@@ -106,7 +106,12 @@ shdr_table_offset: ?u64 = null,
/// Stored in native-endian format, depending on target endianness needs to be bswapped on read/write.
/// Same order as in the file.
program_headers: std.ArrayListUnmanaged(elf.Elf64_Phdr) = .{},
phdr_table_offset: ?u64 = null,
/// The index into the program headers of the PT_PHDR program header
phdr_table_index: ?u16 = null,
/// The index into the program headers of the PT_LOAD program header containing the phdr
/// Most linkers would merge this with phdr_load_ro_index,
/// but incremental linking means we can't ensure they are consecutive.
phdr_table_load_index: ?u16 = null,
/// The index into the program headers of a PT_LOAD program header with Read and Execute flags
phdr_load_re_index: ?u16 = null,
/// The index into the program headers of the global offset table.
@@ -203,7 +208,7 @@ relocs: RelocTable = .{},

const RelocTable = std.AutoHashMapUnmanaged(Atom.Index, std.ArrayListUnmanaged(Atom.Reloc));
const UnnamedConstTable = std.AutoHashMapUnmanaged(Module.Decl.Index, std.ArrayListUnmanaged(Atom.Index));
const LazySymbolTable = std.ArrayHashMapUnmanaged(File.LazySymbol, LazySymbolMetadata, File.LazySymbol.Context, true);
const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, LazySymbolMetadata);

/// When allocating, the ideal_capacity is calculated by
/// actual_capacity + (actual_capacity / ideal_factor)
@@ -396,16 +401,6 @@ fn detectAllocCollision(self: *Elf, start: u64, size: u64) ?u64 {
}
}

if (self.phdr_table_offset) |off| {
const phdr_size: u64 = if (small_ptr) @sizeOf(elf.Elf32_Phdr) else @sizeOf(elf.Elf64_Phdr);
const tight_size = self.sections.slice().len * phdr_size;
const increased_size = padToIdeal(tight_size);
const test_end = off + increased_size;
if (end > off and start < test_end) {
return test_end;
}
}

for (self.sections.items(.shdr)) |section| {
const increased_size = padToIdeal(section.sh_size);
const test_end = section.sh_offset + increased_size;
@@ -430,9 +425,6 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 {
if (self.shdr_table_offset) |off| {
if (off > start and off < min_pos) min_pos = off;
}
if (self.phdr_table_offset) |off| {
if (off > start and off < min_pos) min_pos = off;
}
for (self.sections.items(.shdr)) |section| {
if (section.sh_offset <= start) continue;
if (section.sh_offset < min_pos) min_pos = section.sh_offset;
@@ -462,6 +454,43 @@ pub fn populateMissingMetadata(self: *Elf) !void {
};
const ptr_size: u8 = self.ptrWidthBytes();

if (self.phdr_table_index == null) {
self.phdr_table_index = @intCast(u16, self.program_headers.items.len);
const p_align: u16 = switch (self.ptr_width) {
.p32 => @alignOf(elf.Elf32_Phdr),
.p64 => @alignOf(elf.Elf64_Phdr),
};
try self.program_headers.append(gpa, .{
.p_type = elf.PT_PHDR,
.p_offset = 0,
.p_filesz = 0,
.p_vaddr = 0,
.p_paddr = 0,
.p_memsz = 0,
.p_align = p_align,
.p_flags = elf.PF_R,
});
self.phdr_table_dirty = true;
}

if (self.phdr_table_load_index == null) {
self.phdr_table_load_index = @intCast(u16, self.program_headers.items.len);
// TODO Same as for GOT
const phdr_addr: u64 = if (self.base.options.target.cpu.arch.ptrBitWidth() >= 32) 0x1000000 else 0x1000;
const p_align = self.page_size;
try self.program_headers.append(gpa, .{
.p_type = elf.PT_LOAD,
.p_offset = 0,
.p_filesz = 0,
.p_vaddr = phdr_addr,
.p_paddr = phdr_addr,
.p_memsz = 0,
.p_align = p_align,
.p_flags = elf.PF_R,
});
self.phdr_table_dirty = true;
}

if (self.phdr_load_re_index == null) {
self.phdr_load_re_index = @intCast(u16, self.program_headers.items.len);
const file_size = self.base.options.program_code_size_hint;
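`PT_PHDR` only names where the program header table lives; ELF loaders additionally require that range to be covered by a `PT_LOAD` segment, which is why a dedicated read-only load segment is appended immediately after it. As the doc comment above notes, most linkers would fold this into `phdr_load_ro_index`, but incremental linking cannot guarantee the two stay consecutive. The flush code later in this diff keeps them consistent roughly as:

    // Invariants maintained in flushModule (sketch):
    //   phdr_table_load.p_offset = alignBackward(phdr_table.p_offset, p_align)
    //   phdr_table.p_vaddr = phdr_table_load.p_vaddr + (offset delta)
    //   the load segment's filesz/memsz cover the entire header table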
@@ -849,19 +878,6 @@ pub fn populateMissingMetadata(self: *Elf) !void {
self.shdr_table_dirty = true;
}

const phsize: u64 = switch (self.ptr_width) {
.p32 => @sizeOf(elf.Elf32_Phdr),
.p64 => @sizeOf(elf.Elf64_Phdr),
};
const phalign: u16 = switch (self.ptr_width) {
.p32 => @alignOf(elf.Elf32_Phdr),
.p64 => @alignOf(elf.Elf64_Phdr),
};
if (self.phdr_table_offset == null) {
self.phdr_table_offset = self.findFreeSpace(self.program_headers.items.len * phsize, phalign);
self.phdr_table_dirty = true;
}

{
// Iterate over symbols, populating free_list and last_text_block.
if (self.local_symbols.items.len != 1) {
@@ -1021,17 +1037,13 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
sub_prog_node.activate();
defer sub_prog_node.end();

{
var lazy_it = self.lazy_syms.iterator();
while (lazy_it.next()) |lazy_entry| {
self.updateLazySymbol(
lazy_entry.key_ptr.*,
lazy_entry.value_ptr.*,
) catch |err| switch (err) {
error.CodegenFail => return error.FlushFailure,
else => |e| return e,
};
}
// Most lazy symbols can be updated when the corresponding decl is,
// so we only have to worry about the one without an associated decl.
if (self.lazy_syms.get(.none)) |metadata| {
self.updateLazySymbol(.none, metadata) catch |err| switch (err) {
error.CodegenFail => return error.FlushFailure,
else => |e| return e,
};
}

// TODO This linker code currently assumes there is only 1 compilation unit and it
@@ -1132,18 +1144,29 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
.p32 => @sizeOf(elf.Elf32_Phdr),
.p64 => @sizeOf(elf.Elf64_Phdr),
};
const phalign: u16 = switch (self.ptr_width) {
.p32 => @alignOf(elf.Elf32_Phdr),
.p64 => @alignOf(elf.Elf64_Phdr),
};
const allocated_size = self.allocatedSize(self.phdr_table_offset.?);

const phdr_table_index = self.phdr_table_index.?;
const phdr_table = &self.program_headers.items[phdr_table_index];
const phdr_table_load = &self.program_headers.items[self.phdr_table_load_index.?];

const allocated_size = self.allocatedSize(phdr_table.p_offset);
const needed_size = self.program_headers.items.len * phsize;

if (needed_size > allocated_size) {
self.phdr_table_offset = null; // free the space
self.phdr_table_offset = self.findFreeSpace(needed_size, phalign);
phdr_table.p_offset = 0; // free the space
phdr_table.p_offset = self.findFreeSpace(needed_size, @intCast(u32, phdr_table.p_align));
}

phdr_table_load.p_offset = mem.alignBackwardGeneric(u64, phdr_table.p_offset, phdr_table_load.p_align);
const load_align_offset = phdr_table.p_offset - phdr_table_load.p_offset;
phdr_table_load.p_filesz = load_align_offset + needed_size;
phdr_table_load.p_memsz = load_align_offset + needed_size;

phdr_table.p_filesz = needed_size;
phdr_table.p_vaddr = phdr_table_load.p_vaddr + load_align_offset;
phdr_table.p_paddr = phdr_table_load.p_paddr + load_align_offset;
phdr_table.p_memsz = needed_size;

switch (self.ptr_width) {
.p32 => {
const buf = try gpa.alloc(elf.Elf32_Phdr, self.program_headers.items.len);
@@ -1155,7 +1178,7 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
mem.byteSwapAllFields(elf.Elf32_Phdr, phdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
},
.p64 => {
const buf = try gpa.alloc(elf.Elf64_Phdr, self.program_headers.items.len);
@@ -1167,9 +1190,14 @@ pub fn flushModule(self: *Elf, comp: *Compilation, prog_node: *std.Progress.Node
mem.byteSwapAllFields(elf.Elf64_Phdr, phdr);
}
}
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), self.phdr_table_offset.?);
try self.base.file.?.pwriteAll(mem.sliceAsBytes(buf), phdr_table.p_offset);
},
}

// We don't actually care if the phdr load section overlaps, only the phdr section matters.
phdr_table_load.p_offset = 0;
phdr_table_load.p_filesz = 0;

self.phdr_table_dirty = false;
}
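A worked example of the alignment bookkeeping above, assuming a page-aligned load segment (`p_align = 0x1000`), a header table found at file offset 0x3450, and `needed_size = 0x1c8`:

    phdr_table_load.p_offset = alignBackward(0x3450, 0x1000) = 0x3000
    load_align_offset        = 0x3450 - 0x3000               = 0x450
    phdr_table_load.p_filesz = 0x450 + 0x1c8                 = 0x618
    phdr_table.p_vaddr       = phdr_table_load.p_vaddr + 0x450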
@@ -1992,13 +2020,14 @@ fn writeElfHeader(self: *Elf) !void {

const e_entry = if (elf_type == .REL) 0 else self.entry_addr.?;

const phdr_table_offset = self.program_headers.items[self.phdr_table_index.?].p_offset;
switch (self.ptr_width) {
.p32 => {
mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian);
index += 4;

// e_phoff
mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.phdr_table_offset.?), endian);
mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, phdr_table_offset), endian);
index += 4;

// e_shoff
@@ -2011,7 +2040,7 @@ fn writeElfHeader(self: *Elf) !void {
index += 8;

// e_phoff
mem.writeInt(u64, hdr_buf[index..][0..8], self.phdr_table_offset.?, endian);
mem.writeInt(u64, hdr_buf[index..][0..8], phdr_table_offset, endian);
index += 8;

// e_shoff
@@ -2367,22 +2396,16 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
}
}

pub fn getOrCreateAtomForLazySymbol(self: *Elf, lazy_sym: File.LazySymbol, alignment: u32) !Atom.Index {
const gop = try self.lazy_syms.getOrPutContext(self.base.allocator, lazy_sym, .{
.mod = self.base.options.module.?,
});
pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol, alignment: u32) !Atom.Index {
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl());
errdefer _ = self.lazy_syms.pop();
if (!gop.found_existing) {
gop.value_ptr.* = .{
.atom = try self.createAtom(),
.shdr = switch (lazy_sym.kind) {
.code => self.text_section_index.?,
.const_data => self.rodata_section_index.?,
},
.alignment = alignment,
};
}
return gop.value_ptr.atom;
if (!gop.found_existing) gop.value_ptr.* = .{ .alignment = alignment };
const atom = switch (sym.kind) {
.code => &gop.value_ptr.text_atom,
.const_data => &gop.value_ptr.rodata_atom,
};
if (atom.* == null) atom.* = try self.createAtom();
return atom.*.?;
}

pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.Index {
@@ -2651,7 +2674,29 @@ pub fn updateDecl(self: *Elf, module: *Module, decl_index: Module.Decl.Index) !v
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}

fn updateLazySymbol(self: *Elf, lazy_sym: File.LazySymbol, lazy_metadata: LazySymbolMetadata) !void {
fn updateLazySymbol(self: *Elf, decl: Module.Decl.OptionalIndex, metadata: LazySymbolMetadata) !void {
const mod = self.base.options.module.?;
if (metadata.text_atom) |atom| try self.updateLazySymbolAtom(
File.LazySymbol.initDecl(.code, decl, mod),
atom,
self.text_section_index.?,
metadata.alignment,
);
if (metadata.rodata_atom) |atom| try self.updateLazySymbolAtom(
File.LazySymbol.initDecl(.const_data, decl, mod),
atom,
self.rodata_section_index.?,
metadata.alignment,
);
}

fn updateLazySymbolAtom(
self: *Elf,
sym: File.LazySymbol,
atom_index: Atom.Index,
shdr_index: u16,
required_alignment: u32,
) !void {
const gpa = self.base.allocator;
const mod = self.base.options.module.?;

@@ -2660,19 +2705,18 @@ fn updateLazySymbol(self: *Elf, lazy_sym: File.LazySymbol, lazy_metadata: LazySy

const name_str_index = blk: {
const name = try std.fmt.allocPrint(gpa, "__lazy_{s}_{}", .{
@tagName(lazy_sym.kind),
lazy_sym.ty.fmt(mod),
@tagName(sym.kind),
sym.ty.fmt(mod),
});
defer gpa.free(name);
break :blk try self.shstrtab.insert(gpa, name);
};
const name = self.shstrtab.get(name_str_index).?;

const atom_index = lazy_metadata.atom;
const atom = self.getAtom(atom_index);
const local_sym_index = atom.getSymbolIndex().?;

const src = if (lazy_sym.ty.getOwnerDeclOrNull()) |owner_decl|
const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl|
mod.declPtr(owner_decl).srcLoc()
else
Module.SrcLoc{
@@ -2680,14 +2724,9 @@ fn updateLazySymbol(self: *Elf, lazy_sym: File.LazySymbol, lazy_metadata: LazySy
.parent_decl_node = undefined,
.lazy = .unneeded,
};
const res = try codegen.generateLazySymbol(
&self.base,
src,
lazy_sym,
&code_buffer,
.none,
.{ .parent_atom_index = local_sym_index },
);
const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{
.parent_atom_index = local_sym_index,
});
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
@@ -2696,7 +2735,6 @@ fn updateLazySymbol(self: *Elf, lazy_sym: File.LazySymbol, lazy_metadata: LazySy
},
};

const shdr_index = lazy_metadata.shdr;
const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const local_sym = atom.getSymbolPtr(self);
local_sym.* = .{
@@ -2707,7 +2745,6 @@ fn updateLazySymbol(self: *Elf, lazy_sym: File.LazySymbol, lazy_metadata: LazySy
.st_value = 0,
.st_size = 0,
};
const required_alignment = lazy_metadata.alignment;
const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment);
errdefer self.freeAtom(atom_index);
log.debug("allocated text block for {s} at 0x{x}", .{ name, vaddr });
@@ -232,16 +232,11 @@ const is_hot_update_compatible = switch (builtin.target.os.tag) {
else => false,
};

const LazySymbolTable = std.ArrayHashMapUnmanaged(
link.File.LazySymbol,
LazySymbolMetadata,
link.File.LazySymbol.Context,
true,
);
const LazySymbolTable = std.AutoArrayHashMapUnmanaged(Module.Decl.OptionalIndex, LazySymbolMetadata);

const LazySymbolMetadata = struct {
atom: Atom.Index,
section: u8,
text_atom: ?Atom.Index = null,
data_const_atom: ?Atom.Index = null,
alignment: u32,
};

@@ -513,17 +508,13 @@ pub fn flushModule(self: *MachO, comp: *Compilation, prog_node: *std.Progress.No
sub_prog_node.activate();
defer sub_prog_node.end();

{
var lazy_it = self.lazy_syms.iterator();
while (lazy_it.next()) |lazy_entry| {
self.updateLazySymbol(
lazy_entry.key_ptr.*,
lazy_entry.value_ptr.*,
) catch |err| switch (err) {
error.CodegenFail => return error.FlushFailure,
else => |e| return e,
};
}
// Most lazy symbols can be updated when the corresponding decl is,
// so we only have to worry about the one without an associated decl.
if (self.lazy_syms.get(.none)) |metadata| {
self.updateLazySymbol(.none, metadata) catch |err| switch (err) {
error.CodegenFail => return error.FlushFailure,
else => |e| return e,
};
}

const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented;
@ -2309,7 +2300,29 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
    try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
}

fn updateLazySymbol(self: *MachO, lazy_sym: File.LazySymbol, lazy_metadata: LazySymbolMetadata) !void {
fn updateLazySymbol(self: *MachO, decl: Module.Decl.OptionalIndex, metadata: LazySymbolMetadata) !void {
    const mod = self.base.options.module.?;
    if (metadata.text_atom) |atom| try self.updateLazySymbolAtom(
        File.LazySymbol.initDecl(.code, decl, mod),
        atom,
        self.text_section_index.?,
        metadata.alignment,
    );
    if (metadata.data_const_atom) |atom| try self.updateLazySymbolAtom(
        File.LazySymbol.initDecl(.const_data, decl, mod),
        atom,
        self.data_const_section_index.?,
        metadata.alignment,
    );
}

fn updateLazySymbolAtom(
    self: *MachO,
    sym: File.LazySymbol,
    atom_index: Atom.Index,
    section_index: u8,
    required_alignment: u32,
) !void {
    const gpa = self.base.allocator;
    const mod = self.base.options.module.?;

@ -2318,19 +2331,18 @@ fn updateLazySymbol(self: *MachO, lazy_sym: File.LazySymbol, lazy_metadata: LazySymbolMetadata

    const name_str_index = blk: {
        const name = try std.fmt.allocPrint(gpa, "___lazy_{s}_{}", .{
            @tagName(lazy_sym.kind),
            lazy_sym.ty.fmt(mod),
            @tagName(sym.kind),
            sym.ty.fmt(mod),
        });
        defer gpa.free(name);
        break :blk try self.strtab.insert(gpa, name);
    };
    const name = self.strtab.get(name_str_index).?;

    const atom_index = lazy_metadata.atom;
    const atom = self.getAtomPtr(atom_index);
    const local_sym_index = atom.getSymbolIndex().?;

    const src = if (lazy_sym.ty.getOwnerDeclOrNull()) |owner_decl|
    const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl|
        mod.declPtr(owner_decl).srcLoc()
    else
        Module.SrcLoc{
@ -2338,14 +2350,9 @@ fn updateLazySymbol(self: *MachO, lazy_sym: File.LazySymbol, lazy_metadata: LazySymbolMetadata
            .parent_decl_node = undefined,
            .lazy = .unneeded,
        };
    const res = try codegen.generateLazySymbol(
        &self.base,
        src,
        lazy_sym,
        &code_buffer,
        .none,
        .{ .parent_atom_index = local_sym_index },
    );
    const res = try codegen.generateLazySymbol(&self.base, src, sym, &code_buffer, .none, .{
        .parent_atom_index = local_sym_index,
    });
    const code = switch (res) {
        .ok => code_buffer.items,
        .fail => |em| {
@ -2354,11 +2361,10 @@ fn updateLazySymbol(self: *MachO, lazy_sym: File.LazySymbol, lazy_metadata: LazySymbolMetadata
        },
    };

    const required_alignment = lazy_metadata.alignment;
    const symbol = atom.getSymbolPtr(self);
    symbol.n_strx = name_str_index;
    symbol.n_type = macho.N_SECT;
    symbol.n_sect = lazy_metadata.section + 1;
    symbol.n_sect = section_index + 1;
    symbol.n_desc = 0;

    const vaddr = try self.allocateAtom(atom_index, code.len, required_alignment);
@ -2381,26 +2387,16 @@ fn updateLazySymbol(self: *MachO, lazy_sym: File.LazySymbol, lazy_metadata: LazySymbolMetadata
    try self.writeAtom(atom_index, code);
}

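With the metadata split per kind, `updateLazySymbol` above becomes a thin dispatcher: it reconstitutes a `File.LazySymbol` for each kind via `initDecl` and forwards each present atom to `updateLazySymbolAtom` together with an explicit section index and the shared alignment. A reduced sketch of that control flow; every type and name below is a stand-in, not the linker's real API:

```zig
const std = @import("std");

const Kind = enum { code, const_data };

const Metadata = struct {
    text_atom: ?u32 = null,
    data_const_atom: ?u32 = null,
    alignment: u32,
};

// Placeholder worker: the real updateLazySymbolAtom generates code and writes
// the atom; this one only records that it was called for a given kind.
fn updateAtom(called: *[2]bool, kind: Kind, atom: u32, section: u8, alignment: u32) !void {
    _ = atom;
    _ = section;
    _ = alignment;
    called[@enumToInt(kind)] = true;
}

fn update(called: *[2]bool, meta: Metadata, text_section: u8, data_const_section: u8) !void {
    // Each optional atom is unwrapped and dispatched independently, so one
    // decl may own a code atom, a const-data atom, or both.
    if (meta.text_atom) |atom|
        try updateAtom(called, .code, atom, text_section, meta.alignment);
    if (meta.data_const_atom) |atom|
        try updateAtom(called, .const_data, atom, data_const_section, meta.alignment);
}

test "both kinds are dispatched" {
    var called = [2]bool{ false, false };
    try update(&called, .{ .text_atom = 7, .data_const_atom = 9, .alignment = 4 }, 1, 2);
    try std.testing.expect(called[0] and called[1]);
}
```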
pub fn getOrCreateAtomForLazySymbol(
    self: *MachO,
    lazy_sym: File.LazySymbol,
    alignment: u32,
) !Atom.Index {
    const gop = try self.lazy_syms.getOrPutContext(self.base.allocator, lazy_sym, .{
        .mod = self.base.options.module.?,
    });
pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol, alignment: u32) !Atom.Index {
    const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl());
    errdefer _ = self.lazy_syms.pop();
    if (!gop.found_existing) {
        gop.value_ptr.* = .{
            .atom = try self.createAtom(),
            .section = switch (lazy_sym.kind) {
                .code => self.text_section_index.?,
                .const_data => self.data_const_section_index.?,
            },
            .alignment = alignment,
        };
    }
    return gop.value_ptr.atom;
    if (!gop.found_existing) gop.value_ptr.* = .{ .alignment = alignment };
    const atom = switch (sym.kind) {
        .code => &gop.value_ptr.text_atom,
        .const_data => &gop.value_ptr.data_const_atom,
    };
    if (atom.* == null) atom.* = try self.createAtom();
    return atom.*.?;
}

pub fn getOrCreateAtomForDecl(self: *MachO, decl_index: Module.Decl.Index) !Atom.Index {

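The rewritten `getOrCreateAtomForLazySymbol` above does its get-or-put on the decl key first, then takes a pointer to the per-kind optional field and creates an atom only on first use, so a decl can lazily grow a code atom, a const-data atom, or both. A self-contained sketch of that field-pointer trick, with illustrative names throughout:

```zig
const std = @import("std");

const Kind = enum { code, const_data };

const Metadata = struct {
    text_atom: ?u32 = null,
    data_const_atom: ?u32 = null,
    alignment: u32,
};

fn getOrCreateAtom(meta: *Metadata, kind: Kind, next_atom: *u32) u32 {
    // Select a pointer to the optional slot for this kind; both kinds share
    // one metadata entry, keyed by the owning decl.
    const slot = switch (kind) {
        .code => &meta.text_atom,
        .const_data => &meta.data_const_atom,
    };
    // Create the atom lazily, only on first request (stand-in for createAtom()).
    if (slot.* == null) {
        slot.* = next_atom.*;
        next_atom.* += 1;
    }
    return slot.*.?;
}

test "atom is created once per kind" {
    var meta = Metadata{ .alignment = 4 };
    var next: u32 = 0;
    try std.testing.expectEqual(@as(u32, 0), getOrCreateAtom(&meta, .code, &next));
    try std.testing.expectEqual(@as(u32, 0), getOrCreateAtom(&meta, .code, &next)); // cached
    try std.testing.expectEqual(@as(u32, 1), getOrCreateAtom(&meta, .const_data, &next));
}
```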
@ -804,6 +804,7 @@ const Writer = struct {
        var case_i: u32 = 0;

        try w.writeOperand(s, inst, 0, pl_op.operand);
        if (w.skip_body) return s.writeAll(", ...");
        const old_indent = w.indent;
        w.indent += 2;


@ -31,7 +31,6 @@ const CPU = packed struct {
test {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;

    var ram = try RAM.new();

@ -9,7 +9,6 @@ const Union = union(enum) {
test "const error union field alignment" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
    var union_or_err: anyerror!Union = Union{ .Color = 1234 };
    try std.testing.expect((union_or_err catch unreachable).Color == 1234);

@ -21,7 +21,6 @@ fn get_foo() Foo.FooError!*Foo {
test "fixed" {
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

    default_foo = get_foo() catch null; // This Line

@ -402,7 +402,6 @@ test "expected [*c]const u8, found [*:0]const u8" {

test "explicit cast from integer to error type" {
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

@ -370,7 +370,6 @@ fn intLiteral(str: []const u8) !?i64 {
}

test "nested error union function call in optional unwrap" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -499,7 +498,6 @@ test "function pointer with return type that is error union with payload which i
}

test "return result loc as peer result loc in inferred error set function" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -531,7 +529,6 @@ test "return result loc as peer result loc in inferred error set function" {
}

test "error payload type is correctly resolved" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -665,7 +662,6 @@ test "coerce error set to the current inferred error set" {
}

test "error union payload is properly aligned" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

@ -967,7 +967,6 @@ test "closure capture type of runtime-known parameter" {
}

test "comptime break passing through runtime condition converted to runtime break" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

    const S = struct {
@ -999,7 +998,6 @@ test "comptime break passing through runtime condition converted to runtime break" {
}

test "comptime break to outer loop passing through runtime condition converted to runtime break" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1218,7 +1216,6 @@ test "storing an array of type in a field" {
}

test "pass pointer to field of comptime-only type as a runtime parameter" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO


@ -11,7 +11,6 @@ fn addressToFunction() void {
}

test "mutate through ptr initialized with constant intToPtr value" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

@ -1116,7 +1116,6 @@ test "for loop over pointers to struct, getting field from struct pointer" {
}

test "anon init through error unions and optionals" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1162,7 +1161,6 @@ test "anon init through optional" {
}

test "anon init through error union" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -1182,7 +1180,6 @@ test "anon init through error union" {
}

test "typed init through error unions and optionals" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

@ -23,7 +23,6 @@ fn doThing(form_id: u64) anyerror!FormValue {
test "switch prong returns error enum" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

    switch (doThing(17) catch unreachable) {

@ -17,7 +17,6 @@ fn foo(id: u64) !FormValue {
test "switch prong implicit cast" {
    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

    const result = switch (foo(2) catch unreachable) {

@ -2,4 +2,4 @@
// output_mode=Exe
// target=aarch64-macos
//
// :110:9: error: root struct of file 'tmp' has no member named 'main'
// :?:?: error: root struct of file 'tmp' has no member named 'main'

@ -2,4 +2,7 @@
// output_mode=Exe
// target=x86_64-linux
//
// :110:9: error: root struct of file 'tmp' has no member named 'main'
// :?:?: error: root struct of file 'tmp' has no member named 'main'
// :?:?: note: called from here
// :?:?: note: called from here
// :?:?: note: called from here

@ -2,4 +2,4 @@
// output_mode=Exe
// target=x86_64-macos
//
// :110:9: error: root struct of file 'tmp' has no member named 'main'
// :?:?: error: root struct of file 'tmp' has no member named 'main'

@ -2,4 +2,6 @@
// output_mode=Exe
// target=x86_64-windows
//
// :131:9: error: root struct of file 'tmp' has no member named 'main'
// :?:?: error: root struct of file 'tmp' has no member named 'main'
// :?:?: note: called from here
// :?:?: note: called from here

@ -1045,8 +1045,7 @@ pub fn main() !void {
    var ctx = Cases.init(gpa, arena);

    var test_it = TestIterator{ .filenames = filenames.items };
    while (test_it.next()) |maybe_batch| {
        const batch = maybe_batch orelse break;
    while (try test_it.next()) |batch| {
        const strategy: TestStrategy = if (batch.len > 1) .incremental else .independent;
        var cases = std.ArrayList(usize).init(arena);

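The harness change above suggests that `TestIterator.next()` now returns an error union of an optional, so `while (try test_it.next()) |batch|` both propagates errors and stops at null in a single step, replacing the old capture-then-`orelse break` dance and the trailing `else |err|` clause. A small sketch of iterating a `!?T`-returning `next()`; the `Iter` type is illustrative:

```zig
const std = @import("std");

const Iter = struct {
    i: usize = 0,

    // next() returns an error union of an optional: errors surface through
    // `try`, and null signals the end of iteration.
    fn next(self: *Iter) !?usize {
        if (self.i == 3) return null;
        self.i += 1;
        return self.i;
    }
};

test "while over a next() returning !?T" {
    var it = Iter{};
    var sum: usize = 0;
    while (try it.next()) |v| sum += v;
    try std.testing.expectEqual(@as(usize, 6), sum);
}
```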
@ -1084,6 +1083,11 @@ pub fn main() !void {

        for (cases.items) |case_index| {
            const case = &ctx.cases.items[case_index];
            if (strategy == .incremental and case.backend == .stage2 and case.target.getCpuArch() == .x86_64 and !case.link_libc and case.target.getOsTag() != .plan9) {
                // https://github.com/ziglang/zig/issues/15174
                continue;
            }

            switch (manifest.type) {
                .compile => {
                    case.addCompile(src);
@ -1115,8 +1119,6 @@ pub fn main() !void {
                }
            }
        }
    } else |err| {
        return err;
    }

    return runCases(&ctx, zig_exe_path);
