Merge pull request #6247 from alexnask/stage2_pe

Added a basic Portable Executable linker to stage2
Andrew Kelley 2020-09-04 16:05:58 -04:00 committed by GitHub
commit cff14dc2c6
10 changed files with 1122 additions and 144 deletions

View File

@ -18,11 +18,77 @@ const IMAGE_FILE_MACHINE_I386 = 0x014c;
const IMAGE_FILE_MACHINE_IA64 = 0x0200;
const IMAGE_FILE_MACHINE_AMD64 = 0x8664;
pub const MachineType = enum(u16) {
Unknown = 0x0,
/// Matsushita AM33
AM33 = 0x1d3,
/// x64
X64 = 0x8664,
/// ARM little endian
ARM = 0x1c0,
/// ARM64 little endian
ARM64 = 0xaa64,
/// ARM Thumb-2 little endian
ARMNT = 0x1c4,
/// EFI byte code
EBC = 0xebc,
/// Intel 386 or later processors and compatible processors
I386 = 0x14c,
/// Intel Itanium processor family
IA64 = 0x200,
/// Mitsubishi M32R little endian
M32R = 0x9041,
/// MIPS16
MIPS16 = 0x266,
/// MIPS with FPU
MIPSFPU = 0x366,
/// MIPS16 with FPU
MIPSFPU16 = 0x466,
/// Power PC little endian
POWERPC = 0x1f0,
/// Power PC with floating point support
POWERPCFP = 0x1f1,
/// MIPS little endian
R4000 = 0x166,
/// RISC-V 32-bit address space
RISCV32 = 0x5032,
/// RISC-V 64-bit address space
RISCV64 = 0x5064,
/// RISC-V 128-bit address space
RISCV128 = 0x5128,
/// Hitachi SH3
SH3 = 0x1a2,
/// Hitachi SH3 DSP
SH3DSP = 0x1a3,
/// Hitachi SH4
SH4 = 0x1a6,
/// Hitachi SH5
SH5 = 0x1a8,
/// Thumb
Thumb = 0x1c2,
/// MIPS little-endian WCE v2
WCEMIPSV2 = 0x169,
};
// OptionalHeader.magic values
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b;
const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b;
// Image Characteristics
pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1;
pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200;
pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2;
pub const IMAGE_FILE_32BIT_MACHINE = 0x100;
pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20;
// Section flags
pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40;
pub const IMAGE_SCN_MEM_READ = 0x40000000;
pub const IMAGE_SCN_CNT_CODE = 0x20;
pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000;
pub const IMAGE_SCN_MEM_WRITE = 0x80000000;
const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
const IMAGE_DEBUG_TYPE_CODEVIEW = 2;
const DEBUG_DIRECTORY = 6;
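The new enum mirrors the file-private IMAGE_FILE_MACHINE_* constants that already existed above it. A hypothetical sanity check inside coff.zig (not part of this commit) could assert that the two stay in agreement:

test "MachineType matches the legacy machine constants" {
    // The IMAGE_FILE_MACHINE_* constants are file-scope, so a same-file test can see them.
    std.testing.expectEqual(@as(u16, IMAGE_FILE_MACHINE_AMD64), @enumToInt(MachineType.X64));
    std.testing.expectEqual(@as(u16, IMAGE_FILE_MACHINE_I386), @enumToInt(MachineType.I386));
    std.testing.expectEqual(@as(u16, IMAGE_FILE_MACHINE_IA64), @enumToInt(MachineType.IA64));
}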

View File

@ -468,6 +468,7 @@ pub const Target = struct {
/// TODO Get rid of this one.
unknown,
coff,
pe,
elf,
macho,
wasm,
@ -771,6 +772,63 @@ pub const Target = struct {
};
}
pub fn toCoffMachine(arch: Arch) std.coff.MachineType {
return switch (arch) {
.avr => .Unknown,
.msp430 => .Unknown,
.arc => .Unknown,
.arm => .ARM,
.armeb => .Unknown,
.hexagon => .Unknown,
.le32 => .Unknown,
.mips => .Unknown,
.mipsel => .Unknown,
.powerpc => .POWERPC,
.r600 => .Unknown,
.riscv32 => .RISCV32,
.sparc => .Unknown,
.sparcel => .Unknown,
.tce => .Unknown,
.tcele => .Unknown,
.thumb => .Thumb,
.thumbeb => .Thumb,
.i386 => .I386,
.xcore => .Unknown,
.nvptx => .Unknown,
.amdil => .Unknown,
.hsail => .Unknown,
.spir => .Unknown,
.kalimba => .Unknown,
.shave => .Unknown,
.lanai => .Unknown,
.wasm32 => .Unknown,
.renderscript32 => .Unknown,
.aarch64_32 => .ARM64,
.aarch64 => .ARM64,
.aarch64_be => .Unknown,
.mips64 => .Unknown,
.mips64el => .Unknown,
.powerpc64 => .Unknown,
.powerpc64le => .Unknown,
.riscv64 => .RISCV64,
.x86_64 => .X64,
.nvptx64 => .Unknown,
.le64 => .Unknown,
.amdil64 => .Unknown,
.hsail64 => .Unknown,
.spir64 => .Unknown,
.wasm64 => .Unknown,
.renderscript64 => .Unknown,
.amdgcn => .Unknown,
.bpfel => .Unknown,
.bpfeb => .Unknown,
.sparcv9 => .Unknown,
.s390x => .Unknown,
.ve => .Unknown,
.spu_2 => .Unknown,
};
}
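With toCoffMachine in place, callers can reject architectures the PE/COFF backend cannot encode before writing any headers; the new linker does exactly this inline in createFile further down. A minimal standalone sketch of that check (the helper name is hypothetical):

fn coffMachineFor(arch: std.Target.Cpu.Arch) !std.coff.MachineType {
    const machine = arch.toCoffMachine();
    // .Unknown means no PE/COFF machine value is assigned for this architecture.
    if (machine == .Unknown) return error.UnsupportedCOFFArchitecture;
    return machine;
}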
pub fn endian(arch: Arch) builtin.Endian {
return switch (arch) {
.avr,

View File

@ -1820,6 +1820,9 @@ fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
try self.markOutdatedDecl(decl);
decl.contents_hash = contents_hash;
} else switch (self.bin_file.tag) {
.coff => {
// TODO Implement for COFF
},
.elf => if (decl.fn_link.elf.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
@ -2078,12 +2081,14 @@ fn allocateNewDecl(
.deletion_flag = false,
.contents_hash = contents_hash,
.link = switch (self.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.TextBlock.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = {} },
},
.fn_link = switch (self.bin_file.tag) {
.coff => .{ .coff = {} },
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
.c => .{ .c = {} },

View File

@ -59,14 +59,21 @@ pub const GenerateSymbolError = error{
AnalysisFail,
};
pub const DebugInfoOutput = union(enum) {
dwarf: struct {
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
},
none,
};
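Callers now select debug-info emission through this union. Roughly (the ELF buffer names are illustrative; the COFF call matches updateDecl in the new link/Coff.zig below):

// In Coff.updateDecl: no debug info yet, so it is skipped entirely.
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);

// In Elf.updateDecl (illustrative): the existing DWARF buffers, now wrapped in the union.
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{
    .dwarf = .{
        .dbg_line = &dbg_line_buffer,
        .dbg_info = &dbg_info_buffer,
        .dbg_info_type_relocs = &dbg_info_type_relocs,
    },
});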
pub fn generateSymbol(
bin_file: *link.File,
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
const tracy = trace(@src());
defer tracy.end();
@ -76,56 +83,56 @@ pub fn generateSymbol(
switch (bin_file.options.target.cpu.arch) {
.wasm32 => unreachable, // has its own code path
.wasm64 => unreachable, // has its own code path
.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, debug_output),
.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, debug_output),
.riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, debug_output),
.spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, debug_output),
.x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, debug_output),
else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
}
},
@ -139,7 +146,7 @@ pub fn generateSymbol(
switch (try generateSymbol(bin_file, src, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
}, code, dbg_line, dbg_info, dbg_info_type_relocs)) {
}, code, debug_output)) {
.appended => return Result{ .appended = {} },
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
@ -239,9 +246,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
target: *const std.Target,
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@ -419,9 +424,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
@ -457,9 +460,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.bin_file = bin_file,
.mod_fn = module_fn,
.code = code,
.dbg_line = dbg_line,
.dbg_info = dbg_info,
.dbg_info_type_relocs = dbg_info_type_relocs,
.debug_output = debug_output,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@ -598,35 +599,50 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn dbgSetPrologueEnd(self: *Self) InnerError!void {
try self.dbg_line.append(DW.LNS_set_prologue_end);
try self.dbgAdvancePCAndLine(self.prev_di_src);
switch (self.debug_output) {
.dwarf => |dbg_out| {
try dbg_out.dbg_line.append(DW.LNS_set_prologue_end);
try self.dbgAdvancePCAndLine(self.prev_di_src);
},
.none => {},
}
}
fn dbgSetEpilogueBegin(self: *Self) InnerError!void {
try self.dbg_line.append(DW.LNS_set_epilogue_begin);
try self.dbgAdvancePCAndLine(self.prev_di_src);
switch (self.debug_output) {
.dwarf => |dbg_out| {
try dbg_out.dbg_line.append(DW.LNS_set_epilogue_begin);
try self.dbgAdvancePCAndLine(self.prev_di_src);
},
.none => {},
}
}
fn dbgAdvancePCAndLine(self: *Self, src: usize) InnerError!void {
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table, and changing ir.Inst from storing byte offset to token. Currently
// this involves scanning over the source code for newlines
// (but only from the previous byte offset to the new one).
const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
const delta_pc = self.code.items.len - self.prev_di_pc;
self.prev_di_src = src;
self.prev_di_pc = self.code.items.len;
// TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
// single-byte opcodes that add different numbers to both the PC and the line number
// at the same time.
try self.dbg_line.ensureCapacity(self.dbg_line.items.len + 11);
self.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
leb128.writeULEB128(self.dbg_line.writer(), delta_pc) catch unreachable;
if (delta_line != 0) {
self.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
leb128.writeILEB128(self.dbg_line.writer(), delta_line) catch unreachable;
switch (self.debug_output) {
.dwarf => |dbg_out| {
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table, and changing ir.Inst from storing byte offset to token. Currently
// this involves scanning over the source code for newlines
// (but only from the previous byte offset to the new one).
const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
const delta_pc = self.code.items.len - self.prev_di_pc;
// TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
// single-byte opcodes that add different numbers to both the PC and the line number
// at the same time.
try dbg_out.dbg_line.ensureCapacity(dbg_out.dbg_line.items.len + 11);
dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
leb128.writeULEB128(dbg_out.dbg_line.writer(), delta_pc) catch unreachable;
if (delta_line != 0) {
dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable;
}
dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_copy);
},
.none => {},
}
self.dbg_line.appendAssumeCapacity(DW.LNS_copy);
}
/// Asserts there is already capacity to insert into top branch inst_table.
@ -654,18 +670,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
/// after codegen for this symbol is done.
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
assert(ty.hasCodeGenBits());
const index = self.dbg_info.items.len;
try self.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
switch (self.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
const gop = try self.dbg_info_type_relocs.getOrPut(self.gpa, ty);
if (!gop.found_existing) {
gop.entry.value = .{
.off = undefined,
.relocs = .{},
};
const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
if (!gop.found_existing) {
gop.entry.value = .{
.off = undefined,
.relocs = .{},
};
}
try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
},
.none => {},
}
try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
}
fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue {
@ -1258,14 +1279,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
self.registers.putAssumeCapacityNoClobber(toCanonicalReg(reg), &inst.base);
self.markRegUsed(reg);
try self.dbg_info.ensureCapacity(self.dbg_info.items.len + 8 + name_with_null.len);
self.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
self.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
1, // ULEB128 dwarf expression length
reg.dwarfLocOp(),
});
try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
self.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
switch (self.debug_output) {
.dwarf => |dbg_out| {
try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 8 + name_with_null.len);
dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
1, // ULEB128 dwarf expression length
reg.dwarfLocOp(),
});
try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
},
.none => {},
}
},
else => {},
}
@ -1302,7 +1328,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
switch (arch) {
.x86_64 => {
for (info.args) |mc_arg, arg_i| {
@ -1341,10 +1367,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
@intCast(u32, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes)
else
unreachable;
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
@ -1362,10 +1395,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr });
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32());
@ -1383,8 +1422,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
@intCast(u16, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * 2)
else
unreachable;
const return_type = func.owner_decl.typed_value.most_recent.typed_value.ty.fnReturnType();
// First, push the return address, then jump; if noreturn, don't bother with the first step
// TODO: implement packed struct -> u16 at comptime and move the bitcast here
@ -1420,10 +1465,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
// TODO only works with leaf functions
// at the moment, which works fine for
@ -1983,7 +2033,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (mem.eql(u8, inst.asm_source, "syscall")) {
try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 });
} else {
} else if (inst.asm_source.len != 0) {
return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{});
}
@ -2541,6 +2591,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got = &macho_file.sections.items[macho_file.got_section_index.?];
const got_addr = got.addr + decl.link.macho.offset_table_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const decl = payload.decl;
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{});
}
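Each of these call paths resolves a callee through its offset-table slot rather than through its code address, so code that moves during an incremental update only requires rewriting the slot. A hypothetical standalone form of the expression repeated above:

/// Absolute virtual address of `decl`'s slot in the COFF offset table.
/// Illustrative only; the commit computes this inline at each call site.
fn coffGotSlotAddr(coff_file: *link.File.Coff, decl: *Module.Decl, ptr_bytes: u64) u64 {
    return coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
}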

View File

@ -34,6 +34,7 @@ pub const File = struct {
pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.TextBlock,
macho: MachO.TextBlock,
c: void,
wasm: void,
@ -41,6 +42,7 @@ pub const File = struct {
pub const LinkFn = union {
elf: Elf.SrcFn,
coff: Coff.SrcFn,
macho: MachO.SrcFn,
c: void,
wasm: ?Wasm.FnData,
@ -66,7 +68,7 @@ pub const File = struct {
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
switch (options.object_format) {
.unknown => unreachable,
.coff => return error.TODOImplementCoff,
.coff, .pe => return Coff.openPath(allocator, dir, sub_path, options),
.elf => return Elf.openPath(allocator, dir, sub_path, options),
.macho => return MachO.openPath(allocator, dir, sub_path, options),
.wasm => return Wasm.openPath(allocator, dir, sub_path, options),
@ -85,7 +87,7 @@ pub const File = struct {
pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
switch (base.tag) {
.elf, .macho => {
.coff, .elf, .macho => {
if (base.file != null) return;
base.file = try dir.createFile(sub_path, .{
.truncate = false,
@ -112,6 +114,7 @@ pub const File = struct {
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
@ -121,6 +124,7 @@ pub const File = struct {
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c, .wasm => {},
@ -131,6 +135,7 @@ pub const File = struct {
/// any given Decl.
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
.c, .wasm => {},
@ -140,6 +145,7 @@ pub const File = struct {
pub fn deinit(base: *File) void {
if (base.file) |f| f.close();
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).deinit(),
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
.macho => @fieldParentPtr(MachO, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
@ -149,6 +155,11 @@ pub const File = struct {
pub fn destroy(base: *File) void {
switch (base.tag) {
.coff => {
const parent = @fieldParentPtr(Coff, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.elf => {
const parent = @fieldParentPtr(Elf, "base", base);
parent.deinit();
@ -177,6 +188,7 @@ pub const File = struct {
defer tracy.end();
try switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).flush(module),
.elf => @fieldParentPtr(Elf, "base", base).flush(module),
.macho => @fieldParentPtr(MachO, "base", base).flush(module),
.c => @fieldParentPtr(C, "base", base).flush(module),
@ -186,6 +198,7 @@ pub const File = struct {
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
@ -195,6 +208,7 @@ pub const File = struct {
pub fn errorFlags(base: *File) ErrorFlags {
return switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).error_flags,
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
.macho => @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
@ -211,6 +225,7 @@ pub const File = struct {
exports: []const *Module.Export,
) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
@ -220,6 +235,7 @@ pub const File = struct {
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
.c => unreachable,
@ -228,6 +244,7 @@ pub const File = struct {
}
pub const Tag = enum {
coff,
elf,
macho,
c,
@ -239,6 +256,7 @@ pub const File = struct {
};
pub const C = @import("link/C.zig");
pub const Coff = @import("link/Coff.zig");
pub const Elf = @import("link/Elf.zig");
pub const MachO = @import("link/MachO.zig");
pub const Wasm = @import("link/Wasm.zig");

View File

@ -0,0 +1,792 @@
const Coff = @This();
const std = @import("std");
const log = std.log.scoped(.link);
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fs = std.fs;
const trace = @import("../tracy.zig").trace;
const Module = @import("../Module.zig");
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
const allocation_padding = 4 / 3;
const minimum_text_block_size = 64 * allocation_padding;
const section_alignment = 4096;
const file_alignment = 512;
const image_base = 0x400_000;
const section_table_size = 2 * 40;
comptime {
std.debug.assert(std.mem.isAligned(image_base, section_alignment));
}
pub const base_tag: link.File.Tag = .coff;
const msdos_stub = @embedFile("msdos-stub.bin");
base: link.File,
ptr_width: enum { p32, p64 },
error_flags: link.File.ErrorFlags = .{},
text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
last_text_block: ?*TextBlock = null,
/// Section table file pointer.
section_table_offset: u32 = 0,
/// Section data file pointer.
section_data_offset: u32 = 0,
/// Optional header file pointer.
optional_header_offset: u32 = 0,
/// Absolute virtual address of the offset table when the executable is loaded in memory.
offset_table_virtual_address: u32 = 0,
/// Current size of the offset table on disk, must be a multiple of `file_alignment`
offset_table_size: u32 = 0,
/// Contains absolute virtual addresses
offset_table: std.ArrayListUnmanaged(u64) = .{},
/// Free list of offset table indices
offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
/// Virtual address of the entry point procedure relative to `image_base`
entry_addr: ?u32 = null,
/// Absolute virtual address of the text section when the executable is loaded in memory.
text_section_virtual_address: u32 = 0,
/// Current size of the `.text` section on disk, must be a multiple of `file_alignment`
text_section_size: u32 = 0,
offset_table_size_dirty: bool = false,
text_section_size_dirty: bool = false,
/// This flag is set when the virtual size of the whole image file when loaded in memory has changed
/// and needs to be updated in the optional header.
size_of_image_dirty: bool = false,
pub const TextBlock = struct {
/// Offset of the code relative to the start of the text section
text_offset: u32,
/// Used size of the text block
size: u32,
/// This field is undefined for symbols with size = 0.
offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev: ?*TextBlock,
next: ?*TextBlock,
pub const empty = TextBlock{
.text_offset = 0,
.size = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
};
/// Returns how much room there is to grow in virtual address space.
fn capacity(self: TextBlock) u64 {
if (self.next) |next| {
return next.text_offset - self.text_offset;
}
// This is the last block, the capacity is only limited by the address space.
return std.math.maxInt(u32) - self.text_offset;
}
fn freeListEligible(self: TextBlock) bool {
// No need to keep a free list node for the last block.
const next = self.next orelse return false;
const cap = next.text_offset - self.text_offset;
const ideal_cap = self.size * allocation_padding;
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
return surplus >= minimum_text_block_size;
}
/// Absolute virtual address of the text block when the file is loaded in memory.
fn getVAddr(self: TextBlock, coff: Coff) u32 {
return coff.text_section_virtual_address + self.text_offset;
}
};
pub const SrcFn = void;
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
assert(options.object_format == .coff);
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
errdefer file.close();
var coff_file = try allocator.create(Coff);
errdefer allocator.destroy(coff_file);
coff_file.* = openFile(allocator, file, options) catch |err| switch (err) {
error.IncrFailed => try createFile(allocator, file, options),
else => |e| return e,
};
return &coff_file.base;
}
/// Returns error.IncrFailed if incremental update could not be performed.
fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
switch (options.output_mode) {
.Exe => {},
.Obj => return error.IncrFailed,
.Lib => return error.IncrFailed,
}
var self: Coff = .{
.base = .{
.file = file,
.tag = .coff,
.options = options,
.allocator = allocator,
},
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedCOFFArchitecture,
},
};
errdefer self.deinit();
// TODO implement reading the PE/COFF file
return error.IncrFailed;
}
/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
// TODO Write object specific relocations, COFF symbol table, then enable object file output.
switch (options.output_mode) {
.Exe => {},
.Obj => return error.TODOImplementWritingObjFiles,
.Lib => return error.TODOImplementWritingLibFiles,
}
var self: Coff = .{
.base = .{
.tag = .coff,
.options = options,
.allocator = allocator,
.file = file,
},
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedCOFFArchitecture,
},
};
errdefer self.deinit();
var coff_file_header_offset: u32 = 0;
if (options.output_mode == .Exe) {
// Write the MS-DOS stub and the PE signature
try self.base.file.?.pwriteAll(msdos_stub ++ "PE\x00\x00", 0);
coff_file_header_offset = msdos_stub.len + 4;
}
// COFF file header
const data_directory_count = 0;
var hdr_data: [112 + data_directory_count * 8 + section_table_size]u8 = undefined;
var index: usize = 0;
const machine = self.base.options.target.cpu.arch.toCoffMachine();
if (machine == .Unknown) {
return error.UnsupportedCOFFArchitecture;
}
std.mem.writeIntLittle(u16, hdr_data[0..2], @enumToInt(machine));
index += 2;
// Number of sections (we only use .got, .text)
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 2);
index += 2;
// TimeDateStamp (u32), PointerToSymbolTable (u32), NumberOfSymbols (u32)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
const optional_header_size = switch (options.output_mode) {
.Exe => data_directory_count * 8 + switch (self.ptr_width) {
.p32 => @as(u16, 96),
.p64 => 112,
},
else => 0,
};
const section_table_offset = coff_file_header_offset + 20 + optional_header_size;
const default_offset_table_size = file_alignment;
const default_size_of_code = 0;
self.section_data_offset = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, file_alignment);
const section_data_relative_virtual_address = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, section_alignment);
self.offset_table_virtual_address = image_base + section_data_relative_virtual_address;
self.offset_table_size = default_offset_table_size;
self.section_table_offset = section_table_offset;
self.text_section_virtual_address = image_base + section_data_relative_virtual_address + section_alignment;
self.text_section_size = default_size_of_code;
// Size of file when loaded in memory
const size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + default_size_of_code, section_alignment);
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size);
index += 2;
// Characteristics
var characteristics: u16 = std.coff.IMAGE_FILE_DEBUG_STRIPPED | std.coff.IMAGE_FILE_RELOCS_STRIPPED; // TODO Remove debug info stripped flag when necessary
if (options.output_mode == .Exe) {
characteristics |= std.coff.IMAGE_FILE_EXECUTABLE_IMAGE;
}
switch (self.ptr_width) {
.p32 => characteristics |= std.coff.IMAGE_FILE_32BIT_MACHINE,
.p64 => characteristics |= std.coff.IMAGE_FILE_LARGE_ADDRESS_AWARE,
}
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics);
index += 2;
assert(index == 20);
try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset);
if (options.output_mode == .Exe) {
self.optional_header_offset = coff_file_header_offset + 20;
// Optional header
index = 0;
std.mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) {
.p32 => @as(u16, 0x10b),
.p64 => 0x20b,
});
index += 2;
// Linker version (u8 + u8)
std.mem.set(u8, hdr_data[index..][0..2], 0);
index += 2;
// SizeOfCode (UNUSED, u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32), BaseOfCode (UNUSED, u32)
std.mem.set(u8, hdr_data[index..][0..20], 0);
index += 20;
if (self.ptr_width == .p32) {
// Base of data relative to the image base (UNUSED)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Image base address
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], image_base);
index += 4;
} else {
// Image base address
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], image_base);
index += 8;
}
// Section alignment
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_alignment);
index += 4;
// File alignment
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], file_alignment);
index += 4;
// Required OS version, 6.0 is Vista
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
index += 2;
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
index += 2;
// Image version
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Required subsystem version, same as OS version
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
index += 2;
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
index += 2;
// Reserved zeroes (u32)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], size_of_image);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
index += 4;
// CheckSum (u32)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Subsystem, TODO: Let users specify the subsystem, always CUI for now
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 3);
index += 2;
// DLL characteristics
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x0);
index += 2;
switch (self.ptr_width) {
.p32 => {
// Size of stack reserve + commit
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000_000);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
index += 4;
// Size of heap reserve + commit
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x100_000);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
index += 4;
},
.p64 => {
// Size of stack reserve + commit
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000_000);
index += 8;
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
index += 8;
// Size of heap reserve + commit
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x100_000);
index += 8;
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
index += 8;
},
}
// Reserved zeroes
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Number of data directories
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count);
index += 4;
// Initialize data directories to zero
std.mem.set(u8, hdr_data[index..][0 .. data_directory_count * 8], 0);
index += data_directory_count * 8;
assert(index == optional_header_size);
}
// Write section table.
// First, the .got section
hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*;
index += 8;
if (options.output_mode == .Exe) {
// Virtual size (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
index += 4;
// Virtual address (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.offset_table_virtual_address - image_base);
index += 4;
} else {
std.mem.set(u8, hdr_data[index..][0..8], 0);
index += 8;
}
// Size of raw data (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
index += 4;
// File pointer to the start of the section
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
index += 4;
// Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
// Section flags
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], std.coff.IMAGE_SCN_CNT_INITIALIZED_DATA | std.coff.IMAGE_SCN_MEM_READ);
index += 4;
// Then, the .text section
hdr_data[index..][0..8].* = ".text\x00\x00\x00".*;
index += 8;
if (options.output_mode == .Exe) {
// Virtual size (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
index += 4;
// Virtual address (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.text_section_virtual_address - image_base);
index += 4;
} else {
std.mem.set(u8, hdr_data[index..][0..8], 0);
index += 8;
}
// Size of raw data (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
index += 4;
// File pointer to the start of the section
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset + default_offset_table_size);
index += 4;
// Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
// Section flags
std.mem.writeIntLittle(
u32,
hdr_data[index..][0..4],
std.coff.IMAGE_SCN_CNT_CODE | std.coff.IMAGE_SCN_MEM_EXECUTE | std.coff.IMAGE_SCN_MEM_READ | std.coff.IMAGE_SCN_MEM_WRITE,
);
index += 4;
assert(index == optional_header_size + section_table_size);
try self.base.file.?.pwriteAll(hdr_data[0..index], self.optional_header_offset);
try self.base.file.?.setEndPos(self.section_data_offset + default_offset_table_size + default_size_of_code);
return self;
}
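For reference, the initial layout createFile produces (a sketch, assuming the DOS stub plus headers fit below the first section_alignment boundary, which holds for any reasonable stub size):

// VA 0x400000  image_base: DOS stub, PE signature, COFF + optional headers, section table
// VA 0x401000  .got   offset table, one file_alignment unit (0x200 bytes) of raw data
// VA 0x402000  .text  code, initially empty
// => initial SizeOfImage = 0x2000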
pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.coff.offset_table_index = i;
} else {
decl.link.coff.offset_table_index = @intCast(u32, self.offset_table.items.len);
_ = self.offset_table.addOneAssumeCapacity();
const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
if (self.offset_table.items.len > self.offset_table_size / entry_size) {
self.offset_table_size_dirty = true;
}
}
self.offset_table.items[decl.link.coff.offset_table_index] = 0;
}
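Sizing note: with the 512-byte default .got and 8-byte entries on a 64-bit target, the first 64 entries fit without growing the table; the 65th sets offset_table_size_dirty, and the next writeOffsetTableEntry call doubles the raw size, moving the .text section out of the way.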
fn allocateTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const new_block_min_capacity = new_block_size * allocation_padding;
// We use these to indicate our intention to update metadata, placing the new block,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
var block_placement: ?*TextBlock = null;
var free_list_removal: ?usize = null;
const vaddr = blk: {
var i: usize = 0;
while (i < self.text_block_free_list.items.len) {
const free_block = self.text_block_free_list.items[i];
const next_block_text_offset = free_block.text_offset + free_block.capacity();
const new_block_text_offset = std.mem.alignForwardGeneric(u64, free_block.getVAddr(self.*) + free_block.size, alignment) - self.text_section_virtual_address;
if (new_block_text_offset < next_block_text_offset and next_block_text_offset - new_block_text_offset >= new_block_min_capacity) {
block_placement = free_block;
const remaining_capacity = next_block_text_offset - new_block_text_offset - new_block_min_capacity;
if (remaining_capacity < minimum_text_block_size) {
free_list_removal = i;
}
break :blk new_block_text_offset + self.text_section_virtual_address;
} else {
if (!free_block.freeListEligible()) {
_ = self.text_block_free_list.swapRemove(i);
} else {
i += 1;
}
continue;
}
} else if (self.last_text_block) |last| {
const new_block_vaddr = std.mem.alignForwardGeneric(u64, last.getVAddr(self.*) + last.size, alignment);
block_placement = last;
break :blk new_block_vaddr;
} else {
break :blk self.text_section_virtual_address;
}
};
const expand_text_section = block_placement == null or block_placement.?.next == null;
if (expand_text_section) {
const needed_size = @intCast(u32, std.mem.alignForwardGeneric(u64, vaddr + new_block_size - self.text_section_virtual_address, file_alignment));
if (needed_size > self.text_section_size) {
const current_text_section_virtual_size = std.mem.alignForwardGeneric(u32, self.text_section_size, section_alignment);
const new_text_section_virtual_size = std.mem.alignForwardGeneric(u32, needed_size, section_alignment);
if (current_text_section_virtual_size != new_text_section_virtual_size) {
self.size_of_image_dirty = true;
// Write new virtual size
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, new_text_section_virtual_size);
try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 8);
}
self.text_section_size = needed_size;
self.text_section_size_dirty = true;
}
self.last_text_block = text_block;
}
text_block.text_offset = @intCast(u32, vaddr - self.text_section_virtual_address);
text_block.size = @intCast(u32, new_block_size);
// This function can also reallocate a text block.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
if (text_block.prev) |prev| {
prev.next = text_block.next;
}
if (text_block.next) |next| {
next.prev = text_block.prev;
}
if (block_placement) |big_block| {
text_block.prev = big_block;
text_block.next = big_block.next;
big_block.next = text_block;
} else {
text_block.prev = null;
text_block.next = null;
}
if (free_list_removal) |i| {
_ = self.text_block_free_list.swapRemove(i);
}
return vaddr;
}
fn growTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const block_vaddr = text_block.getVAddr(self.*);
const align_ok = std.mem.alignBackwardGeneric(u64, block_vaddr, alignment) == block_vaddr;
const need_realloc = !align_ok or new_block_size > text_block.capacity();
if (!need_realloc) return @as(u64, block_vaddr);
return self.allocateTextBlock(text_block, new_block_size, alignment);
}
fn shrinkTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64) void {
text_block.size = @intCast(u32, new_block_size);
if (text_block.capacity() - text_block.size >= minimum_text_block_size) {
self.text_block_free_list.append(self.base.allocator, text_block) catch {};
}
}
fn freeTextBlock(self: *Coff, text_block: *TextBlock) void {
var already_have_free_list_node = false;
{
var i: usize = 0;
// TODO turn text_block_free_list into a hash map
while (i < self.text_block_free_list.items.len) {
if (self.text_block_free_list.items[i] == text_block) {
_ = self.text_block_free_list.swapRemove(i);
continue;
}
if (self.text_block_free_list.items[i] == text_block.prev) {
already_have_free_list_node = true;
}
i += 1;
}
}
if (self.last_text_block == text_block) {
self.last_text_block = text_block.prev;
}
if (text_block.prev) |prev| {
prev.next = text_block.next;
if (!already_have_free_list_node and prev.freeListEligible()) {
// The free list is a heuristic; it doesn't have to be perfect, so we can
// ignore the OOM here.
self.text_block_free_list.append(self.base.allocator, prev) catch {};
}
}
if (text_block.next) |next| {
next.prev = text_block.prev;
}
}
fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
const endian = self.base.options.target.cpu.arch.endian();
const offset_table_start = self.section_data_offset;
if (self.offset_table_size_dirty) {
const current_raw_size = self.offset_table_size;
const new_raw_size = self.offset_table_size * 2;
log.debug("growing offset table from raw size {} to {}\n", .{ current_raw_size, new_raw_size });
// Move the text section to a new place in the executable
const current_text_section_start = self.section_data_offset + current_raw_size;
const new_text_section_start = self.section_data_offset + new_raw_size;
const amt = try self.base.file.?.copyRangeAll(current_text_section_start, self.base.file.?, new_text_section_start, self.text_section_size);
if (amt != self.text_section_size) return error.InputOutput;
// Write the new raw size in the .got header
var buf: [8]u8 = undefined;
std.mem.writeIntLittle(u32, buf[0..4], new_raw_size);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 16);
// Write the new .text section file offset in the .text section header
std.mem.writeIntLittle(u32, buf[0..4], new_text_section_start);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 20);
const current_virtual_size = std.mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment);
const new_virtual_size = std.mem.alignForwardGeneric(u32, new_raw_size, section_alignment);
// If we had to move in the virtual address space, we need to fix the VAs in the offset table, as well as the virtual address of the `.text` section
// and the virtual size of the `.got` section
if (new_virtual_size != current_virtual_size) {
log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size });
self.size_of_image_dirty = true;
const va_offset = new_virtual_size - current_virtual_size;
// Write .got virtual size
std.mem.writeIntLittle(u32, buf[0..4], new_virtual_size);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 8);
// Write .text new virtual address
self.text_section_virtual_address = self.text_section_virtual_address + va_offset;
std.mem.writeIntLittle(u32, buf[0..4], self.text_section_virtual_address - image_base);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 12);
// Fix the VAs in the offset table
for (self.offset_table.items) |*va, idx| {
if (va.* != 0) {
va.* += va_offset;
switch (entry_size) {
4 => {
std.mem.writeInt(u32, buf[0..4], @intCast(u32, va.*), endian);
try self.base.file.?.pwriteAll(buf[0..4], offset_table_start + idx * entry_size);
},
8 => {
std.mem.writeInt(u64, &buf, va.*, endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + idx * entry_size);
},
else => unreachable,
}
}
}
}
self.offset_table_size = new_raw_size;
self.offset_table_size_dirty = false;
}
// Write the new entry
switch (entry_size) {
4 => {
var buf: [4]u8 = undefined;
std.mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
},
8 => {
var buf: [8]u8 = undefined;
std.mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
},
else => unreachable,
}
}
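/// Generates machine code for `decl`, (re)allocates its text block when the code no longer fits
/// or is misaligned, keeps its offset table entry in sync, and writes the code to the output
/// file. Export symbols are refreshed afterwards since the vaddr or size may have changed.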
pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
// TODO COFF/PE debug information
// TODO Implement exports
const tracy = trace(@src());
defer tracy.end();
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const typed_value = decl.typed_value.most_recent.typed_value;
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
return;
},
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const curr_size = decl.link.coff.size;
if (curr_size != 0) {
const capacity = decl.link.coff.capacity();
const need_realloc = code.len > capacity or
!std.mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
if (need_realloc) {
const curr_vaddr = self.getDeclVAddr(decl);
const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
log.debug("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr });
if (vaddr != curr_vaddr) {
log.debug(" (writing new offset table entry)\n", .{});
self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
}
} else if (code.len < curr_size) {
self.shrinkTextBlock(&decl.link.coff, code.len);
}
} else {
const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment);
log.debug("allocated text block for {} at 0x{x} (size: {Bi})\n", .{ std.mem.spanZ(decl.name), vaddr, code.len });
errdefer self.freeTextBlock(&decl.link.coff);
self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
}
// Write the code into the file
try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
}
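/// Releases the text block owned by `decl` and recycles its offset table slot.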
pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
// Appending to free lists is allowed to fail because the free lists are heuristic-based anyway.
self.freeTextBlock(&decl.link.coff);
self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
}
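/// Only an export named `_start` placed in `.text` is handled for now; it becomes the image
/// entry point. Any other section or name is reported as a compile error on the export.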
pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void {
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!std.mem.eql(u8, section_name, ".text")) {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
);
continue;
}
}
if (std.mem.eql(u8, exp.options.name, "_start")) {
self.entry_addr = decl.link.coff.getVAddr(self.*) - image_base;
} else {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}),
);
continue;
}
}
}
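/// Patches any header fields left dirty: the `.text` raw size, SizeOfImage when the virtual
/// layout grew, and AddressOfEntryPoint for executables. Also records whether an entry point
/// was found so the error can be surfaced.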
pub fn flush(self: *Coff, module: *Module) !void {
if (self.text_section_size_dirty) {
// Write the new raw size in the .text header
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, self.text_section_size);
try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 16);
try self.base.file.?.setEndPos(self.section_data_offset + self.offset_table_size + self.text_section_size);
self.text_section_size_dirty = false;
}
if (self.base.options.output_mode == .Exe and self.size_of_image_dirty) {
const new_size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + self.text_section_size, section_alignment);
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, new_size_of_image);
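// (SizeOfImage sits at offset 56 of both the PE32 and PE32+ optional headers.)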
try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 56);
self.size_of_image_dirty = false;
}
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
log.debug("flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
if (self.base.options.output_mode == .Exe) {
// Write AddressOfEntryPoint
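// (AddressOfEntryPoint is at offset 16 of the optional header; entry_addr is already an RVA
// since image_base was subtracted when the `_start` export was registered.)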
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, self.entry_addr.?);
try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 16);
}
}
}
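/// Returns the virtual address of `decl`'s machine code within the loaded image.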
pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
return self.text_section_virtual_address + decl.link.coff.text_offset;
}
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
// TODO Implement this
}
pub fn deinit(self: *Coff) void {
self.text_block_free_list.deinit(self.base.allocator);
self.offset_table.deinit(self.base.allocator);
self.offset_table_free_list.deinit(self.base.allocator);
}

View File

@ -1735,7 +1735,13 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
} else {
// TODO implement .debug_info for global variables
}
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer, &dbg_info_type_relocs);
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{
.dwarf = .{
.dbg_line = &dbg_line_buffer,
.dbg_info = &dbg_info_buffer,
.dbg_info_type_relocs = &dbg_info_type_relocs,
},
});
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,

View File

@ -316,31 +316,8 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void {
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_line_buffer.deinit();
var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator);
defer dbg_info_buffer.deinit();
var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{};
defer {
var it = dbg_info_type_relocs.iterator();
while (it.next()) |entry| {
entry.value.relocs.deinit(self.base.allocator);
}
dbg_info_type_relocs.deinit(self.base.allocator);
}
const typed_value = decl.typed_value.most_recent.typed_value;
const res = try codegen.generateSymbol(
&self.base,
decl.src(),
typed_value,
&code_buffer,
&dbg_line_buffer,
&dbg_info_buffer,
&dbg_info_type_relocs,
);
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
const code = switch (res) {
.externally_managed => |x| x,

Binary file not shown.

View File

@ -153,8 +153,8 @@ const usage_build_generic =
\\ elf Executable and Linking Format
\\ c Compile to C source code
\\ wasm WebAssembly
\\ pe Portable Executable (Windows)
\\ coff (planned) Common Object File Format (Windows)
\\ pe (planned) Portable Executable (Windows)
\\ macho (planned) macOS relocatables
\\ hex (planned) Intel IHEX
\\ raw (planned) Dump machine code directly
@ -451,7 +451,7 @@ fn buildOutputType(
} else if (mem.eql(u8, ofmt, "coff")) {
break :blk .coff;
} else if (mem.eql(u8, ofmt, "pe")) {
break :blk .coff;
break :blk .pe;
} else if (mem.eql(u8, ofmt, "macho")) {
break :blk .macho;
} else if (mem.eql(u8, ofmt, "wasm")) {
@ -524,17 +524,19 @@ fn buildOutputType(
try stderr.print("\nUnable to parse command: {}\n", .{@errorName(err)});
continue;
}) |line| {
if (mem.eql(u8, line, "update")) {
const actual_line = mem.trimRight(u8, line, "\r\n ");
if (mem.eql(u8, actual_line, "update")) {
if (output_mode == .Exe) {
try module.makeBinFileWritable();
}
try updateModule(gpa, &module, zir_out_path);
} else if (mem.eql(u8, line, "exit")) {
} else if (mem.eql(u8, actual_line, "exit")) {
break;
} else if (mem.eql(u8, line, "help")) {
} else if (mem.eql(u8, actual_line, "help")) {
try stderr.writeAll(repl_help);
} else {
try stderr.print("unknown command: {}\n", .{line});
try stderr.print("unknown command: {}\n", .{actual_line});
}
} else {
break;