Merge pull request #24322 from ziglang/delete-dead-backends

delete abandoned backends
Andrew Kelley, 2025-07-03 04:57:25 +02:00 (committed by GitHub)
commit 31bc6d5a9d
18 changed files with 15 additions and 17546 deletions


@@ -549,45 +549,6 @@ set(ZIG_STAGE2_SOURCES
src/Value.zig
src/Zcu.zig
src/Zcu/PerThread.zig
src/arch/aarch64/CodeGen.zig
src/arch/aarch64/Emit.zig
src/arch/aarch64/Mir.zig
src/arch/aarch64/abi.zig
src/arch/aarch64/bits.zig
src/arch/arm/CodeGen.zig
src/arch/arm/Emit.zig
src/arch/arm/Mir.zig
src/arch/arm/abi.zig
src/arch/arm/bits.zig
src/arch/powerpc/CodeGen.zig
src/arch/riscv64/abi.zig
src/arch/riscv64/bits.zig
src/arch/riscv64/CodeGen.zig
src/arch/riscv64/Emit.zig
src/arch/riscv64/encoding.zig
src/arch/riscv64/Lower.zig
src/arch/riscv64/Mir.zig
src/arch/riscv64/mnem.zig
src/arch/sparc64/CodeGen.zig
src/arch/sparc64/Emit.zig
src/arch/sparc64/Mir.zig
src/arch/sparc64/abi.zig
src/arch/sparc64/bits.zig
src/arch/wasm/CodeGen.zig
src/arch/wasm/Emit.zig
src/arch/wasm/Mir.zig
src/arch/wasm/abi.zig
src/arch/x86/bits.zig
src/arch/x86_64/CodeGen.zig
src/arch/x86_64/Disassembler.zig
src/arch/x86_64/Emit.zig
src/arch/x86_64/Encoding.zig
src/arch/x86_64/Lower.zig
src/arch/x86_64/Mir.zig
src/arch/x86_64/abi.zig
src/arch/x86_64/bits.zig
src/arch/x86_64/encoder.zig
src/arch/x86_64/encodings.zon
src/clang.zig
src/clang_options.zig
src/clang_options_data.zig

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -1,568 +0,0 @@
//! Machine Intermediate Representation.
//! This data is produced by AArch64 Codegen or AArch64 assembly parsing.
//! These instructions have a 1:1 correspondence with machine code instructions
//! for the target. MIR can be lowered to source-annotated textual assembly code
//! instructions, or it can be lowered to machine code.
//! The main purpose of MIR is to postpone the assignment of offsets until Isel,
//! so that, for example, the smaller encodings of jump instructions can be used.
const Mir = @This();
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const InternPool = @import("../../InternPool.zig");
const Emit = @import("Emit.zig");
const codegen = @import("../../codegen.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
max_end_stack: u32,
saved_regs_stack_space: u32,
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,
pub const Inst = struct {
tag: Tag,
/// The meaning of this depends on `tag`.
data: Data,
pub const Tag = enum(u16) {
/// Add (immediate)
add_immediate,
/// Add, update condition flags (immediate)
adds_immediate,
/// Add (shifted register)
add_shifted_register,
/// Add, update condition flags (shifted register)
adds_shifted_register,
/// Add (extended register)
add_extended_register,
/// Add, update condition flags (extended register)
adds_extended_register,
/// Bitwise AND (shifted register)
and_shifted_register,
/// Arithmetic Shift Right (immediate)
asr_immediate,
/// Arithmetic Shift Right (register)
asr_register,
/// Branch conditionally
b_cond,
/// Branch
b,
/// Branch with Link
bl,
/// Branch with Link to Register
blr,
/// Breakpoint
brk,
/// Pseudo-instruction: Call extern
call_extern,
/// Compare and Branch on Zero
cbz,
/// Compare (immediate)
cmp_immediate,
/// Compare (shifted register)
cmp_shifted_register,
/// Compare (extended register)
cmp_extended_register,
/// Conditional Select
csel,
/// Conditional set
cset,
/// Pseudo-instruction: End of prologue
dbg_prologue_end,
/// Pseudo-instruction: Beginning of epilogue
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
/// Bitwise Exclusive OR (immediate)
eor_immediate,
/// Bitwise Exclusive OR (shifted register)
eor_shifted_register,
/// Loads the contents into a register
///
/// Payload is `LoadMemoryPie`
load_memory_got,
/// Loads the contents into a register
///
/// Payload is `LoadMemoryPie`
load_memory_direct,
/// Loads the contents into a register
///
/// Payload is `LoadMemoryPie`
load_memory_import,
/// Loads the address into a register
///
/// Payload is `LoadMemoryPie`
load_memory_ptr_got,
/// Loads the address into a register
///
/// Payload is `LoadMemoryPie`
load_memory_ptr_direct,
/// Load Pair of Registers
ldp,
/// Pseudo-instruction: Load pointer to stack item
ldr_ptr_stack,
/// Pseudo-instruction: Load pointer to stack argument
ldr_ptr_stack_argument,
/// Pseudo-instruction: Load from stack
ldr_stack,
/// Pseudo-instruction: Load from stack argument
ldr_stack_argument,
/// Load Register (immediate)
ldr_immediate,
/// Load Register (register)
ldr_register,
/// Pseudo-instruction: Load byte from stack
ldrb_stack,
/// Pseudo-instruction: Load byte from stack argument
ldrb_stack_argument,
/// Load Register Byte (immediate)
ldrb_immediate,
/// Load Register Byte (register)
ldrb_register,
/// Pseudo-instruction: Load halfword from stack
ldrh_stack,
/// Pseudo-instruction: Load halfword from stack argument
ldrh_stack_argument,
/// Load Register Halfword (immediate)
ldrh_immediate,
/// Load Register Halfword (register)
ldrh_register,
/// Load Register Signed Byte (immediate)
ldrsb_immediate,
/// Pseudo-instruction: Load signed byte from stack
ldrsb_stack,
/// Pseudo-instruction: Load signed byte from stack argument
ldrsb_stack_argument,
/// Load Register Signed Halfword (immediate)
ldrsh_immediate,
/// Pseudo-instruction: Load signed halfword from stack
ldrsh_stack,
/// Pseudo-instruction: Load signed halfword from stack argument
ldrsh_stack_argument,
/// Load Register Signed Word (immediate)
ldrsw_immediate,
/// Logical Shift Left (immediate)
lsl_immediate,
/// Logical Shift Left (register)
lsl_register,
/// Logical Shift Right (immediate)
lsr_immediate,
/// Logical Shift Right (register)
lsr_register,
/// Move (to/from SP)
mov_to_from_sp,
/// Move (register)
mov_register,
/// Move wide with keep
movk,
/// Move wide with zero
movz,
/// Multiply-subtract
msub,
/// Multiply
mul,
/// Bitwise NOT
mvn,
/// No Operation
nop,
/// Bitwise inclusive OR (shifted register)
orr_shifted_register,
/// Pseudo-instruction: Pop multiple registers
pop_regs,
/// Pseudo-instruction: Push multiple registers
push_regs,
/// Return from subroutine
ret,
/// Signed bitfield extract
sbfx,
/// Signed divide
sdiv,
/// Signed multiply high
smulh,
/// Signed multiply long
smull,
/// Signed extend byte
sxtb,
/// Signed extend halfword
sxth,
/// Signed extend word
sxtw,
/// Store Pair of Registers
stp,
/// Pseudo-instruction: Store to stack
str_stack,
/// Store Register (immediate)
str_immediate,
/// Store Register (register)
str_register,
/// Pseudo-instruction: Store byte to stack
strb_stack,
/// Store Register Byte (immediate)
strb_immediate,
/// Store Register Byte (register)
strb_register,
/// Pseudo-instruction: Store halfword to stack
strh_stack,
/// Store Register Halfword (immediate)
strh_immediate,
/// Store Register Halfword (register)
strh_register,
/// Subtract (immediate)
sub_immediate,
/// Subtract, update condition flags (immediate)
subs_immediate,
/// Subtract (shifted register)
sub_shifted_register,
/// Subtract, update condition flags (shifted register)
subs_shifted_register,
/// Subtract (extended register)
sub_extended_register,
/// Subtract, update condition flags (extended register)
subs_extended_register,
/// Supervisor Call
svc,
/// Test bits (immediate)
tst_immediate,
/// Unsigned bitfield extract
ubfx,
/// Unsigned divide
udiv,
/// Unsigned multiply high
umulh,
/// Unsigned multiply long
umull,
/// Unsigned extend byte
uxtb,
/// Unsigned extend halfword
uxth,
};
/// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
/// All instructions have an 8-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
/// how to interpret the data within.
pub const Data = union {
/// No additional data
///
/// Used by e.g. nop
nop: void,
/// Another instruction
///
/// Used by e.g. b
inst: Index,
/// Relocation for the linker where:
/// * `atom_index` is the index of the source
/// * `sym_index` is the index of the target
///
/// Used by e.g. call_extern
relocation: struct {
/// Index of the containing atom.
atom_index: u32,
/// Index into the linker's string table.
sym_index: u32,
},
/// A 16-bit immediate value.
///
/// Used by e.g. svc
imm16: u16,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
payload: u32,
/// A register
///
/// Used by e.g. blr
reg: Register,
/// Multiple registers
///
/// Used by e.g. pop_regs
reg_list: u32,
/// Another instruction and a condition
///
/// Used by e.g. b_cond
inst_cond: struct {
inst: Index,
cond: bits.Instruction.Condition,
},
/// A register, an unsigned 16-bit immediate, and an optional shift
///
/// Used by e.g. movz
r_imm16_sh: struct {
rd: Register,
imm16: u16,
hw: u2 = 0,
},
/// A register and a condition
///
/// Used by e.g. cset
r_cond: struct {
rd: Register,
cond: bits.Instruction.Condition,
},
/// A register and another instruction
///
/// Used by e.g. cbz
r_inst: struct {
rt: Register,
inst: Index,
},
/// A register, an unsigned 12-bit immediate, and an optional shift
///
/// Used by e.g. cmp_immediate
r_imm12_sh: struct {
rn: Register,
imm12: u12,
sh: u1 = 0,
},
/// Two registers
///
/// Used by e.g. mov_register
rr: struct {
rd: Register,
rn: Register,
},
/// Two registers, an unsigned 12-bit immediate, and an optional shift
///
/// Used by e.g. sub_immediate
rr_imm12_sh: struct {
rd: Register,
rn: Register,
imm12: u12,
sh: u1 = 0,
},
/// Two registers and a shift (shift type and 6-bit amount)
///
/// Used by e.g. cmp_shifted_register
rr_imm6_shift: struct {
rn: Register,
rm: Register,
imm6: u6,
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
},
/// Two registers with sign-extension (extension type and 3-bit shift amount)
///
/// Used by e.g. cmp_extended_register
rr_extend_shift: struct {
rn: Register,
rm: Register,
ext_type: bits.Instruction.AddSubtractExtendedRegisterOption,
imm3: u3,
},
/// Two registers and a shift (logical instruction version)
/// (shift type and 6-bit amount)
///
/// Used by e.g. mvn
rr_imm6_logical_shift: struct {
rd: Register,
rm: Register,
imm6: u6,
shift: bits.Instruction.LogicalShiftedRegisterShift,
},
/// Two registers and an lsb (range 0-63) and a width (range
/// 1-64)
///
/// Used by e.g. ubfx
rr_lsb_width: struct {
rd: Register,
rn: Register,
lsb: u6,
width: u7,
},
/// Two registers and a bitmask immediate
///
/// Used by e.g. eor_immediate
rr_bitmask: struct {
rd: Register,
rn: Register,
imms: u6,
immr: u6,
n: u1,
},
/// Two registers and a 6-bit unsigned shift
///
/// Used by e.g. lsl_immediate
rr_shift: struct {
rd: Register,
rn: Register,
shift: u6,
},
/// Three registers
///
/// Used by e.g. mul
rrr: struct {
rd: Register,
rn: Register,
rm: Register,
},
/// Three registers and a condition
///
/// Used by e.g. csel
rrr_cond: struct {
rd: Register,
rn: Register,
rm: Register,
cond: bits.Instruction.Condition,
},
/// Three registers and a shift (shift type and 6-bit amount)
///
/// Used by e.g. add_shifted_register
rrr_imm6_shift: struct {
rd: Register,
rn: Register,
rm: Register,
imm6: u6,
shift: bits.Instruction.AddSubtractShiftedRegisterShift,
},
/// Three registers with sign-extension (extension type and 3-bit shift amount)
///
/// Used by e.g. add_extended_register
rrr_extend_shift: struct {
rd: Register,
rn: Register,
rm: Register,
ext_type: bits.Instruction.AddSubtractExtendedRegisterOption,
imm3: u3,
},
/// Three registers and a shift (logical instruction version)
/// (shift type and 6-bit amount)
///
/// Used by e.g. eor_shifted_register
rrr_imm6_logical_shift: struct {
rd: Register,
rn: Register,
rm: Register,
imm6: u6,
shift: bits.Instruction.LogicalShiftedRegisterShift,
},
/// Two registers and a LoadStoreOffsetImmediate
///
/// Used by e.g. str_immediate
load_store_register_immediate: struct {
rt: Register,
rn: Register,
offset: bits.Instruction.LoadStoreOffsetImmediate,
},
/// Two registers and a LoadStoreOffsetRegister
///
/// Used by e.g. str_register
load_store_register_register: struct {
rt: Register,
rn: Register,
offset: bits.Instruction.LoadStoreOffsetRegister,
},
/// A register and a stack offset
///
/// Used by e.g. str_stack
load_store_stack: struct {
rt: Register,
offset: u32,
},
/// Three registers and a LoadStorePairOffset
///
/// Used by e.g. stp
load_store_register_pair: struct {
rt: Register,
rt2: Register,
rn: Register,
offset: bits.Instruction.LoadStorePairOffset,
},
/// Four registers
///
/// Used by e.g. msub
rrrr: struct {
rd: Register,
rn: Register,
rm: Register,
ra: Register,
},
/// Debug info: line and column
///
/// Used by e.g. dbg_line
dbg_line_column: struct {
line: u32,
column: u32,
},
};
// Make sure we don't accidentally make instructions bigger than expected.
// Note that in safety builds, Zig is allowed to insert a secret field for safety checks.
comptime {
if (!std.debug.runtime_safety) {
assert(@sizeOf(Data) == 8);
}
}
};
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
mir.* = undefined;
}
pub fn emit(
mir: Mir,
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const nav = func.owner_nav;
const mod = zcu.navFileScope(nav).mod.?;
var e: Emit = .{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
.target = &mod.resolved_target.result,
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
.prev_di_line = func.lbrace_line,
.prev_di_column = func.lbrace_column,
.stack_size = mir.max_end_stack,
.saved_regs_stack_space = mir.saved_regs_stack_space,
};
defer e.deinit();
e.emitMir() catch |err| switch (err) {
error.EmitFail => return zcu.codegenFailMsg(nav, e.err_msg.?),
else => |e1| return e1,
};
}
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
const fields = std.meta.fields(T);
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
i32 => @as(i32, @bitCast(mir.extra[i])),
else => @compileError("bad field type"),
};
i += 1;
}
return .{
.data = result,
.end = i,
};
}
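// Illustrative sketch, not part of the original file: a hypothetical call
// site decoding a `LoadMemoryPie` payload with `extraData`. The struct's
// fields are read one u32 at a time from `mir.extra`, and `end` is the
// index of any trailing data that follows the struct:
//
//   const payload = mir.instructions.items(.data)[inst].payload;
//   const meta = mir.extraData(Mir.LoadMemoryPie, payload);
//   const pie = meta.data; // register, atom_index, sym_index
//   const trailer_start = meta.end; // first u32 after the struct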
pub const LoadMemoryPie = struct {
register: u32,
/// Index of the containing atom.
atom_index: u32,
/// Index into the linker's symbol table.
sym_index: u32,
};

File diff suppressed because it is too large


@@ -1,712 +0,0 @@
//! This file contains the functionality for lowering AArch32 MIR into
//! machine code.
const Emit = @This();
const builtin = @import("builtin");
const std = @import("std");
const math = std.math;
const Mir = @import("Mir.zig");
const bits = @import("bits.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
const Type = @import("../../Type.zig");
const ErrorMsg = Zcu.ErrorMsg;
const Target = std.Target;
const assert = std.debug.assert;
const Instruction = bits.Instruction;
const Register = bits.Register;
const log = std.log.scoped(.aarch32_emit);
const CodeGen = @import("CodeGen.zig");
mir: Mir,
bin_file: *link.File,
debug_output: link.File.DebugInfoOutput,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Zcu.LazySrcLoc,
code: *std.ArrayListUnmanaged(u8),
prev_di_line: u32,
prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,
/// The amount of stack space consumed by the saved callee-saved
/// registers in bytes
saved_regs_stack_space: u32,
/// The final stack frame size of the function (already aligned to the
/// respective stack alignment). Does not include prologue stack space.
stack_size: u32,
/// The branch type of every branch
branch_types: std.AutoHashMapUnmanaged(Mir.Inst.Index, BranchType) = .empty,
/// For every forward branch, maps the target instruction to a list of
/// branches which branch to this target instruction
branch_forward_origins: std.AutoHashMapUnmanaged(Mir.Inst.Index, std.ArrayListUnmanaged(Mir.Inst.Index)) = .empty,
/// For backward branches: stores the code offset of the target
/// instruction
///
/// For forward branches: stores the code offset of the branch
/// instruction
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .empty,
const InnerError = error{
OutOfMemory,
EmitFail,
};
const BranchType = enum {
b,
fn default(tag: Mir.Inst.Tag) BranchType {
return switch (tag) {
.b => .b,
else => unreachable,
};
}
};
pub fn emitMir(
emit: *Emit,
) !void {
const mir_tags = emit.mir.instructions.items(.tag);
// Find smallest lowerings for branch instructions
try emit.lowerBranches();
// Emit machine code
for (mir_tags, 0..) |tag, index| {
const inst = @as(u32, @intCast(index));
switch (tag) {
.add => try emit.mirDataProcessing(inst),
.adds => try emit.mirDataProcessing(inst),
.@"and" => try emit.mirDataProcessing(inst),
.cmp => try emit.mirDataProcessing(inst),
.eor => try emit.mirDataProcessing(inst),
.mov => try emit.mirDataProcessing(inst),
.mvn => try emit.mirDataProcessing(inst),
.orr => try emit.mirDataProcessing(inst),
.rsb => try emit.mirDataProcessing(inst),
.sub => try emit.mirDataProcessing(inst),
.subs => try emit.mirDataProcessing(inst),
.sub_sp_scratch_r4 => try emit.mirSubStackPointer(inst),
.asr => try emit.mirShift(inst),
.lsl => try emit.mirShift(inst),
.lsr => try emit.mirShift(inst),
.b => try emit.mirBranch(inst),
.undefined_instruction => try emit.mirUndefinedInstruction(),
.bkpt => try emit.mirExceptionGeneration(inst),
.blx => try emit.mirBranchExchange(inst),
.bx => try emit.mirBranchExchange(inst),
.dbg_line => try emit.mirDbgLine(inst),
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
.dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),
.ldr => try emit.mirLoadStore(inst),
.ldrb => try emit.mirLoadStore(inst),
.str => try emit.mirLoadStore(inst),
.strb => try emit.mirLoadStore(inst),
.ldr_ptr_stack_argument => try emit.mirLoadStackArgument(inst),
.ldr_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrb_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrh_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrsb_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrsh_stack_argument => try emit.mirLoadStackArgument(inst),
.ldrh => try emit.mirLoadStoreExtra(inst),
.ldrsb => try emit.mirLoadStoreExtra(inst),
.ldrsh => try emit.mirLoadStoreExtra(inst),
.strh => try emit.mirLoadStoreExtra(inst),
.movw => try emit.mirSpecialMove(inst),
.movt => try emit.mirSpecialMove(inst),
.mul => try emit.mirMultiply(inst),
.smulbb => try emit.mirMultiply(inst),
.smull => try emit.mirMultiplyLong(inst),
.umull => try emit.mirMultiplyLong(inst),
.nop => try emit.mirNop(),
.pop => try emit.mirBlockDataTransfer(inst),
.push => try emit.mirBlockDataTransfer(inst),
.svc => try emit.mirSupervisorCall(inst),
.sbfx => try emit.mirBitFieldExtract(inst),
.ubfx => try emit.mirBitFieldExtract(inst),
}
}
}
pub fn deinit(emit: *Emit) void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
var iter = emit.branch_forward_origins.valueIterator();
while (iter.next()) |origin_list| {
origin_list.deinit(gpa);
}
emit.branch_types.deinit(gpa);
emit.branch_forward_origins.deinit(gpa);
emit.code_offset_mapping.deinit(gpa);
emit.* = undefined;
}
fn optimalBranchType(emit: *Emit, tag: Mir.Inst.Tag, offset: i64) !BranchType {
assert(std.mem.isAlignedGeneric(i64, offset, 4)); // misaligned offset
switch (tag) {
.b => {
if (std.math.cast(i24, @divExact(offset, 4))) |_| {
return BranchType.b;
} else {
return emit.fail("TODO support larger branches", .{});
}
},
else => unreachable,
}
}
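// Note (added for clarity, assuming the standard A32 encoding): the i24 cast
// above reflects the fact that `b` stores a signed 24-bit word offset, giving
// a reach of roughly +/-32 MiB from PC+8; anything farther would need the
// multi-instruction lowering left here as a TODO.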
fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
const tag = emit.mir.instructions.items(.tag)[inst];
if (isBranch(tag)) {
switch (emit.branch_types.get(inst).?) {
.b => return 4,
}
}
switch (tag) {
.dbg_line,
.dbg_epilogue_begin,
.dbg_prologue_end,
=> return 0,
.sub_sp_scratch_r4 => {
const imm32 = emit.mir.instructions.items(.data)[inst].imm32;
if (imm32 == 0) {
return 0 * 4;
} else if (Instruction.Operand.fromU32(imm32) != null) {
// sub
return 1 * 4;
} else if (emit.target.cpu.has(.arm, .has_v7)) {
// movw; movt; sub
return 3 * 4;
} else {
// mov; orr; orr; orr; sub
return 5 * 4;
}
},
else => return 4,
}
}
fn isBranch(tag: Mir.Inst.Tag) bool {
return switch (tag) {
.b => true,
else => false,
};
}
fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
const tag = emit.mir.instructions.items(.tag)[inst];
switch (tag) {
.b => return emit.mir.instructions.items(.data)[inst].inst,
else => unreachable,
}
}
fn lowerBranches(emit: *Emit) !void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
const mir_tags = emit.mir.instructions.items(.tag);
// First pass: Note down all branches and their target
// instructions, i.e. populate branch_types,
// branch_forward_origins, and code_offset_mapping
//
// TODO optimization opportunity: do this in codegen while
// generating MIR
for (mir_tags, 0..) |tag, index| {
const inst = @as(u32, @intCast(index));
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
// Remember this branch instruction
try emit.branch_types.put(gpa, inst, BranchType.default(tag));
// Forward branches require extra bookkeeping: We only
// know their offset once we arrive at the target
// instruction. Therefore, we need to be able to
// access the branch instruction when we visit the
// target instruction in order to manipulate its type
// etc.
if (target_inst > inst) {
// Remember the branch instruction index
try emit.code_offset_mapping.put(gpa, inst, 0);
if (emit.branch_forward_origins.getPtr(target_inst)) |origin_list| {
try origin_list.append(gpa, inst);
} else {
var origin_list: std.ArrayListUnmanaged(Mir.Inst.Index) = .empty;
try origin_list.append(gpa, inst);
try emit.branch_forward_origins.put(gpa, target_inst, origin_list);
}
}
// Remember the target instruction index so that we
// update the real code offset in all future passes
//
// putNoClobber may not be used as the put operation
// may clobber the entry when multiple branches branch
// to the same target instruction
try emit.code_offset_mapping.put(gpa, target_inst, 0);
}
}
// Further passes: Until all branches are lowered, iterate
// through all instructions and calculate new offsets and
// potentially new branch types
var all_branches_lowered = false;
while (!all_branches_lowered) {
all_branches_lowered = true;
var current_code_offset: usize = 0;
for (mir_tags, 0..) |tag, index| {
const inst = @as(u32, @intCast(index));
// If this instruction is contained in the code offset
// mapping (when it is a target of a branch or if it is a
// forward branch), update the code offset
if (emit.code_offset_mapping.getPtr(inst)) |offset| {
offset.* = current_code_offset;
}
// If this instruction is a backward branch, calculate the
// offset, which may potentially update the branch type
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
if (target_inst < inst) {
const target_offset = emit.code_offset_mapping.get(target_inst).?;
const offset = @as(i64, @intCast(target_offset)) - @as(i64, @intCast(current_code_offset + 8));
const branch_type = emit.branch_types.getPtr(inst).?;
const optimal_branch_type = try emit.optimalBranchType(tag, offset);
if (branch_type.* != optimal_branch_type) {
branch_type.* = optimal_branch_type;
all_branches_lowered = false;
}
log.debug("lowerBranches: branch {} has offset {}", .{ inst, offset });
}
}
// If this instruction is the target of one or more
// forward branches, calculate the offset, which may
// potentially update the branch type
if (emit.branch_forward_origins.get(inst)) |origin_list| {
for (origin_list.items) |forward_branch_inst| {
const branch_tag = emit.mir.instructions.items(.tag)[forward_branch_inst];
const forward_branch_inst_offset = emit.code_offset_mapping.get(forward_branch_inst).?;
const offset = @as(i64, @intCast(current_code_offset)) - @as(i64, @intCast(forward_branch_inst_offset + 8));
const branch_type = emit.branch_types.getPtr(forward_branch_inst).?;
const optimal_branch_type = try emit.optimalBranchType(branch_tag, offset);
if (branch_type.* != optimal_branch_type) {
branch_type.* = optimal_branch_type;
all_branches_lowered = false;
}
log.debug("lowerBranches: branch {} has offset {}", .{ forward_branch_inst, offset });
}
}
// Increment code offset
current_code_offset += emit.instructionSize(inst);
}
}
}
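// Worked example (illustrative values, not from the original file): suppose
// inst 0 is a `b` forward to inst 2 and inst 1 is an ordinary 4-byte
// instruction. Visiting inst 2 at code offset 8 resolves the branch offset
// as 8 - (0 + 8) = 0, since A32 reads PC as the branch address plus 8. That
// fits the short `b` encoding, no branch type changes, and the loop settles
// after a single pass. If a type did change, instruction sizes would shift
// and the next pass would recompute every offset.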
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
const endian = emit.target.cpu.arch.endian();
std.mem.writeInt(u32, try emit.code.addManyAsArray(gpa, 4), instruction.toU32(), endian);
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@branchHint(.cold);
assert(emit.err_msg == null);
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
emit.err_msg = try ErrorMsg.create(gpa, emit.src_loc, format, args);
return error.EmitFail;
}
fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(self.prev_di_line));
const delta_pc: usize = self.code.items.len - self.prev_di_pc;
switch (self.debug_output) {
.dwarf => |dw| {
try dw.advancePCAndLine(delta_line, delta_pc);
self.prev_di_line = line;
self.prev_di_column = column;
self.prev_di_pc = self.code.items.len;
},
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
// increasing the line number
try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line);
// increasing the pc
const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
if (d_pc_p9 > 0) {
// minus one because if it's the last one, we want to leave space to change the line, which is one pc quantum
try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
if (dbg_out.pcop_change_index) |pci|
dbg_out.dbg_line.items[pci] += 1;
dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the pc quanta does it for us
} else unreachable;
if (dbg_out.start_line == null)
dbg_out.start_line = self.prev_di_line;
dbg_out.end_line = line;
// only do this if the pc changed
self.prev_di_line = line;
self.prev_di_column = column;
self.prev_di_pc = self.code.items.len;
},
.none => {},
}
}
fn mirDataProcessing(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
switch (tag) {
.add,
.adds,
.@"and",
.eor,
.orr,
.rsb,
.sub,
.subs,
=> {
const rr_op = emit.mir.instructions.items(.data)[inst].rr_op;
switch (tag) {
.add => try emit.writeInstruction(Instruction.add(cond, rr_op.rd, rr_op.rn, rr_op.op)),
.adds => try emit.writeInstruction(Instruction.adds(cond, rr_op.rd, rr_op.rn, rr_op.op)),
.@"and" => try emit.writeInstruction(Instruction.@"and"(cond, rr_op.rd, rr_op.rn, rr_op.op)),
.eor => try emit.writeInstruction(Instruction.eor(cond, rr_op.rd, rr_op.rn, rr_op.op)),
.orr => try emit.writeInstruction(Instruction.orr(cond, rr_op.rd, rr_op.rn, rr_op.op)),
.rsb => try emit.writeInstruction(Instruction.rsb(cond, rr_op.rd, rr_op.rn, rr_op.op)),
.sub => try emit.writeInstruction(Instruction.sub(cond, rr_op.rd, rr_op.rn, rr_op.op)),
.subs => try emit.writeInstruction(Instruction.subs(cond, rr_op.rd, rr_op.rn, rr_op.op)),
else => unreachable,
}
},
.cmp => {
const r_op_cmp = emit.mir.instructions.items(.data)[inst].r_op_cmp;
try emit.writeInstruction(Instruction.cmp(cond, r_op_cmp.rn, r_op_cmp.op));
},
.mov,
.mvn,
=> {
const r_op_mov = emit.mir.instructions.items(.data)[inst].r_op_mov;
switch (tag) {
.mov => try emit.writeInstruction(Instruction.mov(cond, r_op_mov.rd, r_op_mov.op)),
.mvn => try emit.writeInstruction(Instruction.mvn(cond, r_op_mov.rd, r_op_mov.op)),
else => unreachable,
}
},
else => unreachable,
}
}
fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const imm32 = emit.mir.instructions.items(.data)[inst].imm32;
switch (tag) {
.sub_sp_scratch_r4 => {
if (imm32 == 0) return;
const operand = Instruction.Operand.fromU32(imm32) orelse blk: {
const scratch: Register = .r4;
if (emit.target.cpu.has(.arm, .has_v7)) {
try emit.writeInstruction(Instruction.movw(cond, scratch, @as(u16, @truncate(imm32))));
try emit.writeInstruction(Instruction.movt(cond, scratch, @as(u16, @truncate(imm32 >> 16))));
} else {
try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32)), 0)));
try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 8)), 12)));
try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 16)), 8)));
try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@as(u8, @truncate(imm32 >> 24)), 4)));
}
break :blk Instruction.Operand.reg(scratch, Instruction.Operand.Shift.none);
};
try emit.writeInstruction(Instruction.sub(cond, .sp, .sp, operand));
},
else => unreachable,
}
}
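// Illustrative expansion for a hypothetical imm32 of 0x12345678, which no
// single rotated-immediate operand can encode. On ARMv7+ the path above
// emits:
//
//   movw r4, #0x5678
//   movt r4, #0x1234
//   sub  sp, sp, r4
//
// Pre-v7, the immediate is assembled one byte at a time instead:
//
//   mov r4, #0x78
//   orr r4, r4, #0x5600
//   orr r4, r4, #0x340000
//   orr r4, r4, #0x12000000
//   sub sp, sp, r4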
fn mirShift(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const rr_shift = emit.mir.instructions.items(.data)[inst].rr_shift;
switch (tag) {
.asr => try emit.writeInstruction(Instruction.asr(cond, rr_shift.rd, rr_shift.rm, rr_shift.shift_amount)),
.lsl => try emit.writeInstruction(Instruction.lsl(cond, rr_shift.rd, rr_shift.rm, rr_shift.shift_amount)),
.lsr => try emit.writeInstruction(Instruction.lsr(cond, rr_shift.rd, rr_shift.rm, rr_shift.shift_amount)),
else => unreachable,
}
}
fn mirBranch(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const target_inst = emit.mir.instructions.items(.data)[inst].inst;
const offset = @as(i64, @intCast(emit.code_offset_mapping.get(target_inst).?)) - @as(i64, @intCast(emit.code.items.len + 8));
const branch_type = emit.branch_types.get(inst).?;
switch (branch_type) {
.b => switch (tag) {
.b => try emit.writeInstruction(Instruction.b(cond, @as(i26, @intCast(offset)))),
else => unreachable,
},
}
}
fn mirUndefinedInstruction(emit: *Emit) !void {
try emit.writeInstruction(Instruction.undefinedInstruction());
}
fn mirExceptionGeneration(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const imm16 = emit.mir.instructions.items(.data)[inst].imm16;
switch (tag) {
.bkpt => try emit.writeInstruction(Instruction.bkpt(imm16)),
else => unreachable,
}
}
fn mirBranchExchange(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const reg = emit.mir.instructions.items(.data)[inst].reg;
switch (tag) {
.blx => try emit.writeInstruction(Instruction.blx(cond, reg)),
.bx => try emit.writeInstruction(Instruction.bx(cond, reg)),
else => unreachable,
}
}
fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column;
switch (tag) {
.dbg_line => try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column),
else => unreachable,
}
}
fn mirDebugPrologueEnd(emit: *Emit) !void {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
fn mirDebugEpilogueBegin(emit: *Emit) !void {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
fn mirLoadStore(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const rr_offset = emit.mir.instructions.items(.data)[inst].rr_offset;
switch (tag) {
.ldr => try emit.writeInstruction(Instruction.ldr(cond, rr_offset.rt, rr_offset.rn, rr_offset.offset)),
.ldrb => try emit.writeInstruction(Instruction.ldrb(cond, rr_offset.rt, rr_offset.rn, rr_offset.offset)),
.str => try emit.writeInstruction(Instruction.str(cond, rr_offset.rt, rr_offset.rn, rr_offset.offset)),
.strb => try emit.writeInstruction(Instruction.strb(cond, rr_offset.rt, rr_offset.rn, rr_offset.offset)),
else => unreachable,
}
}
fn mirLoadStackArgument(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const r_stack_offset = emit.mir.instructions.items(.data)[inst].r_stack_offset;
const rt = r_stack_offset.rt;
const raw_offset = emit.stack_size + emit.saved_regs_stack_space + r_stack_offset.stack_offset;
switch (tag) {
.ldr_ptr_stack_argument => {
const operand = Instruction.Operand.fromU32(raw_offset) orelse
return emit.fail("TODO mirLoadStack larger offsets", .{});
try emit.writeInstruction(Instruction.add(cond, rt, .sp, operand));
},
.ldr_stack_argument,
.ldrb_stack_argument,
=> {
const offset = if (raw_offset <= math.maxInt(u12)) blk: {
break :blk Instruction.Offset.imm(@as(u12, @intCast(raw_offset)));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
switch (tag) {
.ldr_stack_argument => try emit.writeInstruction(Instruction.ldr(cond, rt, .sp, .{ .offset = offset })),
.ldrb_stack_argument => try emit.writeInstruction(Instruction.ldrb(cond, rt, .sp, .{ .offset = offset })),
else => unreachable,
}
},
.ldrh_stack_argument,
.ldrsb_stack_argument,
.ldrsh_stack_argument,
=> {
const offset = if (raw_offset <= math.maxInt(u8)) blk: {
break :blk Instruction.ExtraLoadStoreOffset.imm(@as(u8, @intCast(raw_offset)));
} else return emit.fail("TODO mirLoadStack larger offsets", .{});
switch (tag) {
.ldrh_stack_argument => try emit.writeInstruction(Instruction.ldrh(cond, rt, .sp, .{ .offset = offset })),
.ldrsb_stack_argument => try emit.writeInstruction(Instruction.ldrsb(cond, rt, .sp, .{ .offset = offset })),
.ldrsh_stack_argument => try emit.writeInstruction(Instruction.ldrsh(cond, rt, .sp, .{ .offset = offset })),
else => unreachable,
}
},
else => unreachable,
}
}
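// Note (added for clarity, based on the standard A32 encodings): the two
// offset limits above differ because plain ldr/ldrb take a 12-bit unsigned
// immediate offset, while the "extra" load/store forms (ldrh, ldrsb, ldrsh)
// only encode an 8-bit immediate, hence maxInt(u12) versus maxInt(u8).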
fn mirLoadStoreExtra(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const rr_extra_offset = emit.mir.instructions.items(.data)[inst].rr_extra_offset;
switch (tag) {
.ldrh => try emit.writeInstruction(Instruction.ldrh(cond, rr_extra_offset.rt, rr_extra_offset.rn, rr_extra_offset.offset)),
.ldrsb => try emit.writeInstruction(Instruction.ldrsb(cond, rr_extra_offset.rt, rr_extra_offset.rn, rr_extra_offset.offset)),
.ldrsh => try emit.writeInstruction(Instruction.ldrsh(cond, rr_extra_offset.rt, rr_extra_offset.rn, rr_extra_offset.offset)),
.strh => try emit.writeInstruction(Instruction.strh(cond, rr_extra_offset.rt, rr_extra_offset.rn, rr_extra_offset.offset)),
else => unreachable,
}
}
fn mirSpecialMove(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const r_imm16 = emit.mir.instructions.items(.data)[inst].r_imm16;
switch (tag) {
.movw => try emit.writeInstruction(Instruction.movw(cond, r_imm16.rd, r_imm16.imm16)),
.movt => try emit.writeInstruction(Instruction.movt(cond, r_imm16.rd, r_imm16.imm16)),
else => unreachable,
}
}
fn mirMultiply(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const rrr = emit.mir.instructions.items(.data)[inst].rrr;
switch (tag) {
.mul => try emit.writeInstruction(Instruction.mul(cond, rrr.rd, rrr.rn, rrr.rm)),
.smulbb => try emit.writeInstruction(Instruction.smulbb(cond, rrr.rd, rrr.rn, rrr.rm)),
else => unreachable,
}
}
fn mirMultiplyLong(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const rrrr = emit.mir.instructions.items(.data)[inst].rrrr;
switch (tag) {
.smull => try emit.writeInstruction(Instruction.smull(cond, rrrr.rdlo, rrrr.rdhi, rrrr.rn, rrrr.rm)),
.umull => try emit.writeInstruction(Instruction.umull(cond, rrrr.rdlo, rrrr.rdhi, rrrr.rn, rrrr.rm)),
else => unreachable,
}
}
fn mirNop(emit: *Emit) !void {
try emit.writeInstruction(Instruction.nop());
}
fn mirBlockDataTransfer(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const register_list = emit.mir.instructions.items(.data)[inst].register_list;
switch (tag) {
.pop => try emit.writeInstruction(Instruction.ldm(cond, .sp, true, register_list)),
.push => try emit.writeInstruction(Instruction.stmdb(cond, .sp, true, register_list)),
else => unreachable,
}
}
fn mirSupervisorCall(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const imm24 = emit.mir.instructions.items(.data)[inst].imm24;
switch (tag) {
.svc => try emit.writeInstruction(Instruction.svc(cond, imm24)),
else => unreachable,
}
}
fn mirBitFieldExtract(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const cond = emit.mir.instructions.items(.cond)[inst];
const rr_lsb_width = emit.mir.instructions.items(.data)[inst].rr_lsb_width;
const rd = rr_lsb_width.rd;
const rn = rr_lsb_width.rn;
const lsb = rr_lsb_width.lsb;
const width = rr_lsb_width.width;
switch (tag) {
.sbfx => try emit.writeInstruction(Instruction.sbfx(cond, rd, rn, lsb, width)),
.ubfx => try emit.writeInstruction(Instruction.ubfx(cond, rd, rn, lsb, width)),
else => unreachable,
}
}


@@ -1,340 +0,0 @@
//! Machine Intermediate Representation.
//! This data is produced by ARM Codegen or ARM assembly parsing.
//! These instructions have a 1:1 correspondence with machine code instructions
//! for the target. MIR can be lowered to source-annotated textual assembly code
//! instructions, or it can be lowered to machine code.
//! The main purpose of MIR is to postpone the assignment of offsets until Isel,
//! so that, for example, the smaller encodings of jump instructions can be used.
const Mir = @This();
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const InternPool = @import("../../InternPool.zig");
const Emit = @import("Emit.zig");
const codegen = @import("../../codegen.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
max_end_stack: u32,
saved_regs_stack_space: u32,
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,
pub const Inst = struct {
tag: Tag,
cond: bits.Condition = .al,
/// The meaning of this depends on `tag`.
data: Data,
pub const Tag = enum(u16) {
/// Add
add,
/// Add, update condition flags
adds,
/// Bitwise AND
@"and",
/// Arithmetic Shift Right
asr,
/// Branch
b,
/// Undefined instruction
undefined_instruction,
/// Breakpoint
bkpt,
/// Branch with Link and Exchange
blx,
/// Branch and Exchange
bx,
/// Compare
cmp,
/// Pseudo-instruction: End of prologue
dbg_prologue_end,
/// Pseudo-instruction: Beginning of epilogue
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
/// Bitwise Exclusive OR
eor,
/// Load Register
ldr,
/// Pseudo-instruction: Load pointer to stack argument offset
ldr_ptr_stack_argument,
/// Pseudo-instruction: Load Register from stack argument
ldr_stack_argument,
/// Load Register Byte
ldrb,
/// Pseudo-instruction: Load Register Byte from stack argument
ldrb_stack_argument,
/// Load Register Halfword
ldrh,
/// Pseudo-instruction: Load Register Halfword from stack argument
ldrh_stack_argument,
/// Load Register Signed Byte
ldrsb,
/// Pseudo-instruction: Load Register Signed Byte from stack argument
ldrsb_stack_argument,
/// Load Register Signed Halfword
ldrsh,
/// Pseudo-instruction: Load Register Signed Halfword from stack argument
ldrsh_stack_argument,
/// Logical Shift Left
lsl,
/// Logical Shift Right
lsr,
/// Move
mov,
/// Move
movw,
/// Move Top
movt,
/// Multiply
mul,
/// Bitwise NOT
mvn,
/// No Operation
nop,
/// Bitwise OR
orr,
/// Pop multiple registers from Stack
pop,
/// Push multiple registers to Stack
push,
/// Reverse Subtract
rsb,
/// Signed Bit Field Extract
sbfx,
/// Signed Multiply (halfwords), bottom half, bottom half
smulbb,
/// Signed Multiply Long
smull,
/// Store Register
str,
/// Store Register Byte
strb,
/// Store Register Halfword
strh,
/// Subtract
sub,
/// Pseudo-instruction: Subtract 32-bit immediate from stack
///
/// r4 can be used by Emit as a scratch register for loading
/// the immediate
sub_sp_scratch_r4,
/// Subtract, update condition flags
subs,
/// Supervisor Call
svc,
/// Unsigned Bit Field Extract
ubfx,
/// Unsigned Multiply Long
umull,
};
/// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
/// All instructions have an 8-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
/// how to interpret the data within.
pub const Data = union {
/// No additional data
///
/// Used by e.g. nop
nop: void,
/// Another instruction
///
/// Used by e.g. b
inst: Index,
/// A 16-bit immediate value.
///
/// Used by e.g. bkpt
imm16: u16,
/// A 24-bit immediate value.
///
/// Used by e.g. svc
imm24: u24,
/// A 32-bit immediate value.
///
/// Used by e.g. sub_sp_scratch_r4
imm32: u32,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
///
/// Used by e.g. load_memory
payload: u32,
/// A register
///
/// Used by e.g. blx
reg: Register,
/// A register and a stack offset
///
/// Used by e.g. ldr_stack_argument
r_stack_offset: struct {
rt: Register,
stack_offset: u32,
},
/// A register and a 16-bit unsigned immediate
///
/// Used by e.g. movw
r_imm16: struct {
rd: Register,
imm16: u16,
},
/// A register and an operand
///
/// Used by mov and mvn
r_op_mov: struct {
rd: Register,
op: bits.Instruction.Operand,
},
/// A register and an operand
///
/// Used by cmp
r_op_cmp: struct {
rn: Register,
op: bits.Instruction.Operand,
},
/// Two registers and a shift amount
///
/// Used by e.g. lsl
rr_shift: struct {
rd: Register,
rm: Register,
shift_amount: bits.Instruction.ShiftAmount,
},
/// Two registers and an operand
///
/// Used by e.g. sub
rr_op: struct {
rd: Register,
rn: Register,
op: bits.Instruction.Operand,
},
/// Two registers and an offset
///
/// Used by e.g. ldr
rr_offset: struct {
rt: Register,
rn: Register,
offset: bits.Instruction.OffsetArgs,
},
/// Two registers and an extra load/store offset
///
/// Used by e.g. ldrh
rr_extra_offset: struct {
rt: Register,
rn: Register,
offset: bits.Instruction.ExtraLoadStoreOffsetArgs,
},
/// Two registers and an lsb (range 0-31) and a width (range
/// 1-32)
///
/// Used by e.g. sbfx
rr_lsb_width: struct {
rd: Register,
rn: Register,
lsb: u5,
width: u6,
},
/// Three registers
///
/// Used by e.g. mul
rrr: struct {
rd: Register,
rn: Register,
rm: Register,
},
/// Four registers
///
/// Used by e.g. smull
rrrr: struct {
rdlo: Register,
rdhi: Register,
rn: Register,
rm: Register,
},
/// An unordered list of registers
///
/// Used by e.g. push
register_list: bits.Instruction.RegisterList,
/// Debug info: line and column
///
/// Used by e.g. dbg_line
dbg_line_column: struct {
line: u32,
column: u32,
},
};
// Make sure we don't accidentally make instructions bigger than expected.
// Note that in safety builds, Zig is allowed to insert a secret field for safety checks.
comptime {
if (!std.debug.runtime_safety) {
assert(@sizeOf(Data) == 8);
}
}
};
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
gpa.free(mir.extra);
mir.* = undefined;
}
pub fn emit(
mir: Mir,
lf: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
const zcu = pt.zcu;
const func = zcu.funcInfo(func_index);
const nav = func.owner_nav;
const mod = zcu.navFileScope(nav).mod.?;
var e: Emit = .{
.mir = mir,
.bin_file = lf,
.debug_output = debug_output,
.target = &mod.resolved_target.result,
.src_loc = src_loc,
.code = code,
.prev_di_pc = 0,
.prev_di_line = func.lbrace_line,
.prev_di_column = func.lbrace_column,
.stack_size = mir.max_end_stack,
.saved_regs_stack_space = mir.saved_regs_stack_space,
};
defer e.deinit();
e.emitMir() catch |err| switch (err) {
error.EmitFail => return zcu.codegenFailMsg(nav, e.err_msg.?),
else => |e1| return e1,
};
}
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
const fields = std.meta.fields(T);
var i: usize = index;
var result: T = undefined;
inline for (fields) |field| {
@field(result, field.name) = switch (field.type) {
u32 => mir.extra[i],
i32 => @as(i32, @bitCast(mir.extra[i])),
else => @compileError("bad field type"),
};
i += 1;
}
return .{
.data = result,
.end = i,
};
}

File diff suppressed because it is too large


@@ -1,51 +0,0 @@
const builtin = @import("builtin");
const std = @import("std");
const Air = @import("../../Air.zig");
const codegen = @import("../../codegen.zig");
const InternPool = @import("../../InternPool.zig");
const link = @import("../../link.zig");
const Zcu = @import("../../Zcu.zig");
const assert = std.debug.assert;
const log = std.log.scoped(.codegen);
pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
return null;
}
pub fn generate(
bin_file: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
func_index: InternPool.Index,
air: *const Air,
liveness: *const Air.Liveness,
) codegen.CodeGenError!noreturn {
_ = bin_file;
_ = pt;
_ = src_loc;
_ = func_index;
_ = air;
_ = liveness;
unreachable;
}
pub fn generateLazy(
bin_file: *link.File,
pt: Zcu.PerThread,
src_loc: Zcu.LazySrcLoc,
lazy_sym: link.File.LazySymbol,
code: *std.ArrayListUnmanaged(u8),
debug_output: link.File.DebugInfoOutput,
) codegen.CodeGenError!void {
_ = bin_file;
_ = pt;
_ = src_loc;
_ = lazy_sym;
_ = code;
_ = debug_output;
unreachable;
}


@@ -17,7 +17,7 @@ const Compilation = @import("../../Compilation.zig");
const link = @import("../../link.zig");
const Air = @import("../../Air.zig");
const Mir = @import("Mir.zig");
const abi = @import("abi.zig");
const abi = @import("../../codegen/wasm/abi.zig");
const Alignment = InternPool.Alignment;
const errUnionPayloadOffset = codegen.errUnionPayloadOffset;
const errUnionErrorOffset = codegen.errUnionErrorOffset;


@@ -1,100 +0,0 @@
const std = @import("std");
// zig fmt: off
pub const Register = enum(u8) {
// 0 through 7, 32-bit registers. id is int value
eax, ecx, edx, ebx, esp, ebp, esi, edi,
// 8-15, 16-bit registers. id is int value - 8.
ax, cx, dx, bx, sp, bp, si, di,
// 16-23, 8-bit registers. id is int value - 16.
al, cl, dl, bl, ah, ch, dh, bh,
/// Returns the bit-width of the register.
pub fn size(self: Register) u7 {
return switch (@intFromEnum(self)) {
0...7 => 32,
8...15 => 16,
16...23 => 8,
else => unreachable,
};
}
/// Returns the register's id. This is used in practically every opcode the
/// x86 has. It is embedded in some instructions, such as the `B8 +rd` move
/// instruction, and is used in the R/M byte.
pub fn id(self: Register) u3 {
return @truncate(@intFromEnum(self));
}
/// Convert from any register to its 32 bit alias.
pub fn to32(self: Register) Register {
return @enumFromInt(@as(u8, self.id()));
}
/// Convert from any register to its 16 bit alias.
pub fn to16(self: Register) Register {
return @enumFromInt(@as(u8, self.id()) + 8);
}
/// Convert from any register to its 8 bit alias.
pub fn to8(self: Register) Register {
return @enumFromInt(@as(u8, self.id()) + 16);
}
pub fn dwarfNum(reg: Register) u8 {
return @intFromEnum(reg.to32());
}
};
// zig fmt: on
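// A small illustration of the id/alias scheme above (worked example, not
// part of the original file): @intFromEnum(.cx) is 9, so .cx.size() is 16,
// .cx.id() truncates 9 to the u3 value 1, and .cx.to32() maps back to .ecx,
// the 32-bit register with the same id.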
/// TODO this set is actually a set of caller-saved registers.
pub const callee_preserved_regs = [_]Register{ .eax, .ecx, .edx, .esi, .edi };
// TODO add these to Register enum and corresponding dwarfNum
// // Return Address register. This is stored in `0(%esp, "")` and is not a physical register.
// RA = (8, "RA"),
//
// ST0 = (11, "st0"),
// ST1 = (12, "st1"),
// ST2 = (13, "st2"),
// ST3 = (14, "st3"),
// ST4 = (15, "st4"),
// ST5 = (16, "st5"),
// ST6 = (17, "st6"),
// ST7 = (18, "st7"),
//
// XMM0 = (21, "xmm0"),
// XMM1 = (22, "xmm1"),
// XMM2 = (23, "xmm2"),
// XMM3 = (24, "xmm3"),
// XMM4 = (25, "xmm4"),
// XMM5 = (26, "xmm5"),
// XMM6 = (27, "xmm6"),
// XMM7 = (28, "xmm7"),
//
// MM0 = (29, "mm0"),
// MM1 = (30, "mm1"),
// MM2 = (31, "mm2"),
// MM3 = (32, "mm3"),
// MM4 = (33, "mm4"),
// MM5 = (34, "mm5"),
// MM6 = (35, "mm6"),
// MM7 = (36, "mm7"),
//
// MXCSR = (39, "mxcsr"),
//
// ES = (40, "es"),
// CS = (41, "cs"),
// SS = (42, "ss"),
// DS = (43, "ds"),
// FS = (44, "fs"),
// GS = (45, "gs"),
//
// TR = (48, "tr"),
// LDTR = (49, "ldtr"),
//
// FS_BASE = (93, "fs.base"),
// GS_BASE = (94, "gs.base"),


@@ -34,7 +34,7 @@ fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
.stage2_arm => .arm_backend,
.stage2_c => .c_backend,
.stage2_llvm => .llvm_backend,
.stage2_powerpc => .powerpc_backend,
.stage2_powerpc => unreachable,
.stage2_riscv64 => .riscv64_backend,
.stage2_sparc64 => .sparc64_backend,
.stage2_spirv => .spirv_backend,
@@ -48,11 +48,11 @@ fn devFeatureForBackend(backend: std.builtin.CompilerBackend) dev.Feature {
fn importBackend(comptime backend: std.builtin.CompilerBackend) type {
return switch (backend) {
.other, .stage1 => unreachable,
.stage2_aarch64 => @import("arch/aarch64/CodeGen.zig"),
.stage2_arm => @import("arch/arm/CodeGen.zig"),
.stage2_aarch64 => unreachable,
.stage2_arm => unreachable,
.stage2_c => @import("codegen/c.zig"),
.stage2_llvm => @import("codegen/llvm.zig"),
.stage2_powerpc => @import("arch/powerpc/CodeGen.zig"),
.stage2_powerpc => unreachable,
.stage2_riscv64 => @import("arch/riscv64/CodeGen.zig"),
.stage2_sparc64 => @import("arch/sparc64/CodeGen.zig"),
.stage2_spirv => @import("codegen/spirv.zig"),
@@ -70,14 +70,11 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
inline .stage2_llvm,
.stage2_c,
.stage2_wasm,
.stage2_arm,
.stage2_x86_64,
.stage2_aarch64,
.stage2_x86,
.stage2_riscv64,
.stage2_sparc64,
.stage2_spirv,
.stage2_powerpc,
=> |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).legalizeFeatures(target);
@@ -89,9 +86,6 @@ pub fn legalizeFeatures(pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) ?*co
/// MIR from codegen to the linker *regardless* of which backend is in use. So, we use this: a
/// union of all MIR types. The active tag is known from the backend in use; see `AnyMir.tag`.
pub const AnyMir = union {
aarch64: @import("arch/aarch64/Mir.zig"),
arm: @import("arch/arm/Mir.zig"),
powerpc: noreturn, //@import("arch/powerpc/Mir.zig"),
riscv64: @import("arch/riscv64/Mir.zig"),
sparc64: @import("arch/sparc64/Mir.zig"),
x86_64: @import("arch/x86_64/Mir.zig"),
@@ -102,7 +96,6 @@ pub const AnyMir = union {
return switch (backend) {
.stage2_aarch64 => "aarch64",
.stage2_arm => "arm",
.stage2_powerpc => "powerpc",
.stage2_riscv64 => "riscv64",
.stage2_sparc64 => "sparc64",
.stage2_x86_64 => "x86_64",
@@ -117,10 +110,7 @@ pub const AnyMir = union {
const backend = target_util.zigBackend(&zcu.root_mod.resolved_target.result, zcu.comp.config.use_llvm);
switch (backend) {
else => unreachable,
inline .stage2_aarch64,
.stage2_arm,
.stage2_powerpc,
.stage2_riscv64,
inline .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
@@ -148,10 +138,7 @@ pub fn generateFunction(
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, false)) {
else => unreachable,
inline .stage2_aarch64,
.stage2_arm,
.stage2_powerpc,
.stage2_riscv64,
inline .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
.stage2_wasm,
@@ -186,10 +173,7 @@ pub fn emitFunction(
const target = &zcu.navFileScope(func.owner_nav).mod.?.resolved_target.result;
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_aarch64,
.stage2_arm,
.stage2_powerpc,
.stage2_riscv64,
inline .stage2_riscv64,
.stage2_sparc64,
.stage2_x86_64,
=> |backend| {
@@ -215,10 +199,7 @@ pub fn generateLazyFunction(
zcu.getTarget();
switch (target_util.zigBackend(target, zcu.comp.config.use_llvm)) {
else => unreachable,
inline .stage2_powerpc,
.stage2_riscv64,
.stage2_x86_64,
=> |backend| {
inline .stage2_riscv64, .stage2_x86_64 => |backend| {
dev.check(devFeatureForBackend(backend));
return importBackend(backend).generateLazy(lf, pt, src_loc, lazy_sym, code, debug_output);
},


@@ -1,8 +1,7 @@
const std = @import("std");
const builtin = @import("builtin");
const bits = @import("bits.zig");
const bits = @import("../../arch/aarch64/bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");
@@ -149,17 +148,3 @@ pub const c_abi_int_param_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6,
pub const c_abi_int_return_regs = [_]Register{ .x0, .x1, .x2, .x3, .x4, .x5, .x6, .x7 };
const allocatable_registers = callee_preserved_regs;
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers);
// Register classes
const RegisterBitSet = RegisterManager.RegisterBitSet;
pub const RegisterClass = struct {
pub const gp: RegisterBitSet = blk: {
var set = RegisterBitSet.initEmpty();
for (callee_preserved_regs) |reg| {
const index = RegisterManager.indexOfRegIntoTracked(reg).?;
set.set(index);
}
break :blk set;
};
};


@@ -1,7 +1,5 @@
const std = @import("std");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../Type.zig");
const Zcu = @import("../../Zcu.zig");
@@ -163,25 +161,3 @@ fn countFloats(ty: Type, zcu: *Zcu, maybe_float_bits: *?u16) u32 {
else => return invalid,
}
}
pub const callee_preserved_regs = [_]Register{ .r4, .r5, .r6, .r7, .r8, .r10 };
pub const caller_preserved_regs = [_]Register{ .r0, .r1, .r2, .r3 };
pub const c_abi_int_param_regs = [_]Register{ .r0, .r1, .r2, .r3 };
pub const c_abi_int_return_regs = [_]Register{ .r0, .r1 };
const allocatable_registers = callee_preserved_regs ++ caller_preserved_regs;
pub const RegisterManager = RegisterManagerFn(@import("CodeGen.zig"), Register, &allocatable_registers);
// Register classes
const RegisterBitSet = RegisterManager.RegisterBitSet;
pub const RegisterClass = struct {
pub const gp: RegisterBitSet = blk: {
var set = RegisterBitSet.initEmpty();
set.setRangeValue(.{
.start = 0,
.end = caller_preserved_regs.len + callee_preserved_regs.len,
}, true);
break :blk set;
};
};


@@ -21,11 +21,11 @@ const Air = @import("../Air.zig");
const Value = @import("../Value.zig");
const Type = @import("../Type.zig");
const x86_64_abi = @import("../arch/x86_64/abi.zig");
const wasm_c_abi = @import("../arch/wasm/abi.zig");
const aarch64_c_abi = @import("../arch/aarch64/abi.zig");
const arm_c_abi = @import("../arch/arm/abi.zig");
const wasm_c_abi = @import("wasm/abi.zig");
const aarch64_c_abi = @import("aarch64/abi.zig");
const arm_c_abi = @import("arm/abi.zig");
const riscv_c_abi = @import("../arch/riscv64/abi.zig");
const mips_c_abi = @import("../arch/mips/abi.zig");
const mips_c_abi = @import("mips/abi.zig");
const dev = @import("../dev.zig");
const target_util = @import("../target.zig");


@@ -31,7 +31,7 @@ const mem = std.mem;
const Mir = @import("../arch/wasm/Mir.zig");
const CodeGen = @import("../arch/wasm/CodeGen.zig");
const abi = @import("../arch/wasm/abi.zig");
const abi = @import("../codegen/wasm/abi.zig");
const Compilation = @import("../Compilation.zig");
const Dwarf = @import("Dwarf.zig");
const InternPool = @import("../InternPool.zig");