riscv: big rewrite to use latest liveness

this one is even harder to document than the last large overhaul.

TL;DR:
- split Emit.zig apart into an Emit.zig and a Lower.zig.
- created separate files for the encoding; adding a new instruction is now as simple as
adding it to a couple of switch statements and providing the encoding (see the sketch below).
- relocs are handled in a saner manner, and there is now a clearly defined boundary between
lea_symbol and load_symbol.
- added a lot of abstractions for things like the stack, memory, registers, and more.
- we're using x86_64's FrameIndex approach now, which simplifies a lot of the tougher parts of the design.
- a lot more that I don't have the energy to document. at this point, just read the commit itself :p
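For illustration, here is roughly what adding a new R-type instruction (say, xor) would look like under the new Encoding.zig scheme. This is an editor's sketch, not part of the commit; the opcode/funct values for xor are taken from the old encoder.zig further down.

pub const Mnemonic = enum {
    add,
    xor, // 1. add the new mnemonic

    pub fn encoding(mnem: Mnemonic) Enc {
        return switch (mnem) {
            .add => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 },
            // 2. provide the encoding:
            .xor => .{ .opcode = 0b0110011, .funct3 = 0b100, .funct7 = 0b0000000 },
            // ...
        };
    }
};

pub const InstEnc = enum {
    // ...
    pub fn fromMnemonic(mnem: Mnemonic) InstEnc {
        return switch (mnem) {
            .add, .xor => .R, // 3. declare the instruction format
            // ...
        };
    }
};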
David Rubin 2024-04-03 00:15:56 -07:00
parent 9d0bb6371d
commit 6740c1f084
16 changed files with 3021 additions and 2103 deletions


@@ -252,12 +252,16 @@ pub fn mainSimple() anyerror!void {
pub fn mainExtraSimple() !void {
var pass_count: u8 = 0;
var skip_count: u8 = 0;
var fail_count: u8 = 0;
for (builtin.test_functions) |test_fn| {
test_fn.func() catch |err| {
if (err != error.SkipZigTest) {
@panic(test_fn.name);
fail_count += 1;
continue;
}
skip_count += 1;
continue;
};
pass_count += 1;


@@ -775,15 +775,7 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace, ret_addr
}
if (builtin.zig_backend == .stage2_riscv64) {
asm volatile ("ecall"
:
: [number] "{a7}" (64),
[arg1] "{a0}" (1),
[arg2] "{a1}" (@intFromPtr(msg.ptr)),
[arg3] "{a2}" (msg.len),
: "rcx", "r11", "memory"
);
std.posix.exit(127);
unreachable;
}
switch (builtin.os.tag) {
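(Editor's note: on riscv64-linux the syscall number goes in a7 and arguments in a0..a2, so the sequence above issues write(1, msg.ptr, msg.len) before exiting with status 127. The rcx/r11 clobbers shown are x86-64 registers, presumably left over from the x86 version of this code, and have no meaning on RISC-V.)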


@@ -208,8 +208,7 @@ fn wasi_start() callconv(.C) void {
}
fn riscv_start() callconv(.C) noreturn {
const code = @call(.always_inline, callMain, .{});
std.process.exit(code);
std.process.exit(@call(.always_inline, callMain, .{}));
}
fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv(.C) usize {

File diff suppressed because it is too large.


@@ -1,620 +1,163 @@
//! This file contains the functionality for lowering RISCV64 MIR into
//! machine code
//! This file contains the functionality for emitting RISC-V MIR as machine code
mir: Mir,
bin_file: *link.File,
lower: Lower,
debug_output: DebugInfoOutput,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
target: *const std.Target,
err_msg: ?*ErrorMsg = null,
src_loc: Module.SrcLoc,
code: *std.ArrayList(u8),
/// List of registers to save in the prologue.
save_reg_list: Mir.RegisterList,
prev_di_line: u32,
prev_di_column: u32,
/// Relative to the beginning of `code`.
prev_di_pc: usize,
/// Function's stack size. Used for backpatching.
stack_size: u32,
/// For backward branches: stores the code offset of the target
/// instruction
///
/// For forward branches: stores the code offset of the branch
/// instruction
code_offset_mapping: std.AutoHashMapUnmanaged(Mir.Inst.Index, usize) = .{},
relocs: std.ArrayListUnmanaged(Reloc) = .{},
const log = std.log.scoped(.emit);
const InnerError = error{
OutOfMemory,
pub const Error = Lower.Error || error{
EmitFail,
};
pub fn emitMir(
emit: *Emit,
) InnerError!void {
const mir_tags = emit.mir.instructions.items(.tag);
pub fn emitMir(emit: *Emit) Error!void {
log.debug("mir instruction len: {}", .{emit.lower.mir.instructions.len});
for (0..emit.lower.mir.instructions.len) |mir_i| {
const mir_index: Mir.Inst.Index = @intCast(mir_i);
try emit.code_offset_mapping.putNoClobber(
emit.lower.allocator,
mir_index,
@intCast(emit.code.items.len),
);
const lowered = try emit.lower.lowerMir(mir_index);
var lowered_relocs = lowered.relocs;
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
const start_offset: u32 = @intCast(emit.code.items.len);
try lowered_inst.encode(emit.code.writer());
try emit.lowerMir();
while (lowered_relocs.len > 0 and
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
lowered_relocs = lowered_relocs[1..];
}) switch (lowered_relocs[0].target) {
.inst => |target| try emit.relocs.append(emit.lower.allocator, .{
.source = start_offset,
.target = target,
.offset = 0,
.enc = std.meta.activeTag(lowered_inst.encoding.data),
}),
else => |x| return emit.fail("TODO: emitMir {s}", .{@tagName(x)}),
};
}
std.debug.assert(lowered_relocs.len == 0);
for (mir_tags, 0..) |tag, index| {
const inst = @as(u32, @intCast(index));
log.debug("emitMir: {s}", .{@tagName(tag)});
switch (tag) {
.add => try emit.mirRType(inst),
.sub => try emit.mirRType(inst),
.mul => try emit.mirRType(inst),
.@"or" => try emit.mirRType(inst),
if (lowered.insts.len == 0) {
const mir_inst = emit.lower.mir.instructions.get(mir_index);
switch (mir_inst.tag) {
else => unreachable,
.pseudo => switch (mir_inst.ops) {
else => unreachable,
.pseudo_dbg_prologue_end => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
log.debug("mirDbgPrologueEnd (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.pseudo_dbg_line_column => try emit.dbgAdvancePCAndLine(
mir_inst.data.pseudo_dbg_line_column.line,
mir_inst.data.pseudo_dbg_line_column.column,
),
.pseudo_dbg_epilogue_begin => {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
log.debug("mirDbgEpilogueBegin (line={d}, col={d})", .{
emit.prev_di_line, emit.prev_di_column,
});
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
},
.pseudo_dead => {},
},
}
}
}
try emit.fixupRelocs();
}
.cmp_eq => try emit.mirRType(inst),
.cmp_neq => try emit.mirRType(inst),
.cmp_gt => try emit.mirRType(inst),
.cmp_gte => try emit.mirRType(inst),
.cmp_lt => try emit.mirRType(inst),
.cmp_imm_gte => try emit.mirRType(inst),
.cmp_imm_eq => try emit.mirIType(inst),
.cmp_imm_neq => try emit.mirIType(inst),
.cmp_imm_lte => try emit.mirIType(inst),
.cmp_imm_lt => try emit.mirIType(inst),
pub fn deinit(emit: *Emit) void {
emit.relocs.deinit(emit.lower.allocator);
emit.code_offset_mapping.deinit(emit.lower.allocator);
emit.* = undefined;
}
.beq => try emit.mirBType(inst),
.bne => try emit.mirBType(inst),
const Reloc = struct {
/// Offset of the instruction.
source: usize,
/// Target of the relocation.
target: Mir.Inst.Index,
/// Offset of the relocation within the instruction.
offset: u32,
/// Encoding of the instruction, used to determine how to modify it.
enc: Encoding.InstEnc,
};
.addi => try emit.mirIType(inst),
.addiw => try emit.mirIType(inst),
.andi => try emit.mirIType(inst),
.jalr => try emit.mirIType(inst),
.abs => try emit.mirIType(inst),
fn fixupRelocs(emit: *Emit) Error!void {
for (emit.relocs.items) |reloc| {
log.debug("target inst: {}", .{emit.lower.mir.instructions.get(reloc.target)});
const target = emit.code_offset_mapping.get(reloc.target) orelse
return emit.fail("relocation target not found!", .{});
.jal => try emit.mirJType(inst),
const disp = @as(i32, @intCast(target)) - @as(i32, @intCast(reloc.source));
const code: *[4]u8 = emit.code.items[reloc.source + reloc.offset ..][0..4];
.ebreak => try emit.mirSystem(inst),
.ecall => try emit.mirSystem(inst),
.unimp => try emit.mirSystem(inst),
log.debug("disp: {x}", .{disp});
.dbg_line => try emit.mirDbgLine(inst),
.dbg_prologue_end => try emit.mirDebugPrologueEnd(),
.dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(),
.psuedo_prologue => try emit.mirPsuedo(inst),
.psuedo_epilogue => try emit.mirPsuedo(inst),
.j => try emit.mirPsuedo(inst),
.mv => try emit.mirRR(inst),
.not => try emit.mirRR(inst),
.nop => try emit.mirNop(inst),
.ret => try emit.mirNop(inst),
.lui => try emit.mirUType(inst),
.ld => try emit.mirIType(inst),
.lw => try emit.mirIType(inst),
.lh => try emit.mirIType(inst),
.lb => try emit.mirIType(inst),
.sd => try emit.mirIType(inst),
.sw => try emit.mirIType(inst),
.sh => try emit.mirIType(inst),
.sb => try emit.mirIType(inst),
.srlw => try emit.mirRType(inst),
.sllw => try emit.mirRType(inst),
.srli => try emit.mirIType(inst),
.slli => try emit.mirIType(inst),
.ldr_ptr_stack => try emit.mirIType(inst),
.load_symbol => try emit.mirLoadSymbol(inst),
switch (reloc.enc) {
.J => riscv_util.writeInstJ(code, @bitCast(disp)),
else => return emit.fail("tried to reloc encoding type {s}", .{@tagName(reloc.enc)}),
}
}
}
pub fn deinit(emit: *Emit) void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
emit.code_offset_mapping.deinit(gpa);
emit.* = undefined;
}
fn writeInstruction(emit: *Emit, instruction: Instruction) !void {
const endian = emit.target.cpu.arch.endian();
std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian);
}
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
assert(emit.err_msg == null);
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
emit.err_msg = try ErrorMsg.create(gpa, emit.src_loc, format, args);
return error.EmitFail;
}
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) !void {
const delta_line = @as(i32, @intCast(line)) - @as(i32, @intCast(emit.prev_di_line));
fn dbgAdvancePCAndLine(emit: *Emit, line: u32, column: u32) Error!void {
const delta_line = @as(i33, line) - @as(i33, emit.prev_di_line);
const delta_pc: usize = emit.code.items.len - emit.prev_di_pc;
log.debug(" (advance pc={d} and line={d})", .{ delta_pc, delta_line });
switch (emit.debug_output) {
.dwarf => |dw| {
if (column != emit.prev_di_column) try dw.setColumn(column);
if (delta_line == 0) return; // TODO: remove this
if (delta_line == 0) return; // TODO: fix these edge cases.
try dw.advancePCAndLine(delta_line, delta_pc);
emit.prev_di_line = line;
emit.prev_di_column = column;
emit.prev_di_pc = emit.code.items.len;
},
.plan9 => |dbg_out| {
if (delta_pc <= 0) return; // only do this when the pc changes
// increasing the line number
try link.File.Plan9.changeLine(&dbg_out.dbg_line, delta_line);
// increasing the pc
const d_pc_p9 = @as(i64, @intCast(delta_pc)) - dbg_out.pc_quanta;
if (d_pc_p9 > 0) {
// minus one because if it's the last one, we want to leave space to change the line which is one pc quanta
try dbg_out.dbg_line.append(@as(u8, @intCast(@divExact(d_pc_p9, dbg_out.pc_quanta) + 128)) - dbg_out.pc_quanta);
if (dbg_out.pcop_change_index) |pci|
dbg_out.dbg_line.items[pci] += 1;
dbg_out.pcop_change_index = @as(u32, @intCast(dbg_out.dbg_line.items.len - 1));
} else if (d_pc_p9 == 0) {
// we don't need to do anything, because adding the pc quanta does it for us
} else unreachable;
if (dbg_out.start_line == null)
dbg_out.start_line = emit.prev_di_line;
dbg_out.end_line = line;
// only do this if the pc changed
emit.prev_di_line = line;
emit.prev_di_column = column;
emit.prev_di_pc = emit.code.items.len;
},
.none => {},
}
}
fn mirRType(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const r_type = emit.mir.instructions.items(.data)[inst].r_type;
const rd = r_type.rd;
const rs1 = r_type.rs1;
const rs2 = r_type.rs2;
switch (tag) {
.add => try emit.writeInstruction(Instruction.add(rd, rs1, rs2)),
.sub => try emit.writeInstruction(Instruction.sub(rd, rs1, rs2)),
.mul => try emit.writeInstruction(Instruction.mul(rd, rs1, rs2)),
.cmp_gt => {
// rs1 > rs2
try emit.writeInstruction(Instruction.sltu(rd, rs2, rs1));
},
.cmp_gte => {
// rs1 >= rs2
try emit.writeInstruction(Instruction.sltu(rd, rs1, rs2));
try emit.writeInstruction(Instruction.xori(rd, rd, 1));
},
.cmp_eq => {
// rs1 == rs2
try emit.writeInstruction(Instruction.xor(rd, rs1, rs2));
try emit.writeInstruction(Instruction.sltiu(rd, rd, 1)); // seqz
},
.cmp_neq => {
// rs1 != rs2
try emit.writeInstruction(Instruction.xor(rd, rs1, rs2));
try emit.writeInstruction(Instruction.sltu(rd, .zero, rd)); // snez
},
.cmp_lt => {
// rd = 1 if rs1 < rs2
try emit.writeInstruction(Instruction.slt(rd, rs1, rs2));
},
.sllw => try emit.writeInstruction(Instruction.sllw(rd, rs1, rs2)),
.srlw => try emit.writeInstruction(Instruction.srlw(rd, rs1, rs2)),
.@"or" => try emit.writeInstruction(Instruction.@"or"(rd, rs1, rs2)),
.cmp_imm_gte => {
// rd = 1 if rs1 >= imm12
// see the docstring of cmp_imm_gte to see why we use r_type here
// (rs1 >= imm12) == !(imm12 > rs1)
try emit.writeInstruction(Instruction.sltu(rd, rs1, rs2));
},
else => unreachable,
}
}
fn mirBType(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const b_type = emit.mir.instructions.items(.data)[inst].b_type;
const offset = @as(i64, @intCast(emit.code_offset_mapping.get(b_type.inst).?)) - @as(i64, @intCast(emit.code.items.len));
switch (tag) {
.beq => {
log.debug("beq: {} offset={}", .{ inst, offset });
try emit.writeInstruction(Instruction.beq(b_type.rs1, b_type.rs2, @intCast(offset)));
},
.bne => {
log.debug("bne: {} offset={}", .{ inst, offset });
try emit.writeInstruction(Instruction.bne(b_type.rs1, b_type.rs2, @intCast(offset)));
},
else => unreachable,
}
}
fn mirIType(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const i_type = emit.mir.instructions.items(.data)[inst].i_type;
const rd = i_type.rd;
const rs1 = i_type.rs1;
const imm12 = i_type.imm12;
switch (tag) {
.addi => try emit.writeInstruction(Instruction.addi(rd, rs1, imm12)),
.addiw => try emit.writeInstruction(Instruction.addiw(rd, rs1, imm12)),
.jalr => try emit.writeInstruction(Instruction.jalr(rd, imm12, rs1)),
.andi => try emit.writeInstruction(Instruction.andi(rd, rs1, imm12)),
.ld => try emit.writeInstruction(Instruction.ld(rd, imm12, rs1)),
.lw => try emit.writeInstruction(Instruction.lw(rd, imm12, rs1)),
.lh => try emit.writeInstruction(Instruction.lh(rd, imm12, rs1)),
.lb => try emit.writeInstruction(Instruction.lb(rd, imm12, rs1)),
.sd => try emit.writeInstruction(Instruction.sd(rd, imm12, rs1)),
.sw => try emit.writeInstruction(Instruction.sw(rd, imm12, rs1)),
.sh => try emit.writeInstruction(Instruction.sh(rd, imm12, rs1)),
.sb => try emit.writeInstruction(Instruction.sb(rd, imm12, rs1)),
.ldr_ptr_stack => try emit.writeInstruction(Instruction.add(rd, rs1, .sp)),
.abs => {
try emit.writeInstruction(Instruction.sraiw(rd, rs1, @intCast(imm12)));
try emit.writeInstruction(Instruction.xor(rs1, rs1, rd));
try emit.writeInstruction(Instruction.subw(rs1, rs1, rd));
},
.srli => try emit.writeInstruction(Instruction.srli(rd, rs1, @intCast(imm12))),
.slli => try emit.writeInstruction(Instruction.slli(rd, rs1, @intCast(imm12))),
.cmp_imm_eq => {
try emit.writeInstruction(Instruction.xori(rd, rs1, imm12));
try emit.writeInstruction(Instruction.sltiu(rd, rd, 1));
},
.cmp_imm_neq => {
try emit.writeInstruction(Instruction.xori(rd, rs1, imm12));
try emit.writeInstruction(Instruction.sltu(rd, .x0, rd));
},
.cmp_imm_lt => {
try emit.writeInstruction(Instruction.slti(rd, rs1, imm12));
},
.cmp_imm_lte => {
try emit.writeInstruction(Instruction.sltiu(rd, rs1, @bitCast(imm12)));
},
else => unreachable,
}
}
fn mirJType(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const j_type = emit.mir.instructions.items(.data)[inst].j_type;
const offset = @as(i64, @intCast(emit.code_offset_mapping.get(j_type.inst).?)) - @as(i64, @intCast(emit.code.items.len));
switch (tag) {
.jal => {
log.debug("jal: {} offset={}", .{ inst, offset });
try emit.writeInstruction(Instruction.jal(j_type.rd, @intCast(offset)));
},
else => unreachable,
}
}
fn mirSystem(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
switch (tag) {
.ebreak => try emit.writeInstruction(Instruction.ebreak),
.ecall => try emit.writeInstruction(Instruction.ecall),
.unimp => try emit.writeInstruction(Instruction.unimp),
else => unreachable,
}
}
fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column;
switch (tag) {
.dbg_line => try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column),
else => unreachable,
}
}
fn mirDebugPrologueEnd(emit: *Emit) !void {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setPrologueEnd();
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
fn mirDebugEpilogueBegin(emit: *Emit) !void {
switch (emit.debug_output) {
.dwarf => |dw| {
try dw.setEpilogueBegin();
try emit.dbgAdvancePCAndLine(emit.prev_di_line, emit.prev_di_column);
},
.plan9 => {},
.none => {},
}
}
fn mirPsuedo(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const data = emit.mir.instructions.items(.data)[inst];
switch (tag) {
.psuedo_prologue => {
const stack_size: i12 = math.cast(i12, emit.stack_size) orelse {
return emit.fail("TODO: mirPsuedo support larger stack sizes", .{});
};
// Decrement sp by (num s registers * 8) + local var space
try emit.writeInstruction(Instruction.addi(.sp, .sp, -stack_size));
// Spill ra
try emit.writeInstruction(Instruction.sd(.ra, 0, .sp));
// Spill callee saved registers.
var s_reg_iter = emit.save_reg_list.iterator(.{});
var i: i12 = 8;
while (s_reg_iter.next()) |reg_i| {
const reg = abi.callee_preserved_regs[reg_i];
try emit.writeInstruction(Instruction.sd(reg, i, .sp));
i += 8;
}
},
.psuedo_epilogue => {
const stack_size: i12 = math.cast(i12, emit.stack_size) orelse {
return emit.fail("TODO: mirPsuedo support larger stack sizes", .{});
};
// Restore ra
try emit.writeInstruction(Instruction.ld(.ra, 0, .sp));
// Restore spilled callee saved registers
var s_reg_iter = emit.save_reg_list.iterator(.{});
var i: i12 = 8;
while (s_reg_iter.next()) |reg_i| {
const reg = abi.callee_preserved_regs[reg_i];
try emit.writeInstruction(Instruction.ld(reg, i, .sp));
i += 8;
}
// Increment sp back to previous value
try emit.writeInstruction(Instruction.addi(.sp, .sp, stack_size));
},
.j => {
const offset = @as(i64, @intCast(emit.code_offset_mapping.get(data.inst).?)) - @as(i64, @intCast(emit.code.items.len));
try emit.writeInstruction(Instruction.jal(.zero, @intCast(offset)));
},
else => unreachable,
}
}
fn mirRR(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const rr = emit.mir.instructions.items(.data)[inst].rr;
const rd = rr.rd;
const rs = rr.rs;
switch (tag) {
.mv => try emit.writeInstruction(Instruction.addi(rd, rs, 0)),
.not => try emit.writeInstruction(Instruction.xori(rd, rs, 1)),
else => unreachable,
}
}
fn mirUType(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
const u_type = emit.mir.instructions.items(.data)[inst].u_type;
switch (tag) {
.lui => try emit.writeInstruction(Instruction.lui(u_type.rd, u_type.imm20)),
else => unreachable,
}
}
fn mirNop(emit: *Emit, inst: Mir.Inst.Index) !void {
const tag = emit.mir.instructions.items(.tag)[inst];
switch (tag) {
.nop => try emit.writeInstruction(Instruction.addi(.zero, .zero, 0)),
.ret => try emit.writeInstruction(Instruction.jalr(.zero, 0, .ra)),
else => unreachable,
}
}
fn mirLoadSymbol(emit: *Emit, inst: Mir.Inst.Index) !void {
const payload = emit.mir.instructions.items(.data)[inst].payload;
const data = emit.mir.extraData(Mir.LoadSymbolPayload, payload).data;
const reg = @as(Register, @enumFromInt(data.register));
const start_offset = @as(u32, @intCast(emit.code.items.len));
try emit.writeInstruction(Instruction.lui(reg, 0));
try emit.writeInstruction(Instruction.addi(reg, reg, 0));
switch (emit.bin_file.tag) {
.elf => {
const elf_file = emit.bin_file.cast(link.File.Elf).?;
const atom_ptr = elf_file.symbol(data.atom_index).atom(elf_file).?;
const sym_index = elf_file.zigObjectPtr().?.symbol(data.sym_index);
const sym = elf_file.symbol(sym_index);
var hi_r_type: u32 = @intFromEnum(std.elf.R_RISCV.HI20);
var lo_r_type: u32 = @intFromEnum(std.elf.R_RISCV.LO12_I);
if (sym.flags.needs_zig_got) {
_ = try sym.getOrCreateZigGotEntry(sym_index, elf_file);
hi_r_type = Elf.R_ZIG_GOT_HI20;
lo_r_type = Elf.R_ZIG_GOT_LO12;
}
try atom_ptr.addReloc(elf_file, .{
.r_offset = start_offset,
.r_info = (@as(u64, @intCast(data.sym_index)) << 32) | hi_r_type,
.r_addend = 0,
});
try atom_ptr.addReloc(elf_file, .{
.r_offset = start_offset + 4,
.r_info = (@as(u64, @intCast(data.sym_index)) << 32) | lo_r_type,
.r_addend = 0,
});
},
else => unreachable,
}
}
fn isStore(tag: Mir.Inst.Tag) bool {
return switch (tag) {
.sb => true,
.sh => true,
.sw => true,
.sd => true,
.addi => true, // needed for ptr_stack_offset stores
else => false,
fn fail(emit: *Emit, comptime format: []const u8, args: anytype) Error {
return switch (emit.lower.fail(format, args)) {
error.LowerFail => error.EmitFail,
else => |e| e,
};
}
fn isLoad(tag: Mir.Inst.Tag) bool {
return switch (tag) {
.lb => true,
.lh => true,
.lw => true,
.ld => true,
else => false,
};
}
pub fn isBranch(tag: Mir.Inst.Tag) bool {
return switch (tag) {
.beq => true,
.bne => true,
.jal => true,
.j => true,
else => false,
};
}
pub fn branchTarget(emit: *Emit, inst: Mir.Inst.Index) Mir.Inst.Index {
const tag = emit.mir.instructions.items(.tag)[inst];
const data = emit.mir.instructions.items(.data)[inst];
switch (tag) {
.bne,
.beq,
=> return data.b_type.inst,
.jal => return data.j_type.inst,
.j => return data.inst,
else => std.debug.panic("branchTarget {s}", .{@tagName(tag)}),
}
}
fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize {
const tag = emit.mir.instructions.items(.tag)[inst];
return switch (tag) {
.dbg_line,
.dbg_epilogue_begin,
.dbg_prologue_end,
=> 0,
.cmp_eq,
.cmp_neq,
.cmp_imm_eq,
.cmp_imm_neq,
.cmp_gte,
.load_symbol,
.abs,
=> 8,
.psuedo_epilogue, .psuedo_prologue => size: {
const count = emit.save_reg_list.count() * 4;
break :size count + 8;
},
else => 4,
};
}
fn lowerMir(emit: *Emit) !void {
const comp = emit.bin_file.comp;
const gpa = comp.gpa;
const mir_tags = emit.mir.instructions.items(.tag);
const mir_datas = emit.mir.instructions.items(.data);
const proglogue_size: u32 = @intCast(emit.save_reg_list.size());
emit.stack_size += proglogue_size;
for (mir_tags, 0..) |tag, index| {
const inst: u32 = @intCast(index);
if (isStore(tag) or isLoad(tag)) {
const data = mir_datas[inst].i_type;
if (data.rs1 == .sp) {
const offset = mir_datas[inst].i_type.imm12;
mir_datas[inst].i_type.imm12 = offset + @as(i12, @intCast(proglogue_size)) + 8;
}
}
if (isBranch(tag)) {
const target_inst = emit.branchTarget(inst);
try emit.code_offset_mapping.put(gpa, target_inst, 0);
}
}
var current_code_offset: usize = 0;
for (0..mir_tags.len) |index| {
const inst = @as(u32, @intCast(index));
if (emit.code_offset_mapping.getPtr(inst)) |offset| {
offset.* = current_code_offset;
}
current_code_offset += emit.instructionSize(inst);
}
}
const Emit = @This();
const std = @import("std");
const math = std.math;
const Mir = @import("Mir.zig");
const bits = @import("bits.zig");
const abi = @import("abi.zig");
const link = @import("../../link.zig");
const Module = @import("../../Module.zig");
const Elf = @import("../../link/Elf.zig");
const ErrorMsg = Module.ErrorMsg;
const assert = std.debug.assert;
const Instruction = bits.Instruction;
const Register = bits.Register;
const log = std.log.scoped(.emit);
const mem = std.mem;
const std = @import("std");
const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput;
const Emit = @This();
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const riscv_util = @import("../../link/riscv.zig");
const Encoding = @import("Encoding.zig");
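The shape of the new pipeline, condensed (an editor's sketch with simplified types, not the actual API): Lower expands each MIR instruction into a handful of real instructions, Emit encodes them immediately while recording each MIR instruction's code offset, and branch displacements are backpatched at the end.

// Sketch of the two-pass emit (simplified; compare emitMir/fixupRelocs above):
for (0..mir_len) |i| {
    try offsets.put(i, code.items.len);    // pass 1: remember where inst i starts
    const lowered = try lower.lowerMir(i); // pseudo-instructions expand here
    for (lowered.insts) |inst| try inst.encode(code.writer());
    // branch targets are recorded as Relocs with a 0 placeholder displacement
}
for (relocs.items) |r| {                   // pass 2: patch displacements
    const disp = @as(i32, @intCast(offsets.get(r.target).?)) -
        @as(i32, @intCast(r.source));
    riscv_util.writeInstJ(code.items[r.source..][0..4], @bitCast(disp));
}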


@@ -0,0 +1,333 @@
mnemonic: Mnemonic,
data: Data,
pub const Mnemonic = enum {
// R Type
add,
// I Type
ld,
lw,
lwu,
lh,
lhu,
lb,
lbu,
addi,
jalr,
// U Type
lui,
// S Type
sd,
sw,
sh,
sb,
// J Type
jal,
// System
ecall,
ebreak,
unimp,
pub fn encoding(mnem: Mnemonic) Enc {
return switch (mnem) {
// zig fmt: off
.add => .{ .opcode = 0b0110011, .funct3 = 0b000, .funct7 = 0b0000000 },
.ld => .{ .opcode = 0b0000011, .funct3 = 0b011, .funct7 = null },
.lw => .{ .opcode = 0b0000011, .funct3 = 0b010, .funct7 = null },
.lwu => .{ .opcode = 0b0000011, .funct3 = 0b110, .funct7 = null },
.lh => .{ .opcode = 0b0000011, .funct3 = 0b001, .funct7 = null },
.lhu => .{ .opcode = 0b0000011, .funct3 = 0b101, .funct7 = null },
.lb => .{ .opcode = 0b0000011, .funct3 = 0b000, .funct7 = null },
.lbu => .{ .opcode = 0b0000011, .funct3 = 0b100, .funct7 = null },
.addi => .{ .opcode = 0b0010011, .funct3 = 0b000, .funct7 = null },
.jalr => .{ .opcode = 0b1100111, .funct3 = 0b000, .funct7 = null },
.lui => .{ .opcode = 0b0110111, .funct3 = null, .funct7 = null },
.sd => .{ .opcode = 0b0100011, .funct3 = 0b011, .funct7 = null },
.sw => .{ .opcode = 0b0100011, .funct3 = 0b010, .funct7 = null },
.sh => .{ .opcode = 0b0100011, .funct3 = 0b001, .funct7 = null },
.sb => .{ .opcode = 0b0100011, .funct3 = 0b000, .funct7 = null },
.jal => .{ .opcode = 0b1101111, .funct3 = null, .funct7 = null },
.ecall => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null },
.ebreak => .{ .opcode = 0b1110011, .funct3 = 0b000, .funct7 = null },
.unimp => .{ .opcode = 0b0000000, .funct3 = 0b000, .funct7 = null },
// zig fmt: on
};
}
};
pub const InstEnc = enum {
R,
I,
S,
B,
U,
J,
/// extras that have unusual op counts
system,
pub fn fromMnemonic(mnem: Mnemonic) InstEnc {
return switch (mnem) {
.add,
=> .R,
.addi,
.ld,
.lw,
.lwu,
.lh,
.lhu,
.lb,
.lbu,
.jalr,
=> .I,
.lui,
=> .U,
.sd,
.sw,
.sh,
.sb,
=> .S,
.jal,
=> .J,
.ecall,
.ebreak,
.unimp,
=> .system,
};
}
pub fn opsList(enc: InstEnc) [4]std.meta.FieldEnum(Operand) {
return switch (enc) {
.R => .{ .reg, .reg, .reg, .none },
.I => .{ .reg, .reg, .imm, .none },
.S => .{ .reg, .reg, .imm, .none },
.B => .{ .imm, .reg, .reg, .imm },
.U => .{ .reg, .imm, .none, .none },
.J => .{ .reg, .imm, .none, .none },
.system => .{ .none, .none, .none, .none },
};
}
};
pub const Data = union(InstEnc) {
R: packed struct {
opcode: u7,
rd: u5,
funct3: u3,
rs1: u5,
rs2: u5,
funct7: u7,
},
I: packed struct {
opcode: u7,
rd: u5,
funct3: u3,
rs1: u5,
imm0_11: u12,
},
S: packed struct {
opcode: u7,
imm0_4: u5,
funct3: u3,
rs1: u5,
rs2: u5,
imm5_11: u7,
},
B: packed struct {
opcode: u7,
imm11: u1,
imm1_4: u4,
funct3: u3,
rs1: u5,
rs2: u5,
imm5_10: u6,
imm12: u1,
},
U: packed struct {
opcode: u7,
rd: u5,
imm12_31: u20,
},
J: packed struct {
opcode: u7,
rd: u5,
imm12_19: u8,
imm11: u1,
imm1_10: u10,
imm20: u1,
},
system: void,
pub fn toU32(self: Data) u32 {
return switch (self) {
.R => |v| @as(u32, @bitCast(v)),
.I => |v| @as(u32, @bitCast(v)),
.S => |v| @as(u32, @bitCast(v)),
.B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31),
.U => |v| @as(u32, @bitCast(v)),
.J => |v| @as(u32, @bitCast(v)),
.system => unreachable,
};
}
pub fn construct(mnem: Mnemonic, ops: []const Operand) !Data {
const inst_enc = InstEnc.fromMnemonic(mnem);
const enc = mnem.encoding();
// special mnemonics
switch (mnem) {
.ecall,
.ebreak,
.unimp,
=> {
assert(ops.len == 0);
return .{
.I = .{
.rd = Register.zero.id(),
.rs1 = Register.zero.id(),
.imm0_11 = switch (mnem) {
.ecall => 0x000,
.ebreak => 0x001,
.unimp => 0,
else => unreachable,
},
.opcode = enc.opcode,
.funct3 = enc.funct3.?,
},
};
},
else => {},
}
switch (inst_enc) {
.R => {
assert(ops.len == 3);
return .{
.R = .{
.rd = ops[0].reg.id(),
.rs1 = ops[1].reg.id(),
.rs2 = ops[2].reg.id(),
.opcode = enc.opcode,
.funct3 = enc.funct3.?,
.funct7 = enc.funct7.?,
},
};
},
.S => {
assert(ops.len == 3);
const umm = ops[2].imm.asBits(u12);
return .{
.S = .{
.imm0_4 = @truncate(umm),
.rs1 = ops[0].reg.id(),
.rs2 = ops[1].reg.id(),
.imm5_11 = @truncate(umm >> 5),
.opcode = enc.opcode,
.funct3 = enc.funct3.?,
},
};
},
.I => {
assert(ops.len == 3);
return .{
.I = .{
.rd = ops[0].reg.id(),
.rs1 = ops[1].reg.id(),
.imm0_11 = ops[2].imm.asBits(u12),
.opcode = enc.opcode,
.funct3 = enc.funct3.?,
},
};
},
.U => {
assert(ops.len == 2);
return .{
.U = .{
.rd = ops[0].reg.id(),
.imm12_31 = ops[1].imm.asBits(u20),
.opcode = enc.opcode,
},
};
},
.J => {
assert(ops.len == 2);
const umm = ops[1].imm.asBits(u21);
assert(umm % 4 == 0); // misaligned jump target
return .{
.J = .{
.rd = ops[0].reg.id(),
.imm1_10 = @truncate(umm >> 1),
.imm11 = @truncate(umm >> 11),
.imm12_19 = @truncate(umm >> 12),
.imm20 = @truncate(umm >> 20),
.opcode = enc.opcode,
},
};
},
else => std.debug.panic("TODO: construct {s}", .{@tagName(inst_enc)}),
}
}
};
pub fn findByMnemonic(mnem: Mnemonic, ops: []const Operand) !?Encoding {
if (!verifyOps(mnem, ops)) return null;
return .{
.mnemonic = mnem,
.data = try Data.construct(mnem, ops),
};
}
const Enc = struct {
opcode: u7,
funct3: ?u3,
funct7: ?u7,
};
fn verifyOps(mnem: Mnemonic, ops: []const Operand) bool {
const inst_enc = InstEnc.fromMnemonic(mnem);
const list = std.mem.sliceTo(&inst_enc.opsList(), .none);
for (list, ops) |l, o| if (l != std.meta.activeTag(o)) return false;
return true;
}
const std = @import("std");
const assert = std.debug.assert;
const log = std.log.scoped(.encoding);
const Encoding = @This();
const bits = @import("bits.zig");
const Register = bits.Register;
const encoder = @import("encoder.zig");
const Instruction = encoder.Instruction;
const Operand = Instruction.Operand;
const OperandEnum = std.meta.FieldEnum(Operand);
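A quick sanity check of the new encoder, as an editor's sketch (not part of the commit; it assumes the Register enum names .a0 as x10, and uses Instruction.new from encoder.zig). addi a0, a0, 1 assembles to 0x00150513:

test "encode addi" {
    // I-type operand order is rd, rs1, imm (see InstEnc.opsList above).
    const inst = try Instruction.new(.addi, &.{
        .{ .reg = .a0 },
        .{ .reg = .a0 },
        .{ .imm = Immediate.s(1) },
    });
    try std.testing.expectEqual(@as(u32, 0x00150513), inst.encoding.data.toU32());
}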

src/arch/riscv64/Lower.zig (new file, 222 lines)

@@ -0,0 +1,222 @@
//! This file contains the functionality for lowering RISC-V MIR to Instructions
bin_file: *link.File,
output_mode: std.builtin.OutputMode,
link_mode: std.builtin.LinkMode,
pic: bool,
allocator: Allocator,
mir: Mir,
cc: std.builtin.CallingConvention,
err_msg: ?*ErrorMsg = null,
src_loc: Module.SrcLoc,
result_insts_len: u8 = undefined,
result_relocs_len: u8 = undefined,
result_insts: [
@max(
1, // non-pseudo instruction
abi.callee_preserved_regs.len, // spill / restore regs,
)
]Instruction = undefined,
result_relocs: [1]Reloc = undefined,
pub const Error = error{
OutOfMemory,
LowerFail,
InvalidInstruction,
};
pub const Reloc = struct {
lowered_inst_index: u8,
target: Target,
const Target = union(enum) {
inst: Mir.Inst.Index,
linker_reloc: bits.Symbol,
};
};
/// The returned slice is overwritten by the next call to lowerMir.
pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
insts: []const Instruction,
relocs: []const Reloc,
} {
lower.result_insts = undefined;
lower.result_relocs = undefined;
errdefer lower.result_insts = undefined;
errdefer lower.result_relocs = undefined;
lower.result_insts_len = 0;
lower.result_relocs_len = 0;
defer lower.result_insts_len = undefined;
defer lower.result_relocs_len = undefined;
const inst = lower.mir.instructions.get(index);
log.debug("lowerMir {}", .{inst});
switch (inst.tag) {
else => try lower.generic(inst),
.pseudo => switch (inst.ops) {
.pseudo_dbg_line_column,
.pseudo_dbg_epilogue_begin,
.pseudo_dbg_prologue_end,
.pseudo_dead,
=> {},
.pseudo_load_rm, .pseudo_store_rm => {
const rm = inst.data.rm;
const frame_loc = rm.m.toFrameLoc(lower.mir);
switch (inst.ops) {
.pseudo_load_rm => {
const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) {
.byte => .lb,
.hword => .lh,
.word => .lw,
.dword => .ld,
};
try lower.emit(tag, &.{
.{ .reg = rm.r },
.{ .reg = frame_loc.base },
.{ .imm = Immediate.s(frame_loc.disp) },
});
},
.pseudo_store_rm => {
const tag: Encoding.Mnemonic = switch (rm.m.mod.rm.size) {
.byte => .sb,
.hword => .sh,
.word => .sw,
.dword => .sd,
};
try lower.emit(tag, &.{
.{ .reg = frame_loc.base },
.{ .reg = rm.r },
.{ .imm = Immediate.s(frame_loc.disp) },
});
},
else => unreachable,
}
},
.pseudo_mv => {
const rr = inst.data.rr;
try lower.emit(.addi, &.{
.{ .reg = rr.rd },
.{ .reg = rr.rs },
.{ .imm = Immediate.s(0) },
});
},
.pseudo_ret => {
try lower.emit(.jalr, &.{
.{ .reg = .zero },
.{ .reg = .ra },
.{ .imm = Immediate.s(0) },
});
},
.pseudo_j => {
try lower.emit(.jal, &.{
.{ .reg = .zero },
.{ .imm = lower.reloc(.{ .inst = inst.data.inst }) },
});
},
.pseudo_spill_regs => try lower.pushPopRegList(true, inst.data.reg_list),
.pseudo_restore_regs => try lower.pushPopRegList(false, inst.data.reg_list),
else => return lower.fail("TODO: pseudo {s}", .{@tagName(inst.ops)}),
},
}
return .{
.insts = lower.result_insts[0..lower.result_insts_len],
.relocs = lower.result_relocs[0..lower.result_relocs_len],
};
}
fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
const mnemonic = std.meta.stringToEnum(Encoding.Mnemonic, @tagName(inst.tag)) orelse {
return lower.fail("generic inst name {s}-{s} doesn't match with a mnemonic", .{
@tagName(inst.tag),
@tagName(inst.ops),
});
};
try lower.emit(mnemonic, switch (inst.ops) {
.none => &.{},
.ri => &.{
.{ .reg = inst.data.u_type.rd },
.{ .imm = inst.data.u_type.imm20 },
},
.rri => &.{
.{ .reg = inst.data.i_type.rd },
.{ .reg = inst.data.i_type.rs1 },
.{ .imm = inst.data.i_type.imm12 },
},
else => return lower.fail("TODO: generic lower ops {s}", .{@tagName(inst.ops)}),
});
}
fn emit(lower: *Lower, mnemonic: Encoding.Mnemonic, ops: []const Instruction.Operand) !void {
lower.result_insts[lower.result_insts_len] =
try Instruction.new(mnemonic, ops);
lower.result_insts_len += 1;
}
fn reloc(lower: *Lower, target: Reloc.Target) Immediate {
lower.result_relocs[lower.result_relocs_len] = .{
.lowered_inst_index = lower.result_insts_len,
.target = target,
};
lower.result_relocs_len += 1;
return Immediate.s(0);
}
fn pushPopRegList(lower: *Lower, comptime spilling: bool, reg_list: Mir.RegisterList) !void {
var it = reg_list.iterator(.{ .direction = if (spilling) .forward else .reverse });
var reg_i: u31 = 0;
while (it.next()) |i| {
const frame = lower.mir.frame_locs.get(@intFromEnum(bits.FrameIndex.spill_frame));
if (spilling) {
try lower.emit(.sd, &.{
.{ .reg = frame.base },
.{ .reg = abi.callee_preserved_regs[i] },
.{ .imm = Immediate.s(frame.disp + reg_i) },
});
} else {
try lower.emit(.ld, &.{
.{ .reg = abi.callee_preserved_regs[i] },
.{ .reg = frame.base },
.{ .imm = Immediate.s(frame.disp + reg_i) },
});
}
reg_i += 8;
}
}
pub fn fail(lower: *Lower, comptime format: []const u8, args: anytype) Error {
@setCold(true);
assert(lower.err_msg == null);
lower.err_msg = try ErrorMsg.create(lower.allocator, lower.src_loc, format, args);
return error.LowerFail;
}
const Lower = @This();
const abi = @import("abi.zig");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const encoder = @import("encoder.zig");
const link = @import("../../link.zig");
const Encoding = @import("Encoding.zig");
const std = @import("std");
const log = std.log.scoped(.lower);
const Air = @import("../../Air.zig");
const Allocator = std.mem.Allocator;
const ErrorMsg = Module.ErrorMsg;
const Mir = @import("Mir.zig");
const Module = @import("../../Module.zig");
const Instruction = encoder.Instruction;
const Immediate = bits.Immediate;
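One contract worth restating: lowerMir returns slices into buffers owned by the Lower, so each result must be consumed before the next call (Emit's loop above does exactly this). Roughly:

// Sketch: results are invalidated by the next lowerMir call.
const lowered = try lower.lowerMir(index);
for (lowered.insts) |inst| try inst.encode(code.writer());
// do not touch lowered.insts / lowered.relocs after this point
const next = try lower.lowerMir(index + 1);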


@@ -9,22 +9,32 @@
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
extra: []const u32,
frame_locs: std.MultiArrayList(FrameLoc).Slice,
pub const Inst = struct {
tag: Tag,
/// The meaning of this depends on `tag`.
data: Data,
ops: Ops,
/// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
pub const Tag = enum(u16) {
/// Add immediate. Uses i_type payload.
addi,
/// Add immediate and produce a sign-extended result.
///
/// Uses i-type payload.
addiw,
jalr,
lui,
mv,
unimp,
ebreak,
ecall,
unimp,
/// OR instruction. Uses r_type payload.
@"or",
@@ -48,9 +58,11 @@ pub const Inst = struct {
/// Register Logical Right Shift, uses r_type payload
srlw,
/// Jumps, but stores the address of the instruction following the
/// jump in `rd`.
///
/// Uses j_type payload.
jal,
/// Jumps. Uses `inst` payload.
j,
/// Immediate AND, uses i_type payload
andi,
@@ -93,55 +105,34 @@ pub const Inst = struct {
/// Boolean NOT, Uses rr payload
not,
/// Generates a NO-OP, uses nop payload
nop,
ret,
/// Load double (64 bits)
/// Load double (64 bits), uses i_type payload
ld,
/// Store double (64 bits)
sd,
/// Load word (32 bits)
/// Load word (32 bits), uses i_type payload
lw,
/// Store word (32 bits)
sw,
/// Load half (16 bits)
/// Load half (16 bits), uses i_type payload
lh,
/// Store half (16 bits)
sh,
/// Load byte (8 bits)
/// Load byte (8 bits), uses i_type payload
lb,
/// Store byte (8 bits)
/// Store double (64 bits), uses s_type payload
sd,
/// Store word (32 bits), uses s_type payload
sw,
/// Store half (16 bits), uses s_type payload
sh,
/// Store byte (8 bits), uses s_type payload
sb,
/// Pseudo-instruction: End of prologue
dbg_prologue_end,
/// Pseudo-instruction: Beginning of epilogue
dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
dbg_line,
/// Psuedo-instruction that will generate a backpatched
/// function prologue.
psuedo_prologue,
/// Psuedo-instruction that will generate a backpatched
/// function epilogue
psuedo_epilogue,
/// Loads the address of a value that hasn't yet been allocated in memory.
///
/// uses the Mir.LoadSymbolPayload payload.
load_symbol,
// TODO: add description
// this is bad, remove this
ldr_ptr_stack,
/// A pseudo-instruction. Used for anything that isn't 1:1 with an
/// assembly instruction.
pseudo,
};
/// The position of an MIR instruction within the `Mir` instructions array.
pub const Index = u32;
/// All instructions have a 4-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
/// this union. `Ops` determines which union field is active, as well as
/// how to interpret the data within.
pub const Data = union {
/// No additional data
@@ -152,22 +143,69 @@ pub const Inst = struct {
///
/// Used by e.g. b
inst: Index,
/// A 16-bit immediate value.
///
/// Used by e.g. svc
imm16: i16,
/// A 12-bit immediate value.
///
/// Used by e.g. psuedo_prologue
imm12: i12,
/// Index into `extra`. Meaning of what can be found there is context-dependent.
///
/// Used by e.g. load_memory
payload: u32,
r_type: struct {
rd: Register,
rs1: Register,
rs2: Register,
},
i_type: struct {
rd: Register,
rs1: Register,
imm12: Immediate,
},
s_type: struct {
rs1: Register,
rs2: Register,
imm5: Immediate,
imm7: Immediate,
},
b_type: struct {
rs1: Register,
rs2: Register,
inst: Inst.Index,
},
u_type: struct {
rd: Register,
imm20: Immediate,
},
j_type: struct {
rd: Register,
inst: Inst.Index,
},
/// Debug info: line and column
///
/// Used by e.g. pseudo_dbg_line
pseudo_dbg_line_column: struct {
line: u32,
column: u32,
},
// Custom types to be lowered
/// Register + Memory
rm: struct {
r: Register,
m: Memory,
},
reg_list: Mir.RegisterList,
/// A register
///
/// Used by e.g. blr
reg: Register,
/// Two registers
///
/// Used by e.g. mv
@@ -175,51 +213,84 @@ pub const Inst = struct {
rd: Register,
rs: Register,
},
/// I-Type
};
pub const Ops = enum {
/// No data associated with this instruction (only mnemonic is used).
none,
/// Two registers
rr,
/// Three registers
rrr,
/// Two registers + immediate, uses the i_type payload.
rri,
/// Two registers + Two Immediates
rrii,
/// Two registers + another instruction.
rr_inst,
/// Register + Memory
rm,
/// Register + Immediate
ri,
/// Another instruction.
inst,
/// Pseudo-instruction that will generate a backpatched
/// function prologue.
pseudo_prologue,
/// Pseudo-instruction that will generate a backpatched
/// function epilogue
pseudo_epilogue,
/// Pseudo-instruction: End of prologue
pseudo_dbg_prologue_end,
/// Pseudo-instruction: Beginning of epilogue
pseudo_dbg_epilogue_begin,
/// Pseudo-instruction: Update debug line
pseudo_dbg_line_column,
/// Pseudo-instruction that loads from memory into a register.
///
/// Used by e.g. jalr
i_type: struct {
rd: Register,
rs1: Register,
imm12: i12,
},
/// R-Type
/// Uses `rm` payload.
pseudo_load_rm,
/// Pseudo-instruction that stores from a register into memory
///
/// Used by e.g. add
r_type: struct {
rd: Register,
rs1: Register,
rs2: Register,
},
/// B-Type
/// Uses `rm` payload.
pseudo_store_rm,
/// Pseudo-instruction that loads the address of memory into a register.
///
/// Used by e.g. beq
b_type: struct {
rs1: Register,
rs2: Register,
inst: Inst.Index,
},
/// J-Type
/// Uses `rm` payload.
pseudo_lea_rm,
/// Shorthand for returning, aka jumping to ra register.
///
/// Used by e.g. jal
j_type: struct {
rd: Register,
inst: Inst.Index,
},
/// U-Type
/// Uses nop payload.
pseudo_ret,
/// Jumps. Uses `inst` payload.
pseudo_j,
/// Dead inst, ignored by the emitter.
pseudo_dead,
/// Loads the address of a value that hasn't yet been allocated in memory.
///
/// Used by e.g. lui
u_type: struct {
rd: Register,
imm20: i20,
},
/// Debug info: line and column
/// uses the Mir.LoadSymbolPayload payload.
pseudo_load_symbol,
/// Moves the value of rs1 to rd.
///
/// Used by e.g. dbg_line
dbg_line_column: struct {
line: u32,
column: u32,
},
/// uses the `rr` payload.
pseudo_mv,
pseudo_restore_regs,
pseudo_spill_regs,
};
// Make sure we don't accidentally make instructions bigger than expected.
@@ -229,14 +300,32 @@ pub const Inst = struct {
// assert(@sizeOf(Inst) == 8);
// }
// }
pub fn format(
inst: Inst,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
assert(fmt.len == 0);
_ = options;
try writer.print("Tag: {s}, Ops: {s}", .{ @tagName(inst.tag), @tagName(inst.ops) });
}
};
pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void {
mir.instructions.deinit(gpa);
mir.frame_locs.deinit(gpa);
gpa.free(mir.extra);
mir.* = undefined;
}
pub const FrameLoc = struct {
base: Register,
disp: i32,
};
/// Returns the requested data, as well as the new index which is at the start of the
/// trailers for the object.
pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } {
@@ -291,11 +380,11 @@ pub const RegisterList = struct {
return self.bitset.iterator(options);
}
pub fn count(self: Self) u32 {
pub fn count(self: Self) i32 {
return @intCast(self.bitset.count());
}
pub fn size(self: Self) u32 {
pub fn size(self: Self) i32 {
return @intCast(self.bitset.count() * 8);
}
};
@@ -307,4 +396,8 @@ const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const Immediate = bits.Immediate;
const Memory = bits.Memory;
const FrameIndex = bits.FrameIndex;
const FrameAddr = @import("CodeGen.zig").FrameAddr;
const IntegerBitSet = std.bit_set.IntegerBitSet;
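To make the Tag/Ops/Data split concrete: a single tag like addi can appear with different Ops, and Ops (not Tag) selects the active Data field. For example, addi a0, sp, 16 as MIR would look roughly like this (an editor's sketch; .a0 is assumed to exist in the Register enum):

const inst: Mir.Inst = .{
    .tag = .addi,
    .ops = .rri, // two registers + immediate: Lower reads the i_type payload
    .data = .{ .i_type = .{
        .rd = .a0,
        .rs1 = .sp,
        .imm12 = Immediate.s(16),
    } },
};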


@@ -93,7 +93,7 @@ pub fn classifyType(ty: Type, mod: *Module) Class {
/// There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, mod: *Module) [8]Class {
pub fn classifySystem(ty: Type, mod: *Module) [8]Class {
var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag(mod)) {
.Pointer => switch (ty.ptrSize(mod)) {
@@ -109,18 +109,42 @@ pub fn classifySystemV(ty: Type, mod: *Module) [8]Class {
},
.Optional => {
if (ty.isPtrLikeOptional(mod)) {
result[0] = .integer;
return result;
}
result[0] = .integer;
result[1] = .integer;
return result;
},
else => return result,
.Int, .Enum, .ErrorSet => {
const int_bits = ty.intInfo(mod).bits;
if (int_bits <= 64) {
result[0] = .integer;
return result;
}
if (int_bits <= 128) {
result[0] = .integer;
result[1] = .integer;
return result;
}
unreachable; // support > 128 bit int arguments
},
.ErrorUnion => {
const payload = ty.errorUnionPayload(mod);
const payload_bits = payload.bitSize(mod);
if (payload_bits <= 64) {
result[0] = .integer;
result[1] = .integer;
return result;
}
unreachable; // support > 64 bit error payloads
},
else => |bad_ty| std.debug.panic("classifySystem {s}", .{@tagName(bad_ty)}),
}
}
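In short: scalars up to 64 bits occupy one integer class slot, 65-128-bit integers occupy two, and an error union with a payload of at most 64 bits occupies two (payload plus error code), mirroring how the RISC-V calling convention passes values wider than XLEN in register pairs.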
pub const callee_preserved_regs = [_]Register{
.s0, .s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11,
// .s0 is omitted to be used as a frame pointer
.s1, .s2, .s3, .s4, .s5, .s6, .s7, .s8, .s9, .s10, .s11,
};
pub const function_arg_regs = [_]Register{


@@ -2,391 +2,141 @@ const std = @import("std");
const DW = std.dwarf;
const assert = std.debug.assert;
const testing = std.testing;
const Encoding = @import("Encoding.zig");
const Mir = @import("Mir.zig");
// TODO: this is only tagged to facilitate the monstrosity.
// Once packed structs work make it packed.
pub const Instruction = union(enum) {
R: packed struct {
opcode: u7,
rd: u5,
funct3: u3,
rs1: u5,
rs2: u5,
funct7: u7,
},
I: packed struct {
opcode: u7,
rd: u5,
funct3: u3,
rs1: u5,
imm0_11: u12,
},
S: packed struct {
opcode: u7,
imm0_4: u5,
funct3: u3,
rs1: u5,
rs2: u5,
imm5_11: u7,
},
B: packed struct {
opcode: u7,
imm11: u1,
imm1_4: u4,
funct3: u3,
rs1: u5,
rs2: u5,
imm5_10: u6,
imm12: u1,
},
U: packed struct {
opcode: u7,
rd: u5,
imm12_31: u20,
},
J: packed struct {
opcode: u7,
rd: u5,
imm12_19: u8,
imm11: u1,
imm1_10: u10,
imm20: u1,
},
pub const Memory = struct {
base: Base,
mod: Mod,
// TODO: once packed structs work we can remove this monstrosity.
pub fn toU32(self: Instruction) u32 {
return switch (self) {
.R => |v| @as(u32, @bitCast(v)),
.I => |v| @as(u32, @bitCast(v)),
.S => |v| @as(u32, @bitCast(v)),
.B => |v| @as(u32, @intCast(v.opcode)) + (@as(u32, @intCast(v.imm11)) << 7) + (@as(u32, @intCast(v.imm1_4)) << 8) + (@as(u32, @intCast(v.funct3)) << 12) + (@as(u32, @intCast(v.rs1)) << 15) + (@as(u32, @intCast(v.rs2)) << 20) + (@as(u32, @intCast(v.imm5_10)) << 25) + (@as(u32, @intCast(v.imm12)) << 31),
.U => |v| @as(u32, @bitCast(v)),
.J => |v| @as(u32, @bitCast(v)),
};
pub const Base = union(enum) {
reg: Register,
frame: FrameIndex,
reloc: Symbol,
};
pub const Mod = union(enum(u1)) {
rm: struct {
size: Size,
disp: i32 = 0,
},
off: u64,
};
pub const Size = enum(u4) {
/// Byte, 1 byte
byte,
/// Half word, 2 bytes
hword,
/// Word, 4 bytes
word,
/// Double word, 8 Bytes
dword,
pub fn fromSize(size: u32) Size {
return switch (size) {
1 => .byte,
2 => .hword,
4 => .word,
8 => .dword,
else => unreachable,
};
}
pub fn fromBitSize(bit_size: u64) Size {
return switch (bit_size) {
8 => .byte,
16 => .hword,
32 => .word,
64 => .dword,
else => unreachable,
};
}
pub fn bitSize(s: Size) u64 {
return switch (s) {
.byte => 8,
.hword => 16,
.word => 32,
.dword => 64,
};
}
};
/// Asserts `mem` can be represented as a `FrameLoc`.
pub fn toFrameLoc(mem: Memory, mir: Mir) Mir.FrameLoc {
switch (mem.base) {
.reg => |reg| {
return .{
.base = reg,
.disp = switch (mem.mod) {
.off => unreachable, // TODO: toFrameLoc disp.off
.rm => |rm| rm.disp,
},
};
},
.frame => |index| return mir.frame_locs.get(@intFromEnum(index)),
.reloc => unreachable,
}
}
};
pub const Immediate = union(enum) {
signed: i32,
unsigned: u32,
pub fn u(x: u64) Immediate {
return .{ .unsigned = x };
}
fn rType(op: u7, fn3: u3, fn7: u7, rd: Register, r1: Register, r2: Register) Instruction {
return Instruction{
.R = .{
.opcode = op,
.funct3 = fn3,
.funct7 = fn7,
.rd = rd.id(),
.rs1 = r1.id(),
.rs2 = r2.id(),
pub fn s(x: i32) Immediate {
return .{ .signed = x };
}
pub fn asSigned(imm: Immediate, bit_size: u64) i64 {
return switch (imm) {
.signed => |x| switch (bit_size) {
1, 8 => @as(i8, @intCast(x)),
16 => @as(i16, @intCast(x)),
32, 64 => x,
else => unreachable,
},
.unsigned => |x| switch (bit_size) {
1, 8 => @as(i8, @bitCast(@as(u8, @intCast(x)))),
16 => @as(i16, @bitCast(@as(u16, @intCast(x)))),
32 => @as(i32, @bitCast(@as(u32, @intCast(x)))),
64 => @bitCast(x),
else => unreachable,
},
};
}
// RISC-V is all signed all the time -- convert immediates to unsigned for processing
fn iType(op: u7, fn3: u3, rd: Register, r1: Register, imm: i12) Instruction {
const umm = @as(u12, @bitCast(imm));
return Instruction{
.I = .{
.opcode = op,
.funct3 = fn3,
.rd = rd.id(),
.rs1 = r1.id(),
.imm0_11 = umm,
pub fn asUnsigned(imm: Immediate, bit_size: u64) u64 {
return switch (imm) {
.signed => |x| switch (bit_size) {
1, 8 => @as(u8, @bitCast(@as(i8, @intCast(x)))),
16 => @as(u16, @bitCast(@as(i16, @intCast(x)))),
32, 64 => @as(u32, @bitCast(x)),
else => unreachable,
},
.unsigned => |x| switch (bit_size) {
1, 8 => @as(u8, @intCast(x)),
16 => @as(u16, @intCast(x)),
32 => @as(u32, @intCast(x)),
64 => x,
else => unreachable,
},
};
}
fn sType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i12) Instruction {
const umm = @as(u12, @bitCast(imm));
return Instruction{
.S = .{
.opcode = op,
.funct3 = fn3,
.rs1 = r1.id(),
.rs2 = r2.id(),
.imm0_4 = @as(u5, @truncate(umm)),
.imm5_11 = @as(u7, @truncate(umm >> 5)),
},
pub fn asBits(imm: Immediate, comptime T: type) T {
const int_info = @typeInfo(T).Int;
if (int_info.signedness != .unsigned) @compileError("Immediate.asBits needs unsigned T");
return switch (imm) {
.signed => |x| @bitCast(@as(std.meta.Int(.signed, int_info.bits), @intCast(x))),
.unsigned => |x| @intCast(x),
};
}
// Use significance value rather than bit value, same for J-type
// -- less burden on callsite, bonus semantic checking
fn bType(op: u7, fn3: u3, r1: Register, r2: Register, imm: i13) Instruction {
const umm = @as(u13, @bitCast(imm));
assert(umm % 4 == 0); // misaligned branch target
return Instruction{
.B = .{
.opcode = op,
.funct3 = fn3,
.rs1 = r1.id(),
.rs2 = r2.id(),
.imm1_4 = @as(u4, @truncate(umm >> 1)),
.imm5_10 = @as(u6, @truncate(umm >> 5)),
.imm11 = @as(u1, @truncate(umm >> 11)),
.imm12 = @as(u1, @truncate(umm >> 12)),
},
};
}
// We have to extract the 20 bits anyway -- let's not make it more painful
fn uType(op: u7, rd: Register, imm: i20) Instruction {
const umm = @as(u20, @bitCast(imm));
return Instruction{
.U = .{
.opcode = op,
.rd = rd.id(),
.imm12_31 = umm,
},
};
}
fn jType(op: u7, rd: Register, imm: i21) Instruction {
const umm = @as(u21, @bitCast(imm));
assert(umm % 2 == 0); // misaligned jump target
return Instruction{
.J = .{
.opcode = op,
.rd = rd.id(),
.imm1_10 = @as(u10, @truncate(umm >> 1)),
.imm11 = @as(u1, @truncate(umm >> 11)),
.imm12_19 = @as(u8, @truncate(umm >> 12)),
.imm20 = @as(u1, @truncate(umm >> 20)),
},
};
}
// The meat and potatoes. Arguments are in the order in which they would appear in assembly code.
// Arithmetic/Logical, Register-Register
pub fn add(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b000, 0b0000000, rd, r1, r2);
}
pub fn sub(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b000, 0b0100000, rd, r1, r2);
}
pub fn @"and"(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b111, 0b0000000, rd, r1, r2);
}
pub fn @"or"(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b110, 0b0000000, rd, r1, r2);
}
pub fn xor(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b100, 0b0000000, rd, r1, r2);
}
pub fn sll(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b001, 0b0000000, rd, r1, r2);
}
pub fn srl(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b101, 0b0000000, rd, r1, r2);
}
pub fn sra(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b101, 0b0100000, rd, r1, r2);
}
pub fn slt(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b010, 0b0000000, rd, r1, r2);
}
pub fn sltu(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b011, 0b0000000, rd, r1, r2);
}
// M extension operations
pub fn mul(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0110011, 0b000, 0b0000001, rd, r1, r2);
}
// Arithmetic/Logical, Register-Register (32-bit)
pub fn addw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b000, 0b0000000, rd, r1, r2);
}
pub fn subw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b000, 0b0100000, rd, r1, r2);
}
pub fn sllw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b001, 0b0000000, rd, r1, r2);
}
pub fn srlw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b101, 0b0000000, rd, r1, r2);
}
pub fn sraw(rd: Register, r1: Register, r2: Register) Instruction {
return rType(0b0111011, 0b101, 0b0100000, rd, r1, r2);
}
// Arithmetic/Logical, Register-Immediate
pub fn addi(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b000, rd, r1, imm);
}
pub fn andi(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b111, rd, r1, imm);
}
pub fn ori(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b110, rd, r1, imm);
}
pub fn xori(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b100, rd, r1, imm);
}
pub fn slli(rd: Register, r1: Register, shamt: u6) Instruction {
return iType(0b0010011, 0b001, rd, r1, shamt);
}
pub fn srli(rd: Register, r1: Register, shamt: u6) Instruction {
return iType(0b0010011, 0b101, rd, r1, shamt);
}
pub fn srai(rd: Register, r1: Register, shamt: u6) Instruction {
return iType(0b0010011, 0b101, rd, r1, (@as(i12, 1) << 10) + shamt);
}
pub fn slti(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0010011, 0b010, rd, r1, imm);
}
pub fn sltiu(rd: Register, r1: Register, imm: u12) Instruction {
return iType(0b0010011, 0b011, rd, r1, @as(i12, @bitCast(imm)));
}
// Arithmetic/Logical, Register-Immediate (32-bit)
pub fn addiw(rd: Register, r1: Register, imm: i12) Instruction {
return iType(0b0011011, 0b000, rd, r1, imm);
}
pub fn slliw(rd: Register, r1: Register, shamt: u6) Instruction {
return iType(0b0011011, 0b001, rd, r1, shamt);
}
pub fn srliw(rd: Register, r1: Register, shamt: u6) Instruction {
return iType(0b0011011, 0b101, rd, r1, shamt);
}
pub fn sraiw(rd: Register, r1: Register, shamt: u6) Instruction {
return iType(0b0011011, 0b101, rd, r1, (@as(i12, 1) << 10) + shamt);
}
// Upper Immediate
pub fn lui(rd: Register, imm: i20) Instruction {
return uType(0b0110111, rd, imm);
}
pub fn auipc(rd: Register, imm: i20) Instruction {
return uType(0b0010111, rd, imm);
}
// Load
pub fn ld(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b011, rd, base, offset);
}
pub fn lw(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b010, rd, base, offset);
}
pub fn lwu(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b110, rd, base, offset);
}
pub fn lh(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b001, rd, base, offset);
}
pub fn lhu(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b101, rd, base, offset);
}
pub fn lb(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b000, rd, base, offset);
}
pub fn lbu(rd: Register, offset: i12, base: Register) Instruction {
return iType(0b0000011, 0b100, rd, base, offset);
}
// Store
pub fn sd(rs: Register, offset: i12, base: Register) Instruction {
return sType(0b0100011, 0b011, base, rs, offset);
}
pub fn sw(rs: Register, offset: i12, base: Register) Instruction {
return sType(0b0100011, 0b010, base, rs, offset);
}
pub fn sh(rs: Register, offset: i12, base: Register) Instruction {
return sType(0b0100011, 0b001, base, rs, offset);
}
pub fn sb(rs: Register, offset: i12, base: Register) Instruction {
return sType(0b0100011, 0b000, base, rs, offset);
}
// Fence
// TODO: implement fence
// Branch
pub fn beq(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b000, r1, r2, offset);
}
pub fn bne(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b001, r1, r2, offset);
}
pub fn blt(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b100, r1, r2, offset);
}
pub fn bge(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b101, r1, r2, offset);
}
pub fn bltu(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b110, r1, r2, offset);
}
pub fn bgeu(r1: Register, r2: Register, offset: i13) Instruction {
return bType(0b1100011, 0b111, r1, r2, offset);
}
// Jump
pub fn jal(link: Register, offset: i21) Instruction {
return jType(0b1101111, link, offset);
}
pub fn jalr(link: Register, offset: i12, base: Register) Instruction {
return iType(0b1100111, 0b000, link, base, offset);
}
// System
pub const ecall = iType(0b1110011, 0b000, .zero, .zero, 0x000);
pub const ebreak = iType(0b1110011, 0b000, .zero, .zero, 0x001);
pub const unimp = iType(0, 0, .zero, .zero, 0);
};
pub const Register = enum(u6) {
@@ -421,39 +171,52 @@ pub const Register = enum(u6) {
}
};
// zig fmt: on
pub const FrameIndex = enum(u32) {
/// This index refers to the return address.
ret_addr,
/// This index refers to the frame pointer.
base_ptr,
/// This index refers to the entire stack frame.
stack_frame,
/// This index refers to where in the stack frame the args are spilled to.
args_frame,
/// This index refers to a frame dedicated to setting up args for functions called
/// in this function. Useful for aligning args separately.
call_frame,
/// This index refers to the frame where callee-saved registers are spilled and restored
/// from.
spill_frame,
/// Other indices are used for local variable stack slots
_,
test "serialize instructions" {
const Testcase = struct {
inst: Instruction,
expected: u32,
};
pub const named_count = @typeInfo(FrameIndex).Enum.fields.len;
const testcases = [_]Testcase{
.{ // add t6, zero, zero
.inst = Instruction.add(.t6, .zero, .zero),
.expected = 0b0000000_00000_00000_000_11111_0110011,
},
.{ // sd s0, 0x7f(s0)
.inst = Instruction.sd(.s0, 0x7f, .s0),
.expected = 0b0000011_01000_01000_011_11111_0100011,
},
.{ // bne s0, s1, 0x42
.inst = Instruction.bne(.s0, .s1, 0x42),
.expected = 0b0_000010_01001_01000_001_0001_0_1100011,
},
.{ // j 0x1a
.inst = Instruction.jal(.zero, 0x1a),
.expected = 0b0_0000001101_0_00000000_00000_1101111,
},
.{ // ebreak
.inst = Instruction.ebreak,
.expected = 0b000000000001_00000_000_00000_1110011,
},
};
for (testcases) |case| {
const actual = case.inst.toU32();
try testing.expectEqual(case.expected, actual);
pub fn isNamed(fi: FrameIndex) bool {
return @intFromEnum(fi) < named_count;
}
}
pub fn format(
fi: FrameIndex,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
try writer.writeAll("FrameIndex");
if (fi.isNamed()) {
try writer.writeByte('.');
try writer.writeAll(@tagName(fi));
} else {
try writer.writeByte('(');
try std.fmt.formatType(@intFromEnum(fi), fmt, options, writer, 0);
try writer.writeByte(')');
}
}
};
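A quick sketch (not from the commit) of how the FrameIndex format method above renders named versus unnamed indices; it assumes the enum and std.testing are in scope.

test "FrameIndex formatting" {
    var buf: [32]u8 = undefined;
    // Named indices print with their tag...
    const named = try std.fmt.bufPrint(&buf, "{}", .{FrameIndex.ret_addr});
    try std.testing.expectEqualStrings("FrameIndex.ret_addr", named);
    // ...while local stack slots print their raw index.
    const slot: FrameIndex = @enumFromInt(12);
    const unnamed = try std.fmt.bufPrint(&buf, "{}", .{slot});
    try std.testing.expectEqualStrings("FrameIndex(12)", unnamed);
}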
/// A linker symbol not yet allocated in VM.
pub const Symbol = struct {
/// Index of the containing atom.
atom_index: u32,
/// Index into the linker's symbol table.
sym_index: u32,
};

View File

@ -0,0 +1,49 @@
pub const Instruction = struct {
encoding: Encoding,
ops: [4]Operand = .{.none} ** 4,
pub const Operand = union(enum) {
none,
reg: Register,
mem: Memory,
imm: Immediate,
};
pub fn new(mnemonic: Encoding.Mnemonic, ops: []const Operand) !Instruction {
const encoding = (try Encoding.findByMnemonic(mnemonic, ops)) orelse {
log.err("no encoding found for: {s} {s} {s} {s} {s}", .{
@tagName(mnemonic),
@tagName(if (ops.len > 0) ops[0] else .none),
@tagName(if (ops.len > 1) ops[1] else .none),
@tagName(if (ops.len > 2) ops[2] else .none),
@tagName(if (ops.len > 3) ops[3] else .none),
});
return error.InvalidInstruction;
};
var result_ops: [4]Operand = .{.none} ** 4;
@memcpy(result_ops[0..ops.len], ops);
return .{
.encoding = encoding,
.ops = result_ops,
};
}
pub fn encode(inst: Instruction, writer: anytype) !void {
try writer.writeInt(u32, inst.encoding.data.toU32(), .little);
}
};
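For flavor, a hypothetical use of this encoder API: the mnemonic spelling (.addi) and the Immediate constructor (Immediate.s) are assumptions, since Encoding.Mnemonic and bits.Immediate are not shown in this diff.

test "build and encode an instruction" {
    var code = std.ArrayList(u8).init(std.testing.allocator);
    defer code.deinit();
    // Look up an encoding for the mnemonic/operand combination, then emit it.
    const inst = try Instruction.new(.addi, &.{
        .{ .reg = .t0 },
        .{ .reg = .sp },
        .{ .imm = Immediate.s(16) },
    });
    try inst.encode(code.writer());
    try std.testing.expectEqual(@as(usize, 4), code.items.len);
}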
const std = @import("std");
const Lower = @import("Lower.zig");
const Mir = @import("Mir.zig");
const bits = @import("bits.zig");
const Encoding = @import("Encoding.zig");
const Register = bits.Register;
const Memory = bits.Memory;
const Immediate = bits.Immediate;
const log = std.log.scoped(.encode);

View File

@ -25,38 +25,52 @@ pub fn writeAddend(
}
pub fn writeInstU(code: *[4]u8, value: u32) void {
    var data = Encoding.Data{
        .U = mem.bytesToValue(std.meta.TagPayload(
            Encoding.Data,
            Encoding.Data.U,
        ), code),
    };
    const compensated: u32 = @bitCast(@as(i32, @bitCast(value)) + 0x800);
    data.U.imm12_31 = bitSlice(compensated, 31, 12);
    mem.writeInt(u32, code, data.toU32(), .little);
}
pub fn writeInstI(code: *[4]u8, value: u32) void {
    var data = Encoding.Data{
        .I = mem.bytesToValue(std.meta.TagPayload(
            Encoding.Data,
            Encoding.Data.I,
        ), code),
    };
    data.I.imm0_11 = bitSlice(value, 11, 0);
    mem.writeInt(u32, code, data.toU32(), .little);
}
pub fn writeInstS(code: *[4]u8, value: u32) void {
    var data = Encoding.Data{
        .S = mem.bytesToValue(std.meta.TagPayload(
            Encoding.Data,
            Encoding.Data.S,
        ), code),
    };
    data.S.imm0_4 = bitSlice(value, 4, 0);
    data.S.imm5_11 = bitSlice(value, 11, 5);
    mem.writeInt(u32, code, data.toU32(), .little);
}
pub fn writeInstJ(code: *[4]u8, value: u32) void {
var data = Encoding.Data{
.J = mem.bytesToValue(std.meta.TagPayload(
Encoding.Data,
Encoding.Data.J,
), code),
};
data.J.imm1_10 = bitSlice(value, 10, 1);
data.J.imm11 = bitSlice(value, 11, 11);
data.J.imm12_19 = bitSlice(value, 19, 12);
data.J.imm20 = bitSlice(value, 20, 20);
mem.writeInt(u32, code, data.toU32(), .little);
}
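As a sanity check on the J-type scatter above, here is a self-contained sketch (bitSlice's parameter list is elided by the diff, so a local equivalent is used) walking the jal offset 0x1a from the serialize test through each field.

fn slice(value: u32, comptime high: u5, comptime low: u5) u32 {
    return (value >> low) & ((1 << (high - low + 1)) - 1);
}

test "J-type scatter of offset 0x1a" {
    const value: u32 = 0x1a; // 0b11010
    try std.testing.expectEqual(@as(u32, 0b0000001101), slice(value, 10, 1)); // imm1_10
    try std.testing.expectEqual(@as(u32, 0), slice(value, 11, 11)); // imm11
    try std.testing.expectEqual(@as(u32, 0), slice(value, 19, 12)); // imm12_19
    try std.testing.expectEqual(@as(u32, 0), slice(value, 20, 20)); // imm20
}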
fn bitSlice(
@ -67,8 +81,9 @@ fn bitSlice(
return @truncate((value >> low) & (1 << (high - low + 1)) - 1);
}
const bits = @import("../arch/riscv64/bits.zig");
const encoder = @import("../arch/riscv64/encoder.zig");
const Encoding = @import("../arch/riscv64/Encoding.zig");
const mem = std.mem;
const std = @import("std");
pub const Instruction = encoder.Instruction;

View File

@ -360,6 +360,7 @@ pub fn RegisterManager(
} else self.getRegIndexAssumeFree(tracked_index, inst);
}
pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) AllocateRegistersError!void {
log.debug("getting reg: {}", .{reg});
return self.getRegIndex(indexOfRegIntoTracked(reg) orelse return, inst);
}
pub fn getKnownReg(

View File

@ -526,7 +526,7 @@ pub fn backendSupportsFeature(
feature: Feature,
) bool {
return switch (feature) {
.panic_fn => ofmt == .c or use_llvm or cpu_arch == .x86_64,
.panic_unwrap_error => ofmt == .c or use_llvm,
.safety_check_formatted => ofmt == .c or use_llvm,
.error_return_trace => use_llvm,

View File

@ -16,6 +16,7 @@ test "global variable alignment" {
}
test "large alignment of local constant" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@ -25,6 +26,7 @@ test "large alignment of local constant" {
}
test "slicing array of length 1 can not assume runtime index is always zero" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest; // flaky
@ -42,6 +44,7 @@ test "default alignment allows unspecified in type syntax" {
}
test "implicitly decreasing pointer alignment" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const a: u32 align(4) = 3;
const b: u32 align(8) = 4;
try expect(addUnaligned(&a, &b) == 7);
@ -52,6 +55,7 @@ fn addUnaligned(a: *align(1) const u32, b: *align(1) const u32) u32 {
}
test "@alignCast pointers" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
var x: u32 align(4) = 1;
expectsOnly1(&x);
try expect(x == 2);
@ -223,6 +227,7 @@ fn fnWithAlignedStack() i32 {
}
test "implicitly decreasing slice alignment" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -235,6 +240,7 @@ fn addUnalignedSlice(a: []align(1) const u32, b: []align(1) const u32) u32 {
}
test "specifying alignment allows pointer cast" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -247,6 +253,7 @@ fn testBytesAlign(b: u8) !void {
}
test "@alignCast slices" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -265,6 +272,7 @@ fn sliceExpects4(slice: []align(4) u32) void {
}
test "return error union with 128-bit integer" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -277,6 +285,7 @@ fn give() anyerror!u128 {
}
test "page aligned array on stack" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -418,6 +427,7 @@ test "function callconv expression depends on generic parameter" {
}
test "runtime-known array index has best alignment possible" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
// take full advantage of over-alignment
@ -478,6 +488,7 @@ const DefaultAligned = struct {
};
test "read 128-bit field from default aligned struct in stack memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -497,6 +508,7 @@ var default_aligned_global = DefaultAligned{
};
test "read 128-bit field from default aligned struct in global memory" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -506,6 +518,7 @@ test "read 128-bit field from default aligned struct in global memory" {
}
test "struct field explicit alignment" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -550,6 +563,7 @@ test "align(@alignOf(T)) T does not force resolution of T" {
}
test "align(N) on functions" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -595,6 +609,7 @@ test "comptime alloc alignment" {
}
test "@alignCast null" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -610,6 +625,7 @@ test "alignment of slice element" {
}
test "sub-aligned pointer field access" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
@ -658,6 +674,7 @@ test "alignment of zero-bit types is respected" {
}
test "zero-bit fields in extern struct pad fields appropriately" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;

View File

@ -7,6 +7,7 @@ const expect = testing.expect;
const expectEqual = testing.expectEqual;
test "array to slice" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const a: u32 align(4) = 3;
const b: u32 align(8) = 4;
const a_slice: []align(1) const u32 = @as(*const [1]u32, &a)[0..];
@ -19,6 +20,8 @@ test "array to slice" {
}
test "arrays" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -47,6 +50,8 @@ fn getArrayLen(a: []const u32) usize {
}
test "array concat with undefined" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -70,6 +75,8 @@ test "array concat with undefined" {
}
test "array concat with tuple" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -86,6 +93,8 @@ test "array concat with tuple" {
}
test "array init with concat" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
const a = 'a';
@ -94,6 +103,8 @@ test "array init with concat" {
}
test "array init with mult" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -106,6 +117,7 @@ test "array init with mult" {
}
test "array literal with explicit type" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -116,6 +128,7 @@ test "array literal with explicit type" {
}
test "array literal with inferred length" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const hex_mult = [_]u16{ 4096, 256, 16, 1 };
try expect(hex_mult.len == 4);
@ -123,6 +136,7 @@ test "array literal with inferred length" {
}
test "array dot len const expr" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
try expect(comptime x: {
break :x some_array.len == 4;
});
@ -134,6 +148,7 @@ const ArrayDotLenConstExpr = struct {
const some_array = [_]u8{ 0, 1, 2, 3 };
test "array literal with specified size" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -145,6 +160,7 @@ test "array literal with specified size" {
}
test "array len field" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
var arr = [4]u8{ 0, 0, 0, 0 };
@ -157,6 +173,8 @@ test "array len field" {
}
test "array with sentinels" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -186,6 +204,7 @@ test "array with sentinels" {
}
test "void arrays" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
var array: [4]void = undefined;
array[0] = void{};
array[1] = array[2];
@ -194,6 +213,8 @@ test "void arrays" {
}
test "nested arrays of strings" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -209,6 +230,7 @@ test "nested arrays of strings" {
}
test "nested arrays of integers" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -224,6 +246,8 @@ test "nested arrays of integers" {
}
test "implicit comptime in array type size" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -237,6 +261,8 @@ fn plusOne(x: u32) u32 {
}
test "single-item pointer to array indexing and slicing" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -263,6 +289,8 @@ fn doSomeMangling(array: *[4]u8) void {
}
test "implicit cast zero sized array ptr to slice" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
{
@ -278,6 +306,7 @@ test "implicit cast zero sized array ptr to slice" {
}
test "anonymous list literal syntax" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -300,6 +329,8 @@ var s_array: [8]Sub = undefined;
const Sub = struct { b: u8 };
const Str = struct { a: []Sub };
test "set global var array via slice embedded in struct" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -315,6 +346,8 @@ test "set global var array via slice embedded in struct" {
}
test "read/write through global variable array of struct fields initialized via array mult" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -336,6 +369,8 @@ test "read/write through global variable array of struct fields initialized via
}
test "implicit cast single-item pointer" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -355,6 +390,7 @@ fn testArrayByValAtComptime(b: [2]u8) u8 {
}
test "comptime evaluating function that takes array by value" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -366,6 +402,8 @@ test "comptime evaluating function that takes array by value" {
}
test "runtime initialize array elem and then implicit cast to slice" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -376,6 +414,8 @@ test "runtime initialize array elem and then implicit cast to slice" {
}
test "array literal as argument to function" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -403,6 +443,8 @@ test "array literal as argument to function" {
}
test "double nested array to const slice cast in array literal" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -464,6 +506,7 @@ test "double nested array to const slice cast in array literal" {
}
test "anonymous literal in array" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -490,6 +533,8 @@ test "anonymous literal in array" {
}
test "access the null element of a null terminated array" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -508,6 +553,8 @@ test "access the null element of a null terminated array" {
}
test "type deduction for array subscript expression" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -527,6 +574,8 @@ test "type deduction for array subscript expression" {
}
test "sentinel element count towards the ABI size calculation" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -551,6 +600,8 @@ test "sentinel element count towards the ABI size calculation" {
}
test "zero-sized array with recursive type definition" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
@ -574,6 +625,8 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -608,6 +661,8 @@ test "type coercion of anon struct literal to array" {
}
test "type coercion of pointer to anon struct literal to pointer to array" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -642,12 +697,16 @@ test "type coercion of pointer to anon struct literal to pointer to array" {
}
test "array with comptime-only element type" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const a = [_]type{ u32, i32 };
try testing.expect(a[0] == u32);
try testing.expect(a[1] == i32);
}
test "tuple to array handles sentinel" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -660,6 +719,8 @@ test "tuple to array handles sentinel" {
}
test "array init of container level array variable" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -683,6 +744,8 @@ test "array init of container level array variable" {
}
test "runtime initialized sentinel-terminated array literal" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
var c: u16 = 300;
_ = &c;
const f = &[_:0x9999]u16{c};
@ -692,6 +755,8 @@ test "runtime initialized sentinel-terminated array literal" {
}
test "array of array agregate init" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -703,6 +768,8 @@ test "array of array agregate init" {
}
test "pointer to array has ptr field" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const arr: *const [5]u32 = &.{ 10, 20, 30, 40, 50 };
try std.testing.expect(arr.ptr == @as([*]const u32, arr));
try std.testing.expect(arr.ptr[0] == 10);
@ -713,6 +780,8 @@ test "pointer to array has ptr field" {
}
test "discarded array init preserves result location" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const S = struct {
fn f(p: *u32) u16 {
p.* += 1;
@ -731,6 +800,8 @@ test "discarded array init preserves result location" {
}
test "array init with no result location has result type" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const x = .{ .foo = [2]u16{
@intCast(10),
@intCast(20),
@ -742,6 +813,8 @@ test "array init with no result location has result type" {
}
test "slicing array of zero-sized values" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
@ -754,6 +827,8 @@ test "slicing array of zero-sized values" {
}
test "array init with no result pointer sets field result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const S = struct {
// A function parameter has a result type, but no result pointer.
fn f(arr: [1]u32) u32 {
@ -768,6 +843,8 @@ test "array init with no result pointer sets field result types" {
}
test "runtime side-effects in comptime-known array init" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
var side_effects: u4 = 0;
const init = [4]u4{
blk: {
@ -792,6 +869,8 @@ test "runtime side-effects in comptime-known array init" {
}
test "slice initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@ -851,6 +930,8 @@ test "many-item sentinel-terminated pointer initialized through reference to ano
}
test "pointer to array initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
var my_u32: u32 = 123;
var my_u64: u64 = 456;
_ = .{ &my_u32, &my_u64 };
@ -877,6 +958,8 @@ test "pointer to sentinel-terminated array initialized through reference to anon
}
test "tuple initialized through reference to anonymous array init provides result types" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
const Tuple = struct { u64, *const u32 };
const foo: *const Tuple = &.{
@intCast(12345),
@ -887,6 +970,8 @@ test "tuple initialized through reference to anonymous array init provides resul
}
test "copied array element doesn't alias source" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -901,6 +986,8 @@ test "copied array element doesn't alias source" {
}
test "array initialized with string literal" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -921,6 +1008,8 @@ test "array initialized with string literal" {
}
test "array initialized with array with sentinel" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
const S = struct {
@ -941,6 +1030,8 @@ test "array initialized with array with sentinel" {
}
test "store array of array of structs at comptime" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@ -966,6 +1057,8 @@ test "store array of array of structs at comptime" {
}
test "accessing multidimensional global array at comptime" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
@ -982,6 +1075,8 @@ test "accessing multidimensional global array at comptime" {
}
test "union that needs padding bytes inside an array" {
if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO