From a30688ef2a136c5a127c706880e8389b9b32e5be Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sun, 27 Mar 2022 08:20:15 +0700 Subject: [PATCH 01/29] stage2: sparcv9: Add some initial checks in codegen --- src/arch/sparcv9/CodeGen.zig | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index db5811dfda..5d9be85d13 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -1,6 +1,7 @@ //! SPARCv9 codegen. //! This lowers AIR into MIR. const std = @import("std"); +const assert = std.debug.assert; const builtin = @import("builtin"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); @@ -8,6 +9,7 @@ const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); +const build_options = @import("build_options"); const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; const FnResult = @import("../../codegen.zig").FnResult; @@ -35,5 +37,11 @@ pub fn generate( _ = code; _ = debug_output; + if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + + assert(module_fn.owner_decl.has_tv); + @panic("TODO implement SPARCv9 codegen"); } From a5a89fde1354892c6714c41ea691922bfa10c442 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 28 Mar 2022 20:31:40 +0700 Subject: [PATCH 02/29] stage2: sparcv9: Add skeleton codegen impl and necessary fields --- src/arch/sparcv9/CodeGen.zig | 287 +++++++++++++++++++++++++++++++++-- src/arch/sparcv9/Emit.zig | 37 +++++ src/arch/sparcv9/Mir.zig | 49 ++++++ 3 files changed, 362 insertions(+), 11 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 5d9be85d13..232b83b741 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -2,24 +2,198 @@ //! This lowers AIR into MIR. const std = @import("std"); const assert = std.debug.assert; +const mem = std.mem; +const Allocator = mem.Allocator; const builtin = @import("builtin"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); +const ErrorMsg = Module.ErrorMsg; const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); const Emit = @import("Emit.zig"); const Liveness = @import("../../Liveness.zig"); -const build_options = @import("build_options"); - +const Type = @import("../../type.zig").Type; const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; const FnResult = @import("../../codegen.zig").FnResult; const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const build_options = @import("build_options"); + const bits = @import("bits.zig"); const abi = @import("abi.zig"); +const Register = bits.Register; const Self = @This(); +const InnerError = error{ + OutOfMemory, + CodegenFail, + OutOfRegisters, +}; + +gpa: Allocator, +air: Air, +liveness: Liveness, +bin_file: *link.File, +target: *const std.Target, +mod_fn: *const Module.Fn, +code: *std.ArrayList(u8), +debug_output: DebugInfoOutput, +err_msg: ?*ErrorMsg, +args: []MCValue, +ret_mcv: MCValue, +fn_type: Type, +arg_index: usize, +src_loc: Module.SrcLoc, +stack_align: u32, + +/// MIR Instructions +mir_instructions: std.MultiArrayList(Mir.Inst) = .{}, +/// MIR extra data +mir_extra: std.ArrayListUnmanaged(u32) = .{}, + +/// Byte offset within the source file of the ending curly. 
+end_di_line: u32, +end_di_column: u32, + +/// The value is an offset into the `Function` `code` from the beginning. +/// To perform the reloc, write 32-bit signed little-endian integer +/// which is a relative jump, based on the address following the reloc. +exitlude_jump_relocs: std.ArrayListUnmanaged(usize) = .{}, + +/// Whenever there is a runtime branch, we push a Branch onto this stack, +/// and pop it off when the runtime branch joins. This provides an "overlay" +/// of the table of mappings from instructions to `MCValue` from within the branch. +/// This way we can modify the `MCValue` for an instruction in different ways +/// within different branches. Special consideration is needed when a branch +/// joins with its parent, to make sure all instructions have the same MCValue +/// across each runtime branch upon joining. +branch_stack: *std.ArrayList(Branch), + +// Key is the block instruction +blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, + +/// Maps offset to what is stored there. +stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, + +/// Offset from the stack base, representing the end of the stack frame. +max_end_stack: u32 = 0, +/// Represents the current end stack offset. If there is no existing slot +/// to place a new stack allocation, it goes here, and then bumps `max_end_stack`. +next_stack_offset: u32 = 0, + +/// Debug field, used to find bugs in the compiler. +air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, + +const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {}; + +const MCValue = union(enum) { + /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. + /// TODO Look into deleting this tag and using `dead` instead, since every use + /// of MCValue.none should be instead looking at the type and noticing it is 0 bits. + none, + /// Control flow will not allow this value to be observed. + unreach, + /// No more references to this value remain. + dead, + /// The value is undefined. + undef, + /// A pointer-sized integer that fits in a register. + /// If the type is a pointer, this is the pointer address in virtual address space. + immediate: u64, + /// The value is in a target-specific register. + register: Register, + /// The value is in memory at a hard-coded address. + /// If the type is a pointer, it means the pointer address is at this memory location. + memory: u64, + /// The value is one of the stack variables. + /// If the type is a pointer, it means the pointer address is in the stack at this offset. + stack_offset: u32, + /// The value is a pointer to one of the stack variables (payload is stack offset). + ptr_stack_offset: u32, + + fn isMemory(mcv: MCValue) bool { + return switch (mcv) { + .memory, .stack_offset => true, + else => false, + }; + } + + fn isImmediate(mcv: MCValue) bool { + return switch (mcv) { + .immediate => true, + else => false, + }; + } + + fn isMutable(mcv: MCValue) bool { + return switch (mcv) { + .none => unreachable, + .unreach => unreachable, + .dead => unreachable, + + .immediate, + .memory, + .ptr_stack_offset, + .undef, + => false, + + .register, + .stack_offset, + => true, + }; + } +}; + +const Branch = struct { + inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, + + fn deinit(self: *Branch, gpa: Allocator) void { + self.inst_table.deinit(gpa); + self.* = undefined; + } +}; + +const StackAllocation = struct { + inst: Air.Inst.Index, + /// TODO do we need size? 
should be determined by inst.ty.abiSize() + size: u32, +}; + +const BlockData = struct { + relocs: std.ArrayListUnmanaged(Reloc), + /// The first break instruction encounters `null` here and chooses a + /// machine code value for the block result, populating this field. + /// Following break instructions encounter that value and use it for + /// the location to store their block results. + mcv: MCValue, +}; + +const Reloc = union(enum) { + /// The value is an offset into the `Function` `code` from the beginning. + /// To perform the reloc, write 32-bit signed little-endian integer + /// which is a relative jump, based on the address following the reloc. + rel32: usize, + /// A branch in the ARM instruction set + arm_branch: struct { + pos: usize, + cond: @import("../arm/bits.zig").Condition, + }, +}; + +const CallMCValues = struct { + args: []MCValue, + return_value: MCValue, + stack_byte_count: u32, + stack_align: u32, + + fn deinit(self: *CallMCValues, func: *Self) void { + func.gpa.free(self.args); + self.* = undefined; + } +}; + + pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, @@ -29,19 +203,110 @@ pub fn generate( code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) GenerateSymbolError!FnResult { - _ = bin_file; - _ = src_loc; - _ = module_fn; - _ = air; - _ = liveness; - _ = code; - _ = debug_output; - if (build_options.skip_non_native and builtin.cpu.arch != bin_file.options.target.cpu.arch) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } assert(module_fn.owner_decl.has_tv); + const fn_type = module_fn.owner_decl.ty; - @panic("TODO implement SPARCv9 codegen"); + var branch_stack = std.ArrayList(Branch).init(bin_file.allocator); + defer { + assert(branch_stack.items.len == 1); + branch_stack.items[0].deinit(bin_file.allocator); + branch_stack.deinit(); + } + try branch_stack.append(.{}); + + var function = Self{ + .gpa = bin_file.allocator, + .air = air, + .liveness = liveness, + .target = &bin_file.options.target, + .bin_file = bin_file, + .mod_fn = module_fn, + .code = code, + .debug_output = debug_output, + .err_msg = null, + .args = undefined, // populated after `resolveCallingConventionValues` + .ret_mcv = undefined, // populated after `resolveCallingConventionValues` + .fn_type = fn_type, + .arg_index = 0, + .branch_stack = &branch_stack, + .src_loc = src_loc, + .stack_align = undefined, + .end_di_line = module_fn.rbrace_line, + .end_di_column = module_fn.rbrace_column, + }; + defer function.stack.deinit(bin_file.allocator); + defer function.blocks.deinit(bin_file.allocator); + defer function.exitlude_jump_relocs.deinit(bin_file.allocator); + + var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { + error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, + error.OutOfRegisters => return FnResult{ + .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), + }, + else => |e| return e, + }; + defer call_info.deinit(&function); + + function.args = call_info.args; + function.ret_mcv = call_info.return_value; + function.stack_align = call_info.stack_align; + function.max_end_stack = call_info.stack_byte_count; + + function.gen() catch |err| switch (err) { + error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, + error.OutOfRegisters => return FnResult{ + .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. 
This is a bug in the Zig compiler.", .{}), + }, + else => |e| return e, + }; + + var mir = Mir{ + .instructions = function.mir_instructions.toOwnedSlice(), + .extra = function.mir_extra.toOwnedSlice(bin_file.allocator), + }; + defer mir.deinit(bin_file.allocator); + + var emit = Emit{ + .mir = mir, + .bin_file = bin_file, + .debug_output = debug_output, + .target = &bin_file.options.target, + .src_loc = src_loc, + .code = code, + .prev_di_pc = 0, + .prev_di_line = module_fn.lbrace_line, + .prev_di_column = module_fn.lbrace_column, + }; + defer emit.deinit(); + + emit.emitMir() catch |err| switch (err) { + error.EmitFail => return FnResult{ .fail = emit.err_msg.? }, + else => |e| return e, + }; + + if (function.err_msg) |em| { + return FnResult{ .fail = em }; + } else { + return FnResult{ .appended = {} }; + } +} + +/// Caller must call `CallMCValues.deinit`. +fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { + _ = self; + _ = fn_ty; + + @panic("TODO implement resolveCallingConventionValues"); +} + + +/// Caller must call `CallMCValues.deinit`. +fn gen(self: *Self) !void { + _ = self; + + @panic("TODO implement gen"); } diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index ba644ede7e..1821570701 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -1,6 +1,43 @@ //! This file contains the functionality for lowering SPARCv9 MIR into //! machine code +const std = @import("std"); +const link = @import("../../link.zig"); +const Module = @import("../../Module.zig"); +const ErrorMsg = Module.ErrorMsg; +const Liveness = @import("../../Liveness.zig"); +const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; + const Emit = @This(); const Mir = @import("Mir.zig"); const bits = @import("bits.zig"); + +mir: Mir, +bin_file: *link.File, +debug_output: DebugInfoOutput, +target: *const std.Target, +err_msg: ?*ErrorMsg = null, +src_loc: Module.SrcLoc, +code: *std.ArrayList(u8), + +prev_di_line: u32, +prev_di_column: u32, +/// Relative to the beginning of `code`. +prev_di_pc: usize, + +const InnerError = error{ + OutOfMemory, + EmitFail, +}; + +pub fn emitMir( + emit: *Emit, +) InnerError!void { + _ = emit; + + @panic("TODO implement emitMir"); +} + +pub fn deinit(emit: *Emit) void { + emit.* = undefined; +} diff --git a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index f0d3b1dfbd..0f80a60ecf 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -6,6 +6,55 @@ //! The main purpose of MIR is to postpone the assignment of offsets until Isel, //! so that, for example, the smaller encodings of jump instructions can be used. +const std = @import("std"); + const Mir = @This(); const bits = @import("bits.zig"); const Register = bits.Register; + +instructions: std.MultiArrayList(Inst).Slice, + +/// The meaning of this data is determined by `Inst.Tag` value. +extra: []const u32, + +pub const Inst = struct { + tag: Tag, + /// The meaning of this depends on `tag`. + data: Data, + + pub const Tag = enum(u16) { + /// Pseudo-instruction: End of prologue + dbg_prologue_end, + /// Pseudo-instruction: Beginning of epilogue + dbg_epilogue_begin, + /// Pseudo-instruction: Update debug line + dbg_line, + }; + + /// The position of an MIR instruction within the `Mir` instructions array. + pub const Index = u32; + + /// All instructions have a 4-byte payload, which is contained within + /// this union. `Tag` determines which union field is active, as well as + /// how to interpret the data within. 
+ pub const Data = union { + /// No additional data + /// + /// Used by e.g. flushw + nop: void, + /// Debug info: line and column + /// + /// Used by e.g. dbg_line + dbg_line_column: struct { + line: u32, + column: u32, + }, + }; +}; + +pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void { + mir.instructions.deinit(gpa); + gpa.free(mir.extra); + mir.* = undefined; +} + From 1ba52272167b12ea14f688df7d0d34940d98ff5b Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 28 Mar 2022 22:37:55 +0700 Subject: [PATCH 03/29] stage2: sparcv9: Initial resolveCallingConventionValues implementation --- src/arch/sparcv9/CodeGen.zig | 97 +++++++++++++++++++++++++++++++++--- src/arch/sparcv9/abi.zig | 4 +- 2 files changed, 92 insertions(+), 9 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 232b83b741..0c63d992ee 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -193,7 +193,6 @@ const CallMCValues = struct { } }; - pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, @@ -242,7 +241,7 @@ pub fn generate( defer function.blocks.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); - var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_type, false) catch |err| switch (err) { error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.OutOfRegisters => return FnResult{ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), @@ -296,17 +295,101 @@ pub fn generate( } /// Caller must call `CallMCValues.deinit`. -fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { - _ = self; - _ = fn_ty; +fn resolveCallingConventionValues(self: *Self, fn_ty: Type, is_caller: bool) !CallMCValues { + const cc = fn_ty.fnCallingConvention(); + const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); + defer self.gpa.free(param_types); + fn_ty.fnParamTypes(param_types); + var result: CallMCValues = .{ + .args = try self.gpa.alloc(MCValue, param_types.len), + // These undefined values must be populated before returning from this function. + .return_value = undefined, + .stack_byte_count = undefined, + .stack_align = undefined, + }; + errdefer self.gpa.free(result.args); - @panic("TODO implement resolveCallingConventionValues"); + const ret_ty = fn_ty.fnReturnType(); + + switch (cc) { + .Naked => { + assert(result.args.len == 0); + result.return_value = .{ .unreach = {} }; + result.stack_byte_count = 0; + result.stack_align = 1; + return result; + }, + .Unspecified, .C => { + // SPARC Compliance Definition 2.4.1, Chapter 3 + // Low-Level System Information (64-bit psABI) - Function Calling Sequence + + var next_register: usize = 0; + var next_stack_offset: u32 = 0; + + // The caller puts the argument in %o0-%o5, which becomes %i0-%i5 inside the callee. 
+ const argument_registers = if (is_caller) abi.c_abi_int_param_regs_caller_view else abi.c_abi_int_param_regs_callee_view; + + for (param_types) |ty, i| { + const param_size = @intCast(u32, ty.abiSize(self.target.*)); + if (param_size <= 8) { + if (next_register < argument_registers.len) { + result.args[i] = .{ .register = argument_registers[next_register] }; + next_register += 1; + } else { + result.args[i] = .{ .stack_offset = next_stack_offset }; + next_register += next_stack_offset; + } + } else if (param_size <= 16) { + if (next_register < argument_registers.len - 1) { + return self.fail("TODO MCValues with 2 registers", .{}); + } else if (next_register < argument_registers.len) { + return self.fail("TODO MCValues split register + stack", .{}); + } else { + result.args[i] = .{ .stack_offset = next_stack_offset }; + next_register += next_stack_offset; + } + } else { + result.args[i] = .{ .stack_offset = next_stack_offset }; + next_register += next_stack_offset; + } + } + + result.stack_byte_count = next_stack_offset; + result.stack_align = 16; + }, + else => return self.fail("TODO implement function parameters for {} on sparcv9", .{cc}), + } + + if (ret_ty.zigTypeTag() == .NoReturn) { + result.return_value = .{ .unreach = {} }; + } else if (!ret_ty.hasRuntimeBits()) { + result.return_value = .{ .none = {} }; + } else switch (cc) { + .Naked => unreachable, + .Unspecified, .C => { + const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. + if (ret_ty_size <= 8) { + result.return_value = if (is_caller) .{ .register = abi.c_abi_int_return_regs_caller_view[0] } else .{ .register = abi.c_abi_int_return_regs_callee_view[0] }; + } else { + return self.fail("TODO support more return values for sparcv9", .{}); + } + }, + else => return self.fail("TODO implement function return values for {} on sparcv9", .{cc}), + } + return result; } - /// Caller must call `CallMCValues.deinit`. 
fn gen(self: *Self) !void { _ = self; @panic("TODO implement gen"); } + +fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { + @setCold(true); + assert(self.err_msg == null); + self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args); + return error.CodegenFail; +} diff --git a/src/arch/sparcv9/abi.zig b/src/arch/sparcv9/abi.zig index 5c9ea979fc..ceac33c08c 100644 --- a/src/arch/sparcv9/abi.zig +++ b/src/arch/sparcv9/abi.zig @@ -8,5 +8,5 @@ pub const callee_preserved_regs = [_]Register{}; pub const c_abi_int_param_regs_caller_view = [_]Register{ .o0, .o1, .o2, .o3, .o4, .o5 }; pub const c_abi_int_param_regs_callee_view = [_]Register{ .@"i0", .@"i1", .@"i2", .@"i3", .@"i4", .@"i5" }; -pub const c_abi_int_return_regs_caller_view = [_]Register{ .o0, .o1, .o2, .o3, .o4, .o5 }; -pub const c_abi_int_return_regs_callee_view = [_]Register{ .@"i0", .@"i1", .@"i2", .@"i3", .@"i4", .@"i5" }; +pub const c_abi_int_return_regs_caller_view = [_]Register{ .o0, .o1, .o2, .o3 }; +pub const c_abi_int_return_regs_callee_view = [_]Register{ .@"i0", .@"i1", .@"i2", .@"i3" }; From cf13356dabd79219499bddb63551f61a67a510bb Mon Sep 17 00:00:00 2001 From: Koakuma Date: Tue, 29 Mar 2022 20:37:32 +0700 Subject: [PATCH 04/29] stage2: sparcv9: Mir extraData implementation --- src/arch/riscv64/Mir.zig | 1 + src/arch/sparcv9/Mir.zig | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 5df3a86229..7b5049b7d4 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -144,3 +144,4 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end .end = i, }; } + diff --git a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index 0f80a60ecf..c8b9c6544f 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -58,3 +58,23 @@ pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void { mir.* = undefined; } +/// Returns the requested data, as well as the new index which is at the start of the +/// trailers for the object. 
+pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end: usize } { + const fields = std.meta.fields(T); + var i: usize = index; + var result: T = undefined; + inline for (fields) |field| { + @field(result, field.name) = switch (field.field_type) { + u32 => mir.extra[i], + i32 => @bitCast(i32, mir.extra[i]), + else => @compileError("bad field type"), + }; + i += 1; + } + return .{ + .data = result, + .end = i, + }; +} + From 927706e6d0cf289be979bb6f12be6636c0e07f59 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Tue, 29 Mar 2022 20:54:23 +0700 Subject: [PATCH 05/29] stage2: sparcv9: Emit debug inst placeholder --- src/arch/sparcv9/Emit.zig | 54 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index 1821570701..2587b94c7c 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -7,6 +7,7 @@ const Module = @import("../../Module.zig"); const ErrorMsg = Module.ErrorMsg; const Liveness = @import("../../Liveness.zig"); const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const DW = std.dwarf; const Emit = @This(); const Mir = @import("Mir.zig"); @@ -33,11 +34,60 @@ const InnerError = error{ pub fn emitMir( emit: *Emit, ) InnerError!void { - _ = emit; + const mir_tags = emit.mir.instructions.items(.tag); - @panic("TODO implement emitMir"); + // Emit machine code + for (mir_tags) |tag, index| { + const inst = @intCast(u32, index); + switch (tag) { + .dbg_line => try emit.mirDbgLine(inst), + + .dbg_prologue_end => try emit.mirDebugPrologueEnd(), + .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), + } + } } pub fn deinit(emit: *Emit) void { emit.* = undefined; } + +fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { + _ = self; + _ = line; + _ = column; + + @panic("TODO implement dbgAdvancePCAndLine"); +} + +fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column; + + switch (tag) { + .dbg_line => try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column), + else => unreachable, + } +} + +fn mirDebugPrologueEnd(self: *Emit) !void { + switch (self.debug_output) { + .dwarf => |dbg_out| { + try dbg_out.dbg_line.append(DW.LNS.set_prologue_end); + try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } +} + +fn mirDebugEpilogueBegin(self: *Emit) !void { + switch (self.debug_output) { + .dwarf => |dbg_out| { + try dbg_out.dbg_line.append(DW.LNS.set_epilogue_begin); + try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } +} From 94a84e783e32169268809e4aa4bdbfdff92d81e9 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Fri, 1 Apr 2022 19:28:24 +0700 Subject: [PATCH 06/29] stage2: sparcv9: Implement basic prologue/epilogue Mir emission --- src/arch/sparcv9/CodeGen.zig | 104 ++++++++++++++++++++++++++++++++++- src/arch/sparcv9/Emit.zig | 16 +++++- src/arch/sparcv9/Mir.zig | 47 +++++++++++++++- 3 files changed, 163 insertions(+), 4 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 0c63d992ee..c2aa082b11 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -382,9 +382,109 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, is_caller: bool) !Ca /// Caller must call `CallMCValues.deinit`. 
fn gen(self: *Self) !void { - _ = self; + const cc = self.fn_type.fnCallingConvention(); + if (cc != .Naked) { + // TODO Finish function prologue and epilogue for sparcv9. - @panic("TODO implement gen"); + // TODO Backpatch stack offset + // save %sp, -176, %sp + _ = try self.addInst(.{ + .tag = .save, + .data = .{ + .arithmetic_3op = .{ + .is_imm = true, + .rd = .sp, + .rs1 = .sp, + .rs2_or_imm = .{ .imm = -176 }, + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .dbg_prologue_end, + .data = .{ .nop = {} }, + }); + + try self.genBody(self.air.getMainBody()); + + _ = try self.addInst(.{ + .tag = .dbg_epilogue_begin, + .data = .{ .nop = {} }, + }); + + // exitlude jumps + if (self.exitlude_jump_relocs.items.len > 0 and + self.exitlude_jump_relocs.items[self.exitlude_jump_relocs.items.len - 1] == self.mir_instructions.len - 2) + { + // If the last Mir instruction (apart from the + // dbg_epilogue_begin) is the last exitlude jump + // relocation (which would just jump one instruction + // further), it can be safely removed + self.mir_instructions.orderedRemove(self.exitlude_jump_relocs.pop()); + } + + for (self.exitlude_jump_relocs.items) |jmp_reloc| { + _ = jmp_reloc; + return self.fail("TODO add branches in sparcv9", .{}); + } + + // return %i7 + 8 + _ = try self.addInst(.{ + .tag = .@"return", + .data = .{ + .arithmetic_2op = .{ + .is_imm = true, + .rs1 = .@"i7", + .rs2_or_imm = .{ .imm = 8 }, + }, + }, + }); + + // TODO Find a way to fill this slot + // nop + _ = try self.addInst(.{ + .tag = .nop, + .data = .{ .nop = {} }, + }); + } else { + _ = try self.addInst(.{ + .tag = .dbg_prologue_end, + .data = .{ .nop = {} }, + }); + + try self.genBody(self.air.getMainBody()); + + _ = try self.addInst(.{ + .tag = .dbg_epilogue_begin, + .data = .{ .nop = {} }, + }); + } + + // Drop them off at the rbrace. + _ = try self.addInst(.{ + .tag = .dbg_line, + .data = .{ .dbg_line_column = .{ + .line = self.end_di_line, + .column = self.end_di_column, + } }, + }); +} + +fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + _ = self; + _ = body; + + @panic("TODO implement genBody"); +} + +fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { + const gpa = self.gpa; + + try self.mir_instructions.ensureUnusedCapacity(gpa, 1); + + const result_index = @intCast(Air.Inst.Index, self.mir_instructions.len); + self.mir_instructions.appendAssumeCapacity(inst); + return result_index; } fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index 2587b94c7c..28c172f329 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -2,6 +2,7 @@ //! 
machine code const std = @import("std"); +const assert = std.debug.assert; const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); const ErrorMsg = Module.ErrorMsg; @@ -41,9 +42,15 @@ pub fn emitMir( const inst = @intCast(u32, index); switch (tag) { .dbg_line => try emit.mirDbgLine(inst), - .dbg_prologue_end => try emit.mirDebugPrologueEnd(), .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), + + .nop => @panic("TODO implement nop"), + + .save => @panic("TODO implement save"), + .restore => @panic("TODO implement restore"), + + .@"return" => @panic("TODO implement return"), } } } @@ -91,3 +98,10 @@ fn mirDebugEpilogueBegin(self: *Emit) !void { .none => {}, } } + +fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { + @setCold(true); + assert(emit.err_msg == null); + emit.err_msg = try ErrorMsg.create(emit.bin_file.allocator, emit.src_loc, format, args); + return error.EmitFail; +} diff --git a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index c8b9c6544f..43b008d189 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -29,6 +29,22 @@ pub const Inst = struct { dbg_epilogue_begin, /// Pseudo-instruction: Update debug line dbg_line, + + // All the real instructions are ordered by their section number + // in The SPARC Architecture Manual, Version 9. + + /// A.40 No Operation + /// It uses the nop field. + nop, + + /// A.46 SAVE and RESTORE + /// Those uses the arithmetic_3op field. + save, + restore, + + /// A.45 RETURN + /// It uses the arithmetic_2op field. + @"return", }; /// The position of an MIR instruction within the `Mir` instructions array. @@ -42,6 +58,36 @@ pub const Inst = struct { /// /// Used by e.g. flushw nop: void, + + /// Three operand arithmetic. + /// if is_imm true then it uses the imm field of rs2_or_imm, + /// otherwise it uses rs2 field. + /// + /// Used by e.g. add, sub + arithmetic_3op: struct { + is_imm: bool, + rd: Register, + rs1: Register, + rs2_or_imm: union { + rs2: Register, + imm: i13, + }, + }, + + /// Two operand arithmetic. + /// if is_imm true then it uses the imm field of rs2_or_imm, + /// otherwise it uses rs2 field. + /// + /// Used by e.g. return + arithmetic_2op: struct { + is_imm: bool, + rs1: Register, + rs2_or_imm: union { + rs2: Register, + imm: i13, + }, + }, + /// Debug info: line and column /// /// Used by e.g. dbg_line @@ -77,4 +123,3 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end .end = i, }; } - From 94d70bdb69d4459ea522bcef2bc02a56f5a92091 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Fri, 1 Apr 2022 19:30:37 +0700 Subject: [PATCH 07/29] stage2: sparcv9: Change ordering in Mir Tag --- src/arch/sparcv9/Mir.zig | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index 43b008d189..0b7c2185eb 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -37,14 +37,14 @@ pub const Inst = struct { /// It uses the nop field. nop, + /// A.45 RETURN + /// It uses the arithmetic_2op field. + @"return", + /// A.46 SAVE and RESTORE /// Those uses the arithmetic_3op field. save, restore, - - /// A.45 RETURN - /// It uses the arithmetic_2op field. - @"return", }; /// The position of an MIR instruction within the `Mir` instructions array. 
From 18c98eb4293ced51689ad67f15575afc120ceee1 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Fri, 1 Apr 2022 19:58:49 +0700 Subject: [PATCH 08/29] stage2: sparcv9: Placeholder for Air instructions in genBody --- src/arch/sparcv9/CodeGen.zig | 182 ++++++++++++++++++++++++++++++++++- 1 file changed, 179 insertions(+), 3 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index c2aa082b11..286b67a145 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -471,10 +471,181 @@ fn gen(self: *Self) !void { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - _ = self; - _ = body; + const air_tags = self.air.instructions.items(.tag); - @panic("TODO implement genBody"); + for (body) |inst| { + const old_air_bookkeeping = self.air_bookkeeping; + try self.ensureProcessDeathCapacity(Liveness.bpi); + + switch (air_tags[inst]) { + .arg => @panic("TODO implement arg"), + .add => @panic("TODO implement add"), + .addwrap => @panic("TODO implement addwrap"), + .add_sat => @panic("TODO implement add_sat"), + .sub => @panic("TODO implement sub"), + .subwrap => @panic("TODO implement subwrap"), + .sub_sat => @panic("TODO implement sub_sat"), + .mul => @panic("TODO implement mul"), + .mulwrap => @panic("TODO implement mulwrap"), + .mul_sat => @panic("TODO implement mul_sat"), + .div_float => @panic("TODO implement div_float"), + .div_trunc => @panic("TODO implement div_trunc"), + .div_floor => @panic("TODO implement div_floor"), + .div_exact => @panic("TODO implement div_exact"), + .rem => @panic("TODO implement rem"), + .mod => @panic("TODO implement mod"), + .ptr_add => @panic("TODO implement ptr_add"), + .ptr_sub => @panic("TODO implement ptr_sub"), + .max => @panic("TODO implement max"), + .min => @panic("TODO implement min"), + .add_with_overflow => @panic("TODO implement add_with_overflow"), + .sub_with_overflow => @panic("TODO implement sub_with_overflow"), + .mul_with_overflow => @panic("TODO implement mul_with_overflow"), + .shl_with_overflow => @panic("TODO implement shl_with_overflow"), + .alloc => @panic("TODO implement alloc"), + .ret_ptr => @panic("TODO implement ret_ptr"), + .assembly => @panic("TODO implement assembly"), + .bit_and => @panic("TODO implement bit_and"), + .bit_or => @panic("TODO implement bit_or"), + .shr => @panic("TODO implement shr"), + .shr_exact => @panic("TODO implement shr_exact"), + .shl => @panic("TODO implement shl"), + .shl_exact => @panic("TODO implement shl_exact"), + .shl_sat => @panic("TODO implement shl_sat"), + .xor => @panic("TODO implement xor"), + .not => @panic("TODO implement not"), + .bitcast => @panic("TODO implement bitcast"), + .block => @panic("TODO implement block"), + .loop => @panic("TODO implement loop"), + .br => @panic("TODO implement br"), + .breakpoint => @panic("TODO implement breakpoint"), + .ret_addr => @panic("TODO implement ret_addr"), + .frame_addr => @panic("TODO implement frame_addr"), + .call => @panic("TODO implement call"), + .call_always_tail => @panic("TODO implement call_always_tail"), + .call_never_tail => @panic("TODO implement call_never_tail"), + .call_never_inline => @panic("TODO implement call_never_inline"), + .clz => @panic("TODO implement clz"), + .ctz => @panic("TODO implement ctz"), + .popcount => @panic("TODO implement popcount"), + .byte_swap => @panic("TODO implement byte_swap"), + .bit_reverse => @panic("TODO implement bit_reverse"), + .sqrt => @panic("TODO implement sqrt"), + .sin => @panic("TODO implement sin"), + .cos => @panic("TODO 
implement cos"), + .exp => @panic("TODO implement exp"), + .exp2 => @panic("TODO implement exp2"), + .log => @panic("TODO implement log"), + .log2 => @panic("TODO implement log2"), + .log10 => @panic("TODO implement log10"), + .fabs => @panic("TODO implement fabs"), + .floor => @panic("TODO implement floor"), + .ceil => @panic("TODO implement ceil"), + .round => @panic("TODO implement round"), + .trunc_float => @panic("TODO implement trunc_float"), + .cmp_lt => @panic("TODO implement cmp_lt"), + .cmp_lte => @panic("TODO implement cmp_lte"), + .cmp_eq => @panic("TODO implement cmp_eq"), + .cmp_gte => @panic("TODO implement cmp_gte"), + .cmp_gt => @panic("TODO implement cmp_gt"), + .cmp_neq => @panic("TODO implement cmp_neq"), + .cmp_vector => @panic("TODO implement cmp_vector"), + .cond_br => @panic("TODO implement cond_br"), + .switch_br => @panic("TODO implement switch_br"), + .constant => @panic("TODO implement constant"), + .const_ty => @panic("TODO implement const_ty"), + .dbg_stmt => @panic("TODO implement dbg_stmt"), + .dbg_block_begin => @panic("TODO implement dbg_block_begin"), + .dbg_block_end => @panic("TODO implement dbg_block_end"), + .dbg_inline_begin => @panic("TODO implement dbg_inline_begin"), + .dbg_inline_end => @panic("TODO implement dbg_inline_end"), + .dbg_var_ptr => @panic("TODO implement dbg_var_ptr"), + .dbg_var_val => @panic("TODO implement dbg_var_val"), + .is_null => @panic("TODO implement is_null"), + .is_non_null => @panic("TODO implement is_non_null"), + .is_null_ptr => @panic("TODO implement is_null_ptr"), + .is_non_null_ptr => @panic("TODO implement is_non_null_ptr"), + .is_err => @panic("TODO implement is_err"), + .is_non_err => @panic("TODO implement is_non_err"), + .is_err_ptr => @panic("TODO implement is_err_ptr"), + .is_non_err_ptr => @panic("TODO implement is_non_err_ptr"), + .bool_and => @panic("TODO implement bool_and"), + .bool_or => @panic("TODO implement bool_or"), + .load => @panic("TODO implement load"), + .ptrtoint => @panic("TODO implement ptrtoint"), + .bool_to_int => @panic("TODO implement bool_to_int"), + .ret => @panic("TODO implement ret"), + .ret_load => @panic("TODO implement ret_load"), + .store => @panic("TODO implement store"), + .unreach => @panic("TODO implement unreach"), + .fptrunc => @panic("TODO implement fptrunc"), + .fpext => @panic("TODO implement fpext"), + .intcast => @panic("TODO implement intcast"), + .trunc => @panic("TODO implement trunc"), + .optional_payload => @panic("TODO implement optional_payload"), + .optional_payload_ptr => @panic("TODO implement optional_payload_ptr"), + .optional_payload_ptr_set => @panic("TODO implement optional_payload_ptr_set"), + .wrap_optional => @panic("TODO implement wrap_optional"), + .unwrap_errunion_payload => @panic("TODO implement unwrap_errunion_payload"), + .unwrap_errunion_err => @panic("TODO implement unwrap_errunion_err"), + .unwrap_errunion_payload_ptr => @panic("TODO implement unwrap_errunion_payload_ptr"), + .unwrap_errunion_err_ptr => @panic("TODO implement unwrap_errunion_err_ptr"), + .errunion_payload_ptr_set => @panic("TODO implement errunion_payload_ptr_set"), + .wrap_errunion_payload => @panic("TODO implement wrap_errunion_payload"), + .wrap_errunion_err => @panic("TODO implement wrap_errunion_err"), + .struct_field_ptr => @panic("TODO implement struct_field_ptr"), + .struct_field_ptr_index_0 => @panic("TODO implement struct_field_ptr_index_0"), + .struct_field_ptr_index_1 => @panic("TODO implement struct_field_ptr_index_1"), + .struct_field_ptr_index_2 => 
@panic("TODO implement struct_field_ptr_index_2"), + .struct_field_ptr_index_3 => @panic("TODO implement struct_field_ptr_index_3"), + .struct_field_val => @panic("TODO implement struct_field_val"), + .set_union_tag => @panic("TODO implement set_union_tag"), + .get_union_tag => @panic("TODO implement get_union_tag"), + .slice => @panic("TODO implement slice"), + .slice_len => @panic("TODO implement slice_len"), + .slice_ptr => @panic("TODO implement slice_ptr"), + .ptr_slice_len_ptr => @panic("TODO implement ptr_slice_len_ptr"), + .ptr_slice_ptr_ptr => @panic("TODO implement ptr_slice_ptr_ptr"), + .array_elem_val => @panic("TODO implement array_elem_val"), + .slice_elem_val => @panic("TODO implement slice_elem_val"), + .slice_elem_ptr => @panic("TODO implement slice_elem_ptr"), + .ptr_elem_val => @panic("TODO implement ptr_elem_val"), + .ptr_elem_ptr => @panic("TODO implement ptr_elem_ptr"), + .array_to_slice => @panic("TODO implement array_to_slice"), + .float_to_int => @panic("TODO implement float_to_int"), + .int_to_float => @panic("TODO implement int_to_float"), + .reduce => @panic("TODO implement reduce"), + .splat => @panic("TODO implement splat"), + .shuffle => @panic("TODO implement shuffle"), + .select => @panic("TODO implement select"), + .memset => @panic("TODO implement memset"), + .memcpy => @panic("TODO implement memcpy"), + .cmpxchg_weak => @panic("TODO implement cmpxchg_weak"), + .cmpxchg_strong => @panic("TODO implement cmpxchg_strong"), + .fence => @panic("TODO implement fence"), + .atomic_load => @panic("TODO implement atomic_load"), + .atomic_store_unordered => @panic("TODO implement atomic_store_unordered"), + .atomic_store_monotonic => @panic("TODO implement atomic_store_monotonic"), + .atomic_store_release => @panic("TODO implement atomic_store_release"), + .atomic_store_seq_cst => @panic("TODO implement atomic_store_seq_cst"), + .atomic_rmw => @panic("TODO implement atomic_rmw"), + .tag_name => @panic("TODO implement tag_name"), + .error_name => @panic("TODO implement error_name"), + .aggregate_init => @panic("TODO implement aggregate_init"), + .union_init => @panic("TODO implement union_init"), + .prefetch => @panic("TODO implement prefetch"), + .mul_add => @panic("TODO implement mul_add"), + .field_parent_ptr => @panic("TODO implement field_parent_ptr"), + + .wasm_memory_size, .wasm_memory_grow => unreachable, + } + + if (std.debug.runtime_safety) { + if (self.air_bookkeeping < old_air_bookkeeping + 1) { + std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. 
Look for a missing call to finishAir.", .{ inst, air_tags[inst] }); + } + } + } } fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { @@ -487,6 +658,11 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { return result_index; } +fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { + const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; + try table.ensureUnusedCapacity(self.gpa, additional_count); +} + fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { @setCold(true); assert(self.err_msg == null); From 71cd3466ec129404a9cc6679f25b0dde8623b094 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Fri, 1 Apr 2022 22:23:19 +0700 Subject: [PATCH 09/29] stage2: sparcv9: Adjust RegisterManager settings --- src/arch/sparcv9/CodeGen.zig | 5 ++++- src/arch/sparcv9/abi.zig | 19 ++++++++++++++++--- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 286b67a145..33dc0e1f8f 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -16,6 +16,8 @@ const Type = @import("../../type.zig").Type; const GenerateSymbolError = @import("../../codegen.zig").GenerateSymbolError; const FnResult = @import("../../codegen.zig").FnResult; const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; +const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager; +const RegisterManager = RegisterManagerFn(Self, Register, &abi.allocatable_regs); const build_options = @import("build_options"); @@ -73,6 +75,8 @@ branch_stack: *std.ArrayList(Branch), // Key is the block instruction blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, +register_manager: RegisterManager = .{}, + /// Maps offset to what is stored there. stack: std.AutoHashMapUnmanaged(u32, StackAllocation) = .{}, @@ -380,7 +384,6 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type, is_caller: bool) !Ca return result; } -/// Caller must call `CallMCValues.deinit`. fn gen(self: *Self) !void { const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { diff --git a/src/arch/sparcv9/abi.zig b/src/arch/sparcv9/abi.zig index ceac33c08c..4cb10a99ea 100644 --- a/src/arch/sparcv9/abi.zig +++ b/src/arch/sparcv9/abi.zig @@ -1,9 +1,22 @@ const bits = @import("bits.zig"); const Register = bits.Register; -// Register windowing mechanism will take care of preserving registers -// so no need to do it manually -pub const callee_preserved_regs = [_]Register{}; +// There are no callee-preserved registers since the windowing +// mechanism already takes care of them. +// We still need to preserve %o0-%o5, %g1, %g4, and %g5 before calling +// something, though, as those are shared with the callee and might be +// thrashed by it. +pub const caller_preserved_regs = [_]Register{ .o0, .o1, .o2, .o3, .o4, .o5, .g1, .g4, .g5 }; + +// Try to allocate i, l, o, then g sets of registers, in order of priority. 
+pub const allocatable_regs = [_]Register{ + // zig fmt: off + .@"i0", .@"i1", .@"i2", .@"i3", .@"i4", .@"i5", + .l0, .l1, .l2, .l3, .l4, .l5, .l6, .l7, + .o0, .o1, .o2, .o3, .o4, .o5, + .g1, .g4, .g5, + // zig fmt: on +}; pub const c_abi_int_param_regs_caller_view = [_]Register{ .o0, .o1, .o2, .o3, .o4, .o5 }; pub const c_abi_int_param_regs_callee_view = [_]Register{ .@"i0", .@"i1", .@"i2", .@"i3", .@"i4", .@"i5" }; From 5ab6b5a77723d203644a7112cf37f3122c738986 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Fri, 1 Apr 2022 22:32:56 +0700 Subject: [PATCH 10/29] stage2: sparcv9: implement dbgAdvancePCAndLine --- src/arch/sparcv9/Emit.zig | 52 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 48 insertions(+), 4 deletions(-) diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index 28c172f329..8d870e43f5 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -9,6 +9,7 @@ const ErrorMsg = Module.ErrorMsg; const Liveness = @import("../../Liveness.zig"); const DebugInfoOutput = @import("../../codegen.zig").DebugInfoOutput; const DW = std.dwarf; +const leb128 = std.leb; const Emit = @This(); const Mir = @import("Mir.zig"); @@ -60,11 +61,54 @@ pub fn deinit(emit: *Emit) void { } fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { - _ = self; - _ = line; - _ = column; + const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line); + const delta_pc: usize = self.code.items.len - self.prev_di_pc; + switch (self.debug_output) { + .dwarf => |dbg_out| { + // TODO Look into using the DWARF special opcodes to compress this data. + // It lets you emit single-byte opcodes that add different numbers to + // both the PC and the line number at the same time. + try dbg_out.dbg_line.ensureUnusedCapacity(11); + dbg_out.dbg_line.appendAssumeCapacity(DW.LNS.advance_pc); + leb128.writeULEB128(dbg_out.dbg_line.writer(), delta_pc) catch unreachable; + if (delta_line != 0) { + dbg_out.dbg_line.appendAssumeCapacity(DW.LNS.advance_line); + leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable; + } + dbg_out.dbg_line.appendAssumeCapacity(DW.LNS.copy); + self.prev_di_pc = self.code.items.len; + self.prev_di_line = line; + self.prev_di_column = column; + self.prev_di_pc = self.code.items.len; + }, + .plan9 => |dbg_out| { + if (delta_pc <= 0) return; // only do this when the pc changes + // we have already checked the target in the linker to make sure it is compatable + const quant = @import("../../link/Plan9/aout.zig").getPCQuant(self.target.cpu.arch) catch unreachable; - @panic("TODO implement dbgAdvancePCAndLine"); + // increasing the line number + try @import("../../link/Plan9.zig").changeLine(dbg_out.dbg_line, delta_line); + // increasing the pc + const d_pc_p9 = @intCast(i64, delta_pc) - quant; + if (d_pc_p9 > 0) { + // minus one because if its the last one, we want to leave space to change the line which is one quanta + try dbg_out.dbg_line.append(@intCast(u8, @divExact(d_pc_p9, quant) + 128) - quant); + if (dbg_out.pcop_change_index.*) |pci| + dbg_out.dbg_line.items[pci] += 1; + dbg_out.pcop_change_index.* = @intCast(u32, dbg_out.dbg_line.items.len - 1); + } else if (d_pc_p9 == 0) { + // we don't need to do anything, because adding the quant does it for us + } else unreachable; + if (dbg_out.start_line.* == null) + dbg_out.start_line.* = self.prev_di_line; + dbg_out.end_line.* = line; + // only do this if the pc changed + self.prev_di_line = line; + self.prev_di_column = column; + self.prev_di_pc = self.code.items.len; + }, + 
.none => {}, + } } fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { From cec48f2cf1009653ac1097328b75beaf0bf198d2 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sat, 2 Apr 2022 08:44:56 +0700 Subject: [PATCH 11/29] stage2: sparcv9: Different formatting for genBody --- src/arch/sparcv9/CodeGen.zig | 337 ++++++++++++++++++----------------- 1 file changed, 178 insertions(+), 159 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 33dc0e1f8f..35d9f5bfa5 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -481,166 +481,185 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { try self.ensureProcessDeathCapacity(Liveness.bpi); switch (air_tags[inst]) { - .arg => @panic("TODO implement arg"), - .add => @panic("TODO implement add"), - .addwrap => @panic("TODO implement addwrap"), - .add_sat => @panic("TODO implement add_sat"), - .sub => @panic("TODO implement sub"), - .subwrap => @panic("TODO implement subwrap"), - .sub_sat => @panic("TODO implement sub_sat"), - .mul => @panic("TODO implement mul"), - .mulwrap => @panic("TODO implement mulwrap"), - .mul_sat => @panic("TODO implement mul_sat"), - .div_float => @panic("TODO implement div_float"), - .div_trunc => @panic("TODO implement div_trunc"), - .div_floor => @panic("TODO implement div_floor"), - .div_exact => @panic("TODO implement div_exact"), - .rem => @panic("TODO implement rem"), - .mod => @panic("TODO implement mod"), - .ptr_add => @panic("TODO implement ptr_add"), - .ptr_sub => @panic("TODO implement ptr_sub"), - .max => @panic("TODO implement max"), - .min => @panic("TODO implement min"), - .add_with_overflow => @panic("TODO implement add_with_overflow"), - .sub_with_overflow => @panic("TODO implement sub_with_overflow"), - .mul_with_overflow => @panic("TODO implement mul_with_overflow"), - .shl_with_overflow => @panic("TODO implement shl_with_overflow"), - .alloc => @panic("TODO implement alloc"), - .ret_ptr => @panic("TODO implement ret_ptr"), - .assembly => @panic("TODO implement assembly"), - .bit_and => @panic("TODO implement bit_and"), - .bit_or => @panic("TODO implement bit_or"), - .shr => @panic("TODO implement shr"), - .shr_exact => @panic("TODO implement shr_exact"), - .shl => @panic("TODO implement shl"), - .shl_exact => @panic("TODO implement shl_exact"), - .shl_sat => @panic("TODO implement shl_sat"), - .xor => @panic("TODO implement xor"), - .not => @panic("TODO implement not"), - .bitcast => @panic("TODO implement bitcast"), - .block => @panic("TODO implement block"), - .loop => @panic("TODO implement loop"), - .br => @panic("TODO implement br"), - .breakpoint => @panic("TODO implement breakpoint"), - .ret_addr => @panic("TODO implement ret_addr"), - .frame_addr => @panic("TODO implement frame_addr"), - .call => @panic("TODO implement call"), - .call_always_tail => @panic("TODO implement call_always_tail"), - .call_never_tail => @panic("TODO implement call_never_tail"), - .call_never_inline => @panic("TODO implement call_never_inline"), - .clz => @panic("TODO implement clz"), - .ctz => @panic("TODO implement ctz"), - .popcount => @panic("TODO implement popcount"), - .byte_swap => @panic("TODO implement byte_swap"), - .bit_reverse => @panic("TODO implement bit_reverse"), - .sqrt => @panic("TODO implement sqrt"), - .sin => @panic("TODO implement sin"), - .cos => @panic("TODO implement cos"), - .exp => @panic("TODO implement exp"), - .exp2 => @panic("TODO implement exp2"), - .log => @panic("TODO implement log"), - .log2 => 
@panic("TODO implement log2"), - .log10 => @panic("TODO implement log10"), - .fabs => @panic("TODO implement fabs"), - .floor => @panic("TODO implement floor"), - .ceil => @panic("TODO implement ceil"), - .round => @panic("TODO implement round"), - .trunc_float => @panic("TODO implement trunc_float"), - .cmp_lt => @panic("TODO implement cmp_lt"), - .cmp_lte => @panic("TODO implement cmp_lte"), - .cmp_eq => @panic("TODO implement cmp_eq"), - .cmp_gte => @panic("TODO implement cmp_gte"), - .cmp_gt => @panic("TODO implement cmp_gt"), - .cmp_neq => @panic("TODO implement cmp_neq"), - .cmp_vector => @panic("TODO implement cmp_vector"), - .cond_br => @panic("TODO implement cond_br"), - .switch_br => @panic("TODO implement switch_br"), - .constant => @panic("TODO implement constant"), - .const_ty => @panic("TODO implement const_ty"), - .dbg_stmt => @panic("TODO implement dbg_stmt"), - .dbg_block_begin => @panic("TODO implement dbg_block_begin"), - .dbg_block_end => @panic("TODO implement dbg_block_end"), - .dbg_inline_begin => @panic("TODO implement dbg_inline_begin"), - .dbg_inline_end => @panic("TODO implement dbg_inline_end"), - .dbg_var_ptr => @panic("TODO implement dbg_var_ptr"), - .dbg_var_val => @panic("TODO implement dbg_var_val"), - .is_null => @panic("TODO implement is_null"), - .is_non_null => @panic("TODO implement is_non_null"), - .is_null_ptr => @panic("TODO implement is_null_ptr"), - .is_non_null_ptr => @panic("TODO implement is_non_null_ptr"), - .is_err => @panic("TODO implement is_err"), - .is_non_err => @panic("TODO implement is_non_err"), - .is_err_ptr => @panic("TODO implement is_err_ptr"), - .is_non_err_ptr => @panic("TODO implement is_non_err_ptr"), - .bool_and => @panic("TODO implement bool_and"), - .bool_or => @panic("TODO implement bool_or"), - .load => @panic("TODO implement load"), - .ptrtoint => @panic("TODO implement ptrtoint"), - .bool_to_int => @panic("TODO implement bool_to_int"), - .ret => @panic("TODO implement ret"), - .ret_load => @panic("TODO implement ret_load"), - .store => @panic("TODO implement store"), - .unreach => @panic("TODO implement unreach"), - .fptrunc => @panic("TODO implement fptrunc"), - .fpext => @panic("TODO implement fpext"), - .intcast => @panic("TODO implement intcast"), - .trunc => @panic("TODO implement trunc"), - .optional_payload => @panic("TODO implement optional_payload"), - .optional_payload_ptr => @panic("TODO implement optional_payload_ptr"), - .optional_payload_ptr_set => @panic("TODO implement optional_payload_ptr_set"), - .wrap_optional => @panic("TODO implement wrap_optional"), - .unwrap_errunion_payload => @panic("TODO implement unwrap_errunion_payload"), - .unwrap_errunion_err => @panic("TODO implement unwrap_errunion_err"), - .unwrap_errunion_payload_ptr => @panic("TODO implement unwrap_errunion_payload_ptr"), - .unwrap_errunion_err_ptr => @panic("TODO implement unwrap_errunion_err_ptr"), - .errunion_payload_ptr_set => @panic("TODO implement errunion_payload_ptr_set"), - .wrap_errunion_payload => @panic("TODO implement wrap_errunion_payload"), - .wrap_errunion_err => @panic("TODO implement wrap_errunion_err"), - .struct_field_ptr => @panic("TODO implement struct_field_ptr"), - .struct_field_ptr_index_0 => @panic("TODO implement struct_field_ptr_index_0"), - .struct_field_ptr_index_1 => @panic("TODO implement struct_field_ptr_index_1"), - .struct_field_ptr_index_2 => @panic("TODO implement struct_field_ptr_index_2"), - .struct_field_ptr_index_3 => @panic("TODO implement struct_field_ptr_index_3"), - .struct_field_val => 
@panic("TODO implement struct_field_val"), - .set_union_tag => @panic("TODO implement set_union_tag"), - .get_union_tag => @panic("TODO implement get_union_tag"), - .slice => @panic("TODO implement slice"), - .slice_len => @panic("TODO implement slice_len"), - .slice_ptr => @panic("TODO implement slice_ptr"), - .ptr_slice_len_ptr => @panic("TODO implement ptr_slice_len_ptr"), - .ptr_slice_ptr_ptr => @panic("TODO implement ptr_slice_ptr_ptr"), - .array_elem_val => @panic("TODO implement array_elem_val"), - .slice_elem_val => @panic("TODO implement slice_elem_val"), - .slice_elem_ptr => @panic("TODO implement slice_elem_ptr"), - .ptr_elem_val => @panic("TODO implement ptr_elem_val"), - .ptr_elem_ptr => @panic("TODO implement ptr_elem_ptr"), - .array_to_slice => @panic("TODO implement array_to_slice"), - .float_to_int => @panic("TODO implement float_to_int"), - .int_to_float => @panic("TODO implement int_to_float"), - .reduce => @panic("TODO implement reduce"), - .splat => @panic("TODO implement splat"), - .shuffle => @panic("TODO implement shuffle"), - .select => @panic("TODO implement select"), - .memset => @panic("TODO implement memset"), - .memcpy => @panic("TODO implement memcpy"), - .cmpxchg_weak => @panic("TODO implement cmpxchg_weak"), - .cmpxchg_strong => @panic("TODO implement cmpxchg_strong"), - .fence => @panic("TODO implement fence"), - .atomic_load => @panic("TODO implement atomic_load"), - .atomic_store_unordered => @panic("TODO implement atomic_store_unordered"), - .atomic_store_monotonic => @panic("TODO implement atomic_store_monotonic"), - .atomic_store_release => @panic("TODO implement atomic_store_release"), - .atomic_store_seq_cst => @panic("TODO implement atomic_store_seq_cst"), - .atomic_rmw => @panic("TODO implement atomic_rmw"), - .tag_name => @panic("TODO implement tag_name"), - .error_name => @panic("TODO implement error_name"), - .aggregate_init => @panic("TODO implement aggregate_init"), - .union_init => @panic("TODO implement union_init"), - .prefetch => @panic("TODO implement prefetch"), - .mul_add => @panic("TODO implement mul_add"), - .field_parent_ptr => @panic("TODO implement field_parent_ptr"), + // zig fmt: off + .add, .ptr_add => @panic("TODO try self.airBinOp(inst)"), + .addwrap => @panic("TODO try self.airAddWrap(inst)"), + .add_sat => @panic("TODO try self.airAddSat(inst)"), + .sub, .ptr_sub => @panic("TODO try self.airBinOp(inst)"), + .subwrap => @panic("TODO try self.airSubWrap(inst)"), + .sub_sat => @panic("TODO try self.airSubSat(inst)"), + .mul => @panic("TODO try self.airMul(inst)"), + .mulwrap => @panic("TODO try self.airMulWrap(inst)"), + .mul_sat => @panic("TODO try self.airMulSat(inst)"), + .rem => @panic("TODO try self.airRem(inst)"), + .mod => @panic("TODO try self.airMod(inst)"), + .shl, .shl_exact => @panic("TODO try self.airShl(inst)"), + .shl_sat => @panic("TODO try self.airShlSat(inst)"), + .min => @panic("TODO try self.airMin(inst)"), + .max => @panic("TODO try self.airMax(inst)"), + .slice => @panic("TODO try self.airSlice(inst)"), - .wasm_memory_size, .wasm_memory_grow => unreachable, + .sqrt, + .sin, + .cos, + .exp, + .exp2, + .log, + .log2, + .log10, + .fabs, + .floor, + .ceil, + .round, + .trunc_float, + => @panic("TODO try self.airUnaryMath(inst)"), + + .add_with_overflow => @panic("TODO try self.airAddWithOverflow(inst)"), + .sub_with_overflow => @panic("TODO try self.airSubWithOverflow(inst)"), + .mul_with_overflow => @panic("TODO try self.airMulWithOverflow(inst)"), + .shl_with_overflow => @panic("TODO try 
self.airShlWithOverflow(inst)"), + + .div_float, .div_trunc, .div_floor, .div_exact => @panic("TODO try self.airDiv(inst)"), + + .cmp_lt => @panic("TODO try self.airCmp(inst, .lt)"), + .cmp_lte => @panic("TODO try self.airCmp(inst, .lte)"), + .cmp_eq => @panic("TODO try self.airCmp(inst, .eq)"), + .cmp_gte => @panic("TODO try self.airCmp(inst, .gte)"), + .cmp_gt => @panic("TODO try self.airCmp(inst, .gt)"), + .cmp_neq => @panic("TODO try self.airCmp(inst, .neq)"), + .cmp_vector => @panic("TODO try self.airCmpVector(inst)"), + + .bool_and => @panic("TODO try self.airBoolOp(inst)"), + .bool_or => @panic("TODO try self.airBoolOp(inst)"), + .bit_and => @panic("TODO try self.airBitAnd(inst)"), + .bit_or => @panic("TODO try self.airBitOr(inst)"), + .xor => @panic("TODO try self.airXor(inst)"), + .shr, .shr_exact => @panic("TODO try self.airShr(inst)"), + + .alloc => @panic("TODO try self.airAlloc(inst)"), + .ret_ptr => @panic("TODO try self.airRetPtr(inst)"), + .arg => @panic("TODO try self.airArg(inst)"), + .assembly => @panic("TODO try self.airAsm(inst)"), + .bitcast => @panic("TODO try self.airBitCast(inst)"), + .block => @panic("TODO try self.airBlock(inst)"), + .br => @panic("TODO try self.airBr(inst)"), + .breakpoint => @panic("TODO try self.airBreakpoint()"), + .ret_addr => @panic("TODO try self.airRetAddr(inst)"), + .frame_addr => @panic("TODO try self.airFrameAddress(inst)"), + .fence => @panic("TODO try self.airFence()"), + .cond_br => @panic("TODO try self.airCondBr(inst)"), + .dbg_stmt => @panic("TODO try self.airDbgStmt(inst)"), + .fptrunc => @panic("TODO try self.airFptrunc(inst)"), + .fpext => @panic("TODO try self.airFpext(inst)"), + .intcast => @panic("TODO try self.airIntCast(inst)"), + .trunc => @panic("TODO try self.airTrunc(inst)"), + .bool_to_int => @panic("TODO try self.airBoolToInt(inst)"), + .is_non_null => @panic("TODO try self.airIsNonNull(inst)"), + .is_non_null_ptr => @panic("TODO try self.airIsNonNullPtr(inst)"), + .is_null => @panic("TODO try self.airIsNull(inst)"), + .is_null_ptr => @panic("TODO try self.airIsNullPtr(inst)"), + .is_non_err => @panic("TODO try self.airIsNonErr(inst)"), + .is_non_err_ptr => @panic("TODO try self.airIsNonErrPtr(inst)"), + .is_err => @panic("TODO try self.airIsErr(inst)"), + .is_err_ptr => @panic("TODO try self.airIsErrPtr(inst)"), + .load => @panic("TODO try self.airLoad(inst)"), + .loop => @panic("TODO try self.airLoop(inst)"), + .not => @panic("TODO try self.airNot(inst)"), + .ptrtoint => @panic("TODO try self.airPtrToInt(inst)"), + .ret => @panic("TODO try self.airRet(inst)"), + .ret_load => @panic("TODO try self.airRetLoad(inst)"), + .store => @panic("TODO try self.airStore(inst)"), + .struct_field_ptr=> @panic("TODO try self.airStructFieldPtr(inst)"), + .struct_field_val=> @panic("TODO try self.airStructFieldVal(inst)"), + .array_to_slice => @panic("TODO try self.airArrayToSlice(inst)"), + .int_to_float => @panic("TODO try self.airIntToFloat(inst)"), + .float_to_int => @panic("TODO try self.airFloatToInt(inst)"), + .cmpxchg_strong => @panic("TODO try self.airCmpxchg(inst)"), + .cmpxchg_weak => @panic("TODO try self.airCmpxchg(inst)"), + .atomic_rmw => @panic("TODO try self.airAtomicRmw(inst)"), + .atomic_load => @panic("TODO try self.airAtomicLoad(inst)"), + .memcpy => @panic("TODO try self.airMemcpy(inst)"), + .memset => @panic("TODO try self.airMemset(inst)"), + .set_union_tag => @panic("TODO try self.airSetUnionTag(inst)"), + .get_union_tag => @panic("TODO try self.airGetUnionTag(inst)"), + .clz => @panic("TODO try 
self.airClz(inst)"), + .ctz => @panic("TODO try self.airCtz(inst)"), + .popcount => @panic("TODO try self.airPopcount(inst)"), + .byte_swap => @panic("TODO try self.airByteSwap(inst)"), + .bit_reverse => @panic("TODO try self.airBitReverse(inst)"), + .tag_name => @panic("TODO try self.airTagName(inst)"), + .error_name => @panic("TODO try self.airErrorName(inst)"), + .splat => @panic("TODO try self.airSplat(inst)"), + .select => @panic("TODO try self.airSelect(inst)"), + .shuffle => @panic("TODO try self.airShuffle(inst)"), + .reduce => @panic("TODO try self.airReduce(inst)"), + .aggregate_init => @panic("TODO try self.airAggregateInit(inst)"), + .union_init => @panic("TODO try self.airUnionInit(inst)"), + .prefetch => @panic("TODO try self.airPrefetch(inst)"), + .mul_add => @panic("TODO try self.airMulAdd(inst)"), + + .dbg_var_ptr, + .dbg_var_val, + => @panic("TODO try self.airDbgVar(inst)"), + + .dbg_inline_begin, + .dbg_inline_end, + => @panic("TODO try self.airDbgInline(inst)"), + + .dbg_block_begin, + .dbg_block_end, + => @panic("TODO try self.airDbgBlock(inst)"), + + .call => @panic("TODO try self.airCall(inst, .auto)"), + .call_always_tail => @panic("TODO try self.airCall(inst, .always_tail)"), + .call_never_tail => @panic("TODO try self.airCall(inst, .never_tail)"), + .call_never_inline => @panic("TODO try self.airCall(inst, .never_inline)"), + + .atomic_store_unordered => @panic("TODO try self.airAtomicStore(inst, .Unordered)"), + .atomic_store_monotonic => @panic("TODO try self.airAtomicStore(inst, .Monotonic)"), + .atomic_store_release => @panic("TODO try self.airAtomicStore(inst, .Release)"), + .atomic_store_seq_cst => @panic("TODO try self.airAtomicStore(inst, .SeqCst)"), + + .struct_field_ptr_index_0 => @panic("TODO try self.airStructFieldPtrIndex(inst, 0)"), + .struct_field_ptr_index_1 => @panic("TODO try self.airStructFieldPtrIndex(inst, 1)"), + .struct_field_ptr_index_2 => @panic("TODO try self.airStructFieldPtrIndex(inst, 2)"), + .struct_field_ptr_index_3 => @panic("TODO try self.airStructFieldPtrIndex(inst, 3)"), + + .field_parent_ptr => @panic("TODO try self.airFieldParentPtr(inst)"), + + .switch_br => @panic("TODO try self.airSwitch(inst)"), + .slice_ptr => @panic("TODO try self.airSlicePtr(inst)"), + .slice_len => @panic("TODO try self.airSliceLen(inst)"), + + .ptr_slice_len_ptr => @panic("TODO try self.airPtrSliceLenPtr(inst)"), + .ptr_slice_ptr_ptr => @panic("TODO try self.airPtrSlicePtrPtr(inst)"), + + .array_elem_val => @panic("TODO try self.airArrayElemVal(inst)"), + .slice_elem_val => @panic("TODO try self.airSliceElemVal(inst)"), + .slice_elem_ptr => @panic("TODO try self.airSliceElemPtr(inst)"), + .ptr_elem_val => @panic("TODO try self.airPtrElemVal(inst)"), + .ptr_elem_ptr => @panic("TODO try self.airPtrElemPtr(inst)"), + + .constant => unreachable, // excluded from function bodies + .const_ty => unreachable, // excluded from function bodies + .unreach => @panic("TODO self.finishAirBookkeeping()"), + + .optional_payload => @panic("TODO try self.airOptionalPayload(inst)"), + .optional_payload_ptr => @panic("TODO try self.airOptionalPayloadPtr(inst)"), + .optional_payload_ptr_set => @panic("TODO try self.airOptionalPayloadPtrSet(inst)"), + .unwrap_errunion_err => @panic("TODO try self.airUnwrapErrErr(inst)"), + .unwrap_errunion_payload => @panic("TODO try self.airUnwrapErrPayload(inst)"), + .unwrap_errunion_err_ptr => @panic("TODO try self.airUnwrapErrErrPtr(inst)"), + .unwrap_errunion_payload_ptr=> @panic("TODO try self.airUnwrapErrPayloadPtr(inst)"), + 
.errunion_payload_ptr_set => @panic("TODO try self.airErrUnionPayloadPtrSet(inst)"), + + .wrap_optional => @panic("TODO try self.airWrapOptional(inst)"), + .wrap_errunion_payload => @panic("TODO try self.airWrapErrUnionPayload(inst)"), + .wrap_errunion_err => @panic("TODO try self.airWrapErrUnionErr(inst)"), + + .wasm_memory_size => unreachable, + .wasm_memory_grow => unreachable, + // zig fmt: on } if (std.debug.runtime_safety) { From 1972a2b08063841bdd6dd411b4fb0c1b16225067 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sat, 2 Apr 2022 18:45:31 +0700 Subject: [PATCH 12/29] stage2: sparcv9: Add placeholders to generate a minimal program --- src/arch/sparcv9/CodeGen.zig | 740 +++++++++++++++++++++++++++++------ src/arch/sparcv9/Emit.zig | 26 +- src/arch/sparcv9/Mir.zig | 86 +++- 3 files changed, 712 insertions(+), 140 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 35d9f5bfa5..40d6db176f 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -2,11 +2,14 @@ //! This lowers AIR into MIR. const std = @import("std"); const assert = std.debug.assert; +const log = std.log.scoped(.codegen); +const math = std.math; const mem = std.mem; const Allocator = mem.Allocator; const builtin = @import("builtin"); const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); +const TypedValue = @import("../../TypedValue.zig"); const ErrorMsg = Module.ErrorMsg; const Air = @import("../../Air.zig"); const Mir = @import("Mir.zig"); @@ -33,6 +36,11 @@ const InnerError = error{ OutOfRegisters, }; +const RegisterView = enum(u1) { + caller, + callee, +}; + gpa: Allocator, air: Air, liveness: Liveness, @@ -165,7 +173,7 @@ const StackAllocation = struct { }; const BlockData = struct { - relocs: std.ArrayListUnmanaged(Reloc), + relocs: std.ArrayListUnmanaged(Mir.Inst.Index), /// The first break instruction encounters `null` here and chooses a /// machine code value for the block result, populating this field. /// Following break instructions encounter that value and use it for @@ -173,18 +181,6 @@ const BlockData = struct { mcv: MCValue, }; -const Reloc = union(enum) { - /// The value is an offset into the `Function` `code` from the beginning. - /// To perform the reloc, write 32-bit signed little-endian integer - /// which is a relative jump, based on the address following the reloc. - rel32: usize, - /// A branch in the ARM instruction set - arm_branch: struct { - pos: usize, - cond: @import("../arm/bits.zig").Condition, - }, -}; - const CallMCValues = struct { args: []MCValue, return_value: MCValue, @@ -245,7 +241,7 @@ pub fn generate( defer function.blocks.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); - var call_info = function.resolveCallingConventionValues(fn_type, false) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_type, .callee) catch |err| switch (err) { error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, error.OutOfRegisters => return FnResult{ .fail = try ErrorMsg.create(bin_file.allocator, src_loc, "CodeGen ran out of registers. This is a bug in the Zig compiler.", .{}), @@ -298,92 +294,6 @@ pub fn generate( } } -/// Caller must call `CallMCValues.deinit`. 
-fn resolveCallingConventionValues(self: *Self, fn_ty: Type, is_caller: bool) !CallMCValues { - const cc = fn_ty.fnCallingConvention(); - const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); - defer self.gpa.free(param_types); - fn_ty.fnParamTypes(param_types); - var result: CallMCValues = .{ - .args = try self.gpa.alloc(MCValue, param_types.len), - // These undefined values must be populated before returning from this function. - .return_value = undefined, - .stack_byte_count = undefined, - .stack_align = undefined, - }; - errdefer self.gpa.free(result.args); - - const ret_ty = fn_ty.fnReturnType(); - - switch (cc) { - .Naked => { - assert(result.args.len == 0); - result.return_value = .{ .unreach = {} }; - result.stack_byte_count = 0; - result.stack_align = 1; - return result; - }, - .Unspecified, .C => { - // SPARC Compliance Definition 2.4.1, Chapter 3 - // Low-Level System Information (64-bit psABI) - Function Calling Sequence - - var next_register: usize = 0; - var next_stack_offset: u32 = 0; - - // The caller puts the argument in %o0-%o5, which becomes %i0-%i5 inside the callee. - const argument_registers = if (is_caller) abi.c_abi_int_param_regs_caller_view else abi.c_abi_int_param_regs_callee_view; - - for (param_types) |ty, i| { - const param_size = @intCast(u32, ty.abiSize(self.target.*)); - if (param_size <= 8) { - if (next_register < argument_registers.len) { - result.args[i] = .{ .register = argument_registers[next_register] }; - next_register += 1; - } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; - next_register += next_stack_offset; - } - } else if (param_size <= 16) { - if (next_register < argument_registers.len - 1) { - return self.fail("TODO MCValues with 2 registers", .{}); - } else if (next_register < argument_registers.len) { - return self.fail("TODO MCValues split register + stack", .{}); - } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; - next_register += next_stack_offset; - } - } else { - result.args[i] = .{ .stack_offset = next_stack_offset }; - next_register += next_stack_offset; - } - } - - result.stack_byte_count = next_stack_offset; - result.stack_align = 16; - }, - else => return self.fail("TODO implement function parameters for {} on sparcv9", .{cc}), - } - - if (ret_ty.zigTypeTag() == .NoReturn) { - result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasRuntimeBits()) { - result.return_value = .{ .none = {} }; - } else switch (cc) { - .Naked => unreachable, - .Unspecified, .C => { - const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); - // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. 
- if (ret_ty_size <= 8) { - result.return_value = if (is_caller) .{ .register = abi.c_abi_int_return_regs_caller_view[0] } else .{ .register = abi.c_abi_int_return_regs_callee_view[0] }; - } else { - return self.fail("TODO support more return values for sparcv9", .{}); - } - }, - else => return self.fail("TODO implement function return values for {} on sparcv9", .{cc}), - } - return result; -} - fn gen(self: *Self) !void { const cc = self.fn_type.fnCallingConvention(); if (cc != .Naked) { @@ -519,7 +429,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .mul_with_overflow => @panic("TODO try self.airMulWithOverflow(inst)"), .shl_with_overflow => @panic("TODO try self.airShlWithOverflow(inst)"), - .div_float, .div_trunc, .div_floor, .div_exact => @panic("TODO try self.airDiv(inst)"), + .div_float, .div_trunc, .div_floor, .div_exact => try self.airDiv(inst), .cmp_lt => @panic("TODO try self.airCmp(inst, .lt)"), .cmp_lte => @panic("TODO try self.airCmp(inst, .lte)"), @@ -537,18 +447,18 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .shr, .shr_exact => @panic("TODO try self.airShr(inst)"), .alloc => @panic("TODO try self.airAlloc(inst)"), - .ret_ptr => @panic("TODO try self.airRetPtr(inst)"), - .arg => @panic("TODO try self.airArg(inst)"), - .assembly => @panic("TODO try self.airAsm(inst)"), + .ret_ptr => try self.airRetPtr(inst), + .arg => try self.airArg(inst), + .assembly => try self.airAsm(inst), .bitcast => @panic("TODO try self.airBitCast(inst)"), - .block => @panic("TODO try self.airBlock(inst)"), + .block => try self.airBlock(inst), .br => @panic("TODO try self.airBr(inst)"), .breakpoint => @panic("TODO try self.airBreakpoint()"), .ret_addr => @panic("TODO try self.airRetAddr(inst)"), .frame_addr => @panic("TODO try self.airFrameAddress(inst)"), .fence => @panic("TODO try self.airFence()"), .cond_br => @panic("TODO try self.airCondBr(inst)"), - .dbg_stmt => @panic("TODO try self.airDbgStmt(inst)"), + .dbg_stmt => try self.airDbgStmt(inst), .fptrunc => @panic("TODO try self.airFptrunc(inst)"), .fpext => @panic("TODO try self.airFpext(inst)"), .intcast => @panic("TODO try self.airIntCast(inst)"), @@ -567,8 +477,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .not => @panic("TODO try self.airNot(inst)"), .ptrtoint => @panic("TODO try self.airPtrToInt(inst)"), .ret => @panic("TODO try self.airRet(inst)"), - .ret_load => @panic("TODO try self.airRetLoad(inst)"), - .store => @panic("TODO try self.airStore(inst)"), + .ret_load => try self.airRetLoad(inst), + .store => try self.airStore(inst), .struct_field_ptr=> @panic("TODO try self.airStructFieldPtr(inst)"), .struct_field_val=> @panic("TODO try self.airStructFieldVal(inst)"), .array_to_slice => @panic("TODO try self.airArrayToSlice(inst)"), @@ -600,20 +510,20 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .dbg_var_ptr, .dbg_var_val, - => @panic("TODO try self.airDbgVar(inst)"), + => try self.airDbgVar(inst), .dbg_inline_begin, .dbg_inline_end, - => @panic("TODO try self.airDbgInline(inst)"), + => try self.airDbgInline(inst), .dbg_block_begin, .dbg_block_end, - => @panic("TODO try self.airDbgBlock(inst)"), + => try self.airDbgBlock(inst), - .call => @panic("TODO try self.airCall(inst, .auto)"), + .call => try self.airCall(inst, .auto), .call_always_tail => @panic("TODO try self.airCall(inst, .always_tail)"), .call_never_tail => @panic("TODO try self.airCall(inst, .never_tail)"), - .call_never_inline => @panic("TODO try 
self.airCall(inst, .never_inline)"), + .call_never_inline => try self.airCall(inst, .never_inline), .atomic_store_unordered => @panic("TODO try self.airAtomicStore(inst, .Unordered)"), .atomic_store_monotonic => @panic("TODO try self.airAtomicStore(inst, .Monotonic)"), @@ -627,7 +537,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .field_parent_ptr => @panic("TODO try self.airFieldParentPtr(inst)"), - .switch_br => @panic("TODO try self.airSwitch(inst)"), + .switch_br => try self.airSwitch(inst), .slice_ptr => @panic("TODO try self.airSlicePtr(inst)"), .slice_len => @panic("TODO try self.airSliceLen(inst)"), @@ -642,7 +552,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .constant => unreachable, // excluded from function bodies .const_ty => unreachable, // excluded from function bodies - .unreach => @panic("TODO self.finishAirBookkeeping()"), + .unreach => self.finishAirBookkeeping(), .optional_payload => @panic("TODO try self.airOptionalPayload(inst)"), .optional_payload_ptr => @panic("TODO try self.airOptionalPayloadPtr(inst)"), @@ -670,6 +580,212 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { } } +fn airAsm(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Asm, ty_pl.payload); + const is_volatile = (extra.data.flags & 0x80000000) != 0; + const clobbers_len = @truncate(u31, extra.data.flags); + var extra_i: usize = extra.end; + const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + extra_i += outputs.len; + const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + extra_i += inputs.len; + + const dead = !is_volatile and self.liveness.isUnused(inst); + _ = dead; + _ = clobbers_len; + + return self.fail("TODO implement asm for {}", .{self.target.cpu.arch}); +} + +fn airArg(self: *Self, inst: Air.Inst.Index) !void { + const arg_index = self.arg_index; + self.arg_index += 1; + + const ty = self.air.typeOfIndex(inst); + _ = ty; + + const result = self.args[arg_index]; + // TODO support stack-only arguments + // TODO Copy registers to the stack + const mcv = result; + + _ = try self.addInst(.{ + .tag = .dbg_arg, + .data = .{ + .dbg_arg_info = .{ + .air_inst = inst, + .arg_index = arg_index, + }, + }, + }); + + if (self.liveness.isUnused(inst)) + return self.finishAirBookkeeping(); + + switch (mcv) { + .register => |reg| { + self.register_manager.getRegAssumeFree(reg, inst); + }, + else => {}, + } + + return self.finishAir(inst, mcv, .{ .none, .none, .none }); +} + +fn airBlock(self: *Self, inst: Air.Inst.Index) !void { + try self.blocks.putNoClobber(self.gpa, inst, .{ + // A block is a setup to be able to jump to the end. + .relocs = .{}, + // It also acts as a receptacle for break operands. + // Here we use `MCValue.none` to represent a null value so that the first + // break instruction will choose a MCValue for the block result and overwrite + // this field. Following break instructions will use that MCValue to put their + // block results. 
+ .mcv = MCValue{ .none = {} }, + }); + defer self.blocks.getPtr(inst).?.relocs.deinit(self.gpa); + + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; + try self.genBody(body); + + // relocations for `bpcc` instructions + const relocs = &self.blocks.getPtr(inst).?.relocs; + if (relocs.items.len > 0 and relocs.items[relocs.items.len - 1] == self.mir_instructions.len - 1) { + // If the last Mir instruction is the last relocation (which + // would just jump one instruction further), it can be safely + // removed + self.mir_instructions.orderedRemove(relocs.pop()); + } + for (relocs.items) |reloc| { + try self.performReloc(reloc); + } + + const result = self.blocks.getPtr(inst).?.mcv; + return self.finishAir(inst, result, .{ .none, .none, .none }); +} + +fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.Modifier) !void { + if (modifier == .always_tail) return self.fail("TODO implement tail calls for {}", .{self.target.cpu.arch}); + + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const callee = pl_op.operand; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end .. extra.end + extra.data.args_len]); + const ty = self.air.typeOf(callee); + const fn_ty = switch (ty.zigTypeTag()) { + .Fn => ty, + .Pointer => ty.childType(), + else => unreachable, + }; + + var info = try self.resolveCallingConventionValues(fn_ty, .caller); + defer info.deinit(self); + for (info.args) |mc_arg, arg_i| { + const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); + const arg_mcv = try self.resolveInst(arg); + + switch (mc_arg) { + .none => continue, + .undef => unreachable, + .immediate => unreachable, + .unreach => unreachable, + .dead => unreachable, + .memory => unreachable, + .compare_flags_signed => unreachable, + .compare_flags_unsigned => unreachable, + .got_load => unreachable, + .direct_load => unreachable, + .register => |reg| { + try self.register_manager.getReg(reg, null); + try self.genSetReg(arg_ty, reg, arg_mcv); + }, + .stack_offset => { + return self.fail("TODO implement calling with parameters in memory", .{}); + }, + .ptr_stack_offset => { + return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + }, + } + } + + return self.fail("TODO implement call for {}", .{self.target.cpu.arch}); +} + +fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { + // TODO emit debug info lexical block + return self.finishAir(inst, .dead, .{ .none, .none, .none }); +} + +fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const function = self.air.values[ty_pl.payload].castTag(.function).?.data; + // TODO emit debug info for function change + _ = function; + return self.finishAir(inst, .dead, .{ .none, .none, .none }); +} + +fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { + const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; + + _ = try self.addInst(.{ + .tag = .dbg_line, + .data = .{ + .dbg_line_column = .{ + .line = dbg_stmt.line, + .column = dbg_stmt.column, + }, + }, + }); + + return self.finishAirBookkeeping(); +} + +fn airDbgVar(self: *Self, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const name = self.air.nullTerminatedString(pl_op.payload); + const operand = 
pl_op.operand; + // TODO emit debug info for this variable + _ = name; + return self.finishAir(inst, .dead, .{ operand, .none, .none }); +} + +fn airDiv(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement div for {}", .{self.target.cpu.arch}); + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); +} + +fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { + _ = inst; + return self.fail("TODO implement airRetLoad for {}", .{self.target.cpu.arch}); + //return self.finishAir(inst, .dead, .{ un_op, .none, .none }); +} + +fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void { + const stack_offset = try self.allocMemPtr(inst); + return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); +} + +fn airStore(self: *Self, inst: Air.Inst.Index) !void { + _ = self; + _ = inst; + + return self.fail("TODO implement store for {}", .{self.target.cpu.arch}); +} + +fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { + _ = self; + _ = inst; + + return self.fail("TODO implement switch for {}", .{self.target.cpu.arch}); +} + +// Common helper functions + fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { const gpa = self.gpa; @@ -680,6 +796,42 @@ fn addInst(self: *Self, inst: Mir.Inst) error{OutOfMemory}!Mir.Inst.Index { return result_index; } +fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 { + if (abi_align > self.stack_align) + self.stack_align = abi_align; + // TODO find a free slot instead of always appending + const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); + self.next_stack_offset = offset + abi_size; + if (self.next_stack_offset > self.max_end_stack) + self.max_end_stack = self.next_stack_offset; + try self.stack.putNoClobber(self.gpa, offset, .{ + .inst = inst, + .size = abi_size, + }); + return offset; +} + +/// Use a pointer instruction as the basis for allocating stack memory. +fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { + const elem_ty = self.air.typeOfIndex(inst).elemType(); + + if (!elem_ty.hasRuntimeBits()) { + // As this stack item will never be dereferenced at runtime, + // return the stack offset 0. Stack offset 0 will be where all + // zero-sized stack allocations live as non-zero-sized + // allocations will always have an offset > 0. + return @as(u32, 0); + } + + const target = self.target.*; + const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + }; + // TODO swap this for inst.ty.ptrAlign + const abi_align = elem_ty.abiAlignment(self.target.*); + return self.allocMem(inst, abi_size, abi_align); +} + fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; try table.ensureUnusedCapacity(self.gpa, additional_count); @@ -691,3 +843,361 @@ fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args); return error.CodegenFail; } + +/// Called when there are no operands, and the instruction is always unreferenced. 
+fn finishAirBookkeeping(self: *Self) void { + if (std.debug.runtime_safety) { + self.air_bookkeeping += 1; + } +} + +fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { + var tomb_bits = self.liveness.getTombBits(inst); + for (operands) |op| { + const dies = @truncate(u1, tomb_bits) != 0; + tomb_bits >>= 1; + if (!dies) continue; + const op_int = @enumToInt(op); + if (op_int < Air.Inst.Ref.typed_value_map.len) continue; + const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + self.processDeath(op_index); + } + const is_used = @truncate(u1, tomb_bits) == 0; + if (is_used) { + log.debug("%{d} => {}", .{ inst, result }); + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacityNoClobber(inst, result); + + switch (result) { + .register => |reg| { + // In some cases (such as bitcast), an operand + // may be the same MCValue as the result. If + // that operand died and was a register, it + // was freed by processDeath. We have to + // "re-allocate" the register. + if (self.register_manager.isRegFree(reg)) { + self.register_manager.getRegAssumeFree(reg, inst); + } + }, + else => {}, + } + } + self.finishAirBookkeeping(); +} + +fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { + if (typed_value.val.isUndef()) + return MCValue{ .undef = {} }; + + if (typed_value.val.castTag(.decl_ref)) |payload| { + return self.lowerDeclRef(typed_value, payload.data); + } + if (typed_value.val.castTag(.decl_ref_mut)) |payload| { + return self.lowerDeclRef(typed_value, payload.data.decl); + } + const target = self.target.*; + + switch (typed_value.ty.zigTypeTag()) { + .Pointer => switch (typed_value.ty.ptrSize()) { + .Slice => { + return self.lowerUnnamedConst(typed_value); + }, + else => { + switch (typed_value.val.tag()) { + .int_u64 => { + return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) }; + }, + .slice => { + return self.lowerUnnamedConst(typed_value); + }, + else => { + return self.fail("TODO codegen more kinds of const pointers: {}", .{typed_value.val.tag()}); + }, + } + }, + }, + .Int => { + const info = typed_value.ty.intInfo(self.target.*); + if (info.bits <= 64) { + const unsigned = switch (info.signedness) { + .signed => blk: { + const signed = typed_value.val.toSignedInt(); + break :blk @bitCast(u64, signed); + }, + .unsigned => typed_value.val.toUnsignedInt(target), + }; + + return MCValue{ .immediate = unsigned }; + } else { + return self.lowerUnnamedConst(typed_value); + } + }, + .Bool => { + return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) }; + }, + .ComptimeInt => unreachable, // semantic analysis prevents this + .ComptimeFloat => unreachable, // semantic analysis prevents this + .Optional => { + if (typed_value.ty.isPtrLikeOptional()) { + if (typed_value.val.isNull()) + return MCValue{ .immediate = 0 }; + + var buf: Type.Payload.ElemType = undefined; + return self.genTypedValue(.{ + .ty = typed_value.ty.optionalChild(&buf), + .val = typed_value.val, + }); + } else if (typed_value.ty.abiSize(self.target.*) == 1) { + return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) }; + } + return self.fail("TODO non pointer optionals", .{}); + }, + .Enum => { + if (typed_value.val.castTag(.enum_field_index)) |field_index| { + switch (typed_value.ty.tag()) { + .enum_simple => { + return MCValue{ .immediate = field_index.data }; + }, + .enum_full, .enum_nonexhaustive => { + const enum_full = 
typed_value.ty.cast(Type.Payload.EnumFull).?.data; + if (enum_full.values.count() != 0) { + const tag_val = enum_full.values.keys()[field_index.data]; + return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val }); + } else { + return MCValue{ .immediate = field_index.data }; + } + }, + else => unreachable, + } + } else { + var int_tag_buffer: Type.Payload.Bits = undefined; + const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer); + return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val }); + } + }, + .ErrorSet => { + const err_name = typed_value.val.castTag(.@"error").?.data.name; + const module = self.bin_file.options.module.?; + const global_error_set = module.global_error_set; + const error_index = global_error_set.get(err_name).?; + return MCValue{ .immediate = error_index }; + }, + .ErrorUnion => { + const error_type = typed_value.ty.errorUnionSet(); + const payload_type = typed_value.ty.errorUnionPayload(); + + if (typed_value.val.castTag(.eu_payload)) |pl| { + if (!payload_type.hasRuntimeBits()) { + // We use the error type directly as the type. + return MCValue{ .immediate = 0 }; + } + + _ = pl; + return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty.fmtDebug()}); + } else { + if (!payload_type.hasRuntimeBits()) { + // We use the error type directly as the type. + return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); + } + + return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty.fmtDebug()}); + } + }, + .Struct => { + return self.lowerUnnamedConst(typed_value); + }, + else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}), + } +} + +fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { + // Treat each stack item as a "layer" on top of the previous one. + var i: usize = self.branch_stack.items.len; + while (true) { + i -= 1; + if (self.branch_stack.items[i].inst_table.get(inst)) |mcv| { + assert(mcv != .dead); + return mcv; + } + } +} + +fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { + const tag = self.mir_instructions.items(.tag)[inst]; + switch (tag) { + .bpcc => self.mir_instructions.items(.data)[inst].branch_predict.inst = @intCast(Mir.Inst.Index, self.mir_instructions.len), + else => unreachable, + } +} + +/// Asserts there is already capacity to insert into top branch inst_table. +fn processDeath(self: *Self, inst: Air.Inst.Index) void { + const air_tags = self.air.instructions.items(.tag); + if (air_tags[inst] == .constant) return; // Constants are immortal. + // When editing this function, note that the logic must synchronize with `reuseOperand`. + const prev_value = self.getResolvedInstValue(inst); + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacity(inst, .dead); + switch (prev_value) { + .register => |reg| { + self.register_manager.freeReg(reg); + }, + else => {}, // TODO process stack allocation death + } +} + +/// Caller must call `CallMCValues.deinit`. +fn resolveCallingConventionValues(self: *Self, fn_ty: Type, role: RegisterView) !CallMCValues { + const cc = fn_ty.fnCallingConvention(); + const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); + defer self.gpa.free(param_types); + fn_ty.fnParamTypes(param_types); + var result: CallMCValues = .{ + .args = try self.gpa.alloc(MCValue, param_types.len), + // These undefined values must be populated before returning from this function. 
+ .return_value = undefined, + .stack_byte_count = undefined, + .stack_align = undefined, + }; + errdefer self.gpa.free(result.args); + + const ret_ty = fn_ty.fnReturnType(); + + switch (cc) { + .Naked => { + assert(result.args.len == 0); + result.return_value = .{ .unreach = {} }; + result.stack_byte_count = 0; + result.stack_align = 1; + return result; + }, + .Unspecified, .C => { + // SPARC Compliance Definition 2.4.1, Chapter 3 + // Low-Level System Information (64-bit psABI) - Function Calling Sequence + + var next_register: usize = 0; + var next_stack_offset: u32 = 0; + + // The caller puts the argument in %o0-%o5, which becomes %i0-%i5 inside the callee. + const argument_registers = switch (role) { + .caller => abi.c_abi_int_param_regs_caller_view, + .callee => abi.c_abi_int_param_regs_callee_view, + }; + + for (param_types) |ty, i| { + const param_size = @intCast(u32, ty.abiSize(self.target.*)); + if (param_size <= 8) { + if (next_register < argument_registers.len) { + result.args[i] = .{ .register = argument_registers[next_register] }; + next_register += 1; + } else { + result.args[i] = .{ .stack_offset = next_stack_offset }; + next_register += next_stack_offset; + } + } else if (param_size <= 16) { + if (next_register < argument_registers.len - 1) { + return self.fail("TODO MCValues with 2 registers", .{}); + } else if (next_register < argument_registers.len) { + return self.fail("TODO MCValues split register + stack", .{}); + } else { + result.args[i] = .{ .stack_offset = next_stack_offset }; + next_register += next_stack_offset; + } + } else { + result.args[i] = .{ .stack_offset = next_stack_offset }; + next_register += next_stack_offset; + } + } + + result.stack_byte_count = next_stack_offset; + result.stack_align = 16; + + if (ret_ty.zigTypeTag() == .NoReturn) { + result.return_value = .{ .unreach = {} }; + } else if (!ret_ty.hasRuntimeBits()) { + result.return_value = .{ .none = {} }; + } else { + const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*)); + // The callee puts the return values in %i0-%i3, which becomes %o0-%o3 inside the caller. + if (ret_ty_size <= 8) { + result.return_value = switch (role) { + .caller => .{ .register = abi.c_abi_int_return_regs_caller_view[0] }, + .callee => .{ .register = abi.c_abi_int_return_regs_callee_view[0] }, + }; + } else { + return self.fail("TODO support more return values for sparcv9", .{}); + } + } + }, + else => return self.fail("TODO implement function parameters for {} on sparcv9", .{cc}), + } + + return result; +} + +fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { + // First section of indexes correspond to a set number of constant values. + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + const tv = Air.Inst.Ref.typed_value_map[ref_int]; + if (!tv.ty.hasRuntimeBits()) { + return MCValue{ .none = {} }; + } + return self.genTypedValue(tv); + } + + // If the type has no codegen bits, no need to store it. + const inst_ty = self.air.typeOf(inst); + if (!inst_ty.hasRuntimeBits()) + return MCValue{ .none = {} }; + + const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + switch (self.air.instructions.items(.tag)[inst_index]) { + .constant => { + // Constants have static lifetimes, so they are always memoized in the outer most table. 
+ const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); + if (!gop.found_existing) { + const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.values[ty_pl.payload], + }); + } + return gop.value_ptr.*; + }, + .const_ty => unreachable, + else => return self.getResolvedInstValue(inst_index), + } +} + +fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { + if (!self.liveness.operandDies(inst, op_index)) + return false; + + switch (mcv) { + .register => |reg| { + // If it's in the registers table, need to associate the register with the + // new instruction. + if (RegisterManager.indexOfRegIntoTracked(reg)) |index| { + if (!self.register_manager.isRegFree(reg)) { + self.register_manager.registers[index] = inst; + } + } + log.debug("%{d} => {} (reused)", .{ inst, reg }); + }, + .stack_offset => |off| { + log.debug("%{d} => stack offset {d} (reused)", .{ inst, off }); + }, + else => return false, + } + + // Prevent the operand deaths processing code from deallocating it. + self.liveness.clearOperandDeath(inst, op_index); + + // That makes us responsible for doing the rest of the stuff that processDeath would have done. + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead); + + return true; +} diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index 8d870e43f5..2192b21c10 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -42,16 +42,23 @@ pub fn emitMir( for (mir_tags) |tag, index| { const inst = @intCast(u32, index); switch (tag) { + .dbg_arg => try emit.mirDbgArg(inst), .dbg_line => try emit.mirDbgLine(inst), .dbg_prologue_end => try emit.mirDebugPrologueEnd(), .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), - .nop => @panic("TODO implement nop"), + .bpcc => @panic("TODO implement sparcv9 bpcc"), - .save => @panic("TODO implement save"), - .restore => @panic("TODO implement restore"), + .call => @panic("TODO implement sparcv9 call"), - .@"return" => @panic("TODO implement return"), + .jmpl => @panic("TODO implement sparcv9 jmpl"), + + .nop => @panic("TODO implement sparcv9 nop"), + + .@"return" => @panic("TODO implement sparcv9 return"), + + .save => @panic("TODO implement sparcv9 save"), + .restore => @panic("TODO implement sparcv9 restore"), } } } @@ -111,6 +118,17 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { } } +fn mirDbgArg(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const dbg_arg_info = emit.mir.instructions.items(.data)[inst].dbg_arg_info; + _ = dbg_arg_info; + + switch (tag) { + .dbg_arg => {}, // TODO try emit.genArgDbgInfo(dbg_arg_info.air_inst, dbg_arg_info.arg_index), + else => unreachable, + } +} + fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column; diff --git a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index 0b7c2185eb..21c6224930 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -7,9 +7,14 @@ //! so that, for example, the smaller encodings of jump instructions can be used. 
const std = @import("std"); +const builtin = @import("builtin"); +const assert = std.debug.assert; const Mir = @This(); const bits = @import("bits.zig"); +const Air = @import("../../Air.zig"); + +const Instruction = bits.Instruction; const Register = bits.Register; instructions: std.MultiArrayList(Inst).Slice, @@ -23,6 +28,8 @@ pub const Inst = struct { data: Data, pub const Tag = enum(u16) { + /// Pseudo-instruction: Argument + dbg_arg, /// Pseudo-instruction: End of prologue dbg_prologue_end, /// Pseudo-instruction: Beginning of epilogue @@ -33,6 +40,18 @@ pub const Inst = struct { // All the real instructions are ordered by their section number // in The SPARC Architecture Manual, Version 9. + /// A.7 Branch on Integer Condition Codes with Prediction (BPcc) + /// It uses the branch_predict field. + bpcc, + + /// A.8 Call and Link + /// It uses the branch_link field. + call, + + /// A.24 Jump and Link + /// It uses the branch_link field. + jmpl, + /// A.40 No Operation /// It uses the nop field. nop, @@ -50,28 +69,24 @@ pub const Inst = struct { /// The position of an MIR instruction within the `Mir` instructions array. pub const Index = u32; - /// All instructions have a 4-byte payload, which is contained within + /// All instructions have a 8-byte payload, which is contained within /// this union. `Tag` determines which union field is active, as well as /// how to interpret the data within. pub const Data = union { - /// No additional data + /// Debug info: argument /// - /// Used by e.g. flushw - nop: void, + /// Used by e.g. dbg_arg + dbg_arg_info: struct { + air_inst: Air.Inst.Index, + arg_index: usize, + }, - /// Three operand arithmetic. - /// if is_imm true then it uses the imm field of rs2_or_imm, - /// otherwise it uses rs2 field. + /// Debug info: line and column /// - /// Used by e.g. add, sub - arithmetic_3op: struct { - is_imm: bool, - rd: Register, - rs1: Register, - rs2_or_imm: union { - rs2: Register, - imm: i13, - }, + /// Used by e.g. dbg_line + dbg_line_column: struct { + line: u32, + column: u32, }, /// Two operand arithmetic. @@ -88,13 +103,42 @@ pub const Inst = struct { }, }, - /// Debug info: line and column + /// Three operand arithmetic. + /// if is_imm true then it uses the imm field of rs2_or_imm, + /// otherwise it uses rs2 field. /// - /// Used by e.g. dbg_line - dbg_line_column: struct { - line: u32, - column: u32, + /// Used by e.g. add, sub + arithmetic_3op: struct { + is_imm: bool, + rd: Register, + rs1: Register, + rs2_or_imm: union { + rs2: Register, + imm: i13, + }, }, + + /// Branch and link (always unconditional). + /// Used by e.g. call + branch_link: struct { + inst: Index, + link: Register, + }, + + /// Branch with prediction. + /// Used by e.g. bpcc + branch_predict: struct { + annul: bool, + pt: bool, + ccr: Instruction.CCR, + cond: Instruction.Condition, + inst: Index, + }, + + /// No additional data + /// + /// Used by e.g. 
flushw + nop: void, }; }; From 42f4bd34216ae1ae03df0a56502919109e030136 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 4 Apr 2022 21:28:55 +0700 Subject: [PATCH 13/29] stage2: sparcv9: Add breakpoint, ret, and calling mechanism --- src/arch/sparcv9/CodeGen.zig | 441 +++++++++++++++++++++++++++-------- src/arch/sparcv9/Emit.zig | 13 ++ src/arch/sparcv9/Mir.zig | 85 ++++++- 3 files changed, 436 insertions(+), 103 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 40d6db176f..7d0a178f9b 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -453,7 +453,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .bitcast => @panic("TODO try self.airBitCast(inst)"), .block => try self.airBlock(inst), .br => @panic("TODO try self.airBr(inst)"), - .breakpoint => @panic("TODO try self.airBreakpoint()"), + .breakpoint => try self.airBreakpoint(), .ret_addr => @panic("TODO try self.airRetAddr(inst)"), .frame_addr => @panic("TODO try self.airFrameAddress(inst)"), .fence => @panic("TODO try self.airFence()"), @@ -476,7 +476,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .loop => @panic("TODO try self.airLoop(inst)"), .not => @panic("TODO try self.airNot(inst)"), .ptrtoint => @panic("TODO try self.airPtrToInt(inst)"), - .ret => @panic("TODO try self.airRet(inst)"), + .ret => try self.airRet(inst), .ret_load => try self.airRetLoad(inst), .store => try self.airStore(inst), .struct_field_ptr=> @panic("TODO try self.airStructFieldPtr(inst)"), @@ -667,6 +667,21 @@ fn airBlock(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ .none, .none, .none }); } +fn airBreakpoint(self: *Self) !void { + // ta 0x01 + _ = try self.addInst(.{ + .tag = .tcc, + .data = .{ + .trap = .{ + .is_imm = true, + .cond = 0b1000, // TODO need to look into changing this into an enum + .rs2_or_imm = .{ .imm = 0x01 }, + }, + }, + }); + return self.finishAirBookkeeping(); +} + fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.Modifier) !void { if (modifier == .always_tail) return self.fail("TODO implement tail calls for {}", .{self.target.cpu.arch}); @@ -695,10 +710,6 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. .unreach => unreachable, .dead => unreachable, .memory => unreachable, - .compare_flags_signed => unreachable, - .compare_flags_unsigned => unreachable, - .got_load => unreachable, - .direct_load => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); try self.genSetReg(arg_ty, reg, arg_mcv); @@ -712,6 +723,44 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. } } + // Due to incremental compilation, how function calls are generated depends + // on linking. 
+ if (self.air.value(callee)) |func_value| { + if (self.bin_file.tag == link.File.Elf.base_tag) { + if (func_value.castTag(.function)) |func_payload| { + const func = func_payload.data; + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes: u64 = @divExact(ptr_bits, 8); + const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes); + } else unreachable; + + try self.genSetReg(Type.initTag(.usize), .o7, .{ .memory = got_addr }); + + _ = try self.addInst(.{ + .tag = .jmpl, + .data = .{ .branch_link_indirect = .{ .reg = .o7 } }, + }); + } else if (func_value.castTag(.extern_fn)) |_| { + return self.fail("TODO implement calling extern functions", .{}); + } else { + return self.fail("TODO implement calling bitcasted functions", .{}); + } + } else @panic("TODO SPARCv9 currently does not support non-ELF binaries"); + } else { + assert(ty.zigTypeTag() == .Pointer); + const mcv = try self.resolveInst(callee); + try self.genSetReg(ty, .o7, mcv); + + _ = try self.addInst(.{ + .tag = .jmpl, + .data = .{ .branch_link_indirect = .{ .reg = .o7 } }, + }); + } + + // TODO handle return value + return self.fail("TODO implement call for {}", .{self.target.cpu.arch}); } @@ -759,8 +808,17 @@ fn airDiv(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); + try self.ret(operand); + return self.finishAir(inst, .dead, .{ un_op, .none, .none }); +} + fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { - _ = inst; + const un_op = self.air.instructions.items(.data)[inst].un_op; + const ptr = try self.resolveInst(un_op); + _ = ptr; return self.fail("TODO implement airRetLoad for {}", .{self.target.cpu.arch}); //return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } @@ -832,6 +890,37 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return self.allocMem(inst, abi_size, abi_align); } +fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { + const elem_ty = self.air.typeOfIndex(inst); + const target = self.target.*; + const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(target)}); + }; + const abi_align = elem_ty.abiAlignment(self.target.*); + if (abi_align > self.stack_align) + self.stack_align = abi_align; + + if (reg_ok) { + // Make sure the type can fit in a register before we try to allocate one. + if (abi_size <= 8) { + if (self.register_manager.tryAllocReg(inst)) |reg| { + return MCValue{ .register = reg }; + } + } + } + const stack_offset = try self.allocMem(inst, abi_size, abi_align); + return MCValue{ .stack_offset = stack_offset }; +} + +/// Copies a value to a register without tracking the register. The register is not considered +/// allocated. A second call to `copyToTmpRegister` may return the same register. +/// This can have a side effect of spilling instructions to the stack to free up a register. 
+fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { + const reg = try self.register_manager.allocReg(null); + try self.genSetReg(ty, reg, mcv); + return reg; +} + fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; try table.ensureUnusedCapacity(self.gpa, additional_count); @@ -885,37 +974,216 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live self.finishAirBookkeeping(); } +fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, off: i13, abi_size: u64) !void { + _ = value_reg; + _ = addr_reg; + _ = off; + + switch (abi_size) { + 1, 2, 4, 8 => return self.fail("TODO: A.27 Load Integer", .{}), + 3, 5, 6, 7 => return self.fail("TODO: genLoad for more abi_sizes", .{}), + else => unreachable, + } +} + +fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + switch (mcv) { + .dead => unreachable, + .unreach, .none => return, // Nothing to do. + .undef => { + if (!self.wantSafety()) + return; // The already existing value will do just fine. + // Write the debug undefined value. + return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); + }, + .ptr_stack_offset => |off| { + const simm13 = math.cast(u12, off) catch + return self.fail("TODO larger stack offsets", .{}); + + _ = try self.addInst(.{ + .tag = .add, + .data = .{ + .arithmetic_3op = .{ + .is_imm = true, + .rd = reg, + .rs1 = .sp, + .rs2_or_imm = .{ .imm = simm13 }, + }, + }, + }); + }, + .immediate => |x| { + if (x <= math.maxInt(u12)) { + _ = try self.addInst(.{ + .tag = .@"or", + .data = .{ + .arithmetic_3op = .{ + .is_imm = true, + .rd = reg, + .rs1 = .g0, + .rs2_or_imm = .{ .imm = @truncate(u12, x) }, + }, + }, + }); + } else if (x <= math.maxInt(u32)) { + _ = try self.addInst(.{ + .tag = .sethi, + .data = .{ + .sethi = .{ + .rd = reg, + .imm = @truncate(u22, x >> 10), + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .@"or", + .data = .{ + .arithmetic_3op = .{ + .is_imm = true, + .rd = reg, + .rs1 = reg, + .rs2_or_imm = .{ .imm = @truncate(u10, x) }, + }, + }, + }); + } else if (x <= math.maxInt(u44)) { + try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 12) }); + + _ = try self.addInst(.{ + .tag = .sllx, + .data = .{ + .shift = .{ + .is_imm = true, + .width = .shift64, + .rd = reg, + .rs1 = reg, + .rs2_or_imm = .{ .imm = 12 }, + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .@"or", + .data = .{ + .arithmetic_3op = .{ + .is_imm = true, + .rd = reg, + .rs1 = reg, + .rs2_or_imm = .{ .imm = @truncate(u12, x) }, + }, + }, + }); + } else { + // Need to allocate a temporary register to load 64-bit immediates. + const tmp_reg = try self.register_manager.allocReg(null); + + try self.genSetReg(ty, tmp_reg, .{ .immediate = @truncate(u32, x) }); + try self.genSetReg(ty, reg, .{ .immediate = @truncate(u32, x >> 32) }); + + _ = try self.addInst(.{ + .tag = .sllx, + .data = .{ + .shift = .{ + .is_imm = true, + .width = .shift64, + .rd = reg, + .rs1 = reg, + .rs2_or_imm = .{ .imm = 32 }, + }, + }, + }); + + _ = try self.addInst(.{ + .tag = .@"or", + .data = .{ + .arithmetic_3op = .{ + .is_imm = false, + .rd = reg, + .rs1 = reg, + .rs2_or_imm = .{ .rs2 = tmp_reg }, + }, + }, + }); + } + }, + .register => |src_reg| { + // If the registers are the same, nothing to do. 
+ if (src_reg.id() == reg.id()) + return; + + // or %g0, src, dst (aka mov src, dst) + _ = try self.addInst(.{ + .tag = .@"or", + .data = .{ + .arithmetic_3op = .{ + .is_imm = false, + .rd = reg, + .rs1 = .g0, + .rs2_or_imm = .{ .rs2 = src_reg }, + }, + }, + }); + }, + .memory => |addr| { + // The value is in memory at a hard-coded address. + // If the type is a pointer, it means the pointer address is at this memory location. + try self.genSetReg(ty, reg, .{ .immediate = addr }); + try self.genLoad(reg, reg, 0, ty.abiSize(self.target.*)); + }, + .stack_offset => |off| { + const simm13 = math.cast(u12, off) catch + return self.fail("TODO larger stack offsets", .{}); + try self.genLoad(reg, .sp, simm13, ty.abiSize(self.target.*)); + }, + } +} + +fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { + const abi_size = ty.abiSize(self.target.*); + switch (mcv) { + .dead => unreachable, + .unreach, .none => return, // Nothing to do. + .undef => { + if (!self.wantSafety()) + return; // The already existing value will do just fine. + // TODO Upgrade this to a memset call when we have that available. + switch (ty.abiSize(self.target.*)) { + 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), + 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), + 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), + 8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + else => return self.fail("TODO implement memset", .{}), + } + }, + .immediate, + .ptr_stack_offset, + => { + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); + }, + .register => return self.fail("TODO implement storing types abi_size={}", .{abi_size}), + .memory, .stack_offset => return self.fail("TODO implement memcpy", .{}), + } +} + fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; if (typed_value.val.castTag(.decl_ref)) |payload| { - return self.lowerDeclRef(typed_value, payload.data); + _ = payload; + return self.fail("TODO implement lowerDeclRef", .{}); + // return self.lowerDeclRef(typed_value, payload.data); } if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - return self.lowerDeclRef(typed_value, payload.data.decl); + _ = payload; + return self.fail("TODO implement lowerDeclRef", .{}); + // return self.lowerDeclRef(typed_value, payload.data.decl); } const target = self.target.*; switch (typed_value.ty.zigTypeTag()) { - .Pointer => switch (typed_value.ty.ptrSize()) { - .Slice => { - return self.lowerUnnamedConst(typed_value); - }, - else => { - switch (typed_value.val.tag()) { - .int_u64 => { - return MCValue{ .immediate = typed_value.val.toUnsignedInt(target) }; - }, - .slice => { - return self.lowerUnnamedConst(typed_value); - }, - else => { - return self.fail("TODO codegen more kinds of const pointers: {}", .{typed_value.val.tag()}); - }, - } - }, - }, .Int => { const info = typed_value.ty.intInfo(self.target.*); if (info.bits <= 64) { @@ -929,83 +1197,11 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return MCValue{ .immediate = unsigned }; } else { - return self.lowerUnnamedConst(typed_value); + return self.fail("TODO implement int genTypedValue of > 64 bits", .{}); } }, - .Bool => { - return MCValue{ .immediate = @boolToInt(typed_value.val.toBool()) }; - }, .ComptimeInt => unreachable, // semantic 
analysis prevents this .ComptimeFloat => unreachable, // semantic analysis prevents this - .Optional => { - if (typed_value.ty.isPtrLikeOptional()) { - if (typed_value.val.isNull()) - return MCValue{ .immediate = 0 }; - - var buf: Type.Payload.ElemType = undefined; - return self.genTypedValue(.{ - .ty = typed_value.ty.optionalChild(&buf), - .val = typed_value.val, - }); - } else if (typed_value.ty.abiSize(self.target.*) == 1) { - return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) }; - } - return self.fail("TODO non pointer optionals", .{}); - }, - .Enum => { - if (typed_value.val.castTag(.enum_field_index)) |field_index| { - switch (typed_value.ty.tag()) { - .enum_simple => { - return MCValue{ .immediate = field_index.data }; - }, - .enum_full, .enum_nonexhaustive => { - const enum_full = typed_value.ty.cast(Type.Payload.EnumFull).?.data; - if (enum_full.values.count() != 0) { - const tag_val = enum_full.values.keys()[field_index.data]; - return self.genTypedValue(.{ .ty = enum_full.tag_ty, .val = tag_val }); - } else { - return MCValue{ .immediate = field_index.data }; - } - }, - else => unreachable, - } - } else { - var int_tag_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = typed_value.ty.intTagType(&int_tag_buffer); - return self.genTypedValue(.{ .ty = int_tag_ty, .val = typed_value.val }); - } - }, - .ErrorSet => { - const err_name = typed_value.val.castTag(.@"error").?.data.name; - const module = self.bin_file.options.module.?; - const global_error_set = module.global_error_set; - const error_index = global_error_set.get(err_name).?; - return MCValue{ .immediate = error_index }; - }, - .ErrorUnion => { - const error_type = typed_value.ty.errorUnionSet(); - const payload_type = typed_value.ty.errorUnionPayload(); - - if (typed_value.val.castTag(.eu_payload)) |pl| { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return MCValue{ .immediate = 0 }; - } - - _ = pl; - return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty.fmtDebug()}); - } else { - if (!payload_type.hasRuntimeBits()) { - // We use the error type directly as the type. - return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); - } - - return self.fail("TODO implement error union const of type '{}' (error)", .{typed_value.ty.fmtDebug()}); - } - }, - .Struct => { - return self.lowerUnnamedConst(typed_value); - }, else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty.fmtDebug()}), } } @@ -1171,6 +1367,18 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { } } +fn ret(self: *Self, mcv: MCValue) !void { + const ret_ty = self.fn_type.fnReturnType(); + try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); + + // Just add space for an instruction, patch this later + const index = try self.addInst(.{ + .tag = .nop, + .data = .{ .nop = {} }, + }); + try self.exitlude_jump_relocs.append(self.gpa, index); +} + fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { if (!self.liveness.operandDies(inst, op_index)) return false; @@ -1201,3 +1409,36 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind return true; } + +/// Sets the value without any modifications to register allocation metadata or stack allocation metadata. 
+fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { + switch (loc) { + .none => return, + .register => |reg| return self.genSetReg(ty, reg, val), + .stack_offset => |off| return self.genSetStack(ty, off, val), + .memory => { + return self.fail("TODO implement setRegOrMem for memory", .{}); + }, + else => unreachable, + } +} + +pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { + const stack_mcv = try self.allocRegOrMem(inst, false); + log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); + const reg_mcv = self.getResolvedInstValue(inst); + assert(reg == reg_mcv.register); + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + try branch.inst_table.put(self.gpa, inst, stack_mcv); + try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); +} + +/// TODO support scope overrides. Also note this logic is duplicated with `Module.wantSafety`. +fn wantSafety(self: *Self) bool { + return switch (self.bin_file.options.optimize_mode) { + .Debug => true, + .ReleaseSafe => true, + .ReleaseFast => false, + .ReleaseSmall => false, + }; +} diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index 2192b21c10..4cb789b942 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -47,11 +47,16 @@ pub fn emitMir( .dbg_prologue_end => try emit.mirDebugPrologueEnd(), .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), + .add => @panic("TODO implement sparcv9 add"), + .bpcc => @panic("TODO implement sparcv9 bpcc"), .call => @panic("TODO implement sparcv9 call"), .jmpl => @panic("TODO implement sparcv9 jmpl"), + .jmpl_i => @panic("TODO implement sparcv9 jmpl to reg"), + + .@"or" => @panic("TODO implement sparcv9 or"), .nop => @panic("TODO implement sparcv9 nop"), @@ -59,6 +64,14 @@ pub fn emitMir( .save => @panic("TODO implement sparcv9 save"), .restore => @panic("TODO implement sparcv9 restore"), + + .sethi => @panic("TODO implement sparcv9 sethi"), + + .sllx => @panic("TODO implement sparcv9 sllx"), + + .sub => @panic("TODO implement sparcv9 sub"), + + .tcc => @panic("TODO implement sparcv9 tcc"), } } } diff --git a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index 21c6224930..3ff675fc36 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -40,6 +40,11 @@ pub const Inst = struct { // All the real instructions are ordered by their section number // in The SPARC Architecture Manual, Version 9. + /// A.2 Add + /// Those uses the arithmetic_3op field. + // TODO add other operations. + add, + /// A.7 Branch on Integer Condition Codes with Prediction (BPcc) /// It uses the branch_predict field. bpcc, @@ -49,8 +54,16 @@ pub const Inst = struct { call, /// A.24 Jump and Link - /// It uses the branch_link field. + /// jmpl (far direct jump) uses the branch_link field, + /// while jmpl_i (indirect jump) uses the branch_link_indirect field. + /// Those two MIR instructions will be lowered into SPARCv9 jmpl instruction. jmpl, + jmpl_i, + + /// A.31 Logical Operations + /// Those uses the arithmetic_3op field. + // TODO add other operations. + @"or", /// A.40 No Operation /// It uses the nop field. @@ -64,6 +77,24 @@ pub const Inst = struct { /// Those uses the arithmetic_3op field. save, restore, + + /// A.48 SETHI + /// It uses the sethi field. + sethi, + + /// A.49 Shift + /// Those uses the shift field. + // TODO add other operations. + sllx, + + /// A.56 Subtract + /// Those uses the arithmetic_3op field. + // TODO add other operations. 
+ sub, + + /// A.61 Trap on Integer Condition Codes (Tcc) + /// It uses the trap field. + tcc, }; /// The position of an MIR instruction within the `Mir` instructions array. @@ -72,6 +103,7 @@ pub const Inst = struct { /// All instructions have a 8-byte payload, which is contained within /// this union. `Tag` determines which union field is active, as well as /// how to interpret the data within. + // TODO this is a quick-n-dirty solution that needs to be cleaned up. pub const Data = union { /// Debug info: argument /// @@ -122,14 +154,21 @@ pub const Inst = struct { /// Used by e.g. call branch_link: struct { inst: Index, - link: Register, + link: Register = .o7, + }, + + /// Indirect branch and link (always unconditional). + /// Used by e.g. jmpl_i + branch_link_indirect: struct { + reg: Register, + link: Register = .o7, }, /// Branch with prediction. /// Used by e.g. bpcc branch_predict: struct { annul: bool, - pt: bool, + pt: bool = true, ccr: Instruction.CCR, cond: Instruction.Condition, inst: Index, @@ -139,6 +178,46 @@ pub const Inst = struct { /// /// Used by e.g. flushw nop: void, + + /// SETHI operands. + /// + /// Used by sethi + sethi: struct { + rd: Register, + imm: u22, + }, + + /// Shift operands. + /// if is_imm true then it uses the imm field of rs2_or_imm, + /// otherwise it uses rs2 field. + /// + /// Used by e.g. add, sub + shift: struct { + is_imm: bool, + width: Instruction.ShiftWidth, + rd: Register, + rs1: Register, + rs2_or_imm: union { + rs2: Register, + imm: u6, + }, + }, + + /// Trap. + /// if is_imm true then it uses the imm field of rs2_or_imm, + /// otherwise it uses rs2 field. + /// + /// Used by e.g. tcc + trap: struct { + is_imm: bool = true, + cond: Instruction.Condition, + ccr: Instruction.CCR = .icc, + rs1: Register = .g0, + rs2_or_imm: union { + rs2: Register, + imm: u8, + }, + }, }; }; From 5e2045cbe549c9020ebbbecc763699fc4a68c818 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sun, 10 Apr 2022 11:06:29 +0700 Subject: [PATCH 14/29] stage2: sparcv9: Implement basic asm codegen --- src/arch/sparcv9/CodeGen.zig | 122 ++++++++++++++++++++++++++++++++--- 1 file changed, 113 insertions(+), 9 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 7d0a178f9b..7ff1473921 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -521,8 +521,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { => try self.airDbgBlock(inst), .call => try self.airCall(inst, .auto), - .call_always_tail => @panic("TODO try self.airCall(inst, .always_tail)"), - .call_never_tail => @panic("TODO try self.airCall(inst, .never_tail)"), + .call_always_tail => try self.airCall(inst, .always_tail), + .call_never_tail => try self.airCall(inst, .never_tail), .call_never_inline => try self.airCall(inst, .never_inline), .atomic_store_unordered => @panic("TODO try self.airAtomicStore(inst, .Unordered)"), @@ -586,16 +586,106 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const is_volatile = (extra.data.flags & 0x80000000) != 0; const clobbers_len = @truncate(u31, extra.data.flags); var extra_i: usize = extra.end; - const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.outputs_len]); + const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..extra_i+extra.data.outputs_len]); extra_i += outputs.len; - const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..][0..extra.data.inputs_len]); + const inputs = @bitCast([]const Air.Inst.Ref, 
self.air.extra[extra_i..extra_i+extra.data.inputs_len]); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); - _ = dead; - _ = clobbers_len; + const result: MCValue = if (dead) .dead else result: { + if (outputs.len > 1) { + return self.fail("TODO implement codegen for asm with more than 1 output", .{}); + } - return self.fail("TODO implement asm for {}", .{self.target.cpu.arch}); + const output_constraint: ?[]const u8 = for (outputs) |output| { + if (output != .none) { + return self.fail("TODO implement codegen for non-expr asm", .{}); + } + const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + // This equation accounts for the fact that even if we have exactly 4 bytes + // for the string, we still use the next u32 for the null terminator. + extra_i += constraint.len / 4 + 1; + + break constraint; + } else null; + + for (inputs) |input| { + const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + // This equation accounts for the fact that even if we have exactly 4 bytes + // for the string, we still use the next u32 for the null terminator. + extra_i += constraint.len / 4 + 1; + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); + } + const reg_name = constraint[1 .. constraint.len - 1]; + const reg = parseRegName(reg_name) orelse + return self.fail("unrecognized register: '{s}'", .{reg_name}); + + const arg_mcv = try self.resolveInst(input); + try self.register_manager.getReg(reg, null); + try self.genSetReg(self.air.typeOf(input), reg, arg_mcv); + } + + { + var clobber_i: u32 = 0; + while (clobber_i < clobbers_len) : (clobber_i += 1) { + const clobber = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + // This equation accounts for the fact that even if we have exactly 4 bytes + // for the string, we still use the next u32 for the null terminator. + extra_i += clobber.len / 4 + 1; + + // TODO honor these + } + } + + const asm_source = std.mem.sliceAsBytes(self.air.extra[extra_i..])[0..extra.data.source_len]; + + if (mem.eql(u8, asm_source, "ta 0x6d")) { + _ = try self.addInst(.{ + .tag = .tcc, + .data = .{ + .trap = .{ + .is_imm = true, + .cond = 0b1000, // TODO need to look into changing this into an enum + .rs2_or_imm = .{ .imm = 0x6d }, + }, + }, + }); + } else { + return self.fail("TODO implement a full SPARCv9 assembly parsing", .{}); + } + + if (output_constraint) |output| { + if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); + } + const reg_name = output[2 .. 
output.len - 1]; + const reg = parseRegName(reg_name) orelse + return self.fail("unrecognized register: '{s}'", .{reg_name}); + break :result MCValue{ .register = reg }; + } else { + break :result MCValue{ .none = {} }; + } + }; + + simple: { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + var buf_index: usize = 0; + for (outputs) |output| { + if (output == .none) continue; + + if (buf_index >= buf.len) break :simple; + buf[buf_index] = output; + buf_index += 1; + } + if (buf_index + inputs.len > buf.len) break :simple; + std.mem.copy(Air.Inst.Ref, buf[buf_index..], inputs); + return self.finishAir(inst, result, buf); + } + + @panic("TODO implement asm return"); + //return self.fail("TODO implement asm return for {}", .{self.target.cpu.arch}); } fn airArg(self: *Self, inst: Air.Inst.Index) !void { @@ -759,9 +849,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. }); } - // TODO handle return value + const result = info.return_value; - return self.fail("TODO implement call for {}", .{self.target.cpu.arch}); + if (args.len + 1 <= Liveness.bpi - 1) { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + buf[0] = callee; + std.mem.copy(Air.Inst.Ref, buf[1..], args); + return self.finishAir(inst, result, buf); + } + + @panic("TODO handle return value with BigTomb"); } fn airDbgBlock(self: *Self, inst: Air.Inst.Index) !void { @@ -1218,6 +1315,13 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } } +fn parseRegName(name: []const u8) ?Register { + if (@hasDecl(Register, "parseRegName")) { + return Register.parseRegName(name); + } + return std.meta.stringToEnum(Register, name); +} + fn performReloc(self: *Self, inst: Mir.Inst.Index) !void { const tag = self.mir_instructions.items(.tag)[inst]; switch (tag) { From 7051970ad7e277d93c8bffee7221e3e8c45c8eab Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sun, 10 Apr 2022 14:29:06 +0700 Subject: [PATCH 15/29] stage2: sparcv9: implement basic instruction lowering --- src/arch/sparcv9/CodeGen.zig | 4 +- src/arch/sparcv9/Emit.zig | 194 ++++++++++++++++++++++++++--------- src/arch/sparcv9/Mir.zig | 15 ++- src/arch/sparcv9/bits.zig | 75 +++++++++++++- 4 files changed, 231 insertions(+), 57 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 7ff1473921..193600804e 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -1270,12 +1270,12 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.castTag(.decl_ref)) |payload| { _ = payload; - return self.fail("TODO implement lowerDeclRef", .{}); + return self.fail("TODO implement lowerDeclRef non-mut", .{}); // return self.lowerDeclRef(typed_value, payload.data); } if (typed_value.val.castTag(.decl_ref_mut)) |payload| { _ = payload; - return self.fail("TODO implement lowerDeclRef", .{}); + return self.fail("TODO implement lowerDeclRef mut", .{}); // return self.lowerDeclRef(typed_value, payload.data.decl); } const target = self.target.*; diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index 4cb789b942..7ff1aeb532 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -2,6 +2,7 @@ //! 
machine code const std = @import("std"); +const Endian = std.builtin.Endian; const assert = std.debug.assert; const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); @@ -14,6 +15,8 @@ const leb128 = std.leb; const Emit = @This(); const Mir = @import("Mir.zig"); const bits = @import("bits.zig"); +const Instruction = bits.Instruction; +const Register = bits.Register; mir: Mir, bin_file: *link.File, @@ -47,7 +50,7 @@ pub fn emitMir( .dbg_prologue_end => try emit.mirDebugPrologueEnd(), .dbg_epilogue_begin => try emit.mirDebugEpilogueBegin(), - .add => @panic("TODO implement sparcv9 add"), + .add => try emit.mirArithmetic3Op(inst), .bpcc => @panic("TODO implement sparcv9 bpcc"), @@ -56,22 +59,22 @@ pub fn emitMir( .jmpl => @panic("TODO implement sparcv9 jmpl"), .jmpl_i => @panic("TODO implement sparcv9 jmpl to reg"), - .@"or" => @panic("TODO implement sparcv9 or"), + .@"or" => try emit.mirArithmetic3Op(inst), - .nop => @panic("TODO implement sparcv9 nop"), + .nop => try emit.mirNop(), - .@"return" => @panic("TODO implement sparcv9 return"), + .@"return" => try emit.mirArithmetic2Op(inst), - .save => @panic("TODO implement sparcv9 save"), - .restore => @panic("TODO implement sparcv9 restore"), + .save => try emit.mirArithmetic3Op(inst), + .restore => try emit.mirArithmetic3Op(inst), .sethi => @panic("TODO implement sparcv9 sethi"), .sllx => @panic("TODO implement sparcv9 sllx"), - .sub => @panic("TODO implement sparcv9 sub"), + .sub => try emit.mirArithmetic3Op(inst), - .tcc => @panic("TODO implement sparcv9 tcc"), + .tcc => try emit.mirTrap(inst), } } } @@ -80,6 +83,129 @@ pub fn deinit(emit: *Emit) void { emit.* = undefined; } +fn mirDbgArg(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const dbg_arg_info = emit.mir.instructions.items(.data)[inst].dbg_arg_info; + _ = dbg_arg_info; + + switch (tag) { + .dbg_arg => {}, // TODO try emit.genArgDbgInfo(dbg_arg_info.air_inst, dbg_arg_info.arg_index), + else => unreachable, + } +} + +fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column; + + switch (tag) { + .dbg_line => try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column), + else => unreachable, + } +} + +fn mirDebugPrologueEnd(self: *Emit) !void { + switch (self.debug_output) { + .dwarf => |dbg_out| { + try dbg_out.dbg_line.append(DW.LNS.set_prologue_end); + try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } +} + +fn mirDebugEpilogueBegin(self: *Emit) !void { + switch (self.debug_output) { + .dwarf => |dbg_out| { + try dbg_out.dbg_line.append(DW.LNS.set_epilogue_begin); + try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column); + }, + .plan9 => {}, + .none => {}, + } +} + +fn mirArithmetic2Op(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const data = emit.mir.instructions.items(.data)[inst].arithmetic_2op; + + const rs1 = data.rs1; + + if (data.is_imm) { + const imm = data.rs2_or_imm.imm; + switch (tag) { + .@"return" => try emit.writeInstruction(Instruction.@"return"(i13, rs1, imm)), + else => unreachable, + } + } else { + const rs2 = data.rs2_or_imm.rs2; + switch (tag) { + .@"return" => try emit.writeInstruction(Instruction.@"return"(Register, rs1, rs2)), + else => unreachable, + } + } +} + +fn mirArithmetic3Op(emit: *Emit, inst: 
Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const data = emit.mir.instructions.items(.data)[inst].arithmetic_3op; + + const rd = data.rd; + const rs1 = data.rs1; + + if (data.is_imm) { + const imm = data.rs2_or_imm.imm; + switch (tag) { + .add => try emit.writeInstruction(Instruction.add(i13, rs1, imm, rd)), + .@"or" => try emit.writeInstruction(Instruction.@"or"(i13, rs1, imm, rd)), + .save => try emit.writeInstruction(Instruction.save(i13, rs1, imm, rd)), + .restore => try emit.writeInstruction(Instruction.restore(i13, rs1, imm, rd)), + .sub => try emit.writeInstruction(Instruction.sub(i13, rs1, imm, rd)), + else => unreachable, + } + } else { + const rs2 = data.rs2_or_imm.rs2; + switch (tag) { + .add => try emit.writeInstruction(Instruction.add(Register, rs1, rs2, rd)), + .@"or" => try emit.writeInstruction(Instruction.@"or"(Register, rs1, rs2, rd)), + .save => try emit.writeInstruction(Instruction.save(Register, rs1, rs2, rd)), + .restore => try emit.writeInstruction(Instruction.restore(Register, rs1, rs2, rd)), + .sub => try emit.writeInstruction(Instruction.sub(Register, rs1, rs2, rd)), + else => unreachable, + } + } +} + +fn mirNop(emit: *Emit) !void { + try emit.writeInstruction(Instruction.nop()); +} + +fn mirTrap(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const data = emit.mir.instructions.items(.data)[inst].trap; + + const cond = data.cond; + const ccr = data.ccr; + const rs1 = data.rs1; + + if (data.is_imm) { + const imm = data.rs2_or_imm.imm; + switch (tag) { + .tcc => try emit.writeInstruction(Instruction.trap(u7, cond, ccr, rs1, imm)), + else => unreachable, + } + } else { + const rs2 = data.rs2_or_imm.rs2; + switch (tag) { + .tcc => try emit.writeInstruction(Instruction.trap(Register, cond, ccr, rs1, rs2)), + else => unreachable, + } + } +} + +// Common helper functions + fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { const delta_line = @intCast(i32, line) - @intCast(i32, self.prev_di_line); const delta_pc: usize = self.code.items.len - self.prev_di_pc; @@ -131,52 +257,18 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { } } -fn mirDbgArg(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const dbg_arg_info = emit.mir.instructions.items(.data)[inst].dbg_arg_info; - _ = dbg_arg_info; - - switch (tag) { - .dbg_arg => {}, // TODO try emit.genArgDbgInfo(dbg_arg_info.air_inst, dbg_arg_info.arg_index), - else => unreachable, - } -} - -fn mirDbgLine(emit: *Emit, inst: Mir.Inst.Index) !void { - const tag = emit.mir.instructions.items(.tag)[inst]; - const dbg_line_column = emit.mir.instructions.items(.data)[inst].dbg_line_column; - - switch (tag) { - .dbg_line => try emit.dbgAdvancePCAndLine(dbg_line_column.line, dbg_line_column.column), - else => unreachable, - } -} - -fn mirDebugPrologueEnd(self: *Emit) !void { - switch (self.debug_output) { - .dwarf => |dbg_out| { - try dbg_out.dbg_line.append(DW.LNS.set_prologue_end); - try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column); - }, - .plan9 => {}, - .none => {}, - } -} - -fn mirDebugEpilogueBegin(self: *Emit) !void { - switch (self.debug_output) { - .dwarf => |dbg_out| { - try dbg_out.dbg_line.append(DW.LNS.set_epilogue_begin); - try self.dbgAdvancePCAndLine(self.prev_di_line, self.prev_di_column); - }, - .plan9 => {}, - .none => {}, - } -} - fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { @setCold(true); 
assert(emit.err_msg == null); emit.err_msg = try ErrorMsg.create(emit.bin_file.allocator, emit.src_loc, format, args); return error.EmitFail; } + +fn writeInstruction(emit: *Emit, instruction: Instruction) !void { + // SPARCv9 instructions are always arranged in BE regardless of the + // endianness mode the CPU is running in. + // This is to ease porting in case someone wants to do a LE SPARCv9 backend. + const endian = Endian.Big; + + std.mem.writeInt(u32, try emit.code.addManyAsArray(4), instruction.toU32(), endian); +} diff --git a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index 3ff675fc36..02974dfda3 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -167,7 +167,7 @@ pub const Inst = struct { /// Branch with prediction. /// Used by e.g. bpcc branch_predict: struct { - annul: bool, + annul: bool = false, pt: bool = true, ccr: Instruction.CCR, cond: Instruction.Condition, @@ -215,10 +215,21 @@ pub const Inst = struct { rs1: Register = .g0, rs2_or_imm: union { rs2: Register, - imm: u8, + imm: u7, }, }, }; + + // Make sure we don't accidentally make instructions bigger than expected. + // Note that in Debug builds, Zig is allowed to insert a secret field for safety checks. + comptime { + if (builtin.mode != .Debug) { + // TODO clean up the definition of Data before enabling this. + // I'll do that after the PoC backend can produce usable binaries. + + // assert(@sizeOf(Data) == 8); + } + } }; pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void { diff --git a/src/arch/sparcv9/bits.zig b/src/arch/sparcv9/bits.zig index 07cbf7fc91..952ddef4a9 100644 --- a/src/arch/sparcv9/bits.zig +++ b/src/arch/sparcv9/bits.zig @@ -546,6 +546,9 @@ pub const Instruction = union(enum) { }; } + // SPARCv9 Instruction formats. + // See section 6.2 of the SPARCv9 ISA manual. + fn format1(disp: i32) Instruction { const udisp = @bitCast(u32, disp); @@ -561,7 +564,7 @@ pub const Instruction = union(enum) { }; } - fn format2a(op2: u3, rd: Register, imm: u22) Instruction { + fn format2a(op2: u3, imm: u22, rd: Register) Instruction { return Instruction{ .format_2a = .{ .rd = rd.enc(), @@ -956,6 +959,74 @@ pub const Instruction = union(enum) { }, }; } + + // SPARCv9 Instruction definition. + // See appendix A of the SPARCv9 ISA manual. 
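+    //
+    // Most of the builders below take a comptime `s2` type to select between the
+    // register form and the immediate form of an instruction. For example,
+    // `Instruction.add(Register, .o0, .o1, .o2)` encodes `add %o0, %o1, %o2` (format 3a),
+    // while `Instruction.add(i13, .o0, 42, .o2)` encodes `add %o0, 42, %o2` (format 3b).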
+ + pub fn add(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch(s2) { + Register => format3a(0b10, 0b00_0000, rs1, rs2, rd), + i13 => format3b(0b10, 0b00_0000, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn @"or"(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch(s2) { + Register => format3a(0b10, 0b00_0010, rs1, rs2, rd), + i13 => format3b(0b10, 0b00_0010, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn nop() Instruction { + return sethi(0, .g0); + } + + pub fn @"return"(comptime s2: type, rs1: Register, rs2: s2) Instruction { + return switch(s2) { + Register => format3c(0b10, 0b11_1001, rs1, rs2), + i13 => format3d(0b10, 0b11_1001, rs1, rs2), + else => unreachable, + }; + } + + pub fn save(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch(s2) { + Register => format3a(0b10, 0b11_1100, rs1, rs2, rd), + i13 => format3b(0b10, 0b11_1100, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn restore(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch(s2) { + Register => format3a(0b10, 0b11_1101, rs1, rs2, rd), + i13 => format3b(0b10, 0b11_1101, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn sethi(imm: u22, rd: Register) Instruction { + return format2a(0b100, imm, rd); + } + + pub fn sub(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch(s2) { + Register => format3a(0b10, 0b00_0100, rs1, rs2, rd), + i13 => format3b(0b10, 0b00_0100, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn trap(comptime s2: type, cond: Condition, ccr: CCR, rs1: Register, rs2: s2) Instruction { + // Tcc instructions abuse the rd field to store the conditionals. 
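+        // The 4-bit condition code ends up in the low bits of the 5-bit rd slot
+        // (bits 28:25 of the instruction word), hence the @intToEnum(Register, cond)
+        // casts below.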
+ return switch(s2) { + Register => format4a(0b11_1010, ccr, rs1, rs2, @intToEnum(Register, cond)), + u7 => format4e(0b00_0100, ccr, rs1, @intToEnum(Register, cond), rs2), + else => unreachable, + }; + } }; test "Serialize formats" { @@ -973,7 +1044,7 @@ test "Serialize formats" { .expected = 0b01_000000000000000000000000000001, }, .{ - .inst = Instruction.format2a(4, .g0, 0), + .inst = Instruction.format2a(4, 0, .g0), .expected = 0b00_00000_100_0000000000000000000000, }, .{ From ab2ea9fb09df6821a7b6cae7990b9bc5be7c1f61 Mon Sep 17 00:00:00 2001 From: Flandre Scarlet Date: Sun, 10 Apr 2022 19:03:37 +0700 Subject: [PATCH 16/29] stage2: sparcv9: Test failure error logging --- src/arch/sparcv9/bits.zig | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/arch/sparcv9/bits.zig b/src/arch/sparcv9/bits.zig index 952ddef4a9..c472fc6b09 100644 --- a/src/arch/sparcv9/bits.zig +++ b/src/arch/sparcv9/bits.zig @@ -1167,6 +1167,10 @@ test "Serialize formats" { for (testcases) |case| { const actual = case.inst.toU32(); - try testing.expectEqual(case.expected, actual); + testing.expectEqual(case.expected, actual) catch |err| { + std.debug.print("error: {x}\n", .{err}); + std.debug.print("case: {x}\n", .{case}); + return err; + }; } } From cfd389f927112cbc81e71118493a7b9fba18192d Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sun, 10 Apr 2022 19:04:16 +0700 Subject: [PATCH 17/29] stage2: sparcv9: zig fmt --- src/arch/sparcv9/CodeGen.zig | 4 ++-- src/arch/sparcv9/bits.zig | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 193600804e..635d7bb8f2 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -586,9 +586,9 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { const is_volatile = (extra.data.flags & 0x80000000) != 0; const clobbers_len = @truncate(u31, extra.data.flags); var extra_i: usize = extra.end; - const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..extra_i+extra.data.outputs_len]); + const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.outputs_len]); extra_i += outputs.len; - const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i..extra_i+extra.data.inputs_len]); + const inputs = @bitCast([]const Air.Inst.Ref, self.air.extra[extra_i .. extra_i + extra.data.inputs_len]); extra_i += inputs.len; const dead = !is_volatile and self.liveness.isUnused(inst); diff --git a/src/arch/sparcv9/bits.zig b/src/arch/sparcv9/bits.zig index c472fc6b09..83c560e584 100644 --- a/src/arch/sparcv9/bits.zig +++ b/src/arch/sparcv9/bits.zig @@ -964,7 +964,7 @@ pub const Instruction = union(enum) { // See appendix A of the SPARCv9 ISA manual. 
pub fn add(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { - return switch(s2) { + return switch (s2) { Register => format3a(0b10, 0b00_0000, rs1, rs2, rd), i13 => format3b(0b10, 0b00_0000, rs1, rs2, rd), else => unreachable, @@ -972,7 +972,7 @@ pub const Instruction = union(enum) { } pub fn @"or"(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { - return switch(s2) { + return switch (s2) { Register => format3a(0b10, 0b00_0010, rs1, rs2, rd), i13 => format3b(0b10, 0b00_0010, rs1, rs2, rd), else => unreachable, @@ -984,7 +984,7 @@ pub const Instruction = union(enum) { } pub fn @"return"(comptime s2: type, rs1: Register, rs2: s2) Instruction { - return switch(s2) { + return switch (s2) { Register => format3c(0b10, 0b11_1001, rs1, rs2), i13 => format3d(0b10, 0b11_1001, rs1, rs2), else => unreachable, @@ -992,7 +992,7 @@ pub const Instruction = union(enum) { } pub fn save(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { - return switch(s2) { + return switch (s2) { Register => format3a(0b10, 0b11_1100, rs1, rs2, rd), i13 => format3b(0b10, 0b11_1100, rs1, rs2, rd), else => unreachable, @@ -1000,7 +1000,7 @@ pub const Instruction = union(enum) { } pub fn restore(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { - return switch(s2) { + return switch (s2) { Register => format3a(0b10, 0b11_1101, rs1, rs2, rd), i13 => format3b(0b10, 0b11_1101, rs1, rs2, rd), else => unreachable, @@ -1012,7 +1012,7 @@ pub const Instruction = union(enum) { } pub fn sub(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { - return switch(s2) { + return switch (s2) { Register => format3a(0b10, 0b00_0100, rs1, rs2, rd), i13 => format3b(0b10, 0b00_0100, rs1, rs2, rd), else => unreachable, @@ -1021,7 +1021,7 @@ pub const Instruction = union(enum) { pub fn trap(comptime s2: type, cond: Condition, ccr: CCR, rs1: Register, rs2: s2) Instruction { // Tcc instructions abuse the rd field to store the conditionals. - return switch(s2) { + return switch (s2) { Register => format4a(0b11_1010, ccr, rs1, rs2, @intToEnum(Register, cond)), u7 => format4e(0b00_0100, ccr, rs1, @intToEnum(Register, cond), rs2), else => unreachable, From 1f63afa7c9f57a4d7657890841bdca10c3763534 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sun, 10 Apr 2022 20:10:06 +0700 Subject: [PATCH 18/29] stage2: sparcv9: Register the backend in stdlib & driver --- lib/std/builtin.zig | 6 +++++- lib/std/start.zig | 1 + src/Compilation.zig | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 7b66998dc1..f38fc4e155 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -716,6 +716,9 @@ pub const CompilerBackend = enum(u64) { /// The reference implementation self-hosted compiler of Zig, using the /// riscv64 backend. stage2_riscv64 = 9, + /// The reference implementation self-hosted compiler of Zig, using the + /// sparcv9 backend. 
+ stage2_sparcv9 = 10, _, }; @@ -761,7 +764,8 @@ pub fn default_panic(msg: []const u8, error_return_trace: ?*StackTrace) noreturn builtin.zig_backend == .stage2_aarch64 or builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_x86 or - builtin.zig_backend == .stage2_riscv64) + builtin.zig_backend == .stage2_riscv64 or + builtin.zig_backend == .stage2_sparcv9) { while (true) { @breakpoint(); diff --git a/lib/std/start.zig b/lib/std/start.zig index cd247c915e..20f369476d 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -29,6 +29,7 @@ comptime { builtin.zig_backend == .stage2_aarch64 or builtin.zig_backend == .stage2_arm or builtin.zig_backend == .stage2_riscv64 or + builtin.zig_backend == .stage2_sparcv9 or (builtin.zig_backend == .stage2_llvm and native_os != .linux) or (builtin.zig_backend == .stage2_llvm and native_arch != .x86_64)) { diff --git a/src/Compilation.zig b/src/Compilation.zig index 338be582d8..6c486de36a 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -4531,6 +4531,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca .i386 => .stage2_x86, .aarch64, .aarch64_be, .aarch64_32 => .stage2_aarch64, .riscv64 => .stage2_riscv64, + .sparcv9 => .stage2_sparcv9, else => .other, }; }; From 1467590e402b5b198ce7c81540263a8e08329e3c Mon Sep 17 00:00:00 2001 From: Koakuma Date: Sun, 10 Apr 2022 20:52:16 +0700 Subject: [PATCH 19/29] stage2: sparcv9: Implement enough instruction to compile simple exes --- src/arch/sparcv9/CodeGen.zig | 156 +++++++++++++++++++++++++++++++---- src/arch/sparcv9/Emit.zig | 26 +++++- src/arch/sparcv9/Mir.zig | 10 +++ src/arch/sparcv9/bits.zig | 32 +++++++ 4 files changed, 208 insertions(+), 16 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index 635d7bb8f2..de7c786096 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -193,6 +193,43 @@ const CallMCValues = struct { } }; +const BigTomb = struct { + function: *Self, + inst: Air.Inst.Index, + tomb_bits: Liveness.Bpi, + big_tomb_bits: u32, + bit_index: usize, + + fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void { + const this_bit_index = bt.bit_index; + bt.bit_index += 1; + + const op_int = @enumToInt(op_ref); + if (op_int < Air.Inst.Ref.typed_value_map.len) return; + const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + + if (this_bit_index < Liveness.bpi - 1) { + const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0; + if (!dies) return; + } else { + const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1)); + const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0; + if (!dies) return; + } + bt.function.processDeath(op_index); + } + + fn finishAir(bt: *BigTomb, result: MCValue) void { + const is_used = !bt.function.liveness.isUnused(bt.inst); + if (is_used) { + log.debug("%{d} => {}", .{ bt.inst, result }); + const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result); + } + bt.function.finishAirBookkeeping(); + } +}; + pub fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, @@ -684,8 +721,16 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, buf); } - @panic("TODO implement asm return"); - //return self.fail("TODO implement asm return for {}", .{self.target.cpu.arch}); + var bt = try self.iterateBigTomb(inst, outputs.len + inputs.len); + for 
(outputs) |output| { + if (output == .none) continue; + + bt.feed(output); + } + for (inputs) |input| { + bt.feed(input); + } + return bt.finishAir(result); } fn airArg(self: *Self, inst: Air.Inst.Index) !void { @@ -1071,13 +1116,65 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live self.finishAirBookkeeping(); } -fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, off: i13, abi_size: u64) !void { - _ = value_reg; - _ = addr_reg; - _ = off; +fn genLoad(self: *Self, value_reg: Register, addr_reg: Register, comptime off_type: type, off: off_type, abi_size: u64) !void { + assert(off_type == Register or off_type == i13); + + const is_imm = (off_type == i13); + const rs2_or_imm = if (is_imm) .{ .imm = off } else .{ .rs2 = off }; switch (abi_size) { - 1, 2, 4, 8 => return self.fail("TODO: A.27 Load Integer", .{}), + 1 => { + _ = try self.addInst(.{ + .tag = .ldub, + .data = .{ + .arithmetic_3op = .{ + .is_imm = is_imm, + .rd = value_reg, + .rs1 = addr_reg, + .rs2_or_imm = rs2_or_imm, + }, + }, + }); + }, + 2 => { + _ = try self.addInst(.{ + .tag = .lduh, + .data = .{ + .arithmetic_3op = .{ + .is_imm = is_imm, + .rd = value_reg, + .rs1 = addr_reg, + .rs2_or_imm = rs2_or_imm, + }, + }, + }); + }, + 4 => { + _ = try self.addInst(.{ + .tag = .lduw, + .data = .{ + .arithmetic_3op = .{ + .is_imm = is_imm, + .rd = value_reg, + .rs1 = addr_reg, + .rs2_or_imm = rs2_or_imm, + }, + }, + }); + }, + 8 => { + _ = try self.addInst(.{ + .tag = .ldx, + .data = .{ + .arithmetic_3op = .{ + .is_imm = is_imm, + .rd = value_reg, + .rs1 = addr_reg, + .rs2_or_imm = rs2_or_imm, + }, + }, + }); + }, 3, 5, 6, 7 => return self.fail("TODO: genLoad for more abi_sizes", .{}), else => unreachable, } @@ -1226,12 +1323,12 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
try self.genSetReg(ty, reg, .{ .immediate = addr }); - try self.genLoad(reg, reg, 0, ty.abiSize(self.target.*)); + try self.genLoad(reg, reg, i13, 0, ty.abiSize(self.target.*)); }, .stack_offset => |off| { const simm13 = math.cast(u12, off) catch return self.fail("TODO larger stack offsets", .{}); - try self.genLoad(reg, .sp, simm13, ty.abiSize(self.target.*)); + try self.genLoad(reg, .sp, i13, simm13, ty.abiSize(self.target.*)); }, } } @@ -1269,14 +1366,10 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return MCValue{ .undef = {} }; if (typed_value.val.castTag(.decl_ref)) |payload| { - _ = payload; - return self.fail("TODO implement lowerDeclRef non-mut", .{}); - // return self.lowerDeclRef(typed_value, payload.data); + return self.lowerDeclRef(typed_value, payload.data); } if (typed_value.val.castTag(.decl_ref_mut)) |payload| { - _ = payload; - return self.fail("TODO implement lowerDeclRef mut", .{}); - // return self.lowerDeclRef(typed_value, payload.data.decl); + return self.lowerDeclRef(typed_value, payload.data.decl); } const target = self.target.*; @@ -1315,6 +1408,39 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } } +fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb { + try self.ensureProcessDeathCapacity(operand_count + 1); + return BigTomb{ + .function = self, + .inst = inst, + .tomb_bits = self.liveness.getTombBits(inst), + .big_tomb_bits = self.liveness.special.get(inst) orelse 0, + .bit_index = 0, + }; +} + +fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes: u64 = @divExact(ptr_bits, 8); + + // TODO this feels clunky. Perhaps we should check for it in `genTypedValue`? 
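+    // Pointers to zero-bit types carry no runtime data, so they lower to `.none`
+    // instead of receiving a GOT entry; function pointers are exempted from this check.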
+ if (tv.ty.zigTypeTag() == .Pointer) blk: { + if (tv.ty.castPtrToFn()) |_| break :blk; + if (!tv.ty.elemType2().hasRuntimeBits()) { + return MCValue.none; + } + } + + decl.alive = true; + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else { + return self.fail("TODO codegen non-ELF const Decl pointer", .{}); + } +} + fn parseRegName(name: []const u8) ?Register { if (@hasDecl(Register, "parseRegName")) { return Register.parseRegName(name); diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index 7ff1aeb532..b209ce1636 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -59,6 +59,11 @@ pub fn emitMir( .jmpl => @panic("TODO implement sparcv9 jmpl"), .jmpl_i => @panic("TODO implement sparcv9 jmpl to reg"), + .ldub => try emit.mirArithmetic3Op(inst), + .lduh => try emit.mirArithmetic3Op(inst), + .lduw => try emit.mirArithmetic3Op(inst), + .ldx => try emit.mirArithmetic3Op(inst), + .@"or" => try emit.mirArithmetic3Op(inst), .nop => try emit.mirNop(), @@ -68,7 +73,7 @@ pub fn emitMir( .save => try emit.mirArithmetic3Op(inst), .restore => try emit.mirArithmetic3Op(inst), - .sethi => @panic("TODO implement sparcv9 sethi"), + .sethi => try emit.mirSethi(inst), .sllx => @panic("TODO implement sparcv9 sllx"), @@ -158,6 +163,10 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void { const imm = data.rs2_or_imm.imm; switch (tag) { .add => try emit.writeInstruction(Instruction.add(i13, rs1, imm, rd)), + .ldub => try emit.writeInstruction(Instruction.ldub(i13, rs1, imm, rd)), + .lduh => try emit.writeInstruction(Instruction.lduh(i13, rs1, imm, rd)), + .lduw => try emit.writeInstruction(Instruction.lduw(i13, rs1, imm, rd)), + .ldx => try emit.writeInstruction(Instruction.ldx(i13, rs1, imm, rd)), .@"or" => try emit.writeInstruction(Instruction.@"or"(i13, rs1, imm, rd)), .save => try emit.writeInstruction(Instruction.save(i13, rs1, imm, rd)), .restore => try emit.writeInstruction(Instruction.restore(i13, rs1, imm, rd)), @@ -168,6 +177,10 @@ fn mirArithmetic3Op(emit: *Emit, inst: Mir.Inst.Index) !void { const rs2 = data.rs2_or_imm.rs2; switch (tag) { .add => try emit.writeInstruction(Instruction.add(Register, rs1, rs2, rd)), + .ldub => try emit.writeInstruction(Instruction.ldub(Register, rs1, rs2, rd)), + .lduh => try emit.writeInstruction(Instruction.lduh(Register, rs1, rs2, rd)), + .lduw => try emit.writeInstruction(Instruction.lduw(Register, rs1, rs2, rd)), + .ldx => try emit.writeInstruction(Instruction.ldx(Register, rs1, rs2, rd)), .@"or" => try emit.writeInstruction(Instruction.@"or"(Register, rs1, rs2, rd)), .save => try emit.writeInstruction(Instruction.save(Register, rs1, rs2, rd)), .restore => try emit.writeInstruction(Instruction.restore(Register, rs1, rs2, rd)), @@ -181,6 +194,17 @@ fn mirNop(emit: *Emit) !void { try emit.writeInstruction(Instruction.nop()); } +fn mirSethi(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const data = emit.mir.instructions.items(.data)[inst].sethi; + + const imm = data.imm; + const rd = data.rd; + + assert(tag == .sethi); + try emit.writeInstruction(Instruction.sethi(imm, rd)); +} + fn mirTrap(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const data = emit.mir.instructions.items(.data)[inst].trap; diff --git 
a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index 02974dfda3..352019a8fa 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -60,6 +60,16 @@ pub const Inst = struct { jmpl, jmpl_i, + /// A.27 Load Integer + /// Those uses the arithmetic_3op field. + /// Note that the ldd variant of this instruction is deprecated, do not emit + /// it unless specifically requested (e.g. by inline assembly). + // TODO add other operations. + ldub, + lduh, + lduw, + ldx, + /// A.31 Logical Operations /// Those uses the arithmetic_3op field. // TODO add other operations. diff --git a/src/arch/sparcv9/bits.zig b/src/arch/sparcv9/bits.zig index 83c560e584..0e0ff71f86 100644 --- a/src/arch/sparcv9/bits.zig +++ b/src/arch/sparcv9/bits.zig @@ -979,6 +979,38 @@ pub const Instruction = union(enum) { }; } + pub fn ldub(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch (s2) { + Register => format3a(0b11, 0b00_0001, rs1, rs2, rd), + i13 => format3b(0b11, 0b00_0001, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn lduh(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch (s2) { + Register => format3a(0b11, 0b00_0010, rs1, rs2, rd), + i13 => format3b(0b11, 0b00_0010, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn lduw(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch (s2) { + Register => format3a(0b11, 0b00_0000, rs1, rs2, rd), + i13 => format3b(0b11, 0b00_0000, rs1, rs2, rd), + else => unreachable, + }; + } + + pub fn ldx(comptime s2: type, rs1: Register, rs2: s2, rd: Register) Instruction { + return switch (s2) { + Register => format3a(0b11, 0b00_1011, rs1, rs2, rd), + i13 => format3b(0b11, 0b00_1011, rs1, rs2, rd), + else => unreachable, + }; + } + pub fn nop() Instruction { return sethi(0, .g0); } From dcb12a7941371cee3b62cc3215d89c1f96577372 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Mon, 11 Apr 2022 20:27:38 +0700 Subject: [PATCH 20/29] stage2: sparcv9: Use regular structs to encode instructions Currently packed structs still has endian-dependent behavior, so it results in code that is not portable across platforms (see also issue 10113). --- src/arch/sparcv9/Emit.zig | 2 +- src/arch/sparcv9/Mir.zig | 2 +- src/arch/sparcv9/bits.zig | 110 ++++++++++++++++++++------------------ 3 files changed, 60 insertions(+), 54 deletions(-) diff --git a/src/arch/sparcv9/Emit.zig b/src/arch/sparcv9/Emit.zig index b209ce1636..b811a3567f 100644 --- a/src/arch/sparcv9/Emit.zig +++ b/src/arch/sparcv9/Emit.zig @@ -290,7 +290,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError { fn writeInstruction(emit: *Emit, instruction: Instruction) !void { // SPARCv9 instructions are always arranged in BE regardless of the - // endianness mode the CPU is running in. + // endianness mode the CPU is running in (Section 3.1 of the ISA specification). // This is to ease porting in case someone wants to do a LE SPARCv9 backend. const endian = Endian.Big; diff --git a/src/arch/sparcv9/Mir.zig b/src/arch/sparcv9/Mir.zig index 352019a8fa..c79ebdcac1 100644 --- a/src/arch/sparcv9/Mir.zig +++ b/src/arch/sparcv9/Mir.zig @@ -62,7 +62,7 @@ pub const Inst = struct { /// A.27 Load Integer /// Those uses the arithmetic_3op field. - /// Note that the ldd variant of this instruction is deprecated, do not emit + /// Note that the ldd variant of this instruction is deprecated, so do not emit /// it unless specifically requested (e.g. by inline assembly). // TODO add other operations. 
ldub, diff --git a/src/arch/sparcv9/bits.zig b/src/arch/sparcv9/bits.zig index 0e0ff71f86..3e62b68572 100644 --- a/src/arch/sparcv9/bits.zig +++ b/src/arch/sparcv9/bits.zig @@ -164,27 +164,33 @@ pub const Instruction = union(enum) { // name them with letters since there's no official naming scheme. // TODO: need to rename the minor formats to a more descriptive name. + // I am using regular structs instead of packed ones to avoid + // endianness-dependent behavior when constructing the actual + // assembly instructions. + // See also: https://github.com/ziglang/zig/issues/10113 + // TODO: change it back to packed structs once the issue is resolved. + // Format 1 (op = 1): CALL - format_1: packed struct { + format_1: struct { op: u2 = 0b01, disp30: u30, }, // Format 2 (op = 0): SETHI & Branches (Bicc, BPcc, BPr, FBfcc, FBPfcc) - format_2a: packed struct { + format_2a: struct { op: u2 = 0b00, rd: u5, op2: u3, imm22: u22, }, - format_2b: packed struct { + format_2b: struct { op: u2 = 0b00, a: u1, cond: u4, op2: u3, disp22: u22, }, - format_2c: packed struct { + format_2c: struct { op: u2 = 0b00, a: u1, cond: u4, @@ -194,7 +200,7 @@ pub const Instruction = union(enum) { p: u1, disp19: u19, }, - format_2d: packed struct { + format_2d: struct { op: u2 = 0b00, a: u1, fixed: u1 = 0b0, @@ -207,7 +213,7 @@ pub const Instruction = union(enum) { }, // Format 3 (op = 2 or 3): Arithmetic, Logical, MOVr, MEMBAR, Load, and Store - format_3a: packed struct { + format_3a: struct { op: u2, rd: u5, op3: u6, @@ -224,7 +230,7 @@ pub const Instruction = union(enum) { i: u1 = 0b1, simm13: u13, }, - format_3c: packed struct { + format_3c: struct { op: u2, reserved1: u5 = 0b00000, op3: u6, @@ -241,7 +247,7 @@ pub const Instruction = union(enum) { i: u1 = 0b1, simm13: u13, }, - format_3e: packed struct { + format_3e: struct { op: u2, rd: u5, op3: u6, @@ -260,7 +266,7 @@ pub const Instruction = union(enum) { rcond: u3, simm10: u10, }, - format_3g: packed struct { + format_3g: struct { op: u2, rd: u5, op3: u6, @@ -269,7 +275,7 @@ pub const Instruction = union(enum) { reserved: u8 = 0b00000000, rs2: u5, }, - format_3h: packed struct { + format_3h: struct { op: u2 = 0b10, fixed1: u5 = 0b00000, op3: u6 = 0b101000, @@ -279,7 +285,7 @@ pub const Instruction = union(enum) { cmask: u3, mmask: u4, }, - format_3i: packed struct { + format_3i: struct { op: u2, rd: u5, op3: u6, @@ -288,13 +294,13 @@ pub const Instruction = union(enum) { imm_asi: u8, rs2: u5, }, - format_3j: packed struct { + format_3j: struct { op: u2, impl_dep1: u5, op3: u6, impl_dep2: u19, }, - format_3k: packed struct { + format_3k: struct { op: u2, rd: u5, op3: u6, @@ -304,7 +310,7 @@ pub const Instruction = union(enum) { reserved: u7 = 0b0000000, rs2: u5, }, - format_3l: packed struct { + format_3l: struct { op: u2, rd: u5, op3: u6, @@ -314,7 +320,7 @@ pub const Instruction = union(enum) { reserved: u7 = 0b0000000, shcnt32: u5, }, - format_3m: packed struct { + format_3m: struct { op: u2, rd: u5, op3: u6, @@ -324,7 +330,7 @@ pub const Instruction = union(enum) { reserved: u6 = 0b000000, shcnt64: u6, }, - format_3n: packed struct { + format_3n: struct { op: u2, rd: u5, op3: u6, @@ -332,7 +338,7 @@ pub const Instruction = union(enum) { opf: u9, rs2: u5, }, - format_3o: packed struct { + format_3o: struct { op: u2, fixed: u3 = 0b000, cc1: u1, @@ -342,7 +348,7 @@ pub const Instruction = union(enum) { opf: u9, rs2: u5, }, - format_3p: packed struct { + format_3p: struct { op: u2, rd: u5, op3: u6, @@ -350,20 +356,20 @@ pub const Instruction = union(enum) { opf: 
u9, rs2: u5, }, - format_3q: packed struct { + format_3q: struct { op: u2, rd: u5, op3: u6, rs1: u5, reserved: u14 = 0b00000000000000, }, - format_3r: packed struct { + format_3r: struct { op: u2, fcn: u5, op3: u6, reserved: u19 = 0b0000000000000000000, }, - format_3s: packed struct { + format_3s: struct { op: u2, rd: u5, op3: u6, @@ -371,7 +377,7 @@ pub const Instruction = union(enum) { }, //Format 4 (op = 2): MOVcc, FMOVr, FMOVcc, and Tcc - format_4a: packed struct { + format_4a: struct { op: u2 = 0b10, rd: u5, op3: u6, @@ -392,7 +398,7 @@ pub const Instruction = union(enum) { cc0: u1, simm11: u11, }, - format_4c: packed struct { + format_4c: struct { op: u2 = 0b10, rd: u5, op3: u6, @@ -415,7 +421,7 @@ pub const Instruction = union(enum) { cc0: u1, simm11: u11, }, - format_4e: packed struct { + format_4e: struct { op: u2 = 0b10, rd: u5, op3: u6, @@ -426,7 +432,7 @@ pub const Instruction = union(enum) { reserved: u4 = 0b0000, sw_trap: u7, }, - format_4f: packed struct { + format_4f: struct { op: u2 = 0b10, rd: u5, op3: u6, @@ -436,7 +442,7 @@ pub const Instruction = union(enum) { opf_low: u5, rs2: u5, }, - format_4g: packed struct { + format_4g: struct { op: u2 = 0b10, rd: u5, op3: u6, @@ -512,37 +518,37 @@ pub const Instruction = union(enum) { pub fn toU32(self: Instruction) u32 { // TODO: Remove this once packed structs work. return switch (self) { - .format_1 => |v| @bitCast(u32, v), - .format_2a => |v| @bitCast(u32, v), - .format_2b => |v| @bitCast(u32, v), - .format_2c => |v| @bitCast(u32, v), - .format_2d => |v| @bitCast(u32, v), - .format_3a => |v| @bitCast(u32, v), + .format_1 => |v| (@as(u32, v.op) << 30) | @as(u32, v.disp30), + .format_2a => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op2) << 22) | @as(u32, v.imm22), + .format_2b => |v| (@as(u32, v.op) << 30) | (@as(u32, v.a) << 29) | (@as(u32, v.cond) << 25) | (@as(u32, v.op2) << 22) | @as(u32, v.disp22), + .format_2c => |v| (@as(u32, v.op) << 30) | (@as(u32, v.a) << 29) | (@as(u32, v.cond) << 25) | (@as(u32, v.op2) << 22) | (@as(u32, v.cc1) << 21) | (@as(u32, v.cc0) << 20) | (@as(u32, v.p) << 19) | @as(u32, v.disp19), + .format_2d => |v| (@as(u32, v.op) << 30) | (@as(u32, v.a) << 29) | (@as(u32, v.fixed) << 28) | (@as(u32, v.rcond) << 25) | (@as(u32, v.op2) << 22) | (@as(u32, v.d16hi) << 20) | (@as(u32, v.p) << 19) | (@as(u32, v.rs1) << 14) | @as(u32, v.d16lo), + .format_3a => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.reserved) << 5) | @as(u32, v.rs2), .format_3b => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | @as(u32, v.simm13), - .format_3c => |v| @bitCast(u32, v), + .format_3c => |v| (@as(u32, v.op) << 30) | (@as(u32, v.reserved1) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.reserved2) << 5) | @as(u32, v.rs2), .format_3d => |v| (@as(u32, v.op) << 30) | (@as(u32, v.reserved) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | @as(u32, v.simm13), - .format_3e => |v| @bitCast(u32, v), + .format_3e => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.rcond) << 10) | (@as(u32, v.reserved) << 5) | @as(u32, v.rs2), .format_3f => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) 
| (@as(u32, v.rcond) << 10) | @as(u32, v.simm10), - .format_3g => |v| @bitCast(u32, v), - .format_3h => |v| @bitCast(u32, v), - .format_3i => |v| @bitCast(u32, v), - .format_3j => |v| @bitCast(u32, v), - .format_3k => |v| @bitCast(u32, v), - .format_3l => |v| @bitCast(u32, v), - .format_3m => |v| @bitCast(u32, v), - .format_3n => |v| @bitCast(u32, v), - .format_3o => |v| @bitCast(u32, v), - .format_3p => |v| @bitCast(u32, v), - .format_3q => |v| @bitCast(u32, v), - .format_3r => |v| @bitCast(u32, v), - .format_3s => |v| @bitCast(u32, v), - .format_4a => |v| @bitCast(u32, v), + .format_3g => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.reserved) << 5) | @as(u32, v.rs2), + .format_3h => |v| (@as(u32, v.op) << 30) | (@as(u32, v.fixed1) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.fixed2) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.reserved) << 7) | (@as(u32, v.cmask) << 4) | @as(u32, v.mmask), + .format_3i => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.imm_asi) << 5) | @as(u32, v.rs2), + .format_3j => |v| (@as(u32, v.op) << 30) | (@as(u32, v.impl_dep1) << 25) | (@as(u32, v.op3) << 19) | @as(u32, v.impl_dep2), + .format_3k => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.x) << 12) | (@as(u32, v.reserved) << 5) | @as(u32, v.rs2), + .format_3l => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.x) << 12) | (@as(u32, v.reserved) << 5) | @as(u32, v.shcnt32), + .format_3m => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.x) << 12) | (@as(u32, v.reserved) << 6) | @as(u32, v.shcnt64), + .format_3n => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.reserved) << 14) | (@as(u32, v.opf) << 5) | @as(u32, v.rs2), + .format_3o => |v| (@as(u32, v.op) << 30) | (@as(u32, v.fixed) << 27) | (@as(u32, v.cc1) << 26) | (@as(u32, v.cc0) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.opf) << 5) | @as(u32, v.rs2), + .format_3p => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.opf) << 5) | @as(u32, v.rs2), + .format_3q => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | @as(u32, v.reserved), + .format_3r => |v| (@as(u32, v.op) << 30) | (@as(u32, v.fcn) << 25) | (@as(u32, v.op3) << 19) | @as(u32, v.reserved), + .format_3s => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | @as(u32, v.reserved), + .format_4a => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.cc1) << 12) | (@as(u32, v.cc0) << 11) | (@as(u32, v.reserved) << 5) | @as(u32, v.rs2), .format_4b => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.cc1) << 12) | (@as(u32, v.cc0) << 11) | @as(u32, v.simm11), - .format_4c => |v| @bitCast(u32, v), + .format_4c => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.cc2) << 18) | (@as(u32, v.cond) 
<< 14) | (@as(u32, v.i) << 13) | (@as(u32, v.cc1) << 12) | (@as(u32, v.cc0) << 11) | (@as(u32, v.reserved) << 5) | @as(u32, v.rs2), .format_4d => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.cc2) << 18) | (@as(u32, v.cond) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.cc1) << 12) | (@as(u32, v.cc0) << 11) | @as(u32, v.simm11), - .format_4e => |v| @bitCast(u32, v), - .format_4f => |v| @bitCast(u32, v), - .format_4g => |v| @bitCast(u32, v), + .format_4e => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.i) << 13) | (@as(u32, v.cc1) << 12) | (@as(u32, v.cc0) << 11) | (@as(u32, v.reserved) << 7) | @as(u32, v.sw_trap), + .format_4f => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.rs1) << 14) | (@as(u32, v.fixed) << 13) | (@as(u32, v.rcond) << 10) | (@as(u32, v.opf_low) << 5) | @as(u32, v.rs2), + .format_4g => |v| (@as(u32, v.op) << 30) | (@as(u32, v.rd) << 25) | (@as(u32, v.op3) << 19) | (@as(u32, v.fixed) << 18) | (@as(u32, v.cond) << 14) | (@as(u32, v.opf_cc) << 11) | (@as(u32, v.opf_low) << 5) | @as(u32, v.rs2), }; } From b916ba18b6ace62fccc74eb11205946842bba66b Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 13 Apr 2022 19:39:21 +0700 Subject: [PATCH 21/29] stage2: sparcv9: Fix Tcc encoding --- src/arch/sparcv9/bits.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/arch/sparcv9/bits.zig b/src/arch/sparcv9/bits.zig index 3e62b68572..bc8b8822b7 100644 --- a/src/arch/sparcv9/bits.zig +++ b/src/arch/sparcv9/bits.zig @@ -1061,7 +1061,7 @@ pub const Instruction = union(enum) { // Tcc instructions abuse the rd field to store the conditionals. return switch (s2) { Register => format4a(0b11_1010, ccr, rs1, rs2, @intToEnum(Register, cond)), - u7 => format4e(0b00_0100, ccr, rs1, @intToEnum(Register, cond), rs2), + u7 => format4e(0b11_1010, ccr, rs1, @intToEnum(Register, cond), rs2), else => unreachable, }; } From 43e69be196a808c66d2c9673e7293debec01ad5d Mon Sep 17 00:00:00 2001 From: Koakuma Date: Wed, 13 Apr 2022 19:56:39 +0700 Subject: [PATCH 22/29] stage2: sparcv9: Add exit2 implementation --- lib/std/start.zig | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/lib/std/start.zig b/lib/std/start.zig index 20f369476d..b261ed296e 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -166,6 +166,14 @@ fn exit2(code: usize) noreturn { : "rcx", "r11", "memory" ); }, + .sparcv9 => { + asm volatile ("ta 0x6d" + : + : [number] "{g1}" (1), + [arg1] "{o0}" (code) + : "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7", "memory" + ); + }, else => @compileError("TODO"), }, // exits(0) From a6ce2fc3dce018964f73cf88f127ec43bf901b6e Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 14 Apr 2022 21:54:07 +0700 Subject: [PATCH 23/29] linker: ELF: Add page sizes for ppc64le and sparcv9 --- src/link/Elf.zig | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 9e1ed0cf54..bc46c6371c 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -65,7 +65,7 @@ phdr_load_rw_index: ?u16 = null, phdr_shdr_table: std.AutoHashMapUnmanaged(u16, u16) = .{}, entry_addr: ?u64 = null, -page_size: u16, +page_size: u32, shstrtab: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8){}, shstrtab_index: ?u16 = null, @@ -304,7 +304,12 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Elf { }; const self = try gpa.create(Elf); errdefer gpa.destroy(self); - const page_size: 
u16 = 0x1000; // TODO ppc64le requires 64KB + + const page_size: u32 = switch (options.target.cpu.arch) { + .powerpc64le => 0x10000, + .sparcv9 => 0x2000, + else => 0x1000, + }; var dwarf: ?Dwarf = if (!options.strip and options.module != null) Dwarf.init(gpa, .elf, options.target) @@ -472,7 +477,7 @@ pub fn allocatedSize(self: *Elf, start: u64) u64 { return min_pos - start; } -pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u16) u64 { +pub fn findFreeSpace(self: *Elf, object_size: u64, min_alignment: u32) u64 { var start: u64 = 0; while (self.detectAllocCollision(start, object_size)) |item_end| { start = mem.alignForwardGeneric(u64, item_end, min_alignment); From 47b136e3b353932d754c359ae63a1206842d0d70 Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 14 Apr 2022 21:55:56 +0700 Subject: [PATCH 24/29] stage2: Add SPARC function alignment This is based on @kubkon's suggestion. --- src/target.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/target.zig b/src/target.zig index 2eff4f8445..aafd65e327 100644 --- a/src/target.zig +++ b/src/target.zig @@ -669,6 +669,7 @@ pub fn defaultFunctionAlignment(target: std.Target) u32 { return switch (target.cpu.arch) { .arm, .armeb => 4, .aarch64, .aarch64_32, .aarch64_be => 4, + .sparc, .sparcel, .sparcv9 => 4, .riscv64 => 2, else => 1, }; From f6b95166ebfce9faf3cc0806c6feb089e6922a2e Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 14 Apr 2022 21:59:25 +0700 Subject: [PATCH 25/29] stage2: sparcv9: Add simple test case --- test/stage2/sparcv9.zig | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 test/stage2/sparcv9.zig diff --git a/test/stage2/sparcv9.zig b/test/stage2/sparcv9.zig new file mode 100644 index 0000000000..d5611a7fae --- /dev/null +++ b/test/stage2/sparcv9.zig @@ -0,0 +1,39 @@ +const std = @import("std"); +const TestContext = @import("../../src/test.zig").TestContext; + +const linux_sparcv9 = std.zig.CrossTarget{ + .cpu_arch = .sparcv9, + .os_tag = .linux, +}; + +pub fn addCases(ctx: *TestContext) !void { + { + var case = ctx.exe("sparcv9 hello world", linux_sparcv9); + // Regular old hello world + case.addCompareOutput( + \\const msg = "Hello, World!\n"; + \\ + \\pub export fn _start() noreturn { + \\ asm volatile ("ta 0x6d" + \\ : + \\ : [number] "{g1}" (4), + \\ [arg1] "{o0}" (1), + \\ [arg2] "{o1}" (@ptrToInt(msg)), + \\ [arg3] "{o2}" (msg.len) + \\ : "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7", "memory" + \\ ); + \\ + \\ asm volatile ("ta 0x6d" + \\ : + \\ : [number] "{g1}" (1), + \\ [arg1] "{o0}" (0) + \\ : "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7", "memory" + \\ ); + \\ + \\ unreachable; + \\} + , + "Hello, World!\n", + ); + } +} From 9201fbe85bff4ae40573b64382131f779a7ed85c Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 14 Apr 2022 22:34:51 +0700 Subject: [PATCH 26/29] stage2: sparcv9: Add cmp_lt_errors_len AIR inst & fix asm parsing --- src/arch/sparcv9/CodeGen.zig | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/arch/sparcv9/CodeGen.zig b/src/arch/sparcv9/CodeGen.zig index de7c786096..7de035bc5c 100644 --- a/src/arch/sparcv9/CodeGen.zig +++ b/src/arch/sparcv9/CodeGen.zig @@ -475,6 +475,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .cmp_gt => @panic("TODO try self.airCmp(inst, .gt)"), .cmp_neq => @panic("TODO try self.airCmp(inst, .neq)"), .cmp_vector => @panic("TODO try self.airCmpVector(inst)"), + .cmp_lt_errors_len => @panic("TODO try self.airCmpLtErrorsLen(inst)"), .bool_and => 
@panic("TODO try self.airBoolOp(inst)"), .bool_or => @panic("TODO try self.airBoolOp(inst)"), @@ -647,10 +648,12 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void { } else null; for (inputs) |input| { - const constraint = std.mem.sliceTo(std.mem.sliceAsBytes(self.air.extra[extra_i..]), 0); + const input_bytes = std.mem.sliceAsBytes(self.air.extra[extra_i..]); + const constraint = std.mem.sliceTo(input_bytes, 0); + const input_name = std.mem.sliceTo(input_bytes[constraint.len + 1 ..], 0); // This equation accounts for the fact that even if we have exactly 4 bytes // for the string, we still use the next u32 for the null terminator. - extra_i += constraint.len / 4 + 1; + extra_i += (constraint.len + input_name.len + 1) / 4 + 1; if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); From 2ee83e76f7acc4275f654d2a99d0df2715cec17f Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 14 Apr 2022 23:15:56 +0700 Subject: [PATCH 27/29] stage2: Adjust line numbers in tests --- test/stage2/aarch64.zig | 2 +- test/stage2/x86_64.zig | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/test/stage2/aarch64.zig b/test/stage2/aarch64.zig index d2f40d922c..84334c2a29 100644 --- a/test/stage2/aarch64.zig +++ b/test/stage2/aarch64.zig @@ -159,7 +159,7 @@ pub fn addCases(ctx: *TestContext) !void { { var case = ctx.exe("hello world with updates", macos_aarch64); case.addError("", &[_][]const u8{ - ":108:9: error: struct 'tmp.tmp' has no member named 'main'", + ":109:9: error: struct 'tmp.tmp' has no member named 'main'", }); // Incorrect return type diff --git a/test/stage2/x86_64.zig b/test/stage2/x86_64.zig index a7ebce36d3..a15d2f8ca0 100644 --- a/test/stage2/x86_64.zig +++ b/test/stage2/x86_64.zig @@ -719,7 +719,7 @@ pub fn addCases(ctx: *TestContext) !void { ); switch (target.getOsTag()) { .linux => try case.files.append(.{ - .src = + .src = \\pub fn print() void { \\ asm volatile ("syscall" \\ : @@ -735,7 +735,7 @@ pub fn addCases(ctx: *TestContext) !void { .path = "print.zig", }), .macos => try case.files.append(.{ - .src = + .src = \\extern "c" fn write(usize, usize, usize) usize; \\ \\pub fn print() void { @@ -796,7 +796,7 @@ pub fn addCases(ctx: *TestContext) !void { ); switch (target.getOsTag()) { .linux => try case.files.append(.{ - .src = + .src = \\// dummy comment to make print be on line 2 \\fn print() void { \\ asm volatile ("syscall" @@ -813,7 +813,7 @@ pub fn addCases(ctx: *TestContext) !void { .path = "print.zig", }), .macos => try case.files.append(.{ - .src = + .src = \\extern "c" fn write(usize, usize, usize) usize; \\fn print() void { \\ _ = write(1, @ptrToInt("Hello, World!\n"), 14); @@ -1925,7 +1925,7 @@ fn addLinuxTestCases(ctx: *TestContext) !void { var case = ctx.exe("hello world with updates", linux_x64); case.addError("", &[_][]const u8{ - ":108:9: error: struct 'tmp.tmp' has no member named 'main'", + ":109:9: error: struct 'tmp.tmp' has no member named 'main'", }); // Incorrect return type @@ -2176,7 +2176,7 @@ fn addMacOsTestCases(ctx: *TestContext) !void { { var case = ctx.exe("darwin hello world with updates", macos_x64); case.addError("", &[_][]const u8{ - ":108:9: error: struct 'tmp.tmp' has no member named 'main'", + ":109:9: error: struct 'tmp.tmp' has no member named 'main'", }); // Incorrect return type From e791f062ba3bbae62175076a2b04c06099c9397a Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 14 Apr 2022 23:24:46 +0700 Subject: 
[PATCH 28/29] stage2: sparcv9: Load tests to the list of testcases --- test/cases.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/test/cases.zig b/test/cases.zig index 942119f780..0fb6f381dd 100644 --- a/test/cases.zig +++ b/test/cases.zig @@ -16,6 +16,7 @@ pub fn addCases(ctx: *TestContext) !void { try @import("stage2/riscv64.zig").addCases(ctx); try @import("stage2/plan9.zig").addCases(ctx); try @import("stage2/x86_64.zig").addCases(ctx); + try @import("stage2/sparcv9.zig").addCases(ctx); // https://github.com/ziglang/zig/issues/10968 //try @import("stage2/nvptx.zig").addCases(ctx); } From c07213269fe14235d75d8d768984e329cdfcb4fe Mon Sep 17 00:00:00 2001 From: Koakuma Date: Thu, 14 Apr 2022 23:26:03 +0700 Subject: [PATCH 29/29] stage2: zig fmt --- lib/std/start.zig | 2 +- src/arch/riscv64/Mir.zig | 1 - test/stage2/x86_64.zig | 8 ++++---- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/lib/std/start.zig b/lib/std/start.zig index b261ed296e..f4a5cbb763 100644 --- a/lib/std/start.zig +++ b/lib/std/start.zig @@ -170,7 +170,7 @@ fn exit2(code: usize) noreturn { asm volatile ("ta 0x6d" : : [number] "{g1}" (1), - [arg1] "{o0}" (code) + [arg1] "{o0}" (code), : "o0", "o1", "o2", "o3", "o4", "o5", "o6", "o7", "memory" ); }, diff --git a/src/arch/riscv64/Mir.zig b/src/arch/riscv64/Mir.zig index 7b5049b7d4..5df3a86229 100644 --- a/src/arch/riscv64/Mir.zig +++ b/src/arch/riscv64/Mir.zig @@ -144,4 +144,3 @@ pub fn extraData(mir: Mir, comptime T: type, index: usize) struct { data: T, end .end = i, }; } - diff --git a/test/stage2/x86_64.zig b/test/stage2/x86_64.zig index a15d2f8ca0..214b32b025 100644 --- a/test/stage2/x86_64.zig +++ b/test/stage2/x86_64.zig @@ -719,7 +719,7 @@ pub fn addCases(ctx: *TestContext) !void { ); switch (target.getOsTag()) { .linux => try case.files.append(.{ - .src = + .src = \\pub fn print() void { \\ asm volatile ("syscall" \\ : @@ -735,7 +735,7 @@ pub fn addCases(ctx: *TestContext) !void { .path = "print.zig", }), .macos => try case.files.append(.{ - .src = + .src = \\extern "c" fn write(usize, usize, usize) usize; \\ \\pub fn print() void { @@ -796,7 +796,7 @@ pub fn addCases(ctx: *TestContext) !void { ); switch (target.getOsTag()) { .linux => try case.files.append(.{ - .src = + .src = \\// dummy comment to make print be on line 2 \\fn print() void { \\ asm volatile ("syscall" @@ -813,7 +813,7 @@ pub fn addCases(ctx: *TestContext) !void { .path = "print.zig", }), .macos => try case.files.append(.{ - .src = + .src = \\extern "c" fn write(usize, usize, usize) usize; \\fn print() void { \\ _ = write(1, @ptrToInt("Hello, World!\n"), 14);