From 28cc3639476fae72bae3836e8776966386915142 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 13 Aug 2022 17:42:11 +0200 Subject: [PATCH 01/30] stage2 ARM: improve Mir representation of mov and cmp --- src/arch/arm/CodeGen.zig | 81 ++++++++++++++++------------------------ src/arch/arm/Emit.zig | 48 ++++++++++++++++++------ src/arch/arm/Mir.zig | 14 +++++++ 3 files changed, 82 insertions(+), 61 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index cefcf3b114..0c683c6899 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -438,9 +438,8 @@ fn gen(self: *Self) !void { // mov fp, sp _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = .fp, - .rn = .r0, .op = Instruction.Operand.reg(.sp, Instruction.Operand.Shift.none), } }, }); @@ -531,9 +530,8 @@ fn gen(self: *Self) !void { // mov sp, fp _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = .sp, - .rn = .r0, .op = Instruction.Operand.reg(.fp, Instruction.Operand.Shift.none), } }, }); @@ -1240,9 +1238,8 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .mvn, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = dest_reg, - .rn = undefined, .op = Instruction.Operand.reg(op_reg, Instruction.Operand.Shift.none), } }, }); @@ -1337,9 +1334,8 @@ fn minMax( _ = try self.addInst(.{ .tag = .mov, .cond = cond_choose_lhs, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = dest_reg, - .rn = .r0, .op = Instruction.Operand.reg(lhs_reg, Instruction.Operand.Shift.none), } }, }); @@ -1348,9 +1344,8 @@ fn minMax( _ = try self.addInst(.{ .tag = .mov, .cond = cond_choose_rhs, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = dest_reg, - .rn = .r0, .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), } }, }); @@ -1682,9 +1677,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { // mov rdlo, #0 _ = try 
self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = rdlo, - .rn = .r0, .op = Instruction.Operand.fromU32(0).?, } }, }); @@ -1693,9 +1687,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .mov, .cond = .ne, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = rdlo, - .rn = .r0, .op = Instruction.Operand.fromU32(1).?, } }, }); @@ -1707,9 +1700,8 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { _ = try self.addInst(.{ .tag = .mov, .cond = .ne, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = rdlo, - .rn = .r0, .op = Instruction.Operand.fromU32(1).?, } }, }); @@ -2670,7 +2662,7 @@ fn binOpRegister( defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg); const dest_reg = switch (mir_tag) { - .cmp => .r0, // cmp has no destination regardless + .cmp => undefined, // cmp has no destination regardless else => if (metadata) |md| blk: { if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) { break :blk lhs_reg; @@ -2690,7 +2682,6 @@ fn binOpRegister( .adds, .sub, .subs, - .cmp, .@"and", .orr, .eor, @@ -2699,6 +2690,10 @@ fn binOpRegister( .rn = lhs_reg, .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), } }, + .cmp => .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), + } }, .lsl, .asr, .lsr, @@ -2767,7 +2762,7 @@ fn binOpImmediate( defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); const dest_reg = switch (mir_tag) { - .cmp => .r0, // cmp has no destination reg + .cmp => undefined, // cmp has no destination reg else => if (metadata) |md| blk: { if (lhs_is_register and self.reuseOperand( md.inst, @@ -2789,7 +2784,6 @@ fn binOpImmediate( .adds, .sub, .subs, - .cmp, .@"and", .orr, .eor, @@ -2798,6 +2792,10 @@ fn binOpImmediate( .rn = lhs_reg, .op = Instruction.Operand.fromU32(rhs.immediate).?, } }, + .cmp => .{ .r_op_cmp = .{ + .rn = 
lhs_reg, + .op = Instruction.Operand.fromU32(rhs.immediate).?, + } }, .lsl, .asr, .lsr, @@ -3312,9 +3310,8 @@ fn genInlineMemcpy( // mov count, #0 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = count, - .rn = .r0, .op = Instruction.Operand.imm(0, 0), } }, }); @@ -3323,8 +3320,7 @@ fn genInlineMemcpy( // cmp count, len _ = try self.addInst(.{ .tag = .cmp, - .data = .{ .rr_op = .{ - .rd = .r0, + .data = .{ .r_op_cmp = .{ .rn = count, .op = Instruction.Operand.reg(len, Instruction.Operand.Shift.none), } }, @@ -3418,9 +3414,8 @@ fn genInlineMemsetCode( // mov count, #0 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = count, - .rn = .r0, .op = Instruction.Operand.imm(0, 0), } }, }); @@ -3429,8 +3424,7 @@ fn genInlineMemsetCode( // cmp count, len _ = try self.addInst(.{ .tag = .cmp, - .data = .{ .rr_op = .{ - .rd = .r0, + .data = .{ .r_op_cmp = .{ .rn = count, .op = Instruction.Operand.reg(len, Instruction.Operand.Shift.none), } }, @@ -4020,9 +4014,7 @@ fn condBr(self: *Self, condition: MCValue) !Mir.Inst.Index { // bne ... 
_ = try self.addInst(.{ .tag = .cmp, - .cond = .al, - .data = .{ .rr_op = .{ - .rd = .r0, + .data = .{ .r_op_cmp = .{ .rn = reg, .op = Instruction.Operand.imm(1, 0), } }, @@ -4196,8 +4188,7 @@ fn isNull(self: *Self, ty: Type, operand: MCValue) !MCValue { _ = try self.addInst(.{ .tag = .cmp, - .data = .{ .rr_op = .{ - .rd = undefined, + .data = .{ .r_op_cmp = .{ .rn = reg_mcv.register, .op = Instruction.Operand.fromU32(0).?, } }, @@ -4832,9 +4823,8 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro .register_v_flag => .vs, else => unreachable, }, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = cond_reg, - .rn = .r0, .op = Instruction.Operand.fromU32(1).?, } }, }); @@ -4935,9 +4925,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // mov reg, 0 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = zero, } }, }); @@ -4946,9 +4935,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void _ = try self.addInst(.{ .tag = .mov, .cond = condition, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = one, } }, }); @@ -4957,18 +4945,16 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void if (Instruction.Operand.fromU32(x)) |op| { _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = op, } }, }); } else if (Instruction.Operand.fromU32(~x)) |op| { _ = try self.addInst(.{ .tag = .mvn, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = op, } }, }); @@ -4984,9 +4970,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } else { _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = Instruction.Operand.imm(@truncate(u8, x), 0), } }, }); @@ -5028,9 +5013,8 @@ 
fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // orr reg, reg, #0xdd, 8 _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = Instruction.Operand.imm(@truncate(u8, x), 0), } }, }); @@ -5069,9 +5053,8 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void // mov reg, src_reg _ = try self.addInst(.{ .tag = .mov, - .data = .{ .rr_op = .{ + .data = .{ .r_op_mov = .{ .rd = reg, - .rn = .r0, .op = Instruction.Operand.reg(src_reg, Instruction.Operand.Shift.none), } }, }); diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index cf749792f0..8770ef1a24 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -385,20 +385,44 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { fn mirDataProcessing(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const cond = emit.mir.instructions.items(.cond)[inst]; - const rr_op = emit.mir.instructions.items(.data)[inst].rr_op; switch (tag) { - .add => try emit.writeInstruction(Instruction.add(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .adds => try emit.writeInstruction(Instruction.adds(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .@"and" => try emit.writeInstruction(Instruction.@"and"(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .cmp => try emit.writeInstruction(Instruction.cmp(cond, rr_op.rn, rr_op.op)), - .eor => try emit.writeInstruction(Instruction.eor(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .mov => try emit.writeInstruction(Instruction.mov(cond, rr_op.rd, rr_op.op)), - .mvn => try emit.writeInstruction(Instruction.mvn(cond, rr_op.rd, rr_op.op)), - .orr => try emit.writeInstruction(Instruction.orr(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .rsb => try emit.writeInstruction(Instruction.rsb(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .sub => try emit.writeInstruction(Instruction.sub(cond, rr_op.rd, rr_op.rn, rr_op.op)), - .subs => try 
emit.writeInstruction(Instruction.subs(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .add, + .adds, + .@"and", + .eor, + .orr, + .rsb, + .sub, + .subs, + => { + const rr_op = emit.mir.instructions.items(.data)[inst].rr_op; + switch (tag) { + .add => try emit.writeInstruction(Instruction.add(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .adds => try emit.writeInstruction(Instruction.adds(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .@"and" => try emit.writeInstruction(Instruction.@"and"(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .eor => try emit.writeInstruction(Instruction.eor(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .orr => try emit.writeInstruction(Instruction.orr(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .rsb => try emit.writeInstruction(Instruction.rsb(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .sub => try emit.writeInstruction(Instruction.sub(cond, rr_op.rd, rr_op.rn, rr_op.op)), + .subs => try emit.writeInstruction(Instruction.subs(cond, rr_op.rd, rr_op.rn, rr_op.op)), + else => unreachable, + } + }, + .cmp => { + const r_op_cmp = emit.mir.instructions.items(.data)[inst].r_op_cmp; + try emit.writeInstruction(Instruction.cmp(cond, r_op_cmp.rn, r_op_cmp.op)); + }, + .mov, + .mvn, + => { + const r_op_mov = emit.mir.instructions.items(.data)[inst].r_op_mov; + switch (tag) { + .mov => try emit.writeInstruction(Instruction.mov(cond, r_op_mov.rd, r_op_mov.op)), + .mvn => try emit.writeInstruction(Instruction.mvn(cond, r_op_mov.rd, r_op_mov.op)), + else => unreachable, + } + }, else => unreachable, } } diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig index d5da7e5d4e..45f89b8120 100644 --- a/src/arch/arm/Mir.zig +++ b/src/arch/arm/Mir.zig @@ -166,6 +166,20 @@ pub const Inst = struct { rd: Register, imm16: u16, }, + /// A register and an operand + /// + /// Used by mov and mvn + r_op_mov: struct { + rd: Register, + op: bits.Instruction.Operand, + }, + /// A register and an operand + /// + /// Used by cmp + r_op_cmp: struct { + rn: Register, + op: bits.Instruction.Operand, + }, /// Two 
registers and a shift amount /// /// Used by e.g. lsl From 0414ef591a0cb42629d7efb5912612f689ea8910 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 19 Aug 2022 12:39:39 +0200 Subject: [PATCH 02/30] stage2 ARM: introduce allocRegs This new register allocation mechanism which is designed to be more generic and flexible will replace binOp. --- src/arch/arm/CodeGen.zig | 392 ++++++++++++++++++++++++++------------- 1 file changed, 266 insertions(+), 126 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 0c683c6899..c10e0bb78d 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2232,7 +2232,13 @@ fn airUnaryMath(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ un_op, .none, .none }); } -fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { +fn reuseOperand( + self: *Self, + inst: Air.Inst.Index, + operand: Air.Inst.Ref, + op_index: Liveness.OperandInt, + mcv: MCValue, +) bool { if (!self.liveness.operandDies(inst, op_index)) return false; @@ -2580,39 +2586,206 @@ fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -/// Allocates a new register. If Inst in non-null, additionally tracks -/// this register and the corresponding int and removes all previous -/// tracking. Does not do the actual moving (that is handled by -/// genSetReg). 
-fn prepareNewRegForMoving( +/// An argument to a Mir instruction which is read (and possibly also +/// written to) by the respective instruction +const ReadArg = struct { + ty: Type, + bind: Bind, + class: RegisterManager.RegisterBitSet, + reg: *Register, + + const Bind = union(enum) { + inst: Air.Inst.Ref, + mcv: MCValue, + + fn resolveToMcv(bind: Bind, function: *Self) InnerError!MCValue { + return switch (bind) { + .inst => |inst| try function.resolveInst(inst), + .mcv => |mcv| mcv, + }; + } + }; +}; + +/// An argument to a Mir instruction which is written to (but not read +/// from) by the respective instruction +const WriteArg = struct { + ty: Type, + bind: Bind, + class: RegisterManager.RegisterBitSet, + reg: *Register, + + const Bind = union(enum) { + reg: Register, + none: void, + }; +}; + +/// Holds all data necessary for enabling the potential reuse of +/// operand registers as destinations +const ReuseMetadata = struct { + corresponding_inst: Air.Inst.Index, + + /// Maps every element index of read_args to the corresponding + /// index in the Air instruction + /// + /// When the order of read_args corresponds exactly to the order + /// of the inputs of the Air instruction, this would be e.g. + /// &.{ 0, 1 }. However, when the order is not the same or some + /// inputs to the Air instruction are omitted (e.g. when they can + /// be represented as immediates to the Mir instruction), + /// operand_mapping should reflect that fact. + operand_mapping: []const Liveness.OperandInt, +}; + +/// Allocate a set of registers for use as arguments for a Mir +/// instruction +/// +/// If the Mir instruction these registers are allocated for +/// corresponds exactly to a single Air instruction, populate +/// reuse_metadata in order to enable potential reuse of an operand as +/// the destination (provided that that operand dies in this +/// instruction). 
+/// +/// Reusing an operand register as destination is the only time two +/// arguments may share the same register. In all other cases, +/// allocRegs guarantees that a register will never be allocated to +/// more than one argument. +/// +/// Furthermore, allocReg guarantees that all arguments which are +/// already bound to registers before calling allocRegs will not +/// change their register binding. This is done by locking these +/// registers. +fn allocRegs( self: *Self, - track_inst: ?Air.Inst.Index, - register_class: RegisterManager.RegisterBitSet, - mcv: MCValue, -) !Register { - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - const reg = try self.register_manager.allocReg(track_inst, register_class); + read_args: []const ReadArg, + write_args: []const WriteArg, + reuse_metadata: ?ReuseMetadata, +) InnerError!void { + // Air instructions have either one output or none (cmp) + assert(!(reuse_metadata != null and write_args.len > 1)); // see note above - if (track_inst) |inst| { - // Overwrite the MCValue associated with this inst - branch.inst_table.putAssumeCapacity(inst, .{ .register = reg }); + // The operand mapping is a 1:1 mapping of read args to their + // corresponding operand index in the Air instruction + assert(!(reuse_metadata != null and reuse_metadata.?.operand_mapping.len != read_args.len)); // see note above - // If the previous MCValue occupied some space we track, we - // need to make sure it is marked as free now. - switch (mcv) { - .cpsr_flags => { - assert(self.cpsr_flags_inst.? 
== inst); - self.cpsr_flags_inst = null; - }, - .register => |prev_reg| { - assert(!self.register_manager.isRegFree(prev_reg)); - self.register_manager.freeReg(prev_reg); - }, - else => {}, + const locks = try self.gpa.alloc(?RegisterLock, read_args.len + write_args.len); + defer self.gpa.free(locks); + const read_locks = locks[0..read_args.len]; + const write_locks = locks[read_args.len..]; + + std.mem.set(?RegisterLock, locks, null); + defer for (locks) |lock| { + if (lock) |locked_reg| self.register_manager.unlockReg(locked_reg); + }; + + // When we reuse a read_arg as a destination, the corresponding + // MCValue of the read_arg will be set to .dead. In that case, we + // skip allocating this read_arg. + var reused_read_arg: ?usize = null; + + // Lock all args which are already allocated to registers + for (read_args) |arg, i| { + const mcv = try arg.bind.resolveToMcv(self); + if (mcv == .register) { + read_locks[i] = self.register_manager.lockReg(mcv.register); } } - return reg; + for (write_args) |arg, i| { + if (arg.bind == .reg) { + write_locks[i] = self.register_manager.lockReg(arg.bind.reg); + } + } + + // Allocate registers for all args which aren't allocated to + // registers yet + for (read_args) |arg, i| { + const mcv = try arg.bind.resolveToMcv(self); + if (mcv == .register) { + arg.reg.* = mcv.register; + } else { + const track_inst: ?Air.Inst.Index = switch (arg.bind) { + .inst => |inst| Air.refToIndex(inst).?, + else => null, + }; + arg.reg.* = try self.register_manager.allocReg(track_inst, arg.class); + read_locks[i] = self.register_manager.lockReg(arg.reg.*); + } + } + + if (reuse_metadata != null and write_args.len > 0) { + const inst = reuse_metadata.?.corresponding_inst; + const operand_mapping = reuse_metadata.?.operand_mapping; + const arg = write_args[0]; + if (arg.bind == .reg) { + arg.reg.* = arg.bind.reg; + } else { + reuse_operand: for (read_args) |read_arg, i| { + if (read_arg.bind == .inst) { + const operand = read_arg.bind.inst; + 
const mcv = try self.resolveInst(operand); + if (mcv == .register and + std.meta.eql(arg.class, read_arg.class) and + self.reuseOperand(inst, operand, operand_mapping[i], mcv)) + { + arg.reg.* = mcv.register; + write_locks[0] = null; + reused_read_arg = i; + break :reuse_operand; + } + } + } else { + arg.reg.* = try self.register_manager.allocReg(inst, arg.class); + write_locks[0] = self.register_manager.lockReg(arg.reg.*); + } + } + } else { + for (write_args) |arg, i| { + if (arg.bind == .reg) { + arg.reg.* = arg.bind.reg; + } else { + arg.reg.* = try self.register_manager.allocReg(null, arg.class); + write_locks[i] = self.register_manager.lockReg(arg.reg.*); + } + } + } + + // For all read_args which need to be moved from non-register to + // register, perform the move + for (read_args) |arg, i| { + if (reused_read_arg) |j| { + // Check whether this read_arg was reused + if (i == j) continue; + } + + const mcv = try arg.bind.resolveToMcv(self); + if (mcv != .register) { + if (arg.bind == .inst) { + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + const inst = Air.refToIndex(arg.bind.inst).?; + + // Overwrite the MCValue associated with this inst + branch.inst_table.putAssumeCapacity(inst, .{ .register = arg.reg.* }); + + // If the previous MCValue occupied some space we track, we + // need to make sure it is marked as free now. + switch (mcv) { + .cpsr_flags => { + assert(self.cpsr_flags_inst.? == inst); + self.cpsr_flags_inst = null; + }, + .register => |prev_reg| { + assert(!self.register_manager.isRegFree(prev_reg)); + self.register_manager.freeReg(prev_reg); + }, + else => {}, + } + } + + try self.genSetReg(arg.ty, arg.reg.*, mcv); + } + } } /// Don't call this function directly. Use binOp instead. 
@@ -2632,50 +2805,33 @@ fn binOpRegister( rhs_ty: Type, metadata: ?BinOpMetadata, ) !MCValue { - const lhs_is_register = lhs == .register; - const rhs_is_register = rhs == .register; + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var dest_reg: Register = undefined; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) + const lhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.lhs } else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const lhs_reg = if (lhs_is_register) lhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: { - break :inst Air.refToIndex(md.lhs).?; - } else null; - - break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs); + ReadArg.Bind{ .mcv = lhs }; + const rhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.rhs } + else + ReadArg.Bind{ .mcv = rhs }; + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, }; - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const rhs_reg = if (rhs_is_register) rhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: { - break :inst Air.refToIndex(md.rhs).?; - } else null; - - break :blk try self.prepareNewRegForMoving(track_inst, gp, rhs); + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, }; - const new_rhs_lock = self.register_manager.lockReg(rhs_reg); - defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = switch (mir_tag) { - .cmp => undefined, // cmp has no destination regardless - else => if (metadata) |md| blk: { - if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) { - break :blk lhs_reg; - } else if (rhs_is_register and 
self.reuseOperand(md.inst, md.rhs, 1, rhs)) { - break :blk rhs_reg; - } else { - break :blk try self.register_manager.allocReg(md.inst, gp); - } - } else try self.register_manager.allocReg(null, gp), - }; - - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); - if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs); + try self.allocRegs( + &read_args, + if (mir_tag == .cmp) &.{} else &write_args, + if (metadata) |md| .{ + .corresponding_inst = md.inst, + .operand_mapping = &.{ 0, 1 }, + } else null, + ); const mir_data: Mir.Inst.Data = switch (mir_tag) { .add, @@ -2741,43 +2897,33 @@ fn binOpImmediate( lhs_and_rhs_swapped: bool, metadata: ?BinOpMetadata, ) !MCValue { - const lhs_is_register = lhs == .register; + var lhs_reg: Register = undefined; + var dest_reg: Register = undefined; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) - else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const lhs_reg = if (lhs_is_register) lhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: { - break :inst Air.refToIndex( - if (lhs_and_rhs_swapped) md.rhs else md.lhs, - ).?; - } else null; - - break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs); - }; - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = switch (mir_tag) { - .cmp => undefined, // cmp has no destination reg - else => if (metadata) |md| blk: { - if (lhs_is_register and self.reuseOperand( - md.inst, - if (lhs_and_rhs_swapped) md.rhs else md.lhs, - if (lhs_and_rhs_swapped) 1 else 0, - lhs, - )) { - break :blk lhs_reg; - } else { - break :blk try self.register_manager.allocReg(md.inst, gp); - } - } else try self.register_manager.allocReg(null, gp), + const lhs_bind = blk: { + if (metadata) |md| { + const inst = if (lhs_and_rhs_swapped) md.rhs else md.lhs; + break :blk 
ReadArg.Bind{ .inst = inst }; + } else { + break :blk ReadArg.Bind{ .mcv = lhs }; + } }; - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0}; + try self.allocRegs( + &read_args, + if (mir_tag == .cmp) &.{} else &write_args, + if (metadata) |md| .{ + .corresponding_inst = md.inst, + .operand_mapping = operand_mapping, + } else null, + ); const mir_data: Mir.Inst.Data = switch (mir_tag) { .add, @@ -2983,33 +3129,27 @@ fn binOp( if (std.math.isPowerOfTwo(imm)) { const log2 = std.math.log2_int(u32, imm); - const lhs_is_register = lhs == .register; + var lhs_reg: Register = undefined; + var dest_reg: Register = undefined; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) + const lhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.lhs } else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const lhs_reg = if (lhs_is_register) lhs.register else blk: { - const track_inst: ?Air.Inst.Index = if (metadata) |md| inst: { - break :inst Air.refToIndex(md.lhs).?; - } else null; - - break :blk try self.prepareNewRegForMoving(track_inst, gp, lhs); + ReadArg.Bind{ .mcv = lhs }; + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, }; - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = if (metadata) |md| blk: { - if (lhs_is_register and self.reuseOperand(md.inst, md.lhs, 0, lhs)) { - break :blk lhs_reg; - } else { - break :blk try self.register_manager.allocReg(md.inst, gp); - } - } else try self.register_manager.allocReg(null, gp); - - 
if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (metadata) |md| .{ + .corresponding_inst = md.inst, + .operand_mapping = &.{0}, + } else null, + ); try self.truncRegister(lhs_reg, dest_reg, int_info.signedness, log2); return MCValue{ .register = dest_reg }; From 86dd123392c8ab26432303ff2e5c96e73d747757 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 20 Aug 2022 22:29:52 +0200 Subject: [PATCH 03/30] stage2 ARM: move cmp to new allocReg mechanism; remove from binOp --- src/arch/arm/CodeGen.zig | 270 ++++++++++++++++++++++++++------------- 1 file changed, 181 insertions(+), 89 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index c10e0bb78d..93db3dd76b 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1315,7 +1315,13 @@ fn minMax( // register. assert(lhs_reg != rhs_reg); // see note above - _ = try self.binOpRegister(.cmp, .{ .register = lhs_reg }, .{ .register = rhs_reg }, lhs_ty, rhs_ty, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), + } }, + }); const cond_choose_lhs: Condition = switch (tag) { .max => switch (int_info.signedness) { @@ -1473,7 +1479,6 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = null; const base_tag: Air.Inst.Tag = switch (tag) { .add_with_overflow => .add, @@ -1493,7 +1498,13 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits); // cmp dest, truncated - _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null); + _ 
= try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = dest_reg, + .op = Instruction.Operand.reg(truncated_reg, Instruction.Operand.Shift.none), + } }, + }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); @@ -1578,7 +1589,6 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = null; const base_tag: Mir.Inst.Tag = switch (int_info.signedness) { .signed => .smulbb, @@ -1598,7 +1608,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.truncRegister(dest_reg, truncated_reg, int_info.signedness, int_info.bits); // cmp dest, truncated - _ = try self.binOp(.cmp_eq, dest, .{ .register = truncated_reg }, Type.usize, Type.usize, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = dest_reg, + .op = Instruction.Operand.reg(truncated_reg, Instruction.Operand.Shift.none), + } }, + }); try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); @@ -1608,7 +1624,6 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = null; const base_tag: Mir.Inst.Tag = switch (int_info.signedness) { .signed => .smull, @@ -1672,7 +1687,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { try self.genSetStack(lhs_ty, stack_offset, .{ .register = truncated_reg }); // cmp truncated, rdlo - _ = try self.binOp(.cmp_eq, .{ .register = truncated_reg }, .{ .register = rdlo }, Type.usize, Type.usize, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + 
.rn = truncated_reg, + .op = Instruction.Operand.reg(rdlo, Instruction.Operand.Shift.none), + } }, + }); // mov rdlo, #0 _ = try self.addInst(.{ @@ -1694,7 +1715,13 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { }); // cmp rdhi, #0 - _ = try self.binOp(.cmp_eq, .{ .register = rdhi }, .{ .immediate = 0 }, Type.usize, Type.usize, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = rdhi, + .op = Instruction.Operand.fromU32(0).?, + } }, + }); // movne rdlo, #1 _ = try self.addInst(.{ @@ -1725,8 +1752,6 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); const result: MCValue = result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); @@ -1742,28 +1767,107 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { if (int_info.bits <= 32) { const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); - const lhs_lock: ?RegisterLock = if (lhs == .register) - self.register_manager.lockRegAssumeUnused(lhs.register) - else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = null; - // lsl dest, lhs, rhs - const dest = try self.binOp(.shl, lhs, rhs, lhs_ty, rhs_ty, null); - const dest_reg = dest.register; - const dest_lock = self.register_manager.lockRegAssumeUnused(dest_reg); - defer self.register_manager.unlockReg(dest_lock); + const shr_mir_tag: Mir.Inst.Tag = switch (int_info.signedness) { + .signed => Mir.Inst.Tag.asr, + .unsigned => Mir.Inst.Tag.lsr, + }; - // asr/lsr reconstructed, dest, rhs - const reconstructed = try self.binOp(.shr, dest, rhs, lhs_ty, rhs_ty, null); + var lhs_reg: Register = 
undefined; + var rhs_reg: Register = undefined; + var dest_reg: Register = undefined; + var reconstructed_reg: Register = undefined; + + const rhs_mcv = try self.resolveInst(extra.rhs); + const rhs_immediate_ok = rhs_mcv == .immediate and Instruction.Operand.fromU32(rhs_mcv.immediate) != null; + + const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; + + if (rhs_immediate_ok) { + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &reconstructed_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + null, + ); + + // lsl dest, lhs, rhs + _ = try self.addInst(.{ + .tag = .lsl, + .data = .{ .rr_shift = .{ + .rd = dest_reg, + .rm = lhs_reg, + .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)), + } }, + }); + + try self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits); + + // asr/lsr reconstructed, dest, rhs + _ = try self.addInst(.{ + .tag = shr_mir_tag, + .data = .{ .rr_shift = .{ + .rd = reconstructed_reg, + .rm = dest_reg, + .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_mcv.immediate)), + } }, + }); + } else { + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &reconstructed_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + null, + ); + + // lsl dest, lhs, rhs + _ = try self.addInst(.{ + .tag = .lsl, + .data = .{ .rr_shift = .{ + .rd = dest_reg, + .rm = lhs_reg, + .shift_amount = Instruction.ShiftAmount.reg(rhs_reg), + } }, + }); + + try 
self.truncRegister(dest_reg, dest_reg, int_info.signedness, int_info.bits); + + // asr/lsr reconstructed, dest, rhs + _ = try self.addInst(.{ + .tag = shr_mir_tag, + .data = .{ .rr_shift = .{ + .rd = reconstructed_reg, + .rm = dest_reg, + .shift_amount = Instruction.ShiftAmount.reg(rhs_reg), + } }, + }); + } // cmp lhs, reconstructed - _ = try self.binOp(.cmp_eq, lhs, reconstructed, lhs_ty, lhs_ty, null); + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.reg(reconstructed_reg, Instruction.Operand.Shift.none), + } }, + }); - try self.genSetStack(lhs_ty, stack_offset, dest); + try self.genSetStack(lhs_ty, stack_offset, .{ .register = dest_reg }); try self.genSetStack(Type.initTag(.u1), stack_offset - overflow_bit_offset, .{ .cpsr_flags = .ne }); break :result MCValue{ .stack_offset = stack_offset }; @@ -2662,8 +2766,8 @@ fn allocRegs( write_args: []const WriteArg, reuse_metadata: ?ReuseMetadata, ) InnerError!void { - // Air instructions have either one output or none (cmp) - assert(!(reuse_metadata != null and write_args.len > 1)); // see note above + // Air instructions have exactly one output + assert(!(reuse_metadata != null and write_args.len != 1)); // see note above // The operand mapping is a 1:1 mapping of read args to their // corresponding operand index in the Air instruction @@ -2714,7 +2818,7 @@ fn allocRegs( } } - if (reuse_metadata != null and write_args.len > 0) { + if (reuse_metadata != null) { const inst = reuse_metadata.?.corresponding_inst; const operand_mapping = reuse_metadata.?.operand_mapping; const arg = write_args[0]; @@ -2826,7 +2930,7 @@ fn binOpRegister( }; try self.allocRegs( &read_args, - if (mir_tag == .cmp) &.{} else &write_args, + &write_args, if (metadata) |md| .{ .corresponding_inst = md.inst, .operand_mapping = &.{ 0, 1 }, @@ -2846,10 +2950,6 @@ fn binOpRegister( .rn = lhs_reg, .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), } }, - .cmp => .{ 
.r_op_cmp = .{ - .rn = lhs_reg, - .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), - } }, .lsl, .asr, .lsr, @@ -2918,7 +3018,7 @@ fn binOpImmediate( const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0}; try self.allocRegs( &read_args, - if (mir_tag == .cmp) &.{} else &write_args, + &write_args, if (metadata) |md| .{ .corresponding_inst = md.inst, .operand_mapping = operand_mapping, @@ -2938,10 +3038,6 @@ fn binOpImmediate( .rn = lhs_reg, .op = Instruction.Operand.fromU32(rhs.immediate).?, } }, - .cmp => .{ .r_op_cmp = .{ - .rn = lhs_reg, - .op = Instruction.Operand.fromU32(rhs.immediate).?, - } }, .lsl, .asr, .lsr, @@ -2991,7 +3087,6 @@ fn binOp( switch (tag) { .add, .sub, - .cmp_eq, => { switch (lhs_ty.zigTypeTag()) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), @@ -3006,15 +3101,12 @@ fn binOp( // operands const lhs_immediate_ok = switch (tag) { .add => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null, - .sub, - .cmp_eq, - => false, + .sub => false, else => unreachable, }; const rhs_immediate_ok = switch (tag) { .add, .sub, - .cmp_eq, => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null, else => unreachable, }; @@ -3022,7 +3114,6 @@ fn binOp( const mir_tag: Mir.Inst.Tag = switch (tag) { .add => .add, .sub => .sub, - .cmp_eq => .cmp, else => unreachable, }; @@ -4005,32 +4096,16 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const lhs_ty = self.air.typeOf(bin_op.lhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { - const operands: BinOpOperands = .{ .inst = .{ - .inst = inst, - .lhs = bin_op.lhs, - .rhs = bin_op.rhs, - } }; - break :blk try self.cmp(operands, lhs_ty, op); + break :blk try self.cmp(.{ .inst = bin_op.lhs }, .{ .inst = bin_op.rhs }, lhs_ty, op); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -const BinOpOperands = 
union(enum) { - inst: struct { - inst: Air.Inst.Index, - lhs: Air.Inst.Ref, - rhs: Air.Inst.Ref, - }, - mcv: struct { - lhs: MCValue, - rhs: MCValue, - }, -}; - fn cmp( self: *Self, - operands: BinOpOperands, + lhs: ReadArg.Bind, + rhs: ReadArg.Bind, lhs_ty: Type, op: math.CompareOperator, ) !MCValue { @@ -4060,22 +4135,47 @@ fn cmp( if (int_info.bits <= 32) { try self.spillCompareFlagsIfOccupied(); - switch (operands) { - .inst => |inst_op| { - const metadata: BinOpMetadata = .{ - .inst = inst_op.inst, - .lhs = inst_op.lhs, - .rhs = inst_op.rhs, - }; - const lhs = try self.resolveInst(inst_op.lhs); - const rhs = try self.resolveInst(inst_op.rhs); + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; - self.cpsr_flags_inst = inst_op.inst; - _ = try self.binOp(.cmp_eq, lhs, rhs, int_ty, int_ty, metadata); - }, - .mcv => |mcv_op| { - _ = try self.binOp(.cmp_eq, mcv_op.lhs, mcv_op.rhs, int_ty, int_ty, null); - }, + const rhs_mcv = try rhs.resolveToMcv(self); + const rhs_immediate_ok = rhs_mcv == .immediate and Instruction.Operand.fromU32(rhs_mcv.immediate) != null; + + if (rhs_immediate_ok) { + const read_args = [_]ReadArg{ + .{ .ty = int_ty, .bind = lhs, .class = gp, .reg = &lhs_reg }, + }; + try self.allocRegs( + &read_args, + &.{}, + null, // we won't be able to reuse a register as there are no write_regs + ); + + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.fromU32(rhs_mcv.immediate).?, + } }, + }); + } else { + const read_args = [_]ReadArg{ + .{ .ty = int_ty, .bind = lhs, .class = gp, .reg = &lhs_reg }, + .{ .ty = int_ty, .bind = rhs, .class = gp, .reg = &rhs_reg }, + }; + try self.allocRegs( + &read_args, + &.{}, + null, // we won't be able to reuse a register as there are no write_regs + ); + + _ = try self.addInst(.{ + .tag = .cmp, + .data = .{ .r_op_cmp = .{ + .rn = lhs_reg, + .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), + } }, + }); } return 
switch (int_info.signedness) { @@ -4349,14 +4449,13 @@ fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue { fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const error_type = ty.errorUnionSet(); - const error_int_type = Type.initTag(.u16); if (error_type.errorSetIsEmpty()) { return MCValue{ .immediate = 0 }; // always false } const error_mcv = try self.errUnionErr(operand, ty); - _ = try self.binOp(.cmp_eq, error_mcv, .{ .immediate = 0 }, error_int_type, error_int_type, null); + _ = try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .neq); return MCValue{ .cpsr_flags = .hi }; } @@ -4587,14 +4686,7 @@ fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { defer self.gpa.free(branch_into_prong_relocs); for (items) |item, idx| { - const condition = try self.resolveInst(pl_op.operand); - const item_mcv = try self.resolveInst(item); - - const operands: BinOpOperands = .{ .mcv = .{ - .lhs = condition, - .rhs = item_mcv, - } }; - const cmp_result = try self.cmp(operands, condition_ty, .neq); + const cmp_result = try self.cmp(.{ .inst = pl_op.operand }, .{ .inst = item }, condition_ty, .neq); branch_into_prong_relocs[idx] = try self.condBr(cmp_result); } From ed4be06883427e2d8f97f2dd241d8996994a0c66 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 21 Aug 2022 13:43:09 +0200 Subject: [PATCH 04/30] stage2 ARM: extract add+sub from binOp This commit also lays the groundwork for further extractions from binOp. 
--- src/arch/arm/CodeGen.zig | 329 +++++++++++++++++++++++++++++---------- 1 file changed, 246 insertions(+), 83 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 93db3dd76b..aacfff4f9c 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1401,19 +1401,26 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const result: MCValue = if (self.liveness.isUnused(inst)) - .dead - else - try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ - .lhs = bin_op.lhs, - .rhs = bin_op.rhs, - .inst = inst, - }); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + + switch (tag) { + .add, + .sub, + => break :result try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + else => break :result try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ + .lhs = bin_op.lhs, + .rhs = bin_op.rhs, + .inst = inst, + }), + } + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1459,8 +1466,8 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); + const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; + const rhs_bind: 
ReadArg.Bind = .{ .inst = extra.rhs }; const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); @@ -1485,7 +1492,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { .sub_with_overflow => .sub, else => unreachable, }; - const dest = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, null); + const dest = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null); const dest_reg = dest.register; const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg); defer self.register_manager.unlockReg(dest_reg_lock); @@ -1511,6 +1518,9 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits == 32) { + const lhs = try self.resolveInst(extra.lhs); + const rhs = try self.resolveInst(extra.rhs); + // Only say yes if the operation is // commutative, i.e. we can swap both of the // operands @@ -2600,26 +2610,10 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde break :result MCValue{ .ptr_stack_offset = off - struct_field_offset }; }, else => { - const offset_reg = try self.copyToTmpRegister(ptr_ty, .{ - .immediate = struct_field_offset, - }); - const offset_reg_lock = self.register_manager.lockRegAssumeUnused(offset_reg); - defer self.register_manager.unlockReg(offset_reg_lock); + const lhs_bind: ReadArg.Bind = .{ .mcv = mcv }; + const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = struct_field_offset } }; - const addr_reg = try self.copyToTmpRegister(ptr_ty, mcv); - const addr_reg_lock = self.register_manager.lockRegAssumeUnused(addr_reg); - defer self.register_manager.unlockReg(addr_reg_lock); - - const dest = try self.binOp( - .add, - .{ .register = addr_reg }, - .{ .register = offset_reg }, - Type.usize, - Type.usize, - null, - ); - - break :result dest; + break :result try self.addSub(.add, lhs_bind, rhs_bind, Type.usize, Type.usize, null); }, } }; @@ -2708,6 +2702,25 @@ const ReadArg = struct 
{ .mcv => |mcv| mcv, }; } + + fn resolveToImmediate(bind: Bind, function: *Self) InnerError!?u32 { + switch (bind) { + .inst => |inst| { + // TODO resolve independently of inst_table + const mcv = try function.resolveInst(inst); + switch (mcv) { + .immediate => |imm| return imm, + else => return null, + } + }, + .mcv => |mcv| { + switch (mcv) { + .immediate => |imm| return imm, + else => return null, + } + }, + } + } }; }; @@ -3057,6 +3070,136 @@ fn binOpImmediate( return MCValue{ .register = dest_reg }; } +/// TODO +fn binOpRegisterNew( + self: *Self, + mir_tag: Mir.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) !MCValue { + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{ 0, 1 }, + } else null, + ); + + const mir_data: Mir.Inst.Data = switch (mir_tag) { + .add, + .adds, + .sub, + .subs, + .@"and", + .orr, + .eor, + => .{ .rr_op = .{ + .rd = dest_reg, + .rn = lhs_reg, + .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), + } }, + .lsl, + .asr, + .lsr, + => .{ .rr_shift = .{ + .rd = dest_reg, + .rm = lhs_reg, + .shift_amount = Instruction.ShiftAmount.reg(rhs_reg), + } }, + .mul, + .smulbb, + => .{ .rrr = .{ + .rd = dest_reg, + .rn = lhs_reg, + .rm = rhs_reg, + } }, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = mir_tag, + .data = mir_data, + }); + + return MCValue{ .register = dest_reg }; +} + +/// TODO +fn binOpImmediateNew( + self: *Self, + mir_tag: Mir.Inst.Tag, + lhs_bind: 
ReadArg.Bind, + rhs_immediate: u32, + lhs_ty: Type, + lhs_and_rhs_swapped: bool, + maybe_inst: ?Air.Inst.Index, +) !MCValue { + var lhs_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0}; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = operand_mapping, + } else null, + ); + + const mir_data: Mir.Inst.Data = switch (mir_tag) { + .add, + .adds, + .sub, + .subs, + .@"and", + .orr, + .eor, + => .{ .rr_op = .{ + .rd = dest_reg, + .rn = lhs_reg, + .op = Instruction.Operand.fromU32(rhs_immediate).?, + } }, + .lsl, + .asr, + .lsr, + => .{ .rr_shift = .{ + .rd = dest_reg, + .rm = lhs_reg, + .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs_immediate)), + } }, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = mir_tag, + .data = mir_data, + }); + + return MCValue{ .register = dest_reg }; +} + const BinOpMetadata = struct { inst: Air.Inst.Index, lhs: Air.Inst.Ref, @@ -3085,53 +3228,6 @@ fn binOp( metadata: ?BinOpMetadata, ) InnerError!MCValue { switch (tag) { - .add, - .sub, - => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - // Only say yes if the operation is - // commutative, i.e. 
we can swap both of the - // operands - const lhs_immediate_ok = switch (tag) { - .add => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null, - .sub => false, - else => unreachable, - }; - const rhs_immediate_ok = switch (tag) { - .add, - .sub, - => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null, - else => unreachable, - }; - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .add => .add, - .sub => .sub, - else => unreachable, - }; - - if (rhs_immediate_ok) { - return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); - } else if (lhs_immediate_ok) { - // swap lhs and rhs - return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata); - } else { - return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, .mul => { switch (lhs_ty.zigTypeTag()) { .Float => return self.fail("TODO ARM binary operations on floats", .{}), @@ -3278,8 +3374,18 @@ fn binOp( else => unreachable, }; + const lhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.lhs } + else + ReadArg.Bind{ .mcv = lhs }; + const rhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.rhs } + else + ReadArg.Bind{ .mcv = rhs }; + // Generate an add/sub/mul - const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); + const maybe_inst: ?Air.Inst.Index = if (metadata) |md| md.inst else null; + const result = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); // Truncate if necessary switch (lhs_ty.zigTypeTag()) { @@ -3463,6 +3569,63 @@ fn binOp( } } +fn addSub( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + 
.Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const lhs_immediate = try lhs_bind.resolveToImmediate(self); + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + // Only say yes if the operation is + // commutative, i.e. we can swap both of the + // operands + const lhs_immediate_ok = switch (tag) { + .add => if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false, + .sub => false, + else => unreachable, + }; + const rhs_immediate_ok = switch (tag) { + .add, + .sub, + => if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false, + else => unreachable, + }; + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .add => .add, + .sub => .sub, + else => unreachable, + }; + + if (rhs_immediate_ok) { + return try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, maybe_inst); + } else if (lhs_immediate_ok) { + // swap lhs and rhs + return try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, maybe_inst); + } else { + return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { const abi_size = ty.abiSize(self.target.*); @@ -4138,8 +4301,8 @@ fn cmp( var lhs_reg: Register = undefined; var rhs_reg: Register = undefined; - const rhs_mcv = try rhs.resolveToMcv(self); - const rhs_immediate_ok = rhs_mcv == .immediate and Instruction.Operand.fromU32(rhs_mcv.immediate) != null; + const rhs_immediate = try rhs.resolveToImmediate(self); + const rhs_immediate_ok = if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false; if 
(rhs_immediate_ok) { const read_args = [_]ReadArg{ @@ -4155,7 +4318,7 @@ fn cmp( .tag = .cmp, .data = .{ .r_op_cmp = .{ .rn = lhs_reg, - .op = Instruction.Operand.fromU32(rhs_mcv.immediate).?, + .op = Instruction.Operand.fromU32(rhs_immediate.?).?, } }, }); } else { From fdb2c80bdc12bfb6be5235de6a5792e0b0619da8 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 21 Aug 2022 17:10:00 +0200 Subject: [PATCH 05/30] stage2 ARM: extract mul, div, and mod out of binOp --- src/arch/arm/CodeGen.zig | 371 ++++++++++++++++++++++++--------------- 1 file changed, 229 insertions(+), 142 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index aacfff4f9c..02981ce418 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1410,16 +1410,29 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - switch (tag) { - .add, - .sub, - => break :result try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), - else => break :result try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ + break :result switch (tag) { + .add => try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .sub => try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .mul => try self.mul(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .div_float => try self.divFloat(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .div_trunc => try self.div(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .div_floor => try self.div(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .div_exact => try self.divExact(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .rem => try self.rem(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .mod => try self.modulo(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + else => try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ .lhs = bin_op.lhs, .rhs = bin_op.rhs, .inst = inst, }), - } + }; }; 
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -3228,141 +3241,6 @@ fn binOp( metadata: ?BinOpMetadata, ) InnerError!MCValue { switch (tag) { - .mul => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - // TODO add optimisations for multiplication - // with immediates, for example a * 2 can be - // lowered to a << 1 - return try self.binOpRegister(.mul, lhs, rhs, lhs_ty, rhs_ty, metadata); - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .div_float => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - else => unreachable, - } - }, - .div_trunc, .div_floor => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - switch (int_info.signedness) { - .signed => { - return self.fail("TODO ARM signed integer division", .{}); - }, - .unsigned => { - switch (rhs) { - .immediate => |imm| { - if (std.math.isPowerOfTwo(imm)) { - const shift = MCValue{ .immediate = std.math.log2_int(u32, imm) }; - return try self.binOp(.shr, lhs, shift, lhs_ty, rhs_ty, metadata); - } else { - return self.fail("TODO ARM integer division by constants", .{}); - } - }, - else => return self.fail("TODO ARM integer division", .{}), - } - }, - } - } 
else { - return self.fail("TODO ARM integer division for integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .div_exact => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => return self.fail("TODO ARM div_exact", .{}), - else => unreachable, - } - }, - .rem => { - switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - switch (int_info.signedness) { - .signed => { - return self.fail("TODO ARM signed integer mod", .{}); - }, - .unsigned => { - switch (rhs) { - .immediate => |imm| { - if (std.math.isPowerOfTwo(imm)) { - const log2 = std.math.log2_int(u32, imm); - - var lhs_reg: Register = undefined; - var dest_reg: Register = undefined; - - const lhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.lhs } - else - ReadArg.Bind{ .mcv = lhs }; - const read_args = [_]ReadArg{ - .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, - }; - const write_args = [_]WriteArg{ - .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, - }; - try self.allocRegs( - &read_args, - &write_args, - if (metadata) |md| .{ - .corresponding_inst = md.inst, - .operand_mapping = &.{0}, - } else null, - ); - - try self.truncRegister(lhs_reg, dest_reg, int_info.signedness, log2); - return MCValue{ .register = dest_reg }; - } else { - return self.fail("TODO ARM integer mod by constants", .{}); - } - }, - else => return self.fail("TODO ARM integer mod", .{}), - } - }, - } - } else { - return self.fail("TODO ARM integer division for integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .mod => { - 
switch (lhs_ty.zigTypeTag()) { - .Float => return self.fail("TODO ARM binary operations on floats", .{}), - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => return self.fail("TODO ARM mod", .{}), - else => unreachable, - } - }, .addwrap, .subwrap, .mulwrap, @@ -3557,7 +3435,13 @@ fn binOp( } else { // convert the offset into a byte offset by // multiplying it with elem_size - const offset = try self.binOp(.mul, rhs, .{ .immediate = elem_size }, Type.usize, Type.usize, null); + const rhs_bind = if (metadata) |md| + ReadArg.Bind{ .inst = md.rhs } + else + ReadArg.Bind{ .mcv = rhs }; + const imm_bind = ReadArg.Bind{ .mcv = .{ .immediate = elem_size } }; + + const offset = try self.mul(rhs_bind, imm_bind, Type.usize, Type.usize, null); const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null); return addr; } @@ -3626,6 +3510,209 @@ fn addSub( } } +fn mul( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + // TODO add optimisations for multiplication + // with immediates, for example a * 2 can be + // lowered to a << 1 + return try self.binOpRegisterNew(.mul, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn divFloat( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + _ = lhs_bind; + _ = rhs_bind; + _ = lhs_ty; + _ = 
rhs_ty; + _ = maybe_inst; + + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + else => unreachable, + } +} + +fn div( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + _ = tag; + + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + switch (int_info.signedness) { + .signed => { + return self.fail("TODO ARM signed integer division", .{}); + }, + .unsigned => { + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + if (rhs_immediate) |imm| { + if (std.math.isPowerOfTwo(imm)) { + const shift = std.math.log2_int(u32, imm); + return try self.binOpImmediateNew(.lsr, lhs_bind, shift, lhs_ty, false, maybe_inst); + } else { + return self.fail("TODO ARM integer division by constants", .{}); + } + } else { + return self.fail("TODO ARM integer division", .{}); + } + }, + } + } else { + return self.fail("TODO ARM integer division for integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn divExact( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + _ = lhs_bind; + _ = rhs_bind; + _ = lhs_ty; + _ = rhs_ty; + _ = maybe_inst; + + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => return self.fail("TODO ARM div_exact", .{}), + else => unreachable, + } +} 
+ +fn rem( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + switch (int_info.signedness) { + .signed => { + return self.fail("TODO ARM signed integer mod", .{}); + }, + .unsigned => { + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + if (rhs_immediate) |imm| { + if (std.math.isPowerOfTwo(imm)) { + const log2 = std.math.log2_int(u32, imm); + + var lhs_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + } else null, + ); + + try self.truncRegister(lhs_reg, dest_reg, int_info.signedness, log2); + + return MCValue{ .register = dest_reg }; + } else { + return self.fail("TODO ARM integer mod by constants", .{}); + } + } else { + return self.fail("TODO ARM integer mod", .{}); + } + }, + } + } else { + return self.fail("TODO ARM integer division for integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn modulo( + self: *Self, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + _ = lhs_bind; + _ = rhs_bind; + _ = lhs_ty; + _ = rhs_ty; + _ = maybe_inst; + + switch (lhs_ty.zigTypeTag()) { + .Float => return self.fail("TODO ARM binary 
operations on floats", .{}), + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => return self.fail("TODO ARM mod", .{}), + else => unreachable, + } +} + fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { const abi_size = ty.abiSize(self.target.*); From 95b8a5f157aa7552e3f125e56968b889e254497a Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Thu, 25 Aug 2022 22:17:57 +0200 Subject: [PATCH 06/30] stage2 ARM: extract remaining operations out of binOp --- src/arch/arm/CodeGen.zig | 543 ++++++++++++++++++++------------------- 1 file changed, 283 insertions(+), 260 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 02981ce418..b7694291f2 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1407,8 +1407,6 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); break :result switch (tag) { .add => try self.addSub(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), @@ -1427,11 +1425,24 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { .mod => try self.modulo(lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), - else => try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ - .lhs = bin_op.lhs, - .rhs = bin_op.rhs, - .inst = inst, - }), + .addwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .subwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .mulwrap => try self.wrappingArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .bit_and => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .bit_or => try self.bitwise(tag, lhs_bind, 
rhs_bind, lhs_ty, rhs_ty, inst), + .xor => try self.bitwise(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .shl_exact => try self.shiftExact(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .shr_exact => try self.shiftExact(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .shl => try self.shiftNormal(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .shr => try self.shiftNormal(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + .bool_and => try self.booleanOp(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + .bool_or => try self.booleanOp(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst), + + else => unreachable, }; }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1440,19 +1451,15 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); - const result: MCValue = if (self.liveness.isUnused(inst)) - .dead - else - try self.binOp(tag, lhs, rhs, lhs_ty, rhs_ty, BinOpMetadata{ - .lhs = bin_op.lhs, - .rhs = bin_op.rhs, - .inst = inst, - }); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + + break :result try self.ptrArithmetic(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2247,7 +2254,11 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { }, else => { const dest = try self.allocRegOrMem(inst, true); - const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ptr_field_type, 
Type.usize, null); + + const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; + const index_bind: ReadArg.Bind = .{ .mcv = index_mcv }; + + const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ptr_field_type, Type.usize, null); try self.load(dest, addr, slice_ptr_field_type); break :result dest; @@ -2262,12 +2273,15 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const slice_mcv = try self.resolveInst(extra.lhs); - const index_mcv = try self.resolveInst(extra.rhs); const base_mcv = slicePtr(slice_mcv); - const slice_ty = self.air.typeOf(extra.lhs); + const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; + const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; - const addr = try self.binOp(.ptr_add, base_mcv, index_mcv, slice_ty, Type.usize, null); + const slice_ty = self.air.typeOf(extra.lhs); + const index_ty = self.air.typeOf(extra.rhs); + + const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ty, index_ty, null); break :result addr; }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); @@ -2290,12 +2304,13 @@ fn airPtrElemPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const ptr_mcv = try self.resolveInst(extra.lhs); - const index_mcv = try self.resolveInst(extra.rhs); + const ptr_bind: ReadArg.Bind = .{ .inst = extra.lhs }; + const index_bind: ReadArg.Bind = .{ .inst = extra.rhs }; const ptr_ty = self.air.typeOf(extra.lhs); + const index_ty = self.air.typeOf(extra.rhs); - const addr = try self.binOp(.ptr_add, ptr_mcv, index_mcv, ptr_ty, Type.usize, null); + const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, 
index_ty, null); break :result addr; }; return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); @@ -3219,240 +3234,6 @@ const BinOpMetadata = struct { rhs: Air.Inst.Ref, }; -/// For all your binary operation needs, this function will generate -/// the corresponding Mir instruction(s). Returns the location of the -/// result. -/// -/// If the binary operation itself happens to be an Air instruction, -/// pass the corresponding index in the inst parameter. That helps -/// this function do stuff like reusing operands. -/// -/// This function does not do any lowering to Mir itself, but instead -/// looks at the lhs and rhs and determines which kind of lowering -/// would be best suitable and then delegates the lowering to other -/// functions. -fn binOp( - self: *Self, - tag: Air.Inst.Tag, - lhs: MCValue, - rhs: MCValue, - lhs_ty: Type, - rhs_ty: Type, - metadata: ?BinOpMetadata, -) InnerError!MCValue { - switch (tag) { - .addwrap, - .subwrap, - .mulwrap, - => { - const base_tag: Air.Inst.Tag = switch (tag) { - .addwrap => .add, - .subwrap => .sub, - .mulwrap => .mul, - else => unreachable, - }; - - const lhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.lhs } - else - ReadArg.Bind{ .mcv = lhs }; - const rhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.rhs } - else - ReadArg.Bind{ .mcv = rhs }; - - // Generate an add/sub/mul - const maybe_inst: ?Air.Inst.Index = if (metadata) |md| md.inst else null; - const result = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); - - // Truncate if necessary - switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - const result_reg = result.register; - - if (int_info.bits < 32) { - try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); - return result; - } else return result; - } else { - return 
self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .bit_and, - .bit_or, - .xor, - => { - switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const mod = self.bin_file.options.module.?; - assert(lhs_ty.eql(rhs_ty, mod)); - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - const lhs_immediate_ok = lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null; - const rhs_immediate_ok = rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null; - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .bit_and => .@"and", - .bit_or => .orr, - .xor => .eor, - else => unreachable, - }; - - if (rhs_immediate_ok) { - return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); - } else if (lhs_immediate_ok) { - // swap lhs and rhs - return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata); - } else { - return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .shl_exact, - .shr_exact, - => { - switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - const rhs_immediate_ok = rhs == .immediate; - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .shl_exact => .lsl, - .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) { - .signed => Mir.Inst.Tag.asr, - .unsigned => Mir.Inst.Tag.lsr, - }, - else => unreachable, - }; - - if (rhs_immediate_ok) { - return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); - } else { - return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } - } else { - return self.fail("TODO ARM binary 
operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - } - }, - .shl, - .shr, - => { - const base_tag: Air.Inst.Tag = switch (tag) { - .shl => .shl_exact, - .shr => .shr_exact, - else => unreachable, - }; - - // Generate a shl_exact/shr_exact - const result = try self.binOp(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - - // Truncate if necessary - switch (tag) { - .shr => return result, - .shl => switch (lhs_ty.zigTypeTag()) { - .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), - .Int => { - const int_info = lhs_ty.intInfo(self.target.*); - if (int_info.bits <= 32) { - const result_reg = result.register; - - if (int_info.bits < 32) { - try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); - return result; - } else return result; - } else { - return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); - } - }, - else => unreachable, - }, - else => unreachable, - } - }, - .bool_and, - .bool_or, - => { - switch (lhs_ty.zigTypeTag()) { - .Bool => { - const lhs_immediate_ok = lhs == .immediate; - const rhs_immediate_ok = rhs == .immediate; - - const mir_tag: Mir.Inst.Tag = switch (tag) { - .bool_and => .@"and", - .bool_or => .orr, - else => unreachable, - }; - - if (rhs_immediate_ok) { - return try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, metadata); - } else if (lhs_immediate_ok) { - // swap lhs and rhs - return try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, metadata); - } else { - return try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } - }, - else => unreachable, - } - }, - .ptr_add, - .ptr_sub, - => { - switch (lhs_ty.zigTypeTag()) { - .Pointer => { - const ptr_ty = lhs_ty; - const elem_ty = switch (ptr_ty.ptrSize()) { - .One => ptr_ty.childType().childType(), // ptr to array, so get array element type - else => ptr_ty.childType(), - }; - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); - - if (elem_size == 1) { - 
const base_tag: Mir.Inst.Tag = switch (tag) { - .ptr_add => .add, - .ptr_sub => .sub, - else => unreachable, - }; - - return try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, metadata); - } else { - // convert the offset into a byte offset by - // multiplying it with elem_size - const rhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.rhs } - else - ReadArg.Bind{ .mcv = rhs }; - const imm_bind = ReadArg.Bind{ .mcv = .{ .immediate = elem_size } }; - - const offset = try self.mul(rhs_bind, imm_bind, Type.usize, Type.usize, null); - const addr = try self.binOp(tag, lhs, offset, Type.initTag(.manyptr_u8), Type.usize, null); - return addr; - } - }, - else => unreachable, - } - }, - else => unreachable, - } -} - fn addSub( self: *Self, tag: Air.Inst.Tag, @@ -3713,6 +3494,248 @@ fn modulo( } } +fn wrappingArithmetic( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + const base_tag: Air.Inst.Tag = switch (tag) { + .addwrap => .add, + .subwrap => .sub, + .mulwrap => .mul, + else => unreachable, + }; + + // Generate an add/sub/mul + const result = try self.addSub(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + + // Truncate if necessary + switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const result_reg = result.register; + + if (int_info.bits < 32) { + try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); + return result; + } else return result; + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn bitwise( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) 
InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const mod = self.bin_file.options.module.?; + assert(lhs_ty.eql(rhs_ty, mod)); + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const lhs_immediate = try lhs_bind.resolveToImmediate(self); + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + const lhs_immediate_ok = if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false; + const rhs_immediate_ok = if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false; + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .bit_and => .@"and", + .bit_or => .orr, + .xor => .eor, + else => unreachable, + }; + + if (rhs_immediate_ok) { + return try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, maybe_inst); + } else if (lhs_immediate_ok) { + // swap lhs and rhs + return try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, maybe_inst); + } else { + return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn shiftExact( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .shl_exact => .lsl, + .shr_exact => switch (lhs_ty.intInfo(self.target.*).signedness) { + .signed => Mir.Inst.Tag.asr, + .unsigned => Mir.Inst.Tag.lsr, + }, + else => unreachable, + }; + + if 
(rhs_immediate) |imm| { + return try self.binOpImmediateNew(mir_tag, lhs_bind, imm, lhs_ty, false, maybe_inst); + } else { + return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + } +} + +fn shiftNormal( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + const base_tag: Air.Inst.Tag = switch (tag) { + .shl => .shl_exact, + .shr => .shr_exact, + else => unreachable, + }; + + // Generate a shl_exact/shr_exact + const result = try self.shiftExact(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + + // Truncate if necessary + switch (tag) { + .shr => return result, + .shl => switch (lhs_ty.zigTypeTag()) { + .Vector => return self.fail("TODO ARM binary operations on vectors", .{}), + .Int => { + const int_info = lhs_ty.intInfo(self.target.*); + if (int_info.bits <= 32) { + const result_reg = result.register; + + if (int_info.bits < 32) { + try self.truncRegister(result_reg, result_reg, int_info.signedness, int_info.bits); + return result; + } else return result; + } else { + return self.fail("TODO ARM binary operations on integers > u32/i32", .{}); + } + }, + else => unreachable, + }, + else => unreachable, + } +} + +fn booleanOp( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Bool => { + const lhs_immediate = try lhs_bind.resolveToImmediate(self); + const rhs_immediate = try rhs_bind.resolveToImmediate(self); + + const mir_tag: Mir.Inst.Tag = switch (tag) { + .bool_and => .@"and", + .bool_or => .orr, + else => unreachable, + }; + + if (rhs_immediate) |imm| { + return try self.binOpImmediateNew(mir_tag, lhs_bind, 
imm, lhs_ty, false, maybe_inst); + } else if (lhs_immediate) |imm| { + // swap lhs and rhs + return try self.binOpImmediateNew(mir_tag, rhs_bind, imm, rhs_ty, true, maybe_inst); + } else { + return try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, maybe_inst); + } + }, + else => unreachable, + } +} + +fn ptrArithmetic( + self: *Self, + tag: Air.Inst.Tag, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, + lhs_ty: Type, + rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + switch (lhs_ty.zigTypeTag()) { + .Pointer => { + const mod = self.bin_file.options.module.?; + assert(rhs_ty.eql(Type.usize, mod)); + + const ptr_ty = lhs_ty; + const elem_ty = switch (ptr_ty.ptrSize()) { + .One => ptr_ty.childType().childType(), // ptr to array, so get array element type + else => ptr_ty.childType(), + }; + const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + + const base_tag: Air.Inst.Tag = switch (tag) { + .ptr_add => .add, + .ptr_sub => .sub, + else => unreachable, + }; + + if (elem_size == 1) { + return try self.addSub(base_tag, lhs_bind, rhs_bind, Type.usize, Type.usize, maybe_inst); + } else { + // convert the offset into a byte offset by + // multiplying it with elem_size + const imm_bind = ReadArg.Bind{ .mcv = .{ .immediate = elem_size } }; + + const offset = try self.mul(rhs_bind, imm_bind, Type.usize, Type.usize, null); + const offset_bind = ReadArg.Bind{ .mcv = offset }; + + const addr = try self.addSub(base_tag, lhs_bind, offset_bind, Type.usize, Type.usize, null); + return addr; + } + }, + else => unreachable, + } +} + fn genLdrRegister(self: *Self, dest_reg: Register, addr_reg: Register, ty: Type) !void { const abi_size = ty.abiSize(self.target.*); @@ -4614,7 +4637,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (else_value == .dead) continue; // The instruction is only overridden in the else branch. 
- var i: usize = self.branch_stack.items.len - 2; + var i: usize = self.branch_stack.items.len - 1; while (true) { i -= 1; // If this overflows, the question is: why wasn't the instruction marked dead? if (self.branch_stack.items[i].inst_table.get(else_key)) |mcv| { @@ -4641,7 +4664,7 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { if (then_value == .dead) continue; const parent_mcv = blk: { - var i: usize = self.branch_stack.items.len - 2; + var i: usize = self.branch_stack.items.len - 1; while (true) { i -= 1; if (self.branch_stack.items[i].inst_table.get(then_key)) |mcv| { From 481bd4761ac9826336d13553e249989f509ba172 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 26 Aug 2022 19:27:20 +0200 Subject: [PATCH 07/30] stage2 ARM: remove remaining uses of binOp{Register,Immediate} --- src/arch/arm/CodeGen.zig | 310 ++++++++------------------------------- 1 file changed, 60 insertions(+), 250 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index b7694291f2..bf378e24ce 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1264,11 +1264,11 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { fn minMax( self: *Self, tag: Air.Inst.Tag, - maybe_inst: ?Air.Inst.Index, - lhs: MCValue, - rhs: MCValue, + lhs_bind: ReadArg.Bind, + rhs_bind: ReadArg.Bind, lhs_ty: Type, rhs_ty: Type, + maybe_inst: ?Air.Inst.Index, ) !MCValue { switch (lhs_ty.zigTypeTag()) { .Float => return self.fail("TODO ARM min/max on floats", .{}), @@ -1278,34 +1278,25 @@ fn minMax( assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 32) { - const lhs_is_register = lhs == .register; - const rhs_is_register = rhs == .register; + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var dest_reg: Register = undefined; - const lhs_reg = switch (lhs) { - .register => |r| r, - else => try self.copyToTmpRegister(lhs_ty, lhs), + const read_args = [_]ReadArg{ + .{ .ty = 
lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, }; - const lhs_reg_lock = self.register_manager.lockReg(lhs_reg); - defer if (lhs_reg_lock) |reg| self.register_manager.unlockReg(reg); - - const rhs_reg = switch (rhs) { - .register => |r| r, - else => try self.copyToTmpRegister(rhs_ty, rhs), + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, }; - const rhs_reg_lock = self.register_manager.lockReg(rhs_reg); - defer if (rhs_reg_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = if (maybe_inst) |inst| blk: { - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - - if (lhs_is_register and self.reuseOperand(inst, bin_op.lhs, 0, lhs)) { - break :blk lhs_reg; - } else if (rhs_is_register and self.reuseOperand(inst, bin_op.rhs, 1, rhs)) { - break :blk rhs_reg; - } else { - break :blk try self.register_manager.allocReg(inst, gp); - } - } else try self.register_manager.allocReg(null, gp); + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{ 0, 1 }, + } else null, + ); // lhs == reg should have been checked by airMinMax // @@ -1369,15 +1360,17 @@ fn minMax( fn airMinMax(self: *Self, inst: Air.Inst.Index) !void { const tag = self.air.instructions.items(.tag)[inst]; const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const lhs = try self.resolveInst(bin_op.lhs); - const rhs = try self.resolveInst(bin_op.rhs); const lhs_ty = self.air.typeOf(bin_op.lhs); const rhs_ty = self.air.typeOf(bin_op.rhs); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const lhs_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + + const lhs = try self.resolveInst(bin_op.lhs); if (bin_op.lhs == bin_op.rhs) break :result lhs; - break :result try self.minMax(tag, inst, 
lhs, rhs, lhs_ty, rhs_ty); + break :result try self.minMax(tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, inst); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -1538,21 +1531,21 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits == 32) { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); + const lhs_immediate = try lhs_bind.resolveToImmediate(self); + const rhs_immediate = try rhs_bind.resolveToImmediate(self); // Only say yes if the operation is // commutative, i.e. we can swap both of the // operands const lhs_immediate_ok = switch (tag) { - .add_with_overflow => lhs == .immediate and Instruction.Operand.fromU32(lhs.immediate) != null, + .add_with_overflow => if (lhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false, .sub_with_overflow => false, else => unreachable, }; const rhs_immediate_ok = switch (tag) { .add_with_overflow, .sub_with_overflow, - => rhs == .immediate and Instruction.Operand.fromU32(rhs.immediate) != null, + => if (rhs_immediate) |imm| Instruction.Operand.fromU32(imm) != null else false, else => unreachable, }; @@ -1567,12 +1560,12 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { const dest = blk: { if (rhs_immediate_ok) { - break :blk try self.binOpImmediate(mir_tag, lhs, rhs, lhs_ty, false, null); + break :blk try self.binOpImmediateNew(mir_tag, lhs_bind, rhs_immediate.?, lhs_ty, false, null); } else if (lhs_immediate_ok) { // swap lhs and rhs - break :blk try self.binOpImmediate(mir_tag, rhs, lhs, rhs_ty, true, null); + break :blk try self.binOpImmediateNew(mir_tag, rhs_bind, lhs_immediate.?, rhs_ty, true, null); } else { - break :blk try self.binOpRegister(mir_tag, lhs, rhs, lhs_ty, rhs_ty, null); + break :blk try self.binOpRegisterNew(mir_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null); } }; @@ -1599,8 +1592,8 @@ fn airMulWithOverflow(self: *Self, 
inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Bin, ty_pl.payload).data; if (self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ extra.lhs, extra.rhs, .none }); const result: MCValue = result: { - const lhs = try self.resolveInst(extra.lhs); - const rhs = try self.resolveInst(extra.rhs); + const lhs_bind: ReadArg.Bind = .{ .inst = extra.lhs }; + const rhs_bind: ReadArg.Bind = .{ .inst = extra.rhs }; const lhs_ty = self.air.typeOf(extra.lhs); const rhs_ty = self.air.typeOf(extra.rhs); @@ -1625,7 +1618,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .unsigned => .mul, }; - const dest = try self.binOpRegister(base_tag, lhs, rhs, lhs_ty, rhs_ty, null); + const dest = try self.binOpRegisterNew(base_tag, lhs_bind, rhs_bind, lhs_ty, rhs_ty, null); const dest_reg = dest.register; const dest_reg_lock = self.register_manager.lockRegAssumeUnused(dest_reg); defer self.register_manager.unlockReg(dest_reg_lock); @@ -1660,45 +1653,26 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .unsigned => .umull, }; - // TODO extract umull etc. 
to binOpTwoRegister - // once MCValue.rr is implemented - const lhs_is_register = lhs == .register; - const rhs_is_register = rhs == .register; + var lhs_reg: Register = undefined; + var rhs_reg: Register = undefined; + var rdhi: Register = undefined; + var rdlo: Register = undefined; + var truncated_reg: Register = undefined; - const lhs_lock: ?RegisterLock = if (lhs_is_register) - self.register_manager.lockReg(lhs.register) - else - null; - defer if (lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const lhs_reg = if (lhs_is_register) - lhs.register - else - try self.register_manager.allocReg(null, gp); - const new_lhs_lock = self.register_manager.lockReg(lhs_reg); - defer if (new_lhs_lock) |reg| self.register_manager.unlockReg(reg); - - const rhs_reg = if (rhs_is_register) - rhs.register - else - try self.register_manager.allocReg(null, gp); - const new_rhs_lock = self.register_manager.lockReg(rhs_reg); - defer if (new_rhs_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_regs = try self.register_manager.allocRegs(2, .{ null, null }, gp); - const dest_regs_locks = self.register_manager.lockRegsAssumeUnused(2, dest_regs); - defer for (dest_regs_locks) |reg| { - self.register_manager.unlockReg(reg); + const read_args = [_]ReadArg{ + .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, + .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, }; - const rdlo = dest_regs[0]; - const rdhi = dest_regs[1]; - - if (!lhs_is_register) try self.genSetReg(lhs_ty, lhs_reg, lhs); - if (!rhs_is_register) try self.genSetReg(rhs_ty, rhs_reg, rhs); - - const truncated_reg = try self.register_manager.allocReg(null, gp); - const truncated_reg_lock = self.register_manager.lockRegAssumeUnused(truncated_reg); - defer self.register_manager.unlockReg(truncated_reg_lock); + const write_args = [_]WriteArg{ + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &rdhi }, + .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &rdlo }, + .{ .ty = 
lhs_ty, .bind = .none, .class = gp, .reg = &truncated_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + null, + ); _ = try self.addInst(.{ .tag = base_tag, @@ -2933,172 +2907,10 @@ fn allocRegs( } } -/// Don't call this function directly. Use binOp instead. +/// Wrapper around allocRegs and addInst tailored for specific Mir +/// instructions which are binary operations acting on two registers /// -/// Calling this function signals an intention to generate a Mir -/// instruction of the form -/// -/// op dest, lhs, rhs -/// -/// Asserts that generating an instruction of that form is possible. -fn binOpRegister( - self: *Self, - mir_tag: Mir.Inst.Tag, - lhs: MCValue, - rhs: MCValue, - lhs_ty: Type, - rhs_ty: Type, - metadata: ?BinOpMetadata, -) !MCValue { - var lhs_reg: Register = undefined; - var rhs_reg: Register = undefined; - var dest_reg: Register = undefined; - - const lhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.lhs } - else - ReadArg.Bind{ .mcv = lhs }; - const rhs_bind = if (metadata) |md| - ReadArg.Bind{ .inst = md.rhs } - else - ReadArg.Bind{ .mcv = rhs }; - const read_args = [_]ReadArg{ - .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, - .{ .ty = rhs_ty, .bind = rhs_bind, .class = gp, .reg = &rhs_reg }, - }; - const write_args = [_]WriteArg{ - .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, - }; - try self.allocRegs( - &read_args, - &write_args, - if (metadata) |md| .{ - .corresponding_inst = md.inst, - .operand_mapping = &.{ 0, 1 }, - } else null, - ); - - const mir_data: Mir.Inst.Data = switch (mir_tag) { - .add, - .adds, - .sub, - .subs, - .@"and", - .orr, - .eor, - => .{ .rr_op = .{ - .rd = dest_reg, - .rn = lhs_reg, - .op = Instruction.Operand.reg(rhs_reg, Instruction.Operand.Shift.none), - } }, - .lsl, - .asr, - .lsr, - => .{ .rr_shift = .{ - .rd = dest_reg, - .rm = lhs_reg, - .shift_amount = Instruction.ShiftAmount.reg(rhs_reg), - } }, - .mul, - .smulbb, - => .{ .rrr = .{ - .rd = 
dest_reg, - .rn = lhs_reg, - .rm = rhs_reg, - } }, - else => unreachable, - }; - - _ = try self.addInst(.{ - .tag = mir_tag, - .data = mir_data, - }); - - return MCValue{ .register = dest_reg }; -} - -/// Don't call this function directly. Use binOp instead. -/// -/// Calling this function signals an intention to generate a Mir -/// instruction of the form -/// -/// op dest, lhs, #rhs_imm -/// -/// Set lhs_and_rhs_swapped to true iff inst.bin_op.lhs corresponds to -/// rhs and vice versa. This parameter is only used when maybe_inst != -/// null. -/// -/// Asserts that generating an instruction of that form is possible. -fn binOpImmediate( - self: *Self, - mir_tag: Mir.Inst.Tag, - lhs: MCValue, - rhs: MCValue, - lhs_ty: Type, - lhs_and_rhs_swapped: bool, - metadata: ?BinOpMetadata, -) !MCValue { - var lhs_reg: Register = undefined; - var dest_reg: Register = undefined; - - const lhs_bind = blk: { - if (metadata) |md| { - const inst = if (lhs_and_rhs_swapped) md.rhs else md.lhs; - break :blk ReadArg.Bind{ .inst = inst }; - } else { - break :blk ReadArg.Bind{ .mcv = lhs }; - } - }; - - const read_args = [_]ReadArg{ - .{ .ty = lhs_ty, .bind = lhs_bind, .class = gp, .reg = &lhs_reg }, - }; - const write_args = [_]WriteArg{ - .{ .ty = lhs_ty, .bind = .none, .class = gp, .reg = &dest_reg }, - }; - const operand_mapping: []const Liveness.OperandInt = if (lhs_and_rhs_swapped) &.{1} else &.{0}; - try self.allocRegs( - &read_args, - &write_args, - if (metadata) |md| .{ - .corresponding_inst = md.inst, - .operand_mapping = operand_mapping, - } else null, - ); - - const mir_data: Mir.Inst.Data = switch (mir_tag) { - .add, - .adds, - .sub, - .subs, - .@"and", - .orr, - .eor, - => .{ .rr_op = .{ - .rd = dest_reg, - .rn = lhs_reg, - .op = Instruction.Operand.fromU32(rhs.immediate).?, - } }, - .lsl, - .asr, - .lsr, - => .{ .rr_shift = .{ - .rd = dest_reg, - .rm = lhs_reg, - .shift_amount = Instruction.ShiftAmount.imm(@intCast(u5, rhs.immediate)), - } }, - else => unreachable, - }; 
- - _ = try self.addInst(.{ - .tag = mir_tag, - .data = mir_data, - }); - - return MCValue{ .register = dest_reg }; -} - -/// TODO +/// Returns the destination register fn binOpRegisterNew( self: *Self, mir_tag: Mir.Inst.Tag, @@ -3167,7 +2979,11 @@ fn binOpRegisterNew( return MCValue{ .register = dest_reg }; } -/// TODO +/// Wrapper around allocRegs and addInst tailored for specific Mir +/// instructions which are binary operations acting on a register and +/// an immediate +/// +/// Returns the destination register fn binOpImmediateNew( self: *Self, mir_tag: Mir.Inst.Tag, @@ -3228,12 +3044,6 @@ fn binOpImmediateNew( return MCValue{ .register = dest_reg }; } -const BinOpMetadata = struct { - inst: Air.Inst.Index, - lhs: Air.Inst.Ref, - rhs: Air.Inst.Ref, -}; - fn addSub( self: *Self, tag: Air.Inst.Tag, From e2b029e2c8ad761c32886d27aa0227655a60eb9e Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 26 Aug 2022 22:19:27 +0200 Subject: [PATCH 08/30] stage2 ARM: implement field_parent_ptr --- src/arch/arm/CodeGen.zig | 21 ++++++++++++++++++--- test/behavior/field_parent_ptr.zig | 2 -- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index bf378e24ce..c0a28f1f94 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2681,9 +2681,24 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { fn airFieldParentPtr(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; - const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement airFieldParentPtr", .{}); - return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const field_ptr = try 
self.resolveInst(extra.field_ptr); + const struct_ty = self.air.getRefType(ty_pl.ty).childType(); + const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(extra.field_index, self.target.*)); + switch (field_ptr) { + .ptr_stack_offset => |off| { + break :result MCValue{ .ptr_stack_offset = off + struct_field_offset }; + }, + else => { + const lhs_bind: ReadArg.Bind = .{ .mcv = field_ptr }; + const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = struct_field_offset } }; + + break :result try self.addSub(.sub, lhs_bind, rhs_bind, Type.usize, Type.usize, null); + }, + } + }; + return self.finishAir(inst, result, .{ extra.field_ptr, .none, .none }); } /// An argument to a Mir instruction which is read (and possibly also diff --git a/test/behavior/field_parent_ptr.zig b/test/behavior/field_parent_ptr.zig index 3aefb0ef47..570a1f9522 100644 --- a/test/behavior/field_parent_ptr.zig +++ b/test/behavior/field_parent_ptr.zig @@ -2,7 +2,6 @@ const expect = @import("std").testing.expect; const builtin = @import("builtin"); test "@fieldParentPtr non-first field" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; @@ -11,7 +10,6 @@ test "@fieldParentPtr non-first field" { } test "@fieldParentPtr first field" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; From 261fec8036e1e5518951d91c5fc27b53c1a511d8 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sat, 27 Aug 2022 20:22:54 +0200 Subject: [PATCH 09/30] stage2 ARM: amend implementation of various AIR instructions - unwrap_errunion_err for registers - 
unwrap_errunion_payload for registers - ptr_slice_len_ptr for all MCValues - ptr_slice_ptr_ptr for all MCValues --- src/arch/arm/CodeGen.zig | 297 ++++++++++++++++++++--------- test/behavior/alignof.zig | 1 - test/behavior/basic.zig | 1 - test/behavior/cast.zig | 7 - test/behavior/comptime_memory.zig | 2 - test/behavior/enum.zig | 5 - test/behavior/error.zig | 4 - test/behavior/eval.zig | 2 - test/behavior/merge_error_sets.zig | 1 - test/behavior/slice.zig | 2 - test/behavior/struct.zig | 1 - test/behavior/switch.zig | 1 - test/behavior/this.zig | 1 - test/behavior/try.zig | 1 - test/behavior/while.zig | 1 - 15 files changed, 208 insertions(+), 119 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index c0a28f1f94..2f7028c565 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -451,9 +451,7 @@ fn gen(self: *Self) !void { // The address of where to store the return value is in // r0. As this register might get overwritten along the // way, save the address to the stack. 
- const stack_offset = mem.alignForwardGeneric(u32, self.next_stack_offset, 4) + 4; - self.next_stack_offset = stack_offset; - self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset); + const stack_offset = try self.allocMem(4, 4, null); try self.genSetStack(Type.usize, stack_offset, MCValue{ .register = .r0 }); self.ret_mcv = MCValue{ .stack_offset = stack_offset }; @@ -893,17 +891,30 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { try table.ensureUnusedCapacity(self.gpa, additional_count); } -fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 { +fn allocMem( + self: *Self, + abi_size: u32, + abi_align: u32, + maybe_inst: ?Air.Inst.Index, +) !u32 { + assert(abi_size > 0); + assert(abi_align > 0); + if (abi_align > self.stack_align) self.stack_align = abi_align; + // TODO find a free slot instead of always appending const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; self.next_stack_offset = offset; self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset); - try self.stack.putNoClobber(self.gpa, offset, .{ - .inst = inst, - .size = abi_size, - }); + + if (maybe_inst) |inst| { + try self.stack.putNoClobber(self.gpa, offset, .{ + .inst = inst, + .size = abi_size, + }); + } + return offset; } @@ -925,7 +936,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); - return self.allocMem(inst, abi_size, abi_align); + return self.allocMem(abi_size, abi_align, inst); } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { @@ -948,7 +959,7 @@ fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { } } } - const stack_offset = try self.allocMem(inst, abi_size, abi_align); + const stack_offset = try self.allocMem(abi_size, abi_align, inst); return MCValue{ .stack_offset = stack_offset }; } @@ 
-1182,29 +1193,32 @@ fn airBoolToInt(self: *Self, inst: Air.Inst.Index) !void { fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(ty_op.operand); + const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const operand_ty = self.air.typeOf(ty_op.operand); - switch (operand) { + switch (try operand_bind.resolveToMcv(self)) { .dead => unreachable, .unreach => unreachable, .cpsr_flags => |cond| break :result MCValue{ .cpsr_flags = cond.negate() }, else => { switch (operand_ty.zigTypeTag()) { .Bool => { - const op_reg = switch (operand) { - .register => |r| r, - else => try self.copyToTmpRegister(operand_ty, operand), - }; - const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg); - defer self.register_manager.unlockReg(op_reg_lock); + var op_reg: Register = undefined; + var dest_reg: Register = undefined; - const dest_reg = blk: { - if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) { - break :blk op_reg; - } - - break :blk try self.register_manager.allocReg(null, gp); + const read_args = [_]ReadArg{ + .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &op_reg }, }; + const write_args = [_]WriteArg{ + .{ .ty = operand_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + ReuseMetadata{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + }, + ); _ = try self.addInst(.{ .tag = .eor, @@ -1221,20 +1235,23 @@ fn airNot(self: *Self, inst: Air.Inst.Index) !void { .Int => { const int_info = operand_ty.intInfo(self.target.*); if (int_info.bits <= 32) { - const op_reg = switch (operand) { - .register => |r| r, - else => try self.copyToTmpRegister(operand_ty, operand), - }; - const op_reg_lock = self.register_manager.lockRegAssumeUnused(op_reg); - defer 
self.register_manager.unlockReg(op_reg_lock); + var op_reg: Register = undefined; + var dest_reg: Register = undefined; - const dest_reg = blk: { - if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) { - break :blk op_reg; - } - - break :blk try self.register_manager.allocReg(null, gp); + const read_args = [_]ReadArg{ + .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &op_reg }, }; + const write_args = [_]WriteArg{ + .{ .ty = operand_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + ReuseMetadata{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + }, + ); _ = try self.addInst(.{ .tag = .mvn, @@ -1384,7 +1401,7 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const len = try self.resolveInst(bin_op.rhs); const len_ty = self.air.typeOf(bin_op.rhs); - const stack_offset = try self.allocMem(inst, 8, 4); + const stack_offset = try self.allocMem(8, 4, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); try self.genSetStack(len_ty, stack_offset - 4, len); break :result MCValue{ .stack_offset = stack_offset }; @@ -1496,7 +1513,7 @@ fn airOverflow(self: *Self, inst: Air.Inst.Index) !void { assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits < 32) { - const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); + const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); try self.spillCompareFlagsIfOccupied(); @@ -1609,7 +1626,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { assert(lhs_ty.eql(rhs_ty, mod)); const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 16) { - const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); + const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); try self.spillCompareFlagsIfOccupied(); @@ -1644,7 +1661,7 @@ fn airMulWithOverflow(self: *Self, inst: Air.Inst.Index) !void { break :result 
MCValue{ .stack_offset = stack_offset }; } else if (int_info.bits <= 32) { - const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); + const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); try self.spillCompareFlagsIfOccupied(); @@ -1769,7 +1786,7 @@ fn airShlWithOverflow(self: *Self, inst: Air.Inst.Index) !void { .Int => { const int_info = lhs_ty.intInfo(self.target.*); if (int_info.bits <= 32) { - const stack_offset = try self.allocMem(inst, tuple_size, tuple_align); + const stack_offset = try self.allocMem(tuple_size, tuple_align, inst); try self.spillCompareFlagsIfOccupied(); @@ -1926,19 +1943,57 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { } /// Given an error union, returns the error -fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { +fn errUnionErr( + self: *Self, + error_union_bind: ReadArg.Bind, + error_union_ty: Type, + maybe_inst: ?Air.Inst.Index, +) !MCValue { const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { return MCValue{ .immediate = 0 }; } if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { - return error_union_mcv; + return try error_union_bind.resolveToMcv(self); } const err_offset = @intCast(u32, errUnionErrorOffset(payload_ty, self.target.*)); - switch (error_union_mcv) { - .register => return self.fail("TODO errUnionErr for registers", .{}), + switch (try error_union_bind.resolveToMcv(self)) { + .register => { + var operand_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = error_union_ty, .bind = error_union_bind, .class = gp, .reg = &operand_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = err_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + } else null, + ); + + 
const err_bit_offset = err_offset * 8; + const err_bit_size = @intCast(u32, err_ty.abiSize(self.target.*)) * 8; + + _ = try self.addInst(.{ + .tag = .ubfx, // errors are unsigned integers + .data = .{ .rr_lsb_width = .{ + .rd = dest_reg, + .rn = operand_reg, + .lsb = @intCast(u5, err_bit_offset), + .width = @intCast(u6, err_bit_size), + } }, + }); + + return MCValue{ .register = dest_reg }; + }, .stack_argument_offset => |off| { return MCValue{ .stack_argument_offset = off + err_offset }; }, @@ -1955,27 +2010,66 @@ fn errUnionErr(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCV fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const error_union_ty = self.air.typeOf(ty_op.operand); - const mcv = try self.resolveInst(ty_op.operand); - break :result try self.errUnionErr(mcv, error_union_ty); + + break :result try self.errUnionErr(error_union_bind, error_union_ty, inst); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// Given an error union, returns the payload -fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) !MCValue { +fn errUnionPayload( + self: *Self, + error_union_bind: ReadArg.Bind, + error_union_ty: Type, + maybe_inst: ?Air.Inst.Index, +) !MCValue { const err_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); if (err_ty.errorSetIsEmpty()) { - return error_union_mcv; + return try error_union_bind.resolveToMcv(self); } if (!payload_ty.hasRuntimeBitsIgnoreComptime()) { return MCValue.none; } const payload_offset = @intCast(u32, errUnionPayloadOffset(payload_ty, self.target.*)); - switch (error_union_mcv) { - .register => return self.fail("TODO errUnionPayload for registers", .{}), + switch (try 
error_union_bind.resolveToMcv(self)) { + .register => { + var operand_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = error_union_ty, .bind = error_union_bind, .class = gp, .reg = &operand_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = err_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + } else null, + ); + + const payload_bit_offset = payload_offset * 8; + const payload_bit_size = @intCast(u32, payload_ty.abiSize(self.target.*)) * 8; + + _ = try self.addInst(.{ + .tag = if (payload_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .data = .{ .rr_lsb_width = .{ + .rd = dest_reg, + .rn = operand_reg, + .lsb = @intCast(u5, payload_bit_offset), + .width = @intCast(u6, payload_bit_size), + } }, + }); + + return MCValue{ .register = dest_reg }; + }, .stack_argument_offset => |off| { return MCValue{ .stack_argument_offset = off + payload_offset }; }, @@ -1992,9 +2086,10 @@ fn errUnionPayload(self: *Self, error_union_mcv: MCValue, error_union_ty: Type) fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const error_union_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const error_union_ty = self.air.typeOf(ty_op.operand); - const error_union = try self.resolveInst(ty_op.operand); - break :result try self.errUnionPayload(error_union, error_union_ty); + + break :result try self.errUnionPayload(error_union_bind, error_union_ty, inst); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -2038,17 +2133,18 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if 
(self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); + const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); - const stack_offset = @intCast(u32, try self.allocMem(inst, abi_size, abi_align)); + const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); const err_off = errUnionErrorOffset(payload_ty, self.target.*); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), operand); - try self.genSetStack(Type.anyerror, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); + try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), .{ .immediate = 0 }); break :result MCValue{ .stack_offset = stack_offset }; }; @@ -2060,16 +2156,17 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.getRefType(ty_op.ty); + const error_ty = error_union_ty.errorUnionSet(); const payload_ty = error_union_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); if (!payload_ty.hasRuntimeBitsIgnoreComptime()) break :result operand; const abi_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); const abi_align = error_union_ty.abiAlignment(self.target.*); - const stack_offset = @intCast(u32, try self.allocMem(inst, abi_size, abi_align)); + const stack_offset = @intCast(u32, try self.allocMem(abi_size, abi_align, inst)); const payload_off = errUnionPayloadOffset(payload_ty, self.target.*); const 
err_off = errUnionErrorOffset(payload_ty, self.target.*); - try self.genSetStack(Type.anyerror, stack_offset - @intCast(u32, err_off), operand); + try self.genSetStack(error_ty, stack_offset - @intCast(u32, err_off), operand); try self.genSetStack(payload_ty, stack_offset - @intCast(u32, payload_off), .undef); break :result MCValue{ .stack_offset = stack_offset }; @@ -2108,7 +2205,6 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(ty_op.operand); switch (mcv) { - .dead, .unreach => unreachable, .register => unreachable, // a slice doesn't fit in one register .stack_argument_offset => |off| { break :result MCValue{ .stack_argument_offset = off + 4 }; @@ -2119,7 +2215,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { .memory => |addr| { break :result MCValue{ .memory = addr + 4 }; }, - else => return self.fail("TODO implement slice_len for {}", .{mcv}), + else => unreachable, // invalid MCValue for a slice } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2134,7 +2230,12 @@ fn airPtrSliceLenPtr(self: *Self, inst: Air.Inst.Index) !void { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off - 4 }; }, - else => return self.fail("TODO implement ptr_slice_len_ptr for {}", .{mcv}), + else => { + const lhs_bind: ReadArg.Bind = .{ .mcv = mcv }; + const rhs_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 4 } }; + + break :result try self.addSub(.add, lhs_bind, rhs_bind, Type.usize, Type.usize, null); + }, } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2149,7 +2250,13 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { .ptr_stack_offset => |off| { break :result MCValue{ .ptr_stack_offset = off }; }, - else => return self.fail("TODO implement ptr_slice_ptr_ptr for {}", .{mcv}), + else => { + if (self.reuseOperand(inst, ty_op.operand, 0, mcv)) { 
+ break :result mcv; + } else { + break :result MCValue{ .register = try self.copyToTmpRegister(Type.usize, mcv) }; + } + }, } }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -3891,7 +3998,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { .register => |reg| blk: { const abi_size = @intCast(u32, ty.abiSize(self.target.*)); const abi_align = ty.abiAlignment(self.target.*); - const stack_offset = try self.allocMem(inst, abi_size, abi_align); + const stack_offset = try self.allocMem(abi_size, abi_align, inst); try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); break :blk MCValue{ .stack_offset = stack_offset }; @@ -3978,7 +4085,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. const ret_ty = fn_ty.fnReturnType(); const ret_abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); const ret_abi_align = @intCast(u32, ret_ty.abiAlignment(self.target.*)); - const stack_offset = try self.allocMem(inst, ret_abi_size, ret_abi_align); + const stack_offset = try self.allocMem(ret_abi_size, ret_abi_align, inst); var ptr_ty_payload: Type.Payload.ElemType = .{ .base = .{ .tag = .single_mut_pointer }, @@ -4166,14 +4273,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void { const abi_size = @intCast(u32, ret_ty.abiSize(self.target.*)); const abi_align = ret_ty.abiAlignment(self.target.*); - // This is essentially allocMem without the - // instruction tracking - if (abi_align > self.stack_align) - self.stack_align = abi_align; - // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align) + abi_size; - self.next_stack_offset = offset; - self.max_end_stack = @maximum(self.max_end_stack, self.next_stack_offset); + const offset = try self.allocMem(abi_size, abi_align, null); const tmp_mcv = MCValue{ .stack_offset = offset }; try self.load(tmp_mcv, ptr, ptr_ty); @@ -4545,20 +4645,28 @@ fn isNonNull(self: *Self, ty: Type, 
operand: MCValue) !MCValue { return MCValue{ .cpsr_flags = .ne }; } -fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { - const error_type = ty.errorUnionSet(); +fn isErr( + self: *Self, + error_union_bind: ReadArg.Bind, + error_union_ty: Type, +) !MCValue { + const error_type = error_union_ty.errorUnionSet(); if (error_type.errorSetIsEmpty()) { return MCValue{ .immediate = 0 }; // always false } - const error_mcv = try self.errUnionErr(operand, ty); + const error_mcv = try self.errUnionErr(error_union_bind, error_union_ty, null); _ = try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .neq); return MCValue{ .cpsr_flags = .hi }; } -fn isNonErr(self: *Self, ty: Type, operand: MCValue) !MCValue { - const is_err_result = try self.isErr(ty, operand); +fn isNonErr( + self: *Self, + error_union_bind: ReadArg.Bind, + error_union_ty: Type, +) !MCValue { + const is_err_result = try self.isErr(error_union_bind, error_union_ty); switch (is_err_result) { .cpsr_flags => |cond| { assert(cond == .hi); @@ -4637,9 +4745,10 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); - break :result try self.isErr(ty, operand); + const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; + const error_union_ty = self.air.typeOf(un_op); + + break :result try self.isErr(error_union_bind, error_union_ty); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -4658,7 +4767,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { } }; try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isErr(ptr_ty.elemType(), operand); + break :result try self.isErr(.{ .mcv = operand }, ptr_ty.elemType()); }; return self.finishAir(inst, result, 
.{ un_op, .none, .none }); } @@ -4666,9 +4775,10 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); - break :result try self.isNonErr(ty, operand); + const error_union_bind: ReadArg.Bind = .{ .inst = un_op }; + const error_union_ty = self.air.typeOf(un_op); + + break :result try self.isNonErr(error_union_bind, error_union_ty); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -4687,7 +4797,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { } }; try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isNonErr(ptr_ty.elemType(), operand); + break :result try self.isNonErr(.{ .mcv = operand }, ptr_ty.elemType()); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -5620,7 +5730,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_ty = ptr_ty.childType(); const array_len = @intCast(u32, array_ty.arrayLen()); - const stack_offset = try self.allocMem(inst, 8, 8); + const stack_offset = try self.allocMem(8, 8, inst); try self.genSetStack(ptr_ty, stack_offset, ptr); try self.genSetStack(Type.initTag(.usize), stack_offset - 4, .{ .immediate = array_len }); break :result MCValue{ .stack_offset = stack_offset }; @@ -5774,15 +5884,24 @@ fn airTry(self: *Self, inst: Air.Inst.Index) !void { const extra = self.air.extraData(Air.Try, pl_op.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; const result: MCValue = result: { + const error_union_bind: ReadArg.Bind = .{ .inst = pl_op.operand }; const error_union_ty = self.air.typeOf(pl_op.operand); - const error_union = try self.resolveInst(pl_op.operand); - const is_err_result = try self.isErr(error_union_ty, error_union); + 
const error_union_size = @intCast(u32, error_union_ty.abiSize(self.target.*)); + const error_union_align = error_union_ty.abiAlignment(self.target.*); + + // The error union will die in the body. However, we need the + // error union after the body in order to extract the payload + // of the error union, so we create a copy of it + const error_union_copy = try self.allocMem(error_union_size, error_union_align, null); + try self.genSetStack(error_union_ty, error_union_copy, try error_union_bind.resolveToMcv(self)); + + const is_err_result = try self.isErr(error_union_bind, error_union_ty); const reloc = try self.condBr(is_err_result); try self.genBody(body); - try self.performReloc(reloc); - break :result try self.errUnionPayload(error_union, error_union_ty); + + break :result try self.errUnionPayload(.{ .mcv = .{ .stack_offset = error_union_copy } }, error_union_ty, null); }; return self.finishAir(inst, result, .{ pl_op.operand, .none, .none }); } diff --git a/test/behavior/alignof.zig b/test/behavior/alignof.zig index b065e4b87f..d6491ff22e 100644 --- a/test/behavior/alignof.zig +++ b/test/behavior/alignof.zig @@ -13,7 +13,6 @@ const Foo = struct { test "@alignOf(T) before referencing T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; comptime try expect(@alignOf(Foo) != maxInt(usize)); if (native_arch == .x86_64) { comptime try expect(@alignOf(Foo) == 4); diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 4d8b176fbf..6661bc2783 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -1060,7 +1060,6 @@ comptime { test "switch inside @as gets correct type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var a: u32 = 0; var b: [2]u32 = undefined; diff --git 
a/test/behavior/cast.zig b/test/behavior/cast.zig index 4c6dab2dbb..675017961d 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -523,7 +523,6 @@ fn testCastConstArrayRefToConstSlice() !void { test "peer type resolution: error and [N]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK")); comptime try expect(mem.eql(u8, try testPeerErrorAndArray(0), "OK")); @@ -548,7 +547,6 @@ fn testPeerErrorAndArray2(x: u8) anyerror![]const u8 { test "single-item pointer of array to slice to unknown length pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO try testCastPtrOfArrayToSliceAndPtr(); comptime try testCastPtrOfArrayToSliceAndPtr(); @@ -649,7 +647,6 @@ test "@floatCast cast down" { test "peer type resolution: unreachable, error set, unreachable" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const Error = error{ FileDescriptorAlreadyPresentInSet, @@ -964,7 +961,6 @@ test "peer cast [:x]T to [*:x]T" { test "peer type resolution implicit cast to return type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -984,7 +980,6 @@ test "peer type resolution implicit cast to return type" { test "peer type resolution implicit cast to variable type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if 
(builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { @@ -1026,7 +1021,6 @@ test "cast between C pointer with different but compatible types" { test "peer type resolve string lit with sentinel-terminated mutable slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO var array: [4:0]u8 = undefined; @@ -1079,7 +1073,6 @@ test "comptime float casts" { test "pointer reinterpret const float to int" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO // The hex representation is 0x3fe3333333333303. const float: f64 = 5.99999999999994648725e-01; diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig index 8fa5fc503e..f9c0073d34 100644 --- a/test/behavior/comptime_memory.zig +++ b/test/behavior/comptime_memory.zig @@ -87,7 +87,6 @@ fn bigToNativeEndian(comptime T: type, v: T) T { test "type pun endianness" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO comptime { const StructOfBytes = extern struct { x: [4]u8 }; @@ -398,7 +397,6 @@ test "offset field ptr by enclosing array element size" { test "accessing reinterpreted memory of parent object" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = extern struct { a: f32, b: [4]u8, diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 28c8785e64..938c966d22 100644 --- 
a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -606,7 +606,6 @@ fn testEnumWithSpecifiedTagValues(x: MultipleChoice) !void { } test "enum with specified tag values" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; try testEnumWithSpecifiedTagValues(MultipleChoice.C); @@ -614,7 +613,6 @@ test "enum with specified tag values" { } test "non-exhaustive enum" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const S = struct { @@ -677,7 +675,6 @@ test "empty non-exhaustive enum" { } test "single field non-exhaustive enum" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const S = struct { @@ -741,7 +738,6 @@ test "cast integer literal to enum" { } test "enum with specified and unspecified tag values" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; try testEnumWithSpecifiedAndUnspecifiedTagValues(MultipleChoice2.D); @@ -925,7 +921,6 @@ test "enum literal casting to tagged union" { const Bar = enum { A, B, C, D }; test "enum literal casting to error union with payload enum" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var bar: error{B}!Bar = undefined; diff --git a/test/behavior/error.zig b/test/behavior/error.zig index 684b01a797..d483afc300 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -222,7 +222,6 @@ fn testErrorSetType() !void { test "explicit error set cast" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // 
TODO try testExplicitErrorSetCast(Set1.A); @@ -282,7 +281,6 @@ test "inferred empty error set comptime catch" { } test "error union peer type resolution" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testErrorUnionPeerTypeResolution(1); @@ -327,7 +325,6 @@ fn foo3(b: usize) Error!usize { test "error: Infer error set from literals" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO _ = nullLiteral("n") catch |err| handleErrors(err); _ = floatLiteral("n") catch |err| handleErrors(err); @@ -700,7 +697,6 @@ test "ret_ptr doesn't cause own inferred error set to be resolved" { test "simple else prong allowed even when all errors handled" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index bc1c3628d7..142b08810a 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -69,7 +69,6 @@ fn constExprEvalOnSingleExprBlocksFn(x: i32, b: bool) i32 { } test "constant expressions" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO var array: [array_size]u8 = undefined; @@ -565,7 +564,6 @@ test "inlined loop has array literal with elided runtime scope on first iteratio } test "ptr to local array argument at comptime" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO comptime { diff --git a/test/behavior/merge_error_sets.zig b/test/behavior/merge_error_sets.zig index 725ff5f9da..9033e7795a 100644 
--- a/test/behavior/merge_error_sets.zig +++ b/test/behavior/merge_error_sets.zig @@ -12,7 +12,6 @@ fn foo() C!void { } test "merge error sets" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (foo()) { diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 5e0498342c..fad6cd643f 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -28,7 +28,6 @@ comptime { test "slicing" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var array: [20]i32 = undefined; @@ -283,7 +282,6 @@ test "slice type with custom alignment" { test "obtaining a null terminated slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // here we have a normal array var buf: [50]u8 = undefined; diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 12c874f8ba..06e3cacbd9 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -104,7 +104,6 @@ fn testMutation(foo: *StructFoo) void { test "struct byval assign" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var foo1: StructFoo = undefined; var foo2: StructFoo = undefined; diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index d218fb6bc6..29dcd8491a 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -490,7 +490,6 @@ test "switch prongs with error set cases make a new error set type for capture v } test "return result loc and then switch with range implicit casted to error union" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // 
TODO const S = struct { diff --git a/test/behavior/this.zig b/test/behavior/this.zig index 71a083d2f0..527fff53fe 100644 --- a/test/behavior/this.zig +++ b/test/behavior/this.zig @@ -25,7 +25,6 @@ test "this refer to module call private fn" { } test "this refer to container" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var pt: Point(i32) = undefined; diff --git a/test/behavior/try.zig b/test/behavior/try.zig index b0559d4549..59309e53d0 100644 --- a/test/behavior/try.zig +++ b/test/behavior/try.zig @@ -3,7 +3,6 @@ const builtin = @import("builtin"); const expect = std.testing.expect; test "try on error union" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; try tryOnErrorUnionImpl(); diff --git a/test/behavior/while.zig b/test/behavior/while.zig index 62d5bf90fa..333ed1bd77 100644 --- a/test/behavior/while.zig +++ b/test/behavior/while.zig @@ -175,7 +175,6 @@ test "while with optional as condition with else" { test "while with error union condition" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; numbers_left = 10; From 25729d6155682933d7ab3aa30c7e060519b2f4e1 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Thu, 1 Sep 2022 16:51:42 +0200 Subject: [PATCH 10/30] stage2 ARM: fix multiple uses of reuseOperand - add missing checks whether destination fits into the operand - remove reuseOperand invocations from airIsNullPtr and similar functions as we need to load the operands into temporary locations --- src/arch/arm/CodeGen.zig | 265 ++++++++++++++++++--------------------- 1 file changed, 122 insertions(+), 143 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 2f7028c565..5951434e20 
100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -936,35 +936,34 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); + return self.allocMem(abi_size, abi_align, inst); } -fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = self.air.typeOfIndex(inst); +fn allocRegOrMem(self: *Self, elem_ty: Type, reg_ok: bool, maybe_inst: ?Air.Inst.Index) !MCValue { const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse { const mod = self.bin_file.options.module.?; return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)}); }; const abi_align = elem_ty.abiAlignment(self.target.*); - if (abi_align > self.stack_align) - self.stack_align = abi_align; if (reg_ok) { // Make sure the type can fit in a register before we try to allocate one. const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); if (abi_size <= ptr_bytes) { - if (self.register_manager.tryAllocReg(inst, gp)) |reg| { + if (self.register_manager.tryAllocReg(maybe_inst, gp)) |reg| { return MCValue{ .register = reg }; } } } - const stack_offset = try self.allocMem(abi_size, abi_align, inst); + + const stack_offset = try self.allocMem(abi_size, abi_align, maybe_inst); return MCValue{ .stack_offset = stack_offset }; } pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { - const stack_mcv = try self.allocRegOrMem(inst, false); + const stack_mcv = try self.allocRegOrMem(self.air.typeOfIndex(inst), false, inst); log.debug("spilling {} (%{d}) to stack mcv {any}", .{ reg, inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); @@ -985,12 +984,13 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void /// occupied fn spillCompareFlagsIfOccupied(self: *Self) !void { if (self.cpsr_flags_inst) |inst_to_save| { + const 
ty = self.air.typeOfIndex(inst_to_save); const mcv = self.getResolvedInstValue(inst_to_save); const new_mcv = switch (mcv) { - .cpsr_flags => try self.allocRegOrMem(inst_to_save, true), + .cpsr_flags => try self.allocRegOrMem(ty, true, inst_to_save), .register_c_flag, .register_v_flag, - => try self.allocRegOrMem(inst_to_save, false), + => try self.allocRegOrMem(ty, false, inst_to_save), else => unreachable, // mcv doesn't occupy the compare flags }; @@ -1121,10 +1121,11 @@ fn truncRegister( }); } +/// Asserts that both operand_ty and dest_ty are integer types fn trunc( self: *Self, maybe_inst: ?Air.Inst.Index, - operand: MCValue, + operand_bind: ReadArg.Bind, operand_ty: Type, dest_ty: Type, ) !MCValue { @@ -1132,39 +1133,38 @@ fn trunc( const info_b = dest_ty.intInfo(self.target.*); if (info_b.bits <= 32) { - const operand_reg = switch (operand) { - .register => |r| r, - else => operand_reg: { - if (info_a.bits <= 32) { - break :operand_reg try self.copyToTmpRegister(operand_ty, operand); - } else { - return self.fail("TODO load least significant word into register", .{}); - } - }, + if (info_a.bits > 32) { + return self.fail("TODO load least significant word into register", .{}); + } + + var operand_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = operand_ty, .bind = operand_bind, .class = gp, .reg = &operand_reg }, }; - const operand_reg_lock = self.register_manager.lockReg(operand_reg); - defer if (operand_reg_lock) |reg| self.register_manager.unlockReg(reg); - - const dest_reg = if (maybe_inst) |inst| blk: { - const ty_op = self.air.instructions.items(.data)[inst].ty_op; - - if (operand == .register and self.reuseOperand(inst, ty_op.operand, 0, operand)) { - break :blk operand_reg; - } else { - break :blk try self.register_manager.allocReg(inst, gp); - } - } else try self.register_manager.allocReg(null, gp); + const write_args = [_]WriteArg{ + .{ .ty = dest_ty, .bind = .none, .class = gp, .reg = 
&dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + } else null, + ); switch (info_b.bits) { 32 => { try self.genSetReg(operand_ty, dest_reg, .{ .register = operand_reg }); - return MCValue{ .register = dest_reg }; }, else => { try self.truncRegister(operand_reg, dest_reg, info_b.signedness, info_b.bits); - return MCValue{ .register = dest_reg }; }, } + + return MCValue{ .register = dest_reg }; } else { return self.fail("TODO: truncate to ints > 32 bits", .{}); } @@ -1172,12 +1172,12 @@ fn trunc( fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand = try self.resolveInst(ty_op.operand); + const operand_bind: ReadArg.Bind = .{ .inst = ty_op.operand }; const operand_ty = self.air.typeOf(ty_op.operand); const dest_ty = self.air.typeOfIndex(inst); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { - break :blk try self.trunc(inst, operand, operand_ty, dest_ty); + break :blk try self.trunc(inst, operand_bind, operand_ty, dest_ty); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -2334,7 +2334,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { break :result dst_mcv; }, else => { - const dest = try self.allocRegOrMem(inst, true); + const dest = try self.allocRegOrMem(self.air.typeOfIndex(inst), true, inst); const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; const index_bind: ReadArg.Bind = .{ .mcv = index_mcv }; @@ -2583,16 +2583,18 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { if (self.liveness.isUnused(inst) and !is_volatile) break :result MCValue.dead; - const dst_mcv: MCValue = blk: { - if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { + const dest_mcv: MCValue = blk: { + const ptr_fits_dest = elem_ty.abiSize(self.target.*) <= 4; + if (ptr_fits_dest and self.reuseOperand(inst, ty_op.operand, 0, ptr)) 
{ // The MCValue that holds the pointer can be re-used as the value. break :blk ptr; } else { - break :blk try self.allocRegOrMem(inst, true); + break :blk try self.allocRegOrMem(elem_ty, true, inst); } }; - try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); - break :result dst_mcv; + try self.load(dest_mcv, ptr, self.air.typeOf(ty_op.operand)); + + break :result dest_mcv; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } @@ -4615,36 +4617,84 @@ fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, .unreach, .{ .none, .none, .none }); } -fn isNull(self: *Self, ty: Type, operand: MCValue) !MCValue { - if (ty.isPtrLikeOptional()) { - assert(ty.abiSize(self.target.*) == 4); +fn isNull( + self: *Self, + operand_bind: ReadArg.Bind, + operand_ty: Type, +) !MCValue { + if (operand_ty.isPtrLikeOptional()) { + assert(operand_ty.abiSize(self.target.*) == 4); - const reg_mcv: MCValue = switch (operand) { - .register => operand, - else => .{ .register = try self.copyToTmpRegister(ty, operand) }, - }; - - _ = try self.addInst(.{ - .tag = .cmp, - .data = .{ .r_op_cmp = .{ - .rn = reg_mcv.register, - .op = Instruction.Operand.fromU32(0).?, - } }, - }); - - return MCValue{ .cpsr_flags = .eq }; + const imm_bind: ReadArg.Bind = .{ .mcv = .{ .immediate = 0 } }; + return self.cmp(operand_bind, imm_bind, Type.usize, .eq); } else { return self.fail("TODO implement non-pointer optionals", .{}); } } -fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue { - const is_null_result = try self.isNull(ty, operand); +fn isNonNull( + self: *Self, + operand_bind: ReadArg.Bind, + operand_ty: Type, +) !MCValue { + const is_null_result = try self.isNull(operand_bind, operand_ty); assert(is_null_result.cpsr_flags == .eq); return MCValue{ .cpsr_flags = .ne }; } +fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if 
(self.liveness.isUnused(inst)) .dead else result: { + const operand_bind: ReadArg.Bind = .{ .inst = un_op }; + const operand_ty = self.air.typeOf(un_op); + + break :result try self.isNull(operand_bind, operand_ty); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); +} + +fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const ptr_ty = self.air.typeOf(un_op); + const elem_ty = ptr_ty.elemType(); + + const operand = try self.allocRegOrMem(elem_ty, true, null); + try self.load(operand, operand_ptr, ptr_ty); + + break :result try self.isNull(.{ .mcv = operand }, elem_ty); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); +} + +fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_bind: ReadArg.Bind = .{ .inst = un_op }; + const operand_ty = self.air.typeOf(un_op); + + break :result try self.isNonNull(operand_bind, operand_ty); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); +} + +fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const ptr_ty = self.air.typeOf(un_op); + const elem_ty = ptr_ty.elemType(); + + const operand = try self.allocRegOrMem(elem_ty, true, null); + try self.load(operand, operand_ptr, ptr_ty); + + break :result try self.isNonNull(.{ .mcv = operand }, elem_ty); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); +} + fn isErr( self: *Self, error_union_bind: ReadArg.Bind, @@ -4657,8 +4707,7 @@ fn 
isErr( } const error_mcv = try self.errUnionErr(error_union_bind, error_union_ty, null); - _ = try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .neq); - return MCValue{ .cpsr_flags = .hi }; + return try self.cmp(.{ .mcv = error_mcv }, .{ .mcv = .{ .immediate = 0 } }, error_type, .gt); } fn isNonErr( @@ -4680,68 +4729,6 @@ fn isNonErr( } } -fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[inst].un_op; - - try self.spillCompareFlagsIfOccupied(); - self.cpsr_flags_inst = inst; - - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); - break :result try self.isNull(ty, operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - -fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[inst].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; - try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isNull(ptr_ty.elemType(), operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - -fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[inst].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand = try self.resolveInst(un_op); - const ty = self.air.typeOf(un_op); - break :result try self.isNonNull(ty, operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - -fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { - const un_op = self.air.instructions.items(.data)[inst].un_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { - const operand_ptr = try self.resolveInst(un_op); - const ptr_ty = self.air.typeOf(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; - try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isNonNull(ptr_ty.elemType(), operand); - }; - return self.finishAir(inst, result, .{ un_op, .none, .none }); -} - fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { @@ -4758,16 +4745,12 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.air.typeOf(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; + const elem_ty = ptr_ty.elemType(); + + const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isErr(.{ .mcv = operand }, ptr_ty.elemType()); + + break :result try self.isErr(.{ .mcv = operand }, elem_ty); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -4788,16 +4771,12 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const operand_ptr = try self.resolveInst(un_op); const ptr_ty = self.air.typeOf(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } - }; + const elem_ty = ptr_ty.elemType(); + + const operand = try self.allocRegOrMem(elem_ty, true, null); try self.load(operand, operand_ptr, ptr_ty); - break :result try self.isNonErr(.{ .mcv = operand }, ptr_ty.elemType()); + + break :result try self.isNonErr(.{ .mcv = operand }, elem_ty); }; return self.finishAir(inst, result, .{ un_op, .none, .none }); } @@ -5010,7 +4989,7 @@ fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { .none, .dead, .unreach => unreachable, .register, .stack_offset, .memory => operand_mcv, .immediate, .stack_argument_offset, .cpsr_flags => blk: { - const new_mcv = try self.allocRegOrMem(block, true); + const new_mcv = try self.allocRegOrMem(self.air.typeOfIndex(block), true, block); try self.setRegOrMem(self.air.typeOfIndex(block), new_mcv, operand_mcv); break :blk new_mcv; }, From 3794f2c493c9744e19cd7df23c3d4b32565aaa96 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 4 Sep 2022 09:00:14 +0200 Subject: [PATCH 11/30] stage2 ARM: implement struct_field_val for registers --- src/arch/arm/CodeGen.zig | 37 ++++++++++++++++++++++++++++- test/behavior/array.zig | 2 -- test/behavior/basic.zig | 2 -- test/behavior/bitcast.zig | 1 - test/behavior/enum.zig | 1 - test/behavior/eval.zig | 1 - test/behavior/for.zig | 1 - test/behavior/pointers.zig | 1 - test/behavior/ptrcast.zig | 3 --- test/behavior/sizeof_and_typeof.zig | 2 -- test/behavior/struct.zig | 7 ------ test/behavior/switch.zig | 1 - test/behavior/usingnamespace.zig | 1 - 13 files changed, 36 insertions(+), 24 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 5951434e20..7de0b6ac22 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2739,6 +2739,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); const 
struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); + const struct_field_ty = struct_ty.structFieldType(index); switch (mcv) { .dead, .unreach => unreachable, @@ -2776,11 +2777,45 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { } else { // Copy to new register const dest_reg = try self.register_manager.allocReg(null, gp); - try self.genSetReg(struct_ty.structFieldType(index), dest_reg, field); + try self.genSetReg(struct_field_ty, dest_reg, field); break :result MCValue{ .register = dest_reg }; } }, + .register => { + var operand_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = struct_ty, .bind = .{ .mcv = mcv }, .class = gp, .reg = &operand_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = struct_field_ty, .bind = .none, .class = gp, .reg = &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + ReuseMetadata{ + .corresponding_inst = inst, + .operand_mapping = &.{0}, + }, + ); + + const field_bit_offset = struct_field_offset * 8; + const field_bit_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)) * 8; + + _ = try self.addInst(.{ + .tag = if (struct_field_ty.isSignedInt()) Mir.Inst.Tag.sbfx else .ubfx, + .data = .{ .rr_lsb_width = .{ + .rd = dest_reg, + .rn = operand_reg, + .lsb = @intCast(u5, field_bit_offset), + .width = @intCast(u6, field_bit_size), + } }, + }); + + break :result MCValue{ .register = dest_reg }; + }, else => return self.fail("TODO implement codegen struct_field_val for {}", .{mcv}), } }; diff --git a/test/behavior/array.zig b/test/behavior/array.zig index b99ac27651..1e5e848c09 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -175,7 +175,6 @@ test "nested arrays of integers" { test "implicit comptime in array type size" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var arr: 
[plusOne(10)]bool = undefined; try expect(arr.len == 11); @@ -484,7 +483,6 @@ test "sentinel element count towards the ABI size calculation" { test "zero-sized array with recursive type definition" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const U = struct { fn foo(comptime T: type, comptime n: usize) type { diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 6661bc2783..a8909df107 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -465,7 +465,6 @@ fn nine() u8 { test "struct inside function" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; try testStructInFn(); comptime try testStructInFn(); @@ -514,7 +513,6 @@ var global_foo: *i32 = undefined; test "peer result location with typed parent, runtime condition, comptime prongs" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { fn doTheTest(arg: i32) i32 { diff --git a/test/behavior/bitcast.zig b/test/behavior/bitcast.zig index 27a0692a44..3a7719191d 100644 --- a/test/behavior/bitcast.zig +++ b/test/behavior/bitcast.zig @@ -138,7 +138,6 @@ test "@bitCast extern structs at runtime and comptime" { if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const Full = extern struct { number: u16, diff --git a/test/behavior/enum.zig b/test/behavior/enum.zig index 938c966d22..e2645058f7 100644 --- a/test/behavior/enum.zig +++ b/test/behavior/enum.zig @@ -1127,7 +1127,6 @@ test "tag name functions are 
unique" { test "size of enum with only one tag which has explicit integer tag type" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const E = enum(u8) { nope = 10 }; diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 142b08810a..fb744612ad 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -954,7 +954,6 @@ test "const local with comptime init through array init" { test "closure capture type of runtime-known parameter" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn b(c: anytype) !void { diff --git a/test/behavior/for.zig b/test/behavior/for.zig index da6f0717ae..20a88a3131 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -213,7 +213,6 @@ test "for on slice with allowzero ptr" { test "else continue outer for" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var i: usize = 6; var buf: [5]u8 = undefined; diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index dcdea1ff80..6206f22a45 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -66,7 +66,6 @@ test "initialize const optional C pointer to null" { test "assigning integer to C pointer" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var x: i32 = 0; diff --git a/test/behavior/ptrcast.zig b/test/behavior/ptrcast.zig index c827cb6ef7..21e8b544a8 100644 --- a/test/behavior/ptrcast.zig +++ b/test/behavior/ptrcast.zig @@ -4,7 +4,6 @@ const expect = std.testing.expect; const 
native_endian = builtin.target.cpu.arch.endian(); test "reinterpret bytes as integer with nonzero offset" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testReinterpretBytesAsInteger(); @@ -39,7 +38,6 @@ fn testReinterpretWithOffsetAndNoWellDefinedLayout() !void { } test "reinterpret bytes inside auto-layout struct as integer with nonzero offset" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try testReinterpretStructWrappedBytesAsInteger(); @@ -179,7 +177,6 @@ test "lower reinterpreted comptime field ptr" { } test "reinterpret struct field at comptime" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const numNative = comptime Bytes.init(0x12345678); diff --git a/test/behavior/sizeof_and_typeof.zig b/test/behavior/sizeof_and_typeof.zig index ab2d59bf83..748fefa695 100644 --- a/test/behavior/sizeof_and_typeof.zig +++ b/test/behavior/sizeof_and_typeof.zig @@ -18,7 +18,6 @@ test "@sizeOf on compile-time types" { } test "@TypeOf() with multiple arguments" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; { @@ -77,7 +76,6 @@ const P = packed struct { }; test "@offsetOf" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // Packed structs have fixed memory layout diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig index 06e3cacbd9..12d45be9ae 100644 --- a/test/behavior/struct.zig +++ b/test/behavior/struct.zig @@ -10,7 +10,6 @@ top_level_field: i32, test "top level fields" { if 
(builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var instance = @This(){ .top_level_field = 1234, @@ -239,7 +238,6 @@ test "usingnamespace within struct scope" { test "struct field init with catch" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const S = struct { fn doTheTest() !void { @@ -280,7 +278,6 @@ const Val = struct { test "struct point to self" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var root: Node = undefined; root.val.x = 1; @@ -296,7 +293,6 @@ test "struct point to self" { test "void struct fields" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const foo = VoidStructFieldsFoo{ .a = void{}, @@ -760,7 +756,6 @@ test "packed struct with u0 field access" { } test "access to global struct fields" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO @@ -1259,7 +1254,6 @@ test "typed init through error unions and optionals" { test "initialize struct with empty literal" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { x: i32 = 1234 }; var s: S = .{}; @@ -1361,7 +1355,6 @@ test "store to comptime field" { test "struct field init value is size of the struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const 
namespace = struct { const S = extern struct { diff --git a/test/behavior/switch.zig b/test/behavior/switch.zig index 29dcd8491a..9552ea5008 100644 --- a/test/behavior/switch.zig +++ b/test/behavior/switch.zig @@ -348,7 +348,6 @@ test "switch on const enum with var" { } test "anon enum literal used in switch on union enum" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const Foo = union(enum) { diff --git a/test/behavior/usingnamespace.zig b/test/behavior/usingnamespace.zig index 426f0aa6b9..83f720ff85 100644 --- a/test/behavior/usingnamespace.zig +++ b/test/behavior/usingnamespace.zig @@ -58,7 +58,6 @@ test "two files usingnamespace import each other" { } test { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const AA = struct { From a0a7d15142cfffbab934860064a44b7615f9dd55 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Sun, 4 Sep 2022 22:28:59 +0200 Subject: [PATCH 12/30] stage2 ARM: support larger function stacks This is done by introducing a new Mir pseudo-instruction --- src/arch/arm/CodeGen.zig | 12 ++++------ src/arch/arm/Emit.zig | 52 ++++++++++++++++++++++++++++++++++++++++ src/arch/arm/Mir.zig | 9 +++++++ test/behavior/eval.zig | 1 - 4 files changed, 65 insertions(+), 9 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 7de0b6ac22..3b378af581 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -488,14 +488,10 @@ fn gen(self: *Self) !void { const aligned_total_stack_end = mem.alignForwardGeneric(u32, total_stack_size, self.stack_align); const stack_size = aligned_total_stack_end - self.saved_regs_stack_space; self.max_end_stack = stack_size; - if (Instruction.Operand.fromU32(stack_size)) |op| { - self.mir_instructions.set(sub_reloc, .{ - .tag = .sub, - .data = .{ .rr_op = .{ .rd = .sp, 
.rn = .sp, .op = op } }, - }); - } else { - return self.failSymbol("TODO ARM: allow larger stacks", .{}); - } + self.mir_instructions.set(sub_reloc, .{ + .tag = .sub_sp_scratch_r0, + .data = .{ .imm32 = stack_size }, + }); _ = try self.addInst(.{ .tag = .dbg_epilogue_begin, diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index 8770ef1a24..188f5a5cfe 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -11,6 +11,7 @@ const link = @import("../../link.zig"); const Module = @import("../../Module.zig"); const Type = @import("../../type.zig").Type; const ErrorMsg = Module.ErrorMsg; +const Target = std.Target; const assert = std.debug.assert; const DW = std.dwarf; const leb128 = std.leb; @@ -93,6 +94,8 @@ pub fn emitMir( .sub => try emit.mirDataProcessing(inst), .subs => try emit.mirDataProcessing(inst), + .sub_sp_scratch_r0 => try emit.mirSubStackPointer(inst), + .asr => try emit.mirShift(inst), .lsl => try emit.mirShift(inst), .lsr => try emit.mirShift(inst), @@ -190,6 +193,24 @@ fn instructionSize(emit: *Emit, inst: Mir.Inst.Index) usize { .dbg_epilogue_begin, .dbg_prologue_end, => return 0, + + .sub_sp_scratch_r0 => { + const imm32 = emit.mir.instructions.items(.data)[inst].imm32; + + if (imm32 == 0) { + return 0 * 4; + } else if (Instruction.Operand.fromU32(imm32) != null) { + // sub + return 1 * 4; + } else if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) { + // movw; movt; sub + return 3 * 4; + } else { + // mov; orr; orr; orr; sub + return 5 * 4; + } + }, + else => return 4, } } @@ -427,6 +448,37 @@ fn mirDataProcessing(emit: *Emit, inst: Mir.Inst.Index) !void { } } +fn mirSubStackPointer(emit: *Emit, inst: Mir.Inst.Index) !void { + const tag = emit.mir.instructions.items(.tag)[inst]; + const cond = emit.mir.instructions.items(.cond)[inst]; + const imm32 = emit.mir.instructions.items(.data)[inst].imm32; + + switch (tag) { + .sub_sp_scratch_r0 => { + if (imm32 == 0) return; + + const operand = 
Instruction.Operand.fromU32(imm32) orelse blk: { + const scratch: Register = .r0; + + if (Target.arm.featureSetHas(emit.target.cpu.features, .has_v7)) { + try emit.writeInstruction(Instruction.movw(cond, scratch, @truncate(u16, imm32))); + try emit.writeInstruction(Instruction.movt(cond, scratch, @truncate(u16, imm32 >> 16))); + } else { + try emit.writeInstruction(Instruction.mov(cond, scratch, Instruction.Operand.imm(@truncate(u8, imm32), 0))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 8), 12))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 16), 8))); + try emit.writeInstruction(Instruction.orr(cond, scratch, scratch, Instruction.Operand.imm(@truncate(u8, imm32 >> 24), 4))); + } + + break :blk Instruction.Operand.reg(scratch, Instruction.Operand.Shift.none); + }; + + try emit.writeInstruction(Instruction.sub(cond, .sp, .sp, operand)); + }, + else => unreachable, + } +} + fn mirShift(emit: *Emit, inst: Mir.Inst.Index) !void { const tag = emit.mir.instructions.items(.tag)[inst]; const cond = emit.mir.instructions.items(.cond)[inst]; diff --git a/src/arch/arm/Mir.zig b/src/arch/arm/Mir.zig index 45f89b8120..38cf4da3fd 100644 --- a/src/arch/arm/Mir.zig +++ b/src/arch/arm/Mir.zig @@ -111,6 +111,11 @@ pub const Inst = struct { strh, /// Subtract sub, + /// Pseudo-instruction: Subtract 32-bit immediate from stack + /// + /// r0 can be used by Emit as a scratch register for loading + /// the immediate + sub_sp_scratch_r0, /// Subtract, update condition flags subs, /// Supervisor Call @@ -144,6 +149,10 @@ pub const Inst = struct { /// /// Used by e.g. svc imm24: u24, + /// A 32-bit immediate value. + /// + /// Used by e.g. sub_sp_scratch_r0 + imm32: u32, /// Index into `extra`. Meaning of what can be found there is context-dependent. /// /// Used by e.g. 
load_memory diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index fb744612ad..0c07a7b5bb 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -1333,7 +1333,6 @@ test "lazy sizeof is resolved in division" { } test "lazy value is resolved as slice operand" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const A = struct { a: u32 }; From b976997e16835e822ef9400973ac12a20e3d0705 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Tue, 6 Sep 2022 12:34:27 +0200 Subject: [PATCH 13/30] stage2 ARM: implement ptr_elem_val --- src/arch/arm/CodeGen.zig | 156 ++++++++++++++-------------- test/behavior/basic.zig | 1 - test/behavior/cast.zig | 3 - test/behavior/const_slice_child.zig | 1 - test/behavior/eval.zig | 5 - test/behavior/generics.zig | 1 - test/behavior/pointers.zig | 4 - test/behavior/union.zig | 1 - 8 files changed, 78 insertions(+), 94 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 3b378af581..857c49fd78 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2258,89 +2258,84 @@ fn airPtrSlicePtrPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } +fn ptrElemVal( + self: *Self, + ptr_bind: ReadArg.Bind, + index_bind: ReadArg.Bind, + ptr_ty: Type, + maybe_inst: ?Air.Inst.Index, +) !MCValue { + const elem_ty = ptr_ty.childType(); + const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); + + switch (elem_size) { + 1, 4 => { + var base_reg: Register = undefined; + var index_reg: Register = undefined; + var dest_reg: Register = undefined; + + const read_args = [_]ReadArg{ + .{ .ty = ptr_ty, .bind = ptr_bind, .class = gp, .reg = &base_reg }, + .{ .ty = Type.usize, .bind = index_bind, .class = gp, .reg = &index_reg }, + }; + const write_args = [_]WriteArg{ + .{ .ty = elem_ty, .bind = .none, .class = gp, .reg 
= &dest_reg }, + }; + try self.allocRegs( + &read_args, + &write_args, + if (maybe_inst) |inst| .{ + .corresponding_inst = inst, + .operand_mapping = &.{ 0, 1 }, + } else null, + ); + + const tag: Mir.Inst.Tag = switch (elem_size) { + 1 => .ldrb, + 4 => .ldr, + else => unreachable, + }; + const shift: u5 = switch (elem_size) { + 1 => 0, + 4 => 2, + else => unreachable, + }; + + _ = try self.addInst(.{ + .tag = tag, + .data = .{ .rr_offset = .{ + .rt = dest_reg, + .rn = base_reg, + .offset = .{ .offset = Instruction.Offset.reg(index_reg, .{ .lsl = shift }) }, + } }, + }); + + return MCValue{ .register = dest_reg }; + }, + else => { + const addr = try self.ptrArithmetic(.ptr_add, ptr_bind, index_bind, ptr_ty, Type.usize, null); + + const dest = try self.allocRegOrMem(elem_ty, true, maybe_inst); + try self.load(dest, addr, ptr_ty); + return dest; + }, + } +} + fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { - const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; - - if (!is_volatile and self.liveness.isUnused(inst)) return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); - const result: MCValue = result: { - const slice_mcv = try self.resolveInst(bin_op.lhs); - - // TODO optimize for the case where the index is a constant, - // i.e. 
index_mcv == .immediate - const index_mcv = try self.resolveInst(bin_op.rhs); - const index_is_register = index_mcv == .register; - - const slice_ty = self.air.typeOf(bin_op.lhs); - const elem_ty = slice_ty.childType(); - const elem_size = @intCast(u32, elem_ty.abiSize(self.target.*)); - + const slice_ty = self.air.typeOf(bin_op.lhs); + const result: MCValue = if (!slice_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { var buf: Type.SlicePtrFieldTypeBuffer = undefined; - const slice_ptr_field_type = slice_ty.slicePtrFieldType(&buf); - - const index_lock: ?RegisterLock = if (index_is_register) - self.register_manager.lockRegAssumeUnused(index_mcv.register) - else - null; - defer if (index_lock) |reg| self.register_manager.unlockReg(reg); + const ptr_ty = slice_ty.slicePtrFieldType(&buf); + const slice_mcv = try self.resolveInst(bin_op.lhs); const base_mcv = slicePtr(slice_mcv); - switch (elem_size) { - 1, 4 => { - const base_reg = switch (base_mcv) { - .register => |r| r, - else => try self.copyToTmpRegister(slice_ptr_field_type, base_mcv), - }; - const base_reg_lock = self.register_manager.lockRegAssumeUnused(base_reg); - defer self.register_manager.unlockReg(base_reg_lock); + const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; + const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; - const dst_reg = try self.register_manager.allocReg(inst, gp); - const dst_mcv = MCValue{ .register = dst_reg }; - const dst_reg_lock = self.register_manager.lockRegAssumeUnused(dst_reg); - defer self.register_manager.unlockReg(dst_reg_lock); - - const index_reg: Register = switch (index_mcv) { - .register => |reg| reg, - else => try self.copyToTmpRegister(Type.usize, index_mcv), - }; - const index_reg_lock = self.register_manager.lockReg(index_reg); - defer if (index_reg_lock) |lock| self.register_manager.unlockReg(lock); - - const tag: Mir.Inst.Tag = switch (elem_size) { - 1 => .ldrb, - 4 => .ldr, - else => unreachable, - }; - const shift: u5 = switch 
(elem_size) { - 1 => 0, - 4 => 2, - else => unreachable, - }; - - _ = try self.addInst(.{ - .tag = tag, - .data = .{ .rr_offset = .{ - .rt = dst_reg, - .rn = base_reg, - .offset = .{ .offset = Instruction.Offset.reg(index_reg, .{ .lsl = shift }) }, - } }, - }); - - break :result dst_mcv; - }, - else => { - const dest = try self.allocRegOrMem(self.air.typeOfIndex(inst), true, inst); - - const base_bind: ReadArg.Bind = .{ .mcv = base_mcv }; - const index_bind: ReadArg.Bind = .{ .mcv = index_mcv }; - - const addr = try self.ptrArithmetic(.ptr_add, base_bind, index_bind, slice_ptr_field_type, Type.usize, null); - try self.load(dest, addr, slice_ptr_field_type); - - break :result dest; - }, - } + break :result try self.ptrElemVal(base_bind, index_bind, ptr_ty, inst); }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } @@ -2371,9 +2366,14 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { } fn airPtrElemVal(self: *Self, inst: Air.Inst.Index) !void { - const is_volatile = false; // TODO const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (!is_volatile and self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement ptr_elem_val for {}", .{self.target.cpu.arch}); + const ptr_ty = self.air.typeOf(bin_op.lhs); + const result: MCValue = if (!ptr_ty.isVolatilePtr() and self.liveness.isUnused(inst)) .dead else result: { + const base_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + + break :result try self.ptrElemVal(base_bind, index_bind, ptr_ty, inst); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index a8909df107..d073bd9316 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -641,7 +641,6 @@ test "global constant is loaded with a runtime-known index" { test "multiline string literal is null terminated" { if 
(builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const s1 = \\one diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 675017961d..dac3c12b0d 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -576,7 +576,6 @@ fn testCastPtrOfArrayToSliceAndPtr() !void { test "cast *[1][*]const u8 to [*]const ?[*]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const window_name = [1][*]const u8{"window name"}; @@ -919,7 +918,6 @@ test "peer cast *[N:x]T to *[N]T" { test "peer cast [*:x]T to [*]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { @@ -1004,7 +1002,6 @@ test "variable initialization uses result locations properly with regards to the test "cast between C pointer with different but compatible types" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn foo(arg: [*]c_ushort) u16 { diff --git a/test/behavior/const_slice_child.zig b/test/behavior/const_slice_child.zig index 2006d6c280..5a6525d152 100644 --- a/test/behavior/const_slice_child.zig +++ b/test/behavior/const_slice_child.zig @@ -9,7 +9,6 @@ var argv: [*]const [*]const u8 = undefined; test "const slice child" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return 
error.SkipZigTest; // TODO const strs = [_][*]const u8{ "one", "two", "three" }; argv = &strs; diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 0c07a7b5bb..373e4e33c6 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -137,7 +137,6 @@ test "pointer to type" { test "a type constructed in a global expression" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var l: List = undefined; l.array[0] = 10; @@ -804,7 +803,6 @@ test "array concatenation sets the sentinel - value" { test "array concatenation sets the sentinel - pointer" { if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var a = [2]u3{ 1, 7 }; @@ -1071,7 +1069,6 @@ test "comptime break operand passing through runtime switch converted to runtime test "no dependency loop for alignment of self struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -1108,7 +1105,6 @@ test "no dependency loop for alignment of self struct" { test "no dependency loop for alignment of self bare union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -1145,7 +1141,6 @@ test "no dependency loop for alignment of self bare union" { test "no dependency loop for alignment of self tagged union" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_arm) 
return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/generics.zig b/test/behavior/generics.zig index ba4bca0c1a..f8c19ea416 100644 --- a/test/behavior/generics.zig +++ b/test/behavior/generics.zig @@ -91,7 +91,6 @@ fn max_f64(a: f64, b: f64) f64 { test "type constructed by comptime function call" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var l: SimpleList(10) = undefined; diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 6206f22a45..28be72cf76 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -18,7 +18,6 @@ fn testDerefPtr() !void { test "pointer arithmetic" { if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var ptr: [*]const u8 = "abcd"; @@ -280,7 +279,6 @@ test "array initialization types" { test "null terminated pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { @@ -298,7 +296,6 @@ test "null terminated pointer" { test "allow any sentinel" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { @@ -314,7 +311,6 @@ test "allow any sentinel" { test "pointer sentinel with enums" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == 
.stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/union.zig b/test/behavior/union.zig index b94034adf4..ddad27e150 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -92,7 +92,6 @@ const FooExtern = extern union { }; test "basic extern unions" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var foo = FooExtern{ .int = 1 }; From 94499898e5cd31209ddfdae3f0c9b418b7f67e60 Mon Sep 17 00:00:00 2001 From: joachimschmidt557 Date: Fri, 9 Sep 2022 17:01:09 +0200 Subject: [PATCH 14/30] stage2 ARM: implement basic array_elem_val --- src/arch/arm/CodeGen.zig | 61 +++++++++++++++++++++++++++++++++++++++- test/behavior/array.zig | 3 -- test/behavior/eval.zig | 1 - test/behavior/for.zig | 3 -- test/behavior/slice.zig | 1 - 5 files changed, 60 insertions(+), 9 deletions(-) diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index 857c49fd78..0eeb7a7ded 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -2359,9 +2359,68 @@ fn airSliceElemPtr(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.lhs, extra.rhs, .none }); } +fn arrayElemVal( + self: *Self, + array_bind: ReadArg.Bind, + index_bind: ReadArg.Bind, + array_ty: Type, + maybe_inst: ?Air.Inst.Index, +) InnerError!MCValue { + const elem_ty = array_ty.childType(); + + const mcv = try array_bind.resolveToMcv(self); + switch (mcv) { + .stack_offset, + .memory, + .stack_argument_offset, + => { + const ptr_to_mcv = switch (mcv) { + .stack_offset => |off| MCValue{ .ptr_stack_offset = off }, + .memory => |addr| MCValue{ .immediate = @intCast(u32, addr) }, + .stack_argument_offset => |off| blk: { + const reg = try self.register_manager.allocReg(null, gp); + + _ = try self.addInst(.{ + .tag = .ldr_ptr_stack_argument, + .data = .{ .r_stack_offset = .{ + .rt = reg, + .stack_offset = off, + } }, + }); + + break :blk 
MCValue{ .register = reg }; + }, + else => unreachable, + }; + const ptr_to_mcv_lock: ?RegisterLock = switch (ptr_to_mcv) { + .register => |reg| self.register_manager.lockRegAssumeUnused(reg), + else => null, + }; + defer if (ptr_to_mcv_lock) |lock| self.register_manager.unlockReg(lock); + + const base_bind: ReadArg.Bind = .{ .mcv = ptr_to_mcv }; + + var ptr_ty_payload: Type.Payload.ElemType = .{ + .base = .{ .tag = .single_mut_pointer }, + .data = elem_ty, + }; + const ptr_ty = Type.initPayload(&ptr_ty_payload.base); + + return try self.ptrElemVal(base_bind, index_bind, ptr_ty, maybe_inst); + }, + else => return self.fail("TODO implement array_elem_val for {}", .{mcv}), + } +} + fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const result: MCValue = if (self.liveness.isUnused(inst)) .dead else return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const array_bind: ReadArg.Bind = .{ .inst = bin_op.lhs }; + const index_bind: ReadArg.Bind = .{ .inst = bin_op.rhs }; + const array_ty = self.air.typeOf(bin_op.lhs); + + break :result try self.arrayElemVal(array_bind, index_bind, array_ty, inst); + }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } diff --git a/test/behavior/array.zig b/test/behavior/array.zig index 1e5e848c09..54f87927f5 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -244,7 +244,6 @@ const Sub = struct { b: u8 }; const Str = struct { a: []Sub }; test "set global var array via slice embedded in struct" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO var s = Str{ .a = s_array[0..] 
}; @@ -297,7 +296,6 @@ fn testArrayByValAtComptime(b: [2]u8) u8 { test "comptime evaluating function that takes array by value" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const arr = [_]u8{ 1, 2 }; const x = comptime testArrayByValAtComptime(arr); @@ -426,7 +424,6 @@ test "anonymous literal in array" { test "access the null element of a null terminated array" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig index 373e4e33c6..47d2e4374e 100644 --- a/test/behavior/eval.zig +++ b/test/behavior/eval.zig @@ -336,7 +336,6 @@ fn doesAlotT(comptime T: type, value: usize) T { } test "@setEvalBranchQuota at same scope as generic function call" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO try expect(doesAlotT(u32, 2) == 2); diff --git a/test/behavior/for.zig b/test/behavior/for.zig index 20a88a3131..7f2cd2ab8d 100644 --- a/test/behavior/for.zig +++ b/test/behavior/for.zig @@ -5,7 +5,6 @@ const expectEqual = std.testing.expectEqual; const mem = std.mem; test "continue in for loop" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const array = [_]i32{ 1, 2, 3, 4, 5 }; @@ -130,7 +129,6 @@ test "for with null and T peer types and inferred result location type" { } test "2 break statements and an else" { - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; const S = struct { @@ -177,7 +175,6 @@ fn mangleString(s: []u8) void { } test "for copies its payload" { - if (builtin.zig_backend == 
.stage2_arm) return error.SkipZigTest; // TODO if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO const S = struct { diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index fad6cd643f..b9bae08878 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -268,7 +268,6 @@ fn sliceSum(comptime q: []const u8) i32 { test "slice type with custom alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; const LazilyResolvedType = struct { anything: i32, From 8d44e031618a956a1b31c36b4c096a3000678a6b Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 9 Sep 2022 21:18:39 +0200 Subject: [PATCH 15/30] macho: use globals free list like in COFF linker --- src/link/MachO.zig | 234 +++++++++++++++++++------------- src/link/MachO/Atom.zig | 6 +- src/link/MachO/DebugSymbols.zig | 4 +- src/link/MachO/dead_strip.zig | 4 +- 4 files changed, 150 insertions(+), 98 deletions(-) diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 429bf64eb2..12d8326f35 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -131,17 +131,12 @@ la_symbol_ptr_section_index: ?u8 = null, data_section_index: ?u8 = null, locals: std.ArrayListUnmanaged(macho.nlist_64) = .{}, -globals: std.StringArrayHashMapUnmanaged(SymbolWithLoc) = .{}, -// FIXME Jakub -// TODO storing index into globals might be dangerous if we delete a global -// while not having everything resolved. Actually, perhaps `unresolved` -// should not be stored at the global scope? Is this possible? -// Otherwise, audit if this can be a problem. -// An alternative, which I still need to investigate for perf reasons is to -// store all global names in an adapted with context strtab. 
+globals: std.ArrayListUnmanaged(SymbolWithLoc) = .{}, +resolver: std.StringHashMapUnmanaged(u32) = .{}, unresolved: std.AutoArrayHashMapUnmanaged(u32, bool) = .{}, locals_free_list: std.ArrayListUnmanaged(u32) = .{}, +globals_free_list: std.ArrayListUnmanaged(u32) = .{}, dyld_stub_binder_index: ?u32 = null, dyld_private_atom: ?*Atom = null, @@ -1917,7 +1912,7 @@ fn allocateSpecialSymbols(self: *MachO) !void { "___dso_handle", "__mh_execute_header", }) |name| { - const global = self.globals.get(name) orelse continue; + const global = self.getGlobal(name) orelse continue; if (global.file != null) continue; const sym = self.getSymbolPtr(global); const seg = self.segments.items[self.text_segment_cmd_index.?]; @@ -2074,7 +2069,7 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom { const target_sym = self.getSymbol(target); if (target_sym.undf()) { - const global = self.globals.get(self.getSymbolName(target)).?; + const global = self.getGlobal(self.getSymbolName(target)).?; try atom.bindings.append(gpa, .{ .target = global, .offset = 0, @@ -2106,7 +2101,7 @@ pub fn createTlvPtrAtom(self: *MachO, target: SymbolWithLoc) !*Atom { const target_sym = self.getSymbol(target); assert(target_sym.undf()); - const global = self.globals.get(self.getSymbolName(target)).?; + const global = self.getGlobal(self.getSymbolName(target)).?; try atom.bindings.append(gpa, .{ .target = global, .offset = 0, @@ -2376,7 +2371,7 @@ pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: SymbolWi }); try atom.rebases.append(gpa, 0); - const global = self.globals.get(self.getSymbolName(target)).?; + const global = self.getGlobal(self.getSymbolName(target)).?; try atom.lazy_bindings.append(gpa, .{ .target = global, .offset = 0, @@ -2472,7 +2467,7 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom { fn createTentativeDefAtoms(self: *MachO) !void { const gpa = self.base.allocator; - for (self.globals.values()) |global| { + for (self.globals.items) 
|global| { const sym = self.getSymbolPtr(global); if (!sym.tentative()) continue; @@ -2516,25 +2511,22 @@ fn createTentativeDefAtoms(self: *MachO) !void { fn createMhExecuteHeaderSymbol(self: *MachO) !void { if (self.base.options.output_mode != .Exe) return; - if (self.globals.get("__mh_execute_header")) |global| { + if (self.getGlobal("__mh_execute_header")) |global| { const sym = self.getSymbol(global); if (!sym.undf() and !(sym.pext() or sym.weakDef())) return; } const gpa = self.base.allocator; - const n_strx = try self.strtab.insert(gpa, "__mh_execute_header"); - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = n_strx, + const sym_index = try self.allocateSymbol(); + self.locals.items[sym_index] = .{ + .n_strx = try self.strtab.insert(gpa, "__mh_execute_header"), .n_type = macho.N_SECT | macho.N_EXT, .n_sect = 0, .n_desc = macho.REFERENCED_DYNAMICALLY, .n_value = 0, - }); + }; - const name = try gpa.dupe(u8, "__mh_execute_header"); - const gop = try self.globals.getOrPut(gpa, name); - defer if (gop.found_existing) gpa.free(name); + const gop = try self.getOrPutGlobalPtr("__mh_execute_header"); gop.value_ptr.* = .{ .sym_index = sym_index, .file = null, @@ -2542,25 +2534,24 @@ fn createMhExecuteHeaderSymbol(self: *MachO) !void { } fn createDsoHandleSymbol(self: *MachO) !void { - const global = self.globals.getPtr("___dso_handle") orelse return; + const global = self.getGlobalPtr("___dso_handle") orelse return; const sym = self.getSymbolPtr(global.*); if (!sym.undf()) return; const gpa = self.base.allocator; - const n_strx = try self.strtab.insert(gpa, "___dso_handle"); - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = n_strx, + const sym_index = try self.allocateSymbol(); + self.locals.items[sym_index] = .{ + .n_strx = try self.strtab.insert(gpa, "___dso_handle"), .n_type = macho.N_SECT | macho.N_EXT, .n_sect = 0, .n_desc = macho.N_WEAK_DEF, .n_value = 0, - 
}); + }; global.* = .{ .sym_index = sym_index, .file = null, }; - _ = self.unresolved.swapRemove(@intCast(u32, self.globals.getIndex("___dso_handle").?)); + _ = self.unresolved.swapRemove(self.getGlobalIndex("___dso_handle").?); } fn resolveGlobalSymbol(self: *MachO, current: SymbolWithLoc) !void { @@ -2568,19 +2559,14 @@ fn resolveGlobalSymbol(self: *MachO, current: SymbolWithLoc) !void { const sym = self.getSymbol(current); const sym_name = self.getSymbolName(current); - const name = try gpa.dupe(u8, sym_name); - const global_index = @intCast(u32, self.globals.values().len); - const gop = try self.globals.getOrPut(gpa, name); - defer if (gop.found_existing) gpa.free(name); - + const gop = try self.getOrPutGlobalPtr(sym_name); if (!gop.found_existing) { gop.value_ptr.* = current; if (sym.undf() and !sym.tentative()) { - try self.unresolved.putNoClobber(gpa, global_index, false); + try self.unresolved.putNoClobber(gpa, self.getGlobalIndex(sym_name).?, false); } return; } - const global = gop.value_ptr.*; const global_sym = self.getSymbol(global); @@ -2619,7 +2605,7 @@ fn resolveGlobalSymbol(self: *MachO, current: SymbolWithLoc) !void { } if (sym.undf() and !sym.tentative()) return; - _ = self.unresolved.swapRemove(@intCast(u32, self.globals.getIndex(name).?)); + _ = self.unresolved.swapRemove(self.getGlobalIndex(sym_name).?); gop.value_ptr.* = current; } @@ -2664,7 +2650,7 @@ fn resolveSymbolsInObject(self: *MachO, object_id: u16) !void { const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = object_id }; self.resolveGlobalSymbol(sym_loc) catch |err| switch (err) { error.MultipleSymbolDefinitions => { - const global = self.globals.get(sym_name).?; + const global = self.getGlobal(sym_name).?; log.err("symbol '{s}' defined multiple times", .{sym_name}); if (global.file) |file| { log.err(" first definition in '{s}'", .{self.objects.items[file].name}); @@ -2684,7 +2670,8 @@ fn resolveSymbolsInArchives(self: *MachO) !void { const cpu_arch = 
self.base.options.target.cpu.arch; var next_sym: usize = 0; loop: while (next_sym < self.unresolved.count()) { - const global = self.globals.values()[self.unresolved.keys()[next_sym]]; + const global_index = self.unresolved.keys()[next_sym]; + const global = self.globals.items[global_index]; const sym_name = self.getSymbolName(global); for (self.archives.items) |archive| { @@ -2710,10 +2697,11 @@ fn resolveSymbolsInArchives(self: *MachO) !void { fn resolveSymbolsInDylibs(self: *MachO) !void { if (self.dylibs.items.len == 0) return; + const gpa = self.base.allocator; var next_sym: usize = 0; loop: while (next_sym < self.unresolved.count()) { const global_index = self.unresolved.keys()[next_sym]; - const global = self.globals.values()[global_index]; + const global = self.globals.items[global_index]; const sym = self.getSymbolPtr(global); const sym_name = self.getSymbolName(global); @@ -2722,7 +2710,7 @@ fn resolveSymbolsInDylibs(self: *MachO) !void { const dylib_id = @intCast(u16, id); if (!self.referenced_dylibs.contains(dylib_id)) { - try self.referenced_dylibs.putNoClobber(self.base.allocator, dylib_id, {}); + try self.referenced_dylibs.putNoClobber(gpa, dylib_id, {}); } const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable; @@ -2760,7 +2748,7 @@ fn resolveSymbolsAtLoading(self: *MachO) !void { var next_sym: usize = 0; while (next_sym < self.unresolved.count()) { const global_index = self.unresolved.keys()[next_sym]; - const global = self.globals.values()[global_index]; + const global = self.globals.items[global_index]; const sym = self.getSymbolPtr(global); const sym_name = self.getSymbolName(global); @@ -2800,26 +2788,29 @@ fn resolveDyldStubBinder(self: *MachO) !void { if (self.unresolved.count() == 0) return; // no need for a stub binder if we don't have any imports const gpa = self.base.allocator; - const n_strx = try self.strtab.insert(gpa, "dyld_stub_binder"); - const sym_index = @intCast(u32, self.locals.items.len); - try 
self.locals.append(gpa, .{ - .n_strx = n_strx, + const sym_index = try self.allocateSymbol(); + const sym = &self.locals.items[sym_index]; + const sym_name = "dyld_stub_binder"; + sym.* = .{ + .n_strx = try self.strtab.insert(gpa, sym_name), .n_type = macho.N_UNDF, .n_sect = 0, .n_desc = 0, .n_value = 0, - }); - const sym_name = try gpa.dupe(u8, "dyld_stub_binder"); - const global = SymbolWithLoc{ .sym_index = sym_index, .file = null }; - try self.globals.putNoClobber(gpa, sym_name, global); - const sym = &self.locals.items[sym_index]; + }; + const gop = try self.getOrPutGlobalPtr(sym_name); + gop.value_ptr.* = .{ + .sym_index = sym_index, + .file = null, + }; + const global = gop.value_ptr.*; for (self.dylibs.items) |dylib, id| { if (!dylib.symbols.contains(sym_name)) continue; const dylib_id = @intCast(u16, id); if (!self.referenced_dylibs.contains(dylib_id)) { - try self.referenced_dylibs.putNoClobber(self.base.allocator, dylib_id, {}); + try self.referenced_dylibs.putNoClobber(gpa, dylib_id, {}); } const ordinal = self.referenced_dylibs.getIndex(dylib_id) orelse unreachable; @@ -3050,14 +3041,20 @@ pub fn deinit(self: *MachO) void { self.stubs_free_list.deinit(gpa); self.stubs_table.deinit(gpa); self.strtab.deinit(gpa); + self.locals.deinit(gpa); + self.globals.deinit(gpa); self.locals_free_list.deinit(gpa); + self.globals_free_list.deinit(gpa); self.unresolved.deinit(gpa); - for (self.globals.keys()) |key| { - gpa.free(key); + { + var it = self.resolver.keyIterator(); + while (it.next()) |key_ptr| { + gpa.free(key_ptr.*); + } + self.resolver.deinit(gpa); } - self.globals.deinit(gpa); for (self.objects.items) |*object| { object.deinit(gpa); @@ -3211,6 +3208,29 @@ fn allocateSymbol(self: *MachO) !u32 { return index; } +fn allocateGlobal(self: *MachO) !u32 { + try self.globals.ensureUnusedCapacity(self.base.allocator, 1); + + const index = blk: { + if (self.globals_free_list.popOrNull()) |index| { + log.debug(" (reusing global index {d})", .{index}); + break :blk 
index; + } else { + log.debug(" (allocating symbol index {d})", .{self.globals.items.len}); + const index = @intCast(u32, self.globals.items.len); + _ = self.globals.addOneAssumeCapacity(); + break :blk index; + } + }; + + self.globals.items[index] = .{ + .sym_index = 0, + .file = null, + }; + + return index; +} + pub fn allocateGotEntry(self: *MachO, target: SymbolWithLoc) !u32 { const gpa = self.base.allocator; try self.got_entries.ensureUnusedCapacity(gpa, 1); @@ -3832,7 +3852,7 @@ pub fn updateDeclExports( self.resolveGlobalSymbol(sym_loc) catch |err| switch (err) { error.MultipleSymbolDefinitions => { - const global = self.globals.get(exp_name).?; + const global = self.getGlobal(exp_name).?; if (sym_loc.sym_index != global.sym_index and global.file != null) { _ = try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create( gpa, @@ -3869,11 +3889,13 @@ pub fn deleteExport(self: *MachO, exp: Export) void { }; self.locals_free_list.append(gpa, sym_index) catch {}; - if (self.globals.get(sym_name)) |global| blk: { - if (global.sym_index != sym_index) break :blk; - if (global.file != null) break :blk; - const kv = self.globals.fetchSwapRemove(sym_name); - gpa.free(kv.?.key); + if (self.resolver.fetchRemove(sym_name)) |entry| { + defer gpa.free(entry.key); + self.globals_free_list.append(gpa, entry.value) catch {}; + self.globals.items[entry.value] = .{ + .sym_index = 0, + .file = null, + }; } } @@ -4864,30 +4886,23 @@ pub fn addAtomToSection(self: *MachO, atom: *Atom, sect_id: u8) !void { pub fn getGlobalSymbol(self: *MachO, name: []const u8) !u32 { const gpa = self.base.allocator; + const sym_name = try std.fmt.allocPrint(gpa, "_{s}", .{name}); - const global_index = @intCast(u32, self.globals.values().len); - const gop = try self.globals.getOrPut(gpa, sym_name); - defer if (gop.found_existing) gpa.free(sym_name); + defer gpa.free(sym_name); + const gop = try self.getOrPutGlobalPtr(sym_name); if (gop.found_existing) { - // TODO audit this: can we 
ever reference anything from outside the Zig module? - assert(gop.value_ptr.file == null); return gop.value_ptr.sym_index; } - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = try self.strtab.insert(gpa, sym_name), - .n_type = macho.N_UNDF, - .n_sect = 0, - .n_desc = 0, - .n_value = 0, - }); - gop.value_ptr.* = .{ - .sym_index = sym_index, - .file = null, - }; - try self.unresolved.putNoClobber(gpa, global_index, true); + const sym_index = try self.allocateSymbol(); + const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; + gop.value_ptr.* = sym_loc; + + const sym = self.getSymbolPtr(sym_loc); + sym.n_strx = try self.strtab.insert(gpa, sym_name); + + try self.unresolved.putNoClobber(gpa, self.getGlobalIndex(sym_name).?, true); return sym_index; } @@ -5055,7 +5070,7 @@ fn writeDyldInfoData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void { if (self.base.options.output_mode == .Exe) { for (&[_]SymbolWithLoc{ try self.getEntryPoint(), - self.globals.get("__mh_execute_header").?, + self.getGlobal("__mh_execute_header").?, }) |global| { const sym = self.getSymbol(global); const sym_name = self.getSymbolName(global); @@ -5068,7 +5083,7 @@ fn writeDyldInfoData(self: *MachO, ncmds: *u32, lc_writer: anytype) !void { } } else { assert(self.base.options.output_mode == .Lib); - for (self.globals.values()) |global| { + for (self.globals.items) |global| { const sym = self.getSymbol(global); if (sym.undf()) continue; @@ -5271,9 +5286,9 @@ fn writeFunctionStarts(self: *MachO, ncmds: *u32, lc_writer: anytype) !void { // We need to sort by address first var addresses = std.ArrayList(u64).init(gpa); defer addresses.deinit(); - try addresses.ensureTotalCapacityPrecise(self.globals.count()); + try addresses.ensureTotalCapacityPrecise(self.globals.items.len); - for (self.globals.values()) |global| { + for (self.globals.items) |global| { const sym = self.getSymbol(global); if (sym.undf()) continue; if (sym.n_desc == 
N_DESC_GCED) continue; @@ -5453,7 +5468,7 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx { if (sym.n_desc == N_DESC_GCED) continue; // GCed, skip const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null }; if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip - if (self.globals.contains(self.getSymbolName(sym_loc))) continue; // global symbol is either an export or import, skip + if (self.getGlobal(self.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip try locals.append(sym); } @@ -5463,7 +5478,7 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx { if (sym.n_desc == N_DESC_GCED) continue; // GCed, skip const sym_loc = SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = @intCast(u32, object_id) }; if (self.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip - if (self.globals.contains(self.getSymbolName(sym_loc))) continue; // global symbol is either an export or import, skip + if (self.getGlobal(self.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip var out_sym = sym; out_sym.n_strx = try self.strtab.insert(gpa, self.getSymbolName(sym_loc)); try locals.append(out_sym); @@ -5477,7 +5492,7 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx { var exports = std.ArrayList(macho.nlist_64).init(gpa); defer exports.deinit(); - for (self.globals.values()) |global| { + for (self.globals.items) |global| { const sym = self.getSymbol(global); if (sym.undf()) continue; // import, skip if (sym.n_desc == N_DESC_GCED) continue; // GCed, skip @@ -5491,7 +5506,7 @@ fn writeSymtab(self: *MachO, lc: *macho.symtab_command) !SymtabCtx { var imports_table = std.AutoHashMap(SymbolWithLoc, u32).init(gpa); - for (self.globals.values()) |global| { + for (self.globals.items) |global| { const sym = self.getSymbol(global); if (sym.n_strx == 0) continue; // no name, skip if (!sym.undf()) 
continue; // not an import, skip @@ -5798,6 +5813,43 @@ pub fn getSymbolName(self: *MachO, sym_with_loc: SymbolWithLoc) []const u8 { } } +/// Returns pointer to the global entry for `name` if one exists. +pub fn getGlobalPtr(self: *MachO, name: []const u8) ?*SymbolWithLoc { + const global_index = self.resolver.get(name) orelse return null; + return &self.globals.items[global_index]; +} + +/// Returns the global entry for `name` if one exists. +pub fn getGlobal(self: *const MachO, name: []const u8) ?SymbolWithLoc { + const global_index = self.resolver.get(name) orelse return null; + return self.globals.items[global_index]; +} + +/// Returns the index of the global entry for `name` if one exists. +pub fn getGlobalIndex(self: *const MachO, name: []const u8) ?u32 { + return self.resolver.get(name); +} + +const GetOrPutGlobalPtrResult = struct { + found_existing: bool, + value_ptr: *SymbolWithLoc, +}; + +/// Return pointer to the global entry for `name` if one exists. +/// Puts a new global entry for `name` if one doesn't exist, and +/// returns a pointer to it. +pub fn getOrPutGlobalPtr(self: *MachO, name: []const u8) !GetOrPutGlobalPtrResult { + if (self.getGlobalPtr(name)) |ptr| { + return GetOrPutGlobalPtrResult{ .found_existing = true, .value_ptr = ptr }; + } + const gpa = self.base.allocator; + const global_index = try self.allocateGlobal(); + const global_name = try gpa.dupe(u8, name); + _ = try self.resolver.put(gpa, global_name, global_index); + const ptr = &self.globals.items[global_index]; + return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr }; +} + /// Returns atom if there is an atom referenced by the symbol described by `sym_with_loc` descriptor. /// Returns null on failure. pub fn getAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom { @@ -5834,7 +5886,7 @@ pub fn getTlvPtrAtomForSymbol(self: *MachO, sym_with_loc: SymbolWithLoc) ?*Atom /// Asserts output mode is executable. 
pub fn getEntryPoint(self: MachO) error{MissingMainEntrypoint}!SymbolWithLoc { const entry_name = self.base.options.entry orelse "_main"; - const global = self.globals.get(entry_name) orelse { + const global = self.getGlobal(entry_name) orelse { log.err("entrypoint '{s}' not found", .{entry_name}); return error.MissingMainEntrypoint; }; @@ -6342,9 +6394,9 @@ fn logSymtab(self: *MachO) void { } log.debug("globals table:", .{}); - for (self.globals.keys()) |name, id| { - const value = self.globals.values()[id]; - log.debug(" {s} => %{d} in object({?d})", .{ name, value.sym_index, value.file }); + for (self.globals.items) |global| { + const name = self.getSymbolName(global); + log.debug(" {s} => %{d} in object({?d})", .{ name, global.sym_index, global.file }); } log.debug("GOT entries:", .{}); diff --git a/src/link/MachO/Atom.zig b/src/link/MachO/Atom.zig index dd818ea936..5b242a2013 100644 --- a/src/link/MachO/Atom.zig +++ b/src/link/MachO/Atom.zig @@ -272,7 +272,7 @@ pub fn parseRelocs(self: *Atom, relocs: []align(1) const macho.relocation_info, subtractor = sym_loc; } else { const sym_name = context.macho_file.getSymbolName(sym_loc); - subtractor = context.macho_file.globals.get(sym_name).?; + subtractor = context.macho_file.getGlobal(sym_name).?; } // Verify that *_SUBTRACTOR is followed by *_UNSIGNED. if (relocs.len <= i + 1) { @@ -339,7 +339,7 @@ pub fn parseRelocs(self: *Atom, relocs: []align(1) const macho.relocation_info, break :target sym_loc; } else { const sym_name = context.macho_file.getSymbolName(sym_loc); - break :target context.macho_file.globals.get(sym_name).?; + break :target context.macho_file.getGlobal(sym_name).?; } }; const offset = @intCast(u32, rel.r_address - context.base_offset); @@ -579,7 +579,7 @@ pub fn resolveRelocs(self: *Atom, macho_file: *MachO) !void { // If there is no atom for target, we still need to check for special, atom-less // symbols such as `___dso_handle`. 
const target_name = macho_file.getSymbolName(rel.target); - assert(macho_file.globals.contains(target_name)); + assert(macho_file.getGlobal(target_name) != null); const atomless_sym = macho_file.getSymbol(rel.target); log.debug(" | atomless target '{s}'", .{target_name}); break :blk atomless_sym.n_value; diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index ffff0fe5f8..a991ba8882 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -480,7 +480,7 @@ fn writeSymtab(self: *DebugSymbols, lc: *macho.symtab_command) !void { if (sym.n_desc == MachO.N_DESC_GCED) continue; // GCed, skip const sym_loc = MachO.SymbolWithLoc{ .sym_index = @intCast(u32, sym_id), .file = null }; if (self.base.symbolIsTemp(sym_loc)) continue; // local temp symbol, skip - if (self.base.globals.contains(self.base.getSymbolName(sym_loc))) continue; // global symbol is either an export or import, skip + if (self.base.getGlobal(self.base.getSymbolName(sym_loc)) != null) continue; // global symbol is either an export or import, skip var out_sym = sym; out_sym.n_strx = try self.strtab.insert(gpa, self.base.getSymbolName(sym_loc)); try locals.append(out_sym); @@ -489,7 +489,7 @@ fn writeSymtab(self: *DebugSymbols, lc: *macho.symtab_command) !void { var exports = std.ArrayList(macho.nlist_64).init(gpa); defer exports.deinit(); - for (self.base.globals.values()) |global| { + for (self.base.globals.items) |global| { const sym = self.base.getSymbol(global); if (sym.undf()) continue; // import, skip if (sym.n_desc == MachO.N_DESC_GCED) continue; // GCed, skip diff --git a/src/link/MachO/dead_strip.zig b/src/link/MachO/dead_strip.zig index eb2be6e5fe..c8c4950730 100644 --- a/src/link/MachO/dead_strip.zig +++ b/src/link/MachO/dead_strip.zig @@ -62,7 +62,7 @@ fn collectRoots(roots: *std.AutoHashMap(*Atom, void), macho_file: *MachO) !void else => |other| { assert(other == .Lib); // Add exports as GC roots - for (macho_file.globals.values()) |global| 
{ + for (macho_file.globals.items) |global| { const sym = macho_file.getSymbol(global); if (!sym.sect()) continue; const atom = macho_file.getAtomForSymbol(global) orelse { @@ -77,7 +77,7 @@ fn collectRoots(roots: *std.AutoHashMap(*Atom, void), macho_file: *MachO) !void } // TODO just a temp until we learn how to parse unwind records - if (macho_file.globals.get("___gxx_personality_v0")) |global| { + if (macho_file.getGlobal("___gxx_personality_v0")) |global| { if (macho_file.getAtomForSymbol(global)) |atom| { _ = try roots.getOrPut(atom); log.debug("adding root", .{}); From bac065c7cfb6f3aa0fe1cdf1334adb5a46d9a2af Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 9 Sep 2022 22:29:50 +0200 Subject: [PATCH 16/30] coff: use global accessor abstractions from MachO --- src/link/Coff.zig | 68 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 52 insertions(+), 16 deletions(-) diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 49263df225..4f57154b90 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1423,23 +1423,22 @@ fn resolveGlobalSymbol(self: *Coff, current: SymbolWithLoc) !void { const sym = self.getSymbol(current); const sym_name = self.getSymbolName(current); - const global_index = self.resolver.get(sym_name) orelse { - const name = try gpa.dupe(u8, sym_name); - const global_index = try self.allocateGlobal(); - self.globals.items[global_index] = current; - try self.resolver.putNoClobber(gpa, name, global_index); + const gop = try self.getOrPutGlobalPtr(sym_name); + if (!gop.found_existing) { + gop.value_ptr.* = current; if (sym.section_number == .UNDEFINED) { - try self.unresolved.putNoClobber(gpa, global_index, false); + try self.unresolved.putNoClobber(gpa, self.getGlobalIndex(sym_name).?, false); } return; - }; + } log.debug("TODO finish resolveGlobalSymbols implementation", .{}); if (sym.section_number == .UNDEFINED) return; - _ = self.unresolved.swapRemove(global_index); - self.globals.items[global_index] = current; + _ = 
self.unresolved.swapRemove(self.getGlobalIndex(sym_name).?); + + gop.value_ptr.* = current; } pub fn flush(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !void { @@ -1544,23 +1543,23 @@ pub fn getDeclVAddr( } pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 { - if (self.resolver.get(name)) |global_index| { - return self.globals.items[global_index].sym_index; + const gop = try self.getOrPutGlobalPtr(name); + + if (gop.found_existing) { + return gop.value_ptr.sym_index; } - const gpa = self.base.allocator; const sym_index = try self.allocateSymbol(); - const global_index = try self.allocateGlobal(); const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; - self.globals.items[global_index] = sym_loc; + gop.value_ptr.* = sym_loc; + const gpa = self.base.allocator; const sym_name = try gpa.dupe(u8, name); const sym = self.getSymbolPtr(sym_loc); try self.setSymbolName(sym, sym_name); sym.storage_class = .EXTERNAL; - try self.resolver.putNoClobber(gpa, sym_name, global_index); - try self.unresolved.putNoClobber(gpa, global_index, true); + try self.unresolved.putNoClobber(gpa, self.getGlobalIndex(name).?, true); return sym_index; } @@ -2061,6 +2060,43 @@ pub fn getSymbolName(self: *const Coff, sym_loc: SymbolWithLoc) []const u8 { return self.strtab.get(offset).?; } +/// Returns pointer to the global entry for `name` if one exists. +pub fn getGlobalPtr(self: *Coff, name: []const u8) ?*SymbolWithLoc { + const global_index = self.resolver.get(name) orelse return null; + return &self.globals.items[global_index]; +} + +/// Returns the global entry for `name` if one exists. +pub fn getGlobal(self: *const Coff, name: []const u8) ?SymbolWithLoc { + const global_index = self.resolver.get(name) orelse return null; + return self.globals.items[global_index]; +} + +/// Returns the index of the global entry for `name` if one exists. 
+pub fn getGlobalIndex(self: *const Coff, name: []const u8) ?u32 { + return self.resolver.get(name); +} + +const GetOrPutGlobalPtrResult = struct { + found_existing: bool, + value_ptr: *SymbolWithLoc, +}; + +/// Return pointer to the global entry for `name` if one exists. +/// Puts a new global entry for `name` if one doesn't exist, and +/// returns a pointer to it. +pub fn getOrPutGlobalPtr(self: *Coff, name: []const u8) !GetOrPutGlobalPtrResult { + if (self.getGlobalPtr(name)) |ptr| { + return GetOrPutGlobalPtrResult{ .found_existing = true, .value_ptr = ptr }; + } + const gpa = self.base.allocator; + const global_index = try self.allocateGlobal(); + const global_name = try gpa.dupe(u8, name); + _ = try self.resolver.put(gpa, global_name, global_index); + const ptr = &self.globals.items[global_index]; + return GetOrPutGlobalPtrResult{ .found_existing = false, .value_ptr = ptr }; +} + /// Returns atom if there is an atom referenced by the symbol described by `sym_loc` descriptor. /// Returns null on failure. 
pub fn getAtomForSymbol(self: *Coff, sym_loc: SymbolWithLoc) ?*Atom { From d8f210354577eda1b438d692b28bb3a582913b5a Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 9 Sep 2022 23:30:31 +0200 Subject: [PATCH 17/30] macho+coff: return index into global table from getGlobalSymbol --- src/arch/x86_64/Emit.zig | 17 ++++++++++------- src/link/Coff.zig | 13 ++++++++++--- src/link/MachO.zig | 13 ++++++++++--- 3 files changed, 30 insertions(+), 13 deletions(-) diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index 45e58be972..aeb3f4770e 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -1024,7 +1024,11 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { 0b10 => .imports, else => unreachable, }, - .target = .{ .sym_index = relocation.sym_index, .file = null }, + .target = switch (ops.flags) { + 0b00, 0b01 => .{ .sym_index = relocation.sym_index, .file = null }, + 0b10 => coff_file.getGlobalByIndex(relocation.sym_index), + else => unreachable, + }, .offset = @intCast(u32, end_offset - 4), .addend = 0, .pcrel = true, @@ -1142,12 +1146,10 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { if (emit.bin_file.cast(link.File.MachO)) |macho_file| { // Add relocation to the decl. const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?; + const target = macho_file.getGlobalByIndex(relocation.sym_index); try atom.relocs.append(emit.bin_file.allocator, .{ .offset = offset, - .target = .{ - .sym_index = relocation.sym_index, - .file = null, - }, + .target = target, .addend = 0, .subtractor = null, .pcrel = true, @@ -1157,16 +1159,17 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { } else if (emit.bin_file.cast(link.File.Coff)) |coff_file| { // Add relocation to the decl. 
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?; + const target = coff_file.getGlobalByIndex(relocation.sym_index); try atom.addRelocation(coff_file, .{ .@"type" = .direct, - .target = .{ .sym_index = relocation.sym_index, .file = null }, + .target = target, .offset = offset, .addend = 0, .pcrel = true, .length = 2, }); } else { - return emit.fail("TODO implement call_extern for linking backends different than MachO", .{}); + return emit.fail("TODO implement call_extern for linking backends different than MachO and COFF", .{}); } } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 4f57154b90..ea36c8bc91 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1544,9 +1544,10 @@ pub fn getDeclVAddr( pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 { const gop = try self.getOrPutGlobalPtr(name); + const global_index = self.getGlobalIndex(name).?; if (gop.found_existing) { - return gop.value_ptr.sym_index; + return global_index; } const sym_index = try self.allocateSymbol(); @@ -1559,9 +1560,9 @@ pub fn getGlobalSymbol(self: *Coff, name: []const u8) !u32 { try self.setSymbolName(sym, sym_name); sym.storage_class = .EXTERNAL; - try self.unresolved.putNoClobber(gpa, self.getGlobalIndex(name).?, true); + try self.unresolved.putNoClobber(gpa, global_index, true); - return sym_index; + return global_index; } pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void { @@ -2077,6 +2078,12 @@ pub fn getGlobalIndex(self: *const Coff, name: []const u8) ?u32 { return self.resolver.get(name); } +/// Returns global entry at `index`. 
+pub fn getGlobalByIndex(self: *const Coff, index: u32) SymbolWithLoc { + assert(index < self.globals.items.len); + return self.globals.items[index]; +} + const GetOrPutGlobalPtrResult = struct { found_existing: bool, value_ptr: *SymbolWithLoc, diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 12d8326f35..e5c55fdb8a 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -4890,9 +4890,10 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8) !u32 { const sym_name = try std.fmt.allocPrint(gpa, "_{s}", .{name}); defer gpa.free(sym_name); const gop = try self.getOrPutGlobalPtr(sym_name); + const global_index = self.getGlobalIndex(sym_name).?; if (gop.found_existing) { - return gop.value_ptr.sym_index; + return global_index; } const sym_index = try self.allocateSymbol(); @@ -4902,9 +4903,9 @@ pub fn getGlobalSymbol(self: *MachO, name: []const u8) !u32 { const sym = self.getSymbolPtr(sym_loc); sym.n_strx = try self.strtab.insert(gpa, sym_name); - try self.unresolved.putNoClobber(gpa, self.getGlobalIndex(sym_name).?, true); + try self.unresolved.putNoClobber(gpa, global_index, true); - return sym_index; + return global_index; } fn getSegmentAllocBase(self: MachO, indices: []const ?u8) struct { vmaddr: u64, fileoff: u64 } { @@ -5830,6 +5831,12 @@ pub fn getGlobalIndex(self: *const MachO, name: []const u8) ?u32 { return self.resolver.get(name); } +/// Returns global entry at `index`. 
+pub fn getGlobalByIndex(self: *const MachO, index: u32) SymbolWithLoc { + assert(index < self.globals.items.len); + return self.globals.items[index]; +} + const GetOrPutGlobalPtrResult = struct { found_existing: bool, value_ptr: *SymbolWithLoc, From fc5a6e0e327c1b671aa782d24059e27e5aa55c84 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 10 Sep 2022 00:18:39 +0200 Subject: [PATCH 18/30] x86_64: combine got_load, direct_load and imports_load into linker_load MCV --- src/arch/x86_64/CodeGen.zig | 208 ++++++++++++------------------------ src/arch/x86_64/Emit.zig | 2 +- src/link/Coff.zig | 4 +- 3 files changed, 70 insertions(+), 144 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 25e8695e82..05d7f2b73b 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -128,15 +128,11 @@ pub const MCValue = union(enum) { /// The value is in memory at a hard-coded address. /// If the type is a pointer, it means the pointer address is at this memory location. memory: u64, - /// The value is in memory referenced indirectly via a GOT entry index. - /// If the type is a pointer, it means the pointer is referenced indirectly via GOT. - /// When lowered, linker will emit a relocation of type X86_64_RELOC_GOT. - got_load: u32, - imports_load: u32, - /// The value is in memory referenced directly via symbol index. - /// If the type is a pointer, it means the pointer is referenced directly via symbol index. - /// When lowered, linker will emit a relocation of type X86_64_RELOC_SIGNED. 
- direct_load: u32, + /// The value is in memory but requires a linker relocation fixup: + /// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc) + /// * direct - the value is referenced directly via symbol index index (the linker emits a displacement reloc) + /// * import - the value is referenced indirectly via import entry index (the linker emits an import-type reloc) + linker_load: struct { @"type": enum { got, direct, import }, sym_index: u32 }, /// The value is one of the stack variables. /// If the type is a pointer, it means the pointer address is in the stack at this offset. stack_offset: i32, @@ -150,9 +146,7 @@ pub const MCValue = union(enum) { .memory, .stack_offset, .ptr_stack_offset, - .direct_load, - .got_load, - .imports_load, + .linker_load, => true, else => false, }; @@ -165,26 +159,6 @@ pub const MCValue = union(enum) { }; } - fn isMutable(mcv: MCValue) bool { - return switch (mcv) { - .none => unreachable, - .unreach => unreachable, - .dead => unreachable, - - .immediate, - .memory, - .eflags, - .ptr_stack_offset, - .undef, - .register_overflow, - => false, - - .register, - .stack_offset, - => true, - }; - } - fn isRegister(mcv: MCValue) bool { return switch (mcv) { .register => true, @@ -2307,11 +2281,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { .data = .{ .imm = @bitCast(u32, -off) }, }); }, - .memory, - .got_load, - .direct_load, - .imports_load, - => { + .memory, .linker_load => { try self.loadMemPtrIntoRegister(addr_reg, Type.usize, array); }, else => return self.fail("TODO implement array_elem_val when array is {}", .{array}), @@ -2652,11 +2622,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}), } }, - .memory, - .got_load, - .direct_load, - .imports_load, - => { + .memory, .linker_load => { const reg = try self.copyToTmpRegister(ptr_ty, ptr); try 
self.load(dst_mcv, .{ .register = reg }, ptr_ty); }, @@ -2691,10 +2657,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue) InnerError!void { switch (ptr) { - .got_load, - .direct_load, - .imports_load, - => |sym_index| { + .linker_load => |load_struct| { const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*)); const mod = self.bin_file.options.module.?; const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl); @@ -2702,11 +2665,10 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue fn_owner_decl.link.macho.sym_index else fn_owner_decl.link.coff.sym_index; - const flags: u2 = switch (ptr) { - .got_load => 0b00, - .direct_load => 0b01, - .imports_load => 0b10, - else => unreachable, + const flags: u2 = switch (load_struct.@"type") { + .got => 0b00, + .direct => 0b01, + .import => 0b10, }; _ = try self.addInst(.{ .tag = .lea_pic, @@ -2717,7 +2679,7 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue .data = .{ .relocation = .{ .atom_index = atom_index, - .sym_index = sym_index, + .sym_index = load_struct.sym_index, }, }, }); @@ -2801,9 +2763,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type .register => |src_reg| { try self.genInlineMemcpyRegisterRegister(value_ty, reg, src_reg, 0); }, - .got_load, - .direct_load, - .imports_load, + .linker_load, .memory, .stack_offset, => { @@ -2822,11 +2782,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type }, } }, - .got_load, - .direct_load, - .imports_load, - .memory, - => { + .linker_load, .memory => { const value_lock: ?RegisterLock = switch (value) { .register => |reg| self.register_manager.lockReg(reg), else => null, @@ -2894,11 +2850,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type .register => { return self.store(new_ptr, value, ptr_ty, value_ty); }, - 
.got_load, - .direct_load, - .imports_load, - .memory, - => { + .linker_load, .memory => { if (abi_size <= 8) { const tmp_reg = try self.register_manager.allocReg(null, gp); const tmp_reg_lock = self.register_manager.lockRegAssumeUnused(tmp_reg); @@ -3606,9 +3558,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu }); }, .memory, - .got_load, - .direct_load, - .imports_load, + .linker_load, .eflags, => { assert(abi_size <= 8); @@ -3694,10 +3644,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu => { return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{}); }, - .got_load, - .direct_load, - .imports_load, - => { + .linker_load => { return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{}); }, .eflags => { @@ -3708,10 +3655,7 @@ fn genBinOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MCValu .memory => { return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{}); }, - .got_load, - .direct_load, - .imports_load, - => { + .linker_load => { return self.fail("TODO implement x86 ADD/SUB/CMP destination symbol at index", .{}); }, } @@ -3779,10 +3723,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .memory => { return self.fail("TODO implement x86 multiply source memory", .{}); }, - .got_load, - .direct_load, - .imports_load, - => { + .linker_load => { return self.fail("TODO implement x86 multiply source symbol at index in linker", .{}); }, .eflags => { @@ -3826,10 +3767,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: Type, dst_mcv: MCValue, src_mcv: M .memory, .stack_offset => { return self.fail("TODO implement x86 multiply source memory", .{}); }, - .got_load, - .direct_load, - .imports_load, - => { + .linker_load => { return self.fail("TODO implement x86 multiply source symbol at index in linker", .{}); }, .eflags => { @@ -3840,10 +3778,7 @@ fn genIntMulComplexOpMir(self: *Self, dst_ty: 
Type, dst_mcv: MCValue, src_mcv: M .memory => { return self.fail("TODO implement x86 multiply destination memory", .{}); }, - .got_load, - .direct_load, - .imports_load, - => { + .linker_load => { return self.fail("TODO implement x86 multiply destination symbol at index in linker", .{}); }, } @@ -4006,9 +3941,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. .unreach => unreachable, .dead => unreachable, .memory => unreachable, - .got_load => unreachable, - .direct_load => unreachable, - .imports_load => unreachable, + .linker_load => unreachable, .eflags => unreachable, .register_overflow => unreachable, } @@ -4066,7 +3999,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. const func = func_payload.data; const fn_owner_decl = mod.declPtr(func.owner_decl); try self.genSetReg(Type.initTag(.usize), .rax, .{ - .got_load = fn_owner_decl.link.coff.sym_index, + .linker_load = .{ + .@"type" = .got, + .sym_index = fn_owner_decl.link.coff.sym_index, + }, }); _ = try self.addInst(.{ .tag = .call, @@ -4087,7 +4023,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. } const sym_index = try coff_file.getGlobalSymbol(mem.sliceTo(decl_name, 0)); try self.genSetReg(Type.initTag(.usize), .rax, .{ - .imports_load = sym_index, + .linker_load = .{ + .@"type" = .import, + .sym_index = sym_index, + }, }); _ = try self.addInst(.{ .tag = .call, @@ -4119,7 +4058,12 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
const func = func_payload.data; const fn_owner_decl = mod.declPtr(func.owner_decl); const sym_index = fn_owner_decl.link.macho.sym_index; - try self.genSetReg(Type.initTag(.usize), .rax, .{ .got_load = sym_index }); + try self.genSetReg(Type.initTag(.usize), .rax, .{ + .linker_load = .{ + .@"type" = .got, + .sym_index = sym_index, + }, + }); // callq *%rax _ = try self.addInst(.{ .tag = .call, @@ -4505,11 +4449,7 @@ fn genVarDbgInfo( leb128.writeILEB128(dbg_info.writer(), -off) catch unreachable; dbg_info.items[fixup] += @intCast(u8, dbg_info.items.len - fixup - 2); }, - .memory, - .got_load, - .direct_load, - .imports_load, - => { + .memory, .linker_load => { const ptr_width = @intCast(u8, @divExact(self.target.cpu.arch.ptrBitWidth(), 8)); const is_ptr = switch (tag) { .dbg_var_ptr => true, @@ -4540,10 +4480,11 @@ fn genVarDbgInfo( try dbg_info.append(DW.OP.deref); } switch (mcv) { - .got_load, - .direct_load, - .imports_load, - => |index| try dw.addExprlocReloc(index, offset, is_ptr), + .linker_load => |load_struct| try dw.addExprlocReloc( + load_struct.sym_index, + offset, + is_ptr, + ), else => {}, } }, @@ -5587,11 +5528,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE else => return self.fail("TODO implement inputs on stack for {} with abi size > 8", .{mcv}), } }, - .memory, - .direct_load, - .got_load, - .imports_load, - => { + .memory, .linker_load => { if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); @@ -5835,11 +5772,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue, opts: Inl }, } }, - .memory, - .got_load, - .direct_load, - .imports_load, - => { + .memory, .linker_load => { if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }, opts); @@ -5959,11 +5892,7 @@ fn genInlineMemcpy( const tmp_reg = regs[4].to8(); switch 
(dst_ptr) { - .memory, - .got_load, - .direct_load, - .imports_load, - => { + .memory, .linker_load => { try self.loadMemPtrIntoRegister(dst_addr_reg, Type.usize, dst_ptr); }, .ptr_stack_offset, .stack_offset => |off| { @@ -5992,11 +5921,7 @@ fn genInlineMemcpy( } switch (src_ptr) { - .memory, - .got_load, - .direct_load, - .imports_load, - => { + .memory, .linker_load => { try self.loadMemPtrIntoRegister(src_addr_reg, Type.usize, src_ptr); }, .ptr_stack_offset, .stack_offset => |off| { @@ -6120,11 +6045,7 @@ fn genInlineMemset( const index_reg = regs[1].to64(); switch (dst_ptr) { - .memory, - .got_load, - .direct_load, - .imports_load, - => { + .memory, .linker_load => { try self.loadMemPtrIntoRegister(addr_reg, Type.usize, dst_ptr); }, .ptr_stack_offset, .stack_offset => |off| { @@ -6356,10 +6277,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .data = undefined, }); }, - .direct_load, - .got_load, - .imports_load, - => { + .linker_load => { switch (ty.zigTypeTag()) { .Float => { const base_reg = try self.register_manager.allocReg(null, gp); @@ -6753,11 +6671,7 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void { // TODO Is this the only condition for pointer dereference for memcpy? 
const src: MCValue = blk: { switch (src_ptr) { - .got_load, - .direct_load, - .imports_load, - .memory, - => { + .linker_load, .memory => { const reg = try self.register_manager.allocReg(null, gp); try self.loadMemPtrIntoRegister(reg, src_ty, src_ptr); _ = try self.addInst(.{ @@ -6997,10 +6911,16 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne return MCValue{ .memory = got_addr }; } else if (self.bin_file.cast(link.File.MachO)) |_| { assert(decl.link.macho.sym_index != 0); - return MCValue{ .got_load = decl.link.macho.sym_index }; + return MCValue{ .linker_load = .{ + .@"type" = .got, + .sym_index = decl.link.macho.sym_index, + } }; } else if (self.bin_file.cast(link.File.Coff)) |_| { assert(decl.link.coff.sym_index != 0); - return MCValue{ .got_load = decl.link.coff.sym_index }; + return MCValue{ .linker_load = .{ + .@"type" = .got, + .sym_index = decl.link.coff.sym_index, + } }; } else if (self.bin_file.cast(link.File.Plan9)) |p9| { try p9.seeDecl(decl_index); const got_addr = p9.bases.data + decl.link.plan9.got_index.? 
* ptr_bytes; @@ -7019,9 +6939,15 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue { const vaddr = elf_file.local_symbols.items[local_sym_index].st_value; return MCValue{ .memory = vaddr }; } else if (self.bin_file.cast(link.File.MachO)) |_| { - return MCValue{ .direct_load = local_sym_index }; + return MCValue{ .linker_load = .{ + .@"type" = .direct, + .sym_index = local_sym_index, + } }; } else if (self.bin_file.cast(link.File.Coff)) |_| { - return MCValue{ .direct_load = local_sym_index }; + return MCValue{ .linker_load = .{ + .@"type" = .direct, + .sym_index = local_sym_index, + } }; } else if (self.bin_file.cast(link.File.Plan9)) |_| { return self.fail("TODO lower unnamed const in Plan9", .{}); } else { diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index aeb3f4770e..e99f6ff4f5 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -1021,7 +1021,7 @@ fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { .@"type" = switch (ops.flags) { 0b00 => .got, 0b01 => .direct, - 0b10 => .imports, + 0b10 => .import, else => unreachable, }, .target = switch (ops.flags) { diff --git a/src/link/Coff.zig b/src/link/Coff.zig index ea36c8bc91..013a0c0475 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -127,7 +127,7 @@ pub const Reloc = struct { @"type": enum { got, direct, - imports, + import, }, target: SymbolWithLoc, offset: u32, @@ -141,7 +141,7 @@ pub const Reloc = struct { switch (self.@"type") { .got => return coff_file.getGotAtomForSymbol(self.target), .direct => return coff_file.getAtomForSymbol(self.target), - .imports => return coff_file.getImportAtomForSymbol(self.target), + .import => return coff_file.getImportAtomForSymbol(self.target), } } }; From 8e631ee3e7b4e7b4466c0efafaffb4151447785f Mon Sep 17 00:00:00 2001 From: Evan Haas Date: Thu, 8 Sep 2022 20:19:10 -0700 Subject: [PATCH 19/30] translate-c: Escape non-ASCII characters that appear in macros Macro definitions are simply a slice of 
bytes, which may not be UTF-8 encoded. If they are not UTF-8 encoded, escape non-printable and non-ASCII characters as `\xNN`. Fixes #12784 --- src/translate_c.zig | 20 ++++++++++++++++++-- test/behavior/translate_c_macros.zig | 12 ++++++++++++ test/behavior/translate_c_macros_not_utf8.h | 5 +++++ 3 files changed, 35 insertions(+), 2 deletions(-) create mode 100644 test/behavior/translate_c_macros_not_utf8.h diff --git a/src/translate_c.zig b/src/translate_c.zig index 014f6b1934..f969bf1c8b 100644 --- a/src/translate_c.zig +++ b/src/translate_c.zig @@ -5957,20 +5957,36 @@ fn zigifyEscapeSequences(ctx: *Context, m: *MacroCtx) ![]const u8 { return bytes[0..i]; } +/// non-ASCII characters (c > 127) are also treated as non-printable by fmtSliceEscapeLower. +/// If a C string literal or char literal in a macro is not valid UTF-8, we need to escape +/// non-ASCII characters so that the Zig source we output will itself be UTF-8. +fn escapeUnprintables(ctx: *Context, m: *MacroCtx) ![]const u8 { + const zigified = try zigifyEscapeSequences(ctx, m); + if (std.unicode.utf8ValidateSlice(zigified)) return zigified; + + const formatter = std.fmt.fmtSliceEscapeLower(zigified); + const encoded_size = @intCast(usize, std.fmt.count("{s}", .{formatter})); + var output = try ctx.arena.alloc(u8, encoded_size); + return std.fmt.bufPrint(output, "{s}", .{formatter}) catch |err| switch (err) { + error.NoSpaceLeft => unreachable, + else => |e| return e, + }; +} + fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!Node { const tok = m.next().?; const slice = m.slice(); switch (tok) { .CharLiteral => { if (slice[0] != '\'' or slice[1] == '\\' or slice.len == 3) { - return Tag.char_literal.create(c.arena, try zigifyEscapeSequences(c, m)); + return Tag.char_literal.create(c.arena, try escapeUnprintables(c, m)); } else { const str = try std.fmt.allocPrint(c.arena, "0x{s}", .{std.fmt.fmtSliceHexLower(slice[1 .. 
slice.len - 1])}); return Tag.integer_literal.create(c.arena, str); } }, .StringLiteral => { - return Tag.string_literal.create(c.arena, try zigifyEscapeSequences(c, m)); + return Tag.string_literal.create(c.arena, try escapeUnprintables(c, m)); }, .IntegerLiteral, .FloatLiteral => { return parseCNumLit(c, m); diff --git a/test/behavior/translate_c_macros.zig b/test/behavior/translate_c_macros.zig index 314a9028df..04d217f488 100644 --- a/test/behavior/translate_c_macros.zig +++ b/test/behavior/translate_c_macros.zig @@ -5,6 +5,7 @@ const expectEqual = std.testing.expectEqual; const expectEqualStrings = std.testing.expectEqualStrings; const h = @cImport(@cInclude("behavior/translate_c_macros.h")); +const latin1 = @cImport(@cInclude("behavior/translate_c_macros_not_utf8.h")); test "casting to void with a macro" { h.IGNORE_ME_1(42); @@ -134,3 +135,14 @@ test "string literal macro with embedded tab character" { try expectEqualStrings("hello\t", h.EMBEDDED_TAB); } + +test "string and char literals that are not UTF-8 encoded. Issue #12784" { + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO + if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO + + try expectEqual(@as(u8, '\xA9'), latin1.UNPRINTABLE_CHAR); + try expectEqualStrings("\xA9\xA9\xA9", latin1.UNPRINTABLE_STRING); +} diff --git a/test/behavior/translate_c_macros_not_utf8.h b/test/behavior/translate_c_macros_not_utf8.h new file mode 100644 index 0000000000..0a7fa4cc6b --- /dev/null +++ b/test/behavior/translate_c_macros_not_utf8.h @@ -0,0 +1,5 @@ +// Note: This file is encoded with ISO/IEC 8859-1 (latin1), not UTF-8. 
+// Do not change the encoding + +#define UNPRINTABLE_STRING "©©©" +#define UNPRINTABLE_CHAR '©' From 5b9c5191ab919f4166a9e0c4486bd57bb2533791 Mon Sep 17 00:00:00 2001 From: Jacob Young Date: Fri, 9 Sep 2022 02:28:56 -0400 Subject: [PATCH 20/30] type: print comptime on fn type params This avoids the following confusing error message: error: expected type 'fn(i32, i32) void', found 'fn(i32, i32) void' --- src/type.zig | 3 +++ test/behavior/typename.zig | 4 ++-- .../comptime_param_coersion.zig | 20 +++++++++++++++++++ 3 files changed, 25 insertions(+), 2 deletions(-) create mode 100644 test/cases/compile_errors/comptime_param_coersion.zig diff --git a/src/type.zig b/src/type.zig index 0d48c5e46a..ec7e155d4e 100644 --- a/src/type.zig +++ b/src/type.zig @@ -2042,6 +2042,9 @@ pub const Type = extern union { try writer.writeAll("fn("); for (fn_info.param_types) |param_ty, i| { if (i != 0) try writer.writeAll(", "); + if (fn_info.paramIsComptime(i)) { + try writer.writeAll("comptime "); + } if (std.math.cast(u5, i)) |index| if (@truncate(u1, fn_info.noalias_bits >> index) != 0) { try writer.writeAll("noalias "); }; diff --git a/test/behavior/typename.zig b/test/behavior/typename.zig index 3bc8c58389..63e36488e0 100644 --- a/test/behavior/typename.zig +++ b/test/behavior/typename.zig @@ -122,7 +122,7 @@ test "top level decl" { ); // generic fn try expectEqualStrings( - "fn(type) type", + "fn(comptime type) type", @typeName(@TypeOf(TypeFromFn)), ); } @@ -244,5 +244,5 @@ test "comptime parameters not converted to anytype in function type" { if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO const T = fn (fn (type) void, void) void; - try expectEqualStrings("fn(fn(type) void, void) void", @typeName(T)); + try expectEqualStrings("fn(comptime fn(comptime type) void, void) void", @typeName(T)); } diff --git a/test/cases/compile_errors/comptime_param_coersion.zig b/test/cases/compile_errors/comptime_param_coersion.zig new file mode 100644 index 
0000000000..8441929249 --- /dev/null +++ b/test/cases/compile_errors/comptime_param_coersion.zig @@ -0,0 +1,20 @@ +pub export fn entry() void { + comptime var x: fn (comptime i32, comptime i32) void = undefined; + x = bar; +} +pub export fn entry1() void { + comptime var x: fn (i32, i32) void = undefined; + x = foo; +} + +fn foo(comptime _: i32, comptime _: i32) void {} +fn bar(comptime _: i32, _: i32) void {} + +// error +// backend=stage2 +// target=native +// +// :3:9: error: expected type 'fn(comptime i32, comptime i32) void', found 'fn(comptime i32, i32) void' +// :3:9: note: non-comptime parameter 1 cannot cast into a comptime parameter +// :7:9: error: expected type 'fn(i32, i32) void', found 'fn(comptime i32, comptime i32) void' +// :7:9: note: generic function cannot cast into a non-generic function From 485d8819b33e3fca1eb63b0d109019d0c3fc15fc Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 10 Sep 2022 00:57:54 +0200 Subject: [PATCH 21/30] aarch64: update codegen to using a global index rather than local index --- src/arch/aarch64/CodeGen.zig | 85 +++++++++++++++--------------------- src/arch/aarch64/Emit.zig | 6 +-- 2 files changed, 37 insertions(+), 54 deletions(-) diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index 884fd68d55..a54c8e059c 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -139,21 +139,10 @@ const MCValue = union(enum) { /// If the type is a pointer, it means the pointer address is at /// this memory location. memory: u64, - /// The value is in memory referenced indirectly via a GOT entry - /// index. - /// - /// If the type is a pointer, it means the pointer is referenced - /// indirectly via GOT. When lowered, linker will emit - /// relocations of type ARM64_RELOC_GOT_LOAD_PAGE21 and - /// ARM64_RELOC_GOT_LOAD_PAGEOFF12. - got_load: u32, - /// The value is in memory referenced directly via symbol index. 
- /// - /// If the type is a pointer, it means the pointer is referenced - /// directly via symbol index. When lowered, linker will emit a - /// relocation of type ARM64_RELOC_PAGE21 and - /// ARM64_RELOC_PAGEOFF12. - direct_load: u32, + /// The value is in memory but requires a linker relocation fixup: + /// * got - the value is referenced indirectly via GOT entry index (the linker emits a got-type reloc) + /// * direct - the value is referenced directly via symbol index index (the linker emits a displacement reloc) + linker_load: struct { @"type": enum { got, direct }, sym_index: u32 }, /// The value is one of the stack variables. /// /// If the type is a pointer, it means the pointer address is in @@ -2959,8 +2948,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo .memory, .stack_offset, .stack_argument_offset, - .got_load, - .direct_load, + .linker_load, => { const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr); try self.load(dst_mcv, .{ .register = addr_reg }, ptr_ty); @@ -3197,8 +3185,7 @@ fn store(self: *Self, ptr: MCValue, value: MCValue, ptr_ty: Type, value_ty: Type .memory, .stack_offset, .stack_argument_offset, - .got_load, - .direct_load, + .linker_load, => { const addr_reg = try self.copyToTmpRegister(ptr_ty, ptr); try self.store(.{ .register = addr_reg }, value, ptr_ty, value_ty); @@ -3493,7 +3480,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions. 
const func = func_payload.data; const fn_owner_decl = mod.declPtr(func.owner_decl); try self.genSetReg(Type.initTag(.u64), .x30, .{ - .got_load = fn_owner_decl.link.macho.sym_index, + .linker_load = .{ + .@"type" = .got, + .sym_index = fn_owner_decl.link.macho.sym_index, + }, }); // blr x30 _ = try self.addInst(.{ @@ -4427,8 +4417,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro .register = cond_reg, }); }, - .got_load, - .direct_load, + .linker_load, .memory, .stack_argument_offset, .stack_offset, @@ -4479,13 +4468,10 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro }); }, .memory => |addr| try self.genSetReg(Type.usize, src_reg, .{ .immediate = addr }), - .got_load, - .direct_load, - => |sym_index| { - const tag: Mir.Inst.Tag = switch (mcv) { - .got_load => .load_memory_ptr_got, - .direct_load => .load_memory_ptr_direct, - else => unreachable, + .linker_load => |load_struct| { + const tag: Mir.Inst.Tag = switch (load_struct.@"type") { + .got => .load_memory_ptr_got, + .direct => .load_memory_ptr_direct, }; const mod = self.bin_file.options.module.?; _ = try self.addInst(.{ @@ -4494,7 +4480,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerErro .payload = try self.addExtra(Mir.LoadMemoryPie{ .register = @enumToInt(src_reg), .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index, - .sym_index = sym_index, + .sym_index = load_struct.sym_index, }), }, }); @@ -4594,13 +4580,10 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void }); }, .register_with_overflow => unreachable, // doesn't fit into a register - .got_load, - .direct_load, - => |sym_index| { - const tag: Mir.Inst.Tag = switch (mcv) { - .got_load => .load_memory_got, - .direct_load => .load_memory_direct, - else => unreachable, + .linker_load => |load_struct| { + const tag: Mir.Inst.Tag = switch (load_struct.@"type") { + .got => .load_memory_got, + .direct => 
.load_memory_direct, }; const mod = self.bin_file.options.module.?; _ = try self.addInst(.{ @@ -4609,7 +4592,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void .payload = try self.addExtra(Mir.LoadMemoryPie{ .register = @enumToInt(reg), .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index, - .sym_index = sym_index, + .sym_index = load_struct.sym_index, }), }, }); @@ -4741,8 +4724,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I .register_with_overflow => { return self.fail("TODO implement genSetStackArgument {}", .{mcv}); }, - .got_load, - .direct_load, + .linker_load, .memory, .stack_argument_offset, .stack_offset, @@ -4785,13 +4767,10 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I }); }, .memory => |addr| try self.genSetReg(ptr_ty, src_reg, .{ .immediate = @intCast(u32, addr) }), - .got_load, - .direct_load, - => |sym_index| { - const tag: Mir.Inst.Tag = switch (mcv) { - .got_load => .load_memory_ptr_got, - .direct_load => .load_memory_ptr_direct, - else => unreachable, + .linker_load => |load_struct| { + const tag: Mir.Inst.Tag = switch (load_struct.@"type") { + .got => .load_memory_ptr_got, + .direct => .load_memory_ptr_direct, }; const mod = self.bin_file.options.module.?; _ = try self.addInst(.{ @@ -4800,7 +4779,7 @@ fn genSetStackArgument(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) I .payload = try self.addExtra(Mir.LoadMemoryPie{ .register = @enumToInt(src_reg), .atom_index = mod.declPtr(self.mod_fn.owner_decl).link.macho.sym_index, - .sym_index = sym_index, + .sym_index = load_struct.sym_index, }), }, }); @@ -5107,7 +5086,10 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne // Because MachO is PIE-always-on, we defer memory address resolution until // the linker has enough info to perform relocations. 
assert(decl.link.macho.sym_index != 0); - return MCValue{ .got_load = decl.link.macho.sym_index }; + return MCValue{ .linker_load = .{ + .@"type" = .got, + .sym_index = decl.link.macho.sym_index, + } }; } else if (self.bin_file.cast(link.File.Coff)) |_| { return self.fail("TODO codegen COFF const Decl pointer", .{}); } else if (self.bin_file.cast(link.File.Plan9)) |p9| { @@ -5129,7 +5111,10 @@ fn lowerUnnamedConst(self: *Self, tv: TypedValue) InnerError!MCValue { const vaddr = elf_file.local_symbols.items[local_sym_index].st_value; return MCValue{ .memory = vaddr }; } else if (self.bin_file.cast(link.File.MachO)) |_| { - return MCValue{ .direct_load = local_sym_index }; + return MCValue{ .linker_load = .{ + .@"type" = .direct, + .sym_index = local_sym_index, + } }; } else if (self.bin_file.cast(link.File.Coff)) |_| { return self.fail("TODO lower unnamed const in COFF", .{}); } else if (self.bin_file.cast(link.File.Plan9)) |_| { diff --git a/src/arch/aarch64/Emit.zig b/src/arch/aarch64/Emit.zig index 00a2ff380a..9e243a3f86 100644 --- a/src/arch/aarch64/Emit.zig +++ b/src/arch/aarch64/Emit.zig @@ -681,12 +681,10 @@ fn mirCallExtern(emit: *Emit, inst: Mir.Inst.Index) !void { }; // Add relocation to the decl. 
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?; + const target = macho_file.getGlobalByIndex(relocation.sym_index); try atom.relocs.append(emit.bin_file.allocator, .{ .offset = offset, - .target = .{ - .sym_index = relocation.sym_index, - .file = null, - }, + .target = target, .addend = 0, .subtractor = null, .pcrel = true, From 08248084441497489065d8f66e7f79d5e5115b7c Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 10 Sep 2022 00:58:16 +0200 Subject: [PATCH 22/30] macho: refactor direct use of locals container in favour of helpers --- src/link/MachO.zig | 114 ++++++++++++++++----------------------------- 1 file changed, 39 insertions(+), 75 deletions(-) diff --git a/src/link/MachO.zig b/src/link/MachO.zig index e5c55fdb8a..a6720f8dd3 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -2043,16 +2043,11 @@ fn writeAtomsIncremental(self: *MachO) !void { pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom { const gpa = self.base.allocator; - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = 0, - .n_type = macho.N_SECT, - .n_sect = 0, - .n_desc = 0, - .n_value = 0, - }); - + const sym_index = try self.allocateSymbol(); const atom = try MachO.createEmptyAtom(gpa, sym_index, @sizeOf(u64), 3); + const sym = atom.getSymbolPtr(self); + sym.n_type = macho.N_SECT; + try atom.relocs.append(gpa, .{ .offset = 0, .target = target, @@ -2088,16 +2083,11 @@ pub fn createGotAtom(self: *MachO, target: SymbolWithLoc) !*Atom { pub fn createTlvPtrAtom(self: *MachO, target: SymbolWithLoc) !*Atom { const gpa = self.base.allocator; - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = 0, - .n_type = macho.N_SECT, - .n_sect = 0, - .n_desc = 0, - .n_value = 0, - }); - + const sym_index = try self.allocateSymbol(); const atom = try MachO.createEmptyAtom(gpa, sym_index, @sizeOf(u64), 3); + const sym = atom.getSymbolPtr(self); + sym.n_type = 
macho.N_SECT; + const target_sym = self.getSymbol(target); assert(target_sym.undf()); @@ -2125,15 +2115,10 @@ fn createDyldPrivateAtom(self: *MachO) !void { if (self.dyld_private_atom != null) return; const gpa = self.base.allocator; - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = 0, - .n_type = macho.N_SECT, - .n_sect = 0, - .n_desc = 0, - .n_value = 0, - }); + const sym_index = try self.allocateSymbol(); const atom = try MachO.createEmptyAtom(gpa, sym_index, @sizeOf(u64), 3); + const sym = atom.getSymbolPtr(self); + sym.n_type = macho.N_SECT; self.dyld_private_atom = atom; try self.allocateAtomCommon(atom, self.data_section_index.?); @@ -2158,15 +2143,11 @@ fn createStubHelperPreambleAtom(self: *MachO) !void { .aarch64 => 2, else => unreachable, }; - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = 0, - .n_type = macho.N_SECT, - .n_sect = 0, - .n_desc = 0, - .n_value = 0, - }); + const sym_index = try self.allocateSymbol(); const atom = try MachO.createEmptyAtom(gpa, sym_index, size, alignment); + const sym = atom.getSymbolPtr(self); + sym.n_type = macho.N_SECT; + const dyld_private_sym_index = self.dyld_private_atom.?.sym_index; switch (arch) { .x86_64 => { @@ -2283,15 +2264,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom { .aarch64 => 2, else => unreachable, }; - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = 0, - .n_type = macho.N_SECT, - .n_sect = 0, - .n_desc = 0, - .n_value = 0, - }); + const sym_index = try self.allocateSymbol(); const atom = try MachO.createEmptyAtom(gpa, sym_index, stub_size, alignment); + const sym = atom.getSymbolPtr(self); + sym.n_type = macho.N_SECT; + try atom.relocs.ensureTotalCapacity(gpa, 1); switch (arch) { @@ -2347,15 +2324,11 @@ pub fn createStubHelperAtom(self: *MachO) !*Atom { pub fn createLazyPointerAtom(self: *MachO, stub_sym_index: u32, target: 
SymbolWithLoc) !*Atom { const gpa = self.base.allocator; - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = 0, - .n_type = macho.N_SECT, - .n_sect = 0, - .n_desc = 0, - .n_value = 0, - }); + const sym_index = try self.allocateSymbol(); const atom = try MachO.createEmptyAtom(gpa, sym_index, @sizeOf(u64), 3); + const sym = atom.getSymbolPtr(self); + sym.n_type = macho.N_SECT; + try atom.relocs.append(gpa, .{ .offset = 0, .target = .{ .sym_index = stub_sym_index, .file = null }, @@ -2398,15 +2371,11 @@ pub fn createStubAtom(self: *MachO, laptr_sym_index: u32) !*Atom { .aarch64 => 3 * @sizeOf(u32), else => unreachable, // unhandled architecture type }; - const sym_index = @intCast(u32, self.locals.items.len); - try self.locals.append(gpa, .{ - .n_strx = 0, - .n_type = macho.N_SECT, - .n_sect = 0, - .n_desc = 0, - .n_value = 0, - }); + const sym_index = try self.allocateSymbol(); const atom = try MachO.createEmptyAtom(gpa, sym_index, stub_size, alignment); + const sym = atom.getSymbolPtr(self); + sym.n_type = macho.N_SECT; + switch (arch) { .x86_64 => { // jmp @@ -2518,7 +2487,9 @@ fn createMhExecuteHeaderSymbol(self: *MachO) !void { const gpa = self.base.allocator; const sym_index = try self.allocateSymbol(); - self.locals.items[sym_index] = .{ + const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; + const sym = self.getSymbolPtr(sym_loc); + sym.* = .{ .n_strx = try self.strtab.insert(gpa, "__mh_execute_header"), .n_type = macho.N_SECT | macho.N_EXT, .n_sect = 0, @@ -2527,30 +2498,25 @@ fn createMhExecuteHeaderSymbol(self: *MachO) !void { }; const gop = try self.getOrPutGlobalPtr("__mh_execute_header"); - gop.value_ptr.* = .{ - .sym_index = sym_index, - .file = null, - }; + gop.value_ptr.* = sym_loc; } fn createDsoHandleSymbol(self: *MachO) !void { const global = self.getGlobalPtr("___dso_handle") orelse return; - const sym = self.getSymbolPtr(global.*); - if (!sym.undf()) return; + if 
(!self.getSymbol(global.*).undf()) return; const gpa = self.base.allocator; const sym_index = try self.allocateSymbol(); - self.locals.items[sym_index] = .{ + const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; + const sym = self.getSymbolPtr(sym_loc); + sym.* = .{ .n_strx = try self.strtab.insert(gpa, "___dso_handle"), .n_type = macho.N_SECT | macho.N_EXT, .n_sect = 0, .n_desc = macho.N_WEAK_DEF, .n_value = 0, }; - global.* = .{ - .sym_index = sym_index, - .file = null, - }; + global.* = sym_loc; _ = self.unresolved.swapRemove(self.getGlobalIndex("___dso_handle").?); } @@ -2789,7 +2755,8 @@ fn resolveDyldStubBinder(self: *MachO) !void { const gpa = self.base.allocator; const sym_index = try self.allocateSymbol(); - const sym = &self.locals.items[sym_index]; + const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null }; + const sym = self.getSymbolPtr(sym_loc); const sym_name = "dyld_stub_binder"; sym.* = .{ .n_strx = try self.strtab.insert(gpa, sym_name), @@ -2799,10 +2766,7 @@ fn resolveDyldStubBinder(self: *MachO) !void { .n_value = 0, }; const gop = try self.getOrPutGlobalPtr(sym_name); - gop.value_ptr.* = .{ - .sym_index = sym_index, - .file = null, - }; + gop.value_ptr.* = sym_loc; const global = gop.value_ptr.*; for (self.dylibs.items) |dylib, id| { From a1b8545265b4fb47fe45287655f8885d092aa9df Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 10 Sep 2022 00:59:46 +0200 Subject: [PATCH 23/30] coff: remove unused function --- src/link/Coff/Atom.zig | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/src/link/Coff/Atom.zig b/src/link/Coff/Atom.zig index ffd8fe45e6..39e04b2641 100644 --- a/src/link/Coff/Atom.zig +++ b/src/link/Coff/Atom.zig @@ -111,13 +111,3 @@ pub fn addBaseRelocation(self: *Atom, coff_file: *Coff, offset: u32) !void { } try gop.value_ptr.append(gpa, offset); } - -pub fn addBinding(self: *Atom, coff_file: *Coff, target: SymbolWithLoc) !void { - const gpa = coff_file.base.allocator; - log.debug(" 
(adding binding to target %{d} in %{d})", .{ target.sym_index, self.sym_index }); - const gop = try coff_file.bindings.getOrPut(gpa, self); - if (!gop.found_existing) { - gop.value_ptr.* = .{}; - } - try gop.value_ptr.append(gpa, target); -} From 4fd4c733d4676ee50667ca895259b277966f15c6 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Sat, 10 Sep 2022 09:23:26 +0200 Subject: [PATCH 24/30] x86_64: pass more behavior tests --- test/behavior/pointers.zig | 9 --------- 1 file changed, 9 deletions(-) diff --git a/test/behavior/pointers.zig b/test/behavior/pointers.zig index 28be72cf76..91d398e84d 100644 --- a/test/behavior/pointers.zig +++ b/test/behavior/pointers.zig @@ -17,7 +17,6 @@ fn testDerefPtr() !void { } test "pointer arithmetic" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var ptr: [*]const u8 = "abcd"; @@ -64,7 +63,6 @@ test "initialize const optional C pointer to null" { } test "assigning integer to C pointer" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; var x: i32 = 0; @@ -81,8 +79,6 @@ test "assigning integer to C pointer" { } test "C pointer comparison and arithmetic" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; - const S = struct { fn doTheTest() !void { var ptr1: [*c]u32 = 0; @@ -148,7 +144,6 @@ test "peer type resolution with C pointer and const pointer" { } test "implicit casting between C pointer and optional non-C pointer" { - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; @@ -279,7 +274,6 @@ test "array initialization types" { test "null terminated pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) 
return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -296,7 +290,6 @@ test "null terminated pointer" { test "allow any sentinel" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { @@ -311,7 +304,6 @@ test "allow any sentinel" { test "pointer sentinel with enums" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { const Number = enum { @@ -332,7 +324,6 @@ test "pointer sentinel with enums" { test "pointer sentinel with optional element" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO const S = struct { fn doTheTest() !void { From 81939a4939638fb296bc874afcef2f0f141f5c0c Mon Sep 17 00:00:00 2001 From: Loris Cro Date: Sat, 10 Sep 2022 17:31:15 +0200 Subject: [PATCH 25/30] autodoc: remove unnecessary string copy --- src/autodoc/render_source.zig | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/autodoc/render_source.zig b/src/autodoc/render_source.zig index ceba230276..aa9eca7e95 100644 --- a/src/autodoc/render_source.zig +++ b/src/autodoc/render_source.zig @@ -137,7 +137,7 @@ pub fn genHtml( ); const source = try src.getSource(allocator); - try tokenizeAndPrintRaw(allocator, out, source.bytes); + try tokenizeAndPrintRaw(out, source.bytes); try out.writeAll( \\ \\ @@ -150,13 +150,9 @@ const end_line = "\n"; var line_counter: usize = 1; pub fn tokenizeAndPrintRaw( - allocator: Allocator, out: anytype, - raw_src: [:0]const u8, + src: [:0]const u8, ) !void { - const src = try allocator.dupeZ(u8, raw_src); - defer allocator.free(src); - line_counter = 1; try out.print("
" ++ start_line, .{line_counter});

From 2a96f80d03c70546e5166e7752ee2b4b64c7cc5f Mon Sep 17 00:00:00 2001
From: Loris Cro 
Date: Sat, 10 Sep 2022 21:08:26 +0200
Subject: [PATCH 26/30] autodoc: reduce json payload size

This commit removes whitespace and changes Decl, AstNode, and Type to be
JSON arrays instead of JSON objects. This change reduces the JSON payload
size for the stdlib from 25 MB to under 10 MB.
---
 lib/docs/main.js | 287 +++++++++++++++++++++++++++++++++++++----------
 src/Autodoc.zig  | 102 +++++++++++++----
 2 files changed, 309 insertions(+), 80 deletions(-)

diff --git a/lib/docs/main.js b/lib/docs/main.js
index b02b061eb4..a4043fb742 100644
--- a/lib/docs/main.js
+++ b/lib/docs/main.js
@@ -203,7 +203,7 @@ var zigAnalysis;
     if (!("type" in resolvedExpr)) {
       return null;
     }
-    let type = zigAnalysis.types[resolvedExpr.type];
+    let type = getType(resolvedExpr.type);
 
     outer: for (let i = 0; i < 10000; i += 1) {
       switch (type.kind) {
@@ -212,7 +212,7 @@ var zigAnalysis;
           let child = type.child;
           let resolvedChild = resolveValue(child);
           if ("type" in resolvedChild) {
-            type = zigAnalysis.types[resolvedChild.type];
+            type = getType(resolvedChild.type);
             continue;
           } else {
             return null;
@@ -276,7 +276,7 @@ var zigAnalysis;
       }
 
       if ("declRef" in value.expr) {
-        value = zigAnalysis.decls[value.expr.declRef].value;
+        value = getDecl(value.expr.declRef).value;
         continue;
       }
 
@@ -430,7 +430,7 @@ var zigAnalysis;
       curNav.pkgObjs.push(pkg);
     }
 
-    let currentType = zigAnalysis.types[pkg.main];
+    let currentType = getType(pkg.main);
     curNav.declObjs = [currentType];
     for (let i = 0; i < curNav.declNames.length; i += 1) {
       let childDecl = findSubDecl(currentType, curNav.declNames[i]);
@@ -440,7 +440,7 @@ var zigAnalysis;
 
       let childDeclValue = resolveValue(childDecl.value).expr;
       if ("type" in childDeclValue) {
-        const t = zigAnalysis.types[childDeclValue.type];
+        const t = getType(childDeclValue.type);
         if (t.kind != typeKinds.Fn) {
           childDecl = t;
         }
@@ -478,7 +478,7 @@ var zigAnalysis;
     }
 
     if (lastIsDecl && last.kind === "const") {
-      let typeObj = zigAnalysis.types[resolveValue(last.value).expr.type];
+      let typeObj = getType(resolveValue(last.value).expr.type);
       if (typeObj && typeObj.kind === typeKinds.Fn) {
         return renderFn(last);
       }
@@ -489,8 +489,8 @@ var zigAnalysis;
   }
     
   function renderDocTest(decl) {
-    if (!("decltest" in decl)) return;
-    const astNode = zigAnalysis.astNodes[decl.decltest];
+    if (!decl.decltest) return;
+    const astNode = getAstNode(decl.decltest);
     domSectDocTests.classList.remove("hidden");
     domDocTestsCode.innerHTML = astNode.code;
   }
@@ -498,7 +498,7 @@ var zigAnalysis;
   function renderUnknownDecl(decl) {
     domDeclNoRef.classList.remove("hidden");
 
-    let docs = zigAnalysis.astNodes[decl.src].docs;
+    let docs = getAstNode(decl.src).docs;
     if (docs != null) {
       domTldDocs.innerHTML = markdown(docs);
     } else {
@@ -509,18 +509,18 @@ var zigAnalysis;
   }
 
   function typeIsErrSet(typeIndex) {
-    let typeObj = zigAnalysis.types[typeIndex];
+    let typeObj = getType(typeIndex);
     return typeObj.kind === typeKinds.ErrorSet;
   }
 
   function typeIsStructWithNoFields(typeIndex) {
-    let typeObj = zigAnalysis.types[typeIndex];
+    let typeObj = getType(typeIndex);
     if (typeObj.kind !== typeKinds.Struct) return false;
     return typeObj.fields.length == 0;
   }
 
   function typeIsGenericFn(typeIndex) {
-    let typeObj = zigAnalysis.types[typeIndex];
+    let typeObj = getType(typeIndex);
     if (typeObj.kind !== typeKinds.Fn) {
       return false;
     }
@@ -532,12 +532,12 @@ var zigAnalysis;
       let last = fnDecl.value.expr.refPath.length - 1;
       let lastExpr = fnDecl.value.expr.refPath[last];
       console.assert("declRef" in lastExpr);
-      fnDecl = zigAnalysis.decls[lastExpr.declRef];
+      fnDecl = getDecl(lastExpr.declRef);
     }
 
     let value = resolveValue(fnDecl.value);
     console.assert("type" in value.expr);
-    let typeObj = zigAnalysis.types[value.expr.type];
+    let typeObj = getType(value.expr.type);
 
     domFnProtoCode.innerHTML = exprName(value.expr, {
       wantHtml: true,
@@ -546,7 +546,7 @@ var zigAnalysis;
     });
 
     let docsSource = null;
-    let srcNode = zigAnalysis.astNodes[fnDecl.src];
+    let srcNode = getAstNode(fnDecl.src);
     if (srcNode.docs != null) {
       docsSource = srcNode.docs;
     }
@@ -557,14 +557,14 @@ var zigAnalysis;
     if ("type" in retExpr) {
       let retIndex = retExpr.type;
       let errSetTypeIndex = null;
-      let retType = zigAnalysis.types[retIndex];
+      let retType = getType(retIndex);
       if (retType.kind === typeKinds.ErrorSet) {
         errSetTypeIndex = retIndex;
       } else if (retType.kind === typeKinds.ErrorUnion) {
         errSetTypeIndex = retType.err.type;
       }
       if (errSetTypeIndex != null) {
-        let errSetType = zigAnalysis.types[errSetTypeIndex];
+        let errSetType = getType(errSetTypeIndex);
         renderErrorSet(errSetType);
       }
     }
@@ -578,7 +578,7 @@ var zigAnalysis;
         let call = zigAnalysis.calls[resolvedGenericRet.expr.call];
         let resolvedFunc = resolveValue({ expr: call.func });
         if (!("type" in resolvedFunc.expr)) return;
-        let callee = zigAnalysis.types[resolvedFunc.expr.type];
+        let callee = getType(resolvedFunc.expr.type);
         if (!callee.generic_ret) return;
         resolvedGenericRet = resolveValue({ expr: callee.generic_ret });
       }
@@ -591,7 +591,7 @@ var zigAnalysis;
       }
 
       if (!("type" in resolvedGenericRet.expr)) return;
-      const genericType = zigAnalysis.types[resolvedGenericRet.expr.type];
+      const genericType = getType(resolvedGenericRet.expr.type);
       if (isContainerType(genericType)) {
         renderContainer(genericType);
       }
@@ -621,7 +621,7 @@ var zigAnalysis;
       domFnNoExamples.classList.add("hidden");
     }
 
-    let protoSrcNode = zigAnalysis.astNodes[protoSrcIndex];
+    let protoSrcNode = getAstNode(protoSrcIndex);
     if (
       docsSource == null &&
       protoSrcNode != null &&
@@ -639,13 +639,13 @@ var zigAnalysis;
   function renderFnParamDocs(fnDecl, typeObj) {
     let docCount = 0;
 
-    let fnNode = zigAnalysis.astNodes[fnDecl.src];
+    let fnNode = getAstNode(fnDecl.src);
     let fields = fnNode.fields;
     let isVarArgs = fnNode.varArgs;
 
     for (let i = 0; i < fields.length; i += 1) {
       let field = fields[i];
-      let fieldNode = zigAnalysis.astNodes[field];
+      let fieldNode = getAstNode(field);
       if (fieldNode.docs != null) {
         docCount += 1;
       }
@@ -659,7 +659,7 @@ var zigAnalysis;
 
     for (let i = 0; i < fields.length; i += 1) {
       let field = fields[i];
-      let fieldNode = zigAnalysis.astNodes[field];
+      let fieldNode = getAstNode(field);
       let docs = fieldNode.docs;
       if (fieldNode.docs == null) {
         continue;
@@ -967,17 +967,17 @@ var zigAnalysis;
       }
       case "switchOp": {
         let condExpr = zigAnalysis.exprs[expr.switchOp.cond_index];
-        let ast = zigAnalysis.astNodes[expr.switchOp.ast];
+        let ast = getAstNode(expr.switchOp.src);
         let file_name = expr.switchOp.file_name;
         let outer_decl_index = expr.switchOp.outer_decl;
-        let outer_decl = zigAnalysis.types[outer_decl_index];
+        let outer_decl = getType(outer_decl_index);
         let line = 0;
         // console.log(expr.switchOp)
         // console.log(outer_decl)
         while (outer_decl_index !== 0 && outer_decl.line_number > 0) {
           line += outer_decl.line_number;
           outer_decl_index = outer_decl.outer_decl;
-          outer_decl = zigAnalysis.types[outer_decl_index];
+          outer_decl = getType(outer_decl_index);
           // console.log(outer_decl)
         }
         line += ast.line + 1;
@@ -1028,8 +1028,8 @@ var zigAnalysis;
       case "fieldRef": {
         const enumObj = exprName({ type: expr.fieldRef.type }, opts);
         const field =
-          zigAnalysis.astNodes[enumObj.ast].fields[expr.fieldRef.index];
-        const name = zigAnalysis.astNodes[field].name;
+          getAstNode(enumObj.src).fields[expr.fieldRef.index];
+        const name = getAstNode(field).name;
         return name;
       }
       case "enumToInt": {
@@ -1452,13 +1452,13 @@ var zigAnalysis;
         return print_lhs + " " + operator + " " + print_rhs;
       }
       case "errorSets": {
-        const errUnionObj = zigAnalysis.types[expr.errorSets];
+        const errUnionObj = getType(expr.errorSets);
         let lhs = exprName(errUnionObj.lhs, opts);
         let rhs = exprName(errUnionObj.rhs, opts);
         return lhs + " || " + rhs;
       }
       case "errorUnion": {
-        const errUnionObj = zigAnalysis.types[expr.errorUnion];
+        const errUnionObj = getType(expr.errorUnion);
         let lhs = exprName(errUnionObj.lhs, opts);
         let rhs = exprName(errUnionObj.rhs, opts);
         return lhs + "!" + rhs;
@@ -1574,7 +1574,7 @@ var zigAnalysis;
         return exprName(exprArg, opts);
       }
       case "declRef": {
-        return zigAnalysis.decls[expr.declRef].name;
+        return getDecl(expr.declRef).name;
       }
       case "refPath": {
         return expr.refPath.map((x) => exprName(x, opts)).join(".");
@@ -1611,7 +1611,7 @@ var zigAnalysis;
         let name = "";
 
         let typeObj = expr.type;
-        if (typeof typeObj === "number") typeObj = zigAnalysis.types[typeObj];
+        if (typeof typeObj === "number") typeObj = getType(typeObj);
         switch (typeObj.kind) {
           default:
             throw "TODO";
@@ -1865,7 +1865,7 @@ var zigAnalysis;
             if (fnObj.params) {
               let fields = null;
               let isVarArgs = false;
-              let fnNode = zigAnalysis.astNodes[fnObj.src];
+              let fnNode = getAstNode(fnObj.src);
               fields = fnNode.fields;
               isVarArgs = fnNode.varArgs;
 
@@ -1880,7 +1880,7 @@ var zigAnalysis;
                 let paramValue = resolveValue({ expr: value });
 
                 if (fields != null) {
-                  let paramNode = zigAnalysis.astNodes[fields[i]];
+                  let paramNode = getAstNode(fields[i]);
 
                   if (paramNode.varArgs) {
                     payloadHtml += "...";
@@ -2046,7 +2046,7 @@ var zigAnalysis;
   function shouldSkipParamName(typeRef, paramName) {
     let resolvedTypeRef = resolveValue({ expr: typeRef });
     if ("type" in resolvedTypeRef) {
-      let typeObj = zigAnalysis.types[resolvedTypeRef.type];
+      let typeObj = getType(resolvedTypeRef.type);
       if (typeObj.kind === typeKinds.Pointer) {
         let ptrObj = typeObj;
         if (getPtrSize(ptrObj) === pointerSizeEnum.One) {
@@ -2067,7 +2067,7 @@ var zigAnalysis;
     if (
       rootIsStd &&
       typeObj ===
-        zigAnalysis.types[zigAnalysis.packages[zigAnalysis.rootPkg].main]
+        getType(zigAnalysis.packages[zigAnalysis.rootPkg].main)
     ) {
       name = "std";
     } else {
@@ -2189,7 +2189,7 @@ var zigAnalysis;
 
     if (resolvedValue.expr.fieldRef) {
       const declRef = decl.value.expr.refPath[0].declRef;
-      const type = zigAnalysis.decls[declRef];
+      const type = getDecl(declRef);
       domFnProtoCode.innerHTML =
         'const ' +
         escapeHtml(decl.name) +
@@ -2229,7 +2229,7 @@ var zigAnalysis;
         ";";
     }
 
-    let docs = zigAnalysis.astNodes[decl.src].docs;
+    let docs = getAstNode(decl.src).docs;
     if (docs != null) {
       domTldDocs.innerHTML = markdown(docs);
       domTldDocs.classList.remove("hidden");
@@ -2246,7 +2246,7 @@ var zigAnalysis;
       ": " +
       typeValueName(declTypeRef, true, true);
 
-    let docs = zigAnalysis.astNodes[decl.src].docs;
+    let docs = getAstNode(decl.src).docs;
     if (docs != null) {
       domTldDocs.innerHTML = markdown(docs);
       domTldDocs.classList.remove("hidden");
@@ -2266,7 +2266,7 @@ var zigAnalysis;
     testsList
   ) {
     for (let i = 0; i < decls.length; i += 1) {
-      let decl = zigAnalysis.decls[decls[i]];
+      let decl = getDecl(decls[i]);
       let declValue = resolveValue(decl.value);
 
       if (decl.isTest) {
@@ -2282,7 +2282,7 @@ var zigAnalysis;
       if (decl.kind === "const") {
         if ("type" in declValue.expr) {
           // We have the actual type expression at hand.
-          const typeExpr = zigAnalysis.types[declValue.expr.type];
+          const typeExpr = getType(declValue.expr.type);
           if (typeExpr.kind == typeKinds.Fn) {
             const funcRetExpr = resolveValue({
               expr: typeExpr.ret,
@@ -2310,7 +2310,7 @@ var zigAnalysis;
               typesList.push(decl);
             }
           }
-        } else if ("typeRef" in declValue) {
+        } else if (declValue.typeRef) {
           if ("type" in declValue.typeRef && declValue.typeRef == typeTypeId) {
             // We don't know what the type expression is, but we know it's a type.
             typesList.push(decl);
@@ -2324,7 +2324,7 @@ var zigAnalysis;
     }
   }
   function renderSourceFileLink(decl) {
-    let srcNode = zigAnalysis.astNodes[decl.src];
+    let srcNode = getAstNode(decl.src);
 
     return  " 0) {
       resizeDomList(domListFields, containerNode.fields.length, "
"); for (let i = 0; i < containerNode.fields.length; i += 1) { - let fieldNode = zigAnalysis.astNodes[containerNode.fields[i]]; + let fieldNode = getAstNode(containerNode.fields[i]); let divDom = domListFields.children[i]; let fieldName = fieldNode.name; let docs = fieldNode.docs; @@ -2528,7 +2528,7 @@ var zigAnalysis; tdType.innerHTML = typeValueName(typeOfDecl(decl), true, true); - let docs = zigAnalysis.astNodes[decl.src].docs; + let docs = getAstNode(decl.src).docs; if (docs != null) { tdDesc.innerHTML = shortDescMarkdown(docs); } else { @@ -2561,7 +2561,7 @@ var zigAnalysis; wantLink: true, }); - let docs = zigAnalysis.astNodes[decl.src].docs; + let docs = getAstNode(decl.src).docs; if (docs != null) { tdDesc.innerHTML = shortDescMarkdown(docs); } else { @@ -2594,7 +2594,7 @@ var zigAnalysis; wantLink: true, }); - let docs = zigAnalysis.astNodes[decl.src].docs; + let docs = getAstNode(decl.src).docs; if (docs != null) { tdDesc.innerHTML = shortDescMarkdown(docs); } else { @@ -2668,7 +2668,7 @@ var zigAnalysis; function findTypeTypeId() { for (let i = 0; i < zigAnalysis.types.length; i += 1) { - if (zigAnalysis.types[i].kind == typeKinds.Type) { + if (getType(i).kind == typeKinds.Type) { return i; } } @@ -2732,11 +2732,11 @@ var zigAnalysis; if ("value" in parentType) { const rv = resolveValue(parentType.value); if ("type" in rv.expr) { - const t = zigAnalysis.types[rv.expr.type]; + const t = getType(rv.expr.type); if (t.kind == typeKinds.Fn && t.generic_ret != null) { const rgr = resolveValue({ expr: t.generic_ret }); if ("type" in rgr.expr) { - parentType = zigAnalysis.types[rgr.expr.type]; + parentType = getType(rgr.expr.type); } } } @@ -2746,7 +2746,7 @@ var zigAnalysis; if (!parentType.pubDecls) return null; for (let i = 0; i < parentType.pubDecls.length; i += 1) { let declIndex = parentType.pubDecls[i]; - let childDecl = zigAnalysis.decls[declIndex]; + let childDecl = getDecl(declIndex); if (childDecl.name === childName) { return childDecl; } @@ -2754,7 
+2754,7 @@ var zigAnalysis; if (!parentType.privDecls) return null; for (let i = 0; i < parentType.privDecls.length; i += 1) { let declIndex = parentType.privDecls[i]; - let childDecl = zigAnalysis.decls[declIndex]; + let childDecl = getDecl(declIndex); if (childDecl.name === childName) { return childDecl; } @@ -2805,7 +2805,7 @@ var zigAnalysis; let stack = [ { declNames: [], - type: zigAnalysis.types[pkg.main], + type: getType(pkg.main), }, ]; while (stack.length !== 0) { @@ -2819,7 +2819,7 @@ var zigAnalysis; let mainDeclIndex = t.pubDecls[declI]; if (list[mainDeclIndex] != null) continue; - let decl = zigAnalysis.decls[mainDeclIndex]; + let decl = getDecl(mainDeclIndex); let declVal = resolveValue(decl.value); let declNames = item.declNames.concat([decl.name]); list[mainDeclIndex] = { @@ -2827,7 +2827,7 @@ var zigAnalysis; declNames: declNames, }; if ("type" in declVal.expr) { - let value = zigAnalysis.types[declVal.expr.type]; + let value = getType(declVal.expr.type); if (declCanRepresentTypeKind(value.kind)) { canonTypeDecls[declVal.type] = mainDeclIndex; } @@ -2843,7 +2843,7 @@ var zigAnalysis; if (value.kind == typeKinds.Fn && value.generic_ret != null) { let resolvedVal = resolveValue({ expr: value.generic_ret }); if ("type" in resolvedVal.expr) { - let generic_type = zigAnalysis.types[resolvedVal.expr.type]; + let generic_type = getType(resolvedVal.expr.type); if (isContainerType(generic_type)) { stack.push({ declNames: declNames, @@ -3394,11 +3394,11 @@ var zigAnalysis; let canonPath = getCanonDeclPath(declIndex); if (canonPath == null) continue; - let decl = zigAnalysis.decls[declIndex]; + let decl = getDecl(declIndex); let lastPkgName = canonPath.pkgNames[canonPath.pkgNames.length - 1]; let fullPathSearchText = lastPkgName + "." 
+ canonPath.declNames.join("."); - let astNode = zigAnalysis.astNodes[decl.src]; + let astNode = getAstNode(decl.src); let fileAndDocs = ""; //zigAnalysis.files[astNode.file]; // TODO: understand what this piece of code is trying to achieve // also right now `files` are expressed as a hashmap. @@ -3513,4 +3513,169 @@ var zigAnalysis; function byNameProperty(a, b) { return operatorCompare(a.name, b.name); } + + + function getDecl(idx) { + const decl = zigAnalysis.decls[idx]; + return { + name: decl[0], + kind: decl[1], + isTest: decl[2], + src: decl[3], + value: decl[4], + decltest: decl[5], + }; + } + + function getAstNode(idx) { + const ast = zigAnalysis.astNodes[idx]; + return { + file: ast[0], + line: ast[1], + col: ast[2], + name: ast[3], + code: ast[4], + docs: ast[5], + fields: ast[6], + comptime: ast[7], + }; + } + + function getType(idx){ + const ty = zigAnalysis.types[idx]; + switch(ty[0]) { + default: + throw "unhandled type kind!"; + case 0: // Unanalyzed + throw "unanalyzed type!"; + case 1: // Type + case 2: // Void + case 3: // Bool + case 4: // NoReturn + case 5: // Int + case 6: // Float + return { kind: ty[0], name: ty[1]}; + case 7: // Pointer + return { + kind: ty[0], + size: ty[1], + child: ty[2], + sentinel: ty[3], + align: ty[4], + address_space: ty[5], + bit_start: ty[6], + host_size: ty[7], + is_ref: ty[8], + is_allowzero: ty[9], + is_mutable: ty[10], + is_volatile: ty[11], + has_sentinel: ty[12], + has_align: ty[13], + has_addrspace: ty[14], + has_bit_range: ty[15], + }; + case 8: // Array + return { + kind: ty[0], + len: ty[1], + child: ty[2], + sentinel: ty[3], + }; + case 9: // Struct + return { + kind: ty[0], + name: ty[1], + src: ty[2], + privDecls: ty[3], + pubDecls: ty[4], + fields: ty[5], + line_number: ty[6], + outer_decl: ty[7], + }; + case 10: // ComptimeExpr + case 11: // ComptimeFloat + case 12: // ComptimeInt + case 13: // Undefined + case 14: // Null + return { kind: ty[0], name: ty[1] }; + case 15: // Optional + return { + 
kind: ty[0], + name: ty[1], + child: ty[2], + }; + case 16: // ErrorUnion + return { + kind: ty[0], + lhs: ty[1], + rhs: ty[2], + }; + case 17: // InferredErrorUnion + return { + kind: ty[0], + payload: ty[1], + }; + case 18: // ErrorSet + return { + kind: ty[0], + name: ty[1], + fields: ty[2], + }; + case 19: // Enum + return { + kind: ty[0], + name: ty[1], + src: ty[2], + privDecls: ty[3], + pubDecls: ty[4], + }; + case 20: // Union + return { + kind: ty[0], + name: ty[1], + src: ty[2], + privDecls: ty[3], + pubDecls: ty[4], + fields: ty[5], + }; + case 21: // Fn + return { + kind: ty[0], + name: ty[1], + src: ty[2], + ret: ty[3], + generic_ret: ty[4], + params: ty[5], + lib_name: ty[6], + is_var_args: ty[7], + is_inferred_error: ty[8], + has_lib_name: ty[9], + has_cc: ty[10], + cc: ty[11], + align: ty[12], + has_align: ty[13], + is_test: ty[14], + is_extern: ty[15], + }; + case 22: // BoundFn + return { kind: ty[0], name: ty[1] }; + case 23: // Opaque + return { + kind: ty[0], + name: ty[1], + src: ty[2], + privDecls: ty[3], + pubDecls: ty[4], + }; + case 24: // Frame + case 25: // AnyFrame + case 26: // Vector + case 27: // EnumLiteral + return { kind: ty[0], name: ty[1] }; + } + } + })(); + + + diff --git a/src/Autodoc.zig b/src/Autodoc.zig index e6e025b5b4..cb17c19cb3 100644 --- a/src/Autodoc.zig +++ b/src/Autodoc.zig @@ -280,8 +280,8 @@ pub fn generateZirData(self: *Autodoc) !void { try std.json.stringify( data, .{ - .whitespace = .{ .indent = if (builtin.mode == .Debug) .{ .Space = 4 } else .None }, - .emit_null_optional_fields = false, + .whitespace = .{ .indent = .None, .separator = false }, + .emit_null_optional_fields = true, }, out, ); @@ -404,6 +404,7 @@ const DocData = struct { w: anytype, ) !void { var jsw = std.json.writeStream(w, 15); + if (opts.whitespace) |ws| jsw.whitespace = ws; try jsw.beginObject(); inline for (comptime std.meta.tags(std.meta.FieldEnum(DocData))) |f| { const f_name = @tagName(f); @@ -449,6 +450,8 @@ const DocData = struct { 
w: anytype, ) !void { var jsw = std.json.writeStream(w, 15); + if (opts.whitespace) |ws| jsw.whitespace = ws; + try jsw.beginObject(); inline for (comptime std.meta.tags(std.meta.FieldEnum(DocPackage))) |f| { const f_name = @tagName(f); @@ -474,6 +477,22 @@ const DocData = struct { // The index in astNodes of the `test declname { }` node decltest: ?usize = null, _analyzed: bool, // omitted in json data + + pub fn jsonStringify( + self: Decl, + opts: std.json.StringifyOptions, + w: anytype, + ) !void { + var jsw = std.json.writeStream(w, 15); + if (opts.whitespace) |ws| jsw.whitespace = ws; + try jsw.beginArray(); + inline for (comptime std.meta.fields(Decl)) |f| { + try jsw.arrayElem(); + try std.json.stringify(@field(self, f.name), opts, w); + jsw.state_index -= 1; + } + try jsw.endArray(); + } }; const AstNode = struct { @@ -485,6 +504,22 @@ const DocData = struct { docs: ?[]const u8 = null, fields: ?[]usize = null, // index into astNodes @"comptime": bool = false, + + pub fn jsonStringify( + self: AstNode, + opts: std.json.StringifyOptions, + w: anytype, + ) !void { + var jsw = std.json.writeStream(w, 15); + if (opts.whitespace) |ws| jsw.whitespace = ws; + try jsw.beginArray(); + inline for (comptime std.meta.fields(AstNode)) |f| { + try jsw.arrayElem(); + try std.json.stringify(@field(self, f.name), opts, w); + jsw.state_index -= 1; + } + try jsw.endArray(); + } }; const Type = union(enum) { @@ -525,7 +560,6 @@ const DocData = struct { fields: ?[]Expr = null, // (use src->fields to find names) line_number: usize, outer_decl: usize, - ast: usize, }, ComptimeExpr: struct { name: []const u8 }, ComptimeFloat: struct { name: []const u8 }, @@ -548,7 +582,6 @@ const DocData = struct { src: usize, // index into astNodes privDecls: []usize = &.{}, // index into decls pubDecls: []usize = &.{}, // index into decls - ast: usize, // (use src->fields to find field names) }, Union: struct { @@ -557,7 +590,6 @@ const DocData = struct { privDecls: []usize = &.{}, // index into 
decls pubDecls: []usize = &.{}, // index into decls fields: []Expr = &.{}, // (use src->fields to find names) - ast: usize, }, Fn: struct { name: []const u8, @@ -582,7 +614,6 @@ const DocData = struct { src: usize, // index into astNodes privDecls: []usize = &.{}, // index into decls pubDecls: []usize = &.{}, // index into decls - ast: usize, }, Frame: struct { name: []const u8 }, AnyFrame: struct { name: []const u8 }, @@ -601,14 +632,15 @@ const DocData = struct { ) !void { const active_tag = std.meta.activeTag(self); var jsw = std.json.writeStream(w, 15); - try jsw.beginObject(); - try jsw.objectField("kind"); + if (opts.whitespace) |ws| jsw.whitespace = ws; + try jsw.beginArray(); + try jsw.arrayElem(); try jsw.emitNumber(@enumToInt(active_tag)); inline for (comptime std.meta.fields(Type)) |case| { if (@field(Type, case.name) == active_tag) { const current_value = @field(self, case.name); inline for (comptime std.meta.fields(case.field_type)) |f| { - try jsw.objectField(f.name); + try jsw.arrayElem(); if (f.field_type == std.builtin.TypeInfo.Pointer.Size) { try jsw.emitNumber(@enumToInt(@field(current_value, f.name))); } else { @@ -618,7 +650,7 @@ const DocData = struct { } } } - try jsw.endObject(); + try jsw.endArray(); } }; @@ -686,7 +718,7 @@ const DocData = struct { const SwitchOp = struct { cond_index: usize, file_name: []const u8, - ast: usize, + src: usize, outer_decl: usize, // index in `types` }; const BuiltinBin = struct { @@ -704,7 +736,15 @@ const DocData = struct { end: ?usize = null, sentinel: ?usize = null, // index in `exprs` }; - const Cmpxchg = struct { name: []const u8, type: usize, ptr: usize, expected_value: usize, new_value: usize, success_order: usize, failure_order: usize }; + const Cmpxchg = struct { + name: []const u8, + type: usize, + ptr: usize, + expected_value: usize, + new_value: usize, + success_order: usize, + failure_order: usize, + }; const As = struct { typeRefArg: ?usize, // index in `exprs` exprArg: usize, // index in 
`exprs` @@ -721,11 +761,12 @@ const DocData = struct { pub fn jsonStringify( self: Expr, - opt: std.json.StringifyOptions, + opts: std.json.StringifyOptions, w: anytype, ) !void { const active_tag = std.meta.activeTag(self); var jsw = std.json.writeStream(w, 15); + if (opts.whitespace) |ws| jsw.whitespace = ws; try jsw.beginObject(); try jsw.objectField(@tagName(active_tag)); switch (self) { @@ -742,7 +783,7 @@ const DocData = struct { if (comptime std.mem.eql(u8, case.name, "builtinField")) continue; if (@field(Expr, case.name) == active_tag) { - try std.json.stringify(@field(self, case.name), opt, w); + try std.json.stringify(@field(self, case.name), opts, w); jsw.state_index -= 1; // TODO: we should not reach into the state of the // json writer, but alas, this is what's @@ -1874,7 +1915,12 @@ fn walkInstruction( // log.debug("{s}", .{sep}); const switch_index = self.exprs.items.len; - try self.exprs.append(self.arena, .{ .switchOp = .{ .cond_index = cond_index, .file_name = file.sub_file_path, .ast = ast_index, .outer_decl = type_index } }); + try self.exprs.append(self.arena, .{ .switchOp = .{ + .cond_index = cond_index, + .file_name = file.sub_file_path, + .src = ast_index, + .outer_decl = type_index, + } }); return DocData.WalkResult{ .typeRef = .{ .type = @enumToInt(Ref.type_type) }, @@ -2505,7 +2551,6 @@ fn walkInstruction( .src = self_ast_node_index, .privDecls = priv_decl_indexes.items, .pubDecls = decl_indexes.items, - .ast = self_ast_node_index, }, }; if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| { @@ -2644,7 +2689,13 @@ fn walkInstruction( self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items; self.types.items[type_slot_index] = .{ - .Union = .{ .name = "todo_name", .src = self_ast_node_index, .privDecls = priv_decl_indexes.items, .pubDecls = decl_indexes.items, .fields = field_type_refs.items, .ast = self_ast_node_index }, + .Union = .{ + .name = "todo_name", + .src = self_ast_node_index, + .privDecls = 
priv_decl_indexes.items, + .pubDecls = decl_indexes.items, + .fields = field_type_refs.items, + }, }; if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| { @@ -2796,7 +2847,12 @@ fn walkInstruction( self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items; self.types.items[type_slot_index] = .{ - .Enum = .{ .name = "todo_name", .src = self_ast_node_index, .privDecls = priv_decl_indexes.items, .pubDecls = decl_indexes.items, .ast = self_ast_node_index }, + .Enum = .{ + .name = "todo_name", + .src = self_ast_node_index, + .privDecls = priv_decl_indexes.items, + .pubDecls = decl_indexes.items, + }, }; if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| { for (paths.items) |resume_info| { @@ -2910,7 +2966,15 @@ fn walkInstruction( self.ast_nodes.items[self_ast_node_index].fields = field_name_indexes.items; self.types.items[type_slot_index] = .{ - .Struct = .{ .name = "todo_name", .src = self_ast_node_index, .privDecls = priv_decl_indexes.items, .pubDecls = decl_indexes.items, .fields = field_type_refs.items, .line_number = self.ast_nodes.items[self_ast_node_index].line, .outer_decl = type_slot_index - 1, .ast = self_ast_node_index }, + .Struct = .{ + .name = "todo_name", + .src = self_ast_node_index, + .privDecls = priv_decl_indexes.items, + .pubDecls = decl_indexes.items, + .fields = field_type_refs.items, + .line_number = self.ast_nodes.items[self_ast_node_index].line, + .outer_decl = type_slot_index - 1, + }, }; if (self.ref_paths_pending_on_types.get(type_slot_index)) |paths| { for (paths.items) |resume_info| { From aec0e595f2a679f10e927ea4531b8f58ced7080a Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Sep 2022 13:39:58 -0700 Subject: [PATCH 27/30] stage2: no condition on system libs to link native libc Before, Zig tried to use its own libc files (e.g. glibc) when there were no system libs being linked. This prevented building against native glibc on systems that have newer glibc than the ones Zig provides. 
Closes #12797 --- src/Compilation.zig | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 597f5cffff..f871c0f78c 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1238,7 +1238,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation { options.target, options.is_native_abi, link_libc, - options.system_lib_names.len != 0 or options.frameworks.count() != 0, options.libc_installation, options.native_darwin_sdk != null, ); @@ -4522,7 +4521,6 @@ fn detectLibCIncludeDirs( target: Target, is_native_abi: bool, link_libc: bool, - link_system_libs: bool, libc_installation: ?*const LibCInstallation, has_macos_sdk: bool, ) !LibCDirs { @@ -4539,7 +4537,7 @@ fn detectLibCIncludeDirs( // If linking system libraries and targeting the native abi, default to // using the system libc installation. - if (link_system_libs and is_native_abi and !target.isMinGW()) { + if (is_native_abi and !target.isMinGW()) { if (target.isDarwin()) { return if (has_macos_sdk) // For Darwin/macOS, we are all set with getDarwinSDK found earlier. 
From 60678f5bafdf22e274229f983d02069e13996426 Mon Sep 17 00:00:00 2001 From: Loris Cro Date: Sun, 11 Sep 2022 23:45:18 +0200 Subject: [PATCH 28/30] autodoc: fix regression in frontend rendering of values --- lib/docs/main.js | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/lib/docs/main.js b/lib/docs/main.js index a4043fb742..18886f37fb 100644 --- a/lib/docs/main.js +++ b/lib/docs/main.js @@ -478,11 +478,13 @@ var zigAnalysis; } if (lastIsDecl && last.kind === "const") { - let typeObj = getType(resolveValue(last.value).expr.type); - if (typeObj && typeObj.kind === typeKinds.Fn) { - return renderFn(last); + const value = resolveValue(last.value); + if ("type" in value.expr) { + let typeObj = getType(value.expr.type); + if (typeObj.kind === typeKinds.Fn) { + return renderFn(last); + } } - return renderValue(last); } From 65bea514ae3860a5169d044d22ece7170c445bd3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Sep 2022 16:37:03 -0700 Subject: [PATCH 29/30] Compilation: handle system C compiler not found When linking libc and compiling natively, Zig tries to integrate with the system C compiler. However, this caused Zig to fail when no system C compiler is installed, despite the fact that Zig is perfectly capable of compiling & linking libc without one. This commit makes Zig fall back to using its own ability to provide libc in the case that no C compiler is installed. For glibc, it means sometimes getting the warning "zig cannot build new glibc version abc, providing instead xyz". Ideally, Zig would do some more validation about the system libraries being linked against, and report an error in case it could not provide the exact correct libc version of the system libraries (or that the system libraries themselves conflict with each other), however, I think it is fair to call that a separate enhancement. 
--- lib/std/child_process.zig | 2 +- src/Compilation.zig | 148 ++++++++++++++++++++++---------------- src/glibc.zig | 9 ++- 3 files changed, 91 insertions(+), 68 deletions(-) diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index fb521eb784..f1604bb86c 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -508,7 +508,7 @@ pub const ChildProcess = struct { // it, that's the error code returned by the child process. _ = std.os.poll(&fd, 0) catch unreachable; - // According to eventfd(2) the descriptro is readable if the counter + // According to eventfd(2) the descriptor is readable if the counter // has a value greater than 0 if ((fd[0].revents & std.os.POLL.IN) != 0) { const err_int = try readIntFd(err_pipe[0]); diff --git a/src/Compilation.zig b/src/Compilation.zig index f871c0f78c..5a1abcb52b 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -4549,74 +4549,29 @@ fn detectLibCIncludeDirs( getZigShippedLibCIncludeDirsDarwin(arena, zig_lib_dir, target); } const libc = try arena.create(LibCInstallation); - libc.* = try LibCInstallation.findNative(.{ .allocator = arena, .verbose = true }); + libc.* = LibCInstallation.findNative(.{ .allocator = arena }) catch |err| switch (err) { + error.CCompilerExitCode, + error.CCompilerCrashed, + error.CCompilerCannotFindHeaders, + error.UnableToSpawnCCompiler, + => |e| { + // We tried to integrate with the native system C compiler, + // however, it is not installed. So we must rely on our bundled + // libc files. + if (target_util.canBuildLibC(target)) { + return detectLibCFromBuilding(arena, zig_lib_dir, target, has_macos_sdk); + } + return e; + }, + else => |e| return e, + }; return detectLibCFromLibCInstallation(arena, target, libc); } // If not linking system libraries, build and provide our own libc by // default if possible. 
if (target_util.canBuildLibC(target)) { - switch (target.os.tag) { - .macos => return if (has_macos_sdk) - // For Darwin/macOS, we are all set with getDarwinSDK found earlier. - LibCDirs{ - .libc_include_dir_list = &[0][]u8{}, - .libc_installation = null, - } - else - getZigShippedLibCIncludeDirsDarwin(arena, zig_lib_dir, target), - else => { - const generic_name = target_util.libCGenericName(target); - // Some architectures are handled by the same set of headers. - const arch_name = if (target.abi.isMusl()) - musl.archName(target.cpu.arch) - else if (target.cpu.arch.isThumb()) - // ARM headers are valid for Thumb too. - switch (target.cpu.arch) { - .thumb => "arm", - .thumbeb => "armeb", - else => unreachable, - } - else - @tagName(target.cpu.arch); - const os_name = @tagName(target.os.tag); - // Musl's headers are ABI-agnostic and so they all have the "musl" ABI name. - const abi_name = if (target.abi.isMusl()) "musl" else @tagName(target.abi); - const s = std.fs.path.sep_str; - const arch_include_dir = try std.fmt.allocPrint( - arena, - "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", - .{ zig_lib_dir, arch_name, os_name, abi_name }, - ); - const generic_include_dir = try std.fmt.allocPrint( - arena, - "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "generic-{s}", - .{ zig_lib_dir, generic_name }, - ); - const generic_arch_name = target_util.osArchName(target); - const arch_os_include_dir = try std.fmt.allocPrint( - arena, - "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-any", - .{ zig_lib_dir, generic_arch_name, os_name }, - ); - const generic_os_include_dir = try std.fmt.allocPrint( - arena, - "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "any-{s}-any", - .{ zig_lib_dir, os_name }, - ); - - const list = try arena.alloc([]const u8, 4); - list[0] = arch_include_dir; - list[1] = generic_include_dir; - list[2] = arch_os_include_dir; - list[3] = generic_os_include_dir; - - return LibCDirs{ - .libc_include_dir_list = list, - .libc_installation 
= null, - }; - }, - } + return detectLibCFromBuilding(arena, zig_lib_dir, target, has_macos_sdk); } // If zig can't build the libc for the target and we are targeting the @@ -4675,6 +4630,75 @@ fn detectLibCFromLibCInstallation(arena: Allocator, target: Target, lci: *const }; } +fn detectLibCFromBuilding( + arena: Allocator, + zig_lib_dir: []const u8, + target: std.Target, + has_macos_sdk: bool, +) !LibCDirs { + switch (target.os.tag) { + .macos => return if (has_macos_sdk) + // For Darwin/macOS, we are all set with getDarwinSDK found earlier. + LibCDirs{ + .libc_include_dir_list = &[0][]u8{}, + .libc_installation = null, + } + else + getZigShippedLibCIncludeDirsDarwin(arena, zig_lib_dir, target), + else => { + const generic_name = target_util.libCGenericName(target); + // Some architectures are handled by the same set of headers. + const arch_name = if (target.abi.isMusl()) + musl.archName(target.cpu.arch) + else if (target.cpu.arch.isThumb()) + // ARM headers are valid for Thumb too. + switch (target.cpu.arch) { + .thumb => "arm", + .thumbeb => "armeb", + else => unreachable, + } + else + @tagName(target.cpu.arch); + const os_name = @tagName(target.os.tag); + // Musl's headers are ABI-agnostic and so they all have the "musl" ABI name. 
+ const abi_name = if (target.abi.isMusl()) "musl" else @tagName(target.abi); + const s = std.fs.path.sep_str; + const arch_include_dir = try std.fmt.allocPrint( + arena, + "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-{s}", + .{ zig_lib_dir, arch_name, os_name, abi_name }, + ); + const generic_include_dir = try std.fmt.allocPrint( + arena, + "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "generic-{s}", + .{ zig_lib_dir, generic_name }, + ); + const generic_arch_name = target_util.osArchName(target); + const arch_os_include_dir = try std.fmt.allocPrint( + arena, + "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "{s}-{s}-any", + .{ zig_lib_dir, generic_arch_name, os_name }, + ); + const generic_os_include_dir = try std.fmt.allocPrint( + arena, + "{s}" ++ s ++ "libc" ++ s ++ "include" ++ s ++ "any-{s}-any", + .{ zig_lib_dir, os_name }, + ); + + const list = try arena.alloc([]const u8, 4); + list[0] = arch_include_dir; + list[1] = generic_include_dir; + list[2] = arch_os_include_dir; + list[3] = generic_os_include_dir; + + return LibCDirs{ + .libc_include_dir_list = list, + .libc_installation = null, + }; + }, + } +} + pub fn get_libc_crt_file(comp: *Compilation, arena: Allocator, basename: []const u8) ![]const u8 { if (comp.wantBuildGLibCFromSource() or comp.wantBuildMuslFromSource() or diff --git a/src/glibc.zig b/src/glibc.zig index 4e33867169..3dd7565e96 100644 --- a/src/glibc.zig +++ b/src/glibc.zig @@ -719,17 +719,16 @@ pub fn buildSharedObjects(comp: *Compilation) !void { .lt => continue, .gt => { // TODO Expose via compile error mechanism instead of log. - log.err("invalid target glibc version: {}", .{target_version}); + log.warn("invalid target glibc version: {}", .{target_version}); return error.InvalidTargetGLibCVersion; }, } - } else { + } else blk: { const latest_index = metadata.all_versions.len - 1; - // TODO Expose via compile error mechanism instead of log. 
- log.err("zig does not yet provide glibc version {}, the max provided version is {}", .{ + log.warn("zig cannot build new glibc version {}; providing instead {}", .{ target_version, metadata.all_versions[latest_index], }); - return error.InvalidTargetGLibCVersion; + break :blk latest_index; }; { From c97d64b677eb891144fb356e1f4b9011c60cc0e2 Mon Sep 17 00:00:00 2001 From: Shane Kennedy Date: Wed, 7 Sep 2022 23:53:04 +0200 Subject: [PATCH 30/30] chore: Remove unused constants --- lib/std/linked_list.zig | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/std/linked_list.zig b/lib/std/linked_list.zig index 5039e16583..577bae3d38 100644 --- a/lib/std/linked_list.zig +++ b/lib/std/linked_list.zig @@ -2,8 +2,6 @@ const std = @import("std.zig"); const debug = std.debug; const assert = debug.assert; const testing = std.testing; -const mem = std.mem; -const Allocator = mem.Allocator; /// A singly-linked list is headed by a single forward pointer. The elements /// are singly linked for minimum space and pointer manipulation overhead at