From c7775a9f628a7fa971e85dec65e1400866ad012c Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 11 Feb 2022 15:04:59 +0100 Subject: [PATCH 1/9] x64: impl genBinMathOpMir for ptr_stack_off and PIE memory --- src/arch/x86_64/CodeGen.zig | 16 +++++++++++----- test/behavior/slice.zig | 3 --- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index f9235512a7..df874dd492 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2244,7 +2244,9 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC .none => unreachable, .undef => try self.genSetReg(dst_ty, dst_reg, .undef), .dead, .unreach => unreachable, - .ptr_stack_offset => unreachable, + .ptr_stack_offset => |off| { + return self.genBinMathOpMir(mir_tag, dst_ty, dst_mcv, .{ .immediate = @bitCast(u32, off) }); + }, .ptr_embedded_in_code => unreachable, .register => |src_reg| { _ = try self.addInst(.{ @@ -2265,16 +2267,17 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC .data = .{ .imm = @truncate(u32, imm) }, }); }, - .embedded_in_code, .memory => { + .embedded_in_code, + .memory, + .got_load, + .direct_load, + => { assert(abi_size <= 8); self.register_manager.freezeRegs(&.{dst_reg}); defer self.register_manager.unfreezeRegs(&.{dst_reg}); const reg = try self.copyToTmpRegister(dst_ty, src_mcv); return self.genBinMathOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg }); }, - .got_load, .direct_load => { - return self.fail("TODO implement x86 ADD/SUB/CMP source symbol at index in linker", .{}); - }, .stack_offset => |off| { if (off > math.maxInt(i32)) { return self.fail("stack offset too large", .{}); @@ -4620,6 +4623,9 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { } switch (typed_value.ty.zigTypeTag()) { + .Array => { + return self.lowerUnnamedConst(typed_value); + }, .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => { return self.lowerUnnamedConst(typed_value); diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 4b73a3a140..e64e82d474 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -29,7 +29,6 @@ comptime { test "slicing" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO var array: [20]i32 = undefined; @@ -223,7 +222,6 @@ test "compile time slice of pointer to hard coded address" { test "slice string literal has correct type" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; comptime { try expect(@TypeOf("aoeu"[0..]) == *const [4:0]u8); @@ -365,7 +363,6 @@ test "empty array to slice" { test "@ptrCast slice to pointer" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const S = struct { fn doTheTest() !void { From 78e6f9c44c054b922ed1eaafcc4534edcf2dc9ba Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Fri, 11 Feb 2022 16:13:05 +0100 Subject: [PATCH 2/9] x64: fix ptr_add However, still missing is taking into account pointer alignment when performing arithmetic. 
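For illustration, the kind of operation this lowering has to get right (a hypothetical test-style sketch, not taken from this patch): ptr_add scales the index by the element's ABI size before adding it to the pointer. The pointer-alignment caveat noted above is not modeled in this sketch.

    const std = @import("std");

    test "many-item pointer arithmetic" {
        var array = [_]u32{ 1, 2, 3, 4 };
        var ptr: [*]u32 = &array;
        // lowered roughly as: ptr = ptr + 2 * @sizeOf(u32)
        ptr += 2;
        try std.testing.expect(ptr[0] == 3);
    }
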
--- src/arch/x86_64/CodeGen.zig | 28 +++++++++++++++++++++++++--- test/behavior/align.zig | 1 + test/behavior/array.zig | 1 + 3 files changed, 27 insertions(+), 3 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index df874dd492..d91beceabe 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -2217,7 +2217,26 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: // Now for step 2, we assing an MIR instruction const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { - .add, .addwrap, .ptr_add => try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv), + .ptr_add => { + // TODO clean this up + // TODO take into account alignment + const elem_size = dst_ty.elemType2().abiSize(self.target.*); + const dst_reg = blk: { + switch (dst_mcv) { + .register => |reg| break :blk reg, + else => { + src_mcv.freezeIfRegister(&self.register_manager); + defer src_mcv.freezeIfRegister(&self.register_manager); + const reg = try self.copyToTmpRegister(dst_ty, dst_mcv); + break :blk reg; + }, + } + }; + try self.genIMulOpMir(dst_ty, .{ .register = dst_reg }, .{ .immediate = elem_size }); + dst_mcv = MCValue{ .register = dst_reg }; + try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv); + }, + .add, .addwrap => try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv), .bool_or, .bit_or => try self.genBinMathOpMir(.@"or", dst_ty, dst_mcv, src_mcv), .bool_and, .bit_and => try self.genBinMathOpMir(.@"and", dst_ty, dst_mcv, src_mcv), .sub, .subwrap => try self.genBinMathOpMir(.sub, dst_ty, dst_mcv, src_mcv), @@ -2244,8 +2263,11 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC .none => unreachable, .undef => try self.genSetReg(dst_ty, dst_reg, .undef), .dead, .unreach => unreachable, - .ptr_stack_offset => |off| { - return self.genBinMathOpMir(mir_tag, dst_ty, dst_mcv, .{ .immediate = @bitCast(u32, off) }); + .ptr_stack_offset => { + self.register_manager.freezeRegs(&.{dst_reg}); + defer self.register_manager.unfreezeRegs(&.{dst_reg}); + const reg = try self.copyToTmpRegister(dst_ty, src_mcv); + return self.genBinMathOpMir(mir_tag, dst_ty, dst_mcv, .{ .register = reg }); }, .ptr_embedded_in_code => unreachable, .register => |src_reg| { diff --git a/test/behavior/align.zig b/test/behavior/align.zig index a8d8fcd206..8a315ecab0 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -106,6 +106,7 @@ fn fnWithAlignedStack() i32 { test "implicitly decreasing slice alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const a: u32 align(4) = 3; const b: u32 align(8) = 4; diff --git a/test/behavior/array.zig b/test/behavior/array.zig index e93f0f3e90..7828963a1c 100644 --- a/test/behavior/array.zig +++ b/test/behavior/array.zig @@ -8,6 +8,7 @@ const expectEqual = testing.expectEqual; test "array to slice" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const a: u32 align(4) = 3; const b: u32 align(8) = 4; From 3383064b27cc8ad7aa997f6504d627af1fa8c960 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 16 Feb 2022 14:05:48 +0100 Subject: [PATCH 3/9] x64: implement airSlice --- src/arch/x86_64/CodeGen.zig | 19 +++++++++++++++---- 1 file 
changed, 15 insertions(+), 4 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index d91beceabe..a834f2bee5 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1071,10 +1071,21 @@ fn airMax(self: *Self, inst: Air.Inst.Index) !void { fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; - const result: MCValue = if (self.liveness.isUnused(inst)) - .dead - else - return self.fail("TODO implement slice for {}", .{self.target.cpu.arch}); + + if (self.liveness.isUnused(inst)) { + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + const ptr = try self.resolveInst(bin_op.lhs); + const ptr_ty = self.air.typeOf(bin_op.lhs); + const len = try self.resolveInst(bin_op.rhs); + const len_ty = self.air.typeOf(bin_op.rhs); + + const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16)); + try self.genSetStack(ptr_ty, stack_offset + 8, ptr); + try self.genSetStack(len_ty, stack_offset, len); + const result = MCValue{ .stack_offset = stack_offset }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } From 1b7ec44924ede3816438f9de4b4b5bc3b7705711 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 16 Feb 2022 15:00:32 +0100 Subject: [PATCH 4/9] x64: separate ptr_add and ptr_sub from normal bin ops --- src/arch/x86_64/CodeGen.zig | 133 +++++++++++++++++++++++++----------- 1 file changed, 94 insertions(+), 39 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index a834f2bee5..ff3905cba7 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -582,10 +582,10 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { switch (air_tags[inst]) { // zig fmt: off - .add, .ptr_add => try self.airAdd(inst), + .add => try self.airAdd(inst), .addwrap => try self.airAddWrap(inst), .add_sat => try self.airAddSat(inst), - .sub, .ptr_sub => try self.airSub(inst), + .sub => try self.airSub(inst), .subwrap => try self.airSubWrap(inst), .sub_sat => try self.airSubSat(inst), .mul => try self.airMul(inst), @@ -597,6 +597,8 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { .shl_sat => try self.airShlSat(inst), .min => try self.airMin(inst), .max => try self.airMax(inst), + .ptr_add => try self.airPtrAdd(inst), + .ptr_sub => try self.airPtrSub(inst), .slice => try self.airSlice(inst), .sqrt, @@ -1068,6 +1070,70 @@ fn airMax(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } +fn airPtrAdd(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + if (self.liveness.isUnused(inst)) { + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + const mcvs = try self.mcvsForBinMathOp(inst, bin_op.lhs, bin_op.rhs); + var dst_mcv = mcvs.dst; + const src_mcv = mcvs.src; + + // TODO clean this up + // TODO take into account alignment + const dst_ty = self.air.typeOfIndex(inst); + const elem_size = dst_ty.elemType2().abiSize(self.target.*); + const dst_reg = blk: { + switch (dst_mcv) { + .register => |reg| break :blk reg, + else => { + src_mcv.freezeIfRegister(&self.register_manager); + defer src_mcv.freezeIfRegister(&self.register_manager); + const reg = try self.copyToTmpRegister(dst_ty, dst_mcv); + break :blk reg; + }, + } + }; + try self.genIMulOpMir(dst_ty, 
.{ .register = dst_reg }, .{ .immediate = elem_size }); + dst_mcv = .{ .register = dst_reg }; + try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv); + + return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); +} + +fn airPtrSub(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + if (self.liveness.isUnused(inst)) { + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + const mcvs = try self.mcvsForBinMathOp(inst, bin_op.lhs, bin_op.rhs); + var dst_mcv = mcvs.dst; + const src_mcv = mcvs.src; + + // TODO clean this up + // TODO take into account alignment + const dst_ty = self.air.typeOfIndex(inst); + const elem_size = dst_ty.elemType2().abiSize(self.target.*); + const dst_reg = blk: { + switch (dst_mcv) { + .register => |reg| break :blk reg, + else => { + src_mcv.freezeIfRegister(&self.register_manager); + defer src_mcv.freezeIfRegister(&self.register_manager); + const reg = try self.copyToTmpRegister(dst_ty, dst_mcv); + break :blk reg; + }, + } + }; + try self.genIMulOpMir(dst_ty, .{ .register = dst_reg }, .{ .immediate = elem_size }); + dst_mcv = .{ .register = dst_reg }; + try self.genBinMathOpMir(.sub, dst_ty, dst_mcv, src_mcv); + + return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); +} + fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; @@ -2148,18 +2214,17 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } -/// Perform "binary" operators, excluding comparisons. -/// Currently, the following ops are supported: -/// ADD, SUB, XOR, OR, AND -fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { - // We'll handle these ops in two steps. - // 1) Prepare an output location (register or memory) - // This location will be the location of the operand that dies (if one exists) - // or just a temporary register (if one doesn't exist) - // 2) Perform the op with the other argument - // 3) Sometimes, the output location is memory but the op doesn't support it. - // In this case, copy that location to a register, then perform the op to that register instead. 
- // +const BinMathOpMCValuePair = struct { + dst: MCValue, + src: MCValue, +}; + +fn mcvsForBinMathOp( + self: *Self, + inst: Air.Inst.Index, + op_lhs: Air.Inst.Ref, + op_rhs: Air.Inst.Ref, +) !BinMathOpMCValuePair { // TODO: make this algorithm less bad const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); @@ -2218,35 +2283,26 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: dst_mcv.freezeIfRegister(&self.register_manager); defer dst_mcv.unfreezeIfRegister(&self.register_manager); - const tmp_reg = try self.copyToTmpRegister(Type.u64, src_mcv); - src_mcv = MCValue{ .register = tmp_reg }; + src_mcv = try self.copyToNewRegister(inst, Type.u64, src_mcv); } }, else => {}, } - // Now for step 2, we assing an MIR instruction - const air_tags = self.air.instructions.items(.tag); - switch (air_tags[inst]) { - .ptr_add => { - // TODO clean this up - // TODO take into account alignment - const elem_size = dst_ty.elemType2().abiSize(self.target.*); - const dst_reg = blk: { - switch (dst_mcv) { - .register => |reg| break :blk reg, - else => { - src_mcv.freezeIfRegister(&self.register_manager); - defer src_mcv.freezeIfRegister(&self.register_manager); - const reg = try self.copyToTmpRegister(dst_ty, dst_mcv); - break :blk reg; - }, - } - }; - try self.genIMulOpMir(dst_ty, .{ .register = dst_reg }, .{ .immediate = elem_size }); - dst_mcv = MCValue{ .register = dst_reg }; - try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv); - }, + return BinMathOpMCValuePair{ .dst = dst_mcv, .src = src_mcv }; +} + +/// Perform "binary" operators, excluding comparisons. +/// Currently, the following ops are supported: +/// ADD, SUB, XOR, OR, AND +fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { + const dst_ty = self.air.typeOfIndex(inst); + const mcvs = try self.mcvsForBinMathOp(inst, op_lhs, op_rhs); + const dst_mcv = mcvs.dst; + const src_mcv = mcvs.src; + log.warn("dst_mcv = {}, src_mcv = {}", .{ dst_mcv, src_mcv }); + const tag = self.air.instructions.items(.tag)[inst]; + switch (tag) { .add, .addwrap => try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv), .bool_or, .bit_or => try self.genBinMathOpMir(.@"or", dst_ty, dst_mcv, src_mcv), .bool_and, .bit_and => try self.genBinMathOpMir(.@"and", dst_ty, dst_mcv, src_mcv), @@ -2255,7 +2311,6 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: .mul, .mulwrap => try self.genIMulOpMir(dst_ty, dst_mcv, src_mcv), else => unreachable, } - return dst_mcv; } From d3edf298d18ff89bb0e0a3bfcee84a9fb63b1c6b Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 16 Feb 2022 15:24:04 +0100 Subject: [PATCH 5/9] x64: fix signed truncate --- src/arch/x86_64/CodeGen.zig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index ff3905cba7..c0d18a908d 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -966,7 +966,7 @@ fn airTrunc(self: *Self, inst: Air.Inst.Index) !void { const reg: Register = blk: { if (operand.isRegister()) { if (self.reuseOperand(inst, ty_op.operand, 0, operand)) { - break :blk operand.register; + break :blk operand.register.to64(); } } const mcv = try self.copyToNewRegister(inst, src_ty, operand); @@ -2300,7 +2300,6 @@ fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: const mcvs = try self.mcvsForBinMathOp(inst, op_lhs, op_rhs); const dst_mcv = mcvs.dst; const src_mcv = 
mcvs.src; - log.warn("dst_mcv = {}, src_mcv = {}", .{ dst_mcv, src_mcv }); const tag = self.air.instructions.items(.tag)[inst]; switch (tag) { .add, .addwrap => try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv), From 09d468b237e58dd70cfa0a4311efab5ddabc9f95 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 16 Feb 2022 20:35:03 +0100 Subject: [PATCH 6/9] x64: overhaul stack handling mechanics Now, the abstracted stack offsets grow in the same direction as the real stack values in hardware, and allocating stack memory is done by the taking the last stack offset, adding required abi size and aligning to the required abi align. Stack handling is now more natural as it aligns itself with how it works in hardware; hence stepping through the debugger and printing out different stack values is intuitive. Finally, the stack pointers are now correctly aligned to the required (and not necessarily natural) alignment. --- src/arch/x86_64/CodeGen.zig | 135 ++++++++++++++++-------------------- src/arch/x86_64/Emit.zig | 9 ++- src/arch/x86_64/Mir.zig | 1 + 3 files changed, 64 insertions(+), 81 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index c0d18a908d..b593b3ea64 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -810,8 +810,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending - const offset = mem.alignForwardGeneric(u32, self.next_stack_offset, abi_align); - self.next_stack_offset = offset + abi_size; + const offset = mem.alignForwardGeneric(u32, self.next_stack_offset + abi_size, abi_align); + self.next_stack_offset = offset; if (self.next_stack_offset > self.max_end_stack) self.max_end_stack = self.next_stack_offset; try self.stack.putNoClobber(self.gpa, offset, .{ @@ -823,7 +823,8 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u /// Use a pointer instruction as the basis for allocating stack memory. 
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.typeOfIndex(inst).elemType(); + const ptr_ty = self.air.typeOfIndex(inst); + const elem_ty = ptr_ty.elemType(); if (!elem_ty.hasRuntimeBits()) { return self.allocMem(inst, 8, 8); @@ -833,7 +834,7 @@ fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; // TODO swap this for inst.ty.ptrAlign - const abi_align = elem_ty.abiAlignment(self.target.*); + const abi_align = ptr_ty.ptrAlignment(self.target.*); return self.allocMem(inst, abi_size, abi_align); } @@ -1148,8 +1149,8 @@ fn airSlice(self: *Self, inst: Air.Inst.Index) !void { const len_ty = self.air.typeOf(bin_op.rhs); const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16)); - try self.genSetStack(ptr_ty, stack_offset + 8, ptr); - try self.genSetStack(len_ty, stack_offset, len); + try self.genSetStack(ptr_ty, stack_offset, ptr); + try self.genSetStack(len_ty, stack_offset - 8, len); const result = MCValue{ .stack_offset = stack_offset }; return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); @@ -1455,7 +1456,7 @@ fn airSlicePtr(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv: MCValue = blk: { switch (operand) { .stack_offset => |off| { - break :blk MCValue{ .stack_offset = off + 8 }; + break :blk MCValue{ .stack_offset = off }; }, else => return self.fail("TODO implement slice_ptr for {}", .{operand}), } @@ -1472,7 +1473,7 @@ fn airSliceLen(self: *Self, inst: Air.Inst.Index) !void { const dst_mcv: MCValue = blk: { switch (operand) { .stack_offset => |off| { - break :blk MCValue{ .stack_offset = off }; + break :blk MCValue{ .stack_offset = off - 8 }; }, else => return self.fail("TODO implement slice_len for {}", .{operand}), } @@ -1540,7 +1541,7 @@ fn airSliceElemVal(self: *Self, inst: Air.Inst.Index) !void { .reg2 = .rbp, .flags = 0b01, }).encode(), - .data = .{ .imm = @bitCast(u32, -@intCast(i32, off + 16)) }, + .data = .{ .imm = @bitCast(u32, -@intCast(i32, off)) }, }); }, else => return self.fail("TODO implement slice_elem_val when slice is {}", .{slice_mcv}), @@ -1571,7 +1572,6 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const array_ty = self.air.typeOf(bin_op.lhs); - const array_abi_size = array_ty.abiSize(self.target.*); const array = try self.resolveInst(bin_op.lhs); array.freezeIfRegister(&self.register_manager); defer array.unfreezeIfRegister(&self.register_manager); @@ -1597,7 +1597,7 @@ fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void { .reg1 = addr_reg.to64(), .reg2 = .rbp, }).encode(), - .data = .{ .imm = @bitCast(u32, -(off + @intCast(i32, array_abi_size))) }, + .data = .{ .imm = @bitCast(u32, -off) }, }); }, else => return self.fail("TODO implement array_elem_val when array is {}", .{array}), @@ -1806,7 +1806,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); return self.genInlineMemcpy( - -(off + @intCast(i32, abi_size)), + -off, .rbp, registerAlias(addr_reg, @divExact(reg.size(), 8)), count_reg.to64(), @@ -2093,10 +2093,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde const mcv = try self.resolveInst(operand); const ptr_ty = self.air.typeOf(operand); const struct_ty = ptr_ty.childType(); - const 
struct_size = @intCast(u32, struct_ty.abiSize(self.target.*)); const struct_field_offset = @intCast(u32, struct_ty.structFieldOffset(index, self.target.*)); - const struct_field_ty = struct_ty.structFieldType(index); - const struct_field_size = @intCast(u32, struct_field_ty.abiSize(self.target.*)); const dst_mcv: MCValue = result: { switch (mcv) { @@ -2112,8 +2109,7 @@ fn structFieldPtr(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, inde break :result dst_mcv; }, .ptr_stack_offset => |off| { - const offset_to_field = struct_size - struct_field_offset - struct_field_size; - const ptr_stack_offset = off + @intCast(i32, offset_to_field); + const ptr_stack_offset = off - @intCast(i32, struct_field_offset); break :result MCValue{ .ptr_stack_offset = ptr_stack_offset }; }, .register => |reg| { @@ -2153,15 +2149,12 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const mcv = try self.resolveInst(operand); const struct_ty = self.air.typeOf(operand); - const struct_size = struct_ty.abiSize(self.target.*); const struct_field_offset = struct_ty.structFieldOffset(index, self.target.*); const struct_field_ty = struct_ty.structFieldType(index); - const struct_field_size = struct_field_ty.abiSize(self.target.*); switch (mcv) { .stack_offset => |off| { - const offset_to_field = struct_size - struct_field_offset - struct_field_size; - const stack_offset = off + @intCast(i32, offset_to_field); + const stack_offset = off - @intCast(i32, struct_field_offset); break :result MCValue{ .stack_offset = stack_offset }; }, .register => |reg| { @@ -2369,7 +2362,6 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC if (off > math.maxInt(i32)) { return self.fail("stack offset too large", .{}); } - const adj_off = off + @intCast(i32, abi_size); _ = try self.addInst(.{ .tag = mir_tag, .ops = (Mir.Ops{ @@ -2377,7 +2369,7 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC .reg2 = .rbp, .flags = 0b01, }).encode(), - .data = .{ .imm = @bitCast(u32, -adj_off) }, + .data = .{ .imm = @bitCast(u32, -off) }, }); }, .compare_flags_unsigned => { @@ -2395,7 +2387,6 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC if (abi_size > 8) { return self.fail("TODO implement ADD/SUB/CMP for stack dst with large ABI", .{}); } - const adj_off = off + @intCast(i32, abi_size); switch (src_mcv) { .none => unreachable, @@ -2411,7 +2402,7 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC .reg2 = registerAlias(src_reg, @intCast(u32, abi_size)), .flags = 0b10, }).encode(), - .data = .{ .imm = @bitCast(u32, -adj_off) }, + .data = .{ .imm = @bitCast(u32, -off) }, }); }, .immediate => |imm| { @@ -2432,7 +2423,7 @@ fn genBinMathOpMir(self: *Self, mir_tag: Mir.Inst.Tag, dst_ty: Type, dst_mcv: MC else => unreachable, }; const payload = try self.addExtra(Mir.ImmPair{ - .dest_off = @bitCast(u32, -adj_off), + .dest_off = @bitCast(u32, -off), .operand = @truncate(u32, imm), }); _ = try self.addInst(.{ @@ -2583,9 +2574,16 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { self.arg_index += 1; const mcv = self.args[arg_index]; + const max_stack = loop: for (self.args) |arg| { + switch (arg) { + .stack_offset => |last| break :loop last, + else => {}, + } + } else 0; const payload = try self.addExtra(Mir.ArgDbgInfo{ .air_inst = inst, .arg_index = arg_index, + .max_stack = @intCast(u32, max_stack), }); _ = try 
self.addInst(.{ .tag = .arg_dbg_info, @@ -2601,11 +2599,9 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void { self.register_manager.getRegAssumeFree(reg.to64(), inst); break :blk mcv; }, - .stack_offset => { - const ty = self.air.typeOfIndex(inst); - const abi_size = ty.abiSize(self.target.*); - const off = @intCast(i32, (arg_index + 1) * abi_size) + 16; - break :blk MCValue{ .stack_offset = -off }; + .stack_offset => |off| { + const offset = max_stack - off + 16; + break :blk MCValue{ .stack_offset = -offset }; }, else => return self.fail("TODO implement arg for {}", .{mcv}), } @@ -2648,7 +2644,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void { var info = try self.resolveCallingConventionValues(fn_ty); defer info.deinit(self); - var stack_adjustment: u32 = 0; + var stack_adjustment: ?u32 = null; for (args) |arg, arg_i| { const mc_arg = info.args[arg_i]; const arg_ty = self.air.typeOf(arg); @@ -2662,9 +2658,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void { try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => |off| { - const abi_size = @intCast(u32, arg_ty.abiSize(self.target.*)); try self.genSetStackArg(arg_ty, off, arg_mcv); - stack_adjustment += abi_size; + if (stack_adjustment == null) { + stack_adjustment = @intCast(u32, off); + } }, .ptr_stack_offset => { return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); @@ -2685,14 +2682,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void { } } - if (stack_adjustment > 0) { + if (stack_adjustment) |off| { // Adjust the stack _ = try self.addInst(.{ .tag = .sub, .ops = (Mir.Ops{ .reg1 = .rsp, }).encode(), - .data = .{ .imm = stack_adjustment }, + .data = .{ .imm = off }, }); } @@ -2820,14 +2817,14 @@ fn airCall(self: *Self, inst: Air.Inst.Index) !void { } } else unreachable; - if (stack_adjustment > 0) { + if (stack_adjustment) |off| { // Readjust the stack _ = try self.addInst(.{ .tag = .add, .ops = (Mir.Ops{ .reg1 = .rsp, }).encode(), - .data = .{ .imm = stack_adjustment }, + .data = .{ .imm = off }, }); } @@ -3583,14 +3580,13 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE return self.genSetStackArg(ty, stack_offset, .{ .register = reg }); }, .immediate => |imm| { - const off = stack_offset + @intCast(i32, abi_size); switch (abi_size) { 1, 2, 4 => { // We have a positive stack offset value but we want a twos complement negative // offset from rbp, which is at the top of the stack frame. 
// mov [rbp+offset], immediate const payload = try self.addExtra(Mir.ImmPair{ - .dest_off = @bitCast(u32, -off), + .dest_off = @bitCast(u32, -stack_offset), .operand = @truncate(u32, imm), }); _ = try self.addInst(.{ @@ -3680,7 +3676,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE // TODO allow for abi_size to be u64 try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); try self.genInlineMemcpy( - -(stack_offset + @intCast(i32, abi_size)), + -stack_offset, .rsp, addr_reg.to64(), count_reg.to64(), @@ -3695,14 +3691,14 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE .reg2 = registerAlias(reg, @intCast(u32, abi_size)), .flags = 0b10, }).encode(), - .data = .{ .imm = @bitCast(u32, -(stack_offset + @intCast(i32, abi_size))) }, + .data = .{ .imm = @bitCast(u32, -stack_offset) }, }); }, .ptr_stack_offset => { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); }, - .stack_offset => |unadjusted_off| { + .stack_offset => |off| { if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); @@ -3725,13 +3721,13 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE .reg1 = addr_reg.to64(), .reg2 = .rbp, }).encode(), - .data = .{ .imm = @bitCast(u32, -(unadjusted_off + @intCast(i32, abi_size))) }, + .data = .{ .imm = @bitCast(u32, -off) }, }); // TODO allow for abi_size to be u64 try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); try self.genInlineMemcpy( - -(stack_offset + @intCast(i32, abi_size)), + -stack_offset, .rsp, addr_reg.to64(), count_reg.to64(), @@ -3767,17 +3763,13 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro return self.genSetStack(ty, stack_offset, .{ .register = reg }); }, .immediate => |x_big| { - const adj_off = stack_offset + @intCast(i32, abi_size); - if (adj_off > 128) { + if (stack_offset > 128) { return self.fail("TODO implement set stack variable with large stack offset", .{}); } switch (abi_size) { 1, 2, 4 => { - // We have a positive stack offset value but we want a twos complement negative - // offset from rbp, which is at the top of the stack frame. - // mov [rbp+offset], immediate const payload = try self.addExtra(Mir.ImmPair{ - .dest_off = @bitCast(u32, -adj_off), + .dest_off = @bitCast(u32, -stack_offset), .operand = @truncate(u32, x_big), }); _ = try self.addInst(.{ @@ -3795,15 +3787,11 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro }); }, 8 => { - // We have a positive stack offset value but we want a twos complement negative - // offset from rbp, which is at the top of the stack frame. 
- const negative_offset = -adj_off; - // 64 bit write to memory would take two mov's anyways so we // insted just use two 32 bit writes to avoid register allocation { const payload = try self.addExtra(Mir.ImmPair{ - .dest_off = @bitCast(u32, negative_offset + 4), + .dest_off = @bitCast(u32, -stack_offset + 4), .operand = @truncate(u32, x_big >> 32), }); _ = try self.addInst(.{ @@ -3817,7 +3805,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro } { const payload = try self.addExtra(Mir.ImmPair{ - .dest_off = @bitCast(u32, negative_offset), + .dest_off = @bitCast(u32, -stack_offset), .operand = @truncate(u32, x_big), }); _ = try self.addInst(.{ @@ -3839,7 +3827,6 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro if (stack_offset > math.maxInt(i32)) { return self.fail("stack offset too large", .{}); } - const adj_off = stack_offset + @intCast(i32, abi_size); _ = try self.addInst(.{ .tag = .mov, .ops = (Mir.Ops{ @@ -3847,7 +3834,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro .reg2 = registerAlias(reg, @intCast(u32, abi_size)), .flags = 0b10, }).encode(), - .data = .{ .imm = @bitCast(u32, -adj_off) }, + .data = .{ .imm = @bitCast(u32, -stack_offset) }, }); }, .memory, @@ -3913,7 +3900,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); return self.genInlineMemcpy( - -(stack_offset + @intCast(i32, abi_size)), + -stack_offset, .rbp, addr_reg.to64(), count_reg.to64(), @@ -3952,14 +3939,14 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro .reg1 = addr_reg.to64(), .reg2 = .rbp, }).encode(), - .data = .{ .imm = @bitCast(u32, -(off + @intCast(i32, abi_size))) }, + .data = .{ .imm = @bitCast(u32, -off) }, }); // TODO allow for abi_size to be u64 try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); return self.genInlineMemcpy( - -(stack_offset + @intCast(i32, abi_size)), + -stack_offset, .rbp, addr_reg.to64(), count_reg.to64(), @@ -4073,11 +4060,10 @@ fn genInlineMemcpy( fn genInlineMemset(self: *Self, ty: Type, stack_offset: i32, value: MCValue) InnerError!void { try self.register_manager.getReg(.rax, null); const abi_size = ty.abiSize(self.target.*); - const adj_off = stack_offset + @intCast(i32, abi_size); - if (adj_off > 128) { + if (stack_offset > 128) { return self.fail("TODO inline memset with large stack offset", .{}); } - const negative_offset = @bitCast(u32, -adj_off); + const negative_offset = @bitCast(u32, -stack_offset); // We are actually counting `abi_size` bytes; however, we reuse the index register // as both the counter and offset scaler, hence we need to subtract one from `abi_size` @@ -4165,10 +4151,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void const abi_size = ty.abiSize(self.target.*); switch (mcv) { .dead => unreachable, - .ptr_stack_offset => |unadjusted_off| { - const elem_ty = ty.childType(); - const elem_abi_size = elem_ty.abiSize(self.target.*); - const off = unadjusted_off + @intCast(i32, elem_abi_size); + .ptr_stack_offset => |off| { if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) { return self.fail("stack offset too large", .{}); } @@ -4391,8 +4374,7 @@ fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void } } }, - .stack_offset => |unadjusted_off| { - const off = unadjusted_off + @intCast(i32, abi_size); + 
.stack_offset => |off| { if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) { return self.fail("stack offset too large", .{}); } @@ -4469,8 +4451,8 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) !void { const array_len = array_ty.arrayLenIncludingSentinel(); const result: MCValue = if (self.liveness.isUnused(inst)) .dead else blk: { const stack_offset = @intCast(i32, try self.allocMem(inst, 16, 16)); - try self.genSetStack(ptr_ty, stack_offset + 8, ptr); - try self.genSetStack(Type.initTag(.u64), stack_offset, .{ .immediate = array_len }); + try self.genSetStack(ptr_ty, stack_offset, ptr); + try self.genSetStack(Type.initTag(.u64), stack_offset - 8, .{ .immediate = array_len }); break :blk .{ .stack_offset = stack_offset }; }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -4883,7 +4865,6 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var next_stack_offset: u32 = 0; var count: usize = param_types.len; while (count > 0) : (count -= 1) { - // for (param_types) |ty, i| { const i = count - 1; const ty = param_types[i]; if (!ty.hasRuntimeBits()) { @@ -4892,6 +4873,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { continue; } const param_size = @intCast(u32, ty.abiSize(self.target.*)); + const param_align = @intCast(u32, ty.abiAlignment(self.target.*)); if (by_reg.get(i)) |int_reg| { const aliased_reg = registerAlias(c_abi_int_param_regs[int_reg], param_size); result.args[i] = .{ .register = aliased_reg }; @@ -4902,8 +4884,9 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // such as ptr and len of slices as separate registers. // TODO: also we need to honor the C ABI for relevant types rather than passing on // the stack here. - result.args[i] = .{ .stack_offset = @intCast(i32, next_stack_offset) }; - next_stack_offset += param_size; + const offset = mem.alignForwardGeneric(u32, next_stack_offset + param_size, param_align); + result.args[i] = .{ .stack_offset = @intCast(i32, offset) }; + next_stack_offset = offset; } } diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index 128ea52847..32877cb6f8 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -931,16 +931,15 @@ fn mirArgDbgInfo(emit: *Emit, inst: Mir.Inst.Index) InnerError!void { const payload = emit.mir.instructions.items(.data)[inst].payload; const arg_dbg_info = emit.mir.extraData(Mir.ArgDbgInfo, payload).data; const mcv = emit.mir.function.args[arg_dbg_info.arg_index]; - try emit.genArgDbgInfo(arg_dbg_info.air_inst, mcv, arg_dbg_info.arg_index); + try emit.genArgDbgInfo(arg_dbg_info.air_inst, mcv, arg_dbg_info.max_stack); } -fn genArgDbgInfo(emit: *Emit, inst: Air.Inst.Index, mcv: MCValue, arg_index: u32) !void { +fn genArgDbgInfo(emit: *Emit, inst: Air.Inst.Index, mcv: MCValue, max_stack: u32) !void { const ty_str = emit.mir.function.air.instructions.items(.data)[inst].ty_str; const zir = &emit.mir.function.mod_fn.owner_decl.getFileScope().zir; const name = zir.nullTerminatedString(ty_str.str); const name_with_null = name.ptr[0 .. 
name.len + 1]; const ty = emit.mir.function.air.getRefType(ty_str.ty); - const abi_size = ty.abiSize(emit.bin_file.options.target); switch (mcv) { .register => |reg| { @@ -960,7 +959,7 @@ fn genArgDbgInfo(emit: *Emit, inst: Air.Inst.Index, mcv: MCValue, arg_index: u32 .none => {}, } }, - .stack_offset => { + .stack_offset => |off| { switch (emit.debug_output) { .dwarf => |dbg_out| { // we add here +16 like we do in airArg in CodeGen since we refer directly to @@ -968,7 +967,7 @@ fn genArgDbgInfo(emit: *Emit, inst: Air.Inst.Index, mcv: MCValue, arg_index: u32 // prologue, and 8 bytes for return address. // TODO we need to make this more generic if we don't use rbp as the frame pointer // for example when -fomit-frame-pointer is set. - const disp = @intCast(i32, arg_index * abi_size + 16); + const disp = @intCast(i32, max_stack) - off + 16; try dbg_out.dbg_info.ensureUnusedCapacity(8); dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter); const fixup = dbg_out.dbg_info.items.len; diff --git a/src/arch/x86_64/Mir.zig b/src/arch/x86_64/Mir.zig index 046cb0e9f6..83922d03a2 100644 --- a/src/arch/x86_64/Mir.zig +++ b/src/arch/x86_64/Mir.zig @@ -413,6 +413,7 @@ pub const DbgLineColumn = struct { pub const ArgDbgInfo = struct { air_inst: Air.Inst.Index, arg_index: u32, + max_stack: u32, }; pub fn deinit(mir: *Mir, gpa: std.mem.Allocator) void { From 94474ec7c783b94face643f448426bcb91adfc93 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 16 Feb 2022 21:40:08 +0100 Subject: [PATCH 7/9] x64: refactor use of inline memcpy --- src/arch/x86_64/CodeGen.zig | 301 +++++++++++------------------------- 1 file changed, 86 insertions(+), 215 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b593b3ea64..e63edf0eac 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1782,36 +1782,7 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo return self.genSetStack(elem_ty, off, MCValue{ .register = tmp_reg }); } - self.register_manager.freezeRegs(&.{ .rax, .rcx }); - defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx }); - - const regs = try self.register_manager.allocRegs(3, .{ null, null, null }); - const addr_reg = regs[0]; - const count_reg = regs[1]; - const tmp_reg = regs[2]; - - _ = try self.addInst(.{ - .tag = .mov, - .ops = (Mir.Ops{ - .reg1 = registerAlias(addr_reg, @divExact(reg.size(), 8)), - .reg2 = reg, - }).encode(), - .data = undefined, - }); - - try self.register_manager.getReg(.rax, null); - try self.register_manager.getReg(.rcx, null); - - // TODO allow for abi size to be u64 - try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); - - return self.genInlineMemcpy( - -off, - .rbp, - registerAlias(addr_reg, @divExact(reg.size(), 8)), - count_reg.to64(), - tmp_reg.to8(), - ); + try self.genInlineMemcpy(off, .rbp, elem_ty, ptr); }, else => return self.fail("TODO implement loading from register into {}", .{dst_mcv}), } @@ -3626,62 +3597,7 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); } - self.register_manager.freezeRegs(&.{ .rax, .rcx }); - defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx }); - - const addr_reg: Register = blk: { - switch (mcv) { - .got_load, - .direct_load, - => |sym_index| { - const flags: u2 = switch (mcv) { - .got_load => 0b00, - .direct_load => 0b01, - else => unreachable, - }; - const addr_reg = try 
self.register_manager.allocReg(null); - _ = try self.addInst(.{ - .tag = .lea_pie, - .ops = (Mir.Ops{ - .reg1 = addr_reg.to64(), - .flags = flags, - }).encode(), - .data = .{ - .load_reloc = .{ - .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index, - .sym_index = sym_index, - }, - }, - }); - break :blk addr_reg; - }, - .memory => |addr| { - const addr_reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = addr }); - break :blk addr_reg; - }, - else => unreachable, - } - }; - - self.register_manager.freezeRegs(&.{addr_reg}); - defer self.register_manager.unfreezeRegs(&.{addr_reg}); - - const regs = try self.register_manager.allocRegs(2, .{ null, null }); - const count_reg = regs[0]; - const tmp_reg = regs[1]; - - try self.register_manager.getReg(.rax, null); - try self.register_manager.getReg(.rcx, null); - - // TODO allow for abi_size to be u64 - try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); - try self.genInlineMemcpy( - -stack_offset, - .rsp, - addr_reg.to64(), - count_reg.to64(), - tmp_reg.to8(), - ); + try self.genInlineMemcpy(stack_offset, .rsp, ty, mcv); }, .register => |reg| { _ = try self.addInst(.{ @@ -3698,41 +3614,13 @@ fn genSetStackArg(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerE const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); }, - .stack_offset => |off| { + .stack_offset => { if (abi_size <= 8) { const reg = try self.copyToTmpRegister(ty, mcv); return self.genSetStackArg(ty, stack_offset, MCValue{ .register = reg }); } - self.register_manager.freezeRegs(&.{ .rax, .rcx }); - defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx }); - - const regs = try self.register_manager.allocRegs(3, .{ null, null, null }); - const addr_reg = regs[0]; - const count_reg = regs[1]; - const tmp_reg = regs[2]; - - try self.register_manager.getReg(.rax, null); - try self.register_manager.getReg(.rcx, null); - - _ = try self.addInst(.{ - .tag = .lea, - .ops = (Mir.Ops{ - .reg1 = addr_reg.to64(), - .reg2 = .rbp, - }).encode(), - .data = .{ .imm = @bitCast(u32, -off) }, - }); - - // TODO allow for abi_size to be u64 - try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); - try self.genInlineMemcpy( - -stack_offset, - .rsp, - addr_reg.to64(), - count_reg.to64(), - tmp_reg.to8(), - ); + try self.genInlineMemcpy(stack_offset, .rsp, ty, mcv); }, else => return self.fail("TODO implement args on stack for {}", .{mcv}), } @@ -3847,65 +3735,7 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } - try self.register_manager.getReg(.rax, null); - try self.register_manager.getReg(.rcx, null); - - self.register_manager.freezeRegs(&.{ .rax, .rcx, .rbp }); - defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx, .rbp }); - - const addr_reg: Register = blk: { - switch (mcv) { - .memory => |addr| { - const reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = addr }); - break :blk reg; - }, - .direct_load, - .got_load, - => |sym_index| { - const flags: u2 = switch (mcv) { - .got_load => 0b00, - .direct_load => 0b01, - else => unreachable, - }; - const addr_reg = try self.register_manager.allocReg(null); - _ = try self.addInst(.{ - .tag = .lea_pie, - .ops = (Mir.Ops{ - .reg1 = addr_reg.to64(), - .flags = flags, - }).encode(), - .data = .{ - .load_reloc = .{ - .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index, - 
.sym_index = sym_index, - }, - }, - }); - break :blk addr_reg; - }, - else => { - return self.fail("TODO implement memcpy for setting stack from {}", .{mcv}); - }, - } - }; - - self.register_manager.freezeRegs(&.{addr_reg}); - defer self.register_manager.unfreezeRegs(&.{addr_reg}); - - const regs = try self.register_manager.allocRegs(2, .{ null, null }); - const count_reg = regs[0]; - const tmp_reg = regs[1]; - - // TODO allow for abi_size to be u64 - try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); - - return self.genInlineMemcpy( - -stack_offset, - .rbp, - addr_reg.to64(), - count_reg.to64(), - tmp_reg.to8(), - ); + try self.genInlineMemcpy(stack_offset, .rbp, ty, mcv); }, .ptr_stack_offset => { const reg = try self.copyToTmpRegister(ty, mcv); @@ -3922,48 +3752,89 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); } - self.register_manager.freezeRegs(&.{ .rax, .rcx, .rbp }); - defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx, .rbp }); - - const regs = try self.register_manager.allocRegs(3, .{ null, null, null }); - const addr_reg = regs[0]; - const count_reg = regs[1]; - const tmp_reg = regs[2]; - - try self.register_manager.getReg(.rax, null); - try self.register_manager.getReg(.rcx, null); - - _ = try self.addInst(.{ - .tag = .lea, - .ops = (Mir.Ops{ - .reg1 = addr_reg.to64(), - .reg2 = .rbp, - }).encode(), - .data = .{ .imm = @bitCast(u32, -off) }, - }); - - // TODO allow for abi_size to be u64 - try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); - - return self.genInlineMemcpy( - -stack_offset, - .rbp, - addr_reg.to64(), - count_reg.to64(), - tmp_reg.to8(), - ); + try self.genInlineMemcpy(stack_offset, .rbp, ty, mcv); }, } } -fn genInlineMemcpy( - self: *Self, - stack_offset: i32, - stack_reg: Register, - addr_reg: Register, - count_reg: Register, - tmp_reg: Register, -) InnerError!void { +fn genInlineMemcpy(self: *Self, stack_offset: i32, stack_reg: Register, ty: Type, val: MCValue) InnerError!void { + const abi_size = ty.abiSize(self.target.*); + + try self.register_manager.getReg(.rax, null); + try self.register_manager.getReg(.rcx, null); + + self.register_manager.freezeRegs(&.{ .rax, .rcx, .rbp }); + defer self.register_manager.unfreezeRegs(&.{ .rax, .rcx, .rbp }); + + const addr_reg: Register = blk: { + switch (val) { + .memory => |addr| { + const reg = try self.copyToTmpRegister(Type.usize, .{ .immediate = addr }); + break :blk reg; + }, + .direct_load, + .got_load, + => |sym_index| { + const flags: u2 = switch (val) { + .got_load => 0b00, + .direct_load => 0b01, + else => unreachable, + }; + const addr_reg = (try self.register_manager.allocReg(null)).to64(); + _ = try self.addInst(.{ + .tag = .lea_pie, + .ops = (Mir.Ops{ + .reg1 = addr_reg, + .flags = flags, + }).encode(), + .data = .{ + .load_reloc = .{ + .atom_index = self.mod_fn.owner_decl.link.macho.local_sym_index, + .sym_index = sym_index, + }, + }, + }); + break :blk addr_reg; + }, + .stack_offset => |off| { + const addr_reg = (try self.register_manager.allocReg(null)).to64(); + _ = try self.addInst(.{ + .tag = .lea, + .ops = (Mir.Ops{ + .reg1 = addr_reg, + .reg2 = .rbp, + }).encode(), + .data = .{ .imm = @bitCast(u32, -off) }, + }); + break :blk addr_reg; + }, + .register => |reg| { + const addr_reg = try self.register_manager.allocReg(null); + _ = try self.addInst(.{ + .tag = .mov, + .ops = (Mir.Ops{ + .reg1 = registerAlias(addr_reg, 
@divExact(reg.size(), 8)), + .reg2 = reg, + }).encode(), + .data = undefined, + }); + break :blk addr_reg.to64(); + }, + else => { + return self.fail("TODO implement memcpy for setting stack from {}", .{val}); + }, + } + }; + + self.register_manager.freezeRegs(&.{addr_reg}); + defer self.register_manager.unfreezeRegs(&.{addr_reg}); + + const regs = try self.register_manager.allocRegs(2, .{ null, null }); + const count_reg = regs[0].to64(); + const tmp_reg = regs[1].to8(); + + try self.genSetReg(Type.u32, count_reg, .{ .immediate = @intCast(u32, abi_size) }); + // mov rcx, 0 _ = try self.addInst(.{ .tag = .mov, @@ -4016,7 +3887,7 @@ fn genInlineMemcpy( .reg1 = stack_reg, .reg2 = tmp_reg.to8(), }).encode(), - .data = .{ .imm = @bitCast(u32, stack_offset) }, + .data = .{ .imm = @bitCast(u32, -stack_offset) }, }); // add rcx, 1 @@ -4959,8 +4830,8 @@ fn parseRegName(name: []const u8) ?Register { return std.meta.stringToEnum(Register, name); } +/// Returns register wide enough to hold at least `size_bytes`. fn registerAlias(reg: Register, size_bytes: u32) Register { - // For x86_64 we have to pick a smaller register alias depending on abi size. if (size_bytes == 0) { unreachable; // should be comptime known } else if (size_bytes <= 1) { From c1fb14c51ef9b329c0e4dfedfe6ae591bd972a88 Mon Sep 17 00:00:00 2001 From: Jakub Konka Date: Wed, 16 Feb 2022 22:41:19 +0100 Subject: [PATCH 8/9] x64: handle storing non-pow2 values to stack from register This actually requires storing and shifting mechanics so that we don't accidentally clobber anything else on the stack. --- src/arch/x86_64/CodeGen.zig | 57 +++++++++++++++++++++++++++++++------ 1 file changed, 48 insertions(+), 9 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index e63edf0eac..257c9e5a6c 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -3715,15 +3715,54 @@ fn genSetStack(self: *Self, ty: Type, stack_offset: i32, mcv: MCValue) InnerErro if (stack_offset > math.maxInt(i32)) { return self.fail("stack offset too large", .{}); } - _ = try self.addInst(.{ - .tag = .mov, - .ops = (Mir.Ops{ - .reg1 = .rbp, - .reg2 = registerAlias(reg, @intCast(u32, abi_size)), - .flags = 0b10, - }).encode(), - .data = .{ .imm = @bitCast(u32, -stack_offset) }, - }); + + const is_power_of_two = (abi_size % 2) == 0; + if (!is_power_of_two) { + self.register_manager.freezeRegs(&.{reg}); + defer self.register_manager.unfreezeRegs(&.{reg}); + + const tmp_reg = try self.copyToTmpRegister(ty, mcv); + + var next_offset = stack_offset; + var remainder = abi_size; + while (remainder > 0) { + const closest_power_of_two = @as(u6, 1) << @intCast(u3, math.log2(remainder)); + + _ = try self.addInst(.{ + .tag = .mov, + .ops = (Mir.Ops{ + .reg1 = .rbp, + .reg2 = registerAlias(tmp_reg, closest_power_of_two), + .flags = 0b10, + }).encode(), + .data = .{ .imm = @bitCast(u32, -next_offset) }, + }); + + if (closest_power_of_two > 1) { + _ = try self.addInst(.{ + .tag = .shr, + .ops = (Mir.Ops{ + .reg1 = tmp_reg, + .flags = 0b10, + }).encode(), + .data = .{ .imm = closest_power_of_two * 8 }, + }); + } + + remainder -= closest_power_of_two; + next_offset -= closest_power_of_two; + } + } else { + _ = try self.addInst(.{ + .tag = .mov, + .ops = (Mir.Ops{ + .reg1 = .rbp, + .reg2 = registerAlias(reg, @intCast(u32, abi_size)), + .flags = 0b10, + }).encode(), + .data = .{ .imm = @bitCast(u32, -stack_offset) }, + }); + } }, .memory, .embedded_in_code, From 3193cc1c1ea1796df2ae40d5a11396d8626a8070 Mon Sep 17 00:00:00 2001 From: Jakub 
Konka Date: Thu, 17 Feb 2022 00:09:33 +0100 Subject: [PATCH 9/9] x64: fix ptr_add and ptr_sub Add standalone implementation of operand reuse for ptr related arithmetic operations of add and sub. --- src/arch/x86_64/CodeGen.zig | 130 +++++++++++++++--------------------- test/behavior/align.zig | 2 - test/behavior/cast.zig | 5 +- 3 files changed, 55 insertions(+), 82 deletions(-) diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index 257c9e5a6c..aba98276c3 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1071,68 +1071,61 @@ fn airMax(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } -fn airPtrAdd(self: *Self, inst: Air.Inst.Index) !void { - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - if (self.liveness.isUnused(inst)) { - return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); - } - - const mcvs = try self.mcvsForBinMathOp(inst, bin_op.lhs, bin_op.rhs); - var dst_mcv = mcvs.dst; - const src_mcv = mcvs.src; - - // TODO clean this up - // TODO take into account alignment +fn genPtrBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { const dst_ty = self.air.typeOfIndex(inst); const elem_size = dst_ty.elemType2().abiSize(self.target.*); - const dst_reg = blk: { - switch (dst_mcv) { - .register => |reg| break :blk reg, - else => { - src_mcv.freezeIfRegister(&self.register_manager); - defer src_mcv.freezeIfRegister(&self.register_manager); - const reg = try self.copyToTmpRegister(dst_ty, dst_mcv); - break :blk reg; - }, - } - }; - try self.genIMulOpMir(dst_ty, .{ .register = dst_reg }, .{ .immediate = elem_size }); - dst_mcv = .{ .register = dst_reg }; - try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv); + const ptr = try self.resolveInst(op_lhs); + const offset = try self.resolveInst(op_rhs); + const offset_ty = self.air.typeOf(op_rhs); - return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); + ptr.freezeIfRegister(&self.register_manager); + defer ptr.unfreezeIfRegister(&self.register_manager); + + offset.freezeIfRegister(&self.register_manager); + defer offset.unfreezeIfRegister(&self.register_manager); + + const dst_mcv = blk: { + if (self.reuseOperand(inst, op_lhs, 0, ptr)) { + if (ptr.isMemory() or ptr.isRegister()) break :blk ptr; + } + break :blk try self.copyToNewRegister(inst, dst_ty, ptr); + }; + + const offset_mcv = blk: { + if (self.reuseOperand(inst, op_rhs, 1, offset)) { + if (offset.isRegister()) break :blk offset; + } + break :blk MCValue{ .register = try self.copyToTmpRegister(offset_ty, offset) }; + }; + + try self.genIMulOpMir(offset_ty, offset_mcv, .{ .immediate = elem_size }); + + const tag = self.air.instructions.items(.tag)[inst]; + switch (tag) { + .ptr_add => try self.genBinMathOpMir(.add, dst_ty, dst_mcv, offset_mcv), + .ptr_sub => try self.genBinMathOpMir(.sub, dst_ty, dst_mcv, offset_mcv), + else => unreachable, + } + + return dst_mcv; +} + +fn airPtrAdd(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result = if (self.liveness.isUnused(inst)) + .dead + else + try self.genPtrBinMathOp(inst, bin_op.lhs, bin_op.rhs); + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airPtrSub(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - if (self.liveness.isUnused(inst)) { - return self.finishAir(inst, 
.dead, .{ bin_op.lhs, bin_op.rhs, .none }); - } - - const mcvs = try self.mcvsForBinMathOp(inst, bin_op.lhs, bin_op.rhs); - var dst_mcv = mcvs.dst; - const src_mcv = mcvs.src; - - // TODO clean this up - // TODO take into account alignment - const dst_ty = self.air.typeOfIndex(inst); - const elem_size = dst_ty.elemType2().abiSize(self.target.*); - const dst_reg = blk: { - switch (dst_mcv) { - .register => |reg| break :blk reg, - else => { - src_mcv.freezeIfRegister(&self.register_manager); - defer src_mcv.freezeIfRegister(&self.register_manager); - const reg = try self.copyToTmpRegister(dst_ty, dst_mcv); - break :blk reg; - }, - } - }; - try self.genIMulOpMir(dst_ty, .{ .register = dst_reg }, .{ .immediate = elem_size }); - dst_mcv = .{ .register = dst_reg }; - try self.genBinMathOpMir(.sub, dst_ty, dst_mcv, src_mcv); - - return self.finishAir(inst, dst_mcv, .{ bin_op.lhs, bin_op.rhs, .none }); + const result = if (self.liveness.isUnused(inst)) + .dead + else + try self.genPtrBinMathOp(inst, bin_op.lhs, bin_op.rhs); + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } fn airSlice(self: *Self, inst: Air.Inst.Index) !void { @@ -2178,17 +2171,10 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) !void { return self.finishAir(inst, result, .{ extra.struct_operand, .none, .none }); } -const BinMathOpMCValuePair = struct { - dst: MCValue, - src: MCValue, -}; - -fn mcvsForBinMathOp( - self: *Self, - inst: Air.Inst.Index, - op_lhs: Air.Inst.Ref, - op_rhs: Air.Inst.Ref, -) !BinMathOpMCValuePair { +/// Perform "binary" operators, excluding comparisons. +/// Currently, the following ops are supported: +/// ADD, SUB, XOR, OR, AND +fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { // TODO: make this algorithm less bad const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); @@ -2201,6 +2187,7 @@ fn mcvsForBinMathOp( const dst_ty = self.air.typeOfIndex(inst); var dst_mcv: MCValue = undefined; var src_mcv: MCValue = undefined; + if (self.reuseOperand(inst, op_lhs, 0, lhs)) { // LHS dies; use it as the destination. // Both operands cannot be memory. @@ -2253,17 +2240,6 @@ fn mcvsForBinMathOp( else => {}, } - return BinMathOpMCValuePair{ .dst = dst_mcv, .src = src_mcv }; -} - -/// Perform "binary" operators, excluding comparisons. 
-/// Currently, the following ops are supported: -/// ADD, SUB, XOR, OR, AND -fn genBinMathOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { - const dst_ty = self.air.typeOfIndex(inst); - const mcvs = try self.mcvsForBinMathOp(inst, op_lhs, op_rhs); - const dst_mcv = mcvs.dst; - const src_mcv = mcvs.src; const tag = self.air.instructions.items(.tag)[inst]; switch (tag) { .add, .addwrap => try self.genBinMathOpMir(.add, dst_ty, dst_mcv, src_mcv), diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 8a315ecab0..22cbce8261 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -106,7 +106,6 @@ fn fnWithAlignedStack() i32 { test "implicitly decreasing slice alignment" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; const a: u32 align(4) = 3; const b: u32 align(8) = 4; @@ -274,7 +273,6 @@ fn whyWouldYouEverDoThis(comptime align_bytes: u8) align(align_bytes) u8 { test "runtime known array index has best alignment possible" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // take full advantage of over-alignment diff --git a/test/behavior/cast.zig b/test/behavior/cast.zig index 70b17a55a1..75d470b21f 100644 --- a/test/behavior/cast.zig +++ b/test/behavior/cast.zig @@ -199,7 +199,7 @@ fn MakeType(comptime T: type) type { test "implicit cast from *[N]T to [*c]T" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; var x: [4]u16 = [4]u16{ 0, 1, 2, 3 }; var y: [*c]u16 = &x; @@ -274,7 +274,7 @@ test "*const ?[*]const T to [*c]const [*c]const T" { test "array coersion to undefined at runtime" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; @setRuntimeSafety(true); @@ -339,7 +339,6 @@ test "peer type unsigned int to signed" { test "expected [*c]const u8, found [*:0]const u8" { if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; - if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; var a: [*:0]const u8 = "hello"; var b: [*c]const u8 = a;