From ac68d72d244fafb601725d22631f7834fb14212c Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Fri, 31 Mar 2023 20:31:39 -0400
Subject: [PATCH] x86_64: implement aggregate init of a packed struct

---
 src/arch/x86_64/CodeGen.zig     | 82 +++++++++++++++++++++++++++++----
 test/behavior/packed-struct.zig |  2 -
 test/behavior/struct.zig        |  1 -
 3 files changed, 74 insertions(+), 11 deletions(-)

diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index 604052ee7e..a216ed3c2c 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -7853,19 +7853,85 @@ fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
         if (self.liveness.isUnused(inst)) break :res MCValue.dead;
         switch (result_ty.zigTypeTag()) {
             .Struct => {
-                if (result_ty.containerLayout() == .Packed) {
-                    return self.fail("TODO airAggregateInit implement packed structs", .{});
-                }
                 const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
-                for (elements, 0..) |elem, elem_i| {
-                    if (result_ty.structFieldValueComptime(elem_i) != null) continue; // comptime elem
+                const dst_mcv = MCValue{ .stack_offset = stack_offset };
+                if (result_ty.containerLayout() == .Packed) {
+                    const struct_obj = result_ty.castTag(.@"struct").?.data;
+                    try self.genInlineMemset(
+                        dst_mcv,
+                        .{ .immediate = 0 },
+                        .{ .immediate = abi_size },
+                        .{},
+                    );
+                    for (elements, 0..) |elem, elem_i| {
+                        if (result_ty.structFieldValueComptime(elem_i) != null) continue;
+
+                        const elem_ty = result_ty.structFieldType(elem_i);
+                        const elem_bit_size = @intCast(u32, elem_ty.bitSize(self.target.*));
+                        if (elem_bit_size > 64) {
+                            return self.fail("TODO airAggregateInit implement packed structs with large fields", .{});
+                        }
+                        const elem_abi_size = @intCast(u32, elem_ty.abiSize(self.target.*));
+                        const elem_abi_bits = elem_abi_size * 8;
+                        const elem_off = struct_obj.packedFieldBitOffset(self.target.*, elem_i);
+                        const elem_byte_off = @intCast(i32, elem_off / elem_abi_bits * elem_abi_size);
+                        const elem_bit_off = elem_off % elem_abi_bits;
+                        const elem_mcv = try self.resolveInst(elem);
+                        const elem_lock = switch (elem_mcv) {
+                            .register => |reg| self.register_manager.lockReg(reg),
+                            .immediate => |imm| lock: {
+                                if (imm == 0) continue;
+                                break :lock null;
+                            },
+                            else => null,
+                        };
+                        defer if (elem_lock) |lock| self.register_manager.unlockReg(lock);
+                        const elem_reg = try self.copyToTmpRegister(elem_ty, elem_mcv);
+                        const elem_extra_bits = self.regExtraBits(elem_ty);
+                        if (elem_bit_off < elem_extra_bits) {
+                            try self.truncateRegister(elem_ty, registerAlias(elem_reg, elem_abi_size));
+                        }
+                        if (elem_bit_off > 0) try self.genShiftBinOpMir(
+                            .sal,
+                            elem_ty,
+                            .{ .register = elem_reg },
+                            .{ .immediate = elem_bit_off },
+                        );
+                        try self.genBinOpMir(
+                            .@"or",
+                            elem_ty,
+                            .{ .stack_offset = stack_offset - elem_byte_off },
+                            .{ .register = elem_reg },
+                        );
+                        if (elem_bit_off > elem_extra_bits) {
+                            const reg = try self.copyToTmpRegister(elem_ty, elem_mcv);
+                            if (elem_extra_bits > 0) {
+                                try self.truncateRegister(elem_ty, registerAlias(reg, elem_abi_size));
+                            }
+                            try self.genShiftBinOpMir(
+                                .sar,
+                                elem_ty,
+                                .{ .register = reg },
+                                .{ .immediate = elem_abi_bits - elem_bit_off },
+                            );
+                            try self.genBinOpMir(
+                                .@"or",
+                                elem_ty,
+                                .{ .stack_offset = stack_offset - elem_byte_off -
+                                    @intCast(i32, elem_abi_size) },
+                                .{ .register = reg },
+                            );
+                        }
+                    }
+                } else for (elements, 0..) |elem, elem_i| {
+                    if (result_ty.structFieldValueComptime(elem_i) != null) continue;
 
                     const elem_ty = result_ty.structFieldType(elem_i);
-                    const elem_off = result_ty.structFieldOffset(elem_i, self.target.*);
+                    const elem_off = @intCast(i32, result_ty.structFieldOffset(elem_i, self.target.*));
                     const elem_mcv = try self.resolveInst(elem);
-                    try self.genSetStack(elem_ty, stack_offset - @intCast(i32, elem_off), elem_mcv, .{});
+                    try self.genSetStack(elem_ty, stack_offset - elem_off, elem_mcv, .{});
                 }
-                break :res MCValue{ .stack_offset = stack_offset };
+                break :res dst_mcv;
             },
             .Array => {
                 const stack_offset = @intCast(i32, try self.allocMem(inst, abi_size, abi_align));
diff --git a/test/behavior/packed-struct.zig b/test/behavior/packed-struct.zig
index a7dfd46064..858d4f9c17 100644
--- a/test/behavior/packed-struct.zig
+++ b/test/behavior/packed-struct.zig
@@ -352,7 +352,6 @@ test "byte-aligned field pointer offsets" {
 }
 
 test "load pointer from packed struct" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
@@ -585,7 +584,6 @@ test "overaligned pointer to packed struct" {
 test "packed struct initialized in bitcast" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
 
     const T = packed struct { val: u8 };
     var val: u8 = 123;
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index b59615f01a..15a9861d0f 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -1244,7 +1244,6 @@ test "loading a struct pointer perfoms a copy" {
 }
 
 test "packed struct aggregate init" {
-    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
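
Note on the packed path: after reserving the stack slot, the new code zeroes it with
genInlineMemset, then for each runtime field copies the value to a temporary register,
shifts it left to its bit offset within its abi_size-byte group, and ORs it into the slot;
when the field straddles that group (elem_bit_off > elem_extra_bits), a second copy is
shifted right by elem_abi_bits - elem_bit_off and ORed into the following abi_size bytes.
A rough source-level sketch of the same packing follows. The struct layout and the helper
name are illustrative only (not code from this patch), and it assumes the two-argument
@bitCast of Zig at the time of this commit.

const std = @import("std");

// Illustrative layout only: three fields totalling 16 bits, with `b`
// crossing a byte boundary the way a straddling field would.
const Pair = packed struct { a: u3, b: u7, c: u6 };

// Hypothetical source-level model of the lowering: zero the backing
// memory, then shift each field value to its bit offset and OR it in,
// mirroring genInlineMemset / genShiftBinOpMir(.sal) / genBinOpMir(.@"or").
fn packPair(a: u3, b: u7, c: u6) u16 {
    var backing: u16 = 0; // genInlineMemset of the stack slot with 0
    backing |= @as(u16, a) << 0; // field `a` lives at bit offset 0
    backing |= @as(u16, b) << 3; // field `b` shifted to bit offset 3, then OR'd
    backing |= @as(u16, c) << 10; // field `c` shifted to bit offset 10, then OR'd
    return backing;
}

test "source-level model of packed struct aggregate init" {
    var b: u7 = 0x55; // runtime-known so the aggregate init is lowered at runtime
    const p = Pair{ .a = 1, .b = b, .c = 2 };
    try std.testing.expectEqual(packPair(1, b, 2), @bitCast(u16, p));
}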