From 9827ffe1ded884796aafe21cd6f6941f5ac3a279 Mon Sep 17 00:00:00 2001
From: Jacob Young
Date: Thu, 3 Apr 2025 12:09:07 -0400
Subject: [PATCH] x86_64: fix incorrect handling of unreusable operands

Closes #23448
---
 src/arch/x86_64/CodeGen.zig | 15 ++++++++++---
 test/behavior/struct.zig    | 44 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 55 insertions(+), 4 deletions(-)

diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index e85af0d0ee..88d186acef 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -104889,8 +104889,9 @@ const Temp = struct {
         const result_temp: Temp = .{ .index = result_temp_index.toIndex() };
         assert(cg.reuseTemp(result_temp.index, first_temp.index, first_temp_tracking));
         assert(cg.reuseTemp(result_temp.index, second_temp.index, second_temp_tracking));
-        cg.temp_type[@intFromEnum(result_temp_index)] = .slice_const_u8;
         result_temp_index.tracking(cg).* = .init(result);
+        cg.temp_type[@intFromEnum(result_temp_index)] = .slice_const_u8;
+        cg.next_temp_index = @enumFromInt(@intFromEnum(result_temp_index) + 1);
         first_temp.* = result_temp;
         second_temp.* = result_temp;
     }
@@ -109598,7 +109599,8 @@ const Temp = struct {
     ) InnerError!void {
         const tomb_bits = cg.liveness.getTombBits(inst);
         for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| {
-            if (op_temp.index != temp.index and op_temp.tracking(cg).short != .dead) try op_temp.die(cg);
+            if (op_temp.index == temp.index) continue;
+            if (op_temp.tracking(cg).short != .dead) try op_temp.die(cg);
             if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
             if (cg.reused_operands.isSet(op_index)) continue;
             try cg.processDeath(op_ref.toIndexAllowNone() orelse continue);
@@ -109617,6 +109619,12 @@ const Temp = struct {
                 assert(cg.reuseTemp(inst, temp_index.toIndex(), temp_tracking));
             },
         }
+        for (0.., op_refs, op_temps) |op_index, op_ref, op_temp| {
+            if (op_temp.index != temp.index) continue;
+            if (tomb_bits & @as(Liveness.Bpi, 1) << @intCast(op_index) == 0) continue;
+            if (cg.reused_operands.isSet(op_index)) continue;
+            try cg.processDeath(op_ref.toIndexAllowNone() orelse continue);
+        }
     }
 
     fn die(temp: Temp, cg: *CodeGen) InnerError!void {
@@ -109642,7 +109650,8 @@ const Temp = struct {
     }
 
     fn isValid(index: Index, cg: *CodeGen) bool {
-        return index.tracking(cg).short != .dead;
+        return @intFromEnum(index) < @intFromEnum(cg.next_temp_index) and
+            index.tracking(cg).short != .dead;
     }
 
     fn typeOf(index: Index, cg: *CodeGen) Type {
diff --git a/test/behavior/struct.zig b/test/behavior/struct.zig
index 27f56c7cba..93fb18dd1e 100644
--- a/test/behavior/struct.zig
+++ b/test/behavior/struct.zig
@@ -1527,7 +1527,7 @@ test "optional generic function label struct field" {
 }
 
 test "struct fields get automatically reordered" {
-    if (builtin.zig_backend != .stage2_llvm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
 
     const S1 = struct {
         a: u32,
@@ -2137,3 +2137,45 @@ test "anonymous struct equivalence" {
     comptime assert(A != C);
     comptime assert(B != C);
 }
+
+test "field access through mem ptr arg" {
+    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
+
+    const S = struct {
+        fn nestedFieldAccess(
+            _: usize,
+            _: usize,
+            _: usize,
+            _: usize,
+            _: usize,
+            _: usize,
+            _: usize,
+            _: usize,
+            ptr_struct: *const struct { field: u32 },
+        ) u32 {
+            return ptr_struct.field;
+        }
+    };
+    try expect(S.nestedFieldAccess(
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        &.{ .field = 0x6b00a2eb },
+    ) == 0x6b00a2eb);
+    comptime assert(S.nestedFieldAccess(
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        undefined,
+        &.{ .field = 0x0ced271f },
+    ) == 0x0ced271f);
+}