Merge pull request #23188 from jacobly0/fix-23143

x86_64: fix crashes with symbols
This commit is contained in:
Andrew Kelley 2025-03-11 16:11:09 -04:00 committed by GitHub
commit 982c500be5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 212 additions and 71 deletions

View File

@ -97980,16 +97980,150 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
switch (pred_mcv) {
.register => |pred_reg| switch (pred_reg.class()) {
.general_purpose => {},
.sse => if (need_xmm0 and pred_reg.id() != comptime Register.xmm0.id()) {
.sse => if (elem_ty.toIntern() == .bool_type)
if (need_xmm0 and pred_reg.id() != comptime Register.xmm0.id()) {
try self.register_manager.getKnownReg(.xmm0, null);
try self.genSetReg(.xmm0, pred_ty, pred_mcv, .{});
break :mask .xmm0;
} else break :mask if (has_blend)
pred_reg
else
try self.copyToTmpRegister(pred_ty, pred_mcv),
try self.copyToTmpRegister(pred_ty, pred_mcv)
else
return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)}),
else => unreachable,
},
.register_mask => |pred_reg_mask| {
if (pred_reg_mask.info.scalar.bitSize(self.target) != 8 * elem_abi_size)
return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
const mask_reg: Register = if (need_xmm0 and pred_reg_mask.reg.id() != comptime Register.xmm0.id()) mask_reg: {
try self.register_manager.getKnownReg(.xmm0, null);
try self.genSetReg(.xmm0, ty, .{ .register = pred_reg_mask.reg }, .{});
break :mask_reg .xmm0;
} else pred_reg_mask.reg;
const mask_alias = registerAlias(mask_reg, abi_size);
const mask_lock = self.register_manager.lockRegAssumeUnused(mask_reg);
defer self.register_manager.unlockReg(mask_lock);
const lhs_mcv = try self.resolveInst(extra.lhs);
const lhs_lock = switch (lhs_mcv) {
.register => |lhs_reg| self.register_manager.lockRegAssumeUnused(lhs_reg),
else => null,
};
defer if (lhs_lock) |lock| self.register_manager.unlockReg(lock);
const rhs_mcv = try self.resolveInst(extra.rhs);
const rhs_lock = switch (rhs_mcv) {
.register => |rhs_reg| self.register_manager.lockReg(rhs_reg),
else => null,
};
defer if (rhs_lock) |lock| self.register_manager.unlockReg(lock);
const order = has_blend != pred_reg_mask.info.inverted;
const reuse_mcv, const other_mcv = if (order)
.{ rhs_mcv, lhs_mcv }
else
.{ lhs_mcv, rhs_mcv };
const dst_mcv: MCValue = if (reuse_mcv.isRegister() and self.reuseOperand(
inst,
if (order) extra.rhs else extra.lhs,
@intFromBool(order),
reuse_mcv,
)) reuse_mcv else if (has_avx)
.{ .register = try self.register_manager.allocReg(inst, abi.RegisterClass.sse) }
else
try self.copyToRegisterWithInstTracking(inst, ty, reuse_mcv);
const dst_reg = dst_mcv.getReg().?;
const dst_alias = registerAlias(dst_reg, abi_size);
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const mir_tag = @as(?Mir.Inst.FixedTag, if ((pred_reg_mask.info.kind == .all and
elem_ty.toIntern() != .f32_type and elem_ty.toIntern() != .f64_type) or pred_reg_mask.info.scalar == .byte)
if (has_avx)
.{ .vp_b, .blendv }
else if (has_blend)
.{ .p_b, .blendv }
else if (pred_reg_mask.info.kind == .all)
.{ .p_, undefined }
else
null
else if ((pred_reg_mask.info.kind == .all and (elem_ty.toIntern() != .f64_type or !self.hasFeature(.sse2))) or
pred_reg_mask.info.scalar == .dword)
if (has_avx)
.{ .v_ps, .blendv }
else if (has_blend)
.{ ._ps, .blendv }
else if (pred_reg_mask.info.kind == .all)
.{ ._ps, undefined }
else
null
else if (pred_reg_mask.info.kind == .all or pred_reg_mask.info.scalar == .qword)
if (has_avx)
.{ .v_pd, .blendv }
else if (has_blend)
.{ ._pd, .blendv }
else if (pred_reg_mask.info.kind == .all)
.{ ._pd, undefined }
else
null
else
null) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
if (has_avx) {
const rhs_alias = if (reuse_mcv.isRegister())
registerAlias(reuse_mcv.getReg().?, abi_size)
else rhs: {
try self.genSetReg(dst_reg, ty, reuse_mcv, .{});
break :rhs dst_alias;
};
if (other_mcv.isBase()) try self.asmRegisterRegisterMemoryRegister(
mir_tag,
dst_alias,
rhs_alias,
try other_mcv.mem(self, .{ .size = self.memSize(ty) }),
mask_alias,
) else try self.asmRegisterRegisterRegisterRegister(
mir_tag,
dst_alias,
rhs_alias,
registerAlias(if (other_mcv.isRegister())
other_mcv.getReg().?
else
try self.copyToTmpRegister(ty, other_mcv), abi_size),
mask_alias,
);
} else if (has_blend) if (other_mcv.isBase()) try self.asmRegisterMemoryRegister(
mir_tag,
dst_alias,
try other_mcv.mem(self, .{ .size = self.memSize(ty) }),
mask_alias,
) else try self.asmRegisterRegisterRegister(
mir_tag,
dst_alias,
registerAlias(if (other_mcv.isRegister())
other_mcv.getReg().?
else
try self.copyToTmpRegister(ty, other_mcv), abi_size),
mask_alias,
) else {
try self.asmRegisterRegister(.{ mir_tag[0], .@"and" }, dst_alias, mask_alias);
if (other_mcv.isBase()) try self.asmRegisterMemory(
.{ mir_tag[0], .andn },
mask_alias,
try other_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
) else try self.asmRegisterRegister(
.{ mir_tag[0], .andn },
mask_alias,
if (other_mcv.isRegister())
other_mcv.getReg().?
else
try self.copyToTmpRegister(ty, other_mcv),
);
try self.asmRegisterRegister(.{ mir_tag[0], .@"or" }, dst_alias, mask_alias);
}
break :result dst_mcv;
},
else => {},
}
const mask_reg: Register = if (need_xmm0) mask_reg: {
@ -98192,7 +98326,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
const dst_lock = self.register_manager.lockReg(dst_reg);
defer if (dst_lock) |lock| self.register_manager.unlockReg(lock);
const mir_tag = @as(?Mir.Inst.FixedTag, switch (ty.childType(zcu).zigTypeTag(zcu)) {
const mir_tag = @as(?Mir.Inst.FixedTag, switch (elem_ty.zigTypeTag(zcu)) {
else => null,
.int => switch (abi_size) {
0 => unreachable,
@ -98208,7 +98342,7 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
null,
else => null,
},
.float => switch (ty.childType(zcu).floatBits(self.target.*)) {
.float => switch (elem_ty.floatBits(self.target.*)) {
else => unreachable,
16, 80, 128 => null,
32 => switch (vec_len) {
@ -98262,30 +98396,20 @@ fn airSelect(self: *CodeGen, inst: Air.Inst.Index) !void {
try self.copyToTmpRegister(ty, lhs_mcv), abi_size),
mask_alias,
) else {
const mir_fixes = @as(?Mir.Inst.Fixes, switch (elem_ty.zigTypeTag(zcu)) {
else => null,
.int => .p_,
.float => switch (elem_ty.floatBits(self.target.*)) {
32 => ._ps,
64 => ._pd,
16, 80, 128 => null,
else => unreachable,
},
}) orelse return self.fail("TODO implement airSelect for {}", .{ty.fmt(pt)});
try self.asmRegisterRegister(.{ mir_fixes, .@"and" }, dst_alias, mask_alias);
try self.asmRegisterRegister(.{ mir_tag[0], .@"and" }, dst_alias, mask_alias);
if (rhs_mcv.isBase()) try self.asmRegisterMemory(
.{ mir_fixes, .andn },
.{ mir_tag[0], .andn },
mask_alias,
try rhs_mcv.mem(self, .{ .size = .fromSize(abi_size) }),
) else try self.asmRegisterRegister(
.{ mir_fixes, .andn },
.{ mir_tag[0], .andn },
mask_alias,
if (rhs_mcv.isRegister())
rhs_mcv.getReg().?
else
try self.copyToTmpRegister(ty, rhs_mcv),
);
try self.asmRegisterRegister(.{ mir_fixes, .@"or" }, dst_alias, mask_alias);
try self.asmRegisterRegister(.{ mir_tag[0], .@"or" }, dst_alias, mask_alias);
}
break :result dst_mcv;
};
@ -100824,11 +100948,11 @@ const Temp = struct {
const new_temp_index = cg.next_temp_index;
cg.temp_type[@intFromEnum(new_temp_index)] = .usize;
cg.next_temp_index = @enumFromInt(@intFromEnum(new_temp_index) + 1);
switch (temp.tracking(cg).short) {
else => |mcv| std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
const mcv = temp.tracking(cg).short;
switch (mcv) {
else => std.debug.panic("{s}: {}\n", .{ @src().fn_name, mcv }),
.register => |reg| {
const new_reg =
try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
const new_reg = try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
try cg.asmRegisterMemory(.{ ._, .lea }, new_reg.to64(), .{
.base = .{ .reg = reg.to64() },
@ -100836,33 +100960,22 @@ const Temp = struct {
});
},
.register_offset => |reg_off| {
const new_reg =
try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
const new_reg = try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
new_temp_index.tracking(cg).* = .init(.{ .register = new_reg });
try cg.asmRegisterMemory(.{ ._, .lea }, new_reg.to64(), .{
.base = .{ .reg = reg_off.reg.to64() },
.mod = .{ .rm = .{ .disp = reg_off.off + off } },
});
},
.load_symbol, .load_frame => {
const new_reg = try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
new_temp_index.tracking(cg).* = .init(.{ .register_offset = .{ .reg = new_reg, .off = off } });
try cg.genSetReg(new_reg, .usize, mcv, .{});
},
.lea_symbol => |sym_off| new_temp_index.tracking(cg).* = .init(.{ .lea_symbol = .{
.sym_index = sym_off.sym_index,
.off = sym_off.off + off,
} }),
.load_frame => |frame_addr| {
const new_reg =
try cg.register_manager.allocReg(new_temp_index.toIndex(), abi.RegisterClass.gp);
new_temp_index.tracking(cg).* = .init(.{ .register_offset = .{
.reg = new_reg,
.off = off,
} });
try cg.asmRegisterMemory(.{ ._, .mov }, new_reg.to64(), .{
.base = .{ .frame = frame_addr.index },
.mod = .{ .rm = .{
.size = .qword,
.disp = frame_addr.off,
} },
});
},
.lea_frame => |frame_addr| new_temp_index.tracking(cg).* = .init(.{ .lea_frame = .{
.index = frame_addr.index,
.off = frame_addr.off + off,
@ -101179,7 +101292,8 @@ const Temp = struct {
=> return temp.toRegClass(true, .general_purpose, cg),
.lea_symbol => |sym_off| {
const off = sym_off.off;
if (off == 0) return false;
// hack around linker relocation bugs
if (false and off == 0) return false;
try temp.toOffset(-off, cg);
while (try temp.toRegClass(true, .general_purpose, cg)) {}
try temp.toOffset(off, cg);

View File

@ -2008,9 +2008,9 @@ pub const WipNav = struct {
.decl_const_runtime_bits,
.decl_const_comptime_state,
.decl_const_runtime_bits_comptime_state,
.decl_empty_func,
.decl_nullary_func,
.decl_func,
.decl_empty_func_generic,
.decl_nullary_func_generic,
.decl_func_generic,
=> false,
.generic_decl_var,
@ -2626,8 +2626,8 @@ pub fn finishWipNavFunc(
abbrev_code_buf,
try dwarf.refAbbrevCode(switch (abbrev_code) {
else => unreachable,
.decl_func => .decl_empty_func,
.decl_instance_func => .decl_instance_empty_func,
.decl_func => .decl_nullary_func,
.decl_instance_func => .decl_instance_nullary_func,
}),
);
}
@ -3012,29 +3012,34 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
if (nav_gop.found_existing) switch (try dwarf.debug_info.declAbbrevCode(wip_nav.unit, nav_gop.value_ptr.*)) {
.null => {},
else => unreachable,
.decl_empty_func, .decl_func, .decl_instance_empty_func, .decl_instance_func => return,
.decl_empty_func_generic,
.decl_nullary_func, .decl_func, .decl_instance_nullary_func, .decl_instance_func => return,
.decl_nullary_func_generic,
.decl_func_generic,
.decl_instance_empty_func_generic,
.decl_instance_nullary_func_generic,
.decl_instance_func_generic,
=> dwarf.debug_info.section.getUnit(wip_nav.unit).getEntry(nav_gop.value_ptr.*).clear(),
} else nav_gop.value_ptr.* = try dwarf.addCommonEntry(wip_nav.unit);
wip_nav.entry = nav_gop.value_ptr.*;
const func_type = ip.indexToKey(func.ty).func_type;
const is_nullary = !func_type.is_var_args and for (0..func_type.param_types.len) |param_index| {
if (!func_type.paramIsComptime(std.math.cast(u5, param_index) orelse break false)) break false;
} else true;
const diw = wip_nav.debug_info.writer(dwarf.gpa);
try wip_nav.declCommon(if (func_type.param_types.len > 0 or func_type.is_var_args) .{
try wip_nav.declCommon(if (is_nullary) .{
.decl = .decl_nullary_func_generic,
.generic_decl = .generic_decl_func,
.decl_instance = .decl_instance_nullary_func_generic,
} else .{
.decl = .decl_func_generic,
.generic_decl = .generic_decl_func,
.decl_instance = .decl_instance_func_generic,
} else .{
.decl = .decl_empty_func_generic,
.generic_decl = .generic_decl_func,
.decl_instance = .decl_instance_empty_func_generic,
}, &nav, inst_info.file, &decl);
try wip_nav.refType(.fromInterned(func_type.return_type));
if (func_type.param_types.len > 0 or func_type.is_var_args) {
if (!is_nullary) {
for (0..func_type.param_types.len) |param_index| {
if (std.math.cast(u5, param_index)) |small_param_index|
if (func_type.paramIsComptime(small_param_index)) continue;
try wip_nav.abbrevCode(.func_type_param);
try wip_nav.refType(.fromInterned(func_type.param_types.get(ip)[param_index]));
}
@ -3568,12 +3573,14 @@ fn updateLazyType(
};
try diw.writeByte(@intFromEnum(cc));
try wip_nav.refType(.fromInterned(func_type.return_type));
if (!is_nullary) {
for (0..func_type.param_types.len) |param_index| {
try wip_nav.abbrevCode(.func_type_param);
try wip_nav.refType(.fromInterned(func_type.param_types.get(ip)[param_index]));
}
if (func_type.is_var_args) try wip_nav.abbrevCode(.is_var_args);
if (!is_nullary) try uleb128(diw, @intFromEnum(AbbrevCode.null));
try uleb128(diw, @intFromEnum(AbbrevCode.null));
}
},
.error_set_type => |error_set_type| {
try wip_nav.abbrevCode(if (error_set_type.names.len == 0) .generated_empty_enum_type else .generated_enum_type);
@ -4787,9 +4794,9 @@ const AbbrevCode = enum {
decl_const_runtime_bits,
decl_const_comptime_state,
decl_const_runtime_bits_comptime_state,
decl_empty_func,
decl_nullary_func,
decl_func,
decl_empty_func_generic,
decl_nullary_func_generic,
decl_func_generic,
generic_decl_var,
generic_decl_const,
@ -4806,9 +4813,9 @@ const AbbrevCode = enum {
decl_instance_const_runtime_bits,
decl_instance_const_comptime_state,
decl_instance_const_runtime_bits_comptime_state,
decl_instance_empty_func,
decl_instance_nullary_func,
decl_instance_func,
decl_instance_empty_func_generic,
decl_instance_nullary_func_generic,
decl_instance_func_generic,
// the rest are unrestricted other than empty variants must not be longer
// than the non-empty variant, and so should appear first
@ -5019,7 +5026,7 @@ const AbbrevCode = enum {
.{ .ZIG_comptime_value, .ref_addr },
},
},
.decl_empty_func = .{
.decl_nullary_func = .{
.tag = .subprogram,
.attrs = decl_abbrev_common_attrs ++ .{
.{ .linkage_name, .strp },
@ -5044,7 +5051,7 @@ const AbbrevCode = enum {
.{ .noreturn, .flag },
},
},
.decl_empty_func_generic = .{
.decl_nullary_func_generic = .{
.tag = .subprogram,
.attrs = decl_abbrev_common_attrs ++ .{
.{ .type, .ref_addr },
@ -5167,7 +5174,7 @@ const AbbrevCode = enum {
.{ .ZIG_comptime_value, .ref_addr },
},
},
.decl_instance_empty_func = .{
.decl_instance_nullary_func = .{
.tag = .subprogram,
.attrs = decl_instance_abbrev_common_attrs ++ .{
.{ .linkage_name, .strp },
@ -5192,7 +5199,7 @@ const AbbrevCode = enum {
.{ .noreturn, .flag },
},
},
.decl_instance_empty_func_generic = .{
.decl_instance_nullary_func_generic = .{
.tag = .subprogram,
.attrs = decl_instance_abbrev_common_attrs ++ .{
.{ .type, .ref_addr },

View File

@ -66,3 +66,23 @@ fn selectArrays() !void {
const xyz = @select(f32, x, y, z);
try expect(mem.eql(f32, &@as([4]f32, xyz), &[4]f32{ 0.0, 312.1, -145.9, -3381.233 }));
}
test "@select compare result" {
    // Backends that cannot lower vector compare masks into @select yet.
    if (builtin.zig_backend == .stage2_riscv64) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64 and builtin.target.ofmt != .elf and builtin.target.ofmt != .macho) return error.SkipZigTest;
    const Helper = struct {
        // Elementwise minimum: selects from `lhs` where `lhs < rhs`, else from `rhs`.
        fn elemMin(comptime V: type, lhs: V, rhs: V) V {
            return @select(@typeInfo(V).vector.child, lhs < rhs, lhs, rhs);
        }
        fn doTheTest() !void {
            const F32x4 = @Vector(4, f32);
            const F64x2 = @Vector(2, f64);
            try expect(@reduce(.And, elemMin(F32x4, .{ -1, 2, -3, 4 }, .{ 1, -2, 3, -4 }) == F32x4{ -1, -2, -3, -4 }));
            try expect(@reduce(.And, elemMin(F64x2, .{ -1, 2 }, .{ 1, -2 }) == F64x2{ -1, -2 }));
        }
    };
    // Exercise both runtime and comptime lowering of the compare-result select.
    try Helper.doTheTest();
    try comptime Helper.doTheTest();
}