mirror of
https://github.com/ziglang/zig.git
synced 2026-01-21 06:45:24 +00:00
Sema: Fix fnptr alignment safety checks to account for potential ISA tag.
As seen on e.g. Arm/Thumb and MIPS (MIPS16/microMIPS). Fixes #22888.
This commit is contained in:
parent
d31bda13cb
commit
fc7a0c4878
30
src/Sema.zig
30
src/Sema.zig
@ -23099,8 +23099,14 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
|
||||
}
|
||||
if (ptr_align.compare(.gt, .@"1")) {
|
||||
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
|
||||
const align_minus_1 = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue(Type.usize, align_bytes_minus_1))).toIntern());
|
||||
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1);
|
||||
const align_mask = Air.internedToRef((try sema.splat(operand_ty, try pt.intValue(
|
||||
Type.usize,
|
||||
if (elem_ty.fnPtrMaskOrNull(zcu)) |mask|
|
||||
align_bytes_minus_1 & mask
|
||||
else
|
||||
align_bytes_minus_1,
|
||||
))).toIntern());
|
||||
const remainder = try block.addBinOp(.bit_and, operand_coerced, align_mask);
|
||||
const is_aligned = if (is_vector) all_aligned: {
|
||||
const splat_zero_usize = Air.internedToRef((try sema.splat(operand_ty, .zero_usize)).toIntern());
|
||||
const is_aligned = try block.addCmpVector(remainder, splat_zero_usize, .eq);
|
||||
@ -23129,8 +23135,14 @@ fn zirPtrFromInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
|
||||
}
|
||||
if (ptr_align.compare(.gt, .@"1")) {
|
||||
const align_bytes_minus_1 = ptr_align.toByteUnits().? - 1;
|
||||
const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
|
||||
const remainder = try block.addBinOp(.bit_and, elem_coerced, align_minus_1);
|
||||
const align_mask = Air.internedToRef((try pt.intValue(
|
||||
Type.usize,
|
||||
if (elem_ty.fnPtrMaskOrNull(zcu)) |mask|
|
||||
align_bytes_minus_1 & mask
|
||||
else
|
||||
align_bytes_minus_1,
|
||||
)).toIntern());
|
||||
const remainder = try block.addBinOp(.bit_and, elem_coerced, align_mask);
|
||||
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
|
||||
try sema.addSafetyCheck(block, src, is_aligned, .incorrect_alignment);
|
||||
}
|
||||
@ -23710,13 +23722,19 @@ fn ptrCastFull(
|
||||
try Type.fromInterned(dest_info.child).hasRuntimeBitsSema(pt))
|
||||
{
|
||||
const align_bytes_minus_1 = dest_align.toByteUnits().? - 1;
|
||||
const align_minus_1 = Air.internedToRef((try pt.intValue(Type.usize, align_bytes_minus_1)).toIntern());
|
||||
const align_mask = Air.internedToRef((try pt.intValue(
|
||||
Type.usize,
|
||||
if (Type.fromInterned(dest_info.child).fnPtrMaskOrNull(zcu)) |mask|
|
||||
align_bytes_minus_1 & mask
|
||||
else
|
||||
align_bytes_minus_1,
|
||||
)).toIntern());
|
||||
const actual_ptr = if (src_info.flags.size == .slice)
|
||||
try sema.analyzeSlicePtr(block, src, ptr, operand_ty)
|
||||
else
|
||||
ptr;
|
||||
const ptr_int = try block.addBitCast(.usize, actual_ptr);
|
||||
const remainder = try block.addBinOp(.bit_and, ptr_int, align_minus_1);
|
||||
const remainder = try block.addBinOp(.bit_and, ptr_int, align_mask);
|
||||
const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize);
|
||||
const ok = if (src_info.flags.size == .slice and dest_info.flags.size == .slice) ok: {
|
||||
const len = try sema.analyzeSliceLen(block, operand_src, ptr);
|
||||
|
||||
@ -2541,6 +2541,13 @@ pub fn fnIsVarArgs(ty: Type, zcu: *const Zcu) bool {
|
||||
return zcu.intern_pool.indexToKey(ty.toIntern()).func_type.is_var_args;
|
||||
}
|
||||
|
||||
/// Returns the bitmask that must be applied to a pointer to a function of
/// type `ty` before checking its alignment, or `null` when no masking is
/// needed. Some ISAs encode a mode tag in the low bit of function pointers
/// (e.g. the Arm/Thumb bit), which must not trip alignment safety checks.
pub fn fnPtrMaskOrNull(ty: Type, zcu: *const Zcu) ?u64 {
    // Only function types can carry an ISA tag in their pointer representation.
    if (ty.zigTypeTag(zcu) != .@"fn") return null;
    return target_util.functionPointerMask(zcu.getTarget());
}
|
||||
|
||||
pub fn isNumeric(ty: Type, zcu: *const Zcu) bool {
|
||||
return switch (ty.toIntern()) {
|
||||
.f16_type,
|
||||
|
||||
@ -626,6 +626,17 @@ pub fn supportsFunctionAlignment(target: std.Target) bool {
|
||||
};
|
||||
}
|
||||
|
||||
/// Returns the mask to apply to a function pointer's integer value before
/// alignment checks on `target`, or `null` when function pointers carry no
/// ISA tag bits.
///
/// 32-bit Arm uses the LSB to mean that the target function contains Thumb
/// code; MIPS uses the LSB to mean that the target function contains
/// MIPS16/microMIPS code.
pub fn functionPointerMask(target: std.Target) ?u64 {
    const arch = target.cpu.arch;
    if (arch.isArm() or arch.isMIPS32()) return ~@as(u32, 1);
    if (arch.isMIPS64()) return ~@as(u64, 1);
    return null;
}
|
||||
|
||||
pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool {
|
||||
switch (backend) {
|
||||
.stage1, .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
|
||||
|
||||
@ -613,3 +613,14 @@ test "zero-bit fields in extern struct pad fields appropriately" {
|
||||
try expect(@intFromPtr(&s.y) == @intFromPtr(&s.a));
|
||||
try expect(@as(*S, @fieldParentPtr("a", &s.a)) == &s);
|
||||
}
|
||||
|
||||
test "function pointer @intFromPtr/@ptrFromInt roundtrip" {
    // This only succeeds on Thumb if we handle the Thumb bit correctly; if not, the `@ptrFromInt`
    // will incorrectly trip an alignment safety check.

    // NOTE: the scraped page rendered `&nothing` as `¬hing` — the HTML
    // entity `&not` was decoded to `¬`. Restored to the address-of expression.
    const nothing_ptr: *const fn () callconv(.c) void = &nothing;
    const nothing_int: usize = @intFromPtr(nothing_ptr);
    const nothing_ptr2: *const fn () callconv(.c) void = @ptrFromInt(nothing_int);

    try std.testing.expectEqual(nothing_ptr, nothing_ptr2);
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user