codegen/llvm: truncate padding bits when loading a non-byte-sized value
parent 6bb10c7271
commit 370662c565
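The commit addresses the class of miscompilation originally reported at https://github.com/ziglang/zig/issues/14200: when a value whose bit size is not a multiple of 8 (for example a 4-bit packed struct) sits in memory whose padding bits were written through a wider pointer, the LLVM backend could read it back with a non-byte-sized load, which the LLVM LangRef documents as undefined unless the memory was written with a store of the same type. The sketch below is a minimal reproduction in the spirit of the new behavior tests added further down; the std.testing harness and the inline enum declarations are illustrative choices for a standalone snippet, not code from this commit.

const std = @import("std");

// Only 4 bits are significant (a u1 tag plus a u3 tag); the rest of the byte is padding.
const Piece = packed struct {
    color: enum { WHITE, BLACK },
    type: enum { KING, QUEEN, BISHOP, KNIGHT, ROOK, PAWN },
};

test "padding bits must not leak into field comparisons" {
    var p: Piece = undefined;
    // Write a whole byte, deliberately setting the padding bits above bit 3.
    @as(*u8, @ptrCast(&p)).* = 0b11111011;
    // With this change the backend loads the full byte and truncates the padding
    // bits, so the == comparisons below only see the 4 significant bits.
    try std.testing.expect(p.color == .BLACK);
    try std.testing.expect(p.type == .PAWN);
}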
@@ -6179,7 +6179,6 @@ pub const FuncGen = struct {
                 const elem_alignment = elem_ty.abiAlignment(mod).toLlvm();
                 return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
             } else {
-                const elem_llvm_ty = try o.lowerType(elem_ty);
                 if (Air.refToIndex(bin_op.lhs)) |lhs_index| {
                     if (self.air.instructions.items(.tag)[lhs_index] == .load) {
                         const load_data = self.air.instructions.items(.data)[lhs_index];
@@ -6201,7 +6200,7 @@ pub const FuncGen = struct {
                                     &indices,
                                     "",
                                 );
-                                return self.wip.load(.normal, elem_llvm_ty, gep, .default, "");
+                                return self.loadTruncate(.normal, elem_ty, gep, .default);
                             },
                             else => {},
                         }
@@ -6210,7 +6209,7 @@ pub const FuncGen = struct {
                 }
                 const elem_ptr =
                     try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
-                return self.wip.load(.normal, elem_llvm_ty, elem_ptr, .default, "");
+                return self.loadTruncate(.normal, elem_ty, elem_ptr, .default);
             }
         }
 
@@ -6378,13 +6377,12 @@ pub const FuncGen = struct {
                 const payload_index = @intFromBool(layout.tag_align.compare(.gte, layout.payload_align));
                 const field_ptr =
                     try self.wip.gepStruct(union_llvm_ty, struct_llvm_val, payload_index, "");
-                const llvm_field_ty = try o.lowerType(field_ty);
                 const payload_alignment = layout.payload_align.toLlvm();
                 if (isByRef(field_ty, mod)) {
                     if (canElideLoad(self, body_tail)) return field_ptr;
                     return self.loadByRef(field_ptr, field_ty, payload_alignment, .normal);
                 } else {
-                    return self.wip.load(.normal, llvm_field_ty, field_ptr, payload_alignment, "");
+                    return self.loadTruncate(.normal, field_ty, field_ptr, payload_alignment);
                 }
             },
             else => unreachable,
@@ -10219,8 +10217,7 @@ pub const FuncGen = struct {
 
                 return fg.loadByRef(payload_ptr, payload_ty, payload_alignment, .normal);
             }
-            const payload_llvm_ty = try o.lowerType(payload_ty);
-            return fg.wip.load(.normal, payload_llvm_ty, payload_ptr, payload_alignment, "");
+            return fg.loadTruncate(.normal, payload_ty, payload_ptr, payload_alignment);
         }
 
         assert(!isByRef(payload_ty, mod));
@@ -10321,6 +10318,36 @@ pub const FuncGen = struct {
         }
     }
 
+    /// Load a value and, if needed, mask out padding bits for non byte-sized integer values.
+    fn loadTruncate(
+        fg: *FuncGen,
+        access_kind: Builder.MemoryAccessKind,
+        payload_ty: Type,
+        payload_ptr: Builder.Value,
+        payload_alignment: Builder.Alignment,
+    ) !Builder.Value {
+        // from https://llvm.org/docs/LangRef.html#load-instruction :
+        // "When loading a value of a type like i20 with a size that is not an integral number of bytes, the result is undefined if the value was not originally written using a store of the same type."
+        // => so load the byte aligned value and trunc the unwanted bits.
+
+        const o = fg.dg.object;
+        const mod = o.module;
+        const payload_llvm_ty = try o.lowerType(payload_ty);
+        const load_llvm_ty = if (payload_ty.isAbiInt(mod))
+            try o.builder.intType(@intCast(payload_ty.abiSize(mod) * 8))
+        else
+            payload_llvm_ty;
+        const loaded = try fg.wip.load(access_kind, load_llvm_ty, payload_ptr, payload_alignment, "");
+        const shifted = if (payload_llvm_ty != load_llvm_ty and o.target.cpu.arch.endian() == .Big)
+            try fg.wip.bin(.lshr, loaded, try o.builder.intValue(
+                load_llvm_ty,
+                (payload_ty.abiSize(mod) - (std.math.divCeil(u64, payload_ty.bitSize(mod), 8) catch unreachable)) * 8,
+            ), "")
+        else
+            loaded;
+        return fg.wip.conv(.unneeded, shifted, payload_llvm_ty, "");
+    }
+
     /// Load a by-ref type by constructing a new alloca and performing a memcpy.
     fn loadByRef(
         fg: *FuncGen,
@@ -10378,21 +10405,7 @@ pub const FuncGen = struct {
             if (isByRef(elem_ty, mod)) {
                 return self.loadByRef(ptr, elem_ty, ptr_alignment, access_kind);
             }
-            const llvm_elem_ty = try o.lowerType(elem_ty);
-            const llvm_load_ty = if (elem_ty.isAbiInt(mod))
-                try o.builder.intType(@intCast(elem_ty.abiSize(mod) * 8))
-            else
-                llvm_elem_ty;
-            const loaded = try self.wip.load(access_kind, llvm_load_ty, ptr, ptr_alignment, "");
-            const shifted = if (llvm_elem_ty != llvm_load_ty and o.target.cpu.arch.endian() == .Big)
-                try self.wip.bin(.lshr, loaded, try o.builder.intValue(
-                    llvm_load_ty,
-                    (elem_ty.abiSize(mod) - (std.math.divCeil(u64, elem_ty.bitSize(mod), 8) catch
-                        unreachable)) * 8,
-                ), "")
-            else
-                loaded;
-            return self.wip.conv(.unneeded, shifted, llvm_elem_ty, "");
+            return self.loadTruncate(access_kind, elem_ty, ptr, ptr_alignment);
         }
 
         const containing_int_ty = try o.builder.intType(@intCast(info.packed_offset.host_size * 8));
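A note on the arithmetic in loadTruncate above: the big-endian branch accounts for the significant bytes of a narrow value landing at the high end of the wider load by shifting the loaded value right by (abiSize - divCeil(bitSize, 8)) * 8 bits before the final truncation. The snippet below only evaluates that shift for a 20-bit integer, taking the i20 example from the quoted LangRef passage and assuming a 4-byte ABI size; it is an illustration of the formula, not code from this commit.

const std = @import("std");

test "shift amount used on big-endian targets (illustrative)" {
    const abi_size: u64 = 4; // assumed ABI size, in bytes, of a 20-bit integer
    const bit_size: u64 = 20;
    const used_bytes = std.math.divCeil(u64, bit_size, 8) catch unreachable; // 3 bytes hold the 20 bits
    const shift = (abi_size - used_bytes) * 8; // one full padding byte below the value: 8 bits
    try std.testing.expectEqual(@as(u64, 8), shift);
}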
@@ -120,3 +120,106 @@ test "coerce non byte-sized integers accross 32bits boundary" {
     }
 }
 
+const Piece = packed struct {
+    color: Color,
+    type: Type,
+
+    const Type = enum { KING, QUEEN, BISHOP, KNIGHT, ROOK, PAWN };
+    const Color = enum { WHITE, BLACK };
+
+    fn charToPiece(c: u8) !@This() {
+        return .{
+            .type = try charToPieceType(c),
+            .color = if (std.ascii.isUpper(c)) Color.WHITE else Color.BLACK,
+        };
+    }
+
+    fn charToPieceType(c: u8) !Type {
+        return switch (std.ascii.toLower(c)) {
+            'p' => .PAWN,
+            'k' => .KING,
+            'q' => .QUEEN,
+            'b' => .BISHOP,
+            'n' => .KNIGHT,
+            'r' => .ROOK,
+            else => error.UnexpectedCharError,
+        };
+    }
+};
+
+test "load non byte-sized optional value" {
+    // Originally reported at https://github.com/ziglang/zig/issues/14200
+    // note: this bug is triggered by the == operator, expectEqual will hide it
+    var opt: ?Piece = try Piece.charToPiece('p');
+    try expect(opt.?.type == .PAWN);
+    try expect(opt.?.color == .BLACK);
+
+    var p: Piece = undefined;
+    @as(*u8, @ptrCast(&p)).* = 0b11111011;
+    try expect(p.type == .PAWN);
+    try expect(p.color == .BLACK);
+}
+
+test "load non byte-sized value in struct" {
+    if (builtin.cpu.arch.endian() != .Little) return error.SkipZigTest; // packed struct TODO
+
+    // note: this bug is triggered by the == operator, expectEqual will hide it
+    // using ptrCast not to depend on uninitialized memory state
+
+    var struct0: struct {
+        p: Piece,
+        int: u8,
+    } = undefined;
+    @as(*u8, @ptrCast(&struct0.p)).* = 0b11111011;
+    try expect(struct0.p.type == .PAWN);
+    try expect(struct0.p.color == .BLACK);
+
+    var struct1: packed struct {
+        p0: Piece,
+        p1: Piece,
+        pad: u1,
+        p2: Piece,
+    } = undefined;
+    @as(*u8, @ptrCast(&struct1.p0)).* = 0b11111011;
+    struct1.p1 = try Piece.charToPiece('p');
+    struct1.p2 = try Piece.charToPiece('p');
+    try expect(struct1.p0.type == .PAWN);
+    try expect(struct1.p0.color == .BLACK);
+    try expect(struct1.p1.type == .PAWN);
+    try expect(struct1.p1.color == .BLACK);
+    try expect(struct1.p2.type == .PAWN);
+    try expect(struct1.p2.color == .BLACK);
+}
+
+test "load non byte-sized value in union" {
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
+
+    // note: this bug is triggered by the == operator, expectEqual will hide it
+    // using ptrCast not to depend on uninitialized memory state
+
+    var union0: packed union {
+        p: Piece,
+        int: u8,
+    } = .{ .int = 0 };
+    union0.int = 0b11111011;
+    try expect(union0.p.type == .PAWN);
+    try expect(union0.p.color == .BLACK);
+
+    var union1: union {
+        p: Piece,
+        int: u8,
+    } = .{ .p = .{ .color = .WHITE, .type = .KING } };
+    @as(*u8, @ptrCast(&union1.p)).* = 0b11111011;
+    try expect(union1.p.type == .PAWN);
+    try expect(union1.p.color == .BLACK);
+
+    var pieces: [3]Piece = undefined;
+    @as(*u8, @ptrCast(&pieces[1])).* = 0b11111011;
+    try expect(pieces[1].type == .PAWN);
+    try expect(pieces[1].color == .BLACK);
+}
@@ -991,7 +991,7 @@ test "bitcast back and forth" {
 
 test "field access of packed struct smaller than its abi size inside struct initialized with rls" {
     // Originally reported at https://github.com/ziglang/zig/issues/14200
     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .arm) return error.SkipZigTest;
 
     const S = struct {
         ps: packed struct { x: i2, y: i2 },