breaking: fix @sizeOf to be alloc size rather than store size
* Fixes breaches of the guarantee that `@sizeOf(T) >= @alignOf(T)`
* Fixes std.mem.secureZero for integers where this guarantee was previously breached
* Fixes std.mem.Allocator for integers where this guarantee was previously breached

Closes #1851
Closes #1864
parent 567c9b688e
commit 7293e012d7
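The guarantee named in the commit message can be checked directly. A minimal sketch (editor's example, not part of the commit), relying on the same target-specific values the new behavior test asserts (u24 being 4-byte aligned):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "@sizeOf is the alloc size, never smaller than @alignOf" {
    // With this change @sizeOf(u24) reports the allocation size (4 bytes),
    // not the store size (3 bytes), so the guarantee below holds.
    expect(@sizeOf(u24) >= @alignOf(u24));
    expect(@sizeOf(u24) == 4); // assuming the usual 4-byte alignment of u24
    // The exact bit width is still available from the type itself.
    expect(u24.bit_count == 24);
}
```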
@@ -6299,10 +6299,15 @@ pub const FloatMode = enum {
<pre>{#syntax#}@sizeOf(comptime T: type) comptime_int{#endsyntax#}</pre>
<p>
This function returns the number of bytes it takes to store {#syntax#}T{#endsyntax#} in memory.
</p>
<p>
The result is a target-specific compile time constant.
</p>
<p>
This size may contain padding bytes. If there were two consecutive T in memory, this would be the offset
in bytes between element at index 0 and the element at index 1. For {#link|integer|Integers#},
consider whether you want to use {#syntax#}@sizeOf(T){#endsyntax#} or
{#syntax#}@typeInfo(T).Int.bits{#endsyntax#}.
</p>
{#see_also|@typeInfo#}
{#header_close#}

{#header_open|@sliceToBytes#}
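To illustrate the new doc paragraph (editor's sketch, not part of the commit): @sizeOf is the stride between array elements, padding included, while @typeInfo gives the exact bit width.

```zig
const std = @import("std");
const expect = std.testing.expect;

test "@sizeOf is the array stride and may include padding" {
    // The offset between two consecutive elements equals @sizeOf(T).
    var pair: [2]u24 = undefined;
    const stride = @ptrToInt(&pair[1]) - @ptrToInt(&pair[0]);
    expect(stride == @sizeOf(u24));
    // For the exact number of bits, use the type info instead.
    expect(@typeInfo(u24).Int.bits == 24);
}
```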
@@ -356,6 +356,28 @@ uint64_t type_size(CodeGen *g, ZigType *type_entry) {
        }
    }

    return LLVMABISizeOfType(g->target_data_ref, type_entry->type_ref);
}

uint64_t type_size_store(CodeGen *g, ZigType *type_entry) {
    assert(type_is_complete(type_entry));

    if (!type_has_bits(type_entry))
        return 0;

    if (type_entry->id == ZigTypeIdStruct && type_entry->data.structure.layout == ContainerLayoutPacked) {
        uint64_t size_in_bits = type_size_bits(g, type_entry);
        return (size_in_bits + 7) / 8;
    } else if (type_entry->id == ZigTypeIdArray) {
        ZigType *child_type = type_entry->data.array.child_type;
        if (child_type->id == ZigTypeIdStruct &&
            child_type->data.structure.layout == ContainerLayoutPacked)
        {
            uint64_t size_in_bits = type_size_bits(g, type_entry);
            return (size_in_bits + 7) / 8;
        }
    }

    return LLVMStoreSizeOfType(g->target_data_ref, type_entry->type_ref);
}
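type_size now reports LLVM's ABI (allocation) size, while the new type_size_store keeps the old behavior of rounding the bit size up to whole bytes. A sketch of the distinction from the Zig side (editor's example, assuming the usual 4-byte alignment of u24):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "alloc size versus store size for a 3-byte integer" {
    // Alloc size: what an array element or allocation slot occupies.
    expect(@sizeOf(u24) == 4);
    // Store size: the bytes needed to hold the value,
    // i.e. (bit_count + 7) / 8, which is what type_size_store computes.
    expect((u24.bit_count + 7) / 8 == 3);
}
```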
@@ -6230,14 +6252,19 @@ void render_const_value(CodeGen *g, Buf *buf, ConstExprValue *const_val) {
        case ZigTypeIdStruct:
            {
                if (is_slice(type_entry)) {
                    ConstPtrValue *ptr = &const_val->data.x_struct.fields[slice_ptr_index].data.x_ptr;
                    assert(ptr->special == ConstPtrSpecialBaseArray);
                    ConstExprValue *array = ptr->data.base_array.array_val;
                    size_t start = ptr->data.base_array.elem_index;

                    ConstExprValue *len_val = &const_val->data.x_struct.fields[slice_len_index];
                    size_t len = bigint_as_unsigned(&len_val->data.x_bigint);

                    ConstExprValue *ptr_val = &const_val->data.x_struct.fields[slice_ptr_index];
                    if (ptr_val->special == ConstValSpecialUndef) {
                        assert(len == 0);
                        buf_appendf(buf, "((%s)(undefined))[0..0]", buf_ptr(&type_entry->name));
                        return;
                    }
                    assert(ptr_val->data.x_ptr.special == ConstPtrSpecialBaseArray);
                    ConstExprValue *array = ptr_val->data.x_ptr.data.base_array.array_val;
                    size_t start = ptr_val->data.x_ptr.data.base_array.elem_index;

                    render_const_val_array(g, buf, &type_entry->name, array, start, len);
                } else {
                    buf_appendf(buf, "(struct %s constant)", buf_ptr(&type_entry->name));
@@ -19,6 +19,7 @@ ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const);
ZigType *get_pointer_to_type_extra(CodeGen *g, ZigType *child_type, bool is_const,
        bool is_volatile, PtrLen ptr_len, uint32_t byte_alignment, uint32_t bit_offset, uint32_t unaligned_bit_count);
uint64_t type_size(CodeGen *g, ZigType *type_entry);
uint64_t type_size_store(CodeGen *g, ZigType *type_entry);
uint64_t type_size_bits(CodeGen *g, ZigType *type_entry);
ZigType *get_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits);
ZigType *get_vector_type(CodeGen *g, uint32_t len, ZigType *elem_type);
src/ir.cpp

@@ -14331,15 +14331,15 @@ static Error ir_read_const_ptr(IrAnalyze *ira, CodeGen *codegen, AstNode *source
    if ((err = type_resolve(codegen, out_val->type, ResolveStatusSizeKnown)))
        return ErrorSemanticAnalyzeFail;

    size_t src_size = type_size(codegen, pointee->type);
    size_t dst_size = type_size(codegen, out_val->type);

    if (src_size == dst_size && types_have_same_zig_comptime_repr(pointee->type, out_val->type)) {
        copy_const_val(out_val, pointee, ptr_val->data.x_ptr.mut == ConstPtrMutComptimeConst);
        return ErrorNone;
    }
    // We don't need to read the padding bytes, so we look at type_size_store bytes
    size_t src_size = type_size_store(codegen, pointee->type);
    size_t dst_size = type_size_store(codegen, out_val->type);

    if (dst_size <= src_size) {
        if (types_have_same_zig_comptime_repr(pointee->type, out_val->type)) {
            copy_const_val(out_val, pointee, ptr_val->data.x_ptr.mut == ConstPtrMutComptimeConst);
            return ErrorNone;
        }
        Buf buf = BUF_INIT;
        buf_resize(&buf, src_size);
        buf_write_value_bytes(codegen, (uint8_t*)buf_ptr(&buf), pointee);
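ir_read_const_ptr now compares store sizes, so a comptime read through a reinterpreted pointer only needs the bytes that actually hold the value, not the padding. A hedged sketch of the kind of code this enables (editor's example; readIntLittle is the existing std.mem helper that reinterprets a byte array as an integer):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "comptime read of a u24 from exactly 3 bytes" {
    comptime {
        // Only the 3 store-size bytes of the u24 have to exist;
        // no padding byte is read at comptime.
        const bytes = [3]u8{ 0x56, 0x34, 0x12 };
        const x = std.mem.readIntLittle(u24, &bytes);
        expect(x == 0x123456);
    }
}
```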
@@ -15798,6 +15798,8 @@ static IrInstruction *ir_analyze_instruction_typeof(IrAnalyze *ira, IrInstructio
static IrInstruction *ir_analyze_instruction_to_ptr_type(IrAnalyze *ira,
        IrInstructionToPtrType *to_ptr_type_instruction)
{
    Error err;

    IrInstruction *value = to_ptr_type_instruction->value->child;
    ZigType *type_entry = value->value.type;
    if (type_is_invalid(type_entry))

@@ -15813,7 +15815,17 @@ static IrInstruction *ir_analyze_instruction_to_ptr_type(IrAnalyze *ira,
        ptr_type = get_pointer_to_type(ira->codegen,
                type_entry->data.pointer.child_type->data.array.child_type, type_entry->data.pointer.is_const);
    } else if (is_slice(type_entry)) {
        ptr_type = adjust_ptr_len(ira->codegen, type_entry->data.structure.fields[0].type_entry, PtrLenSingle);
        ZigType *slice_ptr_type = type_entry->data.structure.fields[0].type_entry;
        ptr_type = adjust_ptr_len(ira->codegen, slice_ptr_type, PtrLenSingle);
        // If the pointer is over-aligned, we may have to reduce it based on the alignment of the element type.
        if (slice_ptr_type->data.pointer.explicit_alignment != 0) {
            ZigType *elem_type = slice_ptr_type->data.pointer.child_type;
            if ((err = type_resolve(ira->codegen, elem_type, ResolveStatusAlignmentKnown)))
                return ira->codegen->invalid_instruction;
            uint32_t elem_align = get_abi_alignment(ira->codegen, elem_type);
            uint32_t reduced_align = min(elem_align, slice_ptr_type->data.pointer.explicit_alignment);
            ptr_type = adjust_ptr_align(ira->codegen, ptr_type, reduced_align);
        }
    } else if (type_entry->id == ZigTypeIdArgTuple) {
        ConstExprValue *arg_tuple_val = ir_resolve_const(ira, value, UndefBad);
        if (!arg_tuple_val)
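The new branch reduces the alignment of the element pointer produced for an over-aligned slice, such as the []align(4) u8 that @sliceToBytes yields over u24 elements. This is what the new behavior test at the end of this commit exercises; a condensed sketch of the same situation (editor's example):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "for-loop pointers into an over-aligned byte slice" {
    var elems: [2]u24 = undefined;
    elems[0] = 0xFFFFFF;
    elems[1] = 0xFFFFFF;

    // @sliceToBytes produces []align(4) u8 here (u24 is 4-byte aligned),
    // so the per-element pointer handed to the loop body must be reduced to align(1).
    const bytes = @sliceToBytes(elems[0..]);
    for (bytes) |*b| {
        b.* = 0;
    }

    expect(elems[0] == 0);
    expect(elems[1] == 0);
}
```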
std/io.zig

@@ -935,8 +935,6 @@ pub fn BitOutStream(endian: builtin.Endian, comptime Error: type) type {
    };
}

pub const BufferedAtomicFile = struct {
    atomic_file: os.AtomicFile,
    file_stream: os.File.OutStream,

@@ -978,7 +976,6 @@ pub const BufferedAtomicFile = struct {
    }
};

pub fn readLine(buf: *std.Buffer) ![]u8 {
    var stdin = try getStdIn();
    var stdin_stream = stdin.inStream();

@@ -1073,13 +1070,13 @@ pub fn Deserializer(comptime endian: builtin.Endian, is_packed: bool, comptime E
                else => in_stream,
            } };
        }

        pub fn alignToByte(self: *Self) void {
            if(!is_packed) return;
            if (!is_packed) return;
            self.in_stream.alignToByte();
        }

        //@BUG: inferred error issue. See: #1386
        //@BUG: inferred error issue. See: #1386
        fn deserializeInt(self: *Self, comptime T: type) (Error || error{EndOfStream})!T {
            comptime assert(trait.is(builtin.TypeId.Int)(T) or trait.is(builtin.TypeId.Float)(T));

@@ -1088,7 +1085,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, is_packed: bool, comptime E
            const U = @IntType(false, t_bit_count);
            const Log2U = math.Log2Int(U);
            const int_size = @sizeOf(U);
            const int_size = (U.bit_count + 7) / 8;

            if (is_packed) {
                const result = try self.in_stream.readBitsNoEof(U, t_bit_count);

@@ -1301,7 +1298,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime is_packed: bool, com
            const U = @IntType(false, t_bit_count);
            const Log2U = math.Log2Int(U);
            const int_size = @sizeOf(U);
            const int_size = (U.bit_count + 7) / 8;

            const u_value = @bitCast(U, value);
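Deserializer and Serializer now size each integer by rounding its bit count up to whole bytes instead of using @sizeOf, so a u24 costs 3 bytes on the wire rather than 4. A quick check of that arithmetic (editor's sketch):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "wire size of integers in the serializer" {
    // The non-packed serializer reads and writes (bit_count + 7) / 8 bytes per integer.
    expect((u1.bit_count + 7) / 8 == 1);
    expect((u24.bit_count + 7) / 8 == 3);
    expect((u64.bit_count + 7) / 8 == 8);
}
```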
std/mem.zig

@@ -423,8 +423,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: builtin.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntNative(comptime T: type, bytes: *const [@sizeOf(T)]u8) T {
    comptime assert(T.bit_count % 8 == 0);
pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
    return @ptrCast(*align(1) const T, bytes).*;
}

@@ -432,7 +431,7 @@ pub fn readIntNative(comptime T: type, bytes: *const [@sizeOf(T)]u8) T {
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntForeign(comptime T: type, bytes: *const [@sizeOf(T)]u8) T {
pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
    return @bswap(T, readIntNative(T, bytes));
}

@@ -446,22 +445,20 @@ pub const readIntBig = switch (builtin.endian) {
    builtin.Endian.Big => readIntNative,
};

/// Asserts that bytes.len >= @sizeOf(T). Reads the integer starting from index 0
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// Note that @sizeOf(u24) is 3.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T {
    assert(@sizeOf(u24) == 3);
    assert(bytes.len >= @sizeOf(T));
    const n = @divExact(T.bit_count, 8);
    assert(bytes.len >= n);
    // TODO https://github.com/ziglang/zig/issues/863
    return readIntNative(T, @ptrCast(*const [@sizeOf(T)]u8, bytes.ptr));
    return readIntNative(T, @ptrCast(*const [n]u8, bytes.ptr));
}

/// Asserts that bytes.len >= @sizeOf(T). Reads the integer starting from index 0
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// Note that @sizeOf(u24) is 3.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntSliceForeign(comptime T: type, bytes: []const u8) T {
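The readIntSlice* helpers now assert on T.bit_count / 8 rather than @sizeOf(T), and still ignore any extra bytes past the integer. A small usage sketch (editor's example):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "readIntSliceLittle reads bit_count / 8 bytes and ignores the rest" {
    // Only the first 3 bytes are used for a u24; the trailing bytes are ignored.
    const bytes = []u8{ 0x12, 0x34, 0x56, 0xAA, 0xBB };
    expect(std.mem.readIntSliceLittle(u24, bytes[0..]) == 0x563412);
}
```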
@@ -481,7 +478,7 @@ pub const readIntSliceBig = switch (builtin.endian) {
/// Reads an integer from memory with bit count specified by T.
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
pub fn readInt(comptime T: type, bytes: *const [@sizeOf(T)]u8, endian: builtin.Endian) T {
pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, endian: builtin.Endian) T {
    if (endian == builtin.endian) {
        return readIntNative(T, bytes);
    } else {

@@ -489,15 +486,14 @@ pub fn readInt(comptime T: type, bytes: *const [@sizeOf(T)]u8, endian: builtin.E
    }
}

/// Asserts that bytes.len >= @sizeOf(T). Reads the integer starting from index 0
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// Note that @sizeOf(u24) is 3.
/// The bit count of T must be evenly divisible by 8.
pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: builtin.Endian) T {
    assert(@sizeOf(u24) == 3);
    assert(bytes.len >= @sizeOf(T));
    const n = @divExact(T.bit_count, 8);
    assert(bytes.len >= n);
    // TODO https://github.com/ziglang/zig/issues/863
    return readInt(T, @ptrCast(*const [@sizeOf(T)]u8, bytes.ptr), endian);
    return readInt(T, @ptrCast(*const [n]u8, bytes.ptr), endian);
}

test "comptime read/write int" {
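readInt now takes a pointer to exactly @divExact(T.bit_count, 8) bytes, which for a u24 means a *const [3]u8 rather than a padded 4-byte array. A usage sketch (editor's example, not from the commit):

```zig
const std = @import("std");
const builtin = @import("builtin");
const expect = std.testing.expect;

test "readInt takes exactly bit_count / 8 bytes" {
    // For u24 the parameter type is *const [3]u8.
    const bytes = [3]u8{ 0x12, 0x34, 0x56 };
    expect(std.mem.readInt(u24, &bytes, builtin.Endian.Big) == 0x123456);
}
```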
@@ -540,7 +536,7 @@ test "readIntBig and readIntLittle" {
/// accepts any integer bit width.
/// This function stores in native endian, which means it is implemented as a simple
/// memory store.
pub fn writeIntNative(comptime T: type, buf: *[@sizeOf(T)]u8, value: T) void {
pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value: T) void {
    @ptrCast(*align(1) T, buf).* = value;
}

@@ -548,7 +544,7 @@ pub fn writeIntNative(comptime T: type, buf: *[@sizeOf(T)]u8, value: T) void {
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
/// This function stores in foreign endian, which means it does a @bswap first.
pub fn writeIntForeign(comptime T: type, buf: *[@sizeOf(T)]u8, value: T) void {
pub fn writeIntForeign(comptime T: type, buf: *[@divExact(T.bit_count, 8)]u8, value: T) void {
    writeIntNative(T, buf, @bswap(T, value));
}

@@ -565,8 +561,7 @@ pub const writeIntBig = switch (builtin.endian) {
/// Writes an integer to memory, storing it in twos-complement.
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
pub fn writeInt(comptime T: type, buffer: *[@sizeOf(T)]u8, value: T, endian: builtin.Endian) void {
    comptime assert(T.bit_count % 8 == 0);
pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value: T, endian: builtin.Endian) void {
    if (endian == builtin.endian) {
        return writeIntNative(T, buffer, value);
    } else {

@@ -575,15 +570,13 @@ pub fn writeInt(comptime T: type, buffer: *[@sizeOf(T)]u8, value: T, endian: bui
}

/// Writes a twos-complement little-endian integer to memory.
/// Asserts that buf.len >= @sizeOf(T). Note that @sizeOf(u24) is 3.
/// Asserts that buf.len >= T.bit_count / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer after writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntLittle
/// instead.
pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
    comptime assert(@sizeOf(u24) == 3);
    comptime assert(T.bit_count % 8 == 0);
    assert(buffer.len >= @sizeOf(T));
    assert(buffer.len >= @divExact(T.bit_count, 8));

    // TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
    const uint = @IntType(false, T.bit_count);

@@ -595,14 +588,12 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
}

/// Writes a twos-complement big-endian integer to memory.
/// Asserts that buffer.len >= @sizeOf(T). Note that @sizeOf(u24) is 3.
/// Asserts that buffer.len >= T.bit_count / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer before writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntBig instead.
pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
    comptime assert(@sizeOf(u24) == 3);
    comptime assert(T.bit_count % 8 == 0);
    assert(buffer.len >= @sizeOf(T));
    assert(buffer.len >= @divExact(T.bit_count, 8));

    // TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
    const uint = @IntType(false, T.bit_count);

@@ -626,7 +617,7 @@ pub const writeIntSliceForeign = switch (builtin.endian) {
};

/// Writes a twos-complement integer to memory, with the specified endianness.
/// Asserts that buf.len >= @sizeOf(T). Note that @sizeOf(u24) is 3.
/// Asserts that buf.len >= T.bit_count / 8.
/// The bit count of T must be evenly divisible by 8.
/// Any extra bytes in buffer not part of the integer are set to zero, with
/// respect to endianness. To avoid the branch to check for extra buffer bytes,
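The writeIntSlice* helpers keep accepting a larger buffer and zero the extra bytes; the length they assert on is now derived from the bit count. A small usage sketch (editor's example, relying on the zero-fill behavior the doc comment states):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "writeIntSliceLittle writes 3 bytes of a u24 and zeroes the rest" {
    var buffer = []u8{ 0xAA, 0xAA, 0xAA, 0xAA, 0xAA };
    std.mem.writeIntSliceLittle(u24, buffer[0..], 0x123456);
    expect(buffer[0] == 0x56);
    expect(buffer[1] == 0x34);
    expect(buffer[2] == 0x12);
    // Extra bytes after the integer are set to zero.
    expect(buffer[3] == 0);
    expect(buffer[4] == 0);
}
```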
@@ -17,6 +17,7 @@ comptime {
    _ = @import("behavior/bugs/1421.zig");
    _ = @import("behavior/bugs/1442.zig");
    _ = @import("behavior/bugs/1486.zig");
    _ = @import("behavior/bugs/1851.zig");
    _ = @import("behavior/bugs/394.zig");
    _ = @import("behavior/bugs/655.zig");
    _ = @import("behavior/bugs/656.zig");
test/stage1/behavior/bugs/1851.zig

@@ -0,0 +1,27 @@
const std = @import("std");
const expect = std.testing.expect;

test "allocation and looping over 3-byte integer" {
    expect(@sizeOf(u24) == 4);
    expect(@sizeOf([1]u24) == 4);
    expect(@alignOf(u24) == 4);
    expect(@alignOf([1]u24) == 4);
    var buffer: [100]u8 = undefined;
    const a = &std.heap.FixedBufferAllocator.init(&buffer).allocator;

    var x = a.alloc(u24, 2) catch unreachable;
    expect(x.len == 2);
    x[0] = 0xFFFFFF;
    x[1] = 0xFFFFFF;

    const bytes = @sliceToBytes(x);
    expect(@typeOf(bytes) == []align(4) u8);
    expect(bytes.len == 8);

    for (bytes) |*b| {
        b.* = 0x00;
    }

    expect(x[0] == 0x00);
    expect(x[1] == 0x00);
}