stage2: initial implementation of packed structs
Layout algorithm: all `align(0)` fields are squished together as if they were a single integer with a number of bits equal to the sum of `@bitSizeOf` of each field. The natural ABI alignment of that integer is then used for that pseudo-field.
This commit is contained in:
parent 3763580e1e
commit 5b171f446f
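To make the rule concrete before the diff, here is a minimal standalone sketch of it (not part of the commit; `FieldInfo` and `intAbiAlignment` are invented for the illustration, and it uses present-day `@max` rather than the `@maximum` builtin the commit's codebase targets):

```zig
const std = @import("std");

// Invented for this sketch: a field's bit size plus its requested alignment,
// where 0 stands for `align(0)` (the default for packed struct fields).
const FieldInfo = struct { bits: u16, alignment: u32 };

// Rough stand-in for `Type.abiAlignment` of a uN integer: round the byte
// size up to the next power of two (real targets cap this; ignored here).
fn intAbiAlignment(bits: u16) u32 {
    const bytes: u32 = (@as(u32, bits) + 7) / 8;
    var a: u32 = 1;
    while (a < bytes) a *= 2;
    return a;
}

// The commit's rule: consecutive align(0) fields are squished into a single
// unsigned integer whose bit count is the sum of their @bitSizeOf values;
// that pseudo-field then contributes the integer's natural ABI alignment.
fn packedStructAlignment(fields: []const FieldInfo) u32 {
    var big_align: u32 = 1;
    var running_bits: u16 = 0;
    for (fields) |f| {
        if (f.alignment == 0) {
            running_bits += f.bits;
        } else {
            if (running_bits != 0) {
                big_align = @max(big_align, intAbiAlignment(running_bits));
                running_bits = 0;
            }
            big_align = @max(big_align, f.alignment);
        }
    }
    if (running_bits != 0) big_align = @max(big_align, intAbiAlignment(running_bits));
    return big_align;
}

test "two u4 fields are laid out as one u8 host integer" {
    const nibbles = [_]FieldInfo{ .{ .bits = 4, .alignment = 0 }, .{ .bits = 4, .alignment = 0 } };
    try std.testing.expectEqual(@as(u32, 1), packedStructAlignment(&nibbles));
}
```

The same walk, extended with byte offsets, is what the diff below repeats in Sema.zig, the LLVM backend, and src/type.zig, kept in sync by the `packed_struct_layout_version` comptime asserts.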
@@ -849,6 +849,24 @@ pub const Struct = struct {
         /// undefined until `status` is `have_layout`.
         offset: u32,
         is_comptime: bool,
+
+        /// Returns the field alignment, assuming the struct is packed.
+        pub fn packedAlignment(field: Field) u32 {
+            if (field.abi_align.tag() == .abi_align_default) {
+                return 0;
+            } else {
+                return @intCast(u32, field.abi_align.toUnsignedInt());
+            }
+        }
+
+        /// Returns the field alignment, assuming the struct is not packed.
+        pub fn normalAlignment(field: Field, target: Target) u32 {
+            if (field.abi_align.tag() == .abi_align_default) {
+                return field.ty.abiAlignment(target);
+            } else {
+                return @intCast(u32, field.abi_align.toUnsignedInt());
+            }
+        }
     };
 
     pub fn getFullyQualifiedName(s: *Struct, gpa: Allocator) ![:0]u8 {
src/Sema.zig (54 lines changed)
@@ -12125,11 +12125,61 @@ fn structFieldPtr(
         return sema.failWithBadStructFieldAccess(block, struct_obj, field_name_src, field_name);
     const field_index = @intCast(u32, field_index_big);
     const field = struct_obj.fields.values()[field_index];
-    const ptr_field_ty = try Type.ptr(arena, .{
+    var ptr_ty_data: Type.Payload.Pointer.Data = .{
         .pointee_type = field.ty,
         .mutable = struct_ptr_ty.ptrIsMutable(),
         .@"addrspace" = struct_ptr_ty.ptrAddressSpace(),
-    });
+    };
+    // TODO handle when the struct pointer is overaligned, we should return a potentially
+    // over-aligned field pointer too.
+    if (struct_obj.layout == .Packed) p: {
+        const target = sema.mod.getTarget();
+        comptime assert(Type.packed_struct_layout_version == 1);
+
+        var offset: u64 = 0;
+        var running_bits: u16 = 0;
+        for (struct_obj.fields.values()) |f, i| {
+            if (!f.ty.hasCodeGenBits()) continue;
+
+            const field_align = f.packedAlignment();
+            if (field_align == 0) {
+                if (i == field_index) {
+                    ptr_ty_data.bit_offset = running_bits;
+                }
+                running_bits += @intCast(u16, f.ty.bitSize(target));
+            } else {
+                if (running_bits != 0) {
+                    var int_payload: Type.Payload.Bits = .{
+                        .base = .{ .tag = .int_unsigned },
+                        .data = running_bits,
+                    };
+                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                    if (i > field_index) {
+                        ptr_ty_data.host_size = @intCast(u16, int_ty.abiSize(target));
+                        break :p;
+                    }
+                    const int_align = int_ty.abiAlignment(target);
+                    offset = std.mem.alignForwardGeneric(u64, offset, int_align);
+                    offset += int_ty.abiSize(target);
+                    running_bits = 0;
+                }
+                offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                if (i == field_index) {
+                    break :p;
+                }
+                offset += f.ty.abiSize(target);
+            }
+        }
+        assert(running_bits != 0);
+        var int_payload: Type.Payload.Bits = .{
+            .base = .{ .tag = .int_unsigned },
+            .data = running_bits,
+        };
+        const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+        ptr_ty_data.host_size = @intCast(u16, int_ty.abiSize(target));
+    }
+    const ptr_field_ty = try Type.ptr(arena, ptr_ty_data);
 
     if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| {
         return sema.addConstant(
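A note on what `bit_offset` and `host_size` encode (an illustration, not text from the commit): they correspond to Zig's bit-pointer syntax, where a pointer into a packed struct records its bit offset within the containing host integer and that integer's size in bytes. For a hypothetical two-nibble struct:

```zig
const Nibbles = packed struct { x: u4, y: u4 };

// Both nibbles share one u8 host integer, so a pointer to `y` is
// `*align(1:4:1) u4` in current Zig syntax: alignment 1, bit offset 4,
// host size 1 byte.
fn ptrToY(n: *Nibbles) *align(1:4:1) u4 {
    return &n.y;
}
```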
@@ -844,15 +844,77 @@ pub const DeclGen = struct {
             var llvm_field_types = try std.ArrayListUnmanaged(*const llvm.Type).initCapacity(gpa, struct_obj.fields.count());
             defer llvm_field_types.deinit(gpa);
 
-            for (struct_obj.fields.values()) |field| {
-                if (!field.ty.hasCodeGenBits()) continue;
-                llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty));
+            if (struct_obj.layout == .Packed) {
+                const target = dg.module.getTarget();
+                comptime assert(Type.packed_struct_layout_version == 1);
+                var offset: u64 = 0;
+                var running_bits: u16 = 0;
+                for (struct_obj.fields.values()) |field| {
+                    if (!field.ty.hasCodeGenBits()) continue;
+
+                    const field_align = field.packedAlignment();
+                    if (field_align == 0) {
+                        running_bits += @intCast(u16, field.ty.bitSize(target));
+                    } else {
+                        if (running_bits != 0) {
+                            var int_payload: Type.Payload.Bits = .{
+                                .base = .{ .tag = .int_unsigned },
+                                .data = running_bits,
+                            };
+                            const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                            const int_align = int_ty.abiAlignment(target);
+                            const llvm_int_ty = try dg.llvmType(int_ty);
+                            const prev_offset = offset;
+                            offset = std.mem.alignForwardGeneric(u64, offset, int_align);
+                            const padding_bytes = @intCast(c_uint, offset - prev_offset);
+                            if (padding_bytes != 0) {
+                                const padding = dg.context.intType(8).arrayType(padding_bytes);
+                                llvm_field_types.appendAssumeCapacity(padding);
+                            }
+                            llvm_field_types.appendAssumeCapacity(llvm_int_ty);
+                            offset += int_ty.abiSize(target);
+                            running_bits = 0;
+                        }
+                        const prev_offset = offset;
+                        offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                        const padding_bytes = @intCast(c_uint, offset - prev_offset);
+                        if (padding_bytes != 0) {
+                            const padding = dg.context.intType(8).arrayType(padding_bytes);
+                            llvm_field_types.appendAssumeCapacity(padding);
+                        }
+                        llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty));
+                        offset += field.ty.abiSize(target);
+                    }
+                }
+
+                if (running_bits != 0) {
+                    var int_payload: Type.Payload.Bits = .{
+                        .base = .{ .tag = .int_unsigned },
+                        .data = running_bits,
+                    };
+                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                    const int_align = int_ty.abiAlignment(target);
+                    const prev_offset = offset;
+                    offset = std.mem.alignForwardGeneric(u64, offset, int_align);
+                    const padding_bytes = @intCast(c_uint, offset - prev_offset);
+                    if (padding_bytes != 0) {
+                        const padding = dg.context.intType(8).arrayType(padding_bytes);
+                        llvm_field_types.appendAssumeCapacity(padding);
+                    }
+                    const llvm_int_ty = try dg.llvmType(int_ty);
+                    llvm_field_types.appendAssumeCapacity(llvm_int_ty);
+                }
+            } else {
+                for (struct_obj.fields.values()) |field| {
+                    if (!field.ty.hasCodeGenBits()) continue;
+                    llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty));
+                }
             }
 
             llvm_struct_ty.structSetBody(
                 llvm_field_types.items.ptr,
                 @intCast(c_uint, llvm_field_types.items.len),
-                .False,
+                llvm.Bool.fromBool(struct_obj.layout == .Packed),
             );
 
             return llvm_struct_ty;
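As a worked example of this lowering (my illustration, not part of the commit): for a hypothetical `packed struct { a: u16, b: u16, c: u8 align(1) }`, fields `a` and `b` accumulate 32 running bits; reaching the explicitly aligned `c` flushes them as one `i32` at byte offset 0, then `c` is emitted as an `i8` at offset 4, so the resulting LLVM type is the packed struct `<{ i32, i8 }>` with no padding arrays needed.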
@@ -980,23 +1042,23 @@ pub const DeclGen = struct {
         }
     }
 
-    fn genTypedValue(self: *DeclGen, tv: TypedValue) Error!*const llvm.Value {
+    fn genTypedValue(dg: *DeclGen, tv: TypedValue) Error!*const llvm.Value {
         if (tv.val.isUndef()) {
-            const llvm_type = try self.llvmType(tv.ty);
+            const llvm_type = try dg.llvmType(tv.ty);
             return llvm_type.getUndef();
         }
 
         switch (tv.ty.zigTypeTag()) {
             .Bool => {
-                const llvm_type = try self.llvmType(tv.ty);
+                const llvm_type = try dg.llvmType(tv.ty);
                 return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
             },
             .Int => {
                 var bigint_space: Value.BigIntSpace = undefined;
                 const bigint = tv.val.toBigInt(&bigint_space);
-                const target = self.module.getTarget();
+                const target = dg.module.getTarget();
                 const int_info = tv.ty.intInfo(target);
-                const llvm_type = self.context.intType(int_info.bits);
+                const llvm_type = dg.context.intType(int_info.bits);
 
                 const unsigned_val = v: {
                     if (bigint.limbs.len == 1) {
@@ -1022,9 +1084,9 @@ pub const DeclGen = struct {
                 var bigint_space: Value.BigIntSpace = undefined;
                 const bigint = int_val.toBigInt(&bigint_space);
 
-                const target = self.module.getTarget();
+                const target = dg.module.getTarget();
                 const int_info = tv.ty.intInfo(target);
-                const llvm_type = self.context.intType(int_info.bits);
+                const llvm_type = dg.context.intType(int_info.bits);
 
                 const unsigned_val = v: {
                     if (bigint.limbs.len == 1) {
@@ -1044,8 +1106,8 @@ pub const DeclGen = struct {
                 return unsigned_val;
             },
             .Float => {
-                const llvm_ty = try self.llvmType(tv.ty);
-                if (tv.ty.floatBits(self.module.getTarget()) <= 64) {
+                const llvm_ty = try dg.llvmType(tv.ty);
+                if (tv.ty.floatBits(dg.module.getTarget()) <= 64) {
                     return llvm_ty.constReal(tv.val.toFloat(f64));
                 }
 
@@ -1056,18 +1118,18 @@ pub const DeclGen = struct {
                     std.mem.swap(u64, &buf[0], &buf[1]);
                 }
 
-                const int = self.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
+                const int = dg.context.intType(128).constIntOfArbitraryPrecision(buf.len, &buf);
                 return int.constBitCast(llvm_ty);
             },
             .Pointer => switch (tv.val.tag()) {
-                .decl_ref_mut => return lowerDeclRefValue(self, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
-                .decl_ref => return lowerDeclRefValue(self, tv, tv.val.castTag(.decl_ref).?.data),
+                .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
+                .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
                 .variable => {
                     const decl = tv.val.castTag(.variable).?.data.owner_decl;
                     decl.alive = true;
-                    const val = try self.resolveGlobalDecl(decl);
-                    const llvm_var_type = try self.llvmType(tv.ty);
-                    const llvm_addrspace = self.llvmAddressSpace(decl.@"addrspace");
+                    const val = try dg.resolveGlobalDecl(decl);
+                    const llvm_var_type = try dg.llvmType(tv.ty);
+                    const llvm_addrspace = dg.llvmAddressSpace(decl.@"addrspace");
                     const llvm_type = llvm_var_type.pointerType(llvm_addrspace);
                     return val.constBitCast(llvm_type);
                 },
@@ -1075,26 +1137,26 @@ pub const DeclGen = struct {
                     const slice = tv.val.castTag(.slice).?.data;
                     var buf: Type.SlicePtrFieldTypeBuffer = undefined;
                     const fields: [2]*const llvm.Value = .{
-                        try self.genTypedValue(.{
+                        try dg.genTypedValue(.{
                             .ty = tv.ty.slicePtrFieldType(&buf),
                             .val = slice.ptr,
                         }),
-                        try self.genTypedValue(.{
+                        try dg.genTypedValue(.{
                             .ty = Type.usize,
                             .val = slice.len,
                         }),
                     };
-                    return self.context.constStruct(&fields, fields.len, .False);
+                    return dg.context.constStruct(&fields, fields.len, .False);
                 },
                 .int_u64, .one, .int_big_positive => {
-                    const llvm_usize = try self.llvmType(Type.usize);
+                    const llvm_usize = try dg.llvmType(Type.usize);
                     const llvm_int = llvm_usize.constInt(tv.val.toUnsignedInt(), .False);
-                    return llvm_int.constIntToPtr(try self.llvmType(tv.ty));
+                    return llvm_int.constIntToPtr(try dg.llvmType(tv.ty));
                 },
                 .field_ptr => {
                     const field_ptr = tv.val.castTag(.field_ptr).?.data;
-                    const parent_ptr = try self.lowerParentPtr(field_ptr.container_ptr);
-                    const llvm_u32 = self.context.intType(32);
+                    const parent_ptr = try dg.lowerParentPtr(field_ptr.container_ptr);
+                    const llvm_u32 = dg.context.intType(32);
                     const indices: [2]*const llvm.Value = .{
                         llvm_u32.constInt(0, .False),
                         llvm_u32.constInt(field_ptr.field_index, .False),
@@ -1103,8 +1165,8 @@ pub const DeclGen = struct {
                 },
                 .elem_ptr => {
                     const elem_ptr = tv.val.castTag(.elem_ptr).?.data;
-                    const parent_ptr = try self.lowerParentPtr(elem_ptr.array_ptr);
-                    const llvm_usize = try self.llvmType(Type.usize);
+                    const parent_ptr = try dg.lowerParentPtr(elem_ptr.array_ptr);
+                    const llvm_usize = try dg.llvmType(Type.usize);
                     if (parent_ptr.typeOf().getElementType().getTypeKind() == .Array) {
                         const indices: [2]*const llvm.Value = .{
                             llvm_usize.constInt(0, .False),
@@ -1119,15 +1181,15 @@ pub const DeclGen = struct {
                     }
                 },
                 .null_value, .zero => {
-                    const llvm_type = try self.llvmType(tv.ty);
+                    const llvm_type = try dg.llvmType(tv.ty);
                     return llvm_type.constNull();
                 },
-                else => |tag| return self.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
+                else => |tag| return dg.todo("implement const of pointer type '{}' ({})", .{ tv.ty, tag }),
             },
             .Array => switch (tv.val.tag()) {
                 .bytes => {
                     const bytes = tv.val.castTag(.bytes).?.data;
-                    return self.context.constString(
+                    return dg.context.constString(
                         bytes.ptr,
                         @intCast(c_uint, bytes.len),
                         .True, // don't null terminate. bytes has the sentinel, if any.
@@ -1136,13 +1198,13 @@ pub const DeclGen = struct {
                 .array => {
                     const elem_vals = tv.val.castTag(.array).?.data;
                     const elem_ty = tv.ty.elemType();
-                    const gpa = self.gpa;
+                    const gpa = dg.gpa;
                     const llvm_elems = try gpa.alloc(*const llvm.Value, elem_vals.len);
                     defer gpa.free(llvm_elems);
                     for (elem_vals) |elem_val, i| {
-                        llvm_elems[i] = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
+                        llvm_elems[i] = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_val });
                     }
-                    const llvm_elem_ty = try self.llvmType(elem_ty);
+                    const llvm_elem_ty = try dg.llvmType(elem_ty);
                     return llvm_elem_ty.constArray(
                         llvm_elems.ptr,
                         @intCast(c_uint, llvm_elems.len),
@@ -1154,16 +1216,16 @@ pub const DeclGen = struct {
                     const sentinel = tv.ty.sentinel();
                     const len = @intCast(usize, tv.ty.arrayLen());
                     const len_including_sent = len + @boolToInt(sentinel != null);
-                    const gpa = self.gpa;
+                    const gpa = dg.gpa;
                     const llvm_elems = try gpa.alloc(*const llvm.Value, len_including_sent);
                     defer gpa.free(llvm_elems);
                     for (llvm_elems[0..len]) |*elem| {
-                        elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = val });
+                        elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
                     }
                     if (sentinel) |sent| {
-                        llvm_elems[len] = try self.genTypedValue(.{ .ty = elem_ty, .val = sent });
+                        llvm_elems[len] = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent });
                     }
-                    const llvm_elem_ty = try self.llvmType(elem_ty);
+                    const llvm_elem_ty = try dg.llvmType(elem_ty);
                     return llvm_elem_ty.constArray(
                         llvm_elems.ptr,
                         @intCast(c_uint, llvm_elems.len),
@@ -1172,9 +1234,9 @@ pub const DeclGen = struct {
                 .empty_array_sentinel => {
                     const elem_ty = tv.ty.elemType();
                     const sent_val = tv.ty.sentinel().?;
-                    const sentinel = try self.genTypedValue(.{ .ty = elem_ty, .val = sent_val });
+                    const sentinel = try dg.genTypedValue(.{ .ty = elem_ty, .val = sent_val });
                     const llvm_elems: [1]*const llvm.Value = .{sentinel};
-                    const llvm_elem_ty = try self.llvmType(elem_ty);
+                    const llvm_elem_ty = try dg.llvmType(elem_ty);
                     return llvm_elem_ty.constArray(&llvm_elems, llvm_elems.len);
                 },
                 else => unreachable,
@@ -1182,7 +1244,7 @@ pub const DeclGen = struct {
             .Optional => {
                 var buf: Type.Payload.ElemType = undefined;
                 const payload_ty = tv.ty.optionalChild(&buf);
-                const llvm_i1 = self.context.intType(1);
+                const llvm_i1 = dg.context.intType(1);
                 const is_pl = !tv.val.isNull();
                 const non_null_bit = if (is_pl) llvm_i1.constAllOnes() else llvm_i1.constNull();
                 if (!payload_ty.hasCodeGenBits()) {
@@ -1190,22 +1252,22 @@ pub const DeclGen = struct {
                 }
                 if (tv.ty.isPtrLikeOptional()) {
                     if (tv.val.castTag(.opt_payload)) |payload| {
-                        return self.genTypedValue(.{ .ty = payload_ty, .val = payload.data });
+                        return dg.genTypedValue(.{ .ty = payload_ty, .val = payload.data });
                     } else if (is_pl) {
-                        return self.genTypedValue(.{ .ty = payload_ty, .val = tv.val });
+                        return dg.genTypedValue(.{ .ty = payload_ty, .val = tv.val });
                     } else {
-                        const llvm_ty = try self.llvmType(tv.ty);
+                        const llvm_ty = try dg.llvmType(tv.ty);
                         return llvm_ty.constNull();
                     }
                 }
                 const fields: [2]*const llvm.Value = .{
-                    try self.genTypedValue(.{
+                    try dg.genTypedValue(.{
                         .ty = payload_ty,
                         .val = if (tv.val.castTag(.opt_payload)) |pl| pl.data else Value.initTag(.undef),
                     }),
                     non_null_bit,
                 };
-                return self.context.constStruct(&fields, fields.len, .False);
+                return dg.context.constStruct(&fields, fields.len, .False);
             },
             .Fn => {
                 const fn_decl = switch (tv.val.tag()) {
@@ -1214,14 +1276,14 @@ pub const DeclGen = struct {
                     else => unreachable,
                 };
                 fn_decl.alive = true;
-                return self.resolveLlvmFunction(fn_decl);
+                return dg.resolveLlvmFunction(fn_decl);
             },
             .ErrorSet => {
-                const llvm_ty = try self.llvmType(tv.ty);
+                const llvm_ty = try dg.llvmType(tv.ty);
                 switch (tv.val.tag()) {
                     .@"error" => {
                         const err_name = tv.val.castTag(.@"error").?.data.name;
-                        const kv = try self.module.getErrorValue(err_name);
+                        const kv = try dg.module.getErrorValue(err_name);
                         return llvm_ty.constInt(kv.value, .False);
                     },
                     else => {
@@ -1238,76 +1300,138 @@ pub const DeclGen = struct {
                 if (!payload_type.hasCodeGenBits()) {
                     // We use the error type directly as the type.
                     const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
-                    return self.genTypedValue(.{ .ty = error_type, .val = err_val });
+                    return dg.genTypedValue(.{ .ty = error_type, .val = err_val });
                 }
 
                 const fields: [2]*const llvm.Value = .{
-                    try self.genTypedValue(.{
+                    try dg.genTypedValue(.{
                         .ty = error_type,
                         .val = if (is_pl) Value.initTag(.zero) else tv.val,
                     }),
-                    try self.genTypedValue(.{
+                    try dg.genTypedValue(.{
                         .ty = payload_type,
                         .val = if (tv.val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef),
                     }),
                 };
-                return self.context.constStruct(&fields, fields.len, .False);
+                return dg.context.constStruct(&fields, fields.len, .False);
             },
             .Struct => {
-                const llvm_struct_ty = try self.llvmType(tv.ty);
+                const llvm_struct_ty = try dg.llvmType(tv.ty);
                 const field_vals = tv.val.castTag(.@"struct").?.data;
-                const gpa = self.gpa;
+                const gpa = dg.gpa;
 
                 var llvm_fields = try std.ArrayListUnmanaged(*const llvm.Value).initCapacity(gpa, field_vals.len);
                 defer llvm_fields.deinit(gpa);
 
-                for (field_vals) |field_val, i| {
-                    const field_ty = tv.ty.structFieldType(i);
-                    if (!field_ty.hasCodeGenBits()) continue;
-
-                    llvm_fields.appendAssumeCapacity(try self.genTypedValue(.{
-                        .ty = field_ty,
-                        .val = field_val,
-                    }));
+                const struct_obj = tv.ty.castTag(.@"struct").?.data;
+                if (struct_obj.layout == .Packed) {
+                    const target = dg.module.getTarget();
+                    const fields = struct_obj.fields.values();
+                    comptime assert(Type.packed_struct_layout_version == 1);
+                    var offset: u64 = 0;
+                    var running_bits: u16 = 0;
+                    var running_int: *const llvm.Value = llvm_struct_ty.structGetTypeAtIndex(0).constNull();
+                    for (field_vals) |field_val, i| {
+                        const field = fields[i];
+                        if (!field.ty.hasCodeGenBits()) continue;
+
+                        const field_align = field.packedAlignment();
+                        if (field_align == 0) {
+                            const non_int_val = try dg.genTypedValue(.{
+                                .ty = field.ty,
+                                .val = field_val,
+                            });
+                            const ty_bit_size = @intCast(u16, field.ty.bitSize(target));
+                            const llvm_int_ty = dg.context.intType(ty_bit_size);
+                            const int_val = non_int_val.constBitCast(llvm_int_ty);
+                            const shift_rhs = llvm_int_ty.constInt(running_bits, .False);
+                            const shifted = int_val.constShl(shift_rhs);
+                            running_int = running_int.constOr(shifted);
+                            running_bits += ty_bit_size;
+                        } else {
+                            if (running_bits != 0) {
+                                var int_payload: Type.Payload.Bits = .{
+                                    .base = .{ .tag = .int_unsigned },
+                                    .data = running_bits,
+                                };
+                                const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                                const int_align = int_ty.abiAlignment(target);
+                                const prev_offset = offset;
+                                offset = std.mem.alignForwardGeneric(u64, offset, int_align);
+                                const padding_bytes = @intCast(c_uint, offset - prev_offset);
+                                if (padding_bytes != 0) {
+                                    const padding = dg.context.intType(8).arrayType(padding_bytes);
+                                    llvm_fields.appendAssumeCapacity(padding.getUndef());
+                                }
+                                llvm_fields.appendAssumeCapacity(running_int);
+                                running_int = llvm_struct_ty.structGetTypeAtIndex(@intCast(c_uint, llvm_fields.items.len)).constNull();
+                                offset += int_ty.abiSize(target);
+                                running_bits = 0;
+                            }
+                            const prev_offset = offset;
+                            offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                            const padding_bytes = @intCast(c_uint, offset - prev_offset);
+                            if (padding_bytes != 0) {
+                                const padding = dg.context.intType(8).arrayType(padding_bytes);
+                                llvm_fields.appendAssumeCapacity(padding.getUndef());
+                            }
+                            llvm_fields.appendAssumeCapacity(try dg.genTypedValue(.{
+                                .ty = field.ty,
+                                .val = field_val,
+                            }));
+                            offset += field.ty.abiSize(target);
+                        }
+                    }
+                } else {
+                    for (field_vals) |field_val, i| {
+                        const field_ty = tv.ty.structFieldType(i);
+                        if (!field_ty.hasCodeGenBits()) continue;
+
+                        llvm_fields.appendAssumeCapacity(try dg.genTypedValue(.{
+                            .ty = field_ty,
+                            .val = field_val,
+                        }));
+                    }
                 }
 
                 return llvm_struct_ty.constNamedStruct(
                     llvm_fields.items.ptr,
                     @intCast(c_uint, llvm_fields.items.len),
                 );
             },
             .Union => {
-                const llvm_union_ty = try self.llvmType(tv.ty);
+                const llvm_union_ty = try dg.llvmType(tv.ty);
                 const tag_and_val = tv.val.castTag(.@"union").?.data;
 
-                const target = self.module.getTarget();
+                const target = dg.module.getTarget();
                 const layout = tv.ty.unionGetLayout(target);
 
                 if (layout.payload_size == 0) {
-                    return genTypedValue(self, .{ .ty = tv.ty.unionTagType().?, .val = tag_and_val.tag });
+                    return genTypedValue(dg, .{ .ty = tv.ty.unionTagType().?, .val = tag_and_val.tag });
                 }
                 const field_ty = tv.ty.unionFieldType(tag_and_val.tag);
                 const payload = p: {
                     if (!field_ty.hasCodeGenBits()) {
                         const padding_len = @intCast(c_uint, layout.payload_size);
-                        break :p self.context.intType(8).arrayType(padding_len).getUndef();
+                        break :p dg.context.intType(8).arrayType(padding_len).getUndef();
                     }
-                    const field = try genTypedValue(self, .{ .ty = field_ty, .val = tag_and_val.val });
+                    const field = try genTypedValue(dg, .{ .ty = field_ty, .val = tag_and_val.val });
                     const field_size = field_ty.abiSize(target);
                     if (field_size == layout.payload_size) {
                         break :p field;
                     }
                     const padding_len = @intCast(c_uint, layout.payload_size - field_size);
                     const fields: [2]*const llvm.Value = .{
-                        field, self.context.intType(8).arrayType(padding_len).getUndef(),
+                        field, dg.context.intType(8).arrayType(padding_len).getUndef(),
                     };
-                    break :p self.context.constStruct(&fields, fields.len, .False);
+                    break :p dg.context.constStruct(&fields, fields.len, .False);
                 };
                 if (layout.tag_size == 0) {
                     const llvm_payload_ty = llvm_union_ty.structGetTypeAtIndex(0);
                     const fields: [1]*const llvm.Value = .{payload.constBitCast(llvm_payload_ty)};
                     return llvm_union_ty.constNamedStruct(&fields, fields.len);
                 }
-                const llvm_tag_value = try genTypedValue(self, .{
+                const llvm_tag_value = try genTypedValue(dg, .{
                     .ty = tv.ty.unionTagType().?,
                     .val = tag_and_val.tag,
                 });
@@ -1329,15 +1453,15 @@ pub const DeclGen = struct {
                     assert(vector_len == bytes.len or vector_len + 1 == bytes.len);
 
                     const elem_ty = tv.ty.elemType();
-                    const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
-                    defer self.gpa.free(llvm_elems);
+                    const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
+                    defer dg.gpa.free(llvm_elems);
                     for (llvm_elems) |*elem, i| {
                         var byte_payload: Value.Payload.U64 = .{
                             .base = .{ .tag = .int_u64 },
                             .data = bytes[i],
                         };
 
-                        elem.* = try self.genTypedValue(.{
+                        elem.* = try dg.genTypedValue(.{
                             .ty = elem_ty,
                             .val = Value.initPayload(&byte_payload.base),
                         });
@@ -1354,10 +1478,10 @@ pub const DeclGen = struct {
                     const vector_len = @intCast(usize, tv.ty.arrayLen());
                     assert(vector_len == elem_vals.len or vector_len + 1 == elem_vals.len);
                     const elem_ty = tv.ty.elemType();
-                    const llvm_elems = try self.gpa.alloc(*const llvm.Value, vector_len);
-                    defer self.gpa.free(llvm_elems);
+                    const llvm_elems = try dg.gpa.alloc(*const llvm.Value, vector_len);
+                    defer dg.gpa.free(llvm_elems);
                     for (llvm_elems) |*elem, i| {
-                        elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
+                        elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = elem_vals[i] });
                     }
                     return llvm.constVector(
                         llvm_elems.ptr,
@@ -1369,10 +1493,10 @@ pub const DeclGen = struct {
                     const val = tv.val.castTag(.repeated).?.data;
                     const elem_ty = tv.ty.elemType();
                     const len = @intCast(usize, tv.ty.arrayLen());
-                    const llvm_elems = try self.gpa.alloc(*const llvm.Value, len);
-                    defer self.gpa.free(llvm_elems);
+                    const llvm_elems = try dg.gpa.alloc(*const llvm.Value, len);
+                    defer dg.gpa.free(llvm_elems);
                     for (llvm_elems) |*elem| {
-                        elem.* = try self.genTypedValue(.{ .ty = elem_ty, .val = val });
+                        elem.* = try dg.genTypedValue(.{ .ty = elem_ty, .val = val });
                     }
                     return llvm.constVector(
                         llvm_elems.ptr,
@@ -1395,7 +1519,7 @@ pub const DeclGen = struct {
 
             .Frame,
             .AnyFrame,
-            => return self.todo("implement const of type '{}'", .{tv.ty}),
+            => return dg.todo("implement const of type '{}'", .{tv.ty}),
         }
     }
 
@@ -2403,26 +2527,28 @@ pub const FuncGen = struct {
 
         assert(isByRef(struct_ty));
 
-        const field_ptr = switch (struct_ty.zigTypeTag()) {
-            .Struct => blk: {
-                const llvm_field_index = llvmFieldIndex(struct_ty, field_index);
-                break :blk self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
+        const target = self.dg.module.getTarget();
+        switch (struct_ty.zigTypeTag()) {
+            .Struct => {
+                var ptr_ty_buf: Type.Payload.Pointer = undefined;
+                const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf);
+                const field_ptr = self.builder.buildStructGEP(struct_llvm_val, llvm_field_index, "");
+                const field_ptr_ty = Type.initPayload(&ptr_ty_buf.base);
+                return self.load(field_ptr, field_ptr_ty);
             },
-            .Union => blk: {
+            .Union => {
                 const llvm_field_ty = try self.dg.llvmType(field_ty);
-                const target = self.dg.module.getTarget();
                 const layout = struct_ty.unionGetLayout(target);
                 const payload_index = @boolToInt(layout.tag_align >= layout.payload_align);
                 const union_field_ptr = self.builder.buildStructGEP(struct_llvm_val, payload_index, "");
-                break :blk self.builder.buildBitCast(union_field_ptr, llvm_field_ty.pointerType(0), "");
+                const field_ptr = self.builder.buildBitCast(union_field_ptr, llvm_field_ty.pointerType(0), "");
+                if (isByRef(field_ty)) {
+                    return field_ptr;
+                } else {
+                    return self.builder.buildLoad(field_ptr, "");
+                }
             },
             else => unreachable,
-        };
-
-        if (isByRef(field_ty)) {
-            return field_ptr;
-        } else {
-            return self.builder.buildLoad(field_ptr, "");
+        }
     }
 
@@ -3730,11 +3856,11 @@ pub const FuncGen = struct {
         if (opt_abi_ty) |abi_ty| {
             // operand needs widening and truncating
            const casted_ptr = self.builder.buildBitCast(ptr, abi_ty.pointerType(0), "");
-            const load_inst = self.load(casted_ptr, ptr_ty).?;
+            const load_inst = (try self.load(casted_ptr, ptr_ty)).?;
             load_inst.setOrdering(ordering);
             return self.builder.buildTrunc(load_inst, try self.dg.llvmType(operand_ty), "");
         }
-        const load_inst = self.load(ptr, ptr_ty).?;
+        const load_inst = (try self.load(ptr, ptr_ty)).?;
         load_inst.setOrdering(ordering);
         return load_inst;
     }
@@ -3997,7 +4123,9 @@ pub const FuncGen = struct {
         const struct_ty = struct_ptr_ty.childType();
         switch (struct_ty.zigTypeTag()) {
             .Struct => {
-                const llvm_field_index = llvmFieldIndex(struct_ty, field_index);
+                const target = self.dg.module.getTarget();
+                var ty_buf: Type.Payload.Pointer = undefined;
+                const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ty_buf);
                 return self.builder.buildStructGEP(struct_ptr, llvm_field_index, "");
             },
             .Union => return self.unionFieldPtr(inst, struct_ptr, struct_ty, field_index),
@@ -4041,15 +4169,52 @@ pub const FuncGen = struct {
         return self.llvmModule().getIntrinsicDeclaration(id, types.ptr, types.len);
     }
 
-    fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) ?*const llvm.Value {
-        const pointee_ty = ptr_ty.childType();
-        if (!pointee_ty.hasCodeGenBits()) return null;
-        if (isByRef(pointee_ty)) return ptr;
-        const llvm_inst = self.builder.buildLoad(ptr, "");
+    fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) !?*const llvm.Value {
+        const info = ptr_ty.ptrInfo().data;
+        if (!info.pointee_type.hasCodeGenBits()) return null;
+
         const target = self.dg.module.getTarget();
-        llvm_inst.setAlignment(ptr_ty.ptrAlignment(target));
-        llvm_inst.setVolatile(llvm.Bool.fromBool(ptr_ty.isVolatilePtr()));
-        return llvm_inst;
+        const ptr_alignment = ptr_ty.ptrAlignment(target);
+        const ptr_volatile = llvm.Bool.fromBool(ptr_ty.isVolatilePtr());
+        if (info.host_size == 0) {
+            if (isByRef(info.pointee_type)) return ptr;
+            const llvm_inst = self.builder.buildLoad(ptr, "");
+            llvm_inst.setAlignment(ptr_alignment);
+            llvm_inst.setVolatile(ptr_volatile);
+            return llvm_inst;
+        }
+
+        const int_ptr_ty = self.context.intType(info.host_size * 8).pointerType(0);
+        const int_ptr = self.builder.buildBitCast(ptr, int_ptr_ty, "");
+        const containing_int = self.builder.buildLoad(int_ptr, "");
+        containing_int.setAlignment(ptr_alignment);
+        containing_int.setVolatile(ptr_volatile);
+
+        const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
+        const shift_amt = containing_int.typeOf().constInt(info.bit_offset, .False);
+        const shifted_value = self.builder.buildLShr(containing_int, shift_amt, "");
+        const elem_llvm_ty = try self.dg.llvmType(info.pointee_type);
+
+        if (isByRef(info.pointee_type)) {
+            const result_align = info.pointee_type.abiAlignment(target);
+            const result_ptr = self.buildAlloca(elem_llvm_ty);
+            result_ptr.setAlignment(result_align);
+
+            const same_size_int = self.context.intType(elem_bits);
+            const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
+            const bitcasted_ptr = self.builder.buildBitCast(result_ptr, same_size_int.pointerType(0), "");
+            const store_inst = self.builder.buildStore(truncated_int, bitcasted_ptr);
+            store_inst.setAlignment(result_align);
+            return result_ptr;
+        }
+
+        if (info.pointee_type.zigTypeTag() == .Float) {
+            const same_size_int = self.context.intType(elem_bits);
+            const truncated_int = self.builder.buildTrunc(shifted_value, same_size_int, "");
+            return self.builder.buildBitCast(truncated_int, elem_llvm_ty, "");
+        }
+
+        return self.builder.buildTrunc(shifted_value, elem_llvm_ty, "");
     }
 
     fn store(
@@ -4059,16 +4224,50 @@ pub const FuncGen = struct {
         elem: *const llvm.Value,
         ordering: llvm.AtomicOrdering,
     ) void {
-        const elem_ty = ptr_ty.childType();
+        const info = ptr_ty.ptrInfo().data;
+        const elem_ty = info.pointee_type;
         if (!elem_ty.hasCodeGenBits()) {
             return;
         }
         const target = self.dg.module.getTarget();
+        const ptr_alignment = ptr_ty.ptrAlignment(target);
+        const ptr_volatile = llvm.Bool.fromBool(info.@"volatile");
+        if (info.host_size != 0) {
+            const int_ptr_ty = self.context.intType(info.host_size * 8).pointerType(0);
+            const int_ptr = self.builder.buildBitCast(ptr, int_ptr_ty, "");
+            const containing_int = self.builder.buildLoad(int_ptr, "");
+            assert(ordering == .NotAtomic);
+            containing_int.setAlignment(ptr_alignment);
+            containing_int.setVolatile(ptr_volatile);
+            const elem_bits = @intCast(c_uint, ptr_ty.elemType().bitSize(target));
+            const containing_int_ty = containing_int.typeOf();
+            const shift_amt = containing_int_ty.constInt(info.bit_offset, .False);
+            // Convert to equally-sized integer type in order to perform the bit
+            // operations on the value to store
+            const value_bits_type = self.context.intType(elem_bits);
+            const value_bits = self.builder.buildBitCast(elem, value_bits_type, "");
+
+            var mask_val = value_bits_type.constAllOnes();
+            mask_val = mask_val.constZExt(containing_int_ty);
+            mask_val = mask_val.constShl(shift_amt);
+            mask_val = mask_val.constNot();
+
+            const anded_containing_int = self.builder.buildAnd(containing_int, mask_val, "");
+            const extended_value = self.builder.buildZExt(value_bits, containing_int_ty, "");
+            const shifted_value = self.builder.buildShl(extended_value, shift_amt, "");
+            const ored_value = self.builder.buildOr(shifted_value, anded_containing_int, "");
+
+            const store_inst = self.builder.buildStore(ored_value, int_ptr);
+            assert(ordering == .NotAtomic);
+            store_inst.setAlignment(ptr_alignment);
+            store_inst.setVolatile(ptr_volatile);
+            return;
+        }
         if (!isByRef(elem_ty)) {
             const store_inst = self.builder.buildStore(elem, ptr);
             store_inst.setOrdering(ordering);
-            store_inst.setAlignment(ptr_ty.ptrAlignment(target));
-            store_inst.setVolatile(llvm.Bool.fromBool(ptr_ty.isVolatilePtr()));
+            store_inst.setAlignment(ptr_alignment);
+            store_inst.setVolatile(ptr_volatile);
             return;
         }
         assert(ordering == .NotAtomic);
@@ -4080,7 +4279,7 @@ pub const FuncGen = struct {
             self.builder.buildBitCast(elem, llvm_ptr_u8, ""),
             elem_ty.abiAlignment(target),
             self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False),
-            ptr_ty.isVolatilePtr(),
+            info.@"volatile",
         );
     }
 };
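The masked read-modify-write in `store` above is the classic bit-field update. The same idea in plain Zig, as a hedged sketch (names invented; the real code emits the equivalent LLVM instructions):

```zig
const std = @import("std");

// Write `value` into the host integer at `bit_offset`, leaving the
// surrounding bits intact: clear the slot with an inverted mask, then
// shift the new bits into place and OR them in.
fn bitFieldStore(
    comptime HostInt: type,
    host_ptr: *HostInt,
    bit_offset: std.math.Log2Int(HostInt),
    comptime ValueInt: type,
    value: ValueInt,
) void {
    const mask = ~(@as(HostInt, std.math.maxInt(ValueInt)) << bit_offset);
    host_ptr.* = (host_ptr.* & mask) | (@as(HostInt, value) << bit_offset);
}

test "update one nibble of a byte" {
    var host: u8 = 0xFF;
    bitFieldStore(u8, &host, 4, u4, 0xA); // overwrite the upper nibble
    try std.testing.expectEqual(@as(u8, 0xAF), host);
}
```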
@@ -4323,15 +4522,104 @@ fn toLlvmCallConv(cc: std.builtin.CallingConvention, target: std.Target) llvm.CallConv {
 }
 
 /// Take into account 0 bit fields.
-fn llvmFieldIndex(ty: Type, index: u32) c_uint {
+fn llvmFieldIndex(
+    ty: Type,
+    field_index: u32,
+    target: std.Target,
+    ptr_pl_buf: *Type.Payload.Pointer,
+) c_uint {
     const struct_obj = ty.castTag(.@"struct").?.data;
-    var result: c_uint = 0;
-    for (struct_obj.fields.values()[0..index]) |field| {
-        if (field.ty.hasCodeGenBits()) {
-            result += 1;
+    if (struct_obj.layout != .Packed) {
+        var llvm_field_index: c_uint = 0;
+        for (struct_obj.fields.values()) |field, i| {
+            if (!field.ty.hasCodeGenBits()) continue;
+
+            if (i == field_index) {
+                ptr_pl_buf.* = .{
+                    .data = .{
+                        .pointee_type = field.ty,
+                        .@"align" = field.normalAlignment(target),
+                        .@"addrspace" = .generic,
+                    },
+                };
+                return llvm_field_index;
+            }
+            llvm_field_index += 1;
+        }
+        unreachable;
+    }
+
+    // Our job here is to return the host integer field index.
+    comptime assert(Type.packed_struct_layout_version == 1);
+    var offset: u64 = 0;
+    var running_bits: u16 = 0;
+    var llvm_field_index: c_uint = 0;
+    for (struct_obj.fields.values()) |field, i| {
+        if (!field.ty.hasCodeGenBits()) continue;
+
+        const field_align = field.packedAlignment();
+        if (field_align == 0) {
+            if (i == field_index) {
+                ptr_pl_buf.* = .{
+                    .data = .{
+                        .pointee_type = field.ty,
+                        .bit_offset = running_bits,
+                        .@"addrspace" = .generic,
+                    },
+                };
+            }
+            running_bits += @intCast(u16, field.ty.bitSize(target));
+        } else {
+            if (running_bits != 0) {
+                var int_payload: Type.Payload.Bits = .{
+                    .base = .{ .tag = .int_unsigned },
+                    .data = running_bits,
+                };
+                const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                if (i > field_index) {
+                    ptr_pl_buf.data.host_size = @intCast(u16, int_ty.abiSize(target));
+                    return llvm_field_index;
+                }
+
+                const int_align = int_ty.abiAlignment(target);
+                const prev_offset = offset;
+                offset = std.mem.alignForwardGeneric(u64, offset, int_align);
+                const padding_bytes = @intCast(c_uint, offset - prev_offset);
+                if (padding_bytes != 0) {
+                    llvm_field_index += 1;
+                }
+                llvm_field_index += 1;
+                offset += int_ty.abiSize(target);
+                running_bits = 0;
+            }
+            const prev_offset = offset;
+            offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+            const padding_bytes = @intCast(c_uint, offset - prev_offset);
+            if (padding_bytes != 0) {
+                llvm_field_index += 1;
+            }
+            if (i == field_index) {
+                ptr_pl_buf.* = .{
+                    .data = .{
+                        .pointee_type = field.ty,
+                        .@"align" = field_align,
+                        .@"addrspace" = .generic,
+                    },
+                };
+                return llvm_field_index;
+            }
+            llvm_field_index += 1;
+            offset += field.ty.abiSize(target);
         }
     }
-    return result;
+    assert(running_bits != 0);
+    var int_payload: Type.Payload.Bits = .{
+        .base = .{ .tag = .int_unsigned },
+        .data = running_bits,
+    };
+    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+    ptr_pl_buf.data.host_size = @intCast(u16, int_ty.abiSize(target));
+    return llvm_field_index;
 }
 
 fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool {
@@ -148,6 +148,18 @@ pub const Value = opaque {
     pub const constPtrToInt = LLVMConstPtrToInt;
     extern fn LLVMConstPtrToInt(ConstantVal: *const Value, ToType: *const Type) *const Value;
 
+    pub const constShl = LLVMConstShl;
+    extern fn LLVMConstShl(LHSConstant: *const Value, RHSConstant: *const Value) *const Value;
+
+    pub const constOr = LLVMConstOr;
+    extern fn LLVMConstOr(LHSConstant: *const Value, RHSConstant: *const Value) *const Value;
+
+    pub const constZExt = LLVMConstZExt;
+    extern fn LLVMConstZExt(ConstantVal: *const Value, ToType: *const Type) *const Value;
+
+    pub const constNot = LLVMConstNot;
+    extern fn LLVMConstNot(ConstantVal: *const Value) *const Value;
+
     pub const setWeak = LLVMSetWeak;
     extern fn LLVMSetWeak(CmpXchgInst: *const Value, IsWeak: Bool) void;
 
src/type.zig (117 lines changed)
@@ -1173,7 +1173,7 @@ pub const Type = extern union {
             .C => try writer.writeAll("[*c]"),
             .Slice => try writer.writeAll("[]"),
         }
-        if (payload.@"align" != 0) {
+        if (payload.@"align" != 0 or payload.host_size != 0) {
             try writer.print("align({d}", .{payload.@"align"});
 
             if (payload.bit_offset != 0) {
@@ -1867,24 +1867,57 @@ pub const Type = extern union {
 
             .@"struct" => {
                 const fields = self.structFields();
-                if (self.castTag(.@"struct")) |payload| {
+                const is_packed = if (self.castTag(.@"struct")) |payload| p: {
                     const struct_obj = payload.data;
                     assert(struct_obj.status == .have_layout);
-                    const is_packed = struct_obj.layout == .Packed;
-                    if (is_packed) @panic("TODO packed structs");
-                }
+                    break :p struct_obj.layout == .Packed;
+                } else false;
+
+                if (!is_packed) {
+                    var big_align: u32 = 0;
+                    for (fields.values()) |field| {
+                        if (!field.ty.hasCodeGenBits()) continue;
+
+                        const field_align = field.normalAlignment(target);
+                        big_align = @maximum(big_align, field_align);
+                    }
+                    return big_align;
+                }
 
+                // For packed structs, we take the maximum alignment of the backing integers.
+                comptime assert(Type.packed_struct_layout_version == 1);
                 var big_align: u32 = 0;
+                var running_bits: u16 = 0;
+
                 for (fields.values()) |field| {
                     if (!field.ty.hasCodeGenBits()) continue;
 
-                    const field_align = a: {
-                        if (field.abi_align.tag() == .abi_align_default) {
-                            break :a field.ty.abiAlignment(target);
-                        } else {
-                            break :a @intCast(u32, field.abi_align.toUnsignedInt());
-                        }
-                    };
-                    big_align = @maximum(big_align, field_align);
+                    const field_align = field.packedAlignment();
+                    if (field_align == 0) {
+                        running_bits += @intCast(u16, field.ty.bitSize(target));
+                    } else {
+                        if (running_bits != 0) {
+                            var int_payload: Payload.Bits = .{
+                                .base = .{ .tag = .int_unsigned },
+                                .data = running_bits,
+                            };
+                            const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                            const int_align = int_ty.abiAlignment(target);
+                            big_align = @maximum(big_align, int_align);
+                            running_bits = 0;
+                        }
+                        big_align = @maximum(big_align, field_align);
+                    }
                 }
+
+                if (running_bits != 0) {
+                    var int_payload: Payload.Bits = .{
+                        .base = .{ .tag = .int_unsigned },
+                        .data = running_bits,
+                    };
+                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                    const int_align = int_ty.abiAlignment(target);
+                    big_align = @maximum(big_align, int_align);
+                }
                 return big_align;
             },
@@ -3635,30 +3668,70 @@ pub const Type = extern union {
     }
 
     /// Supports structs and unions.
+    /// For packed structs, it returns the byte offset of the containing integer.
     pub fn structFieldOffset(ty: Type, index: usize, target: Target) u64 {
         switch (ty.tag()) {
             .@"struct" => {
                 const struct_obj = ty.castTag(.@"struct").?.data;
                 assert(struct_obj.status == .have_layout);
                 const is_packed = struct_obj.layout == .Packed;
-                if (is_packed) @panic("TODO packed structs");
+                if (!is_packed) {
+                    var offset: u64 = 0;
+                    var big_align: u32 = 0;
+                    for (struct_obj.fields.values()) |field, i| {
+                        if (!field.ty.hasCodeGenBits()) continue;
+
+                        const field_align = field.normalAlignment(target);
+                        big_align = @maximum(big_align, field_align);
+                        offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                        if (i == index) return offset;
+                        offset += field.ty.abiSize(target);
+                    }
+                    offset = std.mem.alignForwardGeneric(u64, offset, big_align);
+                    return offset;
+                }
+
+                comptime assert(Type.packed_struct_layout_version == 1);
                 var offset: u64 = 0;
                 var big_align: u32 = 0;
+                var running_bits: u16 = 0;
                 for (struct_obj.fields.values()) |field, i| {
                     if (!field.ty.hasCodeGenBits()) continue;
 
-                    const field_align = a: {
-                        if (field.abi_align.tag() == .abi_align_default) {
-                            break :a field.ty.abiAlignment(target);
-                        } else {
-                            break :a @intCast(u32, field.abi_align.toUnsignedInt());
-                        }
-                    };
-                    big_align = @maximum(big_align, field_align);
-                    offset = std.mem.alignForwardGeneric(u64, offset, field_align);
-                    if (i == index) return offset;
-                    offset += field.ty.abiSize(target);
+                    const field_align = field.packedAlignment();
+                    if (field_align == 0) {
+                        if (i == index) return offset;
+                        running_bits += @intCast(u16, field.ty.bitSize(target));
+                    } else {
+                        big_align = @maximum(big_align, field_align);
+
+                        if (running_bits != 0) {
+                            var int_payload: Payload.Bits = .{
+                                .base = .{ .tag = .int_unsigned },
+                                .data = running_bits,
+                            };
+                            const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                            const int_align = int_ty.abiAlignment(target);
+                            big_align = @maximum(big_align, int_align);
+                            offset = std.mem.alignForwardGeneric(u64, offset, int_align);
+                            offset += int_ty.abiSize(target);
+                            running_bits = 0;
+                        }
+                        offset = std.mem.alignForwardGeneric(u64, offset, field_align);
+                        if (i == index) return offset;
+                        offset += field.ty.abiSize(target);
+                    }
                 }
+                if (running_bits != 0) {
+                    var int_payload: Payload.Bits = .{
+                        .base = .{ .tag = .int_unsigned },
+                        .data = running_bits,
+                    };
+                    const int_ty: Type = .{ .ptr_otherwise = &int_payload.base };
+                    const int_align = int_ty.abiAlignment(target);
+                    big_align = @maximum(big_align, int_align);
+                    offset = std.mem.alignForwardGeneric(u64, offset, int_align);
+                    offset += int_ty.abiSize(target);
+                }
                 offset = std.mem.alignForwardGeneric(u64, offset, big_align);
                 return offset;
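A worked example of the new doc comment above (my illustration, not from the commit): under layout version 1, for a hypothetical `packed struct { a: u32 align(4), b: u4, c: u4 }` the explicitly aligned `a` occupies bytes 0..4, and `b` and `c` are squished into a single `u8` host integer starting at byte 4; `structFieldOffset` therefore returns 4 for both `b` and `c`, the offset of the containing integer rather than of each individual bit-field.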
@@ -4350,6 +4423,10 @@ pub const Type = extern union {
             else => return Tag.int_unsigned.create(arena, bits),
         };
     }
+
+    /// This is only used for comptime asserts. Bump this number when you make a change
+    /// to packed struct layout to find out all the places in the codebase you need to edit!
+    pub const packed_struct_layout_version = 1;
 };
 
 pub const CType = enum {
@@ -91,3 +91,158 @@ const Expr = union(enum) {
 fn alloc(comptime T: type) []T {
     return &[_]T{};
 }
+
+const APackedStruct = packed struct {
+    x: u8,
+    y: u8,
+};
+
+test "packed struct" {
+    var foo = APackedStruct{
+        .x = 1,
+        .y = 2,
+    };
+    foo.y += 1;
+    const four = foo.x + foo.y;
+    try expect(four == 4);
+}
+
+const Foo24Bits = packed struct {
+    field: u24,
+};
+const Foo96Bits = packed struct {
+    a: u24,
+    b: u24,
+    c: u24,
+    d: u24,
+};
+
+test "packed struct 24bits" {
+    comptime {
+        try expect(@sizeOf(Foo24Bits) == 4);
+        if (@sizeOf(usize) == 4) {
+            try expect(@sizeOf(Foo96Bits) == 12);
+        } else {
+            try expect(@sizeOf(Foo96Bits) == 16);
+        }
+    }
+
+    var value = Foo96Bits{
+        .a = 0,
+        .b = 0,
+        .c = 0,
+        .d = 0,
+    };
+    value.a += 1;
+    try expect(value.a == 1);
+    try expect(value.b == 0);
+    try expect(value.c == 0);
+    try expect(value.d == 0);
+
+    value.b += 1;
+    try expect(value.a == 1);
+    try expect(value.b == 1);
+    try expect(value.c == 0);
+    try expect(value.d == 0);
+
+    value.c += 1;
+    try expect(value.a == 1);
+    try expect(value.b == 1);
+    try expect(value.c == 1);
+    try expect(value.d == 0);
+
+    value.d += 1;
+    try expect(value.a == 1);
+    try expect(value.b == 1);
+    try expect(value.c == 1);
+    try expect(value.d == 1);
+}
+
+test "runtime struct initialization of bitfield" {
+    const s1 = Nibbles{
+        .x = x1,
+        .y = x1,
+    };
+    const s2 = Nibbles{
+        .x = @intCast(u4, x2),
+        .y = @intCast(u4, x2),
+    };
+
+    try expect(s1.x == x1);
+    try expect(s1.y == x1);
+    try expect(s2.x == @intCast(u4, x2));
+    try expect(s2.y == @intCast(u4, x2));
+}
+
+var x1 = @as(u4, 1);
+var x2 = @as(u8, 2);
+
+const Nibbles = packed struct {
+    x: u4,
+    y: u4,
+};
+
+const Bitfields = packed struct {
+    f1: u16,
+    f2: u16,
+    f3: u8,
+    f4: u8,
+    f5: u4,
+    f6: u4,
+    f7: u8,
+};
+
+test "native bit field understands endianness" {
+    var all: u64 = if (native_endian != .Little)
+        0x1111222233445677
+    else
+        0x7765443322221111;
+    var bytes: [8]u8 = undefined;
+    @memcpy(&bytes, @ptrCast([*]u8, &all), 8);
+    var bitfields = @ptrCast(*Bitfields, &bytes).*;
+
+    try expect(bitfields.f1 == 0x1111);
+    try expect(bitfields.f2 == 0x2222);
+    try expect(bitfields.f3 == 0x33);
+    try expect(bitfields.f4 == 0x44);
+    try expect(bitfields.f5 == 0x5);
+    try expect(bitfields.f6 == 0x6);
+    try expect(bitfields.f7 == 0x77);
+}
+
+test "implicit cast packed struct field to const ptr" {
+    const LevelUpMove = packed struct {
+        move_id: u9,
+        level: u7,
+
+        fn toInt(value: u7) u7 {
+            return value;
+        }
+    };
+
+    var lup: LevelUpMove = undefined;
+    lup.level = 12;
+    const res = LevelUpMove.toInt(lup.level);
+    try expect(res == 12);
+}
+
+test "zero-bit field in packed struct" {
+    const S = packed struct {
+        x: u10,
+        y: void,
+    };
+    var x: S = undefined;
+    _ = x;
+}
+
+test "packed struct with non-ABI-aligned field" {
+    const S = packed struct {
+        x: u9,
+        y: u183,
+    };
+    var s: S = undefined;
+    s.x = 1;
+    s.y = 42;
+    try expect(s.x == 1);
+    try expect(s.y == 42);
+}
@@ -6,21 +6,6 @@ const expectEqual = std.testing.expectEqual;
 const expectEqualSlices = std.testing.expectEqualSlices;
 const maxInt = std.math.maxInt;
 
-const APackedStruct = packed struct {
-    x: u8,
-    y: u8,
-};
-
-test "packed struct" {
-    var foo = APackedStruct{
-        .x = 1,
-        .y = 2,
-    };
-    foo.y += 1;
-    const four = foo.x + foo.y;
-    try expect(four == 4);
-}
-
 const BitField1 = packed struct {
     a: u3,
     b: u3,
@@ -60,57 +45,6 @@ fn getC(data: *const BitField1) u2 {
     return data.c;
 }
 
-const Foo24Bits = packed struct {
-    field: u24,
-};
-const Foo96Bits = packed struct {
-    a: u24,
-    b: u24,
-    c: u24,
-    d: u24,
-};
-
-test "packed struct 24bits" {
-    comptime {
-        try expect(@sizeOf(Foo24Bits) == 4);
-        if (@sizeOf(usize) == 4) {
-            try expect(@sizeOf(Foo96Bits) == 12);
-        } else {
-            try expect(@sizeOf(Foo96Bits) == 16);
-        }
-    }
-
-    var value = Foo96Bits{
-        .a = 0,
-        .b = 0,
-        .c = 0,
-        .d = 0,
-    };
-    value.a += 1;
-    try expect(value.a == 1);
-    try expect(value.b == 0);
-    try expect(value.c == 0);
-    try expect(value.d == 0);
-
-    value.b += 1;
-    try expect(value.a == 1);
-    try expect(value.b == 1);
-    try expect(value.c == 0);
-    try expect(value.d == 0);
-
-    value.c += 1;
-    try expect(value.a == 1);
-    try expect(value.b == 1);
-    try expect(value.c == 1);
-    try expect(value.d == 0);
-
-    value.d += 1;
-    try expect(value.a == 1);
-    try expect(value.b == 1);
-    try expect(value.c == 1);
-    try expect(value.d == 1);
-}
-
 const Foo32Bits = packed struct {
     field: u24,
     pad: u8,
@@ -188,74 +122,6 @@ test "aligned array of packed struct" {
     try expect(ptr.a[1].b == 0xbb);
 }
 
-test "runtime struct initialization of bitfield" {
-    const s1 = Nibbles{
-        .x = x1,
-        .y = x1,
-    };
-    const s2 = Nibbles{
-        .x = @intCast(u4, x2),
-        .y = @intCast(u4, x2),
-    };
-
-    try expect(s1.x == x1);
-    try expect(s1.y == x1);
-    try expect(s2.x == @intCast(u4, x2));
-    try expect(s2.y == @intCast(u4, x2));
-}
-
-var x1 = @as(u4, 1);
-var x2 = @as(u8, 2);
-
-const Nibbles = packed struct {
-    x: u4,
-    y: u4,
-};
-
-const Bitfields = packed struct {
-    f1: u16,
-    f2: u16,
-    f3: u8,
-    f4: u8,
-    f5: u4,
-    f6: u4,
-    f7: u8,
-};
-
-test "native bit field understands endianness" {
-    var all: u64 = if (native_endian != .Little)
-        0x1111222233445677
-    else
-        0x7765443322221111;
-    var bytes: [8]u8 = undefined;
-    @memcpy(&bytes, @ptrCast([*]u8, &all), 8);
-    var bitfields = @ptrCast(*Bitfields, &bytes).*;
-
-    try expect(bitfields.f1 == 0x1111);
-    try expect(bitfields.f2 == 0x2222);
-    try expect(bitfields.f3 == 0x33);
-    try expect(bitfields.f4 == 0x44);
-    try expect(bitfields.f5 == 0x5);
-    try expect(bitfields.f6 == 0x6);
-    try expect(bitfields.f7 == 0x77);
-}
-
-test "implicit cast packed struct field to const ptr" {
-    const LevelUpMove = packed struct {
-        move_id: u9,
-        level: u7,
-
-        fn toInt(value: u7) u7 {
-            return value;
-        }
-    };
-
-    var lup: LevelUpMove = undefined;
-    lup.level = 12;
-    const res = LevelUpMove.toInt(lup.level);
-    try expect(res == 12);
-}
-
 test "pointer to packed struct member in a stack variable" {
     const S = packed struct {
         a: u2,
@@ -379,27 +245,6 @@ test "fn with C calling convention returns struct by value" {
     comptime try S.entry();
 }
 
-test "zero-bit field in packed struct" {
-    const S = packed struct {
-        x: u10,
-        y: void,
-    };
-    var x: S = undefined;
-    _ = x;
-}
-
-test "packed struct with non-ABI-aligned field" {
-    const S = packed struct {
-        x: u9,
-        y: u183,
-    };
-    var s: S = undefined;
-    s.x = 1;
-    s.y = 42;
-    try expect(s.x == 1);
-    try expect(s.y == 42);
-}
-
 test "non-packed struct with u128 entry in union" {
     const U = union(enum) {
         Num: u128,