stage2: support nested structs and arrays and sret

* Add AIR instructions: ret_ptr, ret_load
   - This allows Sema to be blissfully unaware of the backend's decision
     to implement by-val/by-ref semantics for struct/union/array types.
     Backends can lower these simply as alloc, load, ret instructions,
     or they can take advantage of them to use a result pointer.
 * Add AIR instruction: array_elem_val
   - Allows for better codegen for `Sema.elemVal`.
 * Implement calculation of ABI alignment and ABI size for unions.
 * Before appending the following AIR instructions to a block,
   resolveTypeLayout is called on the type:
   - call - return type
   - ret - return type
   - store_ptr - elem type
 * Sema: fix memory leak in `zirArrayInit` and other cleanups to this
   function.
 * x86_64: implement the full x86_64 C ABI according to the spec
 * Type: implement `intInfo` for error sets.
 * Type: implement `intTagType` for tagged unions.

The Zig type tag `Fn` is now used exclusively for function bodies.
Function pointers are modeled as `*const T` where `T` is a `Fn` type.
 * The `call` AIR instruction now allows a function pointer operand as
   well as a function operand.
 * Sema now has a coercion from function body to function pointer.
 * Function type syntax, e.g. `fn()void`, now returns a type with Zig tag
   Pointer whose child type is Fn, rather than an Fn type directly.
   - I think this should probably be reverted. I will discuss the language
     specification before making that change. The idea is that function
     pointers would need to be specified as `*const fn()void` rather than
     `fn() void`.

LLVM backend:
 * Enable calling the panic handler (previously this just
   emitted `@breakpoint()` since the backend could not handle the panic
   function).
 * Implement sret
 * Introduce `isByRef` and implement it for structs and arrays. Types
   that are `isByRef` are now passed as pointers to functions, and e.g.
   `elem_val` will return a pointer instead of doing a load.
 * Move the function type creating code from `resolveLlvmFunction` to
   `llvmType` where it belongs; now there is only one instance of this
   logic instead of two.
 * Add the `nonnull` attribute to non-optional pointer parameters.
 * Fix `resolveGlobalDecl` not using fully-qualified names and not using
   the `decl_map`.
 * Implement `genTypedValue` for pointer-like optionals.
 * Fix memory leak when lowering `block` instruction and OOM occurs.
 * Implement volatile checks where relevant.
This commit is contained in:
Andrew Kelley 2021-10-11 11:00:32 -07:00
parent f42725c39b
commit 6d6cf59847
17 changed files with 1174 additions and 371 deletions

View File

@ -110,6 +110,10 @@ pub const Inst = struct {
/// Allocates stack local memory.
/// Uses the `ty` field.
alloc,
/// If the function will pass the result by-ref, this instruction returns the
/// result pointer. Otherwise it is equivalent to `alloc`.
/// Uses the `ty` field.
ret_ptr,
/// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`.
assembly,
/// Bitwise AND. `&`.
@ -160,6 +164,7 @@ pub const Inst = struct {
/// Function call.
/// Result type is the return type of the function being called.
/// Uses the `pl_op` field with the `Call` payload. operand is the callee.
/// Triggers `resolveTypeLayout` on the return type of the callee.
call,
/// Count leading zeroes of an integer according to its representation in twos complement.
/// Result type will always be an unsigned integer big enough to fit the answer.
@ -257,7 +262,16 @@ pub const Inst = struct {
/// Return a value from a function.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `un_op` field.
/// Triggers `resolveTypeLayout` on the return type.
ret,
/// This instruction communicates that the function's result value is inside
/// the operand, which is a pointer. If the function will pass the result by-ref,
/// the pointer operand is a `ret_ptr` instruction. Otherwise, this instruction
/// is equivalent to a `load` on the operand, followed by a `ret` on the loaded value.
/// Result type is always noreturn; no instructions in a block follow this one.
/// Uses the `un_op` field.
/// Triggers `resolveTypeLayout` on the return type.
ret_load,
/// Write a value to a pointer. LHS is pointer, RHS is value.
/// Result type is always void.
/// Uses the `bin_op` field.
@ -341,6 +355,10 @@ pub const Inst = struct {
/// Given a slice value, return the pointer.
/// Uses the `ty_op` field.
slice_ptr,
/// Given an array value and element index, return the element value at that index.
/// Result type is the element type of the array operand.
/// Uses the `bin_op` field.
array_elem_val,
/// Given a slice value, and element index, return the element value at that index.
/// Result type is the element type of the slice operand.
/// Uses the `bin_op` field.
@ -644,7 +662,9 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.const_ty => return Type.initTag(.type),
.alloc => return datas[inst].ty,
.alloc,
.ret_ptr,
=> return datas[inst].ty,
.assembly,
.block,
@ -690,6 +710,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.cond_br,
.switch_br,
.ret,
.ret_load,
.unreach,
=> return Type.initTag(.noreturn),
@ -714,10 +735,14 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.call => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand);
return callee_ty.fnReturnType();
switch (callee_ty.zigTypeTag()) {
.Fn => return callee_ty.fnReturnType(),
.Pointer => return callee_ty.childType().fnReturnType(),
else => unreachable,
}
},
.slice_elem_val, .ptr_elem_val => {
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
const ptr_ty = air.typeOf(datas[inst].bin_op.lhs);
return ptr_ty.elemType();
},

View File

@ -250,6 +250,7 @@ fn analyzeInst(
.bool_and,
.bool_or,
.store,
.array_elem_val,
.slice_elem_val,
.ptr_slice_elem_val,
.ptr_elem_val,
@ -270,6 +271,7 @@ fn analyzeInst(
.arg,
.alloc,
.ret_ptr,
.constant,
.const_ty,
.breakpoint,
@ -322,6 +324,7 @@ fn analyzeInst(
.ptrtoint,
.bool_to_int,
.ret,
.ret_load,
=> {
const operand = inst_datas[inst].un_op;
return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none });

View File

@ -785,7 +785,7 @@ pub const Struct = struct {
/// The Decl that corresponds to the struct itself.
owner_decl: *Decl,
/// Set of field names in declaration order.
fields: std.StringArrayHashMapUnmanaged(Field),
fields: Fields,
/// Represents the declarations inside this struct.
namespace: Namespace,
/// Offset from `owner_decl`, points to the struct AST node.
@ -805,6 +805,8 @@ pub const Struct = struct {
/// is necessary to determine whether it has bits at runtime.
known_has_bits: bool,
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
/// The `Type` and `Value` memory is owned by the arena of the Struct's owner_decl.
pub const Field = struct {
/// Uses `noreturn` to indicate `anytype`.
@ -935,7 +937,7 @@ pub const Union = struct {
/// This will be set to the null type until status is `have_field_types`.
tag_ty: Type,
/// Set of field names in declaration order.
fields: std.StringArrayHashMapUnmanaged(Field),
fields: Fields,
/// Represents the declarations inside this union.
namespace: Namespace,
/// Offset from `owner_decl`, points to the union decl AST node.
@ -958,6 +960,8 @@ pub const Union = struct {
abi_align: Value,
};
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
pub fn getFullyQualifiedName(s: *Union, gpa: *Allocator) ![]u8 {
return s.owner_decl.getFullyQualifiedName(gpa);
}
@ -992,14 +996,18 @@ pub const Union = struct {
pub fn mostAlignedField(u: Union, target: Target) u32 {
assert(u.haveFieldTypes());
var most_alignment: u64 = 0;
var most_alignment: u32 = 0;
var most_index: usize = undefined;
for (u.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits()) continue;
const field_align = if (field.abi_align.tag() == .abi_align_default)
field.ty.abiAlignment(target)
else
field.abi_align.toUnsignedInt();
const field_align = a: {
if (field.abi_align.tag() == .abi_align_default) {
break :a field.ty.abiAlignment(target);
} else {
break :a @intCast(u32, field.abi_align.toUnsignedInt());
}
};
if (field_align > most_alignment) {
most_alignment = field_align;
most_index = i;
@ -1007,6 +1015,69 @@ pub const Union = struct {
}
return @intCast(u32, most_index);
}
/// ABI alignment of the union: the maximum alignment over the tag type
/// (only when `have_tag` is set) and every field that has runtime bits.
/// Asserts that at least one contributor yields a nonzero alignment.
pub fn abiAlignment(u: Union, target: Target, have_tag: bool) u32 {
    var result: u32 = if (have_tag) u.tag_ty.abiAlignment(target) else 0;
    for (u.fields.values()) |field| {
        if (!field.ty.hasCodeGenBits()) continue;
        // An explicit `align(N)` on the field overrides the type's natural
        // ABI alignment.
        const this_align = if (field.abi_align.tag() == .abi_align_default)
            field.ty.abiAlignment(target)
        else
            @intCast(u32, field.abi_align.toUnsignedInt());
        if (this_align > result) result = this_align;
    }
    assert(result != 0);
    return result;
}
/// ABI size of the union, including the tag when `have_tag` is set.
/// The payload area is sized/aligned to the largest field; the tag is
/// placed before or after the payload depending on relative alignment.
/// Requires field types to be resolved; packed unions are not implemented.
pub fn abiSize(u: Union, target: Target, have_tag: bool) u64 {
    assert(u.haveFieldTypes());
    const is_packed = u.layout == .Packed;
    if (is_packed) @panic("TODO packed unions");

    // The payload is as large as the largest field and as aligned as the
    // most-aligned field (fields without runtime bits contribute nothing).
    var payload_size: u64 = 0;
    var payload_align: u32 = 0;
    for (u.fields.values()) |field| {
        if (!field.ty.hasCodeGenBits()) continue;
        // An explicit `align(N)` on the field overrides the type's natural
        // ABI alignment.
        const field_align = a: {
            if (field.abi_align.tag() == .abi_align_default) {
                break :a field.ty.abiAlignment(target);
            } else {
                break :a @intCast(u32, field.abi_align.toUnsignedInt());
            }
        };
        payload_size = @maximum(payload_size, field.ty.abiSize(target));
        payload_align = @maximum(payload_align, field_align);
    }
    if (!have_tag) {
        // NOTE(review): if no field has runtime bits, payload_align is 0
        // here — confirm alignForwardGeneric tolerates alignment 0.
        return std.mem.alignForwardGeneric(u64, payload_size, payload_align);
    }
    // Put the tag before or after the payload depending on which one's
    // alignment is greater.
    const tag_size = u.tag_ty.abiSize(target);
    const tag_align = u.tag_ty.abiAlignment(target);
    var size: u64 = 0;
    if (tag_align >= payload_align) {
        // {Tag, Payload}
        size += tag_size;
        // Pad so the payload starts at its required alignment, then pad the
        // total so the overall union alignment (here: tag_align) is honored.
        size = std.mem.alignForwardGeneric(u64, size, payload_align);
        size += payload_size;
        size = std.mem.alignForwardGeneric(u64, size, tag_align);
    } else {
        // {Payload, Tag}
        size += payload_size;
        size = std.mem.alignForwardGeneric(u64, size, tag_align);
        size += tag_size;
        size = std.mem.alignForwardGeneric(u64, size, payload_align);
    }
    return size;
}
};
/// Some Fn struct memory is owned by the Decl's TypedValue.Managed arena allocator.

View File

@ -1814,7 +1814,7 @@ fn zirRetPtr(
.pointee_type = sema.fn_ret_ty,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
});
return block.addTy(.alloc, ptr_type);
return block.addTy(.ret_ptr, ptr_type);
}
fn zirRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -3331,9 +3331,20 @@ fn analyzeCall(
) CompileError!Air.Inst.Ref {
const mod = sema.mod;
const func_ty = sema.typeOf(func);
if (func_ty.zigTypeTag() != .Fn)
return sema.fail(block, func_src, "type '{}' not a function", .{func_ty});
const callee_ty = sema.typeOf(func);
const func_ty = func_ty: {
switch (callee_ty.zigTypeTag()) {
.Fn => break :func_ty callee_ty,
.Pointer => {
const ptr_info = callee_ty.ptrInfo().data;
if (ptr_info.size == .One and ptr_info.pointee_type.zigTypeTag() == .Fn) {
break :func_ty ptr_info.pointee_type;
}
},
else => {},
}
return sema.fail(block, func_src, "type '{}' not a function", .{callee_ty});
};
const func_ty_info = func_ty.fnInfo();
const cc = func_ty_info.cc;
@ -3393,6 +3404,7 @@ fn analyzeCall(
const result: Air.Inst.Ref = if (is_inline_call) res: {
const func_val = try sema.resolveConstValue(block, func_src, func);
const module_fn = switch (func_val.tag()) {
.decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data,
.function => func_val.castTag(.function).?.data,
.extern_fn => return sema.fail(block, call_src, "{s} call of extern function", .{
@as([]const u8, if (is_comptime_call) "comptime" else "inline"),
@ -3610,7 +3622,11 @@ fn analyzeCall(
break :res res2;
} else if (func_ty_info.is_generic) res: {
const func_val = try sema.resolveConstValue(block, func_src, func);
const module_fn = func_val.castTag(.function).?.data;
const module_fn = switch (func_val.tag()) {
.function => func_val.castTag(.function).?.data,
.decl_ref => func_val.castTag(.decl_ref).?.data.val.castTag(.function).?.data,
else => unreachable,
};
// Check the Module's generic function map with an adapted context, so that we
// can match against `uncasted_args` rather than doing the work below to create a
// generic Scope only to junk it if it matches an existing instantiation.
@ -3880,6 +3896,8 @@ fn analyzeCall(
}
try sema.requireRuntimeBlock(block, call_src);
try sema.resolveTypeLayout(block, call_src, func_ty_info.return_type);
try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len +
args.len);
const func_inst = try block.addInst(.{
@ -3954,6 +3972,8 @@ fn finishGenericCall(
}
total_i += 1;
}
try sema.resolveTypeLayout(block, call_src, new_fn_ty.fnReturnType());
}
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len +
runtime_args_len);
@ -4787,7 +4807,12 @@ fn funcCommon(
}
if (body_inst == 0) {
return sema.addType(fn_ty);
const fn_ptr_ty = try Type.ptr(sema.arena, .{
.pointee_type = fn_ty,
.@"addrspace" = .generic,
.mutable = false,
});
return sema.addType(fn_ptr_ty);
}
const is_inline = fn_ty.fnCallingConvention() == .Inline;
@ -8366,13 +8391,15 @@ fn zirRetLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Zir
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
// TODO: when implementing functions that accept a result location pointer,
// this logic will be updated to only do a load in case that the function's return
// type in fact does not need a result location pointer. Until then we assume
// the `ret_ptr` is the same as an `alloc` and do a load here.
const ret_ptr = sema.resolveInst(inst_data.operand);
const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
return sema.analyzeRet(block, operand, src, false);
if (block.is_comptime or block.inlining != null) {
const operand = try sema.analyzeLoad(block, src, ret_ptr, src);
return sema.analyzeRet(block, operand, src, false);
}
try sema.requireRuntimeBlock(block, src);
_ = try block.addUnOp(.ret_load, ret_ptr);
return always_noreturn;
}
fn analyzeRet(
@ -8398,6 +8425,7 @@ fn analyzeRet(
return always_noreturn;
}
try sema.resolveTypeLayout(block, src, sema.fn_ret_ty);
_ = try block.addUnOp(.ret, operand);
return always_noreturn;
}
@ -8653,56 +8681,76 @@ fn zirStructInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: b
return sema.fail(block, src, "TODO: Sema.zirStructInitAnon", .{});
}
fn zirArrayInit(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
fn zirArrayInit(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const gpa = sema.gpa;
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const args = sema.code.refSlice(extra.end, extra.data.operands_len);
assert(args.len != 0);
const resolved_args = try gpa.alloc(Air.Inst.Ref, args.len);
defer gpa.free(resolved_args);
var resolved_args = try sema.mod.gpa.alloc(Air.Inst.Ref, args.len);
for (args) |arg, i| resolved_args[i] = sema.resolveInst(arg);
var all_args_comptime = for (resolved_args) |arg| {
if ((try sema.resolveMaybeUndefVal(block, src, arg)) == null) break false;
} else true;
const elem_ty = sema.typeOf(resolved_args[0]);
if (all_args_comptime) {
const array_ty = try Type.Tag.array.create(sema.arena, .{
.len = resolved_args.len,
.elem_type = elem_ty,
});
const opt_runtime_src: ?LazySrcLoc = for (resolved_args) |arg| {
const arg_src = src; // TODO better source location
const comptime_known = try sema.isComptimeKnown(block, arg_src, arg);
if (!comptime_known) break arg_src;
} else null;
const runtime_src = opt_runtime_src orelse {
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
assert(!(resolved_args.len == 0));
const final_ty = try Type.Tag.array.create(anon_decl.arena(), .{
.len = resolved_args.len,
.elem_type = try sema.typeOf(resolved_args[0]).copy(anon_decl.arena()),
});
const buf = try anon_decl.arena().alloc(Value, resolved_args.len);
const elem_vals = try anon_decl.arena().alloc(Value, resolved_args.len);
for (resolved_args) |arg, i| {
buf[i] = try (try sema.resolveMaybeUndefVal(block, src, arg)).?.copy(anon_decl.arena());
// We checked that all args are comptime above.
const arg_val = (sema.resolveMaybeUndefVal(block, src, arg) catch unreachable).?;
elem_vals[i] = try arg_val.copy(anon_decl.arena());
}
const val = try Value.Tag.array.create(anon_decl.arena(), buf);
if (is_ref)
return sema.analyzeDeclRef(try anon_decl.finish(final_ty, val))
else
return sema.analyzeDeclVal(block, .unneeded, try anon_decl.finish(final_ty, val));
}
const val = try Value.Tag.array.create(anon_decl.arena(), elem_vals);
const decl = try anon_decl.finish(try array_ty.copy(anon_decl.arena()), val);
if (is_ref) {
return sema.analyzeDeclRef(decl);
} else {
return sema.analyzeDeclVal(block, .unneeded, decl);
}
};
assert(!(resolved_args.len == 0));
const array_ty = try Type.Tag.array.create(sema.arena, .{ .len = resolved_args.len, .elem_type = sema.typeOf(resolved_args[0]) });
const final_ty = try Type.ptr(sema.arena, .{
try sema.requireRuntimeBlock(block, runtime_src);
const alloc_ty = try Type.ptr(sema.arena, .{
.pointee_type = array_ty,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
});
const alloc = try block.addTy(.alloc, final_ty);
const alloc = try block.addTy(.alloc, alloc_ty);
for (resolved_args) |arg, i| {
const pointer_to_array_at_index = try block.addBinOp(.ptr_elem_ptr, alloc, try sema.addIntUnsigned(Type.initTag(.u64), i));
_ = try block.addBinOp(.store, pointer_to_array_at_index, arg);
const index = try sema.addIntUnsigned(Type.initTag(.u64), i);
const elem_ptr = try block.addBinOp(.ptr_elem_ptr, alloc, index);
_ = try block.addBinOp(.store, elem_ptr, arg);
}
if (is_ref) {
return alloc;
} else {
return sema.analyzeLoad(block, .unneeded, alloc, .unneeded);
}
return if (is_ref)
alloc
else
try sema.analyzeLoad(block, .unneeded, alloc, .unneeded);
}
fn zirArrayInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
@ -10111,7 +10159,8 @@ fn panicWithMsg(
const arena = sema.arena;
const this_feature_is_implemented_in_the_backend =
mod.comp.bin_file.options.object_format == .c;
mod.comp.bin_file.options.object_format == .c or
mod.comp.bin_file.options.use_llvm;
if (!this_feature_is_implemented_in_the_backend) {
// TODO implement this feature in all the backends and then delete this branch
_ = try block.addNoOp(.breakpoint);
@ -10579,8 +10628,9 @@ fn fieldCallBind(
const struct_ty = try sema.resolveTypeFields(block, src, concrete_ty);
const struct_obj = struct_ty.castTag(.@"struct").?.data;
const field_index = struct_obj.fields.getIndex(field_name) orelse
const field_index_usize = struct_obj.fields.getIndex(field_name) orelse
break :find_field;
const field_index = @intCast(u32, field_index_usize);
const field = struct_obj.fields.values()[field_index];
const ptr_field_ty = try Type.ptr(arena, .{
@ -10601,33 +10651,7 @@ fn fieldCallBind(
}
try sema.requireRuntimeBlock(block, src);
const ptr_inst = ptr_inst: {
const tag: Air.Inst.Tag = switch (field_index) {
0 => .struct_field_ptr_index_0,
1 => .struct_field_ptr_index_1,
2 => .struct_field_ptr_index_2,
3 => .struct_field_ptr_index_3,
else => {
break :ptr_inst try block.addInst(.{
.tag = .struct_field_ptr,
.data = .{ .ty_pl = .{
.ty = try sema.addType(ptr_field_ty),
.payload = try sema.addExtra(Air.StructField{
.struct_operand = object_ptr,
.field_index = @intCast(u32, field_index),
}),
} },
});
},
};
break :ptr_inst try block.addInst(.{
.tag = tag,
.data = .{ .ty_op = .{
.ty = try sema.addType(ptr_field_ty),
.operand = object_ptr,
} },
});
};
const ptr_inst = try block.addStructFieldPtr(object_ptr, field_index, ptr_field_ty);
return sema.analyzeLoad(block, src, ptr_inst, src);
},
.Union => return sema.fail(block, src, "TODO implement field calls on unions", .{}),
@ -10982,10 +11006,24 @@ fn elemVal(
}
},
},
.Array => {
if (try sema.resolveMaybeUndefVal(block, src, array_maybe_ptr)) |array_val| {
const elem_ty = maybe_ptr_ty.childType();
const opt_index_val = try sema.resolveDefinedValue(block, elem_index_src, elem_index);
if (array_val.isUndef()) return sema.addConstUndef(elem_ty);
if (opt_index_val) |index_val| {
const index = @intCast(usize, index_val.toUnsignedInt());
const elem_val = try array_val.elemValue(sema.arena, index);
return sema.addConstant(elem_ty, elem_val);
}
}
try sema.requireRuntimeBlock(block, src);
return block.addBinOp(.array_elem_val, array_maybe_ptr, elem_index);
},
else => return sema.fail(
block,
array_ptr_src,
"expected pointer, found '{}'",
"expected pointer or array; found '{}'",
.{maybe_ptr_ty},
),
}
@ -11085,6 +11123,14 @@ fn coerce(
return sema.wrapOptional(block, dest_type, intermediate, inst_src);
},
.Pointer => {
// Function body to function pointer.
if (inst_ty.zigTypeTag() == .Fn) {
const fn_val = try sema.resolveConstValue(block, inst_src, inst);
const fn_decl = fn_val.castTag(.function).?.data.owner_decl;
const inst_as_ptr = try sema.analyzeDeclRef(fn_decl);
return sema.coerce(block, dest_type, inst_as_ptr, inst_src);
}
// Coercions where the source is a single pointer to an array.
src_array_ptr: {
if (!inst_ty.isSinglePointer()) break :src_array_ptr;
@ -11411,7 +11457,7 @@ fn storePtr2(
if (ptr_ty.isConstPtr())
return sema.fail(block, src, "cannot assign to constant", .{});
const elem_ty = ptr_ty.elemType();
const elem_ty = ptr_ty.childType();
const operand = try sema.coerce(block, elem_ty, uncasted_operand, operand_src);
if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null)
return;
@ -11429,6 +11475,7 @@ fn storePtr2(
// TODO handle if the element type requires comptime
try sema.requireRuntimeBlock(block, runtime_src);
try sema.resolveTypeLayout(block, src, elem_ty);
_ = try block.addBinOp(air_tag, ptr, operand);
}

337
src/arch/x86_64/abi.zig Normal file
View File

@ -0,0 +1,337 @@
const std = @import("std");
const Type = @import("../../type.zig").Type;
const Target = std.Target;
const assert = std.debug.assert;
pub const Class = enum { integer, sse, sseup, x87, x87up, complex_x87, memory, none };
/// Classify a type for parameter/return passing under the Windows x64
/// calling convention. Returns a single class: register-sized values go in
/// a GP register (.integer) or XMM register (.sse); everything else is
/// passed by reference (.memory).
pub fn classifyWindows(ty: Type, target: Target) Class {
    // https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
    // "There's a strict one-to-one correspondence between a function call's arguments
    // and the registers used for those arguments. Any argument that doesn't fit in 8
    // bytes, or isn't 1, 2, 4, or 8 bytes, must be passed by reference. A single argument
    // is never spread across multiple registers."
    // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
    // as if they were integers of the same size."
    const fits_in_register = switch (ty.abiSize(target)) {
        1, 2, 4, 8 => true,
        else => false,
    };
    if (!fits_in_register) return .memory;
    switch (ty.zigTypeTag()) {
        .Int, .Bool, .Enum, .Void, .NoReturn, .ErrorSet, .Struct, .Union => return .integer,
        .Optional => {
            if (ty.isPtrLikeOptional()) return .integer;
            return .memory;
        },
        .Float, .Vector => return .sse,
        else => unreachable,
    }
}
/// Classify a type for parameter/return passing under the System V x86-64
/// ABI. There are a maximum of 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
/// Quoted comments below are taken from the System V AMD64 ABI document.
pub fn classifySystemV(ty: Type, target: Target) [8]Class {
    // The all-MEMORY result used whenever the value must be passed on the stack.
    const memory_class = [_]Class{
        .memory, .none, .none, .none,
        .none, .none, .none, .none,
    };
    var result = [1]Class{.none} ** 8;
    switch (ty.zigTypeTag()) {
        .Int, .Enum, .ErrorSet => {
            // Integers occupy one .integer eightbyte per started 64 bits,
            // up to 256 bits; anything wider is passed in memory.
            const bits = ty.intInfo(target).bits;
            if (bits <= 64) {
                result[0] = .integer;
                return result;
            }
            if (bits <= 128) {
                result[0] = .integer;
                result[1] = .integer;
                return result;
            }
            if (bits <= 192) {
                result[0] = .integer;
                result[1] = .integer;
                result[2] = .integer;
                return result;
            }
            if (bits <= 256) {
                result[0] = .integer;
                result[1] = .integer;
                result[2] = .integer;
                result[3] = .integer;
                return result;
            }
            return memory_class;
        },
        .Bool, .Void, .NoReturn => {
            result[0] = .integer;
            return result;
        },
        .Float => switch (ty.floatBits(target)) {
            16, 32, 64 => {
                result[0] = .sse;
                return result;
            },
            128 => {
                // "Arguments of types __float128, _Decimal128 and __m128 are
                // split into two halves. The least significant ones belong
                // to class SSE, the most significant one to class SSEUP."
                result[0] = .sse;
                result[1] = .sseup;
                return result;
            },
            else => {
                // "The 64-bit mantissa of arguments of type long double
                // belongs to class X87, the 16-bit exponent plus 6 bytes
                // of padding belongs to class X87UP."
                result[0] = .x87;
                result[1] = .x87up;
                return result;
            },
        },
        .Vector => {
            // Vectors use one .sse eightbyte followed by .sseup eightbytes,
            // one per additional started 64 bits, up to 512 bits total.
            const elem_ty = ty.childType();
            const bits = elem_ty.bitSize(target) * ty.arrayLen();
            if (bits <= 64) return .{
                .sse, .none, .none, .none,
                .none, .none, .none, .none,
            };
            if (bits <= 128) return .{
                .sse, .sseup, .none, .none,
                .none, .none, .none, .none,
            };
            if (bits <= 192) return .{
                .sse, .sseup, .sseup, .none,
                .none, .none, .none, .none,
            };
            if (bits <= 256) return .{
                .sse, .sseup, .sseup, .sseup,
                .none, .none, .none, .none,
            };
            if (bits <= 320) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .none, .none, .none,
            };
            if (bits <= 384) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .none, .none,
            };
            if (bits <= 448) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .sseup, .none,
            };
            if (bits <= 512) return .{
                .sse, .sseup, .sseup, .sseup,
                .sseup, .sseup, .sseup, .sseup,
            };
            return memory_class;
        },
        .Optional => {
            // Only pointer-like optionals fit in a register; other optionals
            // are aggregates and (here, conservatively) passed in memory.
            if (ty.isPtrLikeOptional()) {
                result[0] = .integer;
                return result;
            }
            return memory_class;
        },
        .Struct => {
            // "If the size of an object is larger than eight eightbytes, or
            // it contains unaligned fields, it has class MEMORY"
            // "If the size of the aggregate exceeds a single eightbyte, each is classified
            // separately.".
            const ty_size = ty.abiSize(target);
            if (ty_size > 64)
                return memory_class;

            // result_i indexes the eightbyte being filled; byte_i counts how
            // many bytes of that eightbyte are already occupied.
            var result_i: usize = 0; // out of 8
            var byte_i: usize = 0; // out of 8
            const fields = ty.structFields();
            for (fields.values()) |field| {
                // An over-aligned field cannot make the struct MEMORY, but an
                // under-aligned ("unaligned") one does.
                if (field.abi_align.tag() != .abi_align_default) {
                    const field_alignment = field.abi_align.toUnsignedInt();
                    if (field_alignment < field.ty.abiAlignment(target)) {
                        return memory_class;
                    }
                }
                const field_size = field.ty.abiSize(target);
                const field_class_array = classifySystemV(field.ty, target);
                const field_class = std.mem.sliceTo(&field_class_array, .none);
                if (byte_i + field_size <= 8) {
                    // Combine this field with the previous one.
                    combine: {
                        // "If both classes are equal, this is the resulting class."
                        if (result[result_i] == field_class[0]) {
                            break :combine;
                        }

                        // "If one of the classes is NO_CLASS, the resulting class
                        // is the other class."
                        if (result[result_i] == .none) {
                            result[result_i] = field_class[0];
                            break :combine;
                        }
                        assert(field_class[0] != .none);

                        // "If one of the classes is MEMORY, the result is the MEMORY class."
                        if (result[result_i] == .memory or field_class[0] == .memory) {
                            result[result_i] = .memory;
                            break :combine;
                        }

                        // "If one of the classes is INTEGER, the result is the INTEGER."
                        if (result[result_i] == .integer or field_class[0] == .integer) {
                            result[result_i] = .integer;
                            break :combine;
                        }

                        // "If one of the classes is X87, X87UP, COMPLEX_X87 class,
                        // MEMORY is used as class."
                        if (result[result_i] == .x87 or
                            result[result_i] == .x87up or
                            result[result_i] == .complex_x87 or
                            field_class[0] == .x87 or
                            field_class[0] == .x87up or
                            field_class[0] == .complex_x87)
                        {
                            result[result_i] = .memory;
                            break :combine;
                        }

                        // "Otherwise class SSE is used."
                        result[result_i] = .sse;
                    }
                    byte_i += field_size;
                    if (byte_i == 8) {
                        byte_i = 0;
                        result_i += 1;
                    }
                } else {
                    // Cannot combine this field with the previous one.
                    if (byte_i != 0) {
                        byte_i = 0;
                        result_i += 1;
                    }
                    std.mem.copy(Class, result[result_i..], field_class);
                    result_i += field_class.len;
                    // If there are any bytes leftover, we have to try to combine
                    // the next field with them.
                    byte_i = field_size % 8;
                    // Step back onto the partially-filled eightbyte so the next
                    // field merges into it.
                    if (byte_i != 0) result_i -= 1;
                }
            }

            // Post-merger cleanup

            // "If one of the classes is MEMORY, the whole argument is passed in memory"
            // "If X87UP is not preceded by X87, the whole argument is passed in memory."
            var found_sseup = false;
            for (result) |item, i| switch (item) {
                .memory => return memory_class,
                .x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
                .sseup => found_sseup = true,
                else => continue,
            };
            // "If the size of the aggregate exceeds two eightbytes and the first eight-
            // byte isn't SSE or any other eightbyte isn't SSEUP, the whole argument
            // is passed in memory."
            if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;

            // "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
            // NOTE(review): assumes slot 0 is never .sseup at this point;
            // otherwise `i - 1` would underflow — confirm.
            for (result) |*item, i| {
                if (item.* == .sseup) switch (result[i - 1]) {
                    .sse, .sseup => continue,
                    else => item.* = .sse,
                };
            }
            return result;
        },
        .Union => {
            // "If the size of an object is larger than eight eightbytes, or
            // it contains unaligned fields, it has class MEMORY"
            // "If the size of the aggregate exceeds a single eightbyte, each is classified
            // separately.".
            const ty_size = ty.abiSize(target);
            if (ty_size > 64)
                return memory_class;

            const fields = ty.unionFields();
            for (fields.values()) |field| {
                // An under-aligned field forces MEMORY class.
                if (field.abi_align.tag() != .abi_align_default) {
                    const field_alignment = field.abi_align.toUnsignedInt();
                    if (field_alignment < field.ty.abiAlignment(target)) {
                        return memory_class;
                    }
                }
                // Combine this field with the previous one.
                // Union fields all start at offset 0, so each field's class
                // array is merged slot-by-slot into the running result.
                const field_class = classifySystemV(field.ty, target);
                for (result) |*result_item, i| {
                    const field_item = field_class[i];
                    // "If both classes are equal, this is the resulting class."
                    if (result_item.* == field_item) {
                        continue;
                    }

                    // "If one of the classes is NO_CLASS, the resulting class
                    // is the other class."
                    if (result_item.* == .none) {
                        result_item.* = field_item;
                        continue;
                    }
                    if (field_item == .none) {
                        continue;
                    }

                    // "If one of the classes is MEMORY, the result is the MEMORY class."
                    if (result_item.* == .memory or field_item == .memory) {
                        result_item.* = .memory;
                        continue;
                    }

                    // "If one of the classes is INTEGER, the result is the INTEGER."
                    if (result_item.* == .integer or field_item == .integer) {
                        result_item.* = .integer;
                        continue;
                    }

                    // "If one of the classes is X87, X87UP, COMPLEX_X87 class,
                    // MEMORY is used as class."
                    if (result_item.* == .x87 or
                        result_item.* == .x87up or
                        result_item.* == .complex_x87 or
                        field_item == .x87 or
                        field_item == .x87up or
                        field_item == .complex_x87)
                    {
                        result_item.* = .memory;
                        continue;
                    }

                    // "Otherwise class SSE is used."
                    result_item.* = .sse;
                }
            }

            // Post-merger cleanup

            // "If one of the classes is MEMORY, the whole argument is passed in memory"
            // "If X87UP is not preceded by X87, the whole argument is passed in memory."
            var found_sseup = false;
            for (result) |item, i| switch (item) {
                .memory => return memory_class,
                .x87up => if (i == 0 or result[i - 1] != .x87) return memory_class,
                .sseup => found_sseup = true,
                else => continue,
            };
            // "If the size of the aggregate exceeds two eightbytes and the first eight-
            // byte isn't SSE or any other eightbyte isn't SSEUP, the whole argument
            // is passed in memory."
            if (ty_size > 16 and (result[0] != .sse or !found_sseup)) return memory_class;

            // "If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE."
            // NOTE(review): assumes slot 0 is never .sseup at this point;
            // otherwise `i - 1` would underflow — confirm.
            for (result) |*item, i| {
                if (item.* == .sseup) switch (result[i - 1]) {
                    .sse, .sseup => continue,
                    else => item.* = .sse,
                };
            }
            return result;
        },
        else => unreachable,
    }
}

View File

@ -855,6 +855,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.shr => try self.airShr(inst),
.alloc => try self.airAlloc(inst),
.ret_ptr => try self.airRetPtr(inst),
.arg => try self.airArg(inst),
.assembly => try self.airAsm(inst),
.bitcast => try self.airBitCast(inst),
@ -883,6 +884,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.not => try self.airNot(inst),
.ptrtoint => try self.airPtrToInt(inst),
.ret => try self.airRet(inst),
.ret_load => try self.airRetLoad(inst),
.store => try self.airStore(inst),
.struct_field_ptr=> try self.airStructFieldPtr(inst),
.struct_field_val=> try self.airStructFieldVal(inst),
@ -914,6 +916,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.slice_ptr => try self.airSlicePtr(inst),
.slice_len => try self.airSliceLen(inst),
.array_elem_val => try self.airArrayElemVal(inst),
.slice_elem_val => try self.airSliceElemVal(inst),
.ptr_slice_elem_val => try self.airPtrSliceElemVal(inst),
.ptr_elem_val => try self.airPtrElemVal(inst),
@ -1185,6 +1188,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none });
}
/// Lowers the `ret_ptr` AIR instruction: reserves a stack slot whose
/// address acts as the function's result pointer.
fn airRetPtr(self: *Self, inst: Air.Inst.Index) !void {
    const offset = try self.allocMemPtr(inst);
    const mcv: MCValue = .{ .ptr_stack_offset = offset };
    return self.finishAir(inst, mcv, .{ .none, .none, .none });
}
fn airFptrunc(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) {
@ -1557,6 +1565,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
}
/// Lowers `array_elem_val`: produce the element of an array operand at a
/// runtime index. Not yet implemented for any architecture.
fn airArrayElemVal(self: *Self, inst: Air.Inst.Index) !void {
    const bin_op = self.air.instructions.items(.data)[inst].bin_op;
    if (self.liveness.isUnused(inst)) {
        return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
    }
    switch (arch) {
        else => return self.fail("TODO implement array_elem_val for {}", .{self.target.cpu.arch}),
    }
}
fn airPtrSliceElemVal(self: *Self, inst: Air.Inst.Index) !void {
const is_volatile = false; // TODO
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
@ -3213,6 +3229,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
/// Lowers `ret_load`: return the value loaded through the result pointer.
/// Currently unimplemented; the operand is resolved so liveness stays
/// consistent, then codegen bails with a TODO error.
fn airRetLoad(self: *Self, inst: Air.Inst.Index) !void {
    const un_op = self.air.instructions.items(.data)[inst].un_op;
    const result_ptr = try self.resolveInst(un_op);
    _ = result_ptr;
    return self.fail("TODO implement airRetLoad for {}", .{self.target.cpu.arch});
    //return self.finishAir(inst, .dead, .{ un_op, .none, .none });
}
fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
if (self.liveness.isUnused(inst))

View File

@ -384,12 +384,6 @@ pub const DeclGen = struct {
}
},
.Fn => switch (val.tag()) {
.null_value, .zero => try writer.writeAll("NULL"),
.one => try writer.writeAll("1"),
.decl_ref => {
const decl = val.castTag(.decl_ref).?.data;
return dg.renderDeclValue(writer, ty, val, decl);
},
.function => {
const decl = val.castTag(.function).?.data.owner_decl;
return dg.renderDeclValue(writer, ty, val, decl);
@ -1026,6 +1020,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.is_non_null_ptr => try airIsNull(f, inst, "!=", "[0]"),
.alloc => try airAlloc(f, inst),
.ret_ptr => try airRetPtr(f, inst),
.assembly => try airAsm(f, inst),
.block => try airBlock(f, inst),
.bitcast => try airBitcast(f, inst),
@ -1036,6 +1031,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.bool_to_int => try airBoolToInt(f, inst),
.load => try airLoad(f, inst),
.ret => try airRet(f, inst),
.ret_load => try airRetLoad(f, inst),
.store => try airStore(f, inst),
.loop => try airLoop(f, inst),
.cond_br => try airCondBr(f, inst),
@ -1081,6 +1077,7 @@ fn genBody(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail, OutO
.ptr_elem_ptr => try airPtrElemPtr(f, inst),
.slice_elem_val => try airSliceElemVal(f, inst, "["),
.ptr_slice_elem_val => try airSliceElemVal(f, inst, "[0]["),
.array_elem_val => try airArrayElemVal(f, inst),
.unwrap_errunion_payload => try airUnwrapErrUnionPay(f, inst),
.unwrap_errunion_err => try airUnwrapErrUnionErr(f, inst),
@ -1148,6 +1145,22 @@ fn airSliceElemVal(f: *Function, inst: Air.Inst.Index, prefix: []const u8) !CVal
return local;
}
/// Emits C code for `array_elem_val` as `<T> tN = array[index];` and
/// yields the new const local.
fn airArrayElemVal(f: *Function, inst: Air.Inst.Index) !CValue {
    if (f.liveness.isUnused(inst)) return CValue.none;

    const bin_op = f.air.instructions.items(.data)[inst].bin_op;
    const array_operand = try f.resolveInst(bin_op.lhs);
    const index_operand = try f.resolveInst(bin_op.rhs);

    const w = f.object.writer();
    const result = try f.allocLocal(f.air.typeOfIndex(inst), .Const);
    try w.writeAll(" = ");
    try f.writeCValue(w, array_operand);
    try w.writeAll("[");
    try f.writeCValue(w, index_operand);
    try w.writeAll("];\n");
    return result;
}
fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const inst_ty = f.air.typeOfIndex(inst);
@ -1161,6 +1174,18 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
return CValue{ .local_ref = local.local };
}
/// Emits C code for `ret_ptr`: declares a mutable local that backs the
/// return value, and yields a reference to that local's address.
fn airRetPtr(f: *Function, inst: Air.Inst.Index) !CValue {
    const w = f.object.writer();
    const ptr_ty = f.air.typeOfIndex(inst);
    // The instruction's type is a pointer; the storage we declare has the
    // pointee type.
    const storage = try f.allocLocal(ptr_ty.elemType(), .Mut);
    try w.writeAll(";\n");
    return CValue{ .local_ref = storage.local };
}
fn airArg(f: *Function) CValue {
const i = f.next_arg_index;
f.next_arg_index += 1;
@ -1212,6 +1237,21 @@ fn airRet(f: *Function, inst: Air.Inst.Index) !CValue {
return CValue.none;
}
/// Emits C code for `ret_load`: loads the return value through the result
/// pointer and returns it (`return *ptr;`).
fn airRetLoad(f: *Function, inst: Air.Inst.Index) !CValue {
    const un_op = f.air.instructions.items(.data)[inst].un_op;
    const writer = f.object.writer();
    const ptr_ty = f.air.typeOf(un_op);
    const ret_ty = ptr_ty.childType();
    if (!ret_ty.hasCodeGenBits()) {
        // Zero-bit return type: nothing to load, emit a bare `return;`.
        // Returning here is required -- falling through would emit a second,
        // unreachable `return *<ptr>;` statement after the bare return.
        try writer.writeAll("return;\n");
        return CValue.none;
    }
    const ptr = try f.resolveInst(un_op);
    try writer.writeAll("return *");
    try f.writeCValue(writer, ptr);
    try writer.writeAll(";\n");
    return CValue.none;
}
fn airIntCast(f: *Function, inst: Air.Inst.Index) !CValue {
if (f.liveness.isUnused(inst))
return CValue.none;
@ -1559,7 +1599,12 @@ fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
const pl_op = f.air.instructions.items(.data)[inst].pl_op;
const extra = f.air.extraData(Air.Call, pl_op.payload);
const args = @bitCast([]const Air.Inst.Ref, f.air.extra[extra.end..][0..extra.data.args_len]);
const fn_ty = f.air.typeOf(pl_op.operand);
const callee_ty = f.air.typeOf(pl_op.operand);
const fn_ty = switch (callee_ty.zigTypeTag()) {
.Fn => callee_ty,
.Pointer => callee_ty.childType(),
else => unreachable,
};
const ret_ty = fn_ty.fnReturnType();
const unused_result = f.liveness.isUnused(inst);
const writer = f.object.writer();
@ -1574,16 +1619,21 @@ fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(" = ");
}
if (f.air.value(pl_op.operand)) |func_val| {
const fn_decl = if (func_val.castTag(.extern_fn)) |extern_fn|
extern_fn.data
else if (func_val.castTag(.function)) |func_payload|
func_payload.data.owner_decl
else
unreachable;
try f.object.dg.renderDeclName(fn_decl, writer);
} else {
callee: {
known: {
const fn_decl = fn_decl: {
const callee_val = f.air.value(pl_op.operand) orelse break :known;
break :fn_decl switch (callee_val.tag()) {
.extern_fn => callee_val.castTag(.extern_fn).?.data,
.function => callee_val.castTag(.function).?.data.owner_decl,
.decl_ref => callee_val.castTag(.decl_ref).?.data,
else => break :known,
};
};
try f.object.dg.renderDeclName(fn_decl, writer);
break :callee;
}
// Fall back to function pointer call.
const callee = try f.resolveInst(pl_op.operand);
try f.writeCValue(writer, callee);
}

File diff suppressed because it is too large Load Diff

View File

@ -163,6 +163,18 @@ pub const Value = opaque {
pub const deleteFunction = LLVMDeleteFunction;
extern fn LLVMDeleteFunction(Fn: *const Value) void;
pub const addSretAttr = ZigLLVMAddSretAttr;
extern fn ZigLLVMAddSretAttr(fn_ref: *const Value, ArgNo: c_uint, type_val: *const Type) void;
pub const setCallSret = ZigLLVMSetCallSret;
extern fn ZigLLVMSetCallSret(Call: *const Value, return_type: *const Type) void;
pub const getParam = LLVMGetParam;
extern fn LLVMGetParam(Fn: *const Value, Index: c_uint) *const Value;
pub const setInitializer = LLVMSetInitializer;
extern fn LLVMSetInitializer(GlobalVar: *const Value, ConstantVal: *const Value) void;
};
pub const Type = opaque {
@ -292,12 +304,6 @@ pub const VerifierFailureAction = enum(c_int) {
pub const constNeg = LLVMConstNeg;
extern fn LLVMConstNeg(ConstantVal: *const Value) *const Value;
pub const setInitializer = LLVMSetInitializer;
extern fn LLVMSetInitializer(GlobalVar: *const Value, ConstantVal: *const Value) void;
pub const getParam = LLVMGetParam;
extern fn LLVMGetParam(Fn: *const Value, Index: c_uint) *const Value;
pub const getEnumAttributeKindForName = LLVMGetEnumAttributeKindForName;
extern fn LLVMGetEnumAttributeKindForName(Name: [*]const u8, SLen: usize) c_uint;

View File

@ -128,6 +128,7 @@ const Writer = struct {
.bool_and,
.bool_or,
.store,
.array_elem_val,
.slice_elem_val,
.ptr_slice_elem_val,
.ptr_elem_val,
@ -150,6 +151,7 @@ const Writer = struct {
.ptrtoint,
.bool_to_int,
.ret,
.ret_load,
=> try w.writeUnOp(s, inst),
.breakpoint,
@ -158,6 +160,7 @@ const Writer = struct {
.const_ty,
.alloc,
.ret_ptr,
=> try w.writeTy(s, inst),
.not,

View File

@ -1707,32 +1707,10 @@ pub const Type = extern union {
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.abiAlignment(target);
},
.union_tagged => {
const union_obj = self.castTag(.union_tagged).?.data;
var biggest: u32 = union_obj.tag_ty.abiAlignment(target);
for (union_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
const field_align = field.ty.abiAlignment(target);
if (field_align > biggest) {
biggest = field_align;
}
}
assert(biggest != 0);
return biggest;
},
.@"union" => {
const union_obj = self.castTag(.@"union").?.data;
var biggest: u32 = 0;
for (union_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
const field_align = field.ty.abiAlignment(target);
if (field_align > biggest) {
biggest = field_align;
}
}
assert(biggest != 0);
return biggest;
},
// TODO pass `true` for have_tag when unions have a safety tag
.@"union" => return self.castTag(.@"union").?.data.abiAlignment(target, false),
.union_tagged => return self.castTag(.union_tagged).?.data.abiAlignment(target, true),
.c_void,
.void,
.type,
@ -1790,6 +1768,7 @@ pub const Type = extern union {
const is_packed = s.layout == .Packed;
if (is_packed) @panic("TODO packed structs");
var size: u64 = 0;
var big_align: u32 = 0;
for (s.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
@ -1797,12 +1776,14 @@ pub const Type = extern union {
if (field.abi_align.tag() == .abi_align_default) {
break :a field.ty.abiAlignment(target);
} else {
break :a field.abi_align.toUnsignedInt();
break :a @intCast(u32, field.abi_align.toUnsignedInt());
}
};
big_align = @maximum(big_align, field_align);
size = std.mem.alignForwardGeneric(u64, size, field_align);
size += field.ty.abiSize(target);
}
size = std.mem.alignForwardGeneric(u64, size, big_align);
return size;
},
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
@ -1810,9 +1791,9 @@ pub const Type = extern union {
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.abiSize(target);
},
.@"union", .union_tagged => {
@panic("TODO abiSize unions");
},
// TODO pass `true` for have_tag when unions have a safety tag
.@"union" => return self.castTag(.@"union").?.data.abiSize(target, false),
.union_tagged => return self.castTag(.union_tagged).?.data.abiSize(target, true),
.u1,
.u8,
@ -2550,6 +2531,11 @@ pub const Type = extern union {
};
}
/// Asserts the type is a union; returns its field map.
pub fn unionFields(ty: Type) Module.Union.Fields {
    return ty.cast(Payload.Union).?.data.fields;
}
pub fn unionFieldType(ty: Type, enum_tag: Value) Type {
const union_obj = ty.cast(Payload.Union).?.data;
const index = union_obj.tag_ty.enumTagFieldIndex(enum_tag).?;
@ -2657,7 +2643,7 @@ pub const Type = extern union {
};
}
/// Asserts the type is an integer or enum.
/// Asserts the type is an integer, enum, or error set.
pub fn intInfo(self: Type, target: Target) struct { signedness: std.builtin.Signedness, bits: u16 } {
var ty = self;
while (true) switch (ty.tag()) {
@ -2700,6 +2686,11 @@ pub const Type = extern union {
return .{ .signedness = .unsigned, .bits = smallestUnsignedBits(field_count - 1) };
},
.error_set, .error_set_single, .anyerror, .error_set_inferred => {
// TODO revisit this when error sets support custom int types
return .{ .signedness = .unsigned, .bits = 16 };
},
else => unreachable,
};
}
@ -3151,12 +3142,12 @@ pub const Type = extern union {
/// Asserts the type is an enum or a union.
/// TODO support unions
pub fn intTagType(self: Type, buffer: *Payload.Bits) Type {
switch (self.tag()) {
.enum_full, .enum_nonexhaustive => return self.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => return self.castTag(.enum_numbered).?.data.tag_ty,
pub fn intTagType(ty: Type, buffer: *Payload.Bits) Type {
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty,
.enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty,
.enum_simple => {
const enum_simple = self.castTag(.enum_simple).?.data;
const enum_simple = ty.castTag(.enum_simple).?.data;
const bits = std.math.log2_int_ceil(usize, enum_simple.fields.count());
buffer.* = .{
.base = .{ .tag = .int_unsigned },
@ -3164,6 +3155,7 @@ pub const Type = extern union {
};
return Type.initPayload(&buffer.base);
},
.union_tagged => return ty.castTag(.union_tagged).?.data.tag_ty.intTagType(buffer),
else => unreachable,
}
}
@ -3317,6 +3309,16 @@ pub const Type = extern union {
}
}
/// Asserts the type is a struct; returns its field map.
pub fn structFields(ty: Type) Module.Struct.Fields {
    return switch (ty.tag()) {
        .@"struct" => ty.castTag(.@"struct").?.data.fields,
        else => unreachable,
    };
}
pub fn structFieldCount(ty: Type) usize {
switch (ty.tag()) {
.@"struct" => {
@ -3815,7 +3817,7 @@ pub const Type = extern union {
bit_offset: u16 = 0,
host_size: u16 = 0,
@"allowzero": bool = false,
mutable: bool = true, // TODO change this to const, not mutable
mutable: bool = true, // TODO rename this to const, not mutable
@"volatile": bool = false,
size: std.builtin.TypeInfo.Pointer.Size = .One,
};

View File

@ -15,7 +15,6 @@ test {
_ = @import("behavior/bugs/4769_a.zig");
_ = @import("behavior/bugs/4769_b.zig");
_ = @import("behavior/bugs/6850.zig");
_ = @import("behavior/bugs/9584.zig");
_ = @import("behavior/call.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/defer.zig");
@ -104,6 +103,7 @@ test {
_ = @import("behavior/bugs/7047.zig");
_ = @import("behavior/bugs/7003.zig");
_ = @import("behavior/bugs/7250.zig");
_ = @import("behavior/bugs/9584.zig");
_ = @import("behavior/byteswap.zig");
_ = @import("behavior/byval_arg_var.zig");
_ = @import("behavior/call_stage1.zig");

View File

@ -50,3 +50,29 @@ test "array literal with inferred length" {
try expect(hex_mult.len == 4);
try expect(hex_mult[1] == 256);
}
// `some_array.len` must work both in a comptime expression and as an
// array length inside a type declaration.
test "array dot len const expr" {
    const len_is_four = comptime (some_array.len == 4);
    try expect(len_is_four);
}

const ArrayDotLenConstExpr = struct {
    y: [some_array.len]u8,
};

const some_array = [_]u8{ 0, 1, 2, 3 };
test "array literal with specified size" {
    var pair = [2]u8{ 1, 2 };
    try expect(pair[0] == 1);
    try expect(pair[1] == 2);
}
// `.len` must be readable on both arrays and single-item pointers to
// arrays, at runtime and at comptime.
test "array len field" {
    var backing = [4]u8{ 0, 0, 0, 0 };
    var array_ptr = &backing;
    try expect(backing.len == 4);
    comptime try expect(backing.len == 4);
    try expect(array_ptr.len == 4);
    comptime try expect(array_ptr.len == 4);
}

View File

@ -39,17 +39,6 @@ test "void arrays" {
try expect(array.len == 4);
}
test "array dot len const expr" {
try expect(comptime x: {
break :x some_array.len == 4;
});
}
const ArrayDotLenConstExpr = struct {
y: [some_array.len]u8,
};
const some_array = [_]u8{ 0, 1, 2, 3 };
test "nested arrays" {
const array_of_strings = [_][]const u8{ "hello", "this", "is", "my", "thing" };
for (array_of_strings) |s, i| {
@ -76,24 +65,6 @@ test "set global var array via slice embedded in struct" {
try expect(s_array[2].b == 3);
}
test "array literal with specified size" {
var array = [2]u8{
1,
2,
};
try expect(array[0] == 1);
try expect(array[1] == 2);
}
test "array len field" {
var arr = [4]u8{ 0, 0, 0, 0 };
var ptr = &arr;
try expect(arr.len == 4);
comptime try expect(arr.len == 4);
try expect(ptr.len == 4);
comptime try expect(ptr.len == 4);
}
test "single-item pointer to array indexing and slicing" {
try testSingleItemPtrArrayIndexSlice();
comptime try testSingleItemPtrArrayIndexSlice();

View File

@ -57,4 +57,5 @@ test "bug 9584" {
.x = flags,
};
try b(&x);
comptime if (@sizeOf(A) != 1) unreachable;
}

View File

@ -144,3 +144,11 @@ fn makeBar2(x: i32, y: i32) Bar {
.y = y,
};
}
// Returning a zero-bit struct by value must compile and run.
test "return empty struct from fn" {
    const result = testReturnEmptyStructFromFn();
    _ = result;
}
const EmptyStruct2 = struct {};
fn testReturnEmptyStructFromFn() EmptyStruct2 {
    return EmptyStruct2{};
}

View File

@ -72,9 +72,6 @@ const EmptyStruct = struct {
}
};
test "return empty struct from fn" {
_ = testReturnEmptyStructFromFn();
}
const EmptyStruct2 = struct {};
fn testReturnEmptyStructFromFn() EmptyStruct2 {
return EmptyStruct2{};