Merge pull request #10649 from ziglang/stage2-tuples

stage2: implement tuples
Andrew Kelley 2022-01-20 18:24:01 -05:00 committed by GitHub
commit c9ae24503d
10 changed files with 609 additions and 112 deletions

@ -510,9 +510,11 @@ pub const Inst = struct {
/// Uses the `un_op` field.
error_name,
/// Constructs a vector value out of runtime-known elements.
/// Constructs a vector, tuple, or array value out of runtime-known elements.
/// Some of the elements may be comptime-known.
/// Uses the `ty_pl` field, payload is index of an array of elements, each of which
/// is a `Ref`. Length of the array is given by the vector type.
/// TODO rename this to `array_init` and make it support array values too.
vector_init,
/// Communicates an intent to load memory.
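For context, this is the kind of user-level expression the instruction now encodes; a minimal hypothetical test, not part of this commit:

const std = @import("std");

test "runtime elements in an anonymous list literal" {
    var x: u32 = 1; // runtime-known element
    const t = .{ x, @as(u32, 2) }; // comptime-known element alongside it
    try std.testing.expect(t[0] + t[1] == 3);
}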

@ -2581,9 +2581,12 @@ fn varDecl(
// Depending on the type of AST the initialization expression is, we may need an lvalue
// or an rvalue as a result location. If it is an rvalue, we can use the instruction as
// the variable, no memory location needed.
if (align_inst == .none and !nodeMayNeedMemoryLocation(tree, var_decl.ast.init_node)) {
const result_loc: ResultLoc = if (var_decl.ast.type_node != 0) .{
.ty = try typeExpr(gz, scope, var_decl.ast.type_node),
const type_node = var_decl.ast.type_node;
if (align_inst == .none and
!nodeMayNeedMemoryLocation(tree, var_decl.ast.init_node, type_node != 0))
{
const result_loc: ResultLoc = if (type_node != 0) .{
.ty = try typeExpr(gz, scope, type_node),
} else .none;
const init_inst = try reachableExpr(gz, scope, result_loc, var_decl.ast.init_node, node);
@ -6008,7 +6011,7 @@ fn ret(gz: *GenZir, scope: *Scope, node: Ast.Node.Index) InnerError!Zir.Inst.Ref
return Zir.Inst.Ref.unreachable_value;
}
const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node)) .{
const rl: ResultLoc = if (nodeMayNeedMemoryLocation(tree, operand_node, true)) .{
.ptr = try gz.addNodeExtended(.ret_ptr, node),
} else .{
.ty = try gz.addNodeExtended(.ret_type, node),
@ -7725,7 +7728,7 @@ const primitives = std.ComptimeStringMap(Zir.Inst.Ref, .{
.{ "void", .void_type },
});
fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool {
fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index, have_res_ty: bool) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
const main_tokens = tree.nodes.items(.main_token);
@ -7875,24 +7878,27 @@ fn nodeMayNeedMemoryLocation(tree: *const Ast, start_node: Ast.Node.Index) bool
.@"orelse",
=> node = node_datas[node].rhs,
// True because these are exactly the expressions we need memory locations for.
// Array and struct init exprs write to result locs, but anon literals do not.
.array_init_one,
.array_init_one_comma,
.struct_init_one,
.struct_init_one_comma,
.array_init,
.array_init_comma,
.struct_init,
.struct_init_comma,
=> return have_res_ty or node_datas[node].lhs != 0,
// Anon literals do not need result location.
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
.struct_init_one,
.struct_init_one_comma,
.struct_init_dot_two,
.struct_init_dot_two_comma,
.struct_init_dot,
.struct_init_dot_comma,
.struct_init,
.struct_init_comma,
=> return true,
=> return have_res_ty,
// True because depending on comptime conditions, sub-expressions
// may be the kind that need memory locations.
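In short: typed array/struct inits always want a result pointer, while anonymous literals only benefit from one when a result type is available. A hypothetical sketch of both cases:

const std = @import("std");

test "anonymous literals and result locations" {
    var y: u32 = 5;
    const a: [2]u32 = .{ 1, y }; // result type known: init writes through a result pointer
    const t = .{ 1, y }; // anonymous tuple: no memory location needed
    try std.testing.expect(a[1] == t[1]);
}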

@ -373,7 +373,7 @@ fn analyzeInst(
.vector_init => {
const ty_pl = inst_datas[inst].ty_pl;
const vector_ty = a.air.getRefType(ty_pl.ty);
const len = vector_ty.vectorLen();
const len = @intCast(usize, vector_ty.arrayLen());
const elements = @bitCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {

@ -821,6 +821,8 @@ pub const ErrorSet = struct {
}
};
pub const RequiresComptime = enum { no, yes, unknown, wip };
/// Represents the data that a struct declaration provides.
pub const Struct = struct {
/// The Decl that corresponds to the struct itself.
@ -849,6 +851,7 @@ pub const Struct = struct {
/// If true, definitely nonzero size at runtime. If false, resolving the fields
/// is necessary to determine whether it has bits at runtime.
known_has_bits: bool,
requires_comptime: RequiresComptime = .unknown,
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
@ -1038,6 +1041,7 @@ pub const Union = struct {
// which `have_layout` does not ensure.
fully_resolved,
},
requires_comptime: RequiresComptime = .unknown,
pub const Field = struct {
/// undefined until `status` is `have_field_types` or `have_layout`.
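A sketch of what the new tri-state caches, using hypothetical declarations:

const NeedsComptime = struct { t: type }; // resolves to requires_comptime = .yes
const Plain = struct { x: u32 }; // resolves to requires_comptime = .no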

@ -2628,6 +2628,7 @@ fn validateUnionInit(
// Otherwise, the bitcast should be preserved and a store instruction should be
// emitted to store the constant union value through the bitcast.
},
.alloc => {},
else => |t| {
if (std.debug.runtime_safety) {
std.debug.panic("unexpected AIR tag for union pointer: {s}", .{@tagName(t)});
@ -10694,12 +10695,77 @@ fn zirArrayInit(
}
}
fn zirArrayInitAnon(sema: *Sema, block: *Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref {
fn zirArrayInitAnon(
sema: *Sema,
block: *Block,
inst: Zir.Inst.Index,
is_ref: bool,
) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.MultiOp, inst_data.payload_index);
const operands = sema.code.refSlice(extra.end, extra.data.operands_len);
_ = is_ref;
return sema.fail(block, src, "TODO: Sema.zirArrayInitAnon", .{});
const types = try sema.arena.alloc(Type, operands.len);
const values = try sema.arena.alloc(Value, operands.len);
const opt_runtime_src = rs: {
var runtime_src: ?LazySrcLoc = null;
for (operands) |operand, i| {
const elem = sema.resolveInst(operand);
types[i] = sema.typeOf(elem);
const operand_src = src; // TODO better source location
if (try sema.resolveMaybeUndefVal(block, operand_src, elem)) |val| {
values[i] = val;
} else {
values[i] = Value.initTag(.unreachable_value);
runtime_src = operand_src;
}
}
break :rs runtime_src;
};
const tuple_ty = try Type.Tag.tuple.create(sema.arena, .{
.types = types,
.values = values,
});
const runtime_src = opt_runtime_src orelse {
const tuple_val = try Value.Tag.@"struct".create(sema.arena, values);
if (!is_ref) return sema.addConstant(tuple_ty, tuple_val);
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
const decl = try anon_decl.finish(
try tuple_ty.copy(anon_decl.arena()),
try tuple_val.copy(anon_decl.arena()),
);
return sema.analyzeDeclRef(decl);
};
if (is_ref) {
const alloc = try block.addTy(.alloc, tuple_ty);
for (operands) |operand, i_usize| {
const i = @intCast(u32, i_usize);
const field_ptr_ty = try Type.ptr(sema.arena, .{
.mutable = true,
.@"addrspace" = target_util.defaultAddressSpace(sema.mod.getTarget(), .local),
.pointee_type = types[i],
});
const field_ptr = try block.addStructFieldPtr(alloc, i, field_ptr_ty);
_ = try block.addBinOp(.store, field_ptr, sema.resolveInst(operand));
}
return alloc;
}
const element_refs = try sema.arena.alloc(Air.Inst.Ref, operands.len);
for (operands) |operand, i| {
element_refs[i] = sema.resolveInst(operand);
}
try sema.requireRuntimeBlock(block, runtime_src);
return block.addVectorInit(tuple_ty, element_refs);
}
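The two paths, sketched as a hypothetical test: all-comptime operands fold to a constant tuple (promoted to an anonymous Decl when a reference is requested), while any runtime operand routes through vector_init or per-field stores:

const std = @import("std");

test "anonymous list literal: comptime and runtime paths" {
    const ct = .{ @as(u32, 1), true }; // all comptime-known: folds to a constant tuple
    var x: u32 = 2; // runtime-known
    const rt = .{ x, true }; // mixed: runtime elements feed a vector_init
    try std.testing.expect(ct[0] == 1 and rt[0] == 2 and rt[1]);
}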
fn zirFieldTypeRef(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -13540,10 +13606,50 @@ fn elemVal(
// TODO: If the index is a vector, the result should be a vector.
return elemValArray(sema, block, array, elem_index, array_src, elem_index_src);
},
.Struct => {
// Tuple field access.
const index_val = try sema.resolveConstValue(block, elem_index_src, elem_index);
const index = @intCast(u32, index_val.toUnsignedInt());
return tupleField(sema, block, array, index, array_src, elem_index_src);
},
else => unreachable,
}
}
fn tupleField(
sema: *Sema,
block: *Block,
tuple: Air.Inst.Ref,
field_index: u32,
tuple_src: LazySrcLoc,
field_index_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
const tuple_ty = sema.typeOf(tuple);
const tuple_info = tuple_ty.castTag(.tuple).?.data;
if (field_index >= tuple_info.types.len) {
return sema.fail(block, field_index_src, "index {d} outside tuple of length {d}", .{
field_index, tuple_info.types.len,
});
}
const field_ty = tuple_info.types[field_index];
const field_val = tuple_info.values[field_index];
if (field_val.tag() != .unreachable_value) {
return sema.addConstant(field_ty, field_val); // comptime field
}
if (try sema.resolveMaybeUndefVal(block, tuple_src, tuple)) |tuple_val| {
if (tuple_val.isUndef()) return sema.addConstUndef(field_ty);
const field_values = tuple_val.castTag(.@"struct").?.data;
return sema.addConstant(field_ty, field_values[field_index]);
}
try sema.requireRuntimeBlock(block, tuple_src);
return block.addStructFieldVal(tuple, field_index, field_ty);
}
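The user-facing effect, as a hypothetical test: a comptime-known index folds comptime fields to constants and emits struct_field_val only for runtime fields:

const std = @import("std");

test "indexing a tuple" {
    var x: u32 = 7;
    const t = .{ x, @as(u32, 42) };
    try std.testing.expect(t[1] == 42); // comptime field: folded to a constant
    try std.testing.expect(t[0] == 7); // runtime field: struct_field_val
}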
fn elemValArray(
sema: *Sema,
block: *Block,
@ -13901,17 +14007,19 @@ fn coerce(
else => {},
},
.Array => switch (inst_ty.zigTypeTag()) {
.Vector => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
.Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
.Struct => {
if (inst == .empty_struct) {
return arrayInitEmpty(sema, dest_ty);
}
if (inst_ty.isTuple()) {
return sema.coerceTupleToArray(block, dest_ty, dest_ty_src, inst, inst_src);
}
},
else => {},
},
.Vector => switch (inst_ty.zigTypeTag()) {
.Array => return sema.coerceVectorInMemory(block, dest_ty, dest_ty_src, inst, inst_src),
.Vector => return sema.coerceVectors(block, dest_ty, dest_ty_src, inst, inst_src),
.Array, .Vector => return sema.coerceArrayLike(block, dest_ty, dest_ty_src, inst, inst_src),
else => {},
},
.Struct => {
@ -14276,12 +14384,30 @@ fn storePtr2(
uncasted_operand: Air.Inst.Ref,
operand_src: LazySrcLoc,
air_tag: Air.Inst.Tag,
) !void {
) CompileError!void {
const ptr_ty = sema.typeOf(ptr);
if (ptr_ty.isConstPtr())
return sema.fail(block, src, "cannot assign to constant", .{});
return sema.fail(block, ptr_src, "cannot assign to constant", .{});
const elem_ty = ptr_ty.childType();
// To generate better code for tuples, we detect a tuple operand here, and
// analyze field loads and stores directly. This avoids an extra allocation + memcpy
// which would occur if we used `coerce`.
const operand_ty = sema.typeOf(uncasted_operand);
if (operand_ty.castTag(.tuple)) |payload| {
const tuple_fields_len = payload.data.types.len;
var i: u32 = 0;
while (i < tuple_fields_len) : (i += 1) {
const elem_src = operand_src; // TODO better source location
const elem = try tupleField(sema, block, uncasted_operand, i, operand_src, elem_src);
const elem_index = try sema.addIntUnsigned(Type.usize, i);
const elem_ptr = try sema.elemPtr(block, ptr_src, ptr, elem_index, elem_src);
try sema.storePtr2(block, src, elem_ptr, elem_src, elem, elem_src, .store);
}
return;
}
const operand = try sema.coerce(block, elem_ty, uncasted_operand, operand_src);
if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null)
return;
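A hypothetical example of the store this optimizes:

const std = @import("std");

test "storing a tuple through a pointer" {
    var x: u32 = 1;
    const t = .{ x, @as(u32, 2) };
    var arr: [2]u32 = undefined;
    arr = t; // decomposed into per-field element stores, no temporary + memcpy
    try std.testing.expect(arr[0] == 1 and arr[1] == 2);
}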
@ -14847,54 +14973,8 @@ fn coerceEnumToUnion(
return sema.failWithOwnedErrorMsg(msg);
}
/// Coerces vectors/arrays which have the same in-memory layout. This can be used for
/// both coercing from and to vectors.
/// TODO (affects the lang spec) delete this in favor of always using `coerceVectors`.
fn coerceVectorInMemory(
sema: *Sema,
block: *Block,
dest_ty: Type,
dest_ty_src: LazySrcLoc,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const inst_ty = sema.typeOf(inst);
const inst_len = inst_ty.arrayLen();
const dest_len = dest_ty.arrayLen();
if (dest_len != inst_len) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
dest_ty, inst_ty,
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const target = sema.mod.getTarget();
const dest_elem_ty = dest_ty.childType();
const inst_elem_ty = inst_ty.childType();
const in_memory_result = try sema.coerceInMemoryAllowed(block, dest_elem_ty, inst_elem_ty, false, target, dest_ty_src, inst_src);
if (in_memory_result != .ok) {
// TODO recursive error notes for coerceInMemoryAllowed failure
return sema.fail(block, inst_src, "expected {}, found {}", .{ dest_ty, inst_ty });
}
if (try sema.resolveMaybeUndefVal(block, inst_src, inst)) |inst_val| {
// These types share the same comptime value representation.
return sema.addConstant(dest_ty, inst_val);
}
try sema.requireRuntimeBlock(block, inst_src);
return block.addBitCast(dest_ty, inst);
}
/// If the lengths match, coerces element-wise.
fn coerceVectors(
fn coerceArrayLike(
sema: *Sema,
block: *Block,
dest_ty: Type,
@ -14965,6 +15045,63 @@ fn coerceVectors(
);
}
/// If the lengths match, coerces element-wise.
fn coerceTupleToArray(
sema: *Sema,
block: *Block,
dest_ty: Type,
dest_ty_src: LazySrcLoc,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const inst_ty = sema.typeOf(inst);
const inst_len = inst_ty.arrayLen();
const dest_len = try sema.usizeCast(block, dest_ty_src, dest_ty.arrayLen());
if (dest_len != inst_len) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "expected {}, found {}", .{
dest_ty, inst_ty,
});
errdefer msg.destroy(sema.gpa);
try sema.errNote(block, dest_ty_src, msg, "destination has length {d}", .{dest_len});
try sema.errNote(block, inst_src, msg, "source has length {d}", .{inst_len});
break :msg msg;
};
return sema.failWithOwnedErrorMsg(msg);
}
const element_vals = try sema.arena.alloc(Value, dest_len);
const element_refs = try sema.arena.alloc(Air.Inst.Ref, dest_len);
const dest_elem_ty = dest_ty.childType();
var runtime_src: ?LazySrcLoc = null;
for (element_vals) |*elem, i_usize| {
const i = @intCast(u32, i_usize);
const elem_src = inst_src; // TODO better source location
const elem_ref = try tupleField(sema, block, inst, i, inst_src, elem_src);
const coerced = try sema.coerce(block, dest_elem_ty, elem_ref, elem_src);
element_refs[i] = coerced;
if (runtime_src == null) {
if (try sema.resolveMaybeUndefVal(block, elem_src, coerced)) |elem_val| {
elem.* = elem_val;
} else {
runtime_src = elem_src;
}
}
}
if (runtime_src) |rs| {
try sema.requireRuntimeBlock(block, rs);
return block.addVectorInit(dest_ty, element_refs);
}
return sema.addConstant(
dest_ty,
try Value.Tag.array.create(sema.arena, element_vals),
);
}
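A hypothetical example of the coercion this implements (the behavior test at the end of this diff exercises the same path with a union element type):

const std = @import("std");

test "coercing a tuple to an array" {
    var x: u8 = 3;
    const t = .{ x, @as(u8, 4) };
    const arr: [2]u8 = t; // element-wise coercion; lengths must match
    try std.testing.expect(arr[0] == 3 and arr[1] == 4);
}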
fn analyzeDeclVal(
sema: *Sema,
block: *Block,
@ -15833,19 +15970,22 @@ fn resolveStructLayout(
ty: Type,
) CompileError!void {
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const struct_obj = resolved_ty.castTag(.@"struct").?.data;
switch (struct_obj.status) {
.none, .have_field_types => {},
.field_types_wip, .layout_wip => {
return sema.fail(block, src, "struct {} depends on itself", .{ty});
},
.have_layout, .fully_resolved_wip, .fully_resolved => return,
if (resolved_ty.castTag(.@"struct")) |payload| {
const struct_obj = payload.data;
switch (struct_obj.status) {
.none, .have_field_types => {},
.field_types_wip, .layout_wip => {
return sema.fail(block, src, "struct {} depends on itself", .{ty});
},
.have_layout, .fully_resolved_wip, .fully_resolved => return,
}
struct_obj.status = .layout_wip;
for (struct_obj.fields.values()) |field| {
try sema.resolveTypeLayout(block, src, field.ty);
}
struct_obj.status = .have_layout;
}
struct_obj.status = .layout_wip;
for (struct_obj.fields.values()) |field| {
try sema.resolveTypeLayout(block, src, field.ty);
}
struct_obj.status = .have_layout;
// otherwise it's a tuple; no need to resolve anything
}
fn resolveUnionLayout(
@ -16642,6 +16782,17 @@ pub fn typeHasOnePossibleValue(
}
return Value.initTag(.empty_struct_value);
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
for (tuple.values) |val| {
if (val.tag() == .unreachable_value) {
return null; // non-comptime field
}
}
return Value.initTag(.empty_struct_value);
},
.enum_numbered => {
const resolved_ty = try sema.resolveTypeFields(block, src, ty);
const enum_obj = resolved_ty.castTag(.enum_numbered).?.data;

@ -916,6 +916,31 @@ pub const DeclGen = struct {
// reference, we need to copy it here.
gop.key_ptr.* = try t.copy(dg.object.type_map_arena.allocator());
if (t.castTag(.tuple)) |tuple| {
const llvm_struct_ty = dg.context.structCreateNamed("");
gop.value_ptr.* = llvm_struct_ty; // must be done before any recursive calls
const types = tuple.data.types;
const values = tuple.data.values;
var llvm_field_types = try std.ArrayListUnmanaged(*const llvm.Type).initCapacity(gpa, types.len);
defer llvm_field_types.deinit(gpa);
for (types) |field_ty, i| {
const field_val = values[i];
if (field_val.tag() != .unreachable_value) continue;
llvm_field_types.appendAssumeCapacity(try dg.llvmType(field_ty));
}
llvm_struct_ty.structSetBody(
llvm_field_types.items.ptr,
@intCast(c_uint, llvm_field_types.items.len),
.False,
);
return llvm_struct_ty;
}
const struct_obj = t.castTag(.@"struct").?.data;
const name = try struct_obj.getFullyQualifiedName(gpa);
@ -2687,10 +2712,23 @@ pub const FuncGen = struct {
if (!field_ty.hasCodeGenBits()) {
return null;
}
assert(isByRef(struct_ty));
const target = self.dg.module.getTarget();
if (!isByRef(struct_ty)) {
assert(!isByRef(field_ty));
switch (struct_ty.zigTypeTag()) {
.Struct => {
var ptr_ty_buf: Type.Payload.Pointer = undefined;
const llvm_field_index = llvmFieldIndex(struct_ty, field_index, target, &ptr_ty_buf).?;
return self.builder.buildExtractValue(struct_llvm_val, llvm_field_index, "");
},
.Union => {
return self.todo("airStructFieldVal byval union", .{});
},
else => unreachable,
}
}
switch (struct_ty.zigTypeTag()) {
.Struct => {
var ptr_ty_buf: Type.Payload.Pointer = undefined;
@ -4370,19 +4408,85 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const vector_ty = self.air.typeOfIndex(inst);
const len = vector_ty.arrayLen();
const result_ty = self.air.typeOfIndex(inst);
const len = @intCast(usize, result_ty.arrayLen());
const elements = @bitCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const llvm_vector_ty = try self.dg.llvmType(vector_ty);
const llvm_u32 = self.context.intType(32);
const llvm_result_ty = try self.dg.llvmType(result_ty);
var vector = llvm_vector_ty.getUndef();
for (elements) |elem, i| {
const index_u32 = llvm_u32.constInt(i, .False);
const llvm_elem = try self.resolveInst(elem);
vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, "");
switch (result_ty.zigTypeTag()) {
.Vector => {
const llvm_u32 = self.context.intType(32);
var vector = llvm_result_ty.getUndef();
for (elements) |elem, i| {
const index_u32 = llvm_u32.constInt(i, .False);
const llvm_elem = try self.resolveInst(elem);
vector = self.builder.buildInsertElement(vector, llvm_elem, index_u32, "");
}
return vector;
},
.Struct => {
const tuple = result_ty.castTag(.tuple).?.data;
if (isByRef(result_ty)) {
const llvm_u32 = self.context.intType(32);
const alloca_inst = self.buildAlloca(llvm_result_ty);
const target = self.dg.module.getTarget();
alloca_inst.setAlignment(result_ty.abiAlignment(target));
var indices: [2]*const llvm.Value = .{ llvm_u32.constNull(), undefined };
var llvm_i: u32 = 0;
for (elements) |elem, i| {
if (tuple.values[i].tag() != .unreachable_value) continue;
const field_ty = tuple.types[i];
const llvm_elem = try self.resolveInst(elem);
indices[1] = llvm_u32.constInt(llvm_i, .False);
llvm_i += 1;
const field_ptr = self.builder.buildInBoundsGEP(alloca_inst, &indices, indices.len, "");
const store_inst = self.builder.buildStore(llvm_elem, field_ptr);
store_inst.setAlignment(field_ty.abiAlignment(target));
}
return alloca_inst;
} else {
var result = llvm_result_ty.getUndef();
var llvm_i: u32 = 0;
for (elements) |elem, i| {
if (tuple.values[i].tag() != .unreachable_value) continue;
const llvm_elem = try self.resolveInst(elem);
result = self.builder.buildInsertValue(result, llvm_elem, llvm_i, "");
llvm_i += 1;
}
return result;
}
},
.Array => {
assert(isByRef(result_ty));
const llvm_usize = try self.dg.llvmType(Type.usize);
const target = self.dg.module.getTarget();
const alloca_inst = self.buildAlloca(llvm_result_ty);
alloca_inst.setAlignment(result_ty.abiAlignment(target));
const elem_ty = result_ty.childType();
for (elements) |elem, i| {
const indices: [2]*const llvm.Value = .{
llvm_usize.constNull(),
llvm_usize.constInt(@intCast(c_uint, i), .False),
};
const elem_ptr = self.builder.buildInBoundsGEP(alloca_inst, &indices, indices.len, "");
const llvm_elem = try self.resolveInst(elem);
const store_inst = self.builder.buildStore(llvm_elem, elem_ptr);
store_inst.setAlignment(elem_ty.abiAlignment(target));
}
return alloca_inst;
},
else => unreachable,
}
return vector;
}
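Note that comptime tuple fields are skipped in every branch above: they occupy no runtime storage. A hypothetical illustration:

const std = @import("std");

test "comptime tuple fields occupy no runtime storage" {
    var x: u32 = 9; // the only runtime-known field
    const t = .{ x, @as(u32, 10) }; // lowers to an LLVM struct with a single u32 member
    try std.testing.expect(t[0] + t[1] == 19);
}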
fn airPrefetch(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value {
@ -4956,6 +5060,29 @@ fn llvmFieldIndex(
target: std.Target,
ptr_pl_buf: *Type.Payload.Pointer,
) ?c_uint {
if (ty.castTag(.tuple)) |payload| {
const values = payload.data.values;
var llvm_field_index: c_uint = 0;
for (values) |val, i| {
if (val.tag() != .unreachable_value) {
continue;
}
if (field_index > i) {
llvm_field_index += 1;
continue;
}
const field_ty = payload.data.types[i];
ptr_pl_buf.* = .{
.data = .{
.pointee_type = field_ty,
.@"align" = field_ty.abiAlignment(target),
.@"addrspace" = .generic,
},
};
return llvm_field_index;
}
return null;
}
const struct_obj = ty.castTag(.@"struct").?.data;
if (struct_obj.layout != .Packed) {
var llvm_field_index: c_uint = 0;
@ -4976,7 +5103,7 @@ fn llvmFieldIndex(
};
return llvm_field_index;
} else {
// We did not find an llvm field that corrispons to this zig field.
// We did not find an llvm field that corresponds to this zig field.
return null;
}
}
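The tuple branch of llvmFieldIndex remaps Zig field indices to LLVM field indices by skipping comptime fields. A hypothetical illustration:

// For `.{ x, @as(u32, 5), y }` with runtime `x` and `y`:
//   Zig field 0 (x) -> LLVM field 0
//   Zig field 2 (y) -> LLVM field 1 (the comptime field at index 1 has no slot)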
@ -5072,6 +5199,10 @@ fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool
}
fn isByRef(ty: Type) bool {
// For tuples (and TODO structs), if there are more than this many non-void
// fields, then we make it byref, otherwise byval.
const max_fields_byval = 2;
switch (ty.zigTypeTag()) {
.Type,
.ComptimeInt,
@ -5096,7 +5227,26 @@ fn isByRef(ty: Type) bool {
.AnyFrame,
=> return false,
.Array, .Struct, .Frame => return ty.hasCodeGenBits(),
.Array, .Frame => return ty.hasCodeGenBits(),
.Struct => {
if (!ty.hasCodeGenBits()) return false;
if (ty.castTag(.tuple)) |tuple| {
var count: usize = 0;
for (tuple.data.values) |field_val, i| {
if (field_val.tag() != .unreachable_value) continue;
count += 1;
if (count > max_fields_byval) {
return true;
}
const field_ty = tuple.data.types[i];
if (isByRef(field_ty)) {
return true;
}
}
return false;
}
return true;
},
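// With max_fields_byval = 2: a tuple with one or two runtime fields is passed
// by value; three or more runtime fields, or any byref field, flips it to
// byref. Comptime fields are not counted.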
.Union => return ty.hasCodeGenBits(),
.ErrorUnion => return isByRef(ty.errorUnionPayload()),
.Optional => {

@ -296,7 +296,7 @@ const Writer = struct {
fn writeVectorInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const vector_ty = w.air.getRefType(ty_pl.ty);
const len = vector_ty.vectorLen();
const len = @intCast(usize, vector_ty.arrayLen());
const elements = @bitCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]);
try s.print("{}, [", .{vector_ty});

@ -1963,7 +1963,8 @@ const Writer = struct {
if (i != 0) try stream.writeAll(", ");
try self.writeInstRef(stream, arg);
}
try stream.writeAll("})");
try stream.writeAll("}) ");
try self.writeSrc(stream, inst_data.src());
}
fn writeUnreachable(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {

@ -128,6 +128,7 @@ pub const Type = extern union {
.prefetch_options,
.export_options,
.extern_options,
.tuple,
=> return .Struct,
.enum_full,
@ -604,6 +605,24 @@ pub const Type = extern union {
return a_payload.data == b_payload.data;
}
}
if (a.castTag(.tuple)) |a_payload| {
if (b.castTag(.tuple)) |b_payload| {
if (a_payload.data.types.len != b_payload.data.types.len) return false;
for (a_payload.data.types) |a_ty, i| {
const b_ty = b_payload.data.types[i];
if (!eql(a_ty, b_ty)) return false;
}
for (a_payload.data.values) |a_val, i| {
const ty = a_payload.data.types[i];
const b_val = b_payload.data.values[i];
if (!Value.eql(a_val, b_val, ty)) return false;
}
return true;
}
}
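// Consequently, two independently constructed tuple types compare equal when
// their field types and comptime values match pairwise, e.g. the types of
// `.{ @as(u32, 1), true }` at two different call sites.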
return a.tag() == b.tag();
},
.Enum => {
@ -891,6 +910,21 @@ pub const Type = extern union {
.elem_type = try payload.elem_type.copy(allocator),
});
},
.tuple => {
const payload = self.castTag(.tuple).?.data;
const types = try allocator.alloc(Type, payload.types.len);
const values = try allocator.alloc(Value, payload.values.len);
for (payload.types) |ty, i| {
types[i] = try ty.copy(allocator);
}
for (payload.values) |val, i| {
values[i] = try val.copy(allocator);
}
return Tag.tuple.create(allocator, .{
.types = types,
.values = values,
});
},
.function => {
const payload = self.castTag(.function).?.data;
const param_types = try allocator.alloc(Type, payload.param_types.len);
@ -1119,6 +1153,24 @@ pub const Type = extern union {
ty = payload.elem_type;
continue;
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
try writer.writeAll("tuple{");
for (tuple.types) |field_ty, i| {
if (i != 0) try writer.writeAll(", ");
const val = tuple.values[i];
if (val.tag() != .unreachable_value) {
try writer.writeAll("comptime ");
}
try field_ty.format("", .{}, writer);
if (val.tag() != .unreachable_value) {
try writer.writeAll(" = ");
try val.format("", .{}, writer);
}
}
try writer.writeAll("}");
return;
},
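// For example, the type of `.{ x, @as(u32, 5) }` (with runtime `x: u32`)
// prints as `tuple{u32, comptime u32 = 5}`.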
.single_const_pointer => {
const pointee_type = ty.castTag(.single_const_pointer).?.data;
try writer.writeAll("*const ");
@ -1480,15 +1532,58 @@ pub const Type = extern union {
return requiresComptime(optionalChild(ty, &buf));
},
.error_union,
.anyframe_T,
.@"struct",
.@"union",
.union_tagged,
.enum_numbered,
.enum_full,
.enum_nonexhaustive,
=> false, // TODO some of these should be `true` depending on their child types
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
for (tuple.types) |field_ty| {
if (requiresComptime(field_ty)) {
return true;
}
}
return false;
},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (struct_obj.requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
struct_obj.requires_comptime = .wip;
for (struct_obj.fields.values()) |field| {
if (requiresComptime(field.ty)) {
struct_obj.requires_comptime = .yes;
return true;
}
}
struct_obj.requires_comptime = .no;
return false;
},
}
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
switch (union_obj.requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
union_obj.requires_comptime = .wip;
for (union_obj.fields.values()) |field| {
if (requiresComptime(field.ty)) {
union_obj.requires_comptime = .yes;
return true;
}
}
union_obj.requires_comptime = .no;
return false;
},
}
},
.error_union => return requiresComptime(errorUnionPayload(ty)),
.anyframe_T => return ty.castTag(.anyframe_T).?.data.requiresComptime(),
.enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty.requiresComptime(),
.enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty.requiresComptime(),
};
}
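The effect, sketched with hypothetical declarations:

const A = struct { t: type }; // requiresComptime: true (a `type` field is comptime-only)
const B = struct { x: u32 }; // requiresComptime: false
// A tuple such as the type of `.{ u32, true }` also requires comptime,
// because its first field has type `type`.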
@ -1697,6 +1792,16 @@ pub const Type = extern union {
return payload.error_set.hasCodeGenBits() or payload.payload.hasCodeGenBits();
},
.tuple => {
const tuple = self.castTag(.tuple).?.data;
for (tuple.types) |ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (ty.hasCodeGenBits()) return true;
}
return false;
},
.void,
.type,
.comptime_int,
@ -1968,6 +2073,21 @@ pub const Type = extern union {
}
return big_align;
},
.tuple => {
const tuple = self.castTag(.tuple).?.data;
var big_align: u32 = 0;
for (tuple.types) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (!field_ty.hasCodeGenBits()) continue;
const field_align = field_ty.abiAlignment(target);
big_align = @maximum(big_align, field_align);
}
return big_align;
},
.enum_full, .enum_nonexhaustive, .enum_simple, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
@ -2037,13 +2157,14 @@ pub const Type = extern union {
.void,
=> 0,
.@"struct" => {
.@"struct", .tuple => {
const field_count = self.structFieldCount();
if (field_count == 0) {
return 0;
}
return self.structFieldOffset(field_count, target);
},
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
@ -2231,6 +2352,11 @@ pub const Type = extern union {
}
return total;
},
.tuple => {
@panic("TODO bitSize tuples");
},
.enum_simple, .enum_full, .enum_nonexhaustive, .enum_numbered => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = ty.intTagType(&buffer);
@ -2926,6 +3052,7 @@ pub const Type = extern union {
pub fn containerLayout(ty: Type) std.builtin.TypeInfo.ContainerLayout {
return switch (ty.tag()) {
.tuple => .Auto,
.@"struct" => ty.castTag(.@"struct").?.data.layout,
.@"union" => ty.castTag(.@"union").?.data.layout,
.union_tagged => ty.castTag(.union_tagged).?.data.layout,
@ -2998,6 +3125,7 @@ pub const Type = extern union {
.array_sentinel => ty.castTag(.array_sentinel).?.data.len,
.array_u8 => ty.castTag(.array_u8).?.data,
.array_u8_sentinel_0 => ty.castTag(.array_u8_sentinel_0).?.data,
.tuple => ty.castTag(.tuple).?.data.types.len,
else => unreachable,
};
@ -3010,6 +3138,7 @@ pub const Type = extern union {
pub fn vectorLen(ty: Type) u32 {
return switch (ty.tag()) {
.vector => @intCast(u32, ty.castTag(.vector).?.data.len),
.tuple => @intCast(u32, ty.castTag(.tuple).?.data.types.len),
else => unreachable,
};
}
@ -3463,6 +3592,17 @@ pub const Type = extern union {
}
return Value.initTag(.empty_struct_value);
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
for (tuple.values) |val| {
if (val.tag() == .unreachable_value) {
return null; // non-comptime field
}
}
return Value.initTag(.empty_struct_value);
},
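// E.g. the type of `.{ 1, true }` (all fields comptime-known) has exactly one
// possible value; a tuple with any runtime field does not.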
.enum_numbered => {
const enum_numbered = ty.castTag(.enum_numbered).?.data;
if (enum_numbered.fields.count() == 1) {
@ -3539,7 +3679,8 @@ pub const Type = extern union {
.Slice, .Many, .C => true,
.One => ty.elemType().zigTypeTag() == .Array,
},
else => false, // TODO tuples are indexable
.Struct => ty.isTuple(),
else => false,
};
}
@ -3766,6 +3907,7 @@ pub const Type = extern union {
return struct_obj.fields.count();
},
.empty_struct => return 0,
.tuple => return ty.castTag(.tuple).?.data.types.len,
else => unreachable,
}
}
@ -3781,6 +3923,7 @@ pub const Type = extern union {
const union_obj = ty.cast(Payload.Union).?.data;
return union_obj.fields.values()[index].ty;
},
.tuple => return ty.castTag(.tuple).?.data.types[index],
else => unreachable,
}
}
@ -3933,6 +4076,31 @@ pub const Type = extern union {
it.offset = std.mem.alignForwardGeneric(u64, it.offset, it.big_align);
return it.offset;
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
var offset: u64 = 0;
var big_align: u32 = 0;
for (tuple.types) |field_ty, i| {
const field_val = tuple.values[i];
if (field_val.tag() != .unreachable_value) {
// comptime field
if (i == index) return offset;
continue;
}
const field_align = field_ty.abiAlignment(target);
big_align = @maximum(big_align, field_align);
offset = std.mem.alignForwardGeneric(u64, offset, field_align);
if (i == index) return offset;
offset += field_ty.abiSize(target);
}
offset = std.mem.alignForwardGeneric(u64, offset, big_align);
return offset;
},
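// Worked example (hypothetical): a tuple with a runtime u8 field followed by a
// runtime u64 field on a 64-bit target places field 0 at offset 0 and aligns
// field 1 forward to offset 8; comptime fields add neither size nor alignment.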
.@"union" => return 0,
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
@ -4182,6 +4350,8 @@ pub const Type = extern union {
array,
array_sentinel,
vector,
/// Possible Value tags for this: @"struct"
tuple,
pointer,
single_const_pointer,
single_mut_pointer,
@ -4326,6 +4496,7 @@ pub const Type = extern union {
.enum_simple => Payload.EnumSimple,
.enum_numbered => Payload.EnumNumbered,
.empty_struct => Payload.ContainerScope,
.tuple => Payload.Tuple,
};
}
@ -4348,6 +4519,10 @@ pub const Type = extern union {
}
};
pub fn isTuple(ty: Type) bool {
return ty.tag() == .tuple;
}
/// The sub-types are named after what fields they contain.
pub const Payload = struct {
tag: Tag,
@ -4490,6 +4665,14 @@ pub const Type = extern union {
data: *Module.Struct,
};
pub const Tuple = struct {
base: Payload = .{ .tag = .tuple },
data: struct {
types: []Type,
values: []Value,
},
};
pub const Union = struct {
base: Payload,
data: *Module.Union,

@ -237,8 +237,6 @@ test "zero-sized array with recursive type definition" {
}
test "type coercion of anon struct literal to array" {
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
const S = struct {
const U = union {
a: u32,
@ -254,6 +252,8 @@ test "type coercion of anon struct literal to array" {
try expect(arr1[1] == 56);
try expect(arr1[2] == 54);
if (@import("builtin").zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO
var x2: U = .{ .a = 42 };
const t2 = .{ x2, .{ .b = true }, .{ .c = "hello" } };
var arr2: [3]U = t2;