Merge pull request #19190 from mlugg/struct-equivalence

compiler: namespace type equivalence based on AST node + captures
Andrew Kelley 2024-03-07 18:41:45 -08:00 committed by GitHub
commit 97aa5f7b8a
39 changed files with 4051 additions and 2907 deletions
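
The change in brief: a container type is now identified by the AST node it is declared at plus the comptime values it captures, rather than by closure instructions in the ZIR stream. A minimal sketch of the user-facing semantics that type identity is keyed on (ordinary Zig, not part of this diff):

const std = @import("std");

fn List(comptime T: type) type {
    // One AST node; `T` is its only capture.
    return struct {
        items: []T,
    };
}

comptime {
    // Same AST node, same captures: the same type.
    std.debug.assert(List(u32) == List(u32));
    // Same AST node, different captures: distinct types.
    std.debug.assert(List(u32) != List(u8));
}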


@ -44,6 +44,9 @@ compile_errors: ArrayListUnmanaged(Zir.Inst.CompileErrors.Item) = .{},
/// The topmost block of the current function.
fn_block: ?*GenZir = null,
fn_var_args: bool = false,
/// Whether we are somewhere within a function. If `true`, any container decls may be
/// generic and thus must be tunneled through closure.
within_fn: bool = false,
/// The return type of the current function. This may be a trivial `Ref`, or
/// otherwise it refers to a `ret_type` instruction.
fn_ret_ty: Zir.Inst.Ref = .none,
@ -2205,7 +2208,7 @@ fn breakExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index) Inn
},
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.namespace, .enum_namespace => break,
.namespace => break,
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.top => unreachable,
}
@ -2279,7 +2282,7 @@ fn continueExpr(parent_gz: *GenZir, parent_scope: *Scope, node: Ast.Node.Index)
try parent_gz.addDefer(defer_scope.index, defer_scope.len);
},
.defer_error => scope = scope.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => break,
.namespace => break,
.top => unreachable,
}
}
@ -2412,7 +2415,7 @@ fn checkLabelRedefinition(astgen: *AstGen, parent_scope: *Scope, label: Ast.Toke
.local_val => scope = scope.cast(Scope.LocalVal).?.parent,
.local_ptr => scope = scope.cast(Scope.LocalPtr).?.parent,
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => break,
.namespace => break,
.top => unreachable,
}
}
@ -2790,7 +2793,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.@"resume",
.@"await",
.ret_err_value_code,
.closure_get,
.ret_ptr,
.ret_type,
.for_len,
@ -2860,7 +2862,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.store_to_inferred_ptr,
.resolve_inferred_alloc,
.set_runtime_safety,
.closure_capture,
.memcpy,
.memset,
.validate_deref,
@ -2928,7 +2929,7 @@ fn countDefers(outer_scope: *Scope, inner_scope: *Scope) struct {
const have_err_payload = defer_scope.remapped_err_code != .none;
need_err_code = need_err_code or have_err_payload;
},
.namespace, .enum_namespace => unreachable,
.namespace => unreachable,
.top => unreachable,
}
}
@ -2998,7 +2999,7 @@ fn genDefers(
.normal_only => continue,
}
},
.namespace, .enum_namespace => unreachable,
.namespace => unreachable,
.top => unreachable,
}
}
@ -3042,7 +3043,7 @@ fn checkUsed(gz: *GenZir, outer_scope: *Scope, inner_scope: *Scope) InnerError!v
scope = s.parent;
},
.defer_normal, .defer_error => scope = scope.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => unreachable,
.namespace => unreachable,
.top => unreachable,
}
}
@ -4052,6 +4053,11 @@ fn fnDecl(
};
defer fn_gz.unstack();
// Set this now, since parameter types, return type, etc may be generic.
const prev_within_fn = astgen.within_fn;
defer astgen.within_fn = prev_within_fn;
astgen.within_fn = true;
const is_pub = fn_proto.visib_token != null;
const is_export = blk: {
const maybe_export_token = fn_proto.extern_export_inline_token orelse break :blk false;
@ -4313,6 +4319,10 @@ fn fnDecl(
const prev_fn_block = astgen.fn_block;
const prev_fn_ret_ty = astgen.fn_ret_ty;
defer {
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
astgen.fn_block = &fn_gz;
astgen.fn_ret_ty = if (is_inferred_error or ret_ref.toIndex() != null) r: {
// We're essentially guaranteed to need the return type at some point,
@ -4321,10 +4331,6 @@ fn fnDecl(
// return type now so the rest of the function can use it.
break :r try fn_gz.addNode(.ret_type, decl_node);
} else ret_ref;
defer {
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
const prev_var_args = astgen.fn_var_args;
astgen.fn_var_args = is_var_args;
@ -4732,7 +4738,7 @@ fn testDecl(
},
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => {
.namespace => {
const ns = s.cast(Scope.Namespace).?;
if (ns.decls.get(name_str_index)) |i| {
if (found_already) |f| {
@ -4770,11 +4776,14 @@ fn testDecl(
};
defer fn_block.unstack();
const prev_within_fn = astgen.within_fn;
const prev_fn_block = astgen.fn_block;
const prev_fn_ret_ty = astgen.fn_ret_ty;
astgen.within_fn = true;
astgen.fn_block = &fn_block;
astgen.fn_ret_ty = .anyerror_void_error_union_type;
defer {
astgen.within_fn = prev_within_fn;
astgen.fn_block = prev_fn_block;
astgen.fn_ret_ty = prev_fn_ret_ty;
}
@ -4849,10 +4858,10 @@ fn structDeclInner(
try gz.setStruct(decl_inst, .{
.src_node = node,
.layout = layout,
.captures_len = 0,
.fields_len = 0,
.decls_len = 0,
.backing_int_ref = .none,
.backing_int_body_len = 0,
.has_backing_int = false,
.known_non_opv = false,
.known_comptime_only = false,
.is_tuple = false,
@ -4873,6 +4882,7 @@ fn structDeclInner(
.node = node,
.inst = decl_inst,
.declaring_gz = gz,
.maybe_generic = astgen.within_fn,
};
defer namespace.deinit(gpa);
@ -5142,10 +5152,10 @@ fn structDeclInner(
try gz.setStruct(decl_inst, .{
.src_node = node,
.layout = layout,
.captures_len = @intCast(namespace.captures.count()),
.fields_len = field_count,
.decls_len = decl_count,
.backing_int_ref = backing_int_ref,
.backing_int_body_len = @intCast(backing_int_body_len),
.has_backing_int = backing_int_ref != .none,
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
.is_tuple = is_tuple,
@ -5159,15 +5169,22 @@ fn structDeclInner(
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
const bodies_slice = astgen.scratch.items[bodies_start..];
try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len +
decls_slice.len + fields_slice.len + bodies_slice.len);
astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]);
try astgen.extra.ensureUnusedCapacity(gpa, backing_int_body_len + 2 +
decls_slice.len + namespace.captures.count() + fields_slice.len + bodies_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
if (backing_int_ref != .none) {
astgen.extra.appendAssumeCapacity(@intCast(backing_int_body_len));
if (backing_int_body_len == 0) {
astgen.extra.appendAssumeCapacity(@intFromEnum(backing_int_ref));
} else {
astgen.extra.appendSliceAssumeCapacity(astgen.scratch.items[scratch_top..][0..backing_int_body_len]);
}
}
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
astgen.extra.appendSliceAssumeCapacity(bodies_slice);
block_scope.unstack();
try gz.addNamespaceCaptures(&namespace);
return decl_inst.toRef();
}
@ -5190,6 +5207,7 @@ fn unionDeclInner(
.node = node,
.inst = decl_inst,
.declaring_gz = gz,
.maybe_generic = astgen.within_fn,
};
defer namespace.deinit(gpa);
@ -5368,6 +5386,7 @@ fn unionDeclInner(
.src_node = node,
.layout = layout,
.tag_type = arg_inst,
.captures_len = @intCast(namespace.captures.count()),
.body_len = body_len,
.fields_len = field_count,
.decls_len = decl_count,
@ -5379,13 +5398,13 @@ fn unionDeclInner(
wip_members.finishBits(bits_per_field);
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len + body_len + fields_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.appendBodyWithFixups(body);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
block_scope.unstack();
try gz.addNamespaceCaptures(&namespace);
return decl_inst.toRef();
}
@ -5537,6 +5556,7 @@ fn containerDecl(
.node = node,
.inst = decl_inst,
.declaring_gz = gz,
.maybe_generic = astgen.within_fn,
};
defer namespace.deinit(gpa);
@ -5555,7 +5575,7 @@ fn containerDecl(
defer block_scope.unstack();
_ = try astgen.scanDecls(&namespace, container_decl.ast.members);
namespace.base.tag = .enum_namespace;
namespace.base.tag = .namespace;
const arg_inst: Zir.Inst.Ref = if (container_decl.ast.arg != 0)
try comptimeExpr(&block_scope, &namespace.base, coerced_type_ri, container_decl.ast.arg)
@ -5586,7 +5606,6 @@ fn containerDecl(
if (member_node == counts.nonexhaustive_node)
continue;
fields_hasher.update(tree.getNodeSource(member_node));
namespace.base.tag = .namespace;
var member = switch (try containerMember(&block_scope, &namespace.base, &wip_members, member_node)) {
.decl => continue,
.field => |field| field,
@ -5630,7 +5649,6 @@ fn containerDecl(
},
);
}
namespace.base.tag = .enum_namespace;
const tag_value_inst = try expr(&block_scope, &namespace.base, .{ .rl = .{ .ty = arg_inst } }, member.ast.value_expr);
wip_members.appendToField(@intFromEnum(tag_value_inst));
}
@ -5676,6 +5694,7 @@ fn containerDecl(
.src_node = node,
.nonexhaustive = nonexhaustive,
.tag_type = arg_inst,
.captures_len = @intCast(namespace.captures.count()),
.body_len = body_len,
.fields_len = @intCast(counts.total_fields),
.decls_len = @intCast(counts.decls),
@ -5685,13 +5704,13 @@ fn containerDecl(
wip_members.finishBits(bits_per_field);
const decls_slice = wip_members.declsSlice();
const fields_slice = wip_members.fieldsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len + body_len + fields_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
astgen.appendBodyWithFixups(body);
astgen.extra.appendSliceAssumeCapacity(fields_slice);
block_scope.unstack();
try gz.addNamespaceCaptures(&namespace);
return rvalue(gz, ri, decl_inst.toRef(), node);
},
.keyword_opaque => {
@ -5704,6 +5723,7 @@ fn containerDecl(
.node = node,
.inst = decl_inst,
.declaring_gz = gz,
.maybe_generic = astgen.within_fn,
};
defer namespace.deinit(gpa);
@ -5733,16 +5753,17 @@ fn containerDecl(
try gz.setOpaque(decl_inst, .{
.src_node = node,
.captures_len = @intCast(namespace.captures.count()),
.decls_len = decl_count,
});
wip_members.finishBits(0);
const decls_slice = wip_members.declsSlice();
try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len);
try astgen.extra.ensureUnusedCapacity(gpa, namespace.captures.count() + decls_slice.len);
astgen.extra.appendSliceAssumeCapacity(@ptrCast(namespace.captures.keys()));
astgen.extra.appendSliceAssumeCapacity(decls_slice);
block_scope.unstack();
try gz.addNamespaceCaptures(&namespace);
return rvalue(gz, ri, decl_inst.toRef(), node);
},
else => unreachable,
@ -8238,12 +8259,17 @@ fn localVarRef(
ident_token: Ast.TokenIndex,
) InnerError!Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
const name_str_index = try astgen.identAsString(ident_token);
var s = scope;
var found_already: ?Ast.Node.Index = null; // we have found a decl with the same name already
var found_needs_tunnel: bool = undefined; // defined when `found_already != null`
var found_namespaces_out: u32 = undefined; // defined when `found_already != null`
// The number of namespaces above `gz` we currently are
var num_namespaces_out: u32 = 0;
var capturing_namespace: ?*Scope.Namespace = null;
// defined when `num_namespaces_out != 0`
var capturing_namespace: *Scope.Namespace = undefined;
while (true) switch (s.tag) {
.local_val => {
const local_val = s.cast(Scope.LocalVal).?;
@ -8257,15 +8283,13 @@ fn localVarRef(
local_val.used = ident_token;
}
const value_inst = try tunnelThroughClosure(
const value_inst = if (num_namespaces_out != 0) try tunnelThroughClosure(
gz,
ident,
num_namespaces_out,
capturing_namespace,
local_val.inst,
local_val.token_src,
gpa,
);
.{ .ref = local_val.inst },
.{ .token = local_val.token_src },
) else local_val.inst;
return rvalueNoCoercePreRef(gz, ri, value_inst, ident);
}
@ -8285,19 +8309,17 @@ fn localVarRef(
const ident_name = try astgen.identifierTokenString(ident_token);
return astgen.failNodeNotes(ident, "mutable '{s}' not accessible from here", .{ident_name}, &.{
try astgen.errNoteTok(local_ptr.token_src, "declared mutable here", .{}),
try astgen.errNoteNode(capturing_namespace.?.node, "crosses namespace boundary here", .{}),
try astgen.errNoteNode(capturing_namespace.node, "crosses namespace boundary here", .{}),
});
}
const ptr_inst = try tunnelThroughClosure(
const ptr_inst = if (num_namespaces_out != 0) try tunnelThroughClosure(
gz,
ident,
num_namespaces_out,
capturing_namespace,
local_ptr.ptr,
local_ptr.token_src,
gpa,
);
.{ .ref = local_ptr.ptr },
.{ .token = local_ptr.token_src },
) else local_ptr.ptr;
switch (ri.rl) {
.ref, .ref_coerced_ty => {
@ -8314,7 +8336,7 @@ fn localVarRef(
},
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => {
.namespace => {
const ns = s.cast(Scope.Namespace).?;
if (ns.decls.get(name_str_index)) |i| {
if (found_already) |f| {
@ -8325,8 +8347,10 @@ fn localVarRef(
}
// We found a match but must continue looking for ambiguous references to decls.
found_already = i;
found_needs_tunnel = ns.maybe_generic;
found_namespaces_out = num_namespaces_out;
}
if (s.tag == .namespace) num_namespaces_out += 1;
num_namespaces_out += 1;
capturing_namespace = ns;
s = ns.parent;
},
@ -8339,6 +8363,29 @@ fn localVarRef(
// Decl references happen by name rather than ZIR index so that when unrelated
// decls are modified, ZIR code containing references to them can be unmodified.
if (found_namespaces_out > 0 and found_needs_tunnel) {
switch (ri.rl) {
.ref, .ref_coerced_ty => return tunnelThroughClosure(
gz,
ident,
found_namespaces_out,
.{ .decl_ref = name_str_index },
.{ .node = found_already.? },
),
else => {
const result = try tunnelThroughClosure(
gz,
ident,
found_namespaces_out,
.{ .decl_val = name_str_index },
.{ .node = found_already.? },
);
return rvalueNoCoercePreRef(gz, ri, result, ident);
},
}
}
switch (ri.rl) {
.ref, .ref_coerced_ty => return gz.addStrTok(.decl_ref, name_str_index, ident_token),
else => {
@ -8348,41 +8395,90 @@ fn localVarRef(
}
}
/// Adds a capture to a namespace, if needed.
/// Returns the index of the closure_capture instruction.
/// Access a ZIR instruction through closure. May tunnel through arbitrarily
/// many namespaces, adding closure captures as required.
/// Returns the index of the `closure_get` instruction added to `gz`.
fn tunnelThroughClosure(
gz: *GenZir,
/// The node which references the value to be captured.
inner_ref_node: Ast.Node.Index,
/// The number of namespaces being tunnelled through. At least 1.
num_tunnels: u32,
ns: ?*Scope.Namespace,
value: Zir.Inst.Ref,
token: Ast.TokenIndex,
gpa: Allocator,
/// The value being captured.
value: union(enum) {
ref: Zir.Inst.Ref,
decl_val: Zir.NullTerminatedString,
decl_ref: Zir.NullTerminatedString,
},
/// The location of the value's declaration.
decl_src: union(enum) {
token: Ast.TokenIndex,
node: Ast.Node.Index,
},
) !Zir.Inst.Ref {
// For trivial values, we don't need a tunnel.
// Just return the ref.
if (num_tunnels == 0 or value.toIndex() == null) {
return value;
switch (value) {
.ref => |v| if (v.toIndex() == null) return v, // trivial value; no tunnel needed
.decl_val, .decl_ref => {},
}
// Otherwise we need a tunnel. Check if this namespace
// already has one for this value.
const gop = try ns.?.captures.getOrPut(gpa, value.toIndex().?);
if (!gop.found_existing) {
// Make a new capture for this value but don't add it to the declaring_gz yet
try gz.astgen.instructions.append(gz.astgen.gpa, .{
.tag = .closure_capture,
.data = .{ .un_tok = .{
.operand = value,
.src_tok = ns.?.declaring_gz.?.tokenIndexToRelative(token),
} },
const astgen = gz.astgen;
const gpa = astgen.gpa;
// Otherwise we need a tunnel. First, figure out the path of namespaces we
// are tunneling through. This is usually only going to be one or two, so
// use an SFBA to optimize for the common case.
var sfba = std.heap.stackFallback(@sizeOf(usize) * 2, astgen.arena);
var intermediate_tunnels = try sfba.get().alloc(*Scope.Namespace, num_tunnels - 1);
const root_ns = ns: {
var i: usize = num_tunnels - 1;
var scope: *Scope = gz.parent;
while (i > 0) {
if (scope.cast(Scope.Namespace)) |mid_ns| {
i -= 1;
intermediate_tunnels[i] = mid_ns;
}
scope = scope.parent().?;
}
while (true) {
if (scope.cast(Scope.Namespace)) |ns| break :ns ns;
scope = scope.parent().?;
}
};
// Now that we know the scopes we're tunneling through, begin adding
// captures as required, starting with the outermost namespace.
const root_capture = Zir.Inst.Capture.wrap(switch (value) {
.ref => |v| .{ .instruction = v.toIndex().? },
.decl_val => |str| .{ .decl_val = str },
.decl_ref => |str| .{ .decl_ref = str },
});
var cur_capture_index = std.math.cast(
u16,
(try root_ns.captures.getOrPut(gpa, root_capture)).index,
) orelse return astgen.failNodeNotes(root_ns.node, "this compiler implementation only supports up to 65536 captures per namespace", .{}, &.{
switch (decl_src) {
.token => |t| try astgen.errNoteTok(t, "captured value here", .{}),
.node => |n| try astgen.errNoteNode(n, "captured value here", .{}),
},
try astgen.errNoteNode(inner_ref_node, "value used here", .{}),
});
for (intermediate_tunnels) |tunnel_ns| {
cur_capture_index = std.math.cast(
u16,
(try tunnel_ns.captures.getOrPut(gpa, Zir.Inst.Capture.wrap(.{ .nested = cur_capture_index }))).index,
) orelse return astgen.failNodeNotes(tunnel_ns.node, "this compiler implementation only supports up to 65536 captures per namespace", .{}, &.{
switch (decl_src) {
.token => |t| try astgen.errNoteTok(t, "captured value here", .{}),
.node => |n| try astgen.errNoteNode(n, "captured value here", .{}),
},
try astgen.errNoteNode(inner_ref_node, "value used here", .{}),
});
gop.value_ptr.* = @enumFromInt(gz.astgen.instructions.len - 1);
}
// Add an instruction to get the value from the closure into
// our current context
return try gz.addInstNode(.closure_get, gop.value_ptr.*, inner_ref_node);
// Add an instruction to get the value from the closure.
return gz.addExtendedNodeSmall(.closure_get, inner_ref_node, cur_capture_index);
}
fn stringLiteral(
@ -9095,7 +9191,7 @@ fn builtinCall(
},
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.namespace, .enum_namespace => {
.namespace => {
const ns = s.cast(Scope.Namespace).?;
if (ns.decls.get(decl_name)) |i| {
if (found_already) |f| {
@ -11605,7 +11701,7 @@ const Scope = struct {
}
if (T == Namespace) {
switch (base.tag) {
.namespace, .enum_namespace => return @fieldParentPtr(T, "base", base),
.namespace => return @fieldParentPtr(T, "base", base),
else => return null,
}
}
@ -11621,7 +11717,7 @@ const Scope = struct {
.local_val => base.cast(LocalVal).?.parent,
.local_ptr => base.cast(LocalPtr).?.parent,
.defer_normal, .defer_error => base.cast(Defer).?.parent,
.namespace, .enum_namespace => base.cast(Namespace).?.parent,
.namespace => base.cast(Namespace).?.parent,
.top => null,
};
}
@ -11633,7 +11729,6 @@ const Scope = struct {
defer_normal,
defer_error,
namespace,
enum_namespace,
top,
};
@ -11720,14 +11815,14 @@ const Scope = struct {
decls: std.AutoHashMapUnmanaged(Zir.NullTerminatedString, Ast.Node.Index) = .{},
node: Ast.Node.Index,
inst: Zir.Inst.Index,
maybe_generic: bool,
/// The astgen scope containing this namespace.
/// Only valid during astgen.
declaring_gz: ?*GenZir,
/// Map from the raw captured value to the instruction
/// ref of the capture for decls in this namespace
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
/// Set of captures used by this namespace.
captures: std.AutoArrayHashMapUnmanaged(Zir.Inst.Capture, void) = .{},
fn deinit(self: *Namespace, gpa: Allocator) void {
self.decls.deinit(gpa);
@ -11787,12 +11882,6 @@ const GenZir = struct {
// Set if this GenZir is a defer or it is inside a defer.
any_defer_node: Ast.Node.Index = 0,
/// Namespace members are lazy. When executing a decl within a namespace,
/// any references to external instructions need to be treated specially.
/// This list tracks those references. See also .closure_capture and .closure_get.
/// Keys are the raw instruction index, values are the closure_capture instruction.
captures: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},
const unstacked_top = std.math.maxInt(usize);
/// Call unstack before adding any new instructions to containing GenZir.
fn unstack(self: *GenZir) void {
@ -12534,6 +12623,30 @@ const GenZir = struct {
return new_index.toRef();
}
fn addExtendedNodeSmall(
gz: *GenZir,
opcode: Zir.Inst.Extended,
src_node: Ast.Node.Index,
small: u16,
) !Zir.Inst.Ref {
const astgen = gz.astgen;
const gpa = astgen.gpa;
try gz.instructions.ensureUnusedCapacity(gpa, 1);
try astgen.instructions.ensureUnusedCapacity(gpa, 1);
const new_index: Zir.Inst.Index = @enumFromInt(astgen.instructions.len);
astgen.instructions.appendAssumeCapacity(.{
.tag = .extended,
.data = .{ .extended = .{
.opcode = opcode,
.small = small,
.operand = @bitCast(gz.nodeIndexToRelative(src_node)),
} },
});
gz.instructions.appendAssumeCapacity(new_index);
return new_index.toRef();
}
fn addUnTok(
gz: *GenZir,
tag: Zir.Inst.Tag,
@ -12957,10 +13070,10 @@ const GenZir = struct {
fn setStruct(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
captures_len: u32,
fields_len: u32,
decls_len: u32,
backing_int_ref: Zir.Inst.Ref,
backing_int_body_len: u32,
has_backing_int: bool,
layout: std.builtin.Type.ContainerLayout,
known_non_opv: bool,
known_comptime_only: bool,
@ -12978,7 +13091,7 @@ const GenZir = struct {
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 4);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.StructDecl).Struct.fields.len + 3);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.StructDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
@ -12987,26 +13100,24 @@ const GenZir = struct {
.src_node = gz.nodeIndexToRelative(args.src_node),
});
if (args.captures_len != 0) {
astgen.extra.appendAssumeCapacity(args.captures_len);
}
if (args.fields_len != 0) {
astgen.extra.appendAssumeCapacity(args.fields_len);
}
if (args.decls_len != 0) {
astgen.extra.appendAssumeCapacity(args.decls_len);
}
if (args.backing_int_ref != .none) {
astgen.extra.appendAssumeCapacity(args.backing_int_body_len);
if (args.backing_int_body_len == 0) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.backing_int_ref));
}
}
astgen.instructions.set(@intFromEnum(inst), .{
.tag = .extended,
.data = .{ .extended = .{
.opcode = .struct_decl,
.small = @bitCast(Zir.Inst.StructDecl.Small{
.has_captures_len = args.captures_len != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
.has_backing_int = args.backing_int_ref != .none,
.has_backing_int = args.has_backing_int,
.known_non_opv = args.known_non_opv,
.known_comptime_only = args.known_comptime_only,
.is_tuple = args.is_tuple,
@ -13024,6 +13135,7 @@ const GenZir = struct {
fn setUnion(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
tag_type: Zir.Inst.Ref,
captures_len: u32,
body_len: u32,
fields_len: u32,
decls_len: u32,
@ -13039,7 +13151,7 @@ const GenZir = struct {
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 4);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.UnionDecl).Struct.fields.len + 5);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.UnionDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
@ -13051,6 +13163,9 @@ const GenZir = struct {
if (args.tag_type != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
}
if (args.captures_len != 0) {
astgen.extra.appendAssumeCapacity(args.captures_len);
}
if (args.body_len != 0) {
astgen.extra.appendAssumeCapacity(args.body_len);
}
@ -13066,6 +13181,7 @@ const GenZir = struct {
.opcode = .union_decl,
.small = @bitCast(Zir.Inst.UnionDecl.Small{
.has_tag_type = args.tag_type != .none,
.has_captures_len = args.captures_len != 0,
.has_body_len = args.body_len != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
@ -13082,6 +13198,7 @@ const GenZir = struct {
fn setEnum(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
tag_type: Zir.Inst.Ref,
captures_len: u32,
body_len: u32,
fields_len: u32,
decls_len: u32,
@ -13095,7 +13212,7 @@ const GenZir = struct {
const fields_hash_arr: [4]u32 = @bitCast(args.fields_hash);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 4);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.EnumDecl).Struct.fields.len + 5);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.EnumDecl{
.fields_hash_0 = fields_hash_arr[0],
.fields_hash_1 = fields_hash_arr[1],
@ -13107,6 +13224,9 @@ const GenZir = struct {
if (args.tag_type != .none) {
astgen.extra.appendAssumeCapacity(@intFromEnum(args.tag_type));
}
if (args.captures_len != 0) {
astgen.extra.appendAssumeCapacity(args.captures_len);
}
if (args.body_len != 0) {
astgen.extra.appendAssumeCapacity(args.body_len);
}
@ -13122,6 +13242,7 @@ const GenZir = struct {
.opcode = .enum_decl,
.small = @bitCast(Zir.Inst.EnumDecl.Small{
.has_tag_type = args.tag_type != .none,
.has_captures_len = args.captures_len != 0,
.has_body_len = args.body_len != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
@ -13135,6 +13256,7 @@ const GenZir = struct {
fn setOpaque(gz: *GenZir, inst: Zir.Inst.Index, args: struct {
src_node: Ast.Node.Index,
captures_len: u32,
decls_len: u32,
}) !void {
const astgen = gz.astgen;
@ -13142,11 +13264,14 @@ const GenZir = struct {
assert(args.src_node != 0);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len + 1);
try astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.OpaqueDecl).Struct.fields.len + 2);
const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.OpaqueDecl{
.src_node = gz.nodeIndexToRelative(args.src_node),
});
if (args.captures_len != 0) {
astgen.extra.appendAssumeCapacity(args.captures_len);
}
if (args.decls_len != 0) {
astgen.extra.appendAssumeCapacity(args.decls_len);
}
@ -13155,6 +13280,7 @@ const GenZir = struct {
.data = .{ .extended = .{
.opcode = .opaque_decl,
.small = @bitCast(Zir.Inst.OpaqueDecl.Small{
.has_captures_len = args.captures_len != 0,
.has_decls_len = args.decls_len != 0,
.name_strategy = gz.anon_name_strategy,
}),
@ -13197,15 +13323,6 @@ const GenZir = struct {
}
}
fn addNamespaceCaptures(gz: *GenZir, namespace: *Scope.Namespace) !void {
if (namespace.captures.count() > 0) {
try gz.instructions.ensureUnusedCapacity(gz.astgen.gpa, namespace.captures.count());
for (namespace.captures.values()) |capture| {
gz.instructions.appendAssumeCapacity(capture);
}
}
}
fn addDbgVar(gz: *GenZir, tag: Zir.Inst.Tag, name: Zir.NullTerminatedString, inst: Zir.Inst.Ref) !void {
if (gz.is_comptime) return;
@ -13305,7 +13422,7 @@ fn detectLocalShadowing(
}
s = local_ptr.parent;
},
.namespace, .enum_namespace => {
.namespace => {
outer_scope = true;
const ns = s.cast(Scope.Namespace).?;
const decl_node = ns.decls.get(ident_name) orelse {
@ -13478,7 +13595,7 @@ fn scanDecls(astgen: *AstGen, namespace: *Scope.Namespace, members: []const Ast.
}
s = local_ptr.parent;
},
.namespace, .enum_namespace => s = s.cast(Scope.Namespace).?.parent,
.namespace => s = s.cast(Scope.Namespace).?.parent,
.gen_zir => s = s.cast(GenZir).?.parent,
.defer_normal, .defer_error => s = s.cast(Scope.Defer).?.parent,
.top => break,

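The AstGen changes above replace the old closure_capture/closure_get instruction pair with per-namespace capture lists: tunnelThroughClosure records an `instruction`, `decl_val`, or `decl_ref` capture on the outermost namespace, a chain of `nested` captures on each intermediate namespace, and finally emits one extended `closure_get` carrying the innermost capture index. A hedged sketch of source that exercises the multi-namespace path (identifiers are illustrative):

fn Wrapper(comptime x: u32) type {
    // Namespace 1: records an `instruction` capture of `x`.
    return struct {
        // Namespace 2: records a `nested` capture pointing at namespace 1's.
        pub const Inner = struct {
            // This use of `x` lowers to a `closure_get` with namespace 2's
            // capture index.
            pub const doubled = 2 * x;
        };
    };
}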

@ -1004,17 +1004,6 @@ pub const Inst = struct {
@"resume",
@"await",
/// When a type or function refers to a comptime value from an outer
/// scope, that forms a closure over comptime value. The outer scope
/// will record a capture of that value, which encodes its current state
/// and marks it to persist. Uses `un_tok` field. Operand is the
/// instruction value to capture.
closure_capture,
/// The inner scope of a closure uses closure_get to retrieve the value
/// stored by the outer scope. Uses `inst_node` field. Operand is the
/// closure_capture instruction ref.
closure_get,
/// A defer statement.
/// Uses the `defer` union field.
@"defer",
@ -1251,8 +1240,6 @@ pub const Inst = struct {
.@"await",
.ret_err_value_code,
.extended,
.closure_get,
.closure_capture,
.ret_ptr,
.ret_type,
.@"try",
@ -1542,8 +1529,6 @@ pub const Inst = struct {
.@"resume",
.@"await",
.ret_err_value_code,
.closure_get,
.closure_capture,
.@"break",
.break_inline,
.condbr,
@ -1829,9 +1814,6 @@ pub const Inst = struct {
.@"resume" = .un_node,
.@"await" = .un_node,
.closure_capture = .un_tok,
.closure_get = .inst_node,
.@"defer" = .@"defer",
.defer_err_code = .defer_err_code,
@ -2074,6 +2056,10 @@ pub const Inst = struct {
/// `operand` is payload index to `RestoreErrRetIndex`.
/// `small` is undefined.
restore_err_ret_index,
/// Retrieves a value from the current type declaration scope's closure.
/// `operand` is `src_node: i32`.
/// `small` is closure index.
closure_get,
/// Used as a placeholder instruction which is just a dummy index for Sema to replace
/// with a specific value. For instance, this is used for the capture of an `errdefer`.
/// This should never appear in a body.
@ -2949,7 +2935,7 @@ pub const Inst = struct {
/// These are stored in trailing data in `extra` for each prong.
pub const ProngInfo = packed struct(u32) {
body_len: u28,
capture: Capture,
capture: ProngInfo.Capture,
is_inline: bool,
has_tag_capture: bool,
@ -3013,19 +2999,21 @@ pub const Inst = struct {
};
/// Trailing:
/// 0. fields_len: u32, // if has_fields_len
/// 1. decls_len: u32, // if has_decls_len
/// 2. backing_int_body_len: u32, // if has_backing_int
/// 3. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 4. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 5. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 6. flags: u32 // for every 8 fields
/// 0. captures_len: u32 // if has_captures_len
/// 1. fields_len: u32, // if has_fields_len
/// 2. decls_len: u32, // if has_decls_len
/// 3. capture: Capture // for every captures_len
/// 4. backing_int_body_len: u32, // if has_backing_int
/// 5. backing_int_ref: Ref, // if has_backing_int and backing_int_body_len is 0
/// 6. backing_int_body_inst: Inst, // if has_backing_int and backing_int_body_len is > 0
/// 7. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 8. flags: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has an align expression
/// 0b00X0: whether corresponding field has a default expression
/// 0b0X00: whether corresponding field is comptime
/// 0bX000: whether corresponding field has a type expression
/// 7. fields: { // for every fields_len
/// 9. fields: { // for every fields_len
/// field_name: u32, // if !is_tuple
/// doc_comment: NullTerminatedString, // .empty if no doc comment
/// field_type: Ref, // if corresponding bit is not set. none means anytype.
@ -3033,7 +3021,7 @@ pub const Inst = struct {
/// align_body_len: u32, // if corresponding bit is set
/// init_body_len: u32, // if corresponding bit is set
/// }
/// 8. bodies: { // for every fields_len
/// 10. bodies: { // for every fields_len
/// field_type_body_inst: Inst, // for each field_type_body_len
/// align_body_inst: Inst, // for each align_body_len
/// init_body_inst: Inst, // for each init_body_len
@ -3052,6 +3040,7 @@ pub const Inst = struct {
}
pub const Small = packed struct {
has_captures_len: bool,
has_fields_len: bool,
has_decls_len: bool,
has_backing_int: bool,
@ -3063,10 +3052,59 @@ pub const Inst = struct {
any_default_inits: bool,
any_comptime_fields: bool,
any_aligned_fields: bool,
_: u3 = undefined,
_: u2 = undefined,
};
};
/// Represents a single value being captured in a type declaration's closure.
pub const Capture = packed struct(u32) {
tag: enum(u2) {
/// `data` is a `u16` index into the parent closure.
nested,
/// `data` is a `Zir.Inst.Index` to an instruction whose value is being captured.
instruction,
/// `data` is a `NullTerminatedString` to a decl name.
decl_val,
/// `data` is a `NullTerminatedString` to a decl name.
decl_ref,
},
data: u30,
pub const Unwrapped = union(enum) {
nested: u16,
instruction: Zir.Inst.Index,
decl_val: NullTerminatedString,
decl_ref: NullTerminatedString,
};
pub fn wrap(cap: Unwrapped) Capture {
return switch (cap) {
.nested => |idx| .{
.tag = .nested,
.data = idx,
},
.instruction => |inst| .{
.tag = .instruction,
.data = @intCast(@intFromEnum(inst)),
},
.decl_val => |str| .{
.tag = .decl_val,
.data = @intCast(@intFromEnum(str)),
},
.decl_ref => |str| .{
.tag = .decl_ref,
.data = @intCast(@intFromEnum(str)),
},
};
}
pub fn unwrap(cap: Capture) Unwrapped {
return switch (cap.tag) {
.nested => .{ .nested = @intCast(cap.data) },
.instruction => .{ .instruction = @enumFromInt(cap.data) },
.decl_val => .{ .decl_val = @enumFromInt(cap.data) },
.decl_ref => .{ .decl_ref = @enumFromInt(cap.data) },
};
}
};
pub const NameStrategy = enum(u2) {
/// Use the same name as the parent declaration name.
/// e.g. `const Foo = struct {...};`.
@ -3098,14 +3136,16 @@ pub const Inst = struct {
/// Trailing:
/// 0. tag_type: Ref, // if has_tag_type
/// 1. body_len: u32, // if has_body_len
/// 2. fields_len: u32, // if has_fields_len
/// 3. decls_len: u32, // if has_decls_len
/// 4. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 5. inst: Index // for every body_len
/// 6. has_bits: u32 // for every 32 fields
/// 1. captures_len: u32, // if has_captures_len
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. capture: Capture // for every captures_len
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 32 fields
/// - the bit is whether corresponding field has a value expression
/// 7. fields: { // for every fields_len
/// 9. fields: { // for every fields_len
/// field_name: u32,
/// doc_comment: u32, // .empty if no doc_comment
/// value: Ref, // if corresponding bit is set
@ -3125,29 +3165,32 @@ pub const Inst = struct {
pub const Small = packed struct {
has_tag_type: bool,
has_captures_len: bool,
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
nonexhaustive: bool,
_: u9 = undefined,
_: u8 = undefined,
};
};
/// Trailing:
/// 0. tag_type: Ref, // if has_tag_type
/// 1. body_len: u32, // if has_body_len
/// 2. fields_len: u32, // if has_fields_len
/// 3. decls_len: u32, // if has_decls_len
/// 4. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 5. inst: Index // for every body_len
/// 6. has_bits: u32 // for every 8 fields
/// 1. captures_len: u32 // if has_captures_len
/// 2. body_len: u32, // if has_body_len
/// 3. fields_len: u32, // if has_fields_len
/// 4. decls_len: u32, // if has_decls_len
/// 5. capture: Capture // for every captures_len
/// 6. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 7. inst: Index // for every body_len
/// 8. has_bits: u32 // for every 8 fields
/// - sets of 4 bits:
/// 0b000X: whether corresponding field has a type expression
/// 0b00X0: whether corresponding field has an align expression
/// 0b0X00: whether corresponding field has a tag value expression
/// 0bX000: unused
/// 7. fields: { // for every fields_len
/// 9. fields: { // for every fields_len
/// field_name: NullTerminatedString, // null terminated string index
/// doc_comment: NullTerminatedString, // .empty if no doc comment
/// field_type: Ref, // if corresponding bit is set
@ -3170,6 +3213,7 @@ pub const Inst = struct {
pub const Small = packed struct {
has_tag_type: bool,
has_captures_len: bool,
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
@ -3183,13 +3227,15 @@ pub const Inst = struct {
/// true | false | union(T) { }
auto_enum_tag: bool,
any_aligned_fields: bool,
_: u6 = undefined,
_: u5 = undefined,
};
};
/// Trailing:
/// 0. decls_len: u32, // if has_decls_len
/// 1. decl: Index, // for every decls_len; points to a `declaration` instruction
/// 0. captures_len: u32, // if has_captures_len
/// 1. decls_len: u32, // if has_decls_len
/// 2. capture: Capture, // for every captures_len
/// 3. decl: Index, // for every decls_len; points to a `declaration` instruction
pub const OpaqueDecl = struct {
src_node: i32,
@ -3198,9 +3244,10 @@ pub const Inst = struct {
}
pub const Small = packed struct {
has_captures_len: bool,
has_decls_len: bool,
name_strategy: NameStrategy,
_: u13 = undefined,
_: u12 = undefined,
};
};
@ -3502,6 +3549,11 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
.struct_decl => {
const small: Inst.StructDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.StructDecl).Struct.fields.len);
const captures_len = if (small.has_captures_len) captures_len: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :captures_len captures_len;
} else 0;
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
const decls_len = zir.extra[extra_index];
@ -3509,6 +3561,8 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
extra_index += captures_len;
if (small.has_backing_int) {
const backing_int_body_len = zir.extra[extra_index];
extra_index += 1; // backing_int_body_len
@ -3529,6 +3583,11 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
const small: Inst.EnumDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.EnumDecl).Struct.fields.len);
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) captures_len: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :captures_len captures_len;
} else 0;
extra_index += @intFromBool(small.has_body_len);
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
@ -3537,6 +3596,8 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
extra_index += captures_len;
return .{
.extra_index = extra_index,
.decls_remaining = decls_len,
@ -3547,6 +3608,11 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
const small: Inst.UnionDecl.Small = @bitCast(extended.small);
var extra_index: u32 = @intCast(extended.operand + @typeInfo(Inst.UnionDecl).Struct.fields.len);
extra_index += @intFromBool(small.has_tag_type);
const captures_len = if (small.has_captures_len) captures_len: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :captures_len captures_len;
} else 0;
extra_index += @intFromBool(small.has_body_len);
extra_index += @intFromBool(small.has_fields_len);
const decls_len = if (small.has_decls_len) decls_len: {
@ -3555,6 +3621,8 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
break :decls_len decls_len;
} else 0;
extra_index += captures_len;
return .{
.extra_index = extra_index,
.decls_remaining = decls_len,
@ -3569,6 +3637,13 @@ pub fn declIterator(zir: Zir, decl_inst: Zir.Inst.Index) DeclIterator {
extra_index += 1;
break :decls_len decls_len;
} else 0;
const captures_len = if (small.has_captures_len) captures_len: {
const captures_len = zir.extra[extra_index];
extra_index += 1;
break :captures_len captures_len;
} else 0;
extra_index += captures_len;
return .{
.extra_index = extra_index,

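The Zir.zig changes above move captures out of the instruction stream and into each container declaration's trailing `extra` data, encoded by the new `Capture` packed struct: a 2-bit tag plus a 30-bit payload in a single u32 (hence the u16 `nested` index and the 65536-captures-per-namespace limit enforced in AstGen). A minimal round-trip sketch, assuming the `Zir` module from this diff (import path illustrative):

const std = @import("std");
const Zir = @import("Zir.zig");

test "Capture wrap/unwrap round-trip" {
    comptime std.debug.assert(@bitSizeOf(Zir.Inst.Capture) == 32);
    const cap = Zir.Inst.Capture.wrap(.{ .nested = 42 });
    switch (cap.unwrap()) {
        .nested => |idx| try std.testing.expectEqual(@as(u16, 42), idx),
        else => unreachable,
    }
}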

@ -450,7 +450,7 @@ const Scope = struct {
Zir.NullTerminatedString, // index into the current file's string table (decl name)
*DeclStatus,
) = .{},
captures: []const Zir.Inst.Capture = &.{},
enclosing_type: ?usize, // index into `types`, null = file top-level struct
pub const DeclStatus = union(enum) {
@ -459,6 +459,24 @@ const Scope = struct {
NotRequested: u32, // instr_index
};
fn getCapture(scope: Scope, idx: u16) struct {
union(enum) { inst: Zir.Inst.Index, decl: Zir.NullTerminatedString },
*Scope,
} {
const parent = scope.parent.?;
return switch (scope.captures[idx].unwrap()) {
.nested => |parent_idx| parent.getCapture(parent_idx),
.instruction => |inst| .{
.{ .inst = inst },
parent,
},
.decl_val, .decl_ref => |str| .{
.{ .decl = str },
parent,
},
};
}
/// Returns a pointer so that the caller has a chance to modify the value
/// in case they decide to start analyzing a previously not requested decl.
/// Another reason is that in some places we use the pointer to uniquely
@ -1151,29 +1169,6 @@ fn walkInstruction(
.expr = .{ .comptimeExpr = 0 },
};
},
.closure_get => {
const inst_node = data[@intFromEnum(inst)].inst_node;
const code = try self.getBlockSource(file, parent_src, inst_node.src_node);
const idx = self.comptime_exprs.items.len;
try self.exprs.append(self.arena, .{ .comptimeExpr = idx });
try self.comptime_exprs.append(self.arena, .{ .code = code });
return DocData.WalkResult{
.expr = .{ .comptimeExpr = idx },
};
},
.closure_capture => {
const un_tok = data[@intFromEnum(inst)].un_tok;
return try self.walkRef(
file,
parent_scope,
parent_src,
un_tok.operand,
need_type,
call_ctx,
);
},
.str => {
const str = data[@intFromEnum(inst)].str.get(file.zir);
@ -3395,11 +3390,23 @@ fn walkInstruction(
.enclosing_type = type_slot_index,
};
const small: Zir.Inst.OpaqueDecl.Small = @bitCast(extended.small);
const extra = file.zir.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index: usize = extra.end;
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src);
const captures_len = if (small.has_captures_len) blk: {
const captures_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
if (small.has_decls_len) extra_index += 1;
scope.captures = @ptrCast(file.zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
@ -3503,6 +3510,12 @@ fn walkInstruction(
break :blk tag_ref;
} else null;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const body_len = if (small.has_body_len) blk: {
const body_len = file.zir.extra[extra_index];
extra_index += 1;
@ -3520,6 +3533,11 @@ fn walkInstruction(
else => .{ .enumLiteral = @tagName(small.layout) },
};
if (small.has_decls_len) extra_index += 1;
scope.captures = @ptrCast(file.zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
@ -3631,6 +3649,12 @@ fn walkInstruction(
break :blk wr.expr;
} else null;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const body_len = if (small.has_body_len) blk: {
const body_len = file.zir.extra[extra_index];
extra_index += 1;
@ -3643,6 +3667,11 @@ fn walkInstruction(
break :blk fields_len;
} else 0;
if (small.has_decls_len) extra_index += 1;
scope.captures = @ptrCast(file.zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
var decl_indexes: std.ArrayListUnmanaged(usize) = .{};
var priv_decl_indexes: std.ArrayListUnmanaged(usize) = .{};
@ -3759,6 +3788,12 @@ fn walkInstruction(
const src_info = try self.srcLocInfo(file, extra.data.src_node, parent_src);
const captures_len = if (small.has_captures_len) blk: {
const captures_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = file.zir.extra[extra_index];
extra_index += 1;
@ -3768,6 +3803,9 @@ fn walkInstruction(
// We don't care about decls yet
if (small.has_decls_len) extra_index += 1;
scope.captures = @ptrCast(file.zir.extra[extra_index..][0..captures_len]);
extra_index += captures_len;
var backing_int: ?DocData.Expr = null;
if (small.has_backing_int) {
const backing_int_body_len = file.zir.extra[extra_index];
@ -4018,6 +4056,16 @@ fn walkInstruction(
.expr = .{ .cmpxchgIndex = cmpxchg_index },
};
},
.closure_get => {
const captured, const scope = parent_scope.getCapture(extended.small);
switch (captured) {
.inst => |cap_inst| return self.walkInstruction(file, scope, parent_src, cap_inst, need_type, call_ctx),
.decl => |str| {
const decl_status = parent_scope.resolveDeclName(str, file, inst.toOptional());
return .{ .expr = .{ .declRef = decl_status } };
},
}
},
}
},
}

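Autodoc's new `getCapture` above resolves a capture index by walking outward through parent scopes: a `nested` capture defers to the parent scope's capture list, while `instruction` and `decl_val`/`decl_ref` captures terminate the walk. A simplified, self-contained sketch of that resolution logic (types flattened for illustration; the real code also returns the owning scope):

const Capture = union(enum) {
    nested: u16, // index into the parent scope's capture list
    instruction: u32, // ZIR instruction whose value was captured
    decl_val: []const u8, // decl name (stands in for NullTerminatedString)
};

const Resolved = union(enum) { instruction: u32, decl: []const u8 };

// `scopes` is ordered outermost-first; `depth` is the scope doing the lookup.
fn resolve(scopes: []const []const Capture, depth: usize, idx: u16) Resolved {
    return switch (scopes[depth][idx]) {
        .nested => |parent_idx| resolve(scopes, depth - 1, parent_idx),
        .instruction => |inst| .{ .instruction = inst },
        .decl_val => |name| .{ .decl = name },
    };
}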

@ -264,6 +264,8 @@ pub fn populateFile(comp: *Compilation, mod: *Module, file: *File) !void {
assert(!file.zir.hasCompileErrors()); // builtin.zig must not have astgen errors
file.zir_loaded = true;
file.status = .success_zir;
// Note that whilst we set `zir_loaded` here, we populated `path_digest`
// all the way back in `Package.Module.create`.
}
fn writeFile(file: *File, mod: *Module) !void {


@ -1326,6 +1326,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.global = options.config,
.parent = options.root_mod,
.builtin_mod = options.root_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is set
});
try options.root_mod.deps.putNoClobber(arena, "compiler_rt", compiler_rt_mod);
}
@ -1430,6 +1431,7 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.global = options.config,
.parent = options.root_mod,
.builtin_mod = options.root_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is set
});
const zcu = try arena.create(Module);
@ -6107,6 +6109,7 @@ fn buildOutputFromZig(
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
const root_name = src_basename[0 .. src_basename.len - std.fs.path.extension(src_basename).len];
const target = comp.getTarget();
@ -6219,6 +6222,7 @@ pub fn build_crt_file(
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
for (c_source_files) |*item| {

File diff suppressed because it is too large.


@ -131,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type {
};
}
pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness {
pub fn analyze(gpa: Allocator, air: Air, intern_pool: *InternPool) Allocator.Error!Liveness {
const tracy = trace(@src());
defer tracy.end();
@ -836,7 +836,7 @@ pub const BigTomb = struct {
const Analysis = struct {
gpa: Allocator,
air: Air,
intern_pool: *const InternPool,
intern_pool: *InternPool,
tomb_bits: []usize,
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
extra: std.ArrayListUnmanaged(u32),


@ -101,17 +101,6 @@ embed_table: std.StringArrayHashMapUnmanaged(*EmbedFile) = .{},
/// is not yet implemented.
intern_pool: InternPool = .{},
/// The index type for this array is `CaptureScope.Index` and the elements here are
/// the indexes of the parent capture scopes.
/// Memory is owned by gpa; garbage collected.
capture_scope_parents: std.ArrayListUnmanaged(CaptureScope.Index) = .{},
/// Value is index of type
/// Memory is owned by gpa; garbage collected.
runtime_capture_scopes: std.AutoArrayHashMapUnmanaged(CaptureScope.Key, InternPool.Index) = .{},
/// Value is index of value
/// Memory is owned by gpa; garbage collected.
comptime_capture_scopes: std.AutoArrayHashMapUnmanaged(CaptureScope.Key, InternPool.Index) = .{},
/// To be eliminated in a future commit by moving more data into InternPool.
/// Current uses that must be eliminated:
/// * comptime pointer mutation
@ -305,28 +294,6 @@ pub const Export = struct {
}
};
pub const CaptureScope = struct {
pub const Key = extern struct {
zir_index: Zir.Inst.Index,
index: Index,
};
/// Index into `capture_scope_parents` which uniquely identifies a capture scope.
pub const Index = enum(u32) {
none = std.math.maxInt(u32),
_,
pub fn parent(i: Index, mod: *Module) Index {
return mod.capture_scope_parents.items[@intFromEnum(i)];
}
};
};
pub fn createCaptureScope(mod: *Module, parent: CaptureScope.Index) error{OutOfMemory}!CaptureScope.Index {
try mod.capture_scope_parents.append(mod.gpa, parent);
return @enumFromInt(mod.capture_scope_parents.items.len - 1);
}
const ValueArena = struct {
state: std.heap.ArenaAllocator.State,
state_acquired: ?*std.heap.ArenaAllocator.State = null,
@ -386,9 +353,6 @@ pub const Decl = struct {
/// there is no parent.
src_namespace: Namespace.Index,
/// The scope which lexically contains this decl.
src_scope: CaptureScope.Index,
/// The AST node index of this declaration.
/// Must be recomputed when the corresponding source file is modified.
src_node: Ast.Node.Index,
@ -563,7 +527,7 @@ pub const Decl = struct {
/// If the Decl owns its value and it is a union, return it,
/// otherwise null.
pub fn getOwnedUnion(decl: Decl, zcu: *Zcu) ?InternPool.UnionType {
pub fn getOwnedUnion(decl: Decl, zcu: *Zcu) ?InternPool.LoadedUnionType {
if (!decl.owns_tv) return null;
if (decl.val.ip_index == .none) return null;
return zcu.typeToUnion(decl.val.toType());
@ -599,14 +563,15 @@ pub const Decl = struct {
/// enum, or opaque.
pub fn getInnerNamespaceIndex(decl: Decl, zcu: *Zcu) Namespace.OptionalIndex {
if (!decl.has_tv) return .none;
const ip = &zcu.intern_pool;
return switch (decl.val.ip_index) {
.empty_struct_type => .none,
.none => .none,
else => switch (zcu.intern_pool.indexToKey(decl.val.toIntern())) {
.opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
.struct_type => |struct_type| struct_type.namespace,
.union_type => |union_type| union_type.namespace.toOptional(),
.enum_type => |enum_type| enum_type.namespace,
else => switch (ip.indexToKey(decl.val.toIntern())) {
.opaque_type => ip.loadOpaqueType(decl.val.toIntern()).namespace,
.struct_type => ip.loadStructType(decl.val.toIntern()).namespace,
.union_type => ip.loadUnionType(decl.val.toIntern()).namespace,
.enum_type => ip.loadEnumType(decl.val.toIntern()).namespace,
else => .none,
},
};
@ -792,7 +757,6 @@ pub const Namespace = struct {
/// These are only declarations named directly by the AST; anonymous
/// declarations are not stored here.
decls: std.ArrayHashMapUnmanaged(Decl.Index, void, DeclContext, true) = .{},
/// Key is usingnamespace Decl itself. To find the namespace being included,
/// the Decl Value has to be resolved as a Type which has a Namespace.
/// Value is whether the usingnamespace decl is marked `pub`.
@ -2140,10 +2104,6 @@ pub fn deinit(zcu: *Zcu) void {
zcu.intern_pool.deinit(gpa);
zcu.tmp_hack_arena.deinit();
zcu.capture_scope_parents.deinit(gpa);
zcu.runtime_capture_scopes.deinit(gpa);
zcu.comptime_capture_scopes.deinit(gpa);
}
pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
@ -3342,6 +3302,70 @@ pub fn semaPkg(mod: *Module, pkg: *Package.Module) !void {
return mod.semaFile(file);
}
fn getFileRootStruct(zcu: *Zcu, decl_index: Decl.Index, namespace_index: Namespace.Index, file: *File) Allocator.Error!InternPool.Index {
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
const extended = file.zir.instructions.items(.data)[@intFromEnum(Zir.Inst.Index.main_struct_inst)].extended;
assert(extended.opcode == .struct_decl);
const small: Zir.Inst.StructDecl.Small = @bitCast(extended.small);
assert(!small.has_captures_len);
assert(!small.has_backing_int);
assert(small.layout == .Auto);
var extra_index: usize = extended.operand + @typeInfo(Zir.Inst.StructDecl).Struct.fields.len;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk fields_len;
} else 0;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = file.zir.extra[extra_index];
extra_index += 1;
break :blk decls_len;
} else 0;
const decls = file.zir.bodySlice(extra_index, decls_len);
extra_index += decls_len;
const tracked_inst = try ip.trackZir(gpa, file, .main_struct_inst);
const wip_ty = switch (try ip.getStructType(gpa, .{
.layout = .Auto,
.fields_len = fields_len,
.known_non_opv = small.known_non_opv,
.requires_comptime = if (small.known_comptime_only) .yes else .unknown,
.is_tuple = small.is_tuple,
.any_comptime_fields = small.any_comptime_fields,
.any_default_inits = small.any_default_inits,
.inits_resolved = false,
.any_aligned_fields = small.any_aligned_fields,
.has_namespace = true,
.key = .{ .declared = .{
.zir_index = tracked_inst,
.captures = &.{},
} },
})) {
.existing => unreachable, // we wouldn't be analysing the file root if this type existed
.wip => |wip| wip,
};
errdefer wip_ty.cancel(ip);
if (zcu.comp.debug_incremental) {
try ip.addDependency(
gpa,
InternPool.Depender.wrap(.{ .decl = decl_index }),
.{ .src_hash = tracked_inst },
);
}
const decl = zcu.declPtr(decl_index);
decl.val = Value.fromInterned(wip_ty.index);
decl.has_tv = true;
decl.owns_tv = true;
decl.analysis = .complete;
try zcu.scanNamespace(namespace_index, decls, decl);
return wip_ty.finish(ip, decl_index, namespace_index.toOptional());
}
/// Regardless of the file status, will create a `Decl` so that we
/// can track dependencies and re-analyze when the file becomes outdated.
pub fn semaFile(mod: *Module, file: *File) SemaError!void {
@ -3363,15 +3387,14 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.decl_index = undefined,
.file_scope = file,
});
const new_namespace = mod.namespacePtr(new_namespace_index);
errdefer mod.destroyNamespace(new_namespace_index);
const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0, .none);
const new_decl_index = try mod.allocateNewDecl(new_namespace_index, 0);
const new_decl = mod.declPtr(new_decl_index);
errdefer @panic("TODO error handling");
file.root_decl = new_decl_index.toOptional();
new_namespace.decl_index = new_decl_index;
mod.namespacePtr(new_namespace_index).decl_index = new_decl_index;
new_decl.name = try file.fullyQualifiedName(mod);
new_decl.name_fully_qualified = true;
@ -3390,54 +3413,10 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
}
assert(file.zir_loaded);
var sema_arena = std.heap.ArenaAllocator.init(gpa);
defer sema_arena.deinit();
const sema_arena_allocator = sema_arena.allocator();
const struct_ty = try mod.getFileRootStruct(new_decl_index, new_namespace_index, file);
errdefer mod.intern_pool.remove(struct_ty);
var comptime_mutable_decls = std.ArrayList(Decl.Index).init(gpa);
defer comptime_mutable_decls.deinit();
var comptime_err_ret_trace = std.ArrayList(SrcLoc).init(gpa);
defer comptime_err_ret_trace.deinit();
var sema: Sema = .{
.mod = mod,
.gpa = gpa,
.arena = sema_arena_allocator,
.code = file.zir,
.owner_decl = new_decl,
.owner_decl_index = new_decl_index,
.func_index = .none,
.func_is_naked = false,
.fn_ret_ty = Type.void,
.fn_ret_ty_ies = null,
.owner_func_index = .none,
.comptime_mutable_decls = &comptime_mutable_decls,
.comptime_err_ret_trace = &comptime_err_ret_trace,
};
defer sema.deinit();
const struct_ty = sema.getStructType(
new_decl_index,
new_namespace_index,
try mod.intern_pool.trackZir(gpa, file, .main_struct_inst),
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
};
// TODO: figure out InternPool removals for incremental compilation
//errdefer ip.remove(struct_ty);
for (comptime_mutable_decls.items) |decl_index| {
const decl = mod.declPtr(decl_index);
_ = try decl.internValue(mod);
}
new_decl.val = Value.fromInterned(struct_ty);
new_decl.has_tv = true;
new_decl.owns_tv = true;
new_decl.analysis = .complete;
const comp = mod.comp;
switch (comp.cache_use) {
switch (mod.comp.cache_use) {
.whole => |whole| if (whole.cache_manifest) |man| {
const source = file.getSource(gpa) catch |err| {
try reportRetryableFileError(mod, file, "unable to load source: {s}", .{@errorName(err)});
@ -3573,7 +3552,6 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
.sema = &sema,
.src_decl = decl_index,
.namespace = decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = true,
@ -4205,7 +4183,7 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
);
const comp = zcu.comp;
if (!gop.found_existing) {
const new_decl_index = try zcu.allocateNewDecl(namespace_index, decl_node, iter.parent_decl.src_scope);
const new_decl_index = try zcu.allocateNewDecl(namespace_index, decl_node);
const new_decl = zcu.declPtr(new_decl_index);
new_decl.kind = kind;
new_decl.name = decl_name;
@ -4438,7 +4416,6 @@ pub fn analyzeFnBody(mod: *Module, func_index: InternPool.Index, arena: Allocato
.sema = &sema,
.src_decl = decl_index,
.namespace = decl.src_namespace,
.wip_capture_scope = try mod.createCaptureScope(decl.src_scope),
.instructions = .{},
.inlining = null,
.is_comptime = false,
@ -4639,7 +4616,6 @@ pub fn allocateNewDecl(
mod: *Module,
namespace: Namespace.Index,
src_node: Ast.Node.Index,
src_scope: CaptureScope.Index,
) !Decl.Index {
const ip = &mod.intern_pool;
const gpa = mod.gpa;
@ -4657,7 +4633,6 @@ pub fn allocateNewDecl(
.@"addrspace" = .generic,
.analysis = .unreferenced,
.zir_decl_index = .none,
.src_scope = src_scope,
.is_pub = false,
.is_exported = false,
.alive = false,
@ -4697,17 +4672,16 @@ pub fn errorSetBits(mod: *Module) u16 {
pub fn createAnonymousDecl(mod: *Module, block: *Sema.Block, typed_value: TypedValue) !Decl.Index {
const src_decl = mod.declPtr(block.src_decl);
return mod.createAnonymousDeclFromDecl(src_decl, block.namespace, block.wip_capture_scope, typed_value);
return mod.createAnonymousDeclFromDecl(src_decl, block.namespace, typed_value);
}
pub fn createAnonymousDeclFromDecl(
mod: *Module,
src_decl: *Decl,
namespace: Namespace.Index,
src_scope: CaptureScope.Index,
tv: TypedValue,
) !Decl.Index {
const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node, src_scope);
const new_decl_index = try mod.allocateNewDecl(namespace, src_decl.src_node);
errdefer mod.destroyDecl(new_decl_index);
const name = try mod.intern_pool.getOrPutStringFmt(mod.gpa, "{}__anon_{d}", .{
src_decl.name.fmt(&mod.intern_pool), @intFromEnum(new_decl_index),
@ -5276,7 +5250,7 @@ pub fn populateTestFunctions(
.len = test_decl_name.len,
.child = .u8_type,
});
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .none, .{
const test_name_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .{
.ty = test_name_decl_ty,
.val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = test_name_decl_ty.toIntern(),
@ -5322,7 +5296,7 @@ pub fn populateTestFunctions(
.child = test_fn_ty.toIntern(),
.sentinel = .none,
});
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .none, .{
const array_decl_index = try mod.createAnonymousDeclFromDecl(decl, decl.src_namespace, .{
.ty = array_decl_ty,
.val = Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = array_decl_ty.toIntern(),
@ -5686,7 +5660,7 @@ pub fn enumValue(mod: *Module, ty: Type, tag_int: InternPool.Index) Allocator.Er
pub fn enumValueFieldIndex(mod: *Module, ty: Type, field_index: u32) Allocator.Error!Value {
const ip = &mod.intern_pool;
const gpa = mod.gpa;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
if (enum_type.values.len == 0) {
// Auto-numbered fields.
@ -5976,14 +5950,6 @@ pub fn atomicPtrAlignment(
return .none;
}
pub fn opaqueSrcLoc(mod: *Module, opaque_type: InternPool.Key.OpaqueType) SrcLoc {
return mod.declPtr(opaque_type.decl).srcLoc(mod);
}
pub fn opaqueFullyQualifiedName(mod: *Module, opaque_type: InternPool.Key.OpaqueType) !InternPool.NullTerminatedString {
return mod.declPtr(opaque_type.decl).fullyQualifiedName(mod);
}
pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File {
return mod.declPtr(decl_index).getFileScope(mod);
}
@ -5992,28 +5958,26 @@ pub fn declFileScope(mod: *Module, decl_index: Decl.Index) *File {
/// * `@TypeOf(.{})`
/// * A struct which has no fields (`struct {}`).
/// * Not a struct.
pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
if (ty.ip_index == .none) return null;
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => |t| t,
else => null,
};
}
pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.Key.StructType {
if (ty.ip_index == .none) return null;
return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.struct_type => |t| if (t.layout == .Packed) t else null,
else => null,
};
}
/// This asserts that the union's enum tag type has been resolved.
pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.UnionType {
pub fn typeToStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
if (ty.ip_index == .none) return null;
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.ip_index)) {
.union_type => |k| ip.loadUnionType(k),
.struct_type => ip.loadStructType(ty.ip_index),
else => null,
};
}
pub fn typeToPackedStruct(mod: *Module, ty: Type) ?InternPool.LoadedStructType {
const s = mod.typeToStruct(ty) orelse return null;
if (s.layout != .Packed) return null;
return s;
}
pub fn typeToUnion(mod: *Module, ty: Type) ?InternPool.LoadedUnionType {
if (ty.ip_index == .none) return null;
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.ip_index)) {
.union_type => ip.loadUnionType(ty.ip_index),
else => null,
};
}
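
Aside: `typeToStruct`, `typeToPackedStruct`, and `typeToUnion` all share a "classify or return null" shape. A hedged userland analogue built on the public `@typeInfo` API rather than the compiler-internal `InternPool` loaders:

const std = @import("std");

// Return the field count when `T` is a struct, null otherwise -- the same
// optional-returning classification as typeToStruct above.
fn structFieldCount(comptime T: type) ?usize {
    return switch (@typeInfo(T)) {
        .Struct => |s| s.fields.len,
        else => null,
    };
}

test "struct vs non-struct" {
    try std.testing.expectEqual(@as(?usize, 1), structFieldCount(struct { x: u8 }));
    try std.testing.expectEqual(@as(?usize, null), structFieldCount(u32));
}
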
@ -6115,7 +6079,7 @@ pub const UnionLayout = struct {
padding: u32,
};
pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
pub fn getUnionLayout(mod: *Module, u: InternPool.LoadedUnionType) UnionLayout {
const ip = &mod.intern_pool;
assert(u.haveLayout(ip));
var most_aligned_field: u32 = undefined;
@ -6161,7 +6125,7 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
const tag_size = Type.fromInterned(u.enum_tag_ty).abiSize(mod);
const tag_align = Type.fromInterned(u.enum_tag_ty).abiAlignment(mod).max(.@"1");
return .{
.abi_size = u.size,
.abi_size = u.size(ip).*,
.abi_align = tag_align.max(payload_align),
.most_aligned_field = most_aligned_field,
.most_aligned_field_size = most_aligned_field_size,
@ -6170,16 +6134,16 @@ pub fn getUnionLayout(mod: *Module, u: InternPool.UnionType) UnionLayout {
.payload_align = payload_align,
.tag_align = tag_align,
.tag_size = tag_size,
.padding = u.padding,
.padding = u.padding(ip).*,
};
}
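
Aside: the alignment arithmetic above reduces to a max over the tag and payload alignments, with the tag alignment clamped to at least 1 (the `.max(.@"1")` call). Restated as a tiny pure function in byte units:

const std = @import("std");

fn unionAbiAlign(tag_align: u64, payload_align: u64) u64 {
    return @max(@max(tag_align, 1), payload_align);
}

test "union ABI alignment is the max of tag and payload" {
    try std.testing.expectEqual(@as(u64, 8), unionAbiAlign(4, 8));
    try std.testing.expectEqual(@as(u64, 4), unionAbiAlign(4, 2));
}
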
pub fn unionAbiSize(mod: *Module, u: InternPool.UnionType) u64 {
pub fn unionAbiSize(mod: *Module, u: InternPool.LoadedUnionType) u64 {
return mod.getUnionLayout(u).abi_size;
}
/// Returns 0 if the union is represented with 0 bits at runtime.
pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
pub fn unionAbiAlignment(mod: *Module, u: InternPool.LoadedUnionType) Alignment {
const ip = &mod.intern_pool;
const have_tag = u.flagsPtr(ip).runtime_tag.hasTag();
var max_align: Alignment = .none;
@ -6196,7 +6160,7 @@ pub fn unionAbiAlignment(mod: *Module, u: InternPool.UnionType) Alignment {
/// Returns the field alignment, assuming the union is not packed.
/// Keep implementation in sync with `Sema.unionFieldAlignment`.
/// Prefer to call that function instead of this one during Sema.
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_index: u32) Alignment {
pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.LoadedUnionType, field_index: u32) Alignment {
const ip = &mod.intern_pool;
const field_align = u.fieldAlign(ip, field_index);
if (field_align != .none) return field_align;
@ -6205,12 +6169,11 @@ pub fn unionFieldNormalAlignment(mod: *Module, u: InternPool.UnionType, field_in
}
/// Returns the index of the active field, given the current tag value
pub fn unionTagFieldIndex(mod: *Module, u: InternPool.UnionType, enum_tag: Value) ?u32 {
pub fn unionTagFieldIndex(mod: *Module, u: InternPool.LoadedUnionType, enum_tag: Value) ?u32 {
const ip = &mod.intern_pool;
if (enum_tag.toIntern() == .none) return null;
assert(ip.typeOf(enum_tag.toIntern()) == u.enum_tag_ty);
const enum_type = ip.indexToKey(u.enum_tag_ty).enum_type;
return enum_type.tagValueIndex(ip, enum_tag.toIntern());
return u.loadTagType(ip).tagValueIndex(ip, enum_tag.toIntern());
}
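
Aside: `tagValueIndex` maps a tag value back to a field index, which only matters when the enum declares explicit values. A linear-scan model of that lookup (the real intern pool may index this differently):

const std = @import("std");

fn tagValueIndex(values: []const u64, tag: u64) ?u32 {
    for (values, 0..) |v, i| {
        if (v == tag) return @as(u32, @intCast(i));
    }
    return null;
}

test "explicit tag values map back to field indices" {
    try std.testing.expectEqual(@as(?u32, 1), tagValueIndex(&.{ 10, 20, 30 }, 20));
    try std.testing.expectEqual(@as(?u32, null), tagValueIndex(&.{ 10, 20, 30 }, 5));
}
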
/// Returns the field alignment of a non-packed struct in byte units.
@ -6257,7 +6220,7 @@ pub fn structFieldAlignmentExtern(mod: *Module, field_ty: Type) Alignment {
/// projects.
pub fn structPackedFieldBitOffset(
mod: *Module,
struct_type: InternPool.Key.StructType,
struct_type: InternPool.LoadedStructType,
field_index: u32,
) u16 {
const ip = &mod.intern_pool;


@ -63,6 +63,11 @@ pub const CreateOptions = struct {
builtin_mod: ?*Package.Module,
/// Allocated into the given `arena`. Should be shared across all module creations in a Compilation.
/// Ignored if `builtin_mod` is passed or if `!have_zcu`.
/// Otherwise, may be `null` only if this Compilation consists of a single module.
builtin_modules: ?*std.StringHashMapUnmanaged(*Module),
pub const Paths = struct {
root: Package.Path,
/// Relative to `root`. May contain path separators.
@ -364,11 +369,37 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.wasi_exec_model = options.global.wasi_exec_model,
}, arena);
const new = if (options.builtin_modules) |builtins| new: {
const gop = try builtins.getOrPut(arena, generated_builtin_source);
if (gop.found_existing) break :b gop.value_ptr.*; // targets the outer `b:` block (see `break :b new` below), not `new:`
errdefer builtins.removeByPtr(gop.key_ptr);
const new = try arena.create(Module);
gop.value_ptr.* = new;
break :new new;
} else try arena.create(Module);
errdefer if (options.builtin_modules) |builtins| assert(builtins.remove(generated_builtin_source));
const new_file = try arena.create(File);
const digest = Cache.HashHelper.oneShot(generated_builtin_source);
const builtin_sub_path = try arena.dupe(u8, "b" ++ std.fs.path.sep_str ++ digest);
const new = try arena.create(Module);
const bin_digest, const hex_digest = digest: {
var hasher: Cache.Hasher = Cache.hasher_init;
hasher.update(generated_builtin_source);
var bin_digest: Cache.BinDigest = undefined;
hasher.final(&bin_digest);
var hex_digest: Cache.HexDigest = undefined;
_ = std.fmt.bufPrint(
&hex_digest,
"{s}",
.{std.fmt.fmtSliceHexLower(&bin_digest)},
) catch unreachable;
break :digest .{ bin_digest, hex_digest };
};
const builtin_sub_path = try arena.dupe(u8, "b" ++ std.fs.path.sep_str ++ hex_digest);
new.* = .{
.root = .{
.root_dir = options.global_cache_directory,
@ -415,6 +446,9 @@ pub fn create(arena: Allocator, options: CreateOptions) !*Package.Module {
.status = .never_loaded,
.mod = new,
.root_decl = .none,
// We might as well use this digest for the File `path_digest`, since there's a
// one-to-one correspondence here between distinct paths and distinct contents.
.path_digest = bin_digest,
};
break :b new;
};
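
Aside: the digest block above keeps two forms of one hash: binary for the file's `path_digest` and lowercase hex for the on-disk sub-path. A self-contained approximation; Blake3 stands in here for whatever `Cache.Hasher` actually is:

const std = @import("std");

fn cacheSubPath(gpa: std.mem.Allocator, source: []const u8) ![]u8 {
    var bin_digest: [16]u8 = undefined;
    std.crypto.hash.Blake3.hash(source, &bin_digest, .{});
    var hex_digest: [32]u8 = undefined;
    _ = std.fmt.bufPrint(&hex_digest, "{s}", .{std.fmt.fmtSliceHexLower(&bin_digest)}) catch unreachable;
    return std.fs.path.join(gpa, &.{ "b", &hex_digest });
}

test "identical generated sources share one cache sub-path" {
    const a = try cacheSubPath(std.testing.allocator, "pub const x = 1;");
    defer std.testing.allocator.free(a);
    const b = try cacheSubPath(std.testing.allocator, "pub const x = 1;");
    defer std.testing.allocator.free(b);
    try std.testing.expectEqualStrings(a, b);
}
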

File diff suppressed because it is too large.


@ -89,7 +89,7 @@ pub fn print(
if (payload.tag) |tag| {
try print(.{
.ty = Type.fromInterned(ip.indexToKey(ty.toIntern()).union_type.enum_tag_ty),
.ty = Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty),
.val = tag,
}, writer, level - 1, mod);
try writer.writeAll(" = ");
@ -247,7 +247,7 @@ pub fn print(
if (level == 0) {
return writer.writeAll("(enum)");
}
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| {
try writer.print(".{i}", .{enum_type.names.get(ip)[tag_index].fmt(ip)});
return;
@ -398,7 +398,7 @@ pub fn print(
}
},
.Union => {
const field_name = mod.typeToUnion(container_ty).?.field_names.get(ip)[@intCast(field.index)];
const field_name = mod.typeToUnion(container_ty).?.loadTagType(ip).names.get(ip)[@intCast(field.index)];
try writer.print(".{i}", .{field_name.fmt(ip)});
},
.Pointer => {
@ -482,11 +482,7 @@ fn printAggregate(
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
const field_name = switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |x| x.fieldName(ip, i),
.anon_struct_type => |x| if (x.isTuple()) .none else x.names.get(ip)[i].toOptional(),
else => unreachable,
};
const field_name = ty.structFieldName(@intCast(i), mod);
if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(ip)});
try print(.{


@ -424,22 +424,28 @@ pub fn toType(self: Value) Type {
pub fn intFromEnum(val: Value, ty: Type, mod: *Module) Allocator.Error!Value {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ip.typeOf(val.toIntern()))) {
const enum_ty = ip.typeOf(val.toIntern());
return switch (ip.indexToKey(enum_ty)) {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => val,
.enum_literal => |enum_literal| {
const field_index = ty.enumFieldIndex(enum_literal, mod).?;
return switch (ip.indexToKey(ty.toIntern())) {
switch (ip.indexToKey(ty.toIntern())) {
// Assume it is already an integer and return it directly.
.simple_type, .int_type => val,
.enum_type => |enum_type| if (enum_type.values.len != 0)
Value.fromInterned(enum_type.values.get(ip)[field_index])
else // Field index and integer values are the same.
mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index),
.simple_type, .int_type => return val,
.enum_type => {
const enum_type = ip.loadEnumType(ty.toIntern());
if (enum_type.values.len != 0) {
return Value.fromInterned(enum_type.values.get(ip)[field_index]);
} else {
// Field index and integer values are the same.
return mod.intValue(Type.fromInterned(enum_type.tag_ty), field_index);
}
},
else => unreachable,
};
}
},
.enum_type => |enum_type| try mod.getCoerced(val, Type.fromInterned(enum_type.tag_ty)),
.enum_type => try mod.getCoerced(val, Type.fromInterned(ip.loadEnumType(enum_ty).tag_ty)),
else => unreachable,
};
}
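
Aside: the `values.len != 0` branch above encodes a user-visible rule: when an enum declares no explicit values, the field index is the integer value; otherwise the values array is consulted. Observable from ordinary Zig:

const std = @import("std");

const Auto = enum { a, b, c }; // auto-numbered: field index == value
const Explicit = enum(u8) { a = 10, b = 20 }; // values stored separately

test "auto-numbered vs explicit tag values" {
    try std.testing.expectEqual(@as(u2, 1), @intFromEnum(Auto.b));
    try std.testing.expectEqual(@as(u8, 20), @intFromEnum(Explicit.b));
}
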
@ -832,7 +838,7 @@ pub fn writeToPackedMemory(
}
},
.Struct => {
const struct_type = ip.indexToKey(ty.toIntern()).struct_type;
const struct_type = ip.loadStructType(ty.toIntern());
// Sema is supposed to have emitted a compile error already in the case of Auto,
// and Extern is handled in non-packed writeToMemory.
assert(struct_type.layout == .Packed);
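
Aside: the assert holds because packed is the only layout with a defined bit-level representation (per the comment above, Auto is a compile error earlier and Extern goes through the non-packed path). The guarantee from the user's side, with fields filling from the least significant bit:

const std = @import("std");

const Pair = packed struct { lo: u4, hi: u4 }; // lo = bits 0..3, hi = bits 4..7

test "packed structs have a defined bit layout" {
    const p: Pair = .{ .lo = 0xA, .hi = 0x3 };
    try std.testing.expectEqual(@as(u8, 0x3A), @as(u8, @bitCast(p)));
}
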


@ -3354,7 +3354,8 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
val.writeToMemory(ty, mod, &buf) catch unreachable;
return func.storeSimdImmd(buf);
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// non-packed structs are not handled in this function because they
// are by-ref types.
assert(struct_type.layout == .Packed);
@ -5411,7 +5412,7 @@ fn airUnionInit(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
const layout = union_ty.unionGetLayout(mod);
const union_obj = mod.typeToUnion(union_ty).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[extra.field_index]);
const field_name = union_obj.field_names.get(ip)[extra.field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);


@ -76,7 +76,7 @@ pub fn classifyType(ty: Type, mod: *Module) [2]Class {
}
const layout = ty.unionGetLayout(mod);
assert(layout.tag_size == 0);
if (union_obj.field_names.len > 1) return memory;
if (union_obj.field_types.len > 1) return memory;
const first_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[0]);
return classifyType(first_field_ty, mod);
},


@ -18183,7 +18183,7 @@ fn airUnionInit(self: *Self, inst: Air.Inst.Index) !void {
const dst_mcv = try self.allocRegOrMem(inst, false);
const union_obj = mod.typeToUnion(union_ty).?;
const field_name = union_obj.field_names.get(ip)[extra.field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const tag_ty = Type.fromInterned(union_obj.enum_tag_ty);
const field_index = tag_ty.enumFieldIndex(field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, field_index);


@ -510,88 +510,91 @@ pub fn generateSymbol(
}
}
},
.struct_type => |struct_type| switch (struct_type.layout) {
.Packed => {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
return error.Overflow;
const current_pos = code.items.len;
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
.struct_type => {
const struct_type = ip.loadStructType(typed_value.ty.toIntern());
switch (struct_type.layout) {
.Packed => {
const abi_size = math.cast(usize, typed_value.ty.abiSize(mod)) orelse
return error.Overflow;
const current_pos = code.items.len;
try code.resize(current_pos + abi_size);
var bits: u16 = 0;
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[index] },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
};
for (struct_type.field_types.get(ip), 0..) |field_ty, index| {
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[index] },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
};
// A pointer may point to a decl which must be marked used, but it can
// also result in a relocation, so those cases are handled separately.
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
.fail => |em| return Result{ .fail = em },
}
} else {
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
}
},
.Auto, .Extern => {
const struct_begin = code.items.len;
const field_types = struct_type.field_types.get(ip);
const offsets = struct_type.offsets.get(ip);
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = field_types[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
const padding = math.cast(
usize,
offsets[field_index] - (code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
// A pointer may point to a decl which must be marked used, but it can
// also result in a relocation, so those cases are handled separately.
if (Type.fromInterned(field_ty).zigTypeTag(mod) == .Pointer) {
const field_size = math.cast(usize, Type.fromInterned(field_ty).abiSize(mod)) orelse
return error.Overflow;
var tmp_list = try std.ArrayList(u8).initCapacity(code.allocator, field_size);
defer tmp_list.deinit();
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, &tmp_list, debug_output, reloc_info)) {
.ok => @memcpy(code.items[current_pos..][0..tmp_list.items.len], tmp_list.items),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
} else {
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
}
},
.Auto, .Extern => {
const struct_begin = code.items.len;
const field_types = struct_type.field_types.get(ip);
const offsets = struct_type.offsets.get(ip);
var it = struct_type.iterateRuntimeOrder(ip);
while (it.next()) |field_index| {
const field_ty = field_types[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_val = switch (ip.indexToKey(typed_value.val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
const size = struct_type.size(ip).*;
const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
const padding = math.cast(
usize,
offsets[field_index] - (code.items.len - struct_begin),
std.mem.alignForward(u64, size, @max(alignment, 1)) -
(code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
switch (try generateSymbol(bin_file, src_loc, .{
.ty = Type.fromInterned(field_ty),
.val = Value.fromInterned(field_val),
}, code, debug_output, reloc_info)) {
.ok => {},
.fail => |em| return Result{ .fail = em },
}
}
const size = struct_type.size(ip).*;
const alignment = struct_type.flagsPtr(ip).alignment.toByteUnitsOptional().?;
const padding = math.cast(
usize,
std.mem.alignForward(u64, size, @max(alignment, 1)) -
(code.items.len - struct_begin),
) orelse return error.Overflow;
if (padding > 0) try code.appendNTimes(0, padding);
},
},
}
},
else => unreachable,
},
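
Aside: the trailing-padding computation at the end of the `.Auto`/`.Extern` branch pads the emitted bytes out to the struct's size rounded up to its alignment. As a pure function with a worked case:

const std = @import("std");

fn trailingPadding(emitted: u64, size: u64, alignment: u64) u64 {
    return std.mem.alignForward(u64, size, @max(alignment, 1)) - emitted;
}

test "emitted bytes are padded to the aligned size" {
    try std.testing.expectEqual(@as(u64, 3), trailingPadding(5, 7, 4)); // 7 rounds up to 8
    try std.testing.expectEqual(@as(u64, 0), trailingPadding(8, 8, 4));
}
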


@ -1376,112 +1376,24 @@ pub const DeclGen = struct {
}
try writer.writeByte('}');
},
.struct_type => |struct_type| switch (struct_type.layout) {
.Auto, .Extern => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
try writer.writeByte('{');
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (struct_type.fieldIsComptime(ip, field_index)) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeByte(',');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), initializer_type);
empty = false;
}
try writer.writeByte('}');
},
.Packed => {
const int_info = ty.intInfo(mod);
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
const bit_offset_ty = try mod.intType(.unsigned, bits);
var bit_offset: u64 = 0;
var eff_num_fields: usize = 0;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1;
}
if (eff_num_fields == 0) {
try writer.writeByte('(');
try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
} else if (ty.bitSize(mod) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
try writer.writeAll("zig_or_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
}
var eff_index: usize = 0;
var needs_closing_paren = false;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
const cast_context = IntCastContext{ .value = .{ .value = Value.fromInterned(field_val) } };
if (bit_offset != 0) {
try writer.writeAll("zig_shl_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
try writer.writeAll(", ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
} else {
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
}
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
bit_offset += field_ty.bitSize(mod);
needs_closing_paren = true;
eff_index += 1;
}
} else {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(" | ");
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
switch (struct_type.layout) {
.Auto, .Extern => {
if (!location.isInitializer()) {
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
}
try writer.writeByte('{');
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (struct_type.fieldIsComptime(ip, field_index)) continue;
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeByte(',');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
@ -1490,22 +1402,113 @@ pub const DeclGen = struct {
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), initializer_type);
if (bit_offset != 0) {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
try writer.writeAll(" << ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
} else {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
}
bit_offset += field_ty.bitSize(mod);
empty = false;
}
try writer.writeByte(')');
}
},
try writer.writeByte('}');
},
.Packed => {
const int_info = ty.intInfo(mod);
const bits = Type.smallestUnsignedBits(int_info.bits - 1);
const bit_offset_ty = try mod.intType(.unsigned, bits);
var bit_offset: u64 = 0;
var eff_num_fields: usize = 0;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
eff_num_fields += 1;
}
if (eff_num_fields == 0) {
try writer.writeByte('(');
try dg.renderValue(writer, ty, Value.undef, initializer_type);
try writer.writeByte(')');
} else if (ty.bitSize(mod) > 64) {
// zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
var num_or = eff_num_fields - 1;
while (num_or > 0) : (num_or -= 1) {
try writer.writeAll("zig_or_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
}
var eff_index: usize = 0;
var needs_closing_paren = false;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
const cast_context = IntCastContext{ .value = .{ .value = Value.fromInterned(field_val) } };
if (bit_offset != 0) {
try writer.writeAll("zig_shl_");
try dg.renderTypeForBuiltinFnName(writer, ty);
try writer.writeByte('(');
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
try writer.writeAll(", ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
try writer.writeByte(')');
} else {
try dg.renderIntCast(writer, ty, cast_context, field_ty, .FunctionArgument);
}
if (needs_closing_paren) try writer.writeByte(')');
if (eff_index != eff_num_fields - 1) try writer.writeAll(", ");
bit_offset += field_ty.bitSize(mod);
needs_closing_paren = true;
eff_index += 1;
}
} else {
try writer.writeByte('(');
// a << a_off | b << b_off | c << c_off
var empty = true;
for (0..struct_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
if (!empty) try writer.writeAll(" | ");
try writer.writeByte('(');
try dg.renderType(writer, ty);
try writer.writeByte(')');
const field_val = switch (ip.indexToKey(val.ip_index).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
};
if (bit_offset != 0) {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
try writer.writeAll(" << ");
const bit_offset_val = try mod.intValue(bit_offset_ty, bit_offset);
try dg.renderValue(writer, bit_offset_ty, bit_offset_val, .FunctionArgument);
} else {
try dg.renderValue(writer, field_ty, Value.fromInterned(field_val), .Other);
}
bit_offset += field_ty.bitSize(mod);
empty = false;
}
try writer.writeByte(')');
}
},
}
},
else => unreachable,
},
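
Aside: both rendering paths above (the `zig_or_`/`zig_shl_` builtin calls for >64-bit backing integers, and the plain `<<`/`|` expression otherwise) compute the same fold: each field is ORed in at its running bit offset. The arithmetic in miniature:

const std = @import("std");

// Assumes the summed field widths stay below 64 bits in this sketch.
fn packFields(fields: []const u64, widths: []const u6) u64 {
    var acc: u64 = 0;
    var offset: u6 = 0;
    for (fields, widths) |field, width| {
        acc |= field << offset;
        offset += width;
    }
    return acc;
}

test "fields fold into the backing integer" {
    // two u4 fields: 0xA at bit 0, 0x3 at bit 4 -> 0x3A
    try std.testing.expectEqual(@as(u64, 0x3A), packFields(&.{ 0xA, 0x3 }, &.{ 4, 4 }));
}
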
@ -1547,7 +1550,7 @@ pub const DeclGen = struct {
const field_index = mod.unionTagFieldIndex(union_obj, Value.fromInterned(un.tag)).?;
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const field_name = union_obj.field_names.get(ip)[field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
if (union_obj.getLayout(ip) == .Packed) {
if (field_ty.hasRuntimeBits(mod)) {
if (field_ty.isPtrAtRuntime(mod)) {
@ -5502,7 +5505,7 @@ fn fieldLocation(
.{ .field = .{ .identifier = "payload" } }
else
.begin;
const field_name = union_obj.field_names.get(ip)[field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[field_index];
return .{ .field = if (container_ty.unionTagTypeSafety(mod)) |_|
.{ .payload_identifier = ip.stringToSlice(field_name) }
else
@ -5735,8 +5738,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
else
.{ .identifier = ip.stringToSlice(struct_ty.legacyStructFieldName(extra.field_index, mod)) },
.union_type => |union_type| field_name: {
const union_obj = ip.loadUnionType(union_type);
.union_type => field_name: {
const union_obj = ip.loadUnionType(struct_ty.toIntern());
if (union_obj.flagsPtr(ip).layout == .Packed) {
const operand_lval = if (struct_byval == .constant) blk: {
const operand_local = try f.allocLocal(inst, struct_ty);
@ -5762,8 +5765,8 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
return local;
} else {
const name = union_obj.field_names.get(ip)[extra.field_index];
break :field_name if (union_type.hasTag(ip)) .{
const name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
break :field_name if (union_obj.hasTag(ip)) .{
.payload_identifier = ip.stringToSlice(name),
} else .{
.identifier = ip.stringToSlice(name),
@ -7171,7 +7174,7 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
const union_ty = f.typeOfIndex(inst);
const union_obj = mod.typeToUnion(union_ty).?;
const field_name = union_obj.field_names.get(ip)[extra.field_index];
const field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const payload_ty = f.typeOf(extra.init);
const payload = try f.resolveInst(extra.init);
try reap(f, inst, &.{extra.init});


@ -1507,7 +1507,7 @@ pub const CType = extern union {
if (lookup.isMutable()) {
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i, mod);
@ -1589,7 +1589,7 @@ pub const CType = extern union {
var is_packed = false;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i, mod);
@ -1940,7 +1940,7 @@ pub const CType = extern union {
const zig_ty_tag = ty.zigTypeTag(mod);
const fields_len = switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
};
@ -1967,7 +1967,7 @@ pub const CType = extern union {
else
arena.dupeZ(u8, ip.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
.Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
})),
.type = store.set.typeToIndex(field_ty, mod, switch (kind) {
@ -2097,7 +2097,7 @@ pub const CType = extern union {
var c_field_i: usize = 0;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
@ -2120,7 +2120,7 @@ pub const CType = extern union {
else
ip.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
.Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
}),
mem.span(c_field.name),
@ -2226,7 +2226,7 @@ pub const CType = extern union {
const zig_ty_tag = ty.zigTypeTag(mod);
for (0..switch (ty.zigTypeTag(mod)) {
.Struct => ty.structFieldCount(mod),
.Union => mod.typeToUnion(ty).?.field_names.len,
.Union => mod.typeToUnion(ty).?.field_types.len,
else => unreachable,
}) |field_i_usize| {
const field_i: u32 = @intCast(field_i_usize);
@ -2245,7 +2245,7 @@ pub const CType = extern union {
else
mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.legacyStructFieldName(field_i, mod),
.Union => mod.typeToUnion(ty).?.field_names.get(ip)[field_i],
.Union => ip.loadUnionType(ty.toIntern()).loadTagType(ip).names.get(ip)[field_i],
else => unreachable,
}));
autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");


@ -1997,7 +1997,7 @@ pub const Object = struct {
return debug_enum_type;
}
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
const enumerators = try gpa.alloc(Builder.Metadata, enum_type.names.len);
defer gpa.free(enumerators);
@ -2507,8 +2507,8 @@ pub const Object = struct {
try o.debug_type_map.put(gpa, ty, debug_struct_type);
return debug_struct_type;
},
.struct_type => |struct_type| {
if (!struct_type.haveFieldTypes(ip)) {
.struct_type => {
if (!ip.loadStructType(ty.toIntern()).haveFieldTypes(ip)) {
// This can happen if a struct type makes it all the way to
// flush() without ever being instantiated or referenced (even
// via pointer). The only reason we are hearing about it now is
@ -2597,15 +2597,14 @@ pub const Object = struct {
const name = try o.allocTypeName(ty);
defer gpa.free(name);
const union_type = ip.indexToKey(ty.toIntern()).union_type;
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.haveFieldTypes(ip) or !ty.hasRuntimeBitsIgnoreComptime(mod)) {
const debug_union_type = try o.makeEmptyNamespaceDebugType(owner_decl_index);
try o.debug_type_map.put(gpa, ty, debug_union_type);
return debug_union_type;
}
const union_obj = ip.loadUnionType(union_type);
const layout = mod.getUnionLayout(union_obj);
const layout = mod.getUnionLayout(union_type);
const debug_fwd_ref = try o.builder.debugForwardReference();
@ -2622,7 +2621,7 @@ pub const Object = struct {
ty.abiSize(mod) * 8,
ty.abiAlignment(mod).toByteUnits(0) * 8,
try o.builder.debugTuple(
&.{try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty))},
&.{try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty))},
),
);
@ -2636,21 +2635,23 @@ pub const Object = struct {
var fields: std.ArrayListUnmanaged(Builder.Metadata) = .{};
defer fields.deinit(gpa);
try fields.ensureUnusedCapacity(gpa, union_obj.field_names.len);
try fields.ensureUnusedCapacity(gpa, union_type.loadTagType(ip).names.len);
const debug_union_fwd_ref = if (layout.tag_size == 0)
debug_fwd_ref
else
try o.builder.debugForwardReference();
for (0..union_obj.field_names.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index];
const tag_type = union_type.loadTagType(ip);
for (0..tag_type.names.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
if (!Type.fromInterned(field_ty).hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_size = Type.fromInterned(field_ty).abiSize(mod);
const field_align = mod.unionFieldNormalAlignment(union_obj, @intCast(field_index));
const field_align = mod.unionFieldNormalAlignment(union_type, @intCast(field_index));
const field_name = union_obj.field_names.get(ip)[field_index];
const field_name = tag_type.names.get(ip)[field_index];
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(ip.stringToSlice(field_name)),
.none, // File
@ -2706,7 +2707,7 @@ pub const Object = struct {
.none, // File
debug_fwd_ref,
0, // Line
try o.lowerDebugType(Type.fromInterned(union_obj.enum_tag_ty)),
try o.lowerDebugType(Type.fromInterned(union_type.enum_tag_ty)),
layout.tag_size * 8,
layout.tag_align.toByteUnits(0) * 8,
tag_offset * 8,
@ -3321,9 +3322,11 @@ pub const Object = struct {
return o.builder.structType(.normal, fields[0..fields_len]);
},
.simple_type => unreachable,
.struct_type => |struct_type| {
.struct_type => {
if (o.type_map.get(t.toIntern())) |value| return value;
const struct_type = ip.loadStructType(t.toIntern());
if (struct_type.layout == .Packed) {
const int_ty = try o.lowerType(Type.fromInterned(struct_type.backingIntType(ip).*));
try o.type_map.put(o.gpa, t.toIntern(), int_ty);
@ -3468,10 +3471,10 @@ pub const Object = struct {
}
return o.builder.structType(.normal, llvm_field_types.items);
},
.union_type => |union_type| {
.union_type => {
if (o.type_map.get(t.toIntern())) |value| return value;
const union_obj = ip.loadUnionType(union_type);
const union_obj = ip.loadUnionType(t.toIntern());
const layout = mod.getUnionLayout(union_obj);
if (union_obj.flagsPtr(ip).layout == .Packed) {
@ -3545,17 +3548,16 @@ pub const Object = struct {
);
return ty;
},
.opaque_type => |opaque_type| {
.opaque_type => {
const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
if (!gop.found_existing) {
const name = try o.builder.string(ip.stringToSlice(
try mod.opaqueFullyQualifiedName(opaque_type),
));
const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl);
const name = try o.builder.string(ip.stringToSlice(try decl.fullyQualifiedName(mod)));
gop.value_ptr.* = try o.builder.opaqueType(name);
}
return gop.value_ptr.*;
},
.enum_type => |enum_type| try o.lowerType(Type.fromInterned(enum_type.tag_ty)),
.enum_type => try o.lowerType(Type.fromInterned(ip.loadEnumType(t.toIntern()).tag_ty)),
.func_type => |func_type| try o.lowerTypeFn(func_type),
.error_set_type, .inferred_error_set_type => try o.errorIntType(),
// values, not types
@ -4032,7 +4034,8 @@ pub const Object = struct {
else
struct_ty, vals);
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
const struct_ty = try o.lowerType(ty);
if (struct_type.layout == .Packed) {
@ -4596,7 +4599,7 @@ pub const Object = struct {
fn getEnumTagNameFunction(o: *Object, enum_ty: Type) !Builder.Function.Index {
const zcu = o.module;
const ip = &zcu.intern_pool;
const enum_type = ip.indexToKey(enum_ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(enum_ty.toIntern());
// TODO: detect when the type changes and re-emit this function.
const gop = try o.decl_map.getOrPut(o.gpa, enum_type.decl);
@ -9620,7 +9623,7 @@ pub const FuncGen = struct {
fn getIsNamedEnumValueFunction(self: *FuncGen, enum_ty: Type) !Builder.Function.Index {
const o = self.dg.object;
const zcu = o.module;
const enum_type = zcu.intern_pool.indexToKey(enum_ty.toIntern()).enum_type;
const enum_type = zcu.intern_pool.loadEnumType(enum_ty.toIntern());
// TODO: detect when the type changes and re-emit this function.
const gop = try o.named_enum_map.getOrPut(o.gpa, enum_type.decl);
@ -10092,7 +10095,7 @@ pub const FuncGen = struct {
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical(mod);
const union_field_name = union_obj.field_names.get(ip)[extra.field_index];
const union_field_name = union_obj.loadTagType(ip).names.get(ip)[extra.field_index];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
@ -11154,7 +11157,8 @@ fn lowerSystemVFnRetTy(o: *Object, fn_info: InternPool.Key.FuncType) Allocator.E
if (first_non_integer == null or classes[first_non_integer.?] == .none) {
assert(first_non_integer orelse classes.len == types_index);
switch (ip.indexToKey(return_type.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(return_type.toIntern());
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
@ -11446,7 +11450,8 @@ const ParamTypeIterator = struct {
return .byref;
}
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
const size: u64 = struct_type.size(ip).*;
assert((std.math.divCeil(u64, size, 8) catch unreachable) == types_index);
@ -11562,7 +11567,7 @@ fn isByRef(ty: Type, mod: *Module) bool {
}
return false;
},
.struct_type => |s| s,
.struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
};


@ -1528,7 +1528,7 @@ const DeclGen = struct {
try self.type_map.put(self.gpa, ty.toIntern(), .{ .ty_ref = ty_ref });
return ty_ref;
},
.struct_type => |struct_type| struct_type,
.struct_type => ip.loadStructType(ty.toIntern()),
else => unreachable,
};
@ -3633,7 +3633,8 @@ const DeclGen = struct {
index += 1;
}
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(result_ty.toIntern());
var it = struct_type.iterateRuntimeOrder(ip);
for (elements, 0..) |element, i| {
const field_index = it.next().?;
@ -3901,36 +3902,33 @@ const DeclGen = struct {
const mod = self.module;
const ip = &mod.intern_pool;
const union_ty = mod.typeToUnion(ty).?;
const tag_ty = Type.fromInterned(union_ty.enum_tag_ty);
if (union_ty.getLayout(ip) == .Packed) {
unreachable; // TODO
}
const maybe_tag_ty = ty.unionTagTypeSafety(mod);
const layout = self.unionLayout(ty);
const tag_int = if (layout.tag_size != 0) blk: {
const tag_ty = maybe_tag_ty.?;
const union_field_name = union_ty.field_names.get(ip)[active_field];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name, mod).?;
const tag_val = try mod.enumValueFieldIndex(tag_ty, enum_field_index);
const tag_val = try mod.enumValueFieldIndex(tag_ty, active_field);
const tag_int_val = try tag_val.intFromEnum(tag_ty, mod);
break :blk tag_int_val.toUnsignedInt(mod);
} else 0;
if (!layout.has_payload) {
const tag_ty_ref = try self.resolveType(maybe_tag_ty.?, .direct);
const tag_ty_ref = try self.resolveType(tag_ty, .direct);
return try self.constInt(tag_ty_ref, tag_int);
}
const tmp_id = try self.alloc(ty, .{ .storage_class = .Function });
if (layout.tag_size != 0) {
const tag_ty_ref = try self.resolveType(maybe_tag_ty.?, .direct);
const tag_ptr_ty_ref = try self.ptrType(maybe_tag_ty.?, .Function);
const tag_ty_ref = try self.resolveType(tag_ty, .direct);
const tag_ptr_ty_ref = try self.ptrType(tag_ty, .Function);
const ptr_id = try self.accessChain(tag_ptr_ty_ref, tmp_id, &.{@as(u32, @intCast(layout.tag_index))});
const tag_id = try self.constInt(tag_ty_ref, tag_int);
try self.store(maybe_tag_ty.?, ptr_id, tag_id, .{});
try self.store(tag_ty, ptr_id, tag_id, .{});
}
const payload_ty = Type.fromInterned(union_ty.field_types.get(ip)[active_field]);


@ -1118,6 +1118,7 @@ fn buildSharedLib(
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
const c_source_files = [1]Compilation.CSourceFile{


@ -181,6 +181,7 @@ pub fn buildLibCXX(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
var c_source_files = try std.ArrayList(Compilation.CSourceFile).initCapacity(arena, libcxx_files.len);
@ -395,6 +396,7 @@ pub fn buildLibCXXABI(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
var c_source_files = try std.ArrayList(Compilation.CSourceFile).initCapacity(arena, libcxxabi_files.len);


@ -92,6 +92,7 @@ pub fn buildTsan(comp: *Compilation, prog_node: *std.Progress.Node) BuildError!v
.cc_argv = &common_flags,
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
}) catch |err| {
comp.setMiscFailure(
.libtsan,


@ -58,6 +58,7 @@ pub fn buildStaticLib(comp: *Compilation, prog_node: *std.Progress.Node) !void {
.cc_argv = &.{},
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
const root_name = "unwind";


@ -311,7 +311,8 @@ pub const DeclState = struct {
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// DW.AT.name, DW.FORM.string
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
@ -374,7 +375,7 @@ pub const DeclState = struct {
try ty.print(dbg_info_buffer.writer(), mod);
try dbg_info_buffer.append(0);
const enum_type = ip.indexToKey(ty.ip_index).enum_type;
const enum_type = ip.loadEnumType(ty.ip_index);
for (enum_type.names.get(ip), 0..) |field_name_index, field_i| {
const field_name = ip.stringToSlice(field_name_index);
// DW.AT.enumerator
@ -442,7 +443,7 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
}
for (union_obj.field_types.get(ip), union_obj.field_names.get(ip)) |field_ty, field_name| {
for (union_obj.field_types.get(ip), union_obj.loadTagType(ip).names.get(ip)) |field_ty, field_name| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));


@ -2708,7 +2708,9 @@ fn buildOutputType(
create_module.opts.emit_bin = emit_bin != .no;
create_module.opts.any_c_source_files = create_module.c_source_files.items.len != 0;
const main_mod = try createModule(gpa, arena, &create_module, 0, null, zig_lib_directory);
var builtin_modules: std.StringHashMapUnmanaged(*Package.Module) = .{};
// `builtin_modules` is allocated into `arena`, so it needs no deinit
const main_mod = try createModule(gpa, arena, &create_module, 0, null, zig_lib_directory, &builtin_modules);
for (create_module.modules.keys(), create_module.modules.values()) |key, cli_mod| {
if (cli_mod.resolved == null)
fatal("module '{s}' declared but not used", .{key});
@ -2753,6 +2755,7 @@ fn buildOutputType(
.global = create_module.resolved_options,
.parent = main_mod,
.builtin_mod = main_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is specified
});
test_mod.deps = try main_mod.deps.clone(arena);
break :test_mod test_mod;
@ -2771,6 +2774,7 @@ fn buildOutputType(
.global = create_module.resolved_options,
.parent = main_mod,
.builtin_mod = main_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is specified
});
break :root_mod test_mod;
@ -3479,6 +3483,7 @@ fn createModule(
index: usize,
parent: ?*Package.Module,
zig_lib_directory: Cache.Directory,
builtin_modules: *std.StringHashMapUnmanaged(*Package.Module),
) Allocator.Error!*Package.Module {
const cli_mod = &create_module.modules.values()[index];
if (cli_mod.resolved) |m| return m;
@ -3931,6 +3936,7 @@ fn createModule(
.global = create_module.resolved_options,
.parent = parent,
.builtin_mod = null,
.builtin_modules = builtin_modules,
}) catch |err| switch (err) {
error.ValgrindUnsupportedOnTarget => fatal("unable to create module '{s}': valgrind does not support the selected target CPU architecture", .{name}),
error.TargetRequiresSingleThreaded => fatal("unable to create module '{s}': the selected target does not support multithreading", .{name}),
@ -3953,7 +3959,7 @@ fn createModule(
for (cli_mod.deps) |dep| {
const dep_index = create_module.modules.getIndex(dep.value) orelse
fatal("module '{s}' depends on non-existent module '{s}'", .{ name, dep.key });
const dep_mod = try createModule(gpa, arena, create_module, dep_index, mod, zig_lib_directory);
const dep_mod = try createModule(gpa, arena, create_module, dep_index, mod, zig_lib_directory, builtin_modules);
try mod.deps.put(arena, dep.key, dep_mod);
}
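
Aside: the recursion above resolves each CLI module at most once; a dependency shared by several modules is built the first time it is reached and reused via the `resolved` slot thereafter. The memoization pattern in isolation, with stand-in types rather than the CLI's real ones:

const std = @import("std");

const Mod = struct { deps: std.ArrayListUnmanaged(*Mod) = .{} };
const CliMod = struct { deps: []const usize, resolved: ?*Mod = null };

fn resolve(arena: std.mem.Allocator, all: []CliMod, index: usize) !*Mod {
    const cli = &all[index];
    if (cli.resolved) |m| return m; // already created: share the instance
    const mod = try arena.create(Mod);
    mod.* = .{};
    cli.resolved = mod; // memoize before visiting dependencies
    for (cli.deps) |dep| try mod.deps.append(arena, try resolve(arena, all, dep));
    return mod;
}

test "a shared dependency is created exactly once" {
    var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_state.deinit();
    const arena = arena_state.allocator();
    var mods = [_]CliMod{
        .{ .deps = &.{ 1, 2 } },
        .{ .deps = &.{2} },
        .{ .deps = &.{} },
    };
    const root = try resolve(arena, &mods, 0);
    try std.testing.expectEqual(root.deps.items[1], mods[1].resolved.?.deps.items[0]);
}
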
@ -5249,6 +5255,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.global = config,
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // all modules will inherit this one's builtin
});
const builtin_mod = root_mod.getBuiltinDependency();
@ -5265,6 +5272,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.global = config,
.parent = root_mod,
.builtin_mod = builtin_mod,
.builtin_modules = null, // `builtin_mod` is specified
});
var cleanup_build_dir: ?fs.Dir = null;
@ -5399,6 +5407,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.global = config,
.parent = root_mod,
.builtin_mod = builtin_mod,
.builtin_modules = null, // `builtin_mod` is specified
});
const hash_cloned = try arena.dupe(u8, &hash);
deps_mod.deps.putAssumeCapacityNoClobber(hash_cloned, m);
@ -5648,6 +5657,7 @@ fn jitCmd(
.global = config,
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // all modules will inherit this one's builtin
});
if (options.depend_on_aro) {
@ -5670,6 +5680,7 @@ fn jitCmd(
.global = config,
.parent = null,
.builtin_mod = root_mod.getBuiltinDependency(),
.builtin_modules = null, // `builtin_mod` is specified
});
try root_mod.deps.put(arena, "aro", aro_mod);
}
@ -7216,10 +7227,11 @@ fn createDependenciesModule(
},
.fully_qualified_name = "root.@dependencies",
.parent = main_mod,
.builtin_mod = builtin_mod,
.cc_argv = &.{},
.inherited = .{},
.global = global_options,
.builtin_mod = builtin_mod,
.builtin_modules = null, // `builtin_mod` is specified
});
try main_mod.deps.put(arena, "@dependencies", deps_mod);
return deps_mod;


@ -250,6 +250,7 @@ pub fn buildCRTFile(comp: *Compilation, crt_file: CRTFile, prog_node: *std.Progr
.cc_argv = cc_argv,
.parent = null,
.builtin_mod = null,
.builtin_modules = null, // there is only one module in this compilation
});
const sub_compilation = try Compilation.create(comp.gpa, arena, .{


@ -282,7 +282,6 @@ const Writer = struct {
.ref,
.ret_implicit,
.closure_capture,
.validate_ref_ty,
=> try self.writeUnTok(stream, inst),
@ -510,8 +509,6 @@ const Writer = struct {
.dbg_stmt => try self.writeDbgStmt(stream, inst),
.closure_get => try self.writeInstNode(stream, inst),
.@"defer" => try self.writeDefer(stream, inst),
.defer_err_code => try self.writeDeferErrCode(stream, inst),
@ -611,6 +608,7 @@ const Writer = struct {
.ptr_cast_no_dest => try self.writePtrCastNoDest(stream, extended),
.restore_err_ret_index => try self.writeRestoreErrRetIndex(stream, extended),
.closure_get => try self.writeClosureGet(stream, extended),
}
}
@ -1401,6 +1399,12 @@ const Writer = struct {
var extra_index: usize = extra.end;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = self.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const fields_len = if (small.has_fields_len) blk: {
const fields_len = self.code.extra[extra_index];
extra_index += 1;
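
Aside: the decoders in this file (struct here, and the union, enum, and opaque decoders below) all read optional trailing data the same way: one `u32` in `extra`, consumed only when the corresponding `small` flag is set. Factored into a helper for illustration; the real code inlines it, as above:

const std = @import("std");

fn takeOptionalLen(extra: []const u32, extra_index: *usize, present: bool) u32 {
    if (!present) return 0;
    const len = extra[extra_index.*];
    extra_index.* += 1;
    return len;
}

test "absent fields consume no extra data" {
    const extra = [_]u32{ 3, 7 };
    var i: usize = 0;
    try std.testing.expectEqual(@as(u32, 3), takeOptionalLen(&extra, &i, true));
    try std.testing.expectEqual(@as(u32, 0), takeOptionalLen(&extra, &i, false));
    try std.testing.expectEqual(@as(usize, 1), i);
}
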
@ -1419,12 +1423,26 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
if (small.layout == .Packed and small.has_backing_int) {
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
if (small.has_backing_int) {
const backing_int_body_len = self.code.extra[extra_index];
extra_index += 1;
try stream.writeAll("Packed(");
if (backing_int_body_len == 0) {
const backing_int_ref = @as(Zir.Inst.Ref, @enumFromInt(self.code.extra[extra_index]));
const backing_int_ref: Zir.Inst.Ref = @enumFromInt(self.code.extra[extra_index]);
extra_index += 1;
try self.writeInstRef(stream, backing_int_ref);
} else {
@ -1601,6 +1619,12 @@ const Writer = struct {
break :blk tag_type_ref;
} else .none;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = self.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const body_len = if (small.has_body_len) blk: {
const body_len = self.code.extra[extra_index];
extra_index += 1;
@ -1624,6 +1648,20 @@ const Writer = struct {
});
try self.writeFlag(stream, "autoenum, ", small.auto_enum_tag);
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
if (decls_len == 0) {
try stream.writeAll("{}");
} else {
@ -1748,6 +1786,12 @@ const Writer = struct {
break :blk tag_type_ref;
} else .none;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = self.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const body_len = if (small.has_body_len) blk: {
const body_len = self.code.extra[extra_index];
extra_index += 1;
@ -1769,6 +1813,20 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
try self.writeFlag(stream, "nonexhaustive, ", small.nonexhaustive);
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
if (decls_len == 0) {
try stream.writeAll("{}, ");
} else {
@ -1854,6 +1912,12 @@ const Writer = struct {
const extra = self.code.extraData(Zir.Inst.OpaqueDecl, extended.operand);
var extra_index: usize = extra.end;
const captures_len = if (small.has_captures_len) blk: {
const captures_len = self.code.extra[extra_index];
extra_index += 1;
break :blk captures_len;
} else 0;
const decls_len = if (small.has_decls_len) blk: {
const decls_len = self.code.extra[extra_index];
extra_index += 1;
@ -1862,6 +1926,20 @@ const Writer = struct {
try stream.print("{s}, ", .{@tagName(small.name_strategy)});
if (captures_len == 0) {
try stream.writeAll("{}, ");
} else {
try stream.writeAll("{ ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
for (1..captures_len) |_| {
try stream.writeAll(", ");
try self.writeCapture(stream, @bitCast(self.code.extra[extra_index]));
extra_index += 1;
}
try stream.writeAll(" }, ");
}
if (decls_len == 0) {
try stream.writeAll("{})");
} else {
@ -2706,6 +2784,12 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
fn writeClosureGet(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
const src = LazySrcLoc.nodeOffset(@bitCast(extended.operand));
try stream.print("{d})) ", .{extended.small});
try self.writeSrc(stream, src);
}
fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void {
if (ref == .none) {
return stream.writeAll(".none");
@ -2722,6 +2806,19 @@ const Writer = struct {
return stream.print("%{d}", .{@intFromEnum(inst)});
}
fn writeCapture(self: *Writer, stream: anytype, capture: Zir.Inst.Capture) !void {
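    // As the switch below shows, a ZIR capture is one of: a re-capture from the
    // parent namespace (printed as its index, `[i]`), the value of a ZIR
    // instruction, or a declaration captured by value (`decl_val`) or by
    // reference (`decl_ref`).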
switch (capture.unwrap()) {
.nested => |i| return stream.print("[{d}]", .{i}),
.instruction => |inst| return self.writeInstIndex(stream, inst),
.decl_val => |str| try stream.print("decl_val \"{}\"", .{
std.zig.fmtEscapes(self.code.nullTerminatedString(str)),
}),
.decl_ref => |str| try stream.print("decl_ref \"{}\"", .{
std.zig.fmtEscapes(self.code.nullTerminatedString(str)),
}),
}
}
fn writeOptionalInstRef(
self: *Writer,
stream: anytype,


@ -320,11 +320,12 @@ pub const Type = struct {
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.decl.unwrap()) |decl_index| {
const decl = mod.declPtr(decl_index);
try decl.renderFullyQualifiedName(mod, writer);
} else if (struct_type.namespace.unwrap()) |namespace_index| {
} else if (ip.loadStructType(ty.toIntern()).namespace.unwrap()) |namespace_index| {
const namespace = mod.namespacePtr(namespace_index);
try namespace.renderFullyQualifiedName(mod, .empty, writer);
} else {
@ -354,16 +355,16 @@ pub const Type = struct {
try writer.writeAll("}");
},
.union_type => |union_type| {
const decl = mod.declPtr(union_type.decl);
.union_type => {
const decl = mod.declPtr(ip.loadUnionType(ty.toIntern()).decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.opaque_type => |opaque_type| {
const decl = mod.declPtr(opaque_type.decl);
.opaque_type => {
const decl = mod.declPtr(ip.loadOpaqueType(ty.toIntern()).decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.enum_type => |enum_type| {
const decl = mod.declPtr(enum_type.decl);
.enum_type => {
const decl = mod.declPtr(ip.loadEnumType(ty.toIntern()).decl);
try decl.renderFullyQualifiedName(mod, writer);
},
.func_type => |fn_info| {
@ -573,7 +574,8 @@ pub const Type = struct {
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.assumeRuntimeBitsIfFieldTypesWip(ip)) {
// In this case, we guess that hasRuntimeBits() for this type is true,
// and then later if our guess was incorrect, we emit a compile error.
@ -601,7 +603,8 @@ pub const Type = struct {
return false;
},
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.flagsPtr(ip).runtime_tag) {
.none => {
if (union_type.flagsPtr(ip).status == .field_types_wip) {
@ -628,9 +631,8 @@ pub const Type = struct {
.lazy => if (!union_type.flagsPtr(ip).status.haveFieldTypes())
return error.NeedLazy,
}
const union_obj = ip.loadUnionType(union_type);
for (0..union_obj.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
for (0..union_type.field_types.len) |field_index| {
const field_ty = Type.fromInterned(union_type.field_types.get(ip)[field_index]);
if (try field_ty.hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat))
return true;
} else {
@ -639,7 +641,7 @@ pub const Type = struct {
},
.opaque_type => true,
.enum_type => |enum_type| Type.fromInterned(enum_type.tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
.enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
// values, not types
.undef,
@ -736,15 +738,19 @@ pub const Type = struct {
.generic_poison,
=> false,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// Structs with no fields have a well-defined layout of no bits.
return struct_type.layout != .Auto or struct_type.field_types.len == 0;
},
.union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
.none, .safety => union_type.flagsPtr(ip).layout != .Auto,
.tagged => false,
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
return switch (union_type.flagsPtr(ip).runtime_tag) {
.none, .safety => union_type.flagsPtr(ip).layout != .Auto,
.tagged => false,
};
},
.enum_type => |enum_type| switch (enum_type.tag_mode) {
.enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
.auto => false,
.explicit, .nonexhaustive => true,
},
@ -1019,7 +1025,8 @@ pub const Type = struct {
.noreturn => unreachable,
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) {
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
@ -1066,7 +1073,8 @@ pub const Type = struct {
}
return .{ .scalar = big_align };
},
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
const flags = union_type.flagsPtr(ip).*;
if (flags.alignment != .none) return .{ .scalar = flags.alignment };
@ -1082,8 +1090,8 @@ pub const Type = struct {
return .{ .scalar = union_type.flagsPtr(ip).alignment };
},
.opaque_type => return .{ .scalar = .@"1" },
.enum_type => |enum_type| return .{
.scalar = Type.fromInterned(enum_type.tag_ty).abiAlignment(mod),
.enum_type => return .{
.scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiAlignment(mod),
},
// values, not types
@ -1394,7 +1402,8 @@ pub const Type = struct {
.noreturn => unreachable,
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => switch (struct_type.layout) {
@ -1439,7 +1448,8 @@ pub const Type = struct {
return AbiSizeAdvanced{ .scalar = ty.structFieldOffset(field_count, mod) };
},
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (strat) {
.sema => |sema| try sema.resolveTypeLayout(ty),
.lazy => if (!union_type.flagsPtr(ip).status.haveLayout()) return .{
@ -1455,7 +1465,7 @@ pub const Type = struct {
return .{ .scalar = union_type.size(ip).* };
},
.opaque_type => unreachable, // no size available
.enum_type => |enum_type| return AbiSizeAdvanced{ .scalar = Type.fromInterned(enum_type.tag_ty).abiSize(mod) },
.enum_type => return .{ .scalar = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).abiSize(mod) },
// values, not types
.undef,
@ -1644,7 +1654,8 @@ pub const Type = struct {
.extern_options => unreachable,
.type_info => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
const is_packed = struct_type.layout == .Packed;
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
@ -1661,7 +1672,8 @@ pub const Type = struct {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
},
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
const is_packed = ty.containerLayout(mod) == .Packed;
if (opt_sema) |sema| {
try sema.resolveTypeFields(ty);
@ -1670,19 +1682,18 @@ pub const Type = struct {
if (!is_packed) {
return (try ty.abiSizeAdvanced(mod, strat)).scalar * 8;
}
const union_obj = ip.loadUnionType(union_type);
assert(union_obj.flagsPtr(ip).status.haveFieldTypes());
assert(union_type.flagsPtr(ip).status.haveFieldTypes());
var size: u64 = 0;
for (0..union_obj.field_types.len) |field_index| {
const field_ty = union_obj.field_types.get(ip)[field_index];
for (0..union_type.field_types.len) |field_index| {
const field_ty = union_type.field_types.get(ip)[field_index];
size = @max(size, try bitSizeAdvanced(Type.fromInterned(field_ty), mod, opt_sema));
}
return size;
},
.opaque_type => unreachable,
.enum_type => |enum_type| return bitSizeAdvanced(Type.fromInterned(enum_type.tag_ty), mod, opt_sema),
.enum_type => return bitSizeAdvanced(Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty), mod, opt_sema),
// values, not types
.undef,
@ -1713,8 +1724,8 @@ pub const Type = struct {
pub fn layoutIsResolved(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.haveLayout(ip),
.union_type => |union_type| union_type.haveLayout(ip),
.struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip),
.union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip),
.array_type => |array_type| {
if ((array_type.len + @intFromBool(array_type.sentinel != .none)) == 0) return true;
return Type.fromInterned(array_type.child).layoutIsResolved(mod);
@ -1914,16 +1925,18 @@ pub const Type = struct {
/// Otherwise, returns `null`.
pub fn unionTagType(ty: Type, mod: *Module) ?Type {
const ip = &mod.intern_pool;
        return switch (ip.indexToKey(ty.toIntern())) {
            .union_type => |union_type| switch (union_type.flagsPtr(ip).runtime_tag) {
                .tagged => {
                    assert(union_type.flagsPtr(ip).status.haveFieldTypes());
                    return Type.fromInterned(union_type.enum_tag_ty);
                },
                else => null,
            },
            else => null,
        };
        switch (ip.indexToKey(ty.toIntern())) {
            .union_type => {},
            else => return null,
        }
        const union_type = ip.loadUnionType(ty.toIntern());
        switch (union_type.flagsPtr(ip).runtime_tag) {
            .tagged => {
                assert(union_type.flagsPtr(ip).status.haveFieldTypes());
                return Type.fromInterned(union_type.enum_tag_ty);
            },
            else => return null,
        }
}
/// Same as `unionTagType` but includes safety tag.
@ -1931,7 +1944,8 @@ pub const Type = struct {
pub fn unionTagTypeSafety(ty: Type, mod: *Module) ?Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.hasTag(ip)) return null;
assert(union_type.haveFieldTypes(ip));
return Type.fromInterned(union_type.enum_tag_ty);
@ -1981,17 +1995,16 @@ pub const Type = struct {
pub fn unionGetLayout(ty: Type, mod: *Module) Module.UnionLayout {
const ip = &mod.intern_pool;
const union_type = ip.indexToKey(ty.toIntern()).union_type;
const union_obj = ip.loadUnionType(union_type);
const union_obj = ip.loadUnionType(ty.toIntern());
return mod.getUnionLayout(union_obj);
}
pub fn containerLayout(ty: Type, mod: *Module) std.builtin.Type.ContainerLayout {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.layout,
.struct_type => ip.loadStructType(ty.toIntern()).layout,
.anon_struct_type => .Auto,
.union_type => |union_type| union_type.flagsPtr(ip).layout,
.union_type => ip.loadUnionType(ty.toIntern()).flagsPtr(ip).layout,
else => unreachable,
};
}
@ -2095,22 +2108,15 @@ pub const Type = struct {
/// Asserts the type is an array or vector or struct.
pub fn arrayLen(ty: Type, mod: *const Module) u64 {
return arrayLenIp(ty, &mod.intern_pool);
return ty.arrayLenIp(&mod.intern_pool);
}
pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 {
return switch (ip.indexToKey(ty.toIntern())) {
.vector_type => |vector_type| vector_type.len,
.array_type => |array_type| array_type.len,
.struct_type => |struct_type| struct_type.field_types.len,
.anon_struct_type => |tuple| tuple.types.len,
else => unreachable,
};
return ip.aggregateTypeLen(ty.toIntern());
}
pub fn arrayLenIncludingSentinel(ty: Type, mod: *const Module) u64 {
return ty.arrayLen(mod) + @intFromBool(ty.sentinel(mod) != null);
return mod.intern_pool.aggregateTypeLenIncludingSentinel(ty.toIntern());
}
pub fn vectorLen(ty: Type, mod: *const Module) u32 {
@ -2199,8 +2205,8 @@ pub const Type = struct {
.c_ulonglong_type => return .{ .signedness = .unsigned, .bits = target.c_type_bit_size(.ulonglong) },
else => switch (ip.indexToKey(ty.toIntern())) {
.int_type => |int_type| return int_type,
.struct_type => |t| ty = Type.fromInterned(t.backingIntType(ip).*),
.enum_type => |enum_type| ty = Type.fromInterned(enum_type.tag_ty),
.struct_type => ty = Type.fromInterned(ip.loadStructType(ty.toIntern()).backingIntType(ip).*),
.enum_type => ty = Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
.vector_type => |vector_type| ty = Type.fromInterned(vector_type.child),
.error_set_type, .inferred_error_set_type => {
@ -2463,7 +2469,8 @@ pub const Type = struct {
.generic_poison => unreachable,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveFieldTypes(ip));
if (struct_type.knownNonOpv(ip))
return null;
@ -2505,11 +2512,11 @@ pub const Type = struct {
} })));
},
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
const tag_val = (try Type.fromInterned(union_obj.enum_tag_ty).onePossibleValue(mod)) orelse
return null;
if (union_obj.field_names.len == 0) {
if (union_obj.field_types.len == 0) {
const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
return Value.fromInterned(only);
}
@ -2524,45 +2531,48 @@ pub const Type = struct {
return Value.fromInterned(only);
},
.opaque_type => return null,
            .enum_type => |enum_type| switch (enum_type.tag_mode) {
                .nonexhaustive => {
                    if (enum_type.tag_ty == .comptime_int_type) return null;

                    if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
                        const only = try mod.intern(.{ .enum_tag = .{
                            .ty = ty.toIntern(),
                            .int = int_opv.toIntern(),
                        } });
                        return Value.fromInterned(only);
                    }

                    return null;
                },
                .auto, .explicit => {
                    if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;

                    switch (enum_type.names.len) {
                        0 => {
                            const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
                            return Value.fromInterned(only);
                        },
                        1 => {
                            if (enum_type.values.len == 0) {
                                const only = try mod.intern(.{ .enum_tag = .{
                                    .ty = ty.toIntern(),
                                    .int = try mod.intern(.{ .int = .{
                                        .ty = enum_type.tag_ty,
                                        .storage = .{ .u64 = 0 },
                                    } }),
                                } });
                                return Value.fromInterned(only);
                            } else {
                                return Value.fromInterned(enum_type.values.get(ip)[0]);
                            }
                        },
                        else => return null,
                    }
                },
            },
            .enum_type => {
                const enum_type = ip.loadEnumType(ty.toIntern());
                switch (enum_type.tag_mode) {
                    .nonexhaustive => {
                        if (enum_type.tag_ty == .comptime_int_type) return null;

                        if (try Type.fromInterned(enum_type.tag_ty).onePossibleValue(mod)) |int_opv| {
                            const only = try mod.intern(.{ .enum_tag = .{
                                .ty = ty.toIntern(),
                                .int = int_opv.toIntern(),
                            } });
                            return Value.fromInterned(only);
                        }

                        return null;
                    },
                    .auto, .explicit => {
                        if (Type.fromInterned(enum_type.tag_ty).hasRuntimeBits(mod)) return null;

                        switch (enum_type.names.len) {
                            0 => {
                                const only = try mod.intern(.{ .empty_enum_value = ty.toIntern() });
                                return Value.fromInterned(only);
                            },
                            1 => {
                                if (enum_type.values.len == 0) {
                                    const only = try mod.intern(.{ .enum_tag = .{
                                        .ty = ty.toIntern(),
                                        .int = try mod.intern(.{ .int = .{
                                            .ty = enum_type.tag_ty,
                                            .storage = .{ .u64 = 0 },
                                        } }),
                                    } });
                                    return Value.fromInterned(only);
                                } else {
                                    return Value.fromInterned(enum_type.values.get(ip)[0]);
                                }
                            },
                            else => return null,
                        }
                    },
                }
            },
// values, not types
@ -2676,7 +2686,8 @@ pub const Type = struct {
.type_info,
=> true,
},
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
// packed structs cannot be comptime-only because they have a well-defined
// memory layout and every field has a well-defined bit pattern.
if (struct_type.layout == .Packed)
@ -2726,38 +2737,40 @@ pub const Type = struct {
return false;
},
.union_type => |union_type| switch (union_type.flagsPtr(ip).requires_comptime) {
.no, .wip => false,
.yes => true,
.unknown => {
// The type is not resolved; assert that we have a Sema.
const sema = opt_sema.?;
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
switch (union_type.flagsPtr(ip).requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
// The type is not resolved; assert that we have a Sema.
const sema = opt_sema.?;
if (union_type.flagsPtr(ip).status == .field_types_wip)
return false;
if (union_type.flagsPtr(ip).status == .field_types_wip)
return false;
union_type.flagsPtr(ip).requires_comptime = .wip;
errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
union_type.flagsPtr(ip).requires_comptime = .wip;
errdefer union_type.flagsPtr(ip).requires_comptime = .unknown;
try sema.resolveTypeFieldsUnion(ty, union_type);
try sema.resolveTypeFieldsUnion(ty, union_type);
const union_obj = ip.loadUnionType(union_type);
for (0..union_obj.field_types.len) |field_idx| {
const field_ty = union_obj.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
union_obj.flagsPtr(ip).requires_comptime = .yes;
return true;
for (0..union_type.field_types.len) |field_idx| {
const field_ty = union_type.field_types.get(ip)[field_idx];
if (try Type.fromInterned(field_ty).comptimeOnlyAdvanced(mod, opt_sema)) {
union_type.flagsPtr(ip).requires_comptime = .yes;
return true;
}
}
}
union_obj.flagsPtr(ip).requires_comptime = .no;
return false;
},
union_type.flagsPtr(ip).requires_comptime = .no;
return false;
},
}
},
.opaque_type => false,
.enum_type => |enum_type| return Type.fromInterned(enum_type.tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
.enum_type => return Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty).comptimeOnlyAdvanced(mod, opt_sema),
// values, not types
.undef,
@ -2830,11 +2843,12 @@ pub const Type = struct {
/// Returns null if the type has no namespace.
pub fn getNamespaceIndex(ty: Type, mod: *Module) InternPool.OptionalNamespaceIndex {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.opaque_type => |opaque_type| opaque_type.namespace.toOptional(),
.struct_type => |struct_type| struct_type.namespace,
.union_type => |union_type| union_type.namespace.toOptional(),
.enum_type => |enum_type| enum_type.namespace,
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.opaque_type => ip.loadOpaqueType(ty.toIntern()).namespace,
.struct_type => ip.loadStructType(ty.toIntern()).namespace,
.union_type => ip.loadUnionType(ty.toIntern()).namespace,
.enum_type => ip.loadEnumType(ty.toIntern()).namespace,
else => .none,
};
@ -2920,16 +2934,18 @@ pub const Type = struct {
/// Asserts the type is an enum or a union.
pub fn intTagType(ty: Type, mod: *Module) Type {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.union_type => |union_type| Type.fromInterned(union_type.enum_tag_ty).intTagType(mod),
.enum_type => |enum_type| Type.fromInterned(enum_type.tag_ty),
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.union_type => Type.fromInterned(ip.loadUnionType(ty.toIntern()).enum_tag_ty).intTagType(mod),
.enum_type => Type.fromInterned(ip.loadEnumType(ty.toIntern()).tag_ty),
else => unreachable,
};
}
pub fn isNonexhaustiveEnum(ty: Type, mod: *Module) bool {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.enum_type => |enum_type| switch (enum_type.tag_mode) {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.enum_type => switch (ip.loadEnumType(ty.toIntern()).tag_mode) {
.nonexhaustive => true,
.auto, .explicit => false,
},
@ -2953,21 +2969,21 @@ pub const Type = struct {
}
pub fn enumFields(ty: Type, mod: *Module) InternPool.NullTerminatedString.Slice {
return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names;
return mod.intern_pool.loadEnumType(ty.toIntern()).names;
}
pub fn enumFieldCount(ty: Type, mod: *Module) usize {
return mod.intern_pool.indexToKey(ty.toIntern()).enum_type.names.len;
return mod.intern_pool.loadEnumType(ty.toIntern()).names.len;
}
pub fn enumFieldName(ty: Type, field_index: usize, mod: *Module) InternPool.NullTerminatedString {
const ip = &mod.intern_pool;
return ip.indexToKey(ty.toIntern()).enum_type.names.get(ip)[field_index];
return ip.loadEnumType(ty.toIntern()).names.get(ip)[field_index];
}
pub fn enumFieldIndex(ty: Type, field_name: InternPool.NullTerminatedString, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
return enum_type.nameIndex(ip, field_name);
}
@ -2976,7 +2992,7 @@ pub const Type = struct {
/// declaration order, or `null` if `enum_tag` does not match any field.
pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, mod: *Module) ?u32 {
const ip = &mod.intern_pool;
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
const enum_type = ip.loadEnumType(ty.toIntern());
const int_tag = switch (ip.indexToKey(enum_tag.toIntern())) {
.int => enum_tag.toIntern(),
.enum_tag => |info| info.int,
@ -2990,7 +3006,7 @@ pub const Type = struct {
pub fn structFieldName(ty: Type, field_index: u32, mod: *Module) InternPool.OptionalNullTerminatedString {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.fieldName(ip, field_index),
.struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, field_index),
.anon_struct_type => |anon_struct| anon_struct.fieldName(ip, field_index),
else => unreachable,
};
@ -3010,7 +3026,7 @@ pub const Type = struct {
pub fn structFieldCount(ty: Type, mod: *Module) u32 {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.field_types.len,
.struct_type => ip.loadStructType(ty.toIntern()).field_types.len,
.anon_struct_type => |anon_struct| anon_struct.types.len,
else => unreachable,
};
@ -3020,9 +3036,9 @@ pub const Type = struct {
pub fn structFieldType(ty: Type, index: usize, mod: *Module) Type {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| Type.fromInterned(struct_type.field_types.get(ip)[index]),
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
.struct_type => Type.fromInterned(ip.loadStructType(ty.toIntern()).field_types.get(ip)[index]),
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
return Type.fromInterned(union_obj.field_types.get(ip)[index]);
},
.anon_struct_type => |anon_struct| Type.fromInterned(anon_struct.types.get(ip)[index]),
@ -3033,7 +3049,8 @@ pub const Type = struct {
pub fn structFieldAlign(ty: Type, index: usize, mod: *Module) Alignment {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.layout != .Packed);
const explicit_align = struct_type.fieldAlign(ip, index);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[index]);
@ -3042,8 +3059,8 @@ pub const Type = struct {
.anon_struct_type => |anon_struct| {
return Type.fromInterned(anon_struct.types.get(ip)[index]).abiAlignment(mod);
},
.union_type => |union_type| {
const union_obj = ip.loadUnionType(union_type);
.union_type => {
const union_obj = ip.loadUnionType(ty.toIntern());
return mod.unionFieldNormalAlignment(union_obj, @intCast(index));
},
else => unreachable,
@ -3053,7 +3070,8 @@ pub const Type = struct {
pub fn structFieldDefaultValue(ty: Type, index: usize, mod: *Module) Value {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
const val = struct_type.fieldInit(ip, index);
// TODO: avoid using `unreachable` to indicate this.
if (val == .none) return Value.@"unreachable";
@ -3072,7 +3090,8 @@ pub const Type = struct {
pub fn structFieldValueComptime(ty: Type, mod: *Module, index: usize) !?Value {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.fieldIsComptime(ip, index)) {
assert(struct_type.haveFieldInits(ip));
return Value.fromInterned(struct_type.field_inits.get(ip)[index]);
@ -3095,7 +3114,7 @@ pub const Type = struct {
pub fn structFieldIsComptime(ty: Type, index: usize, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.fieldIsComptime(ip, index),
.struct_type => ip.loadStructType(ty.toIntern()).fieldIsComptime(ip, index),
.anon_struct_type => |anon_struct| anon_struct.values.get(ip)[index] != .none,
else => unreachable,
};
@ -3110,7 +3129,8 @@ pub const Type = struct {
pub fn structFieldOffset(ty: Type, index: usize, mod: *Module) u64 {
const ip = &mod.intern_pool;
switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
assert(struct_type.haveLayout(ip));
assert(struct_type.layout != .Packed);
return struct_type.offsets.get(ip)[index];
@ -3137,11 +3157,11 @@ pub const Type = struct {
return offset;
},
.union_type => |union_type| {
.union_type => {
const union_type = ip.loadUnionType(ty.toIntern());
if (!union_type.hasTag(ip))
return 0;
const union_obj = ip.loadUnionType(union_type);
const layout = mod.getUnionLayout(union_obj);
const layout = mod.getUnionLayout(union_type);
if (layout.tag_align.compare(.gte, layout.payload_align)) {
// {Tag, Payload}
return layout.payload_align.forward(layout.tag_size);
@ -3160,17 +3180,8 @@ pub const Type = struct {
}
pub fn declSrcLocOrNull(ty: Type, mod: *Module) ?Module.SrcLoc {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
return mod.declPtr(struct_type.decl.unwrap() orelse return null).srcLoc(mod);
},
.union_type => |union_type| {
return mod.declPtr(union_type.decl).srcLoc(mod);
},
.opaque_type => |opaque_type| mod.opaqueSrcLoc(opaque_type),
.enum_type => |enum_type| mod.declPtr(enum_type.decl).srcLoc(mod),
else => null,
};
const decl = ty.getOwnerDeclOrNull(mod) orelse return null;
return mod.declPtr(decl).srcLoc(mod);
}
pub fn getOwnerDecl(ty: Type, mod: *Module) InternPool.DeclIndex {
@ -3178,11 +3189,12 @@ pub const Type = struct {
}
pub fn getOwnerDeclOrNull(ty: Type, mod: *Module) ?InternPool.DeclIndex {
return switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| struct_type.decl.unwrap(),
.union_type => |union_type| union_type.decl,
.opaque_type => |opaque_type| opaque_type.decl,
.enum_type => |enum_type| enum_type.decl,
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).decl.unwrap(),
.union_type => ip.loadUnionType(ty.toIntern()).decl,
.opaque_type => ip.loadOpaqueType(ty.toIntern()).decl,
.enum_type => ip.loadEnumType(ty.toIntern()).decl,
else => null,
};
}
@ -3194,7 +3206,8 @@ pub const Type = struct {
pub fn isTuple(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
@ -3215,7 +3228,8 @@ pub const Type = struct {
pub fn isTupleOrAnonStruct(ty: Type, mod: *Module) bool {
const ip = &mod.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => |struct_type| {
.struct_type => {
const struct_type = ip.loadStructType(ty.toIntern());
if (struct_type.layout == .Packed) return false;
if (struct_type.decl == .none) return false;
return struct_type.flagsPtr(ip).is_tuple;
@ -3262,16 +3276,28 @@ pub const Type = struct {
}
pub fn typeDeclInst(ty: Type, zcu: *const Zcu) ?InternPool.TrackedInst.Index {
return switch (zcu.intern_pool.indexToKey(ty.toIntern())) {
inline .struct_type,
.union_type,
.enum_type,
.opaque_type,
=> |info| info.zir_index.unwrap(),
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).zir_index.unwrap(),
.union_type => ip.loadUnionType(ty.toIntern()).zir_index,
.enum_type => ip.loadEnumType(ty.toIntern()).zir_index.unwrap(),
.opaque_type => ip.loadOpaqueType(ty.toIntern()).zir_index,
else => null,
};
}
/// Given a namespace type, returns its list of captured values.
pub fn getCaptures(ty: Type, zcu: *const Zcu) InternPool.CaptureValue.Slice {
const ip = &zcu.intern_pool;
return switch (ip.indexToKey(ty.toIntern())) {
.struct_type => ip.loadStructType(ty.toIntern()).captures,
.union_type => ip.loadUnionType(ty.toIntern()).captures,
.enum_type => ip.loadEnumType(ty.toIntern()).captures,
.opaque_type => ip.loadOpaqueType(ty.toIntern()).captures,
else => unreachable,
};
}
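    // Illustrative use (hypothetical caller; assumes `CaptureValue.Slice`
    // exposes `get` like other InternPool slices):
    //
    //     const captures = ty.getCaptures(zcu);
    //     for (captures.get(ip)) |capture| {
    //         // Two containers instantiated from the same AST node are the
    //         // same type exactly when all of these capture values match.
    //         _ = capture;
    //     }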
pub const @"u1": Type = .{ .ip_index = .u1_type };
pub const @"u8": Type = .{ .ip_index = .u8_type };
pub const @"u16": Type = .{ .ip_index = .u16_type };


@ -1242,3 +1242,24 @@ test "Non-exhaustive enum backed by comptime_int" {
e = @as(E, @enumFromInt(378089457309184723749));
try expect(@intFromEnum(e) == 378089457309184723749);
}
test "matching captures causes enum equivalence" {
const S = struct {
fn Nonexhaustive(comptime I: type) type {
const UTag = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(I).Int.bits,
} });
return enum(UTag) { _ };
}
};
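    // `Nonexhaustive` captures only `UTag`, and `UTag` is `u8` for both a
    // `u8` and an `i8` argument, so the two instantiations below resolve to
    // the same enum type.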
comptime assert(S.Nonexhaustive(u8) == S.Nonexhaustive(i8));
comptime assert(S.Nonexhaustive(u16) == S.Nonexhaustive(i16));
comptime assert(S.Nonexhaustive(u8) != S.Nonexhaustive(u16));
const a: S.Nonexhaustive(u8) = @enumFromInt(123);
const b: S.Nonexhaustive(i8) = @enumFromInt(123);
comptime assert(@TypeOf(a) == @TypeOf(b));
try expect(@intFromEnum(a) == @intFromEnum(b));
}


@ -371,8 +371,12 @@ test "extern function used as generic parameter" {
const S = struct {
extern fn usedAsGenericParameterFoo() void;
extern fn usedAsGenericParameterBar() void;
inline fn usedAsGenericParameterBaz(comptime _: anytype) type {
return struct {};
inline fn usedAsGenericParameterBaz(comptime token: anytype) type {
return struct {
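            // Referencing `token` inside the struct records it as a capture,
            // so instantiations with different extern functions remain
            // distinct types.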
comptime {
_ = token;
}
};
}
};
try expect(S.usedAsGenericParameterBaz(S.usedAsGenericParameterFoo) !=


@ -23,8 +23,12 @@ test "@src" {
test "@src used as a comptime parameter" {
const S = struct {
fn Foo(comptime _: std.builtin.SourceLocation) type {
return struct {};
fn Foo(comptime src: std.builtin.SourceLocation) type {
return struct {
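            // Capturing `src` keeps each `Foo(@src())` instantiation a
            // distinct type, since the two captured source locations differ.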
comptime {
_ = src;
}
};
}
};
const T1 = S.Foo(@src());


@ -2127,3 +2127,26 @@ test "struct containing optional pointer to array of @This()" {
_ = &s;
try expect(s.x.?[0].x == null);
}
test "matching captures causes struct equivalence" {
const S = struct {
fn UnsignedWrapper(comptime I: type) type {
const bits = @typeInfo(I).Int.bits;
return struct {
x: @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = bits,
} }),
};
}
};
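    // Only `bits` is captured, and it is 8 for both `u8` and `i8`, so the
    // struct declaration resolves to the same type for both instantiations.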
comptime assert(S.UnsignedWrapper(u8) == S.UnsignedWrapper(i8));
comptime assert(S.UnsignedWrapper(u16) == S.UnsignedWrapper(i16));
comptime assert(S.UnsignedWrapper(u8) != S.UnsignedWrapper(u16));
const a: S.UnsignedWrapper(u8) = .{ .x = 10 };
const b: S.UnsignedWrapper(i8) = .{ .x = 10 };
comptime assert(@TypeOf(a) == @TypeOf(b));
try expect(a.x == b.x);
}


@ -2,6 +2,7 @@ const std = @import("std");
const builtin = @import("builtin");
const Type = std.builtin.Type;
const testing = std.testing;
const assert = std.debug.assert;
fn testTypes(comptime types: []const type) !void {
inline for (types) |testType| {
@ -734,3 +735,28 @@ test "struct field names sliced at comptime from larger string" {
try testing.expectEqualStrings("f3", gen_fields[2].name);
}
}
test "matching captures causes opaque equivalence" {
const S = struct {
fn UnsignedId(comptime I: type) type {
const U = @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = @typeInfo(I).Int.bits,
} });
return opaque {
fn id(x: U) U {
return x;
}
};
}
};
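    // The opaque captures only `U`, which is `u8` for both instantiations,
    // so `UnsignedId(u8)` and `UnsignedId(i8)` name the same type.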
comptime assert(S.UnsignedId(u8) == S.UnsignedId(i8));
comptime assert(S.UnsignedId(u16) == S.UnsignedId(i16));
comptime assert(S.UnsignedId(u8) != S.UnsignedId(u16));
const a = S.UnsignedId(u8).id(123);
const b = S.UnsignedId(i8).id(123);
comptime assert(@TypeOf(a) == @TypeOf(b));
try testing.expect(a == b);
}


@ -164,21 +164,30 @@ test "fn param" {
}
fn TypeFromFn(comptime T: type) type {
_ = T;
return struct {};
return struct {
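        // Under AST-node-plus-captures equivalence, an empty `struct {}` here
        // would make every call return the same type; referencing `T` records
        // it as a capture, keeping distinct instantiations distinct.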
comptime {
_ = T;
}
};
}
fn TypeFromFn2(comptime T1: type, comptime T2: type) type {
_ = T1;
_ = T2;
return struct {};
return struct {
comptime {
_ = T1;
_ = T2;
}
};
}
fn TypeFromFnB(comptime T1: type, comptime T2: type, comptime T3: type) type {
_ = T1;
_ = T2;
_ = T3;
return struct {};
return struct {
comptime {
_ = T1;
_ = T2;
_ = T3;
}
};
}
/// Replaces integers in `actual` with '0' before doing the test.


@ -2273,3 +2273,30 @@ test "create union(enum) from other union(enum)" {
else => {},
}
}
test "matching captures causes union equivalence" {
const S = struct {
fn SignedUnsigned(comptime I: type) type {
const bits = @typeInfo(I).Int.bits;
return union {
u: @Type(.{ .Int = .{
.signedness = .unsigned,
.bits = bits,
} }),
i: @Type(.{ .Int = .{
.signedness = .signed,
.bits = bits,
} }),
};
}
};
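    // Only `bits` is captured; it is 8 for both `u8` and `i8`, so the two
    // union instantiations below are the same type.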
comptime assert(S.SignedUnsigned(u8) == S.SignedUnsigned(i8));
comptime assert(S.SignedUnsigned(u16) == S.SignedUnsigned(i16));
comptime assert(S.SignedUnsigned(u8) != S.SignedUnsigned(u16));
const a: S.SignedUnsigned(u8) = .{ .u = 10 };
const b: S.SignedUnsigned(i8) = .{ .u = 10 };
comptime assert(@TypeOf(a) == @TypeOf(b));
try expect(a.u == b.u);
}


@ -74,7 +74,7 @@ comptime {
// target=native
//
// :2:5: error: tuple cannot have non-numeric field 'foo'
// :16:5: error: tuple field 3 exceeds tuple field count
// :16:5: error: tuple field name '3' does not match field index 0
// :30:5: error: comptime field without default initialization value
// :44:5: error: extern struct fields cannot be marked comptime
// :58:5: error: alignment in a packed struct field must be set to 0


@ -30,6 +30,6 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :13:16: error: enum field(s) missing in union
// :13:16: error: enum fields missing in union
// :1:13: note: field 'arst' missing, declared here
// :1:13: note: enum declared here


@ -26,7 +26,7 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :12:16: error: enum field(s) missing in union
// :12:16: error: enum fields missing in union
// :1:13: note: field 'signed' missing, declared here
// :1:13: note: field 'unsigned' missing, declared here
// :1:13: note: enum declared here