stage2: rework runtime-bits and comptime-only type queries

AstGen:
 * rename the `known_has_bits` flag to `known_non_opv` to better
   reflect what it actually means.
 * add a `known_comptime_only` flag.
 * make the flag computations take advantage of primitive identifiers
   and the fact that Zig has no shadowing (see the sketch after this
   list).
 * correct the `known_non_opv` flag for function bodies.
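
A minimal illustration of what the identifier rules can now prove; the
struct names here are hypothetical, not from this commit:

    // Because Zig has no shadowing, `u32` in a type expression always
    // names the primitive, so AstGen can conclude `known_non_opv`
    // without semantic analysis:
    const S1 = struct { a: u32 };  // known_non_opv
    // `type` values exist only at compile time, so `type` implies
    // both flags:
    const S2 = struct { t: type }; // known_non_opv, known_comptime_only
    // No fields: exactly one possible value, so neither flag is set.
    const S3 = struct {};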

Sema:
 * Rename `hasCodeGenBits` to `hasRuntimeBits` to better reflect what it
   does.
   - This function became a bit more complicated in this commit because
     of the duality of function bodies: on one hand they have runtime
     bits, but on the other hand they must be comptime-known.
 * `WipAnonDecl` now takes a `LazySrcLoc` parameter and performs the
   type resolution it needs during `finish()` (see the example after
   this list).
 * Implement comptime `@ptrToInt`.
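
A representative `WipAnonDecl` call site, as it now appears throughout
the diff below:

    var anon_decl = try block.startAnonDecl(src);
    defer anon_decl.deinit();
    const decl = try anon_decl.finish(ty, val);

And a hypothetical usage sketch of comptime `@ptrToInt`; before this
change the operand unconditionally required a runtime block:

    const ptr = @intToPtr(*const u8, 0x1000);
    comptime {
        // now resolves to a comptime-known `usize` constant
        if (@ptrToInt(ptr) != 0x1000) unreachable;
    }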

Codegen:
 * Improve handling of `decl_ref` lowering; make it work for
   comptime-known pointer-to-int values.
   - The same change had to be made separately in each backend; perhaps
     we should look into merging the implementations of `genTypedValue`
     across x86, arm, aarch64, and riscv (see the sketch after this
     list).
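
A rough sketch of what that merge could look like (hypothetical; this
commit still keeps one copy per backend). The linker dispatch is
identical everywhere, and only the wrapping into a backend `MCValue`
differs:

    fn lowerDeclRefGotAddr(bin_file: *link.File, decl: *Module.Decl, ptr_bytes: u64) !u64 {
        decl.alive = true;
        if (bin_file.cast(link.File.Elf)) |elf_file| {
            const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
            return got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
        } else if (bin_file.cast(link.File.Coff)) |coff_file| {
            return coff_file.offset_table_virtual_address +
                decl.link.coff.offset_table_index * ptr_bytes;
        } else if (bin_file.cast(link.File.Plan9)) |p9| {
            try p9.seeDecl(decl);
            return p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
        }
        // MachO repurposes `.memory` for the GOT symbol index and would
        // need its own branch, as in the per-backend copies.
        return error.TodoNonElfDeclPointer; // hypothetical error name
    }
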
Andrew Kelley 2022-01-24 20:38:56 -07:00
parent 8bb679bc6e
commit a2abbeef90
24 changed files with 1124 additions and 489 deletions


@ -3828,7 +3828,8 @@ fn structDeclInner(
.fields_len = 0,
.body_len = 0,
.decls_len = 0,
.known_has_bits = false,
.known_non_opv = false,
.known_comptime_only = false,
});
return indexToRef(decl_inst);
}
@ -3869,7 +3870,8 @@ fn structDeclInner(
var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size);
defer wip_members.deinit();
var known_has_bits = false;
var known_non_opv = false;
var known_comptime_only = false;
for (container_decl.ast.members) |member_node| {
const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
.decl => continue,
@ -3892,7 +3894,10 @@ fn structDeclInner(
const doc_comment_index = try astgen.docCommentAsString(member.firstToken());
wip_members.appendToField(doc_comment_index);
known_has_bits = known_has_bits or nodeImpliesRuntimeBits(tree, member.ast.type_expr);
known_non_opv = known_non_opv or
nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr);
known_comptime_only = known_comptime_only or
nodeImpliesComptimeOnly(tree, member.ast.type_expr);
const have_align = member.ast.align_expr != 0;
const have_value = member.ast.value_expr != 0;
@ -3926,7 +3931,8 @@ fn structDeclInner(
.body_len = @intCast(u32, body.len),
.fields_len = field_count,
.decls_len = decl_count,
.known_has_bits = known_has_bits,
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
});
wip_members.finishBits(bits_per_field);
@ -8195,7 +8201,9 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.Ev
}
}
fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
/// Returns `true` if it is known the type expression has more than one possible value;
/// `false` otherwise.
fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.Index) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
@ -8241,7 +8249,6 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.multiline_string_literal,
.char_literal,
.unreachable_literal,
.identifier,
.error_set_decl,
.container_decl,
.container_decl_trailing,
@ -8355,6 +8362,11 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.builtin_call_comma,
.builtin_call_two,
.builtin_call_two_comma,
// these are function bodies, not pointers
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
=> return false,
// Forward the question to the LHS sub-expression.
@ -8366,10 +8378,6 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.unwrap_optional,
=> node = node_datas[node].lhs,
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
.ptr_type_aligned,
.ptr_type_sentinel,
.ptr_type,
@ -8378,6 +8386,301 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.anyframe_type,
.array_type_sentinel,
=> return true,
.identifier => {
const main_tokens = tree.nodes.items(.main_token);
const ident_bytes = tree.tokenSlice(main_tokens[node]);
if (primitives.get(ident_bytes)) |primitive| switch (primitive) {
.anyerror_type,
.anyframe_type,
.anyopaque_type,
.bool_type,
.c_int_type,
.c_long_type,
.c_longdouble_type,
.c_longlong_type,
.c_short_type,
.c_uint_type,
.c_ulong_type,
.c_ulonglong_type,
.c_ushort_type,
.comptime_float_type,
.comptime_int_type,
.f128_type,
.f16_type,
.f32_type,
.f64_type,
.i16_type,
.i32_type,
.i64_type,
.i128_type,
.i8_type,
.isize_type,
.type_type,
.u16_type,
.u32_type,
.u64_type,
.u128_type,
.u1_type,
.u8_type,
.usize_type,
=> return true,
.void_type,
.bool_false,
.bool_true,
.null_value,
.undef,
.noreturn_type,
=> return false,
else => unreachable, // that's all the values from `primitives`.
} else {
return false;
}
},
}
}
}
/// Returns `true` if it is known the expression is a type that cannot be used at runtime;
/// `false` otherwise.
fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
var node = start_node;
while (true) {
switch (node_tags[node]) {
.root,
.@"usingnamespace",
.test_decl,
.switch_case,
.switch_case_one,
.container_field_init,
.container_field_align,
.container_field,
.asm_output,
.asm_input,
.global_var_decl,
.local_var_decl,
.simple_var_decl,
.aligned_var_decl,
=> unreachable,
.@"return",
.@"break",
.@"continue",
.bit_not,
.bool_not,
.@"defer",
.@"errdefer",
.address_of,
.negation,
.negation_wrap,
.@"resume",
.array_type,
.@"suspend",
.@"anytype",
.fn_decl,
.anyframe_literal,
.integer_literal,
.float_literal,
.enum_literal,
.string_literal,
.multiline_string_literal,
.char_literal,
.unreachable_literal,
.error_set_decl,
.container_decl,
.container_decl_trailing,
.container_decl_two,
.container_decl_two_trailing,
.container_decl_arg,
.container_decl_arg_trailing,
.tagged_union,
.tagged_union_trailing,
.tagged_union_two,
.tagged_union_two_trailing,
.tagged_union_enum_tag,
.tagged_union_enum_tag_trailing,
.@"asm",
.asm_simple,
.add,
.add_wrap,
.add_sat,
.array_cat,
.array_mult,
.assign,
.assign_bit_and,
.assign_bit_or,
.assign_shl,
.assign_shl_sat,
.assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
.assign_sub_sat,
.assign_mod,
.assign_add,
.assign_add_wrap,
.assign_add_sat,
.assign_mul,
.assign_mul_wrap,
.assign_mul_sat,
.bang_equal,
.bit_and,
.bit_or,
.shl,
.shl_sat,
.shr,
.bit_xor,
.bool_and,
.bool_or,
.div,
.equal_equal,
.error_union,
.greater_or_equal,
.greater_than,
.less_or_equal,
.less_than,
.merge_error_sets,
.mod,
.mul,
.mul_wrap,
.mul_sat,
.switch_range,
.field_access,
.sub,
.sub_wrap,
.sub_sat,
.slice,
.slice_open,
.slice_sentinel,
.deref,
.array_access,
.error_value,
.while_simple,
.while_cont,
.for_simple,
.if_simple,
.@"catch",
.@"orelse",
.array_init_one,
.array_init_one_comma,
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
.struct_init_one,
.struct_init_one_comma,
.struct_init_dot_two,
.struct_init_dot_two_comma,
.struct_init_dot,
.struct_init_dot_comma,
.struct_init,
.struct_init_comma,
.@"while",
.@"if",
.@"for",
.@"switch",
.switch_comma,
.call_one,
.call_one_comma,
.async_call_one,
.async_call_one_comma,
.call,
.call_comma,
.async_call,
.async_call_comma,
.block_two,
.block_two_semicolon,
.block,
.block_semicolon,
.builtin_call,
.builtin_call_comma,
.builtin_call_two,
.builtin_call_two_comma,
.ptr_type_aligned,
.ptr_type_sentinel,
.ptr_type,
.ptr_type_bit_range,
.optional_type,
.anyframe_type,
.array_type_sentinel,
=> return false,
// these are function bodies, not pointers
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
=> return true,
// Forward the question to the LHS sub-expression.
.grouped_expression,
.@"try",
.@"await",
.@"comptime",
.@"nosuspend",
.unwrap_optional,
=> node = node_datas[node].lhs,
.identifier => {
const main_tokens = tree.nodes.items(.main_token);
const ident_bytes = tree.tokenSlice(main_tokens[node]);
if (primitives.get(ident_bytes)) |primitive| switch (primitive) {
.anyerror_type,
.anyframe_type,
.anyopaque_type,
.bool_type,
.c_int_type,
.c_long_type,
.c_longdouble_type,
.c_longlong_type,
.c_short_type,
.c_uint_type,
.c_ulong_type,
.c_ulonglong_type,
.c_ushort_type,
.f128_type,
.f16_type,
.f32_type,
.f64_type,
.i16_type,
.i32_type,
.i64_type,
.i128_type,
.i8_type,
.isize_type,
.u16_type,
.u32_type,
.u64_type,
.u128_type,
.u1_type,
.u8_type,
.usize_type,
.void_type,
.bool_false,
.bool_true,
.null_value,
.undef,
.noreturn_type,
=> return false,
.comptime_float_type,
.comptime_int_type,
.type_type,
=> return true,
else => unreachable, // that's all the values from `primitives`.
} else {
return false;
}
},
}
}
}
@ -10118,7 +10421,8 @@ const GenZir = struct {
fields_len: u32,
decls_len: u32,
layout: std.builtin.TypeInfo.ContainerLayout,
known_has_bits: bool,
known_non_opv: bool,
known_comptime_only: bool,
}) !void {
const astgen = gz.astgen;
const gpa = astgen.gpa;
@ -10148,7 +10452,8 @@ const GenZir = struct {
.has_body_len = args.body_len != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
.known_has_bits = args.known_has_bits,
.known_non_opv = args.known_non_opv,
.known_comptime_only = args.known_comptime_only,
.name_strategy = gz.anon_name_strategy,
.layout = args.layout,
}),


@ -2703,7 +2703,6 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
const module = comp.bin_file.options.module.?;
assert(decl.has_tv);
assert(decl.ty.hasCodeGenBits());
if (decl.alive) {
try module.linkerUpdateDecl(decl);


@ -848,9 +848,11 @@ pub const Struct = struct {
// which `have_layout` does not ensure.
fully_resolved,
},
/// If true, definitely nonzero size at runtime. If false, resolving the fields
/// is necessary to determine whether it has bits at runtime.
known_has_bits: bool,
/// If true, the type has more than one possible value. However, it may still
/// be a non-runtime type if it is comptime-only.
/// If false, resolving the fields is necessary to determine whether the type
/// has only one possible value.
known_non_opv: bool,
requires_comptime: RequiresComptime = .unknown,
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
@ -1146,7 +1148,7 @@ pub const Union = struct {
pub fn hasAllZeroBitFieldTypes(u: Union) bool {
assert(u.haveFieldTypes());
for (u.fields.values()) |field| {
if (field.ty.hasCodeGenBits()) return false;
if (field.ty.hasRuntimeBits()) return false;
}
return true;
}
@ -1156,7 +1158,7 @@ pub const Union = struct {
var most_alignment: u32 = 0;
var most_index: usize = undefined;
for (u.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = a: {
if (field.abi_align.tag() == .abi_align_default) {
@ -1177,7 +1179,7 @@ pub const Union = struct {
var max_align: u32 = 0;
if (have_tag) max_align = u.tag_ty.abiAlignment(target);
for (u.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = a: {
if (field.abi_align.tag() == .abi_align_default) {
@ -1230,7 +1232,7 @@ pub const Union = struct {
var payload_size: u64 = 0;
var payload_align: u32 = 0;
for (u.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = a: {
if (field.abi_align.tag() == .abi_align_default) {
@ -3457,7 +3459,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.zir_index = undefined, // set below
.layout = .Auto,
.status = .none,
.known_has_bits = undefined,
.known_non_opv = undefined,
.namespace = .{
.parent = null,
.ty = struct_ty,
@ -3694,7 +3696,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
var type_changed = true;
if (decl.has_tv) {
prev_type_has_bits = decl.ty.hasCodeGenBits();
prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits();
type_changed = !decl.ty.eql(decl_tv.ty);
if (decl.getFunction()) |prev_func| {
prev_is_inline = prev_func.state == .inline_only;
@ -3714,8 +3716,9 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.analysis = .complete;
decl.generation = mod.generation;
const is_inline = decl_tv.ty.fnCallingConvention() == .Inline;
if (!is_inline and decl_tv.ty.hasCodeGenBits()) {
const has_runtime_bits = try sema.fnHasRuntimeBits(&block_scope, src, decl.ty);
if (has_runtime_bits) {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency
// order, increasing how many computations can be done in parallel.
@ -3728,6 +3731,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
mod.comp.bin_file.freeDecl(decl);
}
const is_inline = decl.ty.fnCallingConvention() == .Inline;
if (decl.is_exported) {
const export_src = src; // TODO make this point at `export` token
if (is_inline) {
@ -3748,6 +3752,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.owns_tv = false;
var queue_linker_work = false;
var is_extern = false;
switch (decl_tv.val.tag()) {
.variable => {
const variable = decl_tv.val.castTag(.variable).?.data;
@ -3764,6 +3769,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (decl == owner_decl) {
decl.owns_tv = true;
queue_linker_work = true;
is_extern = true;
}
},
@ -3789,7 +3795,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.analysis = .complete;
decl.generation = mod.generation;
if (queue_linker_work and decl.ty.hasCodeGenBits()) {
const has_runtime_bits = is_extern or
(queue_linker_work and try sema.typeHasRuntimeBits(&block_scope, src, decl.ty));
if (has_runtime_bits) {
log.debug("queue linker work for {*} ({s})", .{ decl, decl.name });
try mod.comp.bin_file.allocateDeclIndexes(decl);
@ -4290,7 +4299,7 @@ pub fn clearDecl(
mod.deleteDeclExports(decl);
if (decl.has_tv) {
if (decl.ty.hasCodeGenBits()) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl);
// TODO instead of a union, put this memory trailing Decl objects,
@ -4343,7 +4352,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void {
switch (mod.comp.bin_file.tag) {
.c => {}, // this linker backend has already migrated to the new API
else => if (decl.has_tv) {
if (decl.ty.hasCodeGenBits()) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl);
}
},
@ -4740,7 +4749,7 @@ pub fn createAnonymousDeclFromDeclNamed(
// if the Decl is referenced by an instruction or another constant. Otherwise,
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
if (typed_value.ty.hasCodeGenBits()) {
if (typed_value.ty.isFnOrHasRuntimeBits()) {
try mod.comp.bin_file.allocateDeclIndexes(new_decl);
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl });
}


@ -437,9 +437,10 @@ pub const Block = struct {
}
}
pub fn startAnonDecl(block: *Block) !WipAnonDecl {
pub fn startAnonDecl(block: *Block, src: LazySrcLoc) !WipAnonDecl {
return WipAnonDecl{
.block = block,
.src = src,
.new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa),
.finished = false,
};
@ -447,6 +448,7 @@ pub const Block = struct {
pub const WipAnonDecl = struct {
block: *Block,
src: LazySrcLoc,
new_decl_arena: std.heap.ArenaAllocator,
finished: bool,
@ -462,11 +464,15 @@ pub const Block = struct {
}
pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value) !*Decl {
const new_decl = try wad.block.sema.mod.createAnonymousDecl(wad.block, .{
const sema = wad.block.sema;
// Do this ahead of time because `createAnonymousDecl` depends on calling
// `type.hasRuntimeBits()`.
_ = try sema.typeHasRuntimeBits(wad.block, wad.src, ty);
const new_decl = try sema.mod.createAnonymousDecl(wad.block, .{
.ty = ty,
.val = val,
});
errdefer wad.block.sema.mod.abortAnonDecl(new_decl);
errdefer sema.mod.abortAnonDecl(new_decl);
try new_decl.finalizeNewArena(&wad.new_decl_arena);
wad.finished = true;
return new_decl;
@ -1505,9 +1511,6 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const ptr = sema.resolveInst(bin_inst.rhs);
const addr_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local);
// Needed for the call to `anon_decl.finish()` below which checks `ty.hasCodeGenBits()`.
_ = try sema.typeHasOnePossibleValue(block, src, pointee_ty);
if (Air.refToIndex(ptr)) |ptr_inst| {
if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) {
const air_datas = sema.air_instructions.items(.data);
@ -1538,7 +1541,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
// There will be only one coerce_result_ptr because we are running at comptime.
// The alloc will turn into a Decl.
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
iac.data.decl = try anon_decl.finish(
try pointee_ty.copy(anon_decl.arena()),
@ -1657,7 +1660,10 @@ pub fn analyzeStructDecl(
assert(extended.opcode == .struct_decl);
const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
struct_obj.known_has_bits = small.known_has_bits;
struct_obj.known_non_opv = small.known_non_opv;
if (small.known_comptime_only) {
struct_obj.requires_comptime = .yes;
}
var extra_index: usize = extended.operand;
extra_index += @boolToInt(small.has_src_node);
@ -1705,7 +1711,7 @@ fn zirStructDecl(
.zir_index = inst,
.layout = small.layout,
.status = .none,
.known_has_bits = undefined,
.known_non_opv = undefined,
.namespace = .{
.parent = block.namespace,
.ty = struct_ty,
@ -2531,7 +2537,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
const bitcast_ty_ref = air_datas[bitcast_inst].ty_op.ty;
const new_decl = d: {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const new_decl = try anon_decl.finish(
try final_elem_ty.copy(anon_decl.arena()),
@ -3115,7 +3121,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
if (operand_val.tag() == .variable) {
return sema.failWithNeededComptime(block, src);
}
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
iac.data.decl = try anon_decl.finish(
try operand_ty.copy(anon_decl.arena()),
@ -3187,8 +3193,7 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air
// after semantic analysis is complete, for example in the case of the initialization
// expression of a variable declaration. We need the memory to be in the new
// anonymous Decl's arena.
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, zir_bytes);
@ -5003,7 +5008,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
// TODO do we really want to create a Decl for this?
// The reason we do it right now is for memory management.
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
var names = Module.ErrorSet.NameMap{};
@ -5784,15 +5789,16 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ptr = sema.resolveInst(inst_data.operand);
const ptr_ty = sema.typeOf(ptr);
if (!ptr_ty.isPtrAtRuntime()) {
const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty});
}
// TODO handle known-pointer-address
const src = inst_data.src();
try sema.requireRuntimeBlock(block, src);
if (try sema.resolveMaybeUndefVal(block, ptr_src, ptr)) |ptr_val| {
return sema.addConstant(Type.usize, ptr_val);
}
try sema.requireRuntimeBlock(block, ptr_src);
return block.addUnOp(.ptrtoint, ptr);
}
@ -7409,7 +7415,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
},
};
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
defer anon_decl.deinit();
const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1];
@ -7673,7 +7679,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val;
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
defer anon_decl.deinit();
const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
@ -7757,7 +7763,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const final_ty = if (mulinfo.sentinel) |sent|
@ -9371,7 +9377,7 @@ fn zirBuiltinSrc(
const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
const func_name_val = blk: {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const name = std.mem.span(func.owner_decl.name);
const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]);
@ -9383,7 +9389,7 @@ fn zirBuiltinSrc(
};
const file_name_val = blk: {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const name = try func.owner_decl.getFileScope().fullPathZ(anon_decl.arena());
const new_decl = try anon_decl.finish(
@ -9633,7 +9639,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const is_exhaustive = if (ty.isNonexhaustiveEnum()) Value.@"false" else Value.@"true";
var fields_anon_decl = try block.startAnonDecl();
var fields_anon_decl = try block.startAnonDecl(src);
defer fields_anon_decl.deinit();
const enum_field_ty = t: {
@ -9664,7 +9670,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const name = enum_fields.keys()[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
@ -9729,7 +9735,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.Union => {
// TODO: look into memoizing this result.
var fields_anon_decl = try block.startAnonDecl();
var fields_anon_decl = try block.startAnonDecl(src);
defer fields_anon_decl.deinit();
const union_field_ty = t: {
@ -9753,7 +9759,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const field = union_fields.values()[i];
const name = union_fields.keys()[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
@ -9824,7 +9830,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
.Opaque => {
// TODO: look into memoizing this result.
var fields_anon_decl = try block.startAnonDecl();
var fields_anon_decl = try block.startAnonDecl(src);
defer fields_anon_decl.deinit();
const opaque_ty = try sema.resolveTypeFields(block, src, ty);
@ -9862,7 +9868,7 @@ fn typeInfoDecls(
const decls_len = namespace.decls.count();
if (decls_len == 0) return Value.initTag(.empty_array);
var decls_anon_decl = try block.startAnonDecl();
var decls_anon_decl = try block.startAnonDecl(src);
defer decls_anon_decl.deinit();
const declaration_ty = t: {
@ -9883,7 +9889,7 @@ fn typeInfoDecls(
const decl = namespace.decls.values()[i];
const name = namespace.decls.keys()[i];
const name_val = v: {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const bytes = try anon_decl.arena().dupeZ(u8, name);
const new_decl = try anon_decl.finish(
@ -10668,7 +10674,7 @@ fn zirArrayInit(
} else null;
const runtime_src = opt_runtime_src orelse {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const elem_vals = try anon_decl.arena().alloc(Value, resolved_args.len);
@ -10754,7 +10760,7 @@ fn zirArrayInitAnon(
const tuple_val = try Value.Tag.@"struct".create(sema.arena, values);
if (!is_ref) return sema.addConstant(tuple_ty, tuple_val);
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const decl = try anon_decl.finish(
try tuple_ty.copy(anon_decl.arena()),
@ -11046,7 +11052,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
const ty = try sema.resolveType(block, ty_src, inst_data.operand);
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
defer anon_decl.deinit();
const bytes = try ty.nameAlloc(anon_decl.arena());
@ -12867,7 +12873,7 @@ fn safetyPanic(
const msg_inst = msg_inst: {
// TODO instead of making a new decl for every panic in the entire compilation,
// introduce the concept of a reference-counted decl for these
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
break :msg_inst try sema.analyzeDeclRef(try anon_decl.finish(
try Type.Tag.array_u8.create(anon_decl.arena(), msg.len),
@ -13077,7 +13083,7 @@ fn fieldPtr(
switch (inner_ty.zigTypeTag()) {
.Array => {
if (mem.eql(u8, field_name, "len")) {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
Type.initTag(.comptime_int),
@ -13103,7 +13109,7 @@ fn fieldPtr(
const slice_ptr_ty = inner_ty.slicePtrFieldType(buf);
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
@ -13122,7 +13128,7 @@ fn fieldPtr(
return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr);
} else if (mem.eql(u8, field_name, "len")) {
if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
@ -13172,7 +13178,7 @@ fn fieldPtr(
});
} else (try sema.mod.getErrorValue(field_name)).key;
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
try child_type.copy(anon_decl.arena()),
@ -13188,7 +13194,7 @@ fn fieldPtr(
if (child_type.unionTagType()) |enum_ty| {
if (enum_ty.enumFieldIndex(field_name)) |field_index| {
const field_index_u32 = @intCast(u32, field_index);
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
try enum_ty.copy(anon_decl.arena()),
@ -13208,7 +13214,7 @@ fn fieldPtr(
return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name);
};
const field_index_u32 = @intCast(u32, field_index);
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
try child_type.copy(anon_decl.arena()),
@ -13464,7 +13470,7 @@ fn structFieldPtr(
var offset: u64 = 0;
var running_bits: u16 = 0;
for (struct_obj.fields.values()) |f, i| {
if (!f.ty.hasCodeGenBits()) continue;
if (!(try sema.typeHasRuntimeBits(block, field_name_src, f.ty))) continue;
const field_align = f.packedAlignment();
if (field_align == 0) {
@ -14022,7 +14028,6 @@ fn coerce(
// This will give an extra hint on top of what the bottom of this func would provide.
try sema.checkPtrOperand(block, dest_ty_src, inst_ty);
unreachable;
},
.Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) {
.Float, .ComptimeFloat => float: {
@ -15340,7 +15345,7 @@ fn analyzeRef(
const operand_ty = sema.typeOf(operand);
if (try sema.resolveMaybeUndefVal(block, src, operand)) |val| {
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
try operand_ty.copy(anon_decl.arena()),
@ -15754,7 +15759,7 @@ fn cmpNumeric(
lhs_bits = bigint.toConst().bitCountTwosComp();
break :x (zcmp != .lt);
} else x: {
lhs_bits = lhs_val.intBitCountTwosComp();
lhs_bits = lhs_val.intBitCountTwosComp(target);
break :x (lhs_val.orderAgainstZero() != .lt);
};
lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
@ -15789,7 +15794,7 @@ fn cmpNumeric(
rhs_bits = bigint.toConst().bitCountTwosComp();
break :x (zcmp != .lt);
} else x: {
rhs_bits = rhs_val.intBitCountTwosComp();
rhs_bits = rhs_val.intBitCountTwosComp(target);
break :x (rhs_val.orderAgainstZero() != .lt);
};
rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
@ -16877,6 +16882,7 @@ fn getBuiltinType(
/// in `Sema` is for calling during semantic analysis, and performs field resolution
/// to get the answer. The one in `Type` is for calling during codegen and asserts
/// that the types are already resolved.
/// TODO assert the return value matches `ty.onePossibleValue`
pub fn typeHasOnePossibleValue(
sema: *Sema,
block: *Block,
@ -17024,7 +17030,7 @@ pub fn typeHasOnePossibleValue(
},
.enum_nonexhaustive => {
const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
if (!tag_ty.hasCodeGenBits()) {
if (!(try sema.typeHasRuntimeBits(block, src, tag_ty))) {
return Value.zero;
} else {
return null;
@ -17288,7 +17294,7 @@ fn analyzeComptimeAlloc(
.@"align" = alignment,
});
var anon_decl = try block.startAnonDecl();
var anon_decl = try block.startAnonDecl(src);
defer anon_decl.deinit();
const align_val = if (alignment == 0)
@ -17478,10 +17484,10 @@ fn typePtrOrOptionalPtrTy(
}
}
/// Anything that reports hasCodeGenBits() false returns false here as well.
/// `generic_poison` will return false.
/// This function returns false negatives when structs and unions are having their
/// field types resolved.
/// TODO assert the return value matches `ty.comptimeOnly`
fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
return switch (ty.tag()) {
.u1,
@ -17672,3 +17678,25 @@ fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) C
},
};
}
pub fn typeHasRuntimeBits(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
if ((try sema.typeHasOnePossibleValue(block, src, ty)) != null) return false;
if (try sema.typeRequiresComptime(block, src, ty)) return false;
return true;
}
/// Synchronize logic with `Type.isFnOrHasRuntimeBits`.
pub fn fnHasRuntimeBits(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool {
const fn_info = ty.fnInfo();
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
switch (fn_info.cc) {
// If there was a comptime calling convention, it should also return false here.
.Inline => return false,
else => {},
}
if (try sema.typeRequiresComptime(block, src, fn_info.return_type)) {
return false;
}
return true;
}


@ -2599,10 +2599,11 @@ pub const Inst = struct {
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
known_has_bits: bool,
known_non_opv: bool,
known_comptime_only: bool,
name_strategy: NameStrategy,
layout: std.builtin.TypeInfo.ContainerLayout,
_: u7 = undefined,
_: u6 = undefined,
};
};


@ -713,7 +713,7 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
@ -1279,7 +1279,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasCodeGenBits())
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@ -2155,7 +2155,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasCodeGenBits()) {
if (self.air.typeOf(operand).hasRuntimeBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@ -2608,7 +2608,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasCodeGenBits()) {
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@ -2616,7 +2616,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasCodeGenBits())
if (!inst_ty.hasRuntimeBits())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@ -2672,11 +2672,43 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
_ = tv;
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
@ -2693,28 +2725,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO codegen for const slices", .{});
},
else => {
if (typed_value.val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
}
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
}
@ -2794,7 +2804,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const payload_type = typed_value.ty.errorUnionPayload();
const sub_val = typed_value.val.castTag(.eu_payload).?.data;
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
}
@ -2888,7 +2898,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag() == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasCodeGenBits()) {
} else if (!ret_ty.hasRuntimeBits()) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,


@ -1074,7 +1074,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const error_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) break :result mcv;
if (!payload_ty.hasRuntimeBits()) break :result mcv;
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
};
@ -1086,7 +1086,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) break :result MCValue.none;
if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
};
@ -1135,7 +1135,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) break :result mcv;
if (!payload_ty.hasRuntimeBits()) break :result mcv;
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
};
@ -1506,7 +1506,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasCodeGenBits())
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@ -2666,9 +2666,9 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
if (!error_type.hasCodeGenBits()) {
if (!error_type.hasRuntimeBits()) {
return MCValue{ .immediate = 0 }; // always false
} else if (!payload_type.hasCodeGenBits()) {
} else if (!payload_type.hasRuntimeBits()) {
if (error_type.abiSize(self.target.*) <= 4) {
const reg_mcv: MCValue = switch (operand) {
.register => operand,
@ -2900,7 +2900,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasCodeGenBits()) {
if (self.air.typeOf(operand).hasRuntimeBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@ -3658,7 +3658,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasCodeGenBits()) {
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@ -3666,7 +3666,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasCodeGenBits())
if (!inst_ty.hasRuntimeBits())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@ -3701,11 +3701,45 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
_ = tv;
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
@ -3722,28 +3756,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO codegen for const slices", .{});
},
else => {
if (typed_value.val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
}
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
}
@ -3812,7 +3824,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const payload_type = typed_value.ty.errorUnionPayload();
if (typed_value.val.castTag(.eu_payload)) |pl| {
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return MCValue{ .immediate = 0 };
}
@ -3820,7 +3832,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
_ = pl;
return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
} else {
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
}
@ -3918,7 +3930,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag() == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasCodeGenBits()) {
} else if (!ret_ty.hasRuntimeBits()) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,


@ -372,7 +372,7 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
fn addDbgInfoTypeReloc(self: *Emit, ty: Type) !void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4


@ -691,7 +691,7 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
@ -1223,7 +1223,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasCodeGenBits())
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@ -1769,7 +1769,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasCodeGenBits()) {
if (self.air.typeOf(operand).hasRuntimeBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@ -2107,7 +2107,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasCodeGenBits()) {
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@ -2115,7 +2115,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasCodeGenBits())
if (!inst_ty.hasRuntimeBits())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@ -2171,11 +2171,42 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
_ = tv;
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
@ -2192,28 +2223,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO codegen for const slices", .{});
},
else => {
if (typed_value.val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
}
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
}
@ -2290,7 +2299,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const payload_type = typed_value.ty.errorUnionPayload();
const sub_val = typed_value.val.castTag(.eu_payload).?.data;
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
}
@ -2381,7 +2390,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag() == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasCodeGenBits()) {
} else if (!ret_ty.hasRuntimeBits()) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,


@ -598,7 +598,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
// means we must generate it from a constant.
const val = self.air.value(ref).?;
const ty = self.air.typeOf(ref);
if (!ty.hasCodeGenBits() and !ty.isInt()) return WValue{ .none = {} };
if (!ty.hasRuntimeBits() and !ty.isInt()) return WValue{ .none = {} };
// When we need to pass the value by reference (such as a struct), we will
// leverage `genTypedValue` to lower the constant to bytes and emit it
@ -790,13 +790,13 @@ fn genFunctype(gpa: Allocator, fn_ty: Type, target: std.Target) !wasm.Type {
defer gpa.free(fn_params);
fn_ty.fnParamTypes(fn_params);
for (fn_params) |param_type| {
if (!param_type.hasCodeGenBits()) continue;
if (!param_type.hasRuntimeBits()) continue;
try params.append(typeToValtype(param_type, target));
}
}
// return type
if (!want_sret and return_type.hasCodeGenBits()) {
if (!want_sret and return_type.hasRuntimeBits()) {
try returns.append(typeToValtype(return_type, target));
}
@ -935,7 +935,7 @@ pub const DeclGen = struct {
const abi_size = @intCast(usize, ty.abiSize(self.target()));
const offset = abi_size - @intCast(usize, payload_type.abiSize(self.target()));
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
try writer.writeByteNTimes(@boolToInt(is_pl), abi_size);
return Result{ .appended = {} };
}
@ -1044,7 +1044,7 @@ pub const DeclGen = struct {
const field_vals = val.castTag(.@"struct").?.data;
for (field_vals) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
switch (try self.genTypedValue(field_ty, field_val, writer)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
@ -1093,7 +1093,7 @@ pub const DeclGen = struct {
.appended => {},
}
if (payload_ty.hasCodeGenBits()) {
if (payload_ty.hasRuntimeBits()) {
const pl_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef);
switch (try self.genTypedValue(payload_ty, pl_val, writer)) {
.externally_managed => |data| try writer.writeAll(data),
@ -1180,7 +1180,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
.Naked => return result,
.Unspecified, .C => {
for (param_types) |ty, ty_index| {
if (!ty.hasCodeGenBits()) {
if (!ty.hasRuntimeBits()) {
result.args[ty_index] = .{ .none = {} };
continue;
}
@ -1243,7 +1243,7 @@ fn moveStack(self: *Self, offset: u32, local: u32) !void {
///
/// Asserts Type has codegenbits
fn allocStack(self: *Self, ty: Type) !WValue {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
// calculate needed stack space
const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch {
@ -1319,22 +1319,22 @@ fn isByRef(ty: Type, target: std.Target) bool {
.Struct,
.Frame,
.Union,
=> return ty.hasCodeGenBits(),
=> return ty.hasRuntimeBits(),
.Int => return if (ty.intInfo(target).bits > 64) true else false,
.ErrorUnion => {
const has_tag = ty.errorUnionSet().hasCodeGenBits();
const has_pl = ty.errorUnionPayload().hasCodeGenBits();
const has_tag = ty.errorUnionSet().hasRuntimeBits();
const has_pl = ty.errorUnionPayload().hasRuntimeBits();
if (!has_tag or !has_pl) return false;
return ty.hasCodeGenBits();
return ty.hasRuntimeBits();
},
.Optional => {
if (ty.isPtrLikeOptional()) return false;
var buf: Type.Payload.ElemType = undefined;
return ty.optionalChild(&buf).hasCodeGenBits();
return ty.optionalChild(&buf).hasRuntimeBits();
},
.Pointer => {
// Slices act like struct and will be passed by reference
if (ty.isSlice()) return ty.hasCodeGenBits();
if (ty.isSlice()) return ty.hasRuntimeBits();
return false;
},
}
@ -1563,7 +1563,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const ret_ty = self.air.typeOf(un_op).childType();
if (!ret_ty.hasCodeGenBits()) return WValue.none;
if (!ret_ty.hasRuntimeBits()) return WValue.none;
if (!isByRef(ret_ty, self.target)) {
const result = try self.load(operand, ret_ty, 0);
@ -1611,7 +1611,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const arg_val = try self.resolveInst(arg_ref);
const arg_ty = self.air.typeOf(arg_ref);
if (!arg_ty.hasCodeGenBits()) continue;
if (!arg_ty.hasRuntimeBits()) continue;
try self.emitWValue(arg_val);
}
@ -1631,7 +1631,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addLabel(.call_indirect, fn_type_index);
}
if (self.liveness.isUnused(inst) or !ret_ty.hasCodeGenBits()) {
if (self.liveness.isUnused(inst) or !ret_ty.hasRuntimeBits()) {
return WValue.none;
} else if (ret_ty.isNoReturn()) {
try self.addTag(.@"unreachable");
@ -1653,7 +1653,7 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.initializeStack();
}
if (!pointee_type.hasCodeGenBits()) {
if (!pointee_type.hasRuntimeBits()) {
// When the pointee is zero-sized, we still want to create a pointer,
// but we use a default pointer type as storage.
const zero_ptr = try self.allocStack(Type.usize);
@ -1678,7 +1678,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
.ErrorUnion => {
const err_ty = ty.errorUnionSet();
const pl_ty = ty.errorUnionPayload();
if (!pl_ty.hasCodeGenBits()) {
if (!pl_ty.hasRuntimeBits()) {
const err_val = try self.load(rhs, err_ty, 0);
return self.store(lhs, err_val, err_ty, 0);
}
@ -1691,7 +1691,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
}
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
if (!pl_ty.hasCodeGenBits()) {
if (!pl_ty.hasRuntimeBits()) {
return self.store(lhs, rhs, Type.initTag(.u8), 0);
}
@ -1750,7 +1750,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(ty_op.operand);
const ty = self.air.getRefType(ty_op.ty);
if (!ty.hasCodeGenBits()) return WValue{ .none = {} };
if (!ty.hasRuntimeBits()) return WValue{ .none = {} };
if (isByRef(ty, self.target)) {
const new_local = try self.allocStack(ty);
@ -2146,7 +2146,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner
if (operand_ty.zigTypeTag() == .Optional and !operand_ty.isPtrLikeOptional()) {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
if (payload_ty.hasCodeGenBits()) {
if (payload_ty.hasRuntimeBits()) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking both lhs and rhs
// against non-null, and then checking that the payloads of lhs and rhs match.
@ -2190,7 +2190,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const block = self.blocks.get(br.block_inst).?;
// If the operand has runtime bits, we should break with a value.
if (self.air.typeOf(br.operand).hasCodeGenBits()) {
if (self.air.typeOf(br.operand).hasRuntimeBits()) {
try self.emitWValue(try self.resolveInst(br.operand));
if (block.value != .none) {
@ -2282,7 +2282,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasCodeGenBits()) return WValue{ .none = {} };
if (!field_ty.hasRuntimeBits()) return WValue{ .none = {} };
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty});
};
@ -2452,7 +2452,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W
// load the error tag value
try self.emitWValue(operand);
if (pl_ty.hasCodeGenBits()) {
if (pl_ty.hasRuntimeBits()) {
try self.addMemArg(.i32_load16_u, .{
.offset = 0,
.alignment = err_ty.errorUnionSet().abiAlignment(self.target),
@ -2474,7 +2474,7 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue
const operand = try self.resolveInst(ty_op.operand);
const err_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) return WValue{ .none = {} };
if (!payload_ty.hasRuntimeBits()) return WValue{ .none = {} };
const offset = @intCast(u32, err_ty.errorUnionSet().abiSize(self.target));
if (isByRef(payload_ty, self.target)) {
return self.buildPointerOffset(operand, offset, .new);
@ -2489,7 +2489,7 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(ty_op.operand);
const err_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return operand;
}
@ -2502,7 +2502,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(ty_op.operand);
const op_ty = self.air.typeOf(ty_op.operand);
if (!op_ty.hasCodeGenBits()) return operand;
if (!op_ty.hasRuntimeBits()) return operand;
const err_ty = self.air.getRefType(ty_op.ty);
const offset = err_ty.errorUnionSet().abiSize(self.target);
@ -2580,7 +2580,7 @@ fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode)
const payload_ty = optional_ty.optionalChild(&buf);
// When the payload is zero-bits, we can treat the operand as a value
// rather than as a pointer to the stack value.
if (payload_ty.hasCodeGenBits()) {
if (payload_ty.hasRuntimeBits()) {
try self.addMemArg(.i32_load8_u, .{ .offset = 0, .alignment = 1 });
}
}
@ -2600,7 +2600,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(ty_op.operand);
const opt_ty = self.air.typeOf(ty_op.operand);
const payload_ty = self.air.typeOfIndex(inst);
if (!payload_ty.hasCodeGenBits()) return WValue{ .none = {} };
if (!payload_ty.hasRuntimeBits()) return WValue{ .none = {} };
if (opt_ty.isPtrLikeOptional()) return operand;
const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target);
@ -2621,7 +2621,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
if (!payload_ty.hasCodeGenBits() or opt_ty.isPtrLikeOptional()) {
if (!payload_ty.hasRuntimeBits() or opt_ty.isPtrLikeOptional()) {
return operand;
}
@ -2635,7 +2635,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
const opt_ty = self.air.typeOf(ty_op.operand).childType();
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty});
}
@ -2659,7 +2659,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
const non_null_bit = try self.allocStack(Type.initTag(.u1));
try self.addLabel(.local_get, non_null_bit.local);
try self.addImm32(1);
@ -2851,7 +2851,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const slice_local = try self.allocStack(slice_ty);
// store the array ptr in the slice
if (array_ty.hasCodeGenBits()) {
if (array_ty.hasRuntimeBits()) {
try self.store(slice_local, operand, ty, 0);
}
@ -3105,7 +3105,7 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(operand_ty.hasCodeGenBits());
assert(operand_ty.hasRuntimeBits());
assert(op == .eq or op == .neq);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
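
Every guard added in this file has the same shape: zero-bit values never materialize on the wasm stack. A minimal user-level sketch of the kind of type these guards filter out (illustrative names, not from this diff):

const Empty = struct {}; // no fields, so hasRuntimeBits() == false
fn takesEmpty(e: Empty, x: u32) u32 {
    _ = e; // no local, stack slot, or call argument is emitted for `e`
    return x; // only `x` is lowered
}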

View File

@ -1202,7 +1202,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) break :result mcv;
if (!payload_ty.hasRuntimeBits()) break :result mcv;
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -1213,7 +1213,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) break :result MCValue.none;
if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -1270,7 +1270,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) break :result mcv;
if (!payload_ty.hasRuntimeBits()) break :result mcv;
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
};
@ -1636,7 +1636,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasCodeGenBits())
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@ -2739,9 +2739,9 @@ fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
const err_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
if (!err_type.hasCodeGenBits()) {
if (!err_type.hasRuntimeBits()) {
return MCValue{ .immediate = 0 }; // always false
} else if (!payload_type.hasCodeGenBits()) {
} else if (!payload_type.hasRuntimeBits()) {
if (err_type.abiSize(self.target.*) <= 8) {
try self.genBinMathOpMir(.cmp, err_type, .unsigned, operand, MCValue{ .immediate = 0 });
return MCValue{ .compare_flags_unsigned = .gt };
@ -2962,7 +2962,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasCodeGenBits()) {
if (self.air.typeOf(operand).hasRuntimeBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@ -3913,7 +3913,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasCodeGenBits()) {
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@ -3921,7 +3921,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no runtime bits, there is no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasCodeGenBits())
if (!inst_ty.hasRuntimeBits())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@ -3977,11 +3977,45 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory to store
// the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
_ = tv;
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
@ -3998,28 +4032,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO codegen for const slices", .{});
},
else => {
if (typed_value.val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory to store
// the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
}
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
}
@ -4091,7 +4103,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const payload_type = typed_value.ty.errorUnionPayload();
if (typed_value.val.castTag(.eu_payload)) |pl| {
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return MCValue{ .immediate = 0 };
}
@ -4099,7 +4111,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
_ = pl;
return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
} else {
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
}
@ -4156,7 +4168,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var by_reg = std.AutoHashMap(usize, usize).init(self.bin_file.allocator);
defer by_reg.deinit();
for (param_types) |ty, i| {
if (!ty.hasCodeGenBits()) continue;
if (!ty.hasRuntimeBits()) continue;
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const pass_in_reg = switch (ty.zigTypeTag()) {
.Bool => true,
@ -4178,7 +4190,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
// for (param_types) |ty, i| {
const i = count - 1;
const ty = param_types[i];
if (!ty.hasCodeGenBits()) {
if (!ty.hasRuntimeBits()) {
assert(cc != .C);
result.args[i] = .{ .none = {} };
continue;
@ -4207,7 +4219,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag() == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasCodeGenBits()) {
} else if (!ret_ty.hasRuntimeBits()) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,
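
For reference, the GOT indirection in the new lowerDeclRef above reduces to a base-plus-index address computation; a minimal sketch with assumed numbers:

// Hypothetical values; the real base and slot come from linker state.
const ptr_bytes: u64 = 8; // ptrBitWidth() / 8 on a 64-bit target
const got_base: u64 = 0x202000; // e.g. the ELF GOT program header's p_vaddr
const offset_table_index: u64 = 3; // this decl's slot in the offset table
const got_addr = got_base + offset_table_index * ptr_bytes; // 0x202018
// lowerDeclRef then returns MCValue{ .memory = got_addr }.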

View File

@ -885,7 +885,7 @@ fn genArgDbgInfo(emit: *Emit, inst: Air.Inst.Index, mcv: MCValue) !void {
fn addDbgInfoTypeReloc(emit: *Emit, ty: Type) !void {
switch (emit.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4

View File

@ -377,7 +377,7 @@ pub fn generateSymbol(
const field_vals = typed_value.val.castTag(.@"struct").?.data;
for (field_vals) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.val = field_val,

View File

@ -507,7 +507,7 @@ pub const DeclGen = struct {
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
const err_val = if (val.errorUnionIsPayload()) Value.initTag(.zero) else val;
return dg.renderValue(writer, error_type, err_val);
@ -581,7 +581,7 @@ pub const DeclGen = struct {
for (field_vals) |field_val, i| {
const field_ty = ty.structFieldType(i);
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
if (i != 0) try writer.writeAll(",");
try dg.renderValue(writer, field_ty, field_val);
@ -611,7 +611,7 @@ pub const DeclGen = struct {
const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag).?;
const field_ty = ty.unionFields().values()[index].ty;
const field_name = ty.unionFields().keys()[index];
if (field_ty.hasCodeGenBits()) {
if (field_ty.hasRuntimeBits()) {
try writer.print(".{} = ", .{fmtIdent(field_name)});
try dg.renderValue(writer, field_ty, union_obj.val);
}
@ -652,7 +652,7 @@ pub const DeclGen = struct {
}
}
const return_ty = dg.decl.ty.fnReturnType();
if (return_ty.hasCodeGenBits()) {
if (return_ty.hasRuntimeBits()) {
try dg.renderType(w, return_ty);
} else if (return_ty.zigTypeTag() == .NoReturn) {
try w.writeAll("zig_noreturn void");
@ -784,7 +784,7 @@ pub const DeclGen = struct {
var it = struct_obj.fields.iterator();
while (it.next()) |entry| {
const field_ty = entry.value_ptr.ty;
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
const alignment = entry.value_ptr.abi_align;
const name: CValue = .{ .identifier = entry.key_ptr.* };
@ -837,7 +837,7 @@ pub const DeclGen = struct {
var it = t.unionFields().iterator();
while (it.next()) |entry| {
const field_ty = entry.value_ptr.ty;
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
const alignment = entry.value_ptr.abi_align;
const name: CValue = .{ .identifier = entry.key_ptr.* };
try buffer.append(' ');
@ -1582,7 +1582,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const elem_type = inst_ty.elemType();
const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut;
if (!elem_type.hasCodeGenBits()) {
if (!elem_type.isFnOrHasRuntimeBits()) {
const target = f.object.dg.module.getTarget();
const literal = switch (target.cpu.arch.ptrBitWidth()) {
32 => "(void *)0xaaaaaaaa",
@ -1683,7 +1683,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
fn airRet(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
if (f.air.typeOf(un_op).hasCodeGenBits()) {
if (f.air.typeOf(un_op).isFnOrHasRuntimeBits()) {
const operand = try f.resolveInst(un_op);
try writer.writeAll("return ");
try f.writeCValue(writer, operand);
@ -1699,7 +1699,7 @@ fn airRetLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const ptr_ty = f.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
if (!ret_ty.hasCodeGenBits()) {
if (!ret_ty.isFnOrHasRuntimeBits()) {
try writer.writeAll("return;\n");
}
const ptr = try f.resolveInst(un_op);
@ -2315,7 +2315,7 @@ fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
var result_local: CValue = .none;
if (unused_result) {
if (ret_ty.hasCodeGenBits()) {
if (ret_ty.hasRuntimeBits()) {
try writer.print("(void)", .{});
}
} else {
@ -2832,7 +2832,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.air.typeOf(ty_op.operand);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
if (operand_ty.zigTypeTag() == .Pointer) {
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = *");
@ -2864,7 +2864,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: []cons
const operand_ty = f.air.typeOf(ty_op.operand);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return CValue.none;
}
@ -2908,7 +2908,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
const err_un_ty = f.air.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return operand;
}
@ -2951,7 +2951,7 @@ fn airIsErr(
const operand_ty = f.air.typeOf(un_op);
const local = try f.allocLocal(Type.initTag(.bool), .Const);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
try writer.print(" = {s}", .{deref_prefix});
try f.writeCValue(writer, operand);
try writer.print(" {s} 0;\n", .{op_str});

View File

@ -176,7 +176,7 @@ pub const Object = struct {
/// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
/// TODO we need to remove entries from this map in response to incremental compilation
/// but I think the frontend won't tell us about types that get deleted, because
/// hasCodeGenBits() is false for types.
/// hasRuntimeBits() is false for them.
type_map: TypeMap,
/// The backing memory for `type_map`. Periodically garbage collected after flush().
/// The code for doing the periodical GC is not yet implemented.
@ -463,7 +463,7 @@ pub const Object = struct {
const param_offset: c_uint = @boolToInt(ret_ptr != null);
for (fn_info.param_types) |param_ty| {
if (!param_ty.hasCodeGenBits()) continue;
if (!param_ty.hasRuntimeBits()) continue;
const llvm_arg_i = @intCast(c_uint, args.items.len) + param_offset;
try args.append(llvm_func.getParam(llvm_arg_i));
@ -710,7 +710,7 @@ pub const DeclGen = struct {
// Set parameter attributes.
var llvm_param_i: c_uint = @boolToInt(sret);
for (fn_info.param_types) |param_ty| {
if (!param_ty.hasCodeGenBits()) continue;
if (!param_ty.hasRuntimeBits()) continue;
if (isByRef(param_ty)) {
dg.addArgAttr(llvm_fn, llvm_param_i, "nonnull");
@ -845,7 +845,11 @@ pub const DeclGen = struct {
}
const llvm_addrspace = dg.llvmAddressSpace(t.ptrAddressSpace());
const elem_ty = t.childType();
const llvm_elem_ty = if (elem_ty.hasCodeGenBits() or elem_ty.zigTypeTag() == .Array)
const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
.Opaque, .Array, .Fn => true,
else => elem_ty.hasRuntimeBits(),
};
const llvm_elem_ty = if (lower_elem_ty)
try dg.llvmType(elem_ty)
else
dg.context.intType(8);
@ -883,13 +887,13 @@ pub const DeclGen = struct {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const child_type = t.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) {
if (!child_type.hasRuntimeBits()) {
return dg.context.intType(1);
}
const payload_llvm_ty = try dg.llvmType(child_type);
if (t.isPtrLikeOptional()) {
return payload_llvm_ty;
} else if (!child_type.hasCodeGenBits()) {
} else if (!child_type.hasRuntimeBits()) {
return dg.context.intType(1);
}
@ -902,7 +906,7 @@ pub const DeclGen = struct {
const error_type = t.errorUnionSet();
const payload_type = t.errorUnionPayload();
const llvm_error_type = try dg.llvmType(error_type);
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
return llvm_error_type;
}
const llvm_payload_type = try dg.llvmType(payload_type);
@ -967,7 +971,7 @@ pub const DeclGen = struct {
var big_align: u32 = 0;
var running_bits: u16 = 0;
for (struct_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.packedAlignment();
if (field_align == 0) {
@ -1034,7 +1038,7 @@ pub const DeclGen = struct {
}
} else {
for (struct_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty));
}
}
@ -1128,7 +1132,7 @@ pub const DeclGen = struct {
const sret = firstParamSRet(fn_info, target);
const return_type = fn_info.return_type;
const raw_llvm_ret_ty = try dg.llvmType(return_type);
const llvm_ret_ty = if (!return_type.hasCodeGenBits() or sret)
const llvm_ret_ty = if (!return_type.hasRuntimeBits() or sret)
dg.context.voidType()
else
raw_llvm_ret_ty;
@ -1141,7 +1145,7 @@ pub const DeclGen = struct {
}
for (fn_info.param_types) |param_ty| {
if (!param_ty.hasCodeGenBits()) continue;
if (!param_ty.hasRuntimeBits()) continue;
const raw_llvm_ty = try dg.llvmType(param_ty);
const actual_llvm_ty = if (!isByRef(param_ty)) raw_llvm_ty else raw_llvm_ty.pointerType(0);
@ -1181,29 +1185,35 @@ pub const DeclGen = struct {
const llvm_type = try dg.llvmType(tv.ty);
return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
},
.Int => {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = tv.val.toBigInt(&bigint_space);
const target = dg.module.getTarget();
const int_info = tv.ty.intInfo(target);
const llvm_type = dg.context.intType(int_info.bits);
// TODO: this duplicates code with Pointer; they should share the handling
// of tv.val.tag(), with Int doing an extra constPtrToInt on top.
.Int => switch (tv.val.tag()) {
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
.decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
else => {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = tv.val.toBigInt(&bigint_space);
const target = dg.module.getTarget();
const int_info = tv.ty.intInfo(target);
const llvm_type = dg.context.intType(int_info.bits);
const unsigned_val = v: {
if (bigint.limbs.len == 1) {
break :v llvm_type.constInt(bigint.limbs[0], .False);
const unsigned_val = v: {
if (bigint.limbs.len == 1) {
break :v llvm_type.constInt(bigint.limbs[0], .False);
}
if (@sizeOf(usize) == @sizeOf(u64)) {
break :v llvm_type.constIntOfArbitraryPrecision(
@intCast(c_uint, bigint.limbs.len),
bigint.limbs.ptr,
);
}
@panic("TODO implement bigint to llvm int for 32-bit compiler builds");
};
if (!bigint.positive) {
return llvm.constNeg(unsigned_val);
}
if (@sizeOf(usize) == @sizeOf(u64)) {
break :v llvm_type.constIntOfArbitraryPrecision(
@intCast(c_uint, bigint.limbs.len),
bigint.limbs.ptr,
);
}
@panic("TODO implement bigint to llvm int for 32-bit compiler builds");
};
if (!bigint.positive) {
return llvm.constNeg(unsigned_val);
}
return unsigned_val;
return unsigned_val;
},
},
.Enum => {
var int_buffer: Value.Payload.U64 = undefined;
@ -1375,7 +1385,7 @@ pub const DeclGen = struct {
const llvm_i1 = dg.context.intType(1);
const is_pl = !tv.val.isNull();
const non_null_bit = if (is_pl) llvm_i1.constAllOnes() else llvm_i1.constNull();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return non_null_bit;
}
if (tv.ty.isPtrLikeOptional()) {
@ -1388,6 +1398,7 @@ pub const DeclGen = struct {
return llvm_ty.constNull();
}
}
assert(payload_ty.zigTypeTag() != .Fn);
const fields: [2]*const llvm.Value = .{
try dg.genTypedValue(.{
.ty = payload_ty,
@ -1425,7 +1436,7 @@ pub const DeclGen = struct {
const payload_type = tv.ty.errorUnionPayload();
const is_pl = tv.val.errorUnionIsPayload();
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
return dg.genTypedValue(.{ .ty = error_type, .val = err_val });
@ -1463,7 +1474,7 @@ pub const DeclGen = struct {
var running_int: *const llvm.Value = llvm_struct_ty.structGetTypeAtIndex(0).constNull();
for (field_vals) |field_val, i| {
const field = fields[i];
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.packedAlignment();
if (field_align == 0) {
@ -1545,7 +1556,7 @@ pub const DeclGen = struct {
} else {
for (field_vals) |field_val, i| {
const field_ty = tv.ty.structFieldType(i);
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
llvm_fields.appendAssumeCapacity(try dg.genTypedValue(.{
.ty = field_ty,
@ -1577,7 +1588,7 @@ pub const DeclGen = struct {
assert(union_obj.haveFieldTypes());
const field_ty = union_obj.fields.values()[field_index].ty;
const payload = p: {
if (!field_ty.hasCodeGenBits()) {
if (!field_ty.hasRuntimeBits()) {
const padding_len = @intCast(c_uint, layout.payload_size);
break :p dg.context.intType(8).arrayType(padding_len).getUndef();
}
@ -1789,13 +1800,14 @@ pub const DeclGen = struct {
return self.context.constStruct(&fields, fields.len, .False);
}
if (!tv.ty.childType().hasCodeGenBits() or !decl.ty.hasCodeGenBits()) {
const is_fn_body = decl.ty.zigTypeTag() == .Fn;
if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
return self.lowerPtrToVoid(tv.ty);
}
decl.markAlive();
const llvm_val = if (decl.ty.zigTypeTag() == .Fn)
const llvm_val = if (is_fn_body)
try self.resolveLlvmFunction(decl)
else
try self.resolveGlobalDecl(decl);
@ -2187,7 +2199,7 @@ pub const FuncGen = struct {
} else {
for (args) |arg, i| {
const param_ty = fn_info.param_types[i];
if (!param_ty.hasCodeGenBits()) continue;
if (!param_ty.hasRuntimeBits()) continue;
try llvm_args.append(try self.resolveInst(arg));
}
@ -2205,7 +2217,7 @@ pub const FuncGen = struct {
if (return_type.isNoReturn()) {
_ = self.builder.buildUnreachable();
return null;
} else if (self.liveness.isUnused(inst) or !return_type.hasCodeGenBits()) {
} else if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBits()) {
return null;
} else if (sret) {
call.setCallSret(llvm_ret_ty);
@ -2229,7 +2241,7 @@ pub const FuncGen = struct {
_ = self.builder.buildRetVoid();
return null;
}
if (!ret_ty.hasCodeGenBits()) {
if (!ret_ty.hasRuntimeBits()) {
_ = self.builder.buildRetVoid();
return null;
}
@ -2242,7 +2254,7 @@ pub const FuncGen = struct {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr_ty = self.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
if (!ret_ty.hasCodeGenBits() or isByRef(ret_ty)) {
if (!ret_ty.hasRuntimeBits() or isByRef(ret_ty)) {
_ = self.builder.buildRetVoid();
return null;
}
@ -2278,7 +2290,7 @@ pub const FuncGen = struct {
.Int, .Bool, .Pointer, .ErrorSet => operand_ty,
.Optional => blk: {
const payload_ty = operand_ty.optionalChild(&opt_buffer);
if (!payload_ty.hasCodeGenBits() or operand_ty.isPtrLikeOptional()) {
if (!payload_ty.hasRuntimeBits() or operand_ty.isPtrLikeOptional()) {
break :blk operand_ty;
}
// We need to emit instructions to check for equality/inequality
@ -2402,7 +2414,8 @@ pub const FuncGen = struct {
self.builder.positionBuilderAtEnd(parent_bb);
// If the block does not return a value, we don't have to create a phi node.
if (!inst_ty.hasCodeGenBits()) return null;
const is_body = inst_ty.zigTypeTag() == .Fn;
if (!is_body and !inst_ty.hasRuntimeBits()) return null;
const raw_llvm_ty = try self.dg.llvmType(inst_ty);
@ -2411,7 +2424,7 @@ pub const FuncGen = struct {
// a pointer to it. LLVM IR allows the call instruction to use function bodies instead
// of function pointers; however, the phi makes it a runtime value, and therefore
// the LLVM type has to be wrapped in a pointer.
if (inst_ty.zigTypeTag() == .Fn or isByRef(inst_ty)) {
if (is_body or isByRef(inst_ty)) {
break :ty raw_llvm_ty.pointerType(0);
}
break :ty raw_llvm_ty;
@ -2432,7 +2445,8 @@ pub const FuncGen = struct {
// If the break doesn't break with a value, then we don't have to add
// the values to the lists.
if (self.air.typeOf(branch.operand).hasCodeGenBits()) {
const operand_ty = self.air.typeOf(branch.operand);
if (operand_ty.hasRuntimeBits() or operand_ty.zigTypeTag() == .Fn) {
const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
@ -2536,7 +2550,7 @@ pub const FuncGen = struct {
const llvm_usize = try self.dg.llvmType(Type.usize);
const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
if (!array_ty.hasCodeGenBits()) {
if (!array_ty.hasRuntimeBits()) {
return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, "");
}
const operand = try self.resolveInst(ty_op.operand);
@ -2667,7 +2681,7 @@ pub const FuncGen = struct {
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType();
if (!elem_ty.hasCodeGenBits()) return null;
if (!elem_ty.hasRuntimeBits()) return null;
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -2714,7 +2728,7 @@ pub const FuncGen = struct {
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasCodeGenBits()) {
if (!field_ty.hasRuntimeBits()) {
return null;
}
const target = self.dg.module.getTarget();
@ -2919,7 +2933,7 @@ pub const FuncGen = struct {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
if (invert) {
return self.builder.buildNot(operand, "");
} else {
@ -2951,7 +2965,7 @@ pub const FuncGen = struct {
const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror));
const zero = err_set_ty.constNull();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand;
return self.builder.buildICmp(op, loaded, zero, "");
}
@ -2974,7 +2988,7 @@ pub const FuncGen = struct {
const optional_ty = self.air.typeOf(ty_op.operand).childType();
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
// We have a pointer to a zero-bit value and we need to return
// a pointer to a zero-bit value.
return operand;
@ -2998,7 +3012,7 @@ pub const FuncGen = struct {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
const non_null_bit = self.context.intType(1).constAllOnes();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
// We have a pointer to an i1. We need to set it to 1 and then return the same pointer.
_ = self.builder.buildStore(non_null_bit, operand);
return operand;
@ -3033,7 +3047,7 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand);
const payload_ty = self.air.typeOfIndex(inst);
if (!payload_ty.hasCodeGenBits()) return null;
if (!payload_ty.hasRuntimeBits()) return null;
if (optional_ty.isPtrLikeOptional()) {
// Payload value is the same as the optional value.
@ -3054,7 +3068,7 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) return null;
if (!payload_ty.hasRuntimeBits()) return null;
if (operand_is_ptr or isByRef(payload_ty)) {
return self.builder.buildStructGEP(operand, 1, "");
}
@ -3074,7 +3088,7 @@ pub const FuncGen = struct {
const operand_ty = self.air.typeOf(ty_op.operand);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
if (!operand_is_ptr) return operand;
return self.builder.buildLoad(operand, "");
}
@ -3093,7 +3107,7 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(1).constAllOnes();
if (!payload_ty.hasCodeGenBits()) return non_null_bit;
if (!payload_ty.hasRuntimeBits()) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOfIndex(inst);
if (optional_ty.isPtrLikeOptional()) return operand;
@ -3121,7 +3135,7 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return operand;
}
const inst_ty = self.air.typeOfIndex(inst);
@ -3152,7 +3166,7 @@ pub const FuncGen = struct {
const err_un_ty = self.air.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return operand;
}
const err_un_llvm_ty = try self.dg.llvmType(err_un_ty);
@ -3841,7 +3855,7 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_type = ptr_ty.childType();
if (!pointee_type.hasCodeGenBits()) return self.dg.lowerPtrToVoid(ptr_ty);
if (!pointee_type.isFnOrHasRuntimeBits()) return self.dg.lowerPtrToVoid(ptr_ty);
const pointee_llvm_ty = try self.dg.llvmType(pointee_type);
const alloca_inst = self.buildAlloca(pointee_llvm_ty);
@ -3855,7 +3869,7 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const ret_ty = ptr_ty.childType();
if (!ret_ty.hasCodeGenBits()) return null;
if (!ret_ty.isFnOrHasRuntimeBits()) return null;
if (self.ret_ptr) |ret_ptr| return ret_ptr;
const ret_llvm_ty = try self.dg.llvmType(ret_ty);
const target = self.dg.module.getTarget();
@ -4079,7 +4093,7 @@ pub const FuncGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType();
if (!operand_ty.hasCodeGenBits()) return null;
if (!operand_ty.isFnOrHasRuntimeBits()) return null;
var ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
@ -4679,7 +4693,7 @@ pub const FuncGen = struct {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field = &union_obj.fields.values()[field_index];
const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
if (!field.ty.hasCodeGenBits()) {
if (!field.ty.hasRuntimeBits()) {
return null;
}
const target = self.dg.module.getTarget();
@ -4707,7 +4721,7 @@ pub const FuncGen = struct {
fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) !?*const llvm.Value {
const info = ptr_ty.ptrInfo().data;
if (!info.pointee_type.hasCodeGenBits()) return null;
if (!info.pointee_type.hasRuntimeBits()) return null;
const target = self.dg.module.getTarget();
const ptr_alignment = ptr_ty.ptrAlignment(target);
@ -4762,7 +4776,7 @@ pub const FuncGen = struct {
) void {
const info = ptr_ty.ptrInfo().data;
const elem_ty = info.pointee_type;
if (!elem_ty.hasCodeGenBits()) {
if (!elem_ty.isFnOrHasRuntimeBits()) {
return;
}
const target = self.dg.module.getTarget();
@ -5092,7 +5106,7 @@ fn llvmFieldIndex(
if (struct_obj.layout != .Packed) {
var llvm_field_index: c_uint = 0;
for (struct_obj.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits())
if (!field.ty.hasRuntimeBits())
continue;
if (field_index > i) {
llvm_field_index += 1;
@ -5119,7 +5133,7 @@ fn llvmFieldIndex(
var running_bits: u16 = 0;
var llvm_field_index: c_uint = 0;
for (struct_obj.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits())
if (!field.ty.hasRuntimeBits())
continue;
const field_align = field.packedAlignment();
@ -5232,9 +5246,9 @@ fn isByRef(ty: Type) bool {
.AnyFrame,
=> return false,
.Array, .Frame => return ty.hasCodeGenBits(),
.Array, .Frame => return ty.hasRuntimeBits(),
.Struct => {
if (!ty.hasCodeGenBits()) return false;
if (!ty.hasRuntimeBits()) return false;
if (ty.castTag(.tuple)) |tuple| {
var count: usize = 0;
for (tuple.data.values) |field_val, i| {
@ -5252,7 +5266,7 @@ fn isByRef(ty: Type) bool {
}
return true;
},
.Union => return ty.hasCodeGenBits(),
.Union => return ty.hasRuntimeBits(),
.ErrorUnion => return isByRef(ty.errorUnionPayload()),
.Optional => {
var buf: Type.Payload.ElemType = undefined;
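
The new .Int branch for decl_ref/decl_ref_mut above is what lets an integer-typed constant carry an address that is only known at link time. A hedged user-level sketch of the pattern it supports:

var global: u32 = 42;
// `&global` is a decl_ref; its integer value cannot be computed as a
// bigint, so the backend routes it through lowerDeclRefValue instead.
const addr: usize = @ptrToInt(&global);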

View File

@ -852,7 +852,7 @@ pub const DeclGen = struct {
try self.beginSPIRVBlock(label_id);
// If this block didn't produce a value, simply return here.
if (!ty.hasCodeGenBits())
if (!ty.hasRuntimeBits())
return null;
// Combine the result from the blocks using the Phi instruction.
@ -879,7 +879,7 @@ pub const DeclGen = struct {
const block = self.blocks.get(br.block_inst).?;
const operand_ty = self.air.typeOf(br.operand);
if (operand_ty.hasCodeGenBits()) {
if (operand_ty.hasRuntimeBits()) {
const operand_id = try self.resolve(br.operand);
// current_block_label_id should not be undefined here; a br or br_void can only appear inside a block in the function's body.
try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
@ -958,7 +958,7 @@ pub const DeclGen = struct {
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
const operand = self.air.instructions.items(.data)[inst].un_op;
const operand_ty = self.air.typeOf(operand);
if (operand_ty.hasCodeGenBits()) {
if (operand_ty.hasRuntimeBits()) {
const operand_id = try self.resolve(operand);
try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
} else {

View File

@ -2476,7 +2476,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits();
if (fn_ret_has_bits) {
dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram);
} else {

View File

@ -920,7 +920,7 @@ pub fn initDeclDebugBuffers(
try dbg_info_buffer.ensureUnusedCapacity(27 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits();
if (fn_ret_has_bits) {
dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram);
} else {

View File

@ -259,7 +259,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
}
if (!decl.ty.hasCodeGenBits()) return;
if (!decl.ty.hasRuntimeBits()) return;
assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
decl.link.wasm.clear();

View File

@ -1157,7 +1157,8 @@ const Writer = struct {
break :blk decls_len;
} else 0;
try self.writeFlag(stream, "known_has_bits, ", small.known_has_bits);
try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv);
try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only);
try stream.print("{s}, {s}, ", .{
@tagName(small.name_strategy), @tagName(small.layout),
});

View File

@ -1512,8 +1512,12 @@ pub const Type = extern union {
}
}
pub fn hasCodeGenBits(self: Type) bool {
return switch (self.tag()) {
/// true if and only if the type takes up space in memory at runtime.
/// There are two reasons a type will return false:
/// * the type is a comptime-only type. For example, the type `type` itself.
/// * the type has only one possible value, making its ABI size 0.
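/// For example, `u8` returns true, while `void` (only one possible value)
/// returns false.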
pub fn hasRuntimeBits(ty: Type) bool {
return switch (ty.tag()) {
.u1,
.u8,
.i8,
@ -1542,13 +1546,9 @@ pub const Type = extern union {
.f128,
.bool,
.anyerror,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.const_slice_u8_sentinel_0,
.array_u8_sentinel_0,
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
.anyerror_void_error_union,
.error_set,
.error_set_single,
@ -1568,100 +1568,12 @@ pub const Type = extern union {
.export_options,
.extern_options,
.@"anyframe",
.anyframe_T,
.anyopaque,
.@"opaque",
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
.pointer,
=> true,
.function => !self.castTag(.function).?.data.is_generic,
.fn_noreturn_no_args,
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
=> true,
.@"struct" => {
const struct_obj = self.castTag(.@"struct").?.data;
if (struct_obj.known_has_bits) {
return true;
}
assert(struct_obj.haveFieldTypes());
for (struct_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
}
},
.enum_full => {
const enum_full = self.castTag(.enum_full).?.data;
return enum_full.fields.count() >= 2;
},
.enum_simple => {
const enum_simple = self.castTag(.enum_simple).?.data;
return enum_simple.fields.count() >= 2;
},
.enum_numbered, .enum_nonexhaustive => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.hasCodeGenBits();
},
.@"union" => {
const union_obj = self.castTag(.@"union").?.data;
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
}
},
.union_tagged => {
const union_obj = self.castTag(.union_tagged).?.data;
if (union_obj.tag_ty.hasCodeGenBits()) {
return true;
}
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
}
},
.array, .vector => self.elemType().hasCodeGenBits() and self.arrayLen() != 0,
.array_u8 => self.arrayLen() != 0,
.array_sentinel => self.childType().hasCodeGenBits(),
.int_signed, .int_unsigned => self.cast(Payload.Bits).?.data != 0,
.error_union => {
const payload = self.castTag(.error_union).?.data;
return payload.error_set.hasCodeGenBits() or payload.payload.hasCodeGenBits();
},
.tuple => {
const tuple = self.castTag(.tuple).?.data;
for (tuple.types) |ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (ty.hasCodeGenBits()) return true;
}
return false;
},
// These are false because they are comptime-only types.
.single_const_pointer_to_comptime_int,
.void,
.type,
.comptime_int,
@ -1674,8 +1586,109 @@ pub const Type = extern union {
.empty_struct_literal,
.type_info,
.bound_fn,
// These are function *bodies*, not pointers.
// Special exceptions have to be made when emitting functions because
// this returns false for them.
.function,
.fn_noreturn_no_args,
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
=> false,
// These types have more than one possible value, so they have runtime
// bits exactly when they are not comptime-only types.
.anyframe_T,
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
.pointer,
=> !ty.comptimeOnly(),
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
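// Fast paths: a comptime-only struct never has runtime bits, and a struct
// already known to be non-OPV (more than one possible value) always does.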
switch (struct_obj.requires_comptime) {
.wip => unreachable,
.yes => return false,
.no => if (struct_obj.known_non_opv) return true,
.unknown => {},
}
assert(struct_obj.haveFieldTypes());
for (struct_obj.fields.values()) |value| {
if (value.ty.hasRuntimeBits())
return true;
} else {
return false;
}
},
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
return enum_full.fields.count() >= 2;
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.fields.count() >= 2;
},
.enum_numbered, .enum_nonexhaustive => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = ty.intTagType(&buffer);
return int_tag_ty.hasRuntimeBits();
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasRuntimeBits())
return true;
} else {
return false;
}
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
if (union_obj.tag_ty.hasRuntimeBits()) {
return true;
}
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasRuntimeBits())
return true;
} else {
return false;
}
},
.array, .vector => ty.arrayLen() != 0 and ty.elemType().hasRuntimeBits(),
.array_u8 => ty.arrayLen() != 0,
.array_sentinel => ty.childType().hasRuntimeBits(),
.int_signed, .int_unsigned => ty.cast(Payload.Bits).?.data != 0,
.error_union => {
const payload = ty.castTag(.error_union).?.data;
return payload.error_set.hasRuntimeBits() or payload.payload.hasRuntimeBits();
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
for (tuple.types) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (field_ty.hasRuntimeBits()) return true;
}
return false;
},
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
.var_args_param => unreachable,
@ -1683,6 +1696,24 @@ pub const Type = extern union {
};
}
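/// Same as `hasRuntimeBits`, except function bodies also return true here,
/// unless they are generic, use an inline calling convention, or have a
/// comptime-only return type.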
pub fn isFnOrHasRuntimeBits(ty: Type) bool {
switch (ty.zigTypeTag()) {
.Fn => {
const fn_info = ty.fnInfo();
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
switch (fn_info.cc) {
// If there was a comptime calling convention, it should also return false here.
.Inline => return false,
else => {},
}
if (fn_info.return_type.comptimeOnly()) return false;
return true;
},
else => return ty.hasRuntimeBits(),
}
}
pub fn isNoReturn(self: Type) bool {
const definitely_correct_result =
self.tag_if_small_enough != .bound_fn and
@ -1857,7 +1888,7 @@ pub const Type = extern union {
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) return 1;
if (!child_type.hasRuntimeBits()) return 1;
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr())
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
@ -1867,9 +1898,9 @@ pub const Type = extern union {
.error_union => {
const data = self.castTag(.error_union).?.data;
if (!data.error_set.hasCodeGenBits()) {
if (!data.error_set.hasRuntimeBits()) {
return data.payload.abiAlignment(target);
} else if (!data.payload.hasCodeGenBits()) {
} else if (!data.payload.hasRuntimeBits()) {
return data.error_set.abiAlignment(target);
}
return @maximum(
@ -1889,7 +1920,7 @@ pub const Type = extern union {
if (!is_packed) {
var big_align: u32 = 0;
for (fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.normalAlignment(target);
big_align = @maximum(big_align, field_align);
@ -1903,7 +1934,7 @@ pub const Type = extern union {
var running_bits: u16 = 0;
for (fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.packedAlignment();
if (field_align == 0) {
@ -1941,7 +1972,7 @@ pub const Type = extern union {
for (tuple.types) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
const field_align = field_ty.abiAlignment(target);
big_align = @maximum(big_align, field_align);
@ -1984,7 +2015,7 @@ pub const Type = extern union {
}
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasCodeGenBits() return 0.
/// Types that return false for hasRuntimeBits() return 0.
pub fn abiSize(self: Type, target: Target) u64 {
return switch (self.tag()) {
.fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
@ -2071,24 +2102,8 @@ pub const Type = extern union {
.usize,
.@"anyframe",
.anyframe_T,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.const_slice,
.mut_slice,
=> {
return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2;
},
.const_slice_u8,
.const_slice_u8_sentinel_0,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
.optional_single_const_pointer,
.optional_single_mut_pointer,
=> {
if (!self.elemType().hasCodeGenBits()) return 1;
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
},
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
@ -2100,6 +2115,12 @@ pub const Type = extern union {
.manyptr_const_u8_sentinel_0,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.const_slice,
.mut_slice,
.const_slice_u8,
.const_slice_u8_sentinel_0,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
.pointer => switch (self.castTag(.pointer).?.data.size) {
.Slice => @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
else => @divExact(target.cpu.arch.ptrBitWidth(), 8),
@ -2137,7 +2158,7 @@ pub const Type = extern union {
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) return 1;
if (!child_type.hasRuntimeBits()) return 1;
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice())
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
@ -2151,11 +2172,11 @@ pub const Type = extern union {
.error_union => {
const data = self.castTag(.error_union).?.data;
if (!data.error_set.hasCodeGenBits() and !data.payload.hasCodeGenBits()) {
if (!data.error_set.hasRuntimeBits() and !data.payload.hasRuntimeBits()) {
return 0;
} else if (!data.error_set.hasCodeGenBits()) {
} else if (!data.error_set.hasRuntimeBits()) {
return data.payload.abiSize(target);
} else if (!data.payload.hasCodeGenBits()) {
} else if (!data.payload.hasRuntimeBits()) {
return data.error_set.abiSize(target);
}
const code_align = abiAlignment(data.error_set, target);
@ -2275,11 +2296,7 @@ pub const Type = extern union {
.optional_single_const_pointer,
.optional_single_mut_pointer,
=> {
if (ty.elemType().hasCodeGenBits()) {
return target.cpu.arch.ptrBitWidth();
} else {
return 1;
}
return target.cpu.arch.ptrBitWidth();
},
.single_const_pointer,
@ -2289,11 +2306,7 @@ pub const Type = extern union {
.c_const_pointer,
.c_mut_pointer,
=> {
if (ty.elemType().hasCodeGenBits()) {
return target.cpu.arch.ptrBitWidth();
} else {
return 0;
}
return target.cpu.arch.ptrBitWidth();
},
.pointer => switch (ty.castTag(.pointer).?.data.size) {
@ -2329,7 +2342,7 @@ pub const Type = extern union {
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = ty.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) return 8;
if (!child_type.hasRuntimeBits()) return 8;
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice())
return target.cpu.arch.ptrBitWidth();
@ -2343,11 +2356,11 @@ pub const Type = extern union {
.error_union => {
const payload = ty.castTag(.error_union).?.data;
if (!payload.error_set.hasCodeGenBits() and !payload.payload.hasCodeGenBits()) {
if (!payload.error_set.hasRuntimeBits() and !payload.payload.hasRuntimeBits()) {
return 0;
} else if (!payload.error_set.hasCodeGenBits()) {
} else if (!payload.error_set.hasRuntimeBits()) {
return payload.payload.bitSize(target);
} else if (!payload.payload.hasCodeGenBits()) {
} else if (!payload.payload.hasRuntimeBits()) {
return payload.error_set.bitSize(target);
}
@panic("TODO bitSize error union");
@ -2589,7 +2602,7 @@ pub const Type = extern union {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
// optionals of zero sized pointers behave like bools
if (!child_type.hasCodeGenBits()) return false;
if (!child_type.hasRuntimeBits()) return false;
if (child_type.zigTypeTag() != .Pointer) return false;
const info = child_type.ptrInfo().data;
@ -2626,7 +2639,7 @@ pub const Type = extern union {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
// optionals of zero sized types behave like bools, not pointers
if (!child_type.hasCodeGenBits()) return false;
if (!child_type.hasRuntimeBits()) return false;
if (child_type.zigTypeTag() != .Pointer) return false;
const info = child_type.ptrInfo().data;
@ -3494,7 +3507,7 @@ pub const Type = extern union {
},
.enum_nonexhaustive => {
const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
if (!tag_ty.hasCodeGenBits()) {
if (!tag_ty.hasRuntimeBits()) {
return Value.zero;
} else {
return null;
@ -3537,6 +3550,167 @@ pub const Type = extern union {
};
}
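/// Returns true if and only if the type's values can only exist at compile
/// time, e.g. `type`, `comptime_int`, or a pointer to a comptime-only type.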
/// During semantic analysis, instead call `Sema.typeRequiresComptime` which
/// resolves field types rather than asserting they are already resolved.
pub fn comptimeOnly(ty: Type) bool {
return switch (ty.tag()) {
.u1,
.u8,
.i8,
.u16,
.i16,
.u32,
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.anyopaque,
.bool,
.void,
.anyerror,
.noreturn,
.@"anyframe",
.@"null",
.@"undefined",
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_options,
.prefetch_options,
.export_options,
.extern_options,
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
.const_slice_u8,
.const_slice_u8_sentinel_0,
.anyerror_void_error_union,
.empty_struct_literal,
.empty_struct,
.error_set,
.error_set_single,
.error_set_inferred,
.error_set_merged,
.@"opaque",
.generic_poison,
.array_u8,
.array_u8_sentinel_0,
.int_signed,
.int_unsigned,
.enum_simple,
=> false,
.single_const_pointer_to_comptime_int,
.type,
.comptime_int,
.comptime_float,
.enum_literal,
.type_info,
// These are function bodies, not function pointers.
.fn_noreturn_no_args,
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.function,
=> true,
.var_args_param => unreachable,
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.bound_fn => unreachable,
.array,
.array_sentinel,
.vector,
=> return ty.childType().comptimeOnly(),
.pointer,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
=> {
const child_ty = ty.childType();
if (child_ty.zigTypeTag() == .Fn) {
return false;
} else {
return child_ty.comptimeOnly();
}
},
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
=> {
var buf: Type.Payload.ElemType = undefined;
return ty.optionalChild(&buf).comptimeOnly();
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
for (tuple.types) |field_ty| {
if (field_ty.comptimeOnly()) return true;
}
return false;
},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (struct_obj.requires_comptime) {
.wip, .unknown => unreachable, // This function asserts types already resolved.
.no => return false,
.yes => return true,
}
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Type.Payload.Union).?.data;
switch (union_obj.requires_comptime) {
.wip, .unknown => unreachable, // This function asserts types already resolved.
.no => return false,
.yes => return true,
}
},
.error_union => return ty.errorUnionPayload().comptimeOnly(),
.anyframe_T => {
const child_ty = ty.castTag(.anyframe_T).?.data;
return child_ty.comptimeOnly();
},
.enum_numbered => {
const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty;
return tag_ty.comptimeOnly();
},
.enum_full, .enum_nonexhaustive => {
const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty;
return tag_ty.comptimeOnly();
},
};
}
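
A user-level sketch of the predicate implemented above (the helper below is hypothetical and works over the language-facing type system, not the compiler-internal `Type`); note how the pointer arm makes `*const fn` an ordinary runtime address while function bodies and `type` itself remain comptime-only:

const std = @import("std");

// Hypothetical mirror of `Type.comptimeOnly` (illustrative only; the
// real function also handles optionals, arrays, structs, etc.).
fn requiresComptime(comptime T: type) bool {
    return switch (@typeInfo(T)) {
        // These have no runtime representation of their own.
        .Type, .ComptimeInt, .ComptimeFloat, .EnumLiteral, .Fn => true,
        // Pointers recurse, except that a pointer to a function body
        // is a plain runtime address.
        .Pointer => |info| @typeInfo(info.child) != .Fn and
            requiresComptime(info.child),
        else => false,
    };
}

test "comptime-only propagation (sketch)" {
    comptime std.debug.assert(requiresComptime(type));
    comptime std.debug.assert(requiresComptime(fn () void));
    comptime std.debug.assert(!requiresComptime(*const fn () void));
    comptime std.debug.assert(requiresComptime(*const type));
}
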
pub fn isIndexable(ty: Type) bool {
return switch (ty.zigTypeTag()) {
.Array, .Vector => true,
@ -3814,7 +3988,7 @@ pub const Type = extern union {
const field = it.struct_obj.fields.values()[it.field];
defer it.field += 1;
-if (!field.ty.hasCodeGenBits()) {
+if (!field.ty.hasRuntimeBits()) {
return PackedFieldOffset{
.field = it.field,
.offset = it.offset,
@ -3883,7 +4057,7 @@ pub const Type = extern union {
const field = it.struct_obj.fields.values()[it.field];
defer it.field += 1;
-if (!field.ty.hasCodeGenBits())
+if (!field.ty.hasRuntimeBits())
return FieldOffset{ .field = it.field, .offset = it.offset };
const field_align = field.normalAlignment(it.target);

View File

@ -1225,7 +1225,7 @@ pub const Value = extern union {
/// Asserts the value is an integer and not undefined.
/// Returns the number of bits the value requires to be represented in two's complement form.
-pub fn intBitCountTwosComp(self: Value) usize {
+pub fn intBitCountTwosComp(self: Value, target: Target) usize {
switch (self.tag()) {
.zero,
.bool_false,
@ -1244,6 +1244,15 @@ pub const Value = extern union {
.int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(),
.int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
.decl_ref_mut,
.extern_fn,
.decl_ref,
.function,
.variable,
.eu_payload_ptr,
.opt_payload_ptr,
=> return target.cpu.arch.ptrBitWidth(),
else => {
var buffer: BigIntSpace = undefined;
return self.toBigInt(&buffer).bitCountTwosComp();
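
The new `target` parameter exists for the tags handled above: a `decl_ref`, `function`, `extern_fn`, or `variable` is an address that is only symbolically known, so its bit count cannot be computed from digits, and the honest answer is the target's pointer width. A minimal standalone restatement (hypothetical enum and helper, not the compiler's types):

const std = @import("std");

const ValueKind = enum { int_small, decl_ref, function, variable };

// Pointer-backed values report pointer width; concrete integers
// report their magnitude's bit count.
fn bitCount(kind: ValueKind, int_val: u64, ptr_bits: u16) u16 {
    return switch (kind) {
        .decl_ref, .function, .variable => ptr_bits,
        .int_small => @intCast(u16, 64 - @clz(u64, int_val)),
    };
}

test "symbolic addresses are pointer-width" {
    try std.testing.expectEqual(@as(u16, 64), bitCount(.decl_ref, 0, 64));
    try std.testing.expectEqual(@as(u16, 3), bitCount(.int_small, 5, 64));
}
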
@ -1333,6 +1342,20 @@ pub const Value = extern union {
return true;
},
.decl_ref_mut,
.extern_fn,
.decl_ref,
.function,
.variable,
=> {
const info = ty.intInfo(target);
const ptr_bits = target.cpu.arch.ptrBitWidth();
return switch (info.signedness) {
.signed => info.bits > ptr_bits,
.unsigned => info.bits >= ptr_bits,
};
},
else => unreachable,
}
}
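
The arm added above encodes a small width argument: an unknown address may occupy all `ptr_bits` bits, so it provably fits an unsigned integer type of at least pointer width, and a signed one only with an extra bit for the sign. Restated as a standalone helper (hypothetical, mirroring the switch):

const std = @import("std");

fn unknownAddressFits(int_bits: u16, signed: bool, ptr_bits: u16) bool {
    // Signed needs a sign bit on top of the full pointer width;
    // unsigned just needs the width itself.
    return if (signed) int_bits > ptr_bits else int_bits >= ptr_bits;
}

test "pointer-width fit rule" {
    try std.testing.expect(unknownAddressFits(64, false, 64)); // u64: ok
    try std.testing.expect(!unknownAddressFits(64, true, 64)); // i64: no
    try std.testing.expect(unknownAddressFits(65, true, 64)); // i65: ok
    try std.testing.expect(!unknownAddressFits(32, false, 64)); // u32: no
}
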
@ -1397,6 +1420,11 @@ pub const Value = extern union {
.one,
.bool_true,
.decl_ref,
.decl_ref_mut,
.extern_fn,
.function,
.variable,
=> .gt,
.int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
@ -1417,10 +1445,18 @@ pub const Value = extern union {
pub fn order(lhs: Value, rhs: Value) std.math.Order {
const lhs_tag = lhs.tag();
const rhs_tag = rhs.tag();
-const lhs_is_zero = lhs_tag == .zero;
-const rhs_is_zero = rhs_tag == .zero;
-if (lhs_is_zero) return rhs.orderAgainstZero().invert();
-if (rhs_is_zero) return lhs.orderAgainstZero();
+const lhs_against_zero = lhs.orderAgainstZero();
+const rhs_against_zero = rhs.orderAgainstZero();
+switch (lhs_against_zero) {
+.lt => if (rhs_against_zero != .lt) return .lt,
+.eq => return rhs_against_zero.invert(),
+.gt => {},
+}
+switch (rhs_against_zero) {
+.lt => if (lhs_against_zero != .lt) return .gt,
+.eq => return lhs_against_zero,
+.gt => {},
+}
const lhs_float = lhs.isFloat();
const rhs_float = rhs.isFloat();
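
The reworked prologue of `order` dispatches purely on each operand's sign, so values without a numeric representation, such as a `decl_ref` that orders `.gt` against zero, can still be compared against ordinary numbers; only two operands with the same nonzero sign fall through to the magnitude comparison. A standalone restatement of that dispatch (hypothetical helper):

const std = @import("std");
const Order = std.math.Order;

// Returns the result when signs alone decide; null means both
// operands share a nonzero sign and magnitudes must be compared.
fn orderBySign(lhs: Order, rhs: Order) ?Order {
    switch (lhs) {
        .lt => if (rhs != .lt) return .lt,
        .eq => return rhs.invert(),
        .gt => {},
    }
    switch (rhs) {
        .lt => if (lhs != .lt) return .gt,
        .eq => return lhs,
        .gt => {},
    }
    return null;
}

test "sign-only dispatch" {
    try std.testing.expectEqual(@as(?Order, .lt), orderBySign(.lt, .gt));
    try std.testing.expectEqual(@as(?Order, .gt), orderBySign(.gt, .eq));
    try std.testing.expectEqual(@as(?Order, null), orderBySign(.gt, .gt));
}
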
@ -1451,6 +1487,27 @@ pub const Value = extern union {
/// Asserts the value is comparable. Does not take a type parameter because it supports
/// comparisons between heterogeneous types.
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool {
if (lhs.pointerDecl()) |lhs_decl| {
if (rhs.pointerDecl()) |rhs_decl| {
switch (op) {
.eq => return lhs_decl == rhs_decl,
.neq => return lhs_decl != rhs_decl,
else => {},
}
} else {
switch (op) {
.eq => return false,
.neq => return true,
else => {},
}
}
} else if (rhs.pointerDecl()) |_| {
switch (op) {
.eq => return false,
.neq => return true,
else => {},
}
}
return order(lhs, rhs).compare(op);
}
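
The decl-pointer fast path above makes address identity decidable at comptime even though no numeric address exists yet: two references to the same declaration are `eq`, references to different declarations are `neq`, and a decl pointer is never `eq` to a plain integer; only ordering falls through to `order`. In user-facing terms one would expect roughly the following to hold (a sketch with hypothetical globals `a` and `b`):

const std = @import("std");

var a: u32 = 0;
var b: u32 = 0;

test "pointer identity without concrete addresses" {
    // Same declaration: comptime-known equal.
    comptime std.debug.assert(&a == &a);
    // Distinct declarations: comptime-known unequal.
    comptime std.debug.assert(&a != &b);
}
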

View File

@ -155,10 +155,14 @@ test "implicit cast *[0]T to E![]const u8" {
}
var global_array: [4]u8 = undefined;
test "cast from array reference to fn" {
test "cast from array reference to fn: comptime fn ptr" {
const f = @ptrCast(*const fn () callconv(.C) void, &global_array);
try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}
test "cast from array reference to fn: runtime fn ptr" {
var f = @ptrCast(*const fn () callconv(.C) void, &global_array);
try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}
test "*const [N]null u8 to ?[]const u8" {
const S = struct {

View File

@ -751,7 +751,7 @@ pub fn addCases(ctx: *TestContext) !void {
{
var case = ctx.exe("function pointers", linux_arm);
case.addCompareOutput(
-\\const PrintFn = fn () void;
+\\const PrintFn = *const fn () void;
\\
\\pub fn main() void {
\\ var printFn: PrintFn = stopSayingThat;