diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig
index ba5801e936..3675a90257 100644
--- a/lib/std/builtin.zig
+++ b/lib/std/builtin.zig
@@ -730,10 +730,16 @@ pub const CompilerBackend = enum(u64) {
 /// therefore must be kept in sync with the compiler implementation.
 pub const TestFn = struct {
     name: []const u8,
-    func: fn () anyerror!void,
+    func: testFnProto,
     async_frame_size: ?usize,
 };
 
+/// stage1 is *wrong*. It is not yet updated to support the new function type semantics.
+const testFnProto = switch (builtin.zig_backend) {
+    .stage1 => fn () anyerror!void, // wrong!
+    else => *const fn () anyerror!void,
+};
+
 /// This function type is used by the Zig language code generation and
 /// therefore must be kept in sync with the compiler implementation.
 pub const PanicFn = fn ([]const u8, ?*StackTrace) noreturn;
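A minimal sketch of the function type semantics this change targets (illustration, not part of the patch): under the self-hosted compiler, a bare `fn () T` is a function body type, which is comptime-only, so a runtime-held function must be an explicit pointer, hence the `*const fn () anyerror!void` above.

    fn hello() anyerror!void {}

    test "explicit function pointers" {
        // stage1 accepted `const f: fn () anyerror!void = hello;`
        const f: *const fn () anyerror!void = &hello;
        try f(); // function pointers remain directly callable
    }
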
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 114cd5f505..6adec3fc53 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -3240,7 +3240,8 @@ fn fnDecl(
     const doc_comment_index = try astgen.docCommentAsString(fn_proto.firstToken());
 
     const has_section_or_addrspace = fn_proto.ast.section_expr != 0 or fn_proto.ast.addrspace_expr != 0;
-    wip_members.nextDecl(is_pub, is_export, fn_proto.ast.align_expr != 0, has_section_or_addrspace);
+    // Alignment is passed in the func instruction in this case.
+    wip_members.nextDecl(is_pub, is_export, false, has_section_or_addrspace);
 
     var params_scope = &fn_gz.base;
     const is_var_args = is_var_args: {
@@ -3380,7 +3381,7 @@ fn fnDecl(
             .param_block = block_inst,
             .body_gz = null,
             .cc = cc,
-            .align_inst = .none, // passed in the per-decl data
+            .align_inst = align_inst,
             .lib_name = lib_name,
             .is_var_args = is_var_args,
             .is_inferred_error = false,
@@ -3423,7 +3424,7 @@ fn fnDecl(
         .ret_br = ret_br,
         .body_gz = &fn_gz,
         .cc = cc,
-        .align_inst = .none, // passed in the per-decl data
+        .align_inst = align_inst,
         .lib_name = lib_name,
         .is_var_args = is_var_args,
         .is_inferred_error = is_inferred_error,
@@ -3449,9 +3450,6 @@ fn fnDecl(
     wip_members.appendToDecl(fn_name_str_index);
     wip_members.appendToDecl(block_inst);
     wip_members.appendToDecl(doc_comment_index);
-    if (align_inst != .none) {
-        wip_members.appendToDecl(@enumToInt(align_inst));
-    }
     if (has_section_or_addrspace) {
         wip_members.appendToDecl(@enumToInt(section_inst));
         wip_members.appendToDecl(@enumToInt(addrspace_inst));
@@ -3830,7 +3828,8 @@ fn structDeclInner(
             .fields_len = 0,
             .body_len = 0,
             .decls_len = 0,
-            .known_has_bits = false,
+            .known_non_opv = false,
+            .known_comptime_only = false,
         });
         return indexToRef(decl_inst);
     }
@@ -3871,7 +3870,8 @@ fn structDeclInner(
     var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size);
     defer wip_members.deinit();
 
-    var known_has_bits = false;
+    var known_non_opv = false;
+    var known_comptime_only = false;
     for (container_decl.ast.members) |member_node| {
         const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
             .decl => continue,
@@ -3894,7 +3894,10 @@ fn structDeclInner(
         const doc_comment_index = try astgen.docCommentAsString(member.firstToken());
         wip_members.appendToField(doc_comment_index);
 
-        known_has_bits = known_has_bits or nodeImpliesRuntimeBits(tree, member.ast.type_expr);
+        known_non_opv = known_non_opv or
+            nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr);
+        known_comptime_only = known_comptime_only or
+            nodeImpliesComptimeOnly(tree, member.ast.type_expr);
 
         const have_align = member.ast.align_expr != 0;
         const have_value = member.ast.value_expr != 0;
@@ -3928,7 +3931,8 @@ fn structDeclInner(
         .body_len = @intCast(u32, body.len),
         .fields_len = field_count,
         .decls_len = decl_count,
-        .known_has_bits = known_has_bits,
+        .known_non_opv = known_non_opv,
+        .known_comptime_only = known_comptime_only,
     });
 
     wip_members.finishBits(bits_per_field);
@@ -8197,7 +8201,9 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.Ev
         }
     }
 }
 
-fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
+/// Returns `true` if it is known the type expression has more than one possible value;
+/// `false` otherwise.
+fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.Index) bool {
     const node_tags = tree.nodes.items(.tag);
     const node_datas = tree.nodes.items(.data);
@@ -8243,7 +8249,6 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
             .multiline_string_literal,
             .char_literal,
             .unreachable_literal,
-            .identifier,
             .error_set_decl,
             .container_decl,
             .container_decl_trailing,
@@ -8357,6 +8362,11 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
             .builtin_call_comma,
             .builtin_call_two,
             .builtin_call_two_comma,
+            // these are function bodies, not pointers
+            .fn_proto_simple,
+            .fn_proto_multi,
+            .fn_proto_one,
+            .fn_proto,
             => return false,
 
             // Forward the question to the LHS sub-expression.
@@ -8368,10 +8378,6 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
             .unwrap_optional,
             => node = node_datas[node].lhs,
 
-            .fn_proto_simple,
-            .fn_proto_multi,
-            .fn_proto_one,
-            .fn_proto,
             .ptr_type_aligned,
             .ptr_type_sentinel,
             .ptr_type,
@@ -8380,6 +8386,301 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
             .anyframe_type,
             .array_type_sentinel,
             => return true,
+
+            .identifier => {
+                const main_tokens = tree.nodes.items(.main_token);
+                const ident_bytes = tree.tokenSlice(main_tokens[node]);
+                if (primitives.get(ident_bytes)) |primitive| switch (primitive) {
+                    .anyerror_type,
+                    .anyframe_type,
+                    .anyopaque_type,
+                    .bool_type,
+                    .c_int_type,
+                    .c_long_type,
+                    .c_longdouble_type,
+                    .c_longlong_type,
+                    .c_short_type,
+                    .c_uint_type,
+                    .c_ulong_type,
+                    .c_ulonglong_type,
+                    .c_ushort_type,
+                    .comptime_float_type,
+                    .comptime_int_type,
+                    .f128_type,
+                    .f16_type,
+                    .f32_type,
+                    .f64_type,
+                    .i16_type,
+                    .i32_type,
+                    .i64_type,
+                    .i128_type,
+                    .i8_type,
+                    .isize_type,
+                    .type_type,
+                    .u16_type,
+                    .u32_type,
+                    .u64_type,
+                    .u128_type,
+                    .u1_type,
+                    .u8_type,
+                    .usize_type,
+                    => return true,
+
+                    .void_type,
+                    .bool_false,
+                    .bool_true,
+                    .null_value,
+                    .undef,
+                    .noreturn_type,
+                    => return false,
+
+                    else => unreachable, // that's all the values from `primitives`.
+                } else {
+                    return false;
+                }
+            },
+        }
+    }
+}
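For context, a sketch of what "one possible value" (OPV) means for the renamed flag: a field type expression such as `u8` proves at AstGen time that the struct has more than one possible value, while the heuristic stays conservative for anything it cannot resolve syntactically.

    const Opv = struct { a: u0, b: void }; // exactly one possible value; no runtime bits
    const NonOpv = struct { a: u8 }; // `u8` alone implies more than one possible value
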
+
+/// Returns `true` if it is known the expression is a type that cannot be used at runtime;
+/// `false` otherwise.
+fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool {
+    const node_tags = tree.nodes.items(.tag);
+    const node_datas = tree.nodes.items(.data);
+
+    var node = start_node;
+    while (true) {
+        switch (node_tags[node]) {
+            .root,
+            .@"usingnamespace",
+            .test_decl,
+            .switch_case,
+            .switch_case_one,
+            .container_field_init,
+            .container_field_align,
+            .container_field,
+            .asm_output,
+            .asm_input,
+            .global_var_decl,
+            .local_var_decl,
+            .simple_var_decl,
+            .aligned_var_decl,
+            => unreachable,
+
+            .@"return",
+            .@"break",
+            .@"continue",
+            .bit_not,
+            .bool_not,
+            .@"defer",
+            .@"errdefer",
+            .address_of,
+            .negation,
+            .negation_wrap,
+            .@"resume",
+            .array_type,
+            .@"suspend",
+            .@"anytype",
+            .fn_decl,
+            .anyframe_literal,
+            .integer_literal,
+            .float_literal,
+            .enum_literal,
+            .string_literal,
+            .multiline_string_literal,
+            .char_literal,
+            .unreachable_literal,
+            .error_set_decl,
+            .container_decl,
+            .container_decl_trailing,
+            .container_decl_two,
+            .container_decl_two_trailing,
+            .container_decl_arg,
+            .container_decl_arg_trailing,
+            .tagged_union,
+            .tagged_union_trailing,
+            .tagged_union_two,
+            .tagged_union_two_trailing,
+            .tagged_union_enum_tag,
+            .tagged_union_enum_tag_trailing,
+            .@"asm",
+            .asm_simple,
+            .add,
+            .add_wrap,
+            .add_sat,
+            .array_cat,
+            .array_mult,
+            .assign,
+            .assign_bit_and,
+            .assign_bit_or,
+            .assign_shl,
+            .assign_shl_sat,
+            .assign_shr,
+            .assign_bit_xor,
+            .assign_div,
+            .assign_sub,
+            .assign_sub_wrap,
+            .assign_sub_sat,
+            .assign_mod,
+            .assign_add,
+            .assign_add_wrap,
+            .assign_add_sat,
+            .assign_mul,
+            .assign_mul_wrap,
+            .assign_mul_sat,
+            .bang_equal,
+            .bit_and,
+            .bit_or,
+            .shl,
+            .shl_sat,
+            .shr,
+            .bit_xor,
+            .bool_and,
+            .bool_or,
+            .div,
+            .equal_equal,
+            .error_union,
+            .greater_or_equal,
+            .greater_than,
+            .less_or_equal,
+            .less_than,
+            .merge_error_sets,
+            .mod,
+            .mul,
+            .mul_wrap,
+            .mul_sat,
+            .switch_range,
+            .field_access,
+            .sub,
+            .sub_wrap,
+            .sub_sat,
+            .slice,
+            .slice_open,
+            .slice_sentinel,
+            .deref,
+            .array_access,
+            .error_value,
+            .while_simple,
+            .while_cont,
+            .for_simple,
+            .if_simple,
+            .@"catch",
+            .@"orelse",
+            .array_init_one,
+            .array_init_one_comma,
+            .array_init_dot_two,
+            .array_init_dot_two_comma,
+            .array_init_dot,
+            .array_init_dot_comma,
+            .array_init,
+            .array_init_comma,
+            .struct_init_one,
+            .struct_init_one_comma,
+            .struct_init_dot_two,
+            .struct_init_dot_two_comma,
+            .struct_init_dot,
+            .struct_init_dot_comma,
+            .struct_init,
+            .struct_init_comma,
+            .@"while",
+            .@"if",
+            .@"for",
+            .@"switch",
+            .switch_comma,
+            .call_one,
+            .call_one_comma,
+            .async_call_one,
+            .async_call_one_comma,
+            .call,
+            .call_comma,
+            .async_call,
+            .async_call_comma,
+            .block_two,
+            .block_two_semicolon,
+            .block,
+            .block_semicolon,
+            .builtin_call,
+            .builtin_call_comma,
+            .builtin_call_two,
+            .builtin_call_two_comma,
+            .ptr_type_aligned,
+            .ptr_type_sentinel,
+            .ptr_type,
+            .ptr_type_bit_range,
+            .optional_type,
+            .anyframe_type,
+            .array_type_sentinel,
+            => return false,
+
+            // these are function bodies, not pointers
+            .fn_proto_simple,
+            .fn_proto_multi,
+            .fn_proto_one,
+            .fn_proto,
+            => return true,
+
+            // Forward the question to the LHS sub-expression.
+            .grouped_expression,
+            .@"try",
+            .@"await",
+            .@"comptime",
+            .@"nosuspend",
+            .unwrap_optional,
+            => node = node_datas[node].lhs,
+
+            .identifier => {
+                const main_tokens = tree.nodes.items(.main_token);
+                const ident_bytes = tree.tokenSlice(main_tokens[node]);
+                if (primitives.get(ident_bytes)) |primitive| switch (primitive) {
+                    .anyerror_type,
+                    .anyframe_type,
+                    .anyopaque_type,
+                    .bool_type,
+                    .c_int_type,
+                    .c_long_type,
+                    .c_longdouble_type,
+                    .c_longlong_type,
+                    .c_short_type,
+                    .c_uint_type,
+                    .c_ulong_type,
+                    .c_ulonglong_type,
+                    .c_ushort_type,
+                    .f128_type,
+                    .f16_type,
+                    .f32_type,
+                    .f64_type,
+                    .i16_type,
+                    .i32_type,
+                    .i64_type,
+                    .i128_type,
+                    .i8_type,
+                    .isize_type,
+                    .u16_type,
+                    .u32_type,
+                    .u64_type,
+                    .u128_type,
+                    .u1_type,
+                    .u8_type,
+                    .usize_type,
+                    .void_type,
+                    .bool_false,
+                    .bool_true,
+                    .null_value,
+                    .undef,
+                    .noreturn_type,
+                    => return false,
+
+                    .comptime_float_type,
+                    .comptime_int_type,
+                    .type_type,
+                    => return true,
+
+                    else => unreachable, // that's all the values from `primitives`.
+                } else {
+                    return false;
+                }
+            },
+        }
+    }
+}
@@ -10120,7 +10421,8 @@ const GenZir = struct {
         fields_len: u32,
         decls_len: u32,
         layout: std.builtin.TypeInfo.ContainerLayout,
-        known_has_bits: bool,
+        known_non_opv: bool,
+        known_comptime_only: bool,
     }) !void {
         const astgen = gz.astgen;
         const gpa = astgen.gpa;
@@ -10150,7 +10452,8 @@ const GenZir = struct {
             .has_body_len = args.body_len != 0,
             .has_fields_len = args.fields_len != 0,
             .has_decls_len = args.decls_len != 0,
-            .known_has_bits = args.known_has_bits,
+            .known_non_opv = args.known_non_opv,
+            .known_comptime_only = args.known_comptime_only,
             .name_strategy = gz.anon_name_strategy,
             .layout = args.layout,
         }),
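A sketch of what `known_comptime_only` detects (illustration, not part of the patch): `type`, `comptime_int`, `comptime_float`, and, with this change, bare function types force a containing struct to be comptime-only.

    const RequiresComptime = struct {
        T: type, // comptime-only field
        f: fn () void, // function body type: now also comptime-only
    };
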
+ log.warn("unable to load {s}: {s}", .{ + s.owner_decl.getFileScope().sub_file_path, @errorName(err), + }); + return s.srcLoc(); + }; + const node = s.owner_decl.relativeToNodeIndex(s.node_offset); + const node_tags = tree.nodes.items(.tag); + const file = s.owner_decl.getFileScope(); + switch (node_tags[node]) { + .container_decl, + .container_decl_trailing, + => return queryFieldSrc(tree.*, query, file, tree.containerDecl(node)), + .container_decl_two, .container_decl_two_trailing => { + var buffer: [2]Ast.Node.Index = undefined; + return queryFieldSrc(tree.*, query, file, tree.containerDeclTwo(&buffer, node)); + }, + .container_decl_arg, + .container_decl_arg_trailing, + => return queryFieldSrc(tree.*, query, file, tree.containerDeclArg(node)), + + .tagged_union, + .tagged_union_trailing, + => return queryFieldSrc(tree.*, query, file, tree.taggedUnion(node)), + .tagged_union_two, .tagged_union_two_trailing => { + var buffer: [2]Ast.Node.Index = undefined; + return queryFieldSrc(tree.*, query, file, tree.taggedUnionTwo(&buffer, node)); + }, + .tagged_union_enum_tag, + .tagged_union_enum_tag_trailing, + => return queryFieldSrc(tree.*, query, file, tree.taggedUnionEnumTag(node)), + + else => unreachable, + } + } + pub fn haveFieldTypes(s: Struct) bool { return switch (s.status) { .none, @@ -1063,6 +1104,33 @@ pub const Union = struct { }; } + pub fn fieldSrcLoc(u: Union, gpa: Allocator, query: FieldSrcQuery) SrcLoc { + @setCold(true); + const tree = u.owner_decl.getFileScope().getTree(gpa) catch |err| { + // In this case we emit a warning + a less precise source location. + log.warn("unable to load {s}: {s}", .{ + u.owner_decl.getFileScope().sub_file_path, @errorName(err), + }); + return u.srcLoc(); + }; + const node = u.owner_decl.relativeToNodeIndex(u.node_offset); + const node_tags = tree.nodes.items(.tag); + const file = u.owner_decl.getFileScope(); + switch (node_tags[node]) { + .container_decl, + .container_decl_trailing, + => return queryFieldSrc(tree.*, query, file, tree.containerDecl(node)), + .container_decl_two, .container_decl_two_trailing => { + var buffer: [2]Ast.Node.Index = undefined; + return queryFieldSrc(tree.*, query, file, tree.containerDeclTwo(&buffer, node)); + }, + .container_decl_arg, + .container_decl_arg_trailing, + => return queryFieldSrc(tree.*, query, file, tree.containerDeclArg(node)), + else => unreachable, + } + } + pub fn haveFieldTypes(u: Union) bool { return switch (u.status) { .none, @@ -1080,7 +1148,7 @@ pub const Union = struct { pub fn hasAllZeroBitFieldTypes(u: Union) bool { assert(u.haveFieldTypes()); for (u.fields.values()) |field| { - if (field.ty.hasCodeGenBits()) return false; + if (field.ty.hasRuntimeBits()) return false; } return true; } @@ -1090,7 +1158,7 @@ pub const Union = struct { var most_alignment: u32 = 0; var most_index: usize = undefined; for (u.fields.values()) |field, i| { - if (!field.ty.hasCodeGenBits()) continue; + if (!field.ty.hasRuntimeBits()) continue; const field_align = a: { if (field.abi_align.tag() == .abi_align_default) { @@ -1111,7 +1179,7 @@ pub const Union = struct { var max_align: u32 = 0; if (have_tag) max_align = u.tag_ty.abiAlignment(target); for (u.fields.values()) |field| { - if (!field.ty.hasCodeGenBits()) continue; + if (!field.ty.hasRuntimeBits()) continue; const field_align = a: { if (field.abi_align.tag() == .abi_align_default) { @@ -1164,7 +1232,7 @@ pub const Union = struct { var payload_size: u64 = 0; var payload_align: u32 = 0; for (u.fields.values()) |field, i| { - if (!field.ty.hasCodeGenBits()) 
@@ -1080,7 +1148,7 @@ pub const Union = struct {
     pub fn hasAllZeroBitFieldTypes(u: Union) bool {
         assert(u.haveFieldTypes());
         for (u.fields.values()) |field| {
-            if (field.ty.hasCodeGenBits()) return false;
+            if (field.ty.hasRuntimeBits()) return false;
         }
         return true;
     }
@@ -1090,7 +1158,7 @@ pub const Union = struct {
         var most_alignment: u32 = 0;
         var most_index: usize = undefined;
         for (u.fields.values()) |field, i| {
-            if (!field.ty.hasCodeGenBits()) continue;
+            if (!field.ty.hasRuntimeBits()) continue;
 
             const field_align = a: {
                 if (field.abi_align.tag() == .abi_align_default) {
@@ -1111,7 +1179,7 @@ pub const Union = struct {
         var max_align: u32 = 0;
         if (have_tag) max_align = u.tag_ty.abiAlignment(target);
         for (u.fields.values()) |field| {
-            if (!field.ty.hasCodeGenBits()) continue;
+            if (!field.ty.hasRuntimeBits()) continue;
 
             const field_align = a: {
                 if (field.abi_align.tag() == .abi_align_default) {
@@ -1164,7 +1232,7 @@ pub const Union = struct {
         var payload_size: u64 = 0;
         var payload_align: u32 = 0;
         for (u.fields.values()) |field, i| {
-            if (!field.ty.hasCodeGenBits()) continue;
+            if (!field.ty.hasRuntimeBits()) continue;
 
             const field_align = a: {
                 if (field.abi_align.tag() == .abi_align_default) {
@@ -3391,7 +3459,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
         .zir_index = undefined, // set below
         .layout = .Auto,
         .status = .none,
-        .known_has_bits = undefined,
+        .known_non_opv = undefined,
         .namespace = .{
             .parent = null,
             .ty = struct_ty,
@@ -3628,7 +3696,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
     var type_changed = true;
     if (decl.has_tv) {
-        prev_type_has_bits = decl.ty.hasCodeGenBits();
+        prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits();
         type_changed = !decl.ty.eql(decl_tv.ty);
         if (decl.getFunction()) |prev_func| {
             prev_is_inline = prev_func.state == .inline_only;
@@ -3648,8 +3716,9 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
         decl.analysis = .complete;
         decl.generation = mod.generation;
 
-        const is_inline = decl_tv.ty.fnCallingConvention() == .Inline;
-        if (!is_inline and decl_tv.ty.hasCodeGenBits()) {
+        const has_runtime_bits = try sema.fnHasRuntimeBits(&block_scope, src, decl.ty);
+
+        if (has_runtime_bits) {
             // We don't fully codegen the decl until later, but we do need to reserve a global
             // offset table index for it. This allows us to codegen decls out of dependency
             // order, increasing how many computations can be done in parallel.
@@ -3662,6 +3731,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
             mod.comp.bin_file.freeDecl(decl);
         }
 
+        const is_inline = decl.ty.fnCallingConvention() == .Inline;
         if (decl.is_exported) {
             const export_src = src; // TODO make this point at `export` token
             if (is_inline) {
@@ -3682,6 +3752,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
     decl.owns_tv = false;
     var queue_linker_work = false;
+    var is_extern = false;
     switch (decl_tv.val.tag()) {
         .variable => {
             const variable = decl_tv.val.castTag(.variable).?.data;
@@ -3698,6 +3769,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
             if (decl == owner_decl) {
                 decl.owns_tv = true;
                 queue_linker_work = true;
+                is_extern = true;
             }
         },
 
@@ -3723,7 +3795,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
     decl.analysis = .complete;
     decl.generation = mod.generation;
 
-    if (queue_linker_work and decl.ty.hasCodeGenBits()) {
+    const has_runtime_bits = is_extern or
+        (queue_linker_work and try sema.typeHasRuntimeBits(&block_scope, src, decl.ty));
+
+    if (has_runtime_bits) {
         log.debug("queue linker work for {*} ({s})", .{ decl, decl.name });
 
         try mod.comp.bin_file.allocateDeclIndexes(decl);
@@ -4224,7 +4299,7 @@ pub fn clearDecl(
         mod.deleteDeclExports(decl);
 
     if (decl.has_tv) {
-        if (decl.ty.hasCodeGenBits()) {
+        if (decl.ty.isFnOrHasRuntimeBits()) {
             mod.comp.bin_file.freeDecl(decl);
 
             // TODO instead of a union, put this memory trailing Decl objects,
@@ -4277,7 +4352,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void {
     switch (mod.comp.bin_file.tag) {
         .c => {}, // this linker backend has already migrated to the new API
         else => if (decl.has_tv) {
-            if (decl.ty.hasCodeGenBits()) {
+            if (decl.ty.isFnOrHasRuntimeBits()) {
                 mod.comp.bin_file.freeDecl(decl);
             }
         },
@@ -4662,8 +4737,8 @@ pub fn createAnonymousDeclFromDeclNamed(
     new_decl.src_line = src_decl.src_line;
     new_decl.ty = typed_value.ty;
     new_decl.val = typed_value.val;
-    new_decl.align_val = Value.initTag(.null_value);
-    new_decl.linksection_val = Value.initTag(.null_value);
+    new_decl.align_val = Value.@"null";
+    new_decl.linksection_val = Value.@"null";
     new_decl.has_tv = true;
     new_decl.analysis = .complete;
     new_decl.generation = mod.generation;
@@ -4674,7 +4749,7 @@ pub fn createAnonymousDeclFromDeclNamed(
     // if the Decl is referenced by an instruction or another constant. Otherwise,
     // the Decl will be garbage collected by the `codegen_decl` task instead of sent
     // to the linker.
-    if (typed_value.ty.hasCodeGenBits()) {
+    if (typed_value.ty.isFnOrHasRuntimeBits()) {
         try mod.comp.bin_file.allocateDeclIndexes(new_decl);
         try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl });
     }
@@ -4905,6 +4980,55 @@ pub const PeerTypeCandidateSrc = union(enum) {
     }
 };
 
+const FieldSrcQuery = struct {
+    index: usize,
+    range: enum { name, type, value, alignment },
+};
+
+fn queryFieldSrc(
+    tree: Ast,
+    query: FieldSrcQuery,
+    file_scope: *File,
+    container_decl: Ast.full.ContainerDecl,
+) SrcLoc {
+    const node_tags = tree.nodes.items(.tag);
+    var field_index: usize = 0;
+    for (container_decl.ast.members) |member_node| {
+        const field = switch (node_tags[member_node]) {
+            .container_field_init => tree.containerFieldInit(member_node),
+            .container_field_align => tree.containerFieldAlign(member_node),
+            .container_field => tree.containerField(member_node),
+            else => continue,
+        };
+        if (field_index == query.index) {
+            return switch (query.range) {
+                .name => .{
+                    .file_scope = file_scope,
+                    .parent_decl_node = 0,
+                    .lazy = .{ .token_abs = field.ast.name_token },
+                },
+                .type => .{
+                    .file_scope = file_scope,
+                    .parent_decl_node = 0,
+                    .lazy = .{ .node_abs = field.ast.type_expr },
+                },
+                .value => .{
+                    .file_scope = file_scope,
+                    .parent_decl_node = 0,
+                    .lazy = .{ .node_abs = field.ast.value_expr },
+                },
+                .alignment => .{
+                    .file_scope = file_scope,
+                    .parent_decl_node = 0,
+                    .lazy = .{ .node_abs = field.ast.align_expr },
+                },
+            };
+        }
+        field_index += 1;
+    }
+    unreachable;
+}
+
 /// Called from `performAllTheWork`, after all AstGen workers have finished,
 /// and before the main semantic analysis loop begins.
 pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {
diff --git a/src/Sema.zig b/src/Sema.zig
index 6a2085cf3f..a761623b2e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -437,9 +437,10 @@ pub const Block = struct {
         }
     }
 
-    pub fn startAnonDecl(block: *Block) !WipAnonDecl {
+    pub fn startAnonDecl(block: *Block, src: LazySrcLoc) !WipAnonDecl {
         return WipAnonDecl{
             .block = block,
+            .src = src,
             .new_decl_arena = std.heap.ArenaAllocator.init(block.sema.gpa),
             .finished = false,
         };
@@ -447,6 +448,7 @@ pub const Block = struct {
 
     pub const WipAnonDecl = struct {
         block: *Block,
+        src: LazySrcLoc,
         new_decl_arena: std.heap.ArenaAllocator,
         finished: bool,
@@ -462,11 +464,15 @@ pub const Block = struct {
         }
 
         pub fn finish(wad: *WipAnonDecl, ty: Type, val: Value) !*Decl {
-            const new_decl = try wad.block.sema.mod.createAnonymousDecl(wad.block, .{
+            const sema = wad.block.sema;
+            // Do this ahead of time because `createAnonymousDecl` depends on calling
+            // `type.hasRuntimeBits()`.
+            _ = try sema.typeHasRuntimeBits(wad.block, wad.src, ty);
+            const new_decl = try sema.mod.createAnonymousDecl(wad.block, .{
                 .ty = ty,
                 .val = val,
             });
-            errdefer wad.block.sema.mod.abortAnonDecl(new_decl);
+            errdefer sema.mod.abortAnonDecl(new_decl);
             try new_decl.finalizeNewArena(&wad.new_decl_arena);
             wad.finished = true;
             return new_decl;
@@ -487,20 +493,23 @@ pub fn deinit(sema: *Sema) void {
 /// Returns only the result from the body that is specified.
 /// Only appropriate to call when it is determined at comptime that this body
 /// has no peers.
-fn resolveBody(sema: *Sema, block: *Block, body: []const Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn resolveBody(
+    sema: *Sema,
+    block: *Block,
+    body: []const Zir.Inst.Index,
+    /// This is the instruction that a break instruction within `body` can
+    /// use to return from the body.
+    body_inst: Zir.Inst.Index,
+) CompileError!Air.Inst.Ref {
     const break_inst = try sema.analyzeBody(block, body);
     const break_data = sema.code.instructions.items(.data)[break_inst].@"break";
     // For comptime control flow, we need to detect when `analyzeBody` reports
     // that we need to break from an outer block. In such case we
     // use Zig's error mechanism to send control flow up the stack until
     // we find the corresponding block to this break.
-    if (block.is_comptime) {
-        if (block.label) |label| {
-            if (label.zir_block != break_data.block_inst) {
-                sema.comptime_break_inst = break_inst;
-                return error.ComptimeBreak;
-            }
-        }
+    if (block.is_comptime and break_data.block_inst != body_inst) {
+        sema.comptime_break_inst = break_inst;
+        return error.ComptimeBreak;
     }
     return sema.resolveInst(break_data.operand);
 }
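A sketch of the comptime control flow that `body_inst` disambiguates: a `break` targeting an enclosing block must unwind through any inner bodies via `error.ComptimeBreak` until `resolveBody` is re-entered with the matching `body_inst`.

    test "comptime break through a nested block" {
        comptime {
            outer: {
                {
                    break :outer; // targets `outer`, not the inner body
                }
            }
        }
    }
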
@@ -1502,9 +1511,6 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
     const ptr = sema.resolveInst(bin_inst.rhs);
     const addr_space = target_util.defaultAddressSpace(sema.mod.getTarget(), .local);
 
-    // Needed for the call to `anon_decl.finish()` below which checks `ty.hasCodeGenBits()`.
-    _ = try sema.typeHasOnePossibleValue(block, src, pointee_ty);
-
     if (Air.refToIndex(ptr)) |ptr_inst| {
         if (sema.air_instructions.items(.tag)[ptr_inst] == .constant) {
             const air_datas = sema.air_instructions.items(.data);
@@ -1535,7 +1541,7 @@ fn zirCoerceResultPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileE
                     const iac = ptr_val.castTag(.inferred_alloc_comptime).?;
                     // There will be only one coerce_result_ptr because we are running at comptime.
                     // The alloc will turn into a Decl.
-                    var anon_decl = try block.startAnonDecl();
+                    var anon_decl = try block.startAnonDecl(src);
                     defer anon_decl.deinit();
                     iac.data.decl = try anon_decl.finish(
                         try pointee_ty.copy(anon_decl.arena()),
@@ -1654,7 +1660,10 @@ pub fn analyzeStructDecl(
     assert(extended.opcode == .struct_decl);
     const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small);
 
-    struct_obj.known_has_bits = small.known_has_bits;
+    struct_obj.known_non_opv = small.known_non_opv;
+    if (small.known_comptime_only) {
+        struct_obj.requires_comptime = .yes;
+    }
 
     var extra_index: usize = extended.operand;
     extra_index += @boolToInt(small.has_src_node);
@@ -1702,7 +1711,7 @@ fn zirStructDecl(
         .zir_index = inst,
         .layout = small.layout,
         .status = .none,
-        .known_has_bits = undefined,
+        .known_non_opv = undefined,
         .namespace = .{
             .parent = block.namespace,
             .ty = struct_ty,
@@ -2528,7 +2537,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Com
             const bitcast_ty_ref = air_datas[bitcast_inst].ty_op.ty;
 
             const new_decl = d: {
-                var anon_decl = try block.startAnonDecl();
+                var anon_decl = try block.startAnonDecl(src);
                 defer anon_decl.deinit();
                 const new_decl = try anon_decl.finish(
                     try final_elem_ty.copy(anon_decl.arena()),
@@ -3112,7 +3121,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) Compi
             if (operand_val.tag() == .variable) {
                 return sema.failWithNeededComptime(block, src);
             }
-            var anon_decl = try block.startAnonDecl();
+            var anon_decl = try block.startAnonDecl(src);
             defer anon_decl.deinit();
             iac.data.decl = try anon_decl.finish(
                 try operand_ty.copy(anon_decl.arena()),
@@ -3184,8 +3193,7 @@ fn addStrLit(sema: *Sema, block: *Block, zir_bytes: []const u8) CompileError!Air
     // after semantic analysis is complete, for example in the case of the initialization
     // expression of a variable declaration. We need the memory to be in the new
     // anonymous Decl's arena.
-
-    var anon_decl = try block.startAnonDecl();
+    var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
     defer anon_decl.deinit();
 
     const bytes = try anon_decl.arena().dupeZ(u8, zir_bytes);
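With the new `src` parameter, the anonymous-decl pattern used throughout Sema becomes (consolidated from the call sites in this diff):

    var anon_decl = try block.startAnonDecl(src);
    defer anon_decl.deinit();
    const decl = try anon_decl.finish(
        try ty.copy(anon_decl.arena()),
        val,
    );
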
@@ -3508,10 +3516,13 @@ fn resolveBlockBody(
     src: LazySrcLoc,
     child_block: *Block,
     body: []const Zir.Inst.Index,
+    /// This is the instruction that a break instruction within `body` can
+    /// use to return from the body.
+    body_inst: Zir.Inst.Index,
     merges: *Block.Merges,
 ) CompileError!Air.Inst.Ref {
     if (child_block.is_comptime) {
-        return sema.resolveBody(child_block, body);
+        return sema.resolveBody(child_block, body, body_inst);
     } else {
         _ = try sema.analyzeBody(child_block, body);
         return sema.analyzeBlockBody(parent_block, src, child_block, merges);
@@ -4147,7 +4158,7 @@ fn analyzeCall(
     const gpa = sema.gpa;
 
     const is_comptime_call = block.is_comptime or modifier == .compile_time or
-        func_ty_info.return_type.requiresComptime();
+        try sema.typeRequiresComptime(block, func_src, func_ty_info.return_type);
     const is_inline_call = is_comptime_call or modifier == .always_inline or
         func_ty_info.cc == .Inline;
     const result: Air.Inst.Ref = if (is_inline_call) res: {
@@ -4251,7 +4262,7 @@ fn analyzeCall(
             const param_src = pl_tok.src();
             const extra = sema.code.extraData(Zir.Inst.Param, pl_tok.payload_index);
             const param_body = sema.code.extra[extra.end..][0..extra.data.body_len];
-            const param_ty_inst = try sema.resolveBody(&child_block, param_body);
+            const param_ty_inst = try sema.resolveBody(&child_block, param_body, inst);
             const param_ty = try sema.analyzeAsType(&child_block, param_src, param_ty_inst);
             const arg_src = call_src; // TODO: better source location
             const casted_arg = try sema.coerce(&child_block, param_ty, uncasted_args[arg_i], arg_src);
@@ -4308,7 +4319,7 @@ fn analyzeCall(
         // on parameters, we must now do the same for the return type as we just did with
         // each of the parameters, resolving the return type and providing it to the child
         // `Sema` so that it can be used for the `ret_ptr` instruction.
-        const ret_ty_inst = try sema.resolveBody(&child_block, fn_info.ret_ty_body);
+        const ret_ty_inst = try sema.resolveBody(&child_block, fn_info.ret_ty_body, module_fn.zir_body_inst);
         const ret_ty_src = func_src; // TODO better source location
         const bare_return_type = try sema.analyzeAsType(&child_block, ret_ty_src, ret_ty_inst);
         // Create a fresh inferred error set type for inline/comptime calls.
@@ -4576,7 +4587,7 @@ fn analyzeCall(
                 }
             } else if (is_anytype) {
                 const arg_ty = sema.typeOf(arg);
-                if (arg_ty.requiresComptime()) {
+                if (try sema.typeRequiresComptime(block, arg_src, arg_ty)) {
                     const arg_val = try sema.resolveConstValue(block, arg_src, arg);
                     const child_arg = try child_sema.addConstant(arg_ty, arg_val);
                     child_sema.inst_map.putAssumeCapacityNoClobber(inst, child_arg);
@@ -4589,7 +4600,7 @@ fn analyzeCall(
             }
             arg_i += 1;
         }
-        const new_func_inst = child_sema.resolveBody(&child_block, fn_info.param_body) catch |err| {
+        const new_func_inst = child_sema.resolveBody(&child_block, fn_info.param_body, fn_info.param_body_inst) catch |err| {
             // TODO look up the compile error that happened here and attach a note to it
             // pointing here, at the generic instantiation callsite.
             if (sema.owner_func) |owner_func| {
@@ -4997,7 +5008,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileEr
 
     // TODO do we really want to create a Decl for this?
     // The reason we do it right now is for memory management.
-    var anon_decl = try block.startAnonDecl();
+    var anon_decl = try block.startAnonDecl(src);
     defer anon_decl.deinit();
 
     var names = Module.ErrorSet.NameMap{};
@@ -5388,10 +5399,9 @@ fn zirFunc(
     const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
     extra_index += ret_ty_body.len;
 
-    var body_inst: Zir.Inst.Index = 0;
     var src_locs: Zir.Inst.Func.SrcLocs = undefined;
-    if (extra.data.body_len != 0) {
-        body_inst = inst;
+    const has_body = extra.data.body_len != 0;
+    if (has_body) {
         extra_index += extra.data.body_len;
         src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
     }
@@ -5404,13 +5414,14 @@ fn zirFunc(
     return sema.funcCommon(
         block,
         inst_data.src_node,
-        body_inst,
+        inst,
         ret_ty_body,
         cc,
         Value.@"null",
         false,
         inferred_error_set,
         false,
+        has_body,
         src_locs,
         null,
     );
@@ -5420,17 +5431,17 @@ fn funcCommon(
     sema: *Sema,
     block: *Block,
     src_node_offset: i32,
-    body_inst: Zir.Inst.Index,
+    func_inst: Zir.Inst.Index,
     ret_ty_body: []const Zir.Inst.Index,
     cc: std.builtin.CallingConvention,
     align_val: Value,
     var_args: bool,
     inferred_error_set: bool,
     is_extern: bool,
+    has_body: bool,
     src_locs: Zir.Inst.Func.SrcLocs,
     opt_lib_name: ?[]const u8,
 ) CompileError!Air.Inst.Ref {
-    const src: LazySrcLoc = .{ .node_offset = src_node_offset };
     const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset };
 
     // The return type body might be a type expression that depends on generic parameters.
@@ -5448,7 +5459,7 @@ fn funcCommon(
             block.params.deinit(sema.gpa);
             block.params = prev_params;
         }
 
-        if (sema.resolveBody(block, ret_ty_body)) |ret_ty_inst| {
+        if (sema.resolveBody(block, ret_ty_body, func_inst)) |ret_ty_inst| {
             if (sema.analyzeAsType(block, ret_ty_src, ret_ty_inst)) |ret_ty| {
                 break :ret_ty ret_ty;
             } else |err| break :err err;
@@ -5467,25 +5478,36 @@ fn funcCommon(
     const mod = sema.mod;
 
     const new_func: *Module.Fn = new_func: {
-        if (body_inst == 0) break :new_func undefined;
-        if (sema.comptime_args_fn_inst == body_inst) {
+        if (!has_body) break :new_func undefined;
+        if (sema.comptime_args_fn_inst == func_inst) {
             const new_func = sema.preallocated_new_func.?;
             sema.preallocated_new_func = null; // take ownership
             break :new_func new_func;
         }
         break :new_func try sema.gpa.create(Module.Fn);
     };
-    errdefer if (body_inst != 0) sema.gpa.destroy(new_func);
+    errdefer if (has_body) sema.gpa.destroy(new_func);
 
     var maybe_inferred_error_set_node: ?*Module.Fn.InferredErrorSetListNode = null;
     errdefer if (maybe_inferred_error_set_node) |node| sema.gpa.destroy(node);
     // Note: no need to errdefer since this will still be in its default state at the end of the function.
 
+    const target = mod.getTarget();
+
+    const fn_ty: Type = fn_ty: {
+        const alignment: u32 = if (align_val.tag() == .null_value) 0 else a: {
+            const alignment = @intCast(u32, align_val.toUnsignedInt());
+            if (alignment == target_util.defaultFunctionAlignment(target)) {
+                break :a 0;
+            } else {
+                break :a alignment;
+            }
+        };
+
         // Hot path for some common function types.
         // TODO can we eliminate some of these Type tag values? seems unnecessarily complicated.
         if (!is_generic and block.params.items.len == 0 and !var_args and
-            align_val.tag() == .null_value and !inferred_error_set)
+            alignment == 0 and !inferred_error_set)
         {
             if (bare_return_type.zigTypeTag() == .NoReturn and cc == .Unspecified) {
                 break :fn_ty Type.initTag(.fn_noreturn_no_args);
@@ -5507,16 +5529,15 @@ fn funcCommon(
         const param_types = try sema.arena.alloc(Type, block.params.items.len);
         const comptime_params = try sema.arena.alloc(bool, block.params.items.len);
         for (block.params.items) |param, i| {
+            const param_src: LazySrcLoc = .{ .node_offset = src_node_offset }; // TODO better src
             param_types[i] = param.ty;
-            comptime_params[i] = param.is_comptime or param.ty.requiresComptime();
+            comptime_params[i] = param.is_comptime or
+                try sema.typeRequiresComptime(block, param_src, param.ty);
             is_generic = is_generic or comptime_params[i] or param.ty.tag() == .generic_poison;
         }
 
-        if (align_val.tag() != .null_value) {
-            return sema.fail(block, src, "TODO implement support for function prototypes to have alignment specified", .{});
-        }
-
-        is_generic = is_generic or bare_return_type.requiresComptime();
+        is_generic = is_generic or
+            try sema.typeRequiresComptime(block, ret_ty_src, bare_return_type);
 
         const return_type = if (!inferred_error_set or bare_return_type.tag() == .generic_poison)
             bare_return_type
@@ -5537,6 +5558,7 @@ fn funcCommon(
             .comptime_params = comptime_params.ptr,
             .return_type = return_type,
             .cc = cc,
+            .alignment = alignment,
             .is_var_args = var_args,
             .is_generic = is_generic,
         });
@@ -5550,7 +5572,6 @@ fn funcCommon(
                 lib_name, @errorName(err),
             });
         };
-        const target = mod.getTarget();
         if (target_util.is_libc_lib_name(target, lib_name)) {
             if (!mod.comp.bin_file.options.link_libc) {
                 return sema.fail(
@@ -5590,26 +5611,21 @@ fn funcCommon(
         );
     }
 
-    if (body_inst == 0) {
-        const fn_ptr_ty = try Type.ptr(sema.arena, .{
-            .pointee_type = fn_ty,
-            .@"addrspace" = .generic,
-            .mutable = false,
-        });
-        return sema.addType(fn_ptr_ty);
+    if (!has_body) {
+        return sema.addType(fn_ty);
     }
 
     const is_inline = fn_ty.fnCallingConvention() == .Inline;
     const anal_state: Module.Fn.Analysis = if (is_inline) .inline_only else .queued;
 
-    const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == body_inst) blk: {
+    const comptime_args: ?[*]TypedValue = if (sema.comptime_args_fn_inst == func_inst) blk: {
         break :blk if (sema.comptime_args.len == 0) null else sema.comptime_args.ptr;
     } else null;
 
     const fn_payload = try sema.arena.create(Value.Payload.Function);
     new_func.* = .{
         .state = anal_state,
-        .zir_body_inst = body_inst,
+        .zir_body_inst = func_inst,
         .owner_decl = sema.owner_decl,
         .comptime_args = comptime_args,
         .lbrace_line = src_locs.lbrace_line,
@@ -5632,7 +5648,7 @@ fn zirParam(
     sema: *Sema,
     block: *Block,
     inst: Zir.Inst.Index,
-    is_comptime: bool,
+    comptime_syntax: bool,
 ) CompileError!void {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_tok;
     const src = inst_data.src();
@@ -5656,7 +5672,7 @@ fn zirParam(
             block.params = prev_params;
         }
 
-        if (sema.resolveBody(block, body)) |param_ty_inst| {
+        if (sema.resolveBody(block, body, inst)) |param_ty_inst| {
             if (sema.analyzeAsType(block, src, param_ty_inst)) |param_ty| {
                 break :param_ty param_ty;
             } else |err| break :err err;
@@ -5669,7 +5685,7 @@ fn zirParam(
                 // insert an anytype parameter.
                 try block.params.append(sema.gpa, .{
                     .ty = Type.initTag(.generic_poison),
-                    .is_comptime = is_comptime,
+                    .is_comptime = comptime_syntax,
                 });
                 try sema.inst_map.putNoClobber(sema.gpa, inst, .generic_poison);
                 return;
@@ -5677,8 +5693,10 @@ fn zirParam(
             else => |e| return e,
         }
     };
+    const is_comptime = comptime_syntax or
+        try sema.typeRequiresComptime(block, src, param_ty);
 
     if (sema.inst_map.get(inst)) |arg| {
-        if (is_comptime or param_ty.requiresComptime()) {
+        if (is_comptime) {
             // We have a comptime value for this parameter so it should be elided from the
             // function type of the function instruction in this block.
             const coerced_arg = try sema.coerce(block, param_ty, arg, src);
@@ -5692,7 +5710,7 @@ fn zirParam(
 
     try block.params.append(sema.gpa, .{
         .ty = param_ty,
-        .is_comptime = is_comptime or param_ty.requiresComptime(),
+        .is_comptime = is_comptime,
     });
     const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison));
     try sema.inst_map.putNoClobber(sema.gpa, inst, result);
@@ -5702,9 +5720,10 @@ fn zirParamAnytype(
     sema: *Sema,
     block: *Block,
     inst: Zir.Inst.Index,
-    is_comptime: bool,
+    comptime_syntax: bool,
 ) CompileError!void {
     const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
+    const src = inst_data.src();
     const param_name = inst_data.get(sema.code);
 
     // TODO check if param_name shadows a Decl. This only needs to be done if
@@ -5713,7 +5732,7 @@ fn zirParamAnytype(
 
     if (sema.inst_map.get(inst)) |air_ref| {
         const param_ty = sema.typeOf(air_ref);
-        if (is_comptime or param_ty.requiresComptime()) {
+        if (comptime_syntax or try sema.typeRequiresComptime(block, src, param_ty)) {
             // We have a comptime value for this parameter so it should be elided from the
             // function type of the function instruction in this block.
             return;
@@ -5730,7 +5749,7 @@ fn zirParamAnytype(
 
     try block.params.append(sema.gpa, .{
         .ty = Type.initTag(.generic_poison),
-        .is_comptime = is_comptime,
+        .is_comptime = comptime_syntax,
     });
     try sema.inst_map.put(sema.gpa, inst, .generic_poison);
 }
@@ -5770,15 +5789,16 @@ fn zirPtrToInt(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     defer tracy.end();
 
     const inst_data = sema.code.instructions.items(.data)[inst].un_node;
+    const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ptr = sema.resolveInst(inst_data.operand);
     const ptr_ty = sema.typeOf(ptr);
     if (!ptr_ty.isPtrAtRuntime()) {
-        const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
         return sema.fail(block, ptr_src, "expected pointer, found '{}'", .{ptr_ty});
     }
-    // TODO handle known-pointer-address
-    const src = inst_data.src();
-    try sema.requireRuntimeBlock(block, src);
+    if (try sema.resolveMaybeUndefVal(block, ptr_src, ptr)) |ptr_val| {
+        return sema.addConstant(Type.usize, ptr_val);
+    }
+    try sema.requireRuntimeBlock(block, ptr_src);
     return block.addUnOp(.ptrtoint, ptr);
 }
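With this change, `@ptrToInt` on a comptime-known pointer folds to a constant instead of unconditionally requiring a runtime block (sketch of the expected behavior):

    const x: u8 = 5;
    comptime {
        _ = @ptrToInt(&x); // now yields a comptime value rather than an error
    }
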
@@ -6802,7 +6822,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                 // Validation above ensured these will succeed.
                 const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable;
                 if (operand_val.eql(item_val, operand_ty)) {
-                    return sema.resolveBlockBody(block, src, &child_block, body, merges);
+                    return sema.resolveBlockBody(block, src, &child_block, body, inst, merges);
                 }
             }
         }
@@ -6824,7 +6844,7 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                     // Validation above ensured these will succeed.
                     const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable;
                     if (operand_val.eql(item_val, operand_ty)) {
-                        return sema.resolveBlockBody(block, src, &child_block, body, merges);
+                        return sema.resolveBlockBody(block, src, &child_block, body, inst, merges);
                    }
                }
 
@@ -6841,18 +6861,18 @@ fn zirSwitchBlock(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
                    if (Value.compare(operand_val, .gte, first_tv.val, operand_ty) and
                        Value.compare(operand_val, .lte, last_tv.val, operand_ty))
                    {
-                        return sema.resolveBlockBody(block, src, &child_block, body, merges);
+                        return sema.resolveBlockBody(block, src, &child_block, body, inst, merges);
                    }
                }
 
                extra_index += body_len;
            }
        }
-        return sema.resolveBlockBody(block, src, &child_block, special.body, merges);
+        return sema.resolveBlockBody(block, src, &child_block, special.body, inst, merges);
     }
 
     if (scalar_cases_len + multi_cases_len == 0) {
-        return sema.resolveBlockBody(block, src, &child_block, special.body, merges);
+        return sema.resolveBlockBody(block, src, &child_block, special.body, inst, merges);
     }
 
     try sema.requireRuntimeBlock(block, src);
@@ -7395,7 +7415,7 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         },
     };
 
-    var anon_decl = try block.startAnonDecl();
+    var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
     defer anon_decl.deinit();
 
     const bytes_including_null = embed_file.bytes[0 .. embed_file.bytes.len + 1];
@@ -7659,7 +7679,7 @@ fn zirArrayCat(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         const is_pointer = lhs_ty.zigTypeTag() == .Pointer;
         const lhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).? else lhs_val;
         const rhs_sub_val = if (is_pointer) (try sema.pointerDeref(block, rhs_src, rhs_val, rhs_ty)).? else rhs_val;
-        var anon_decl = try block.startAnonDecl();
+        var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
         defer anon_decl.deinit();
 
         const buf = try anon_decl.arena().alloc(Value, final_len_including_sent);
@@ -7743,7 +7763,7 @@ fn zirArrayMul(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         const lhs_sub_val = if (lhs_ty.zigTypeTag() == .Pointer)
             (try sema.pointerDeref(block, lhs_src, lhs_val, lhs_ty)).?
         else
            lhs_val;
 
-        var anon_decl = try block.startAnonDecl();
+        var anon_decl = try block.startAnonDecl(src);
         defer anon_decl.deinit();
 
         const final_ty = if (mulinfo.sentinel) |sent|
@@ -9357,7 +9377,7 @@ fn zirBuiltinSrc(
     const func = sema.func orelse return sema.fail(block, src, "@src outside function", .{});
 
     const func_name_val = blk: {
-        var anon_decl = try block.startAnonDecl();
+        var anon_decl = try block.startAnonDecl(src);
         defer anon_decl.deinit();
         const name = std.mem.span(func.owner_decl.name);
         const bytes = try anon_decl.arena().dupe(u8, name[0 .. name.len + 1]);
@@ -9369,7 +9389,7 @@ fn zirBuiltinSrc(
     };
 
     const file_name_val = blk: {
-        var anon_decl = try block.startAnonDecl();
+        var anon_decl = try block.startAnonDecl(src);
         defer anon_decl.deinit();
         const name = try func.owner_decl.getFileScope().fullPathZ(anon_decl.arena());
         const new_decl = try anon_decl.finish(
@@ -9619,7 +9639,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             const is_exhaustive = if (ty.isNonexhaustiveEnum()) Value.@"false" else Value.@"true";
 
-            var fields_anon_decl = try block.startAnonDecl();
+            var fields_anon_decl = try block.startAnonDecl(src);
             defer fields_anon_decl.deinit();
 
             const enum_field_ty = t: {
@@ -9650,7 +9670,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const name = enum_fields.keys()[i];
 
                 const name_val = v: {
-                    var anon_decl = try block.startAnonDecl();
+                    var anon_decl = try block.startAnonDecl(src);
                     defer anon_decl.deinit();
                     const bytes = try anon_decl.arena().dupeZ(u8, name);
                     const new_decl = try anon_decl.finish(
@@ -9715,7 +9735,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .Union => {
             // TODO: look into memoizing this result.
 
-            var fields_anon_decl = try block.startAnonDecl();
+            var fields_anon_decl = try block.startAnonDecl(src);
             defer fields_anon_decl.deinit();
 
             const union_field_ty = t: {
@@ -9739,7 +9759,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
                 const field = union_fields.values()[i];
                 const name = union_fields.keys()[i];
                 const name_val = v: {
-                    var anon_decl = try block.startAnonDecl();
+                    var anon_decl = try block.startAnonDecl(src);
                     defer anon_decl.deinit();
                     const bytes = try anon_decl.arena().dupeZ(u8, name);
                     const new_decl = try anon_decl.finish(
@@ -9810,7 +9830,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
         .Opaque => {
             // TODO: look into memoizing this result.
 
-            var fields_anon_decl = try block.startAnonDecl();
+            var fields_anon_decl = try block.startAnonDecl(src);
             defer fields_anon_decl.deinit();
 
             const opaque_ty = try sema.resolveTypeFields(block, src, ty);
@@ -9848,7 +9868,7 @@ fn typeInfoDecls(
     const decls_len = namespace.decls.count();
     if (decls_len == 0) return Value.initTag(.empty_array);
 
-    var decls_anon_decl = try block.startAnonDecl();
+    var decls_anon_decl = try block.startAnonDecl(src);
     defer decls_anon_decl.deinit();
 
     const declaration_ty = t: {
@@ -9869,7 +9889,7 @@ fn typeInfoDecls(
         const decl = namespace.decls.values()[i];
         const name = namespace.decls.keys()[i];
         const name_val = v: {
-            var anon_decl = try block.startAnonDecl();
+            var anon_decl = try block.startAnonDecl(src);
             defer anon_decl.deinit();
             const bytes = try anon_decl.arena().dupeZ(u8, name);
             const new_decl = try anon_decl.finish(
@@ -10031,7 +10051,7 @@ fn zirBoolBr(
         // comptime-known left-hand side. No need for a block here; the result
         // is simply the rhs expression. Here we rely on there only being 1
        // break instruction (`break_inline`).
-        return sema.resolveBody(parent_block, body);
+        return sema.resolveBody(parent_block, body, inst);
     }
 
     const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
@@ -10061,7 +10081,7 @@ fn zirBoolBr(
     const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false;
     _ = try lhs_block.addBr(block_inst, lhs_result);
 
-    const rhs_result = try sema.resolveBody(rhs_block, body);
+    const rhs_result = try sema.resolveBody(rhs_block, body, inst);
     _ = try rhs_block.addBr(block_inst, rhs_result);
 
     try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len +
@@ -10654,7 +10674,7 @@ fn zirArrayInit(
     } else null;
 
     const runtime_src = opt_runtime_src orelse {
-        var anon_decl = try block.startAnonDecl();
+        var anon_decl = try block.startAnonDecl(src);
         defer anon_decl.deinit();
 
         const elem_vals = try anon_decl.arena().alloc(Value, resolved_args.len);
@@ -10740,7 +10760,7 @@ fn zirArrayInitAnon(
     const tuple_val = try Value.Tag.@"struct".create(sema.arena, values);
     if (!is_ref) return sema.addConstant(tuple_ty, tuple_val);
 
-    var anon_decl = try block.startAnonDecl();
+    var anon_decl = try block.startAnonDecl(src);
     defer anon_decl.deinit();
     const decl = try anon_decl.finish(
         try tuple_ty.copy(anon_decl.arena()),
@@ -11032,7 +11052,7 @@ fn zirTypeName(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ty = try sema.resolveType(block, ty_src, inst_data.operand);
 
-    var anon_decl = try block.startAnonDecl();
+    var anon_decl = try block.startAnonDecl(LazySrcLoc.unneeded);
     defer anon_decl.deinit();
 
     const bytes = try ty.nameAlloc(anon_decl.arena());
@@ -11118,8 +11138,7 @@ fn zirIntToPtr(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
     const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const type_res = try sema.resolveType(block, src, extra.lhs);
-    if (type_res.zigTypeTag() != .Pointer)
-        return sema.fail(block, type_src, "expected pointer, found '{}'", .{type_res});
+    try sema.checkPtrType(block, type_src, type_res);
     const ptr_align = type_res.ptrAlignment(sema.mod.getTarget());
 
     if (try sema.resolveDefinedValue(block, operand_src, operand_coerced)) |val| {
@@ -11176,16 +11195,8 @@ fn zirPtrCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
     const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs);
     const operand = sema.resolveInst(extra.rhs);
     const operand_ty = sema.typeOf(operand);
-    if (operand_ty.zigTypeTag() != .Pointer) {
-        return sema.fail(block, operand_src, "expected pointer, found {s} type '{}'", .{
-            @tagName(operand_ty.zigTypeTag()), operand_ty,
-        });
-    }
-    if (dest_ty.zigTypeTag() != .Pointer) {
-        return sema.fail(block, dest_ty_src, "expected pointer, found {s} type '{}'", .{
-            @tagName(dest_ty.zigTypeTag()), dest_ty,
-        });
-    }
+    try sema.checkPtrType(block, dest_ty_src, dest_ty);
+    try sema.checkPtrOperand(block, operand_src, operand_ty);
     return sema.coerceCompatiblePtrs(block, dest_ty, operand, operand_src);
 }
@@ -11264,7 +11275,7 @@ fn zirAlignCast(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     // TODO in addition to pointers, this instruction is supposed to work for
     // pointer-like optionals and slices.
-    try sema.checkPtrType(block, ptr_src, ptr_ty);
+    try sema.checkPtrOperand(block, ptr_src, ptr_ty);
 
     // TODO compile error if the result pointer is comptime known and would have an
     // alignment that disagrees with the Decl's alignment.
@@ -11462,6 +11473,34 @@ fn checkIntType(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileEr
     }
 }
 
+fn checkPtrOperand(
+    sema: *Sema,
+    block: *Block,
+    ty_src: LazySrcLoc,
+    ty: Type,
+) CompileError!void {
+    switch (ty.zigTypeTag()) {
+        .Pointer => {},
+        .Fn => {
+            const msg = msg: {
+                const msg = try sema.errMsg(
+                    block,
+                    ty_src,
+                    "expected pointer, found {}",
+                    .{ty},
+                );
+                errdefer msg.destroy(sema.gpa);
+
+                try sema.errNote(block, ty_src, msg, "use '&' to obtain a function pointer", .{});
+
+                break :msg msg;
+            };
+            return sema.failWithOwnedErrorMsg(msg);
+        },
+        else => return sema.fail(block, ty_src, "expected pointer, found '{}'", .{ty}),
+    }
+}
+
 fn checkPtrType(
     sema: *Sema,
     block: *Block,
@@ -11470,6 +11509,22 @@ fn checkPtrType(
 ) CompileError!void {
     switch (ty.zigTypeTag()) {
         .Pointer => {},
+        .Fn => {
+            const msg = msg: {
+                const msg = try sema.errMsg(
+                    block,
+                    ty_src,
+                    "expected pointer type, found '{}'",
+                    .{ty},
+                );
+                errdefer msg.destroy(sema.gpa);
+
+                try sema.errNote(block, ty_src, msg, "use '*const ' to make a function pointer type", .{});
+
+                break :msg msg;
+            };
+            return sema.failWithOwnedErrorMsg(msg);
+        },
         else => return sema.fail(block, ty_src, "expected pointer type, found '{}'", .{ty}),
     }
 }
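The two checks differ in their hint: `checkPtrOperand` fires where a pointer value is expected, `checkPtrType` where a pointer type is expected. Expected diagnostics, paraphrased from the strings above:

    fn f() void {}
    comptime {
        _ = @ptrCast(*const anyopaque, f);
        // error: expected pointer, found 'fn() void'
        // note: use '&' to obtain a function pointer
    }
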
@@ -12139,20 +12194,14 @@ fn zirMemcpy(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const dest_ptr = sema.resolveInst(extra.dest);
     const dest_ptr_ty = sema.typeOf(dest_ptr);
 
-    if (dest_ptr_ty.zigTypeTag() != .Pointer) {
-        return sema.fail(block, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty});
-    }
+    try sema.checkPtrOperand(block, dest_src, dest_ptr_ty);
     if (dest_ptr_ty.isConstPtr()) {
         return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty});
     }
 
     const uncasted_src_ptr = sema.resolveInst(extra.source);
     const uncasted_src_ptr_ty = sema.typeOf(uncasted_src_ptr);
-    if (uncasted_src_ptr_ty.zigTypeTag() != .Pointer) {
-        return sema.fail(block, src_src, "expected pointer, found '{}'", .{
-            uncasted_src_ptr_ty,
-        });
-    }
+    try sema.checkPtrOperand(block, src_src, uncasted_src_ptr_ty);
     const src_ptr_info = uncasted_src_ptr_ty.ptrInfo().data;
     const wanted_src_ptr_ty = try Type.ptr(sema.arena, .{
         .pointee_type = dest_ptr_ty.elemType2(),
@@ -12203,9 +12252,7 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
     const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
     const dest_ptr = sema.resolveInst(extra.dest);
     const dest_ptr_ty = sema.typeOf(dest_ptr);
-    if (dest_ptr_ty.zigTypeTag() != .Pointer) {
-        return sema.fail(block, dest_src, "expected pointer, found '{}'", .{dest_ptr_ty});
-    }
+    try sema.checkPtrOperand(block, dest_src, dest_ptr_ty);
     if (dest_ptr_ty.isConstPtr()) {
         return sema.fail(block, dest_src, "cannot store through const pointer '{}'", .{dest_ptr_ty});
     }
@@ -12385,10 +12432,9 @@ fn zirFuncExtended(
     const ret_ty_body = sema.code.extra[extra_index..][0..extra.data.ret_body_len];
     extra_index += ret_ty_body.len;
 
-    var body_inst: Zir.Inst.Index = 0;
     var src_locs: Zir.Inst.Func.SrcLocs = undefined;
-    if (extra.data.body_len != 0) {
-        body_inst = inst;
+    const has_body = extra.data.body_len != 0;
+    if (has_body) {
         extra_index += extra.data.body_len;
         src_locs = sema.code.extraData(Zir.Inst.Func.SrcLocs, extra_index).data;
     }
@@ -12400,13 +12446,14 @@ fn zirFuncExtended(
     return sema.funcCommon(
         block,
         extra.data.src_node,
-        body_inst,
+        inst,
         ret_ty_body,
         cc,
         align_val,
         is_var_args,
         is_inferred_error,
         is_extern,
+        has_body,
         src_locs,
         lib_name,
     );
@@ -12487,7 +12534,7 @@ fn zirPrefetch(
     const opts_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = extra.node };
     const options_ty = try sema.getBuiltinType(block, opts_src, "PrefetchOptions");
     const ptr = sema.resolveInst(extra.lhs);
-    try sema.checkPtrType(block, ptr_src, sema.typeOf(ptr));
+    try sema.checkPtrOperand(block, ptr_src, sema.typeOf(ptr));
     const options = try sema.coerce(block, options_ty, sema.resolveInst(extra.rhs), opts_src);
 
     const rw = try sema.fieldVal(block, opts_src, options, "rw", opts_src);
@@ -12568,12 +12615,15 @@ fn validateVarType(
             .Type,
             .Undefined,
             .Null,
+            .Fn,
             => break,
 
             .Pointer => {
                 const elem_ty = ty.childType();
-                if (elem_ty.zigTypeTag() == .Opaque) return;
-                ty = elem_ty;
+                switch (elem_ty.zigTypeTag()) {
+                    .Opaque, .Fn => return,
+                    else => ty = elem_ty,
+                }
             },
             .Opaque => if (is_extern) return else break,
 
@@ -12586,9 +12636,9 @@ fn validateVarType(
 
             .ErrorUnion => ty = ty.errorUnionPayload(),
 
-            .Fn, .Struct, .Union => {
+            .Struct, .Union => {
                 const resolved_ty = try sema.resolveTypeFields(block, src, ty);
-                if (resolved_ty.requiresComptime()) {
+                if (try sema.typeRequiresComptime(block, src, resolved_ty)) {
                     break;
                 } else {
                     return;
@@ -12596,7 +12646,99 @@ fn validateVarType(
             },
         } else unreachable; // TODO should not need else unreachable
 
-    return sema.fail(block, src, "variable of type '{}' must be const or comptime", .{var_ty});
+    const msg = msg: {
+        const msg = try sema.errMsg(block, src, "variable of type '{}' must be const or comptime", .{var_ty});
+        errdefer msg.destroy(sema.gpa);
+
+        try sema.explainWhyTypeIsComptime(block, src, msg, src.toSrcLoc(block.src_decl), var_ty);
+
+        break :msg msg;
+    };
+    return sema.failWithOwnedErrorMsg(msg);
+}
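The richer variable diagnostic in practice (expected output, paraphrased from the notes added here):

    var T = u8;
    // error: variable of type 'type' must be const or comptime
    // note: types are not available at runtime
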
try sema.explainWhyTypeIsComptime(block, src, msg, field_src_loc, field.ty); + } + } + } + }, + } } pub const PanicId = enum { @@ -12731,7 +12873,7 @@ fn safetyPanic( const msg_inst = msg_inst: { // TODO instead of making a new decl for every panic in the entire compilation, // introduce the concept of a reference-counted decl for these - var anon_decl = try block.startAnonDecl(); + var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); break :msg_inst try sema.analyzeDeclRef(try anon_decl.finish( try Type.Tag.array_u8.create(anon_decl.arena(), msg.len), @@ -12941,7 +13083,7 @@ fn fieldPtr( switch (inner_ty.zigTypeTag()) { .Array => { if (mem.eql(u8, field_name, "len")) { - var anon_decl = try block.startAnonDecl(); + var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( Type.initTag(.comptime_int), @@ -12967,7 +13109,7 @@ fn fieldPtr( const slice_ptr_ty = inner_ty.slicePtrFieldType(buf); if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - var anon_decl = try block.startAnonDecl(); + var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( @@ -12986,7 +13128,7 @@ fn fieldPtr( return block.addTyOp(.ptr_slice_ptr_ptr, result_ty, inner_ptr); } else if (mem.eql(u8, field_name, "len")) { if (try sema.resolveDefinedValue(block, object_ptr_src, inner_ptr)) |val| { - var anon_decl = try block.startAnonDecl(); + var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( @@ -13036,7 +13178,7 @@ fn fieldPtr( }); } else (try sema.mod.getErrorValue(field_name)).key; - var anon_decl = try block.startAnonDecl(); + var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try child_type.copy(anon_decl.arena()), @@ -13052,7 +13194,7 @@ fn fieldPtr( if (child_type.unionTagType()) |enum_ty| { if (enum_ty.enumFieldIndex(field_name)) |field_index| { const field_index_u32 = @intCast(u32, field_index); - var anon_decl = try block.startAnonDecl(); + var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try enum_ty.copy(anon_decl.arena()), @@ -13072,7 +13214,7 @@ fn fieldPtr( return sema.failWithBadMemberAccess(block, child_type, field_name_src, field_name); }; const field_index_u32 = @intCast(u32, field_index); - var anon_decl = try block.startAnonDecl(); + var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); return sema.analyzeDeclRef(try anon_decl.finish( try child_type.copy(anon_decl.arena()), @@ -13328,7 +13470,7 @@ fn structFieldPtr( var offset: u64 = 0; var running_bits: u16 = 0; for (struct_obj.fields.values()) |f, i| { - if (!f.ty.hasCodeGenBits()) continue; + if (!(try sema.typeHasRuntimeBits(block, field_name_src, f.ty))) continue; const field_align = f.packedAlignment(); if (field_align == 0) { @@ -13883,6 +14025,9 @@ fn coerce( { return sema.coerceCompatiblePtrs(block, dest_ty, inst, inst_src); } + + // This will give an extra hint on top of what the bottom of this func would provide. + try sema.checkPtrOperand(block, dest_ty_src, inst_ty); }, .Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) { .Float, .ComptimeFloat => float: { @@ -14683,7 +14828,8 @@ const ComptimePtrLoadKit = struct { /// The Type of the parent Value. ty: Type, /// The starting byte offset of `val` from `root_val`. 
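// The field below becomes optional in this patch: a byte offset only makes
// sense when every type on the path to `val` has a well-defined memory
// layout. Error-union and optional payloads have no specified offset within
// their container, and comptime-only element types have no layout at all,
// so those paths now carry null instead of an `undefined` or computed offset.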
-    byte_offset: usize,
+    /// If the type does not have a well-defined memory layout, this is null.
+    byte_offset: ?usize,
     /// Whether the `root_val` could be mutated by further
     /// semantic analysis and a copy must be performed.
     is_mutable: bool,
@@ -14738,12 +14884,24 @@ fn beginComptimePtrLoad(
                 });
             }
             const elem_ty = parent.ty.childType();
-            const elem_size = elem_ty.abiSize(target);
+            const byte_offset: ?usize = bo: {
+                if (try sema.typeRequiresComptime(block, src, elem_ty)) {
+                    break :bo null;
+                } else {
+                    if (parent.byte_offset) |off| {
+                        try sema.resolveTypeLayout(block, src, elem_ty);
+                        const elem_size = elem_ty.abiSize(target);
+                        break :bo try sema.usizeCast(block, src, off + elem_size * elem_ptr.index);
+                    } else {
+                        break :bo null;
+                    }
+                }
+            };
             return ComptimePtrLoadKit{
                 .root_val = parent.root_val,
                 .val = try parent.val.elemValue(sema.arena, elem_ptr.index),
                 .ty = elem_ty,
-                .byte_offset = try sema.usizeCast(block, src, parent.byte_offset + elem_size * elem_ptr.index),
+                .byte_offset = byte_offset,
                 .is_mutable = parent.is_mutable,
             };
         },
@@ -14768,13 +14926,24 @@ fn beginComptimePtrLoad(
             const field_ptr = ptr_val.castTag(.field_ptr).?.data;
             const parent = try beginComptimePtrLoad(sema, block, src, field_ptr.container_ptr);
             const field_index = @intCast(u32, field_ptr.field_index);
-            try sema.resolveTypeLayout(block, src, parent.ty);
-            const field_offset = parent.ty.structFieldOffset(field_index, target);
+            const byte_offset: ?usize = bo: {
+                if (try sema.typeRequiresComptime(block, src, parent.ty)) {
+                    break :bo null;
+                } else {
+                    if (parent.byte_offset) |off| {
+                        try sema.resolveTypeLayout(block, src, parent.ty);
+                        const field_offset = parent.ty.structFieldOffset(field_index, target);
+                        break :bo try sema.usizeCast(block, src, off + field_offset);
+                    } else {
+                        break :bo null;
+                    }
+                }
+            };
             return ComptimePtrLoadKit{
                 .root_val = parent.root_val,
                 .val = try parent.val.fieldValue(sema.arena, field_index),
                 .ty = parent.ty.structFieldType(field_index),
-                .byte_offset = try sema.usizeCast(block, src, parent.byte_offset + field_offset),
+                .byte_offset = byte_offset,
                 .is_mutable = parent.is_mutable,
             };
         },
@@ -14785,7 +14954,7 @@ fn beginComptimePtrLoad(
                 .root_val = parent.root_val,
                 .val = parent.val.castTag(.eu_payload).?.data,
                 .ty = parent.ty.errorUnionPayload(),
-                .byte_offset = undefined,
+                .byte_offset = null,
                 .is_mutable = parent.is_mutable,
             };
         },
@@ -14796,7 +14965,7 @@ fn beginComptimePtrLoad(
                 .root_val = parent.root_val,
                 .val = parent.val.castTag(.opt_payload).?.data,
                 .ty = try parent.ty.optionalChildAlloc(sema.arena),
-                .byte_offset = undefined,
+                .byte_offset = null,
                 .is_mutable = parent.is_mutable,
             };
         },
@@ -15176,7 +15345,7 @@ fn analyzeRef(
     const operand_ty = sema.typeOf(operand);

     if (try sema.resolveMaybeUndefVal(block, src, operand)) |val| {
-        var anon_decl = try block.startAnonDecl();
+        var anon_decl = try block.startAnonDecl(src);
         defer anon_decl.deinit();
         return sema.analyzeDeclRef(try anon_decl.finish(
             try operand_ty.copy(anon_decl.arena()),
@@ -15590,7 +15759,7 @@ fn cmpNumeric(
             lhs_bits = bigint.toConst().bitCountTwosComp();
             break :x (zcmp != .lt);
         } else x: {
-            lhs_bits = lhs_val.intBitCountTwosComp();
+            lhs_bits = lhs_val.intBitCountTwosComp(target);
             break :x (lhs_val.orderAgainstZero() != .lt);
         };
         lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed);
@@ -15625,7 +15794,7 @@ fn cmpNumeric(
             rhs_bits = bigint.toConst().bitCountTwosComp();
             break :x (zcmp != .lt);
         } else x: {
-            rhs_bits = rhs_val.intBitCountTwosComp();
+            rhs_bits =
rhs_val.intBitCountTwosComp(target); break :x (rhs_val.orderAgainstZero() != .lt); }; rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); @@ -16090,28 +16259,12 @@ fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp switch (ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.status) { - .none => {}, - .field_types_wip => { - return sema.fail(block, src, "struct {} depends on itself", .{ty}); - }, - .have_field_types, - .have_layout, - .layout_wip, - .fully_resolved_wip, - .fully_resolved, - => return ty, - } - - struct_obj.status = .field_types_wip; - try semaStructFields(sema.mod, struct_obj); - - if (struct_obj.fields.count() == 0) { - struct_obj.status = .have_layout; - } else { - struct_obj.status = .have_field_types; - } - + try sema.resolveTypeFieldsStruct(block, src, ty, struct_obj); + return ty; + }, + .@"union", .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + try sema.resolveTypeFieldsUnion(block, src, ty, union_obj); return ty; }, .type_info => return sema.resolveBuiltinTypeFields(block, src, "TypeInfo"), @@ -16126,31 +16279,65 @@ fn resolveTypeFields(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) Comp .call_options => return sema.resolveBuiltinTypeFields(block, src, "CallOptions"), .prefetch_options => return sema.resolveBuiltinTypeFields(block, src, "PrefetchOptions"), - .@"union", .union_tagged => { - const union_obj = ty.cast(Type.Payload.Union).?.data; - switch (union_obj.status) { - .none => {}, - .field_types_wip => { - return sema.fail(block, src, "union {} depends on itself", .{ty}); - }, - .have_field_types, - .have_layout, - .layout_wip, - .fully_resolved_wip, - .fully_resolved, - => return ty, - } - - union_obj.status = .field_types_wip; - try semaUnionFields(sema.mod, union_obj); - union_obj.status = .have_field_types; - - return ty; - }, else => return ty, } } +fn resolveTypeFieldsStruct( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + ty: Type, + struct_obj: *Module.Struct, +) CompileError!void { + switch (struct_obj.status) { + .none => {}, + .field_types_wip => { + return sema.fail(block, src, "struct {} depends on itself", .{ty}); + }, + .have_field_types, + .have_layout, + .layout_wip, + .fully_resolved_wip, + .fully_resolved, + => return, + } + + struct_obj.status = .field_types_wip; + try semaStructFields(sema.mod, struct_obj); + + if (struct_obj.fields.count() == 0) { + struct_obj.status = .have_layout; + } else { + struct_obj.status = .have_field_types; + } +} + +fn resolveTypeFieldsUnion( + sema: *Sema, + block: *Block, + src: LazySrcLoc, + ty: Type, + union_obj: *Module.Union, +) CompileError!void { + switch (union_obj.status) { + .none => {}, + .field_types_wip => { + return sema.fail(block, src, "union {} depends on itself", .{ty}); + }, + .have_field_types, + .have_layout, + .layout_wip, + .fully_resolved_wip, + .fully_resolved, + => return, + } + + union_obj.status = .field_types_wip; + try semaUnionFields(sema.mod, union_obj); + union_obj.status = .have_field_types; +} + fn resolveBuiltinTypeFields( sema: *Sema, block: *Block, @@ -16695,6 +16882,7 @@ fn getBuiltinType( /// in `Sema` is for calling during semantic analysis, and performs field resolution /// to get the answer. The one in `Type` is for calling during codegen and asserts /// that the types are already resolved. 
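// For intuition (illustrative, not exhaustive): `u0`, `void`, and an empty
// struct each have exactly one possible value, so a load of such a type can
// be materialized as a comptime constant and contributes no runtime bits.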
+/// TODO assert the return value matches `ty.onePossibleValue` pub fn typeHasOnePossibleValue( sema: *Sema, block: *Block, @@ -16842,7 +17030,7 @@ pub fn typeHasOnePossibleValue( }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasCodeGenBits()) { + if (!(try sema.typeHasRuntimeBits(block, src, tag_ty))) { return Value.zero; } else { return null; @@ -17106,7 +17294,7 @@ fn analyzeComptimeAlloc( .@"align" = alignment, }); - var anon_decl = try block.startAnonDecl(); + var anon_decl = try block.startAnonDecl(src); defer anon_decl.deinit(); const align_val = if (alignment == 0) @@ -17295,3 +17483,220 @@ fn typePtrOrOptionalPtrTy( else => return null, } } + +/// `generic_poison` will return false. +/// This function returns false negatives when structs and unions are having their +/// field types resolved. +/// TODO assert the return value matches `ty.comptimeOnly` +fn typeRequiresComptime(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { + return switch (ty.tag()) { + .u1, + .u8, + .i8, + .u16, + .i16, + .u32, + .i32, + .u64, + .i64, + .u128, + .i128, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f128, + .anyopaque, + .bool, + .void, + .anyerror, + .noreturn, + .@"anyframe", + .@"null", + .@"undefined", + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_options, + .prefetch_options, + .export_options, + .extern_options, + .manyptr_u8, + .manyptr_const_u8, + .manyptr_const_u8_sentinel_0, + .const_slice_u8, + .const_slice_u8_sentinel_0, + .anyerror_void_error_union, + .empty_struct_literal, + .empty_struct, + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + .@"opaque", + .generic_poison, + .array_u8, + .array_u8_sentinel_0, + .int_signed, + .int_unsigned, + .enum_simple, + => false, + + .single_const_pointer_to_comptime_int, + .type, + .comptime_int, + .comptime_float, + .enum_literal, + .type_info, + // These are function bodies, not function pointers. 
+ .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .function, + => true, + + .var_args_param => unreachable, + .inferred_alloc_mut => unreachable, + .inferred_alloc_const => unreachable, + .bound_fn => unreachable, + + .array, + .array_sentinel, + .vector, + => return sema.typeRequiresComptime(block, src, ty.childType()), + + .pointer, + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + => { + const child_ty = ty.childType(); + if (child_ty.zigTypeTag() == .Fn) { + return false; + } else { + return sema.typeRequiresComptime(block, src, child_ty); + } + }, + + .optional, + .optional_single_mut_pointer, + .optional_single_const_pointer, + => { + var buf: Type.Payload.ElemType = undefined; + return sema.typeRequiresComptime(block, src, ty.optionalChild(&buf)); + }, + + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + for (tuple.types) |field_ty| { + if (try sema.typeRequiresComptime(block, src, field_ty)) { + return true; + } + } + return false; + }, + + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + switch (struct_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (struct_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsStruct(block, src, ty, struct_obj); + + struct_obj.requires_comptime = .wip; + for (struct_obj.fields.values()) |field| { + if (try sema.typeRequiresComptime(block, src, field.ty)) { + struct_obj.requires_comptime = .yes; + return true; + } + } + struct_obj.requires_comptime = .no; + return false; + }, + } + }, + + .@"union", .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + switch (union_obj.requires_comptime) { + .no, .wip => return false, + .yes => return true, + .unknown => { + if (union_obj.status == .field_types_wip) + return false; + + try sema.resolveTypeFieldsUnion(block, src, ty, union_obj); + + union_obj.requires_comptime = .wip; + for (union_obj.fields.values()) |field| { + if (try sema.typeRequiresComptime(block, src, field.ty)) { + union_obj.requires_comptime = .yes; + return true; + } + } + union_obj.requires_comptime = .no; + return false; + }, + } + }, + + .error_union => return sema.typeRequiresComptime(block, src, ty.errorUnionPayload()), + .anyframe_T => { + const child_ty = ty.castTag(.anyframe_T).?.data; + return sema.typeRequiresComptime(block, src, child_ty); + }, + .enum_numbered => { + const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; + return sema.typeRequiresComptime(block, src, tag_ty); + }, + .enum_full, .enum_nonexhaustive => { + const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; + return sema.typeRequiresComptime(block, src, tag_ty); + }, + }; +} + +pub fn typeHasRuntimeBits(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { + if ((try sema.typeHasOnePossibleValue(block, src, ty)) != null) return false; + if (try sema.typeRequiresComptime(block, src, ty)) return false; + return true; +} + +/// Synchronize logic with `Type.isFnOrHasRuntimeBits`. +pub fn fnHasRuntimeBits(sema: *Sema, block: *Block, src: LazySrcLoc, ty: Type) CompileError!bool { + const fn_info = ty.fnInfo(); + if (fn_info.is_generic) return false; + if (fn_info.is_var_args) return true; + switch (fn_info.cc) { + // If there was a comptime calling convention, it should also return false here. 
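// Summarizing the rules in this helper: generic functions never lower to a
// runtime address, varargs functions always do, an inline calling convention
// means the body is only ever inlined at the call site (handled just below),
// and a comptime-only return type forces every call to happen at comptime.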
+ .Inline => return false, + else => {}, + } + if (try sema.typeRequiresComptime(block, src, fn_info.return_type)) { + return false; + } + return true; +} diff --git a/src/Zir.zig b/src/Zir.zig index f38cb2d5f5..86819d10f2 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2599,10 +2599,11 @@ pub const Inst = struct { has_body_len: bool, has_fields_len: bool, has_decls_len: bool, - known_has_bits: bool, + known_non_opv: bool, + known_comptime_only: bool, name_strategy: NameStrategy, layout: std.builtin.TypeInfo.ContainerLayout, - _: u7 = undefined, + _: u6 = undefined, }; }; @@ -3273,6 +3274,7 @@ fn findDeclsBody( pub const FnInfo = struct { param_body: []const Inst.Index, + param_body_inst: Inst.Index, ret_ty_body: []const Inst.Index, body: []const Inst.Index, total_params_len: u32, @@ -3338,6 +3340,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo { } return .{ .param_body = param_body, + .param_body_inst = info.param_block, .ret_ty_body = info.ret_ty_body, .body = info.body, .total_params_len = total_params_len, diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig index d5993ea5d7..cb686b8242 100644 --- a/src/arch/aarch64/CodeGen.zig +++ b/src/arch/aarch64/CodeGen.zig @@ -713,7 +713,7 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void { switch (self.debug_output) { .dwarf => |dbg_out| { - assert(ty.hasCodeGenBits()); + assert(ty.hasRuntimeBits()); const index = dbg_out.dbg_info.items.len; try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 @@ -1279,7 +1279,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasCodeGenBits()) + if (!elem_ty.hasRuntimeBits()) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2155,7 +2155,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasCodeGenBits()) { + if (self.air.typeOf(operand).hasRuntimeBits()) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -2608,7 +2608,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasCodeGenBits()) { + if (!tv.ty.hasRuntimeBits()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -2616,7 +2616,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. 
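// From here on, every backend picks up the mechanical rename of
// `Type.hasCodeGenBits` to `Type.hasRuntimeBits` (with `isFnOrHasRuntimeBits`
// where function bodies must still reach the linker, e.g. in the C backend
// below). The user-facing rule being encoded, as a hypothetical sketch
// against the post-patch compiler:
//
//     fn foo() void {}
//     test "function bodies are comptime-only; pointers are runtime values" {
//         const p: *const fn () void = &foo; // ok: a pointer has runtime bits
//         _ = p;
//         // var b: fn () void = foo; // error: must be const or comptime
//     }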
const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasCodeGenBits()) + if (!inst_ty.hasRuntimeBits()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -2672,11 +2672,43 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV return mcv; } +fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes: u64 = @divExact(ptr_bits, 8); + decl.alive = true; + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.MachO)) |_| { + // TODO I'm hacking my way through here by repurposing .memory for storing + // index to the GOT target symbol index. + return MCValue{ .memory = decl.link.macho.local_sym_index }; + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.Plan9)) |p9| { + try p9.seeDecl(decl); + const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else { + return self.fail("TODO codegen non-ELF const Decl pointer", .{}); + } + _ = tv; +} + fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); + + if (typed_value.val.castTag(.decl_ref)) |payload| { + return self.lowerDeclRef(typed_value, payload.data); + } + if (typed_value.val.castTag(.decl_ref_mut)) |payload| { + return self.lowerDeclRef(typed_value, payload.data.decl); + } + switch (typed_value.ty.zigTypeTag()) { .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => { @@ -2693,28 +2725,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.fail("TODO codegen for const slices", .{}); }, else => { - if (typed_value.val.castTag(.decl_ref)) |payload| { - const decl = payload.data; - decl.alive = true; - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.MachO)) |_| { - // TODO I'm hacking my way through here by repurposing .memory for storing - // index to the GOT target symbol index. - return MCValue{ .memory = decl.link.macho.local_sym_index }; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - try p9.seeDecl(decl); - const got_addr = p9.bases.data + decl.link.plan9.got_index.? 
* ptr_bytes; - return MCValue{ .memory = got_addr }; - } else { - return self.fail("TODO codegen non-ELF const Decl pointer", .{}); - } - } if (typed_value.val.tag() == .int_u64) { return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; } @@ -2794,7 +2804,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { const payload_type = typed_value.ty.errorUnionPayload(); const sub_val = typed_value.val.castTag(.eu_payload).?.data; - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. return self.genTypedValue(.{ .ty = error_type, .val = sub_val }); } @@ -2888,7 +2898,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasCodeGenBits()) { + } else if (!ret_ty.hasRuntimeBits()) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig index aa8720b717..c431f28613 100644 --- a/src/arch/arm/CodeGen.zig +++ b/src/arch/arm/CodeGen.zig @@ -1074,7 +1074,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasCodeGenBits()) break :result mcv; + if (!payload_ty.hasRuntimeBits()) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); }; @@ -1086,7 +1086,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const error_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = error_union_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) break :result MCValue.none; + if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); }; @@ -1135,7 +1135,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.getRefType(ty_op.ty); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasCodeGenBits()) break :result mcv; + if (!payload_ty.hasRuntimeBits()) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); }; @@ -1506,7 +1506,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasCodeGenBits()) + if (!elem_ty.hasRuntimeBits()) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2666,9 +2666,9 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!error_type.hasCodeGenBits()) { + if (!error_type.hasRuntimeBits()) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasCodeGenBits()) { + } else if (!payload_type.hasRuntimeBits()) { if (error_type.abiSize(self.target.*) <= 4) { const reg_mcv: MCValue = switch (operand) { .register => operand, @@ -2900,7 +2900,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) 
!void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasCodeGenBits()) { + if (self.air.typeOf(operand).hasRuntimeBits()) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3658,7 +3658,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasCodeGenBits()) { + if (!tv.ty.hasRuntimeBits()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -3666,7 +3666,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasCodeGenBits()) + if (!inst_ty.hasRuntimeBits()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -3701,11 +3701,45 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { } } +fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes: u64 = @divExact(ptr_bits, 8); + + decl.alive = true; + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.MachO)) |_| { + // TODO I'm hacking my way through here by repurposing .memory for storing + // index to the GOT target symbol index. + return MCValue{ .memory = decl.link.macho.local_sym_index }; + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.Plan9)) |p9| { + try p9.seeDecl(decl); + const got_addr = p9.bases.data + decl.link.plan9.got_index.? 
* ptr_bytes; + return MCValue{ .memory = got_addr }; + } else { + return self.fail("TODO codegen non-ELF const Decl pointer", .{}); + } + + _ = tv; +} + fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); + + if (typed_value.val.castTag(.decl_ref)) |payload| { + return self.lowerDeclRef(typed_value, payload.data); + } + if (typed_value.val.castTag(.decl_ref_mut)) |payload| { + return self.lowerDeclRef(typed_value, payload.data.decl); + } + switch (typed_value.ty.zigTypeTag()) { .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => { @@ -3722,28 +3756,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.fail("TODO codegen for const slices", .{}); }, else => { - if (typed_value.val.castTag(.decl_ref)) |payload| { - const decl = payload.data; - decl.alive = true; - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.MachO)) |_| { - // TODO I'm hacking my way through here by repurposing .memory for storing - // index to the GOT target symbol index. - return MCValue{ .memory = decl.link.macho.local_sym_index }; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - try p9.seeDecl(decl); - const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else { - return self.fail("TODO codegen non-ELF const Decl pointer", .{}); - } - } if (typed_value.val.tag() == .int_u64) { return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) }; } @@ -3812,7 +3824,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { const payload_type = typed_value.ty.errorUnionPayload(); if (typed_value.val.castTag(.eu_payload)) |pl| { - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. return MCValue{ .immediate = 0 }; } @@ -3820,7 +3832,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { _ = pl; return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty}); } else { - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. 
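// The hoisted `lowerDeclRef` above centralizes GOT-relative addressing that
// was previously duplicated per value tag. The address arithmetic, worked
// through for a 64-bit target (illustrative numbers):
//
//     ptr_bytes = ptrBitWidth() / 8 = 64 / 8 = 8
//     entry #3  => got_addr = p_vaddr + 3 * 8 = p_vaddr + 24
//
// i.e. each Decl occupies one pointer-sized slot in the offset table, and
// `.memory` ends up holding that slot's virtual address (except on MachO,
// where it temporarily holds a symbol index; see the TODO above).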
return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); } @@ -3918,7 +3930,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasCodeGenBits()) { + } else if (!ret_ty.hasRuntimeBits()) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, diff --git a/src/arch/arm/Emit.zig b/src/arch/arm/Emit.zig index cd9503e570..72eb430769 100644 --- a/src/arch/arm/Emit.zig +++ b/src/arch/arm/Emit.zig @@ -372,7 +372,7 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void { fn addDbgInfoTypeReloc(self: *Emit, ty: Type) !void { switch (self.debug_output) { .dwarf => |dbg_out| { - assert(ty.hasCodeGenBits()); + assert(ty.hasRuntimeBits()); const index = dbg_out.dbg_info.items.len; try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig index 0c310d5680..eb99d479c2 100644 --- a/src/arch/riscv64/CodeGen.zig +++ b/src/arch/riscv64/CodeGen.zig @@ -691,7 +691,7 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void { switch (self.debug_output) { .dwarf => |dbg_out| { - assert(ty.hasCodeGenBits()); + assert(ty.hasRuntimeBits()); const index = dbg_out.dbg_info.items.len; try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 @@ -1223,7 +1223,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasCodeGenBits()) + if (!elem_ty.hasRuntimeBits()) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -1769,7 +1769,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasCodeGenBits()) { + if (self.air.typeOf(operand).hasRuntimeBits()) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -2107,7 +2107,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasCodeGenBits()) { + if (!tv.ty.hasRuntimeBits()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -2115,7 +2115,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no codegen bits, no need to store it. 
const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasCodeGenBits()) + if (!inst_ty.hasRuntimeBits()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -2171,11 +2171,42 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV return mcv; } +fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes: u64 = @divExact(ptr_bits, 8); + decl.alive = true; + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.MachO)) |_| { + // TODO I'm hacking my way through here by repurposing .memory for storing + // index to the GOT target symbol index. + return MCValue{ .memory = decl.link.macho.local_sym_index }; + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.Plan9)) |p9| { + try p9.seeDecl(decl); + const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else { + return self.fail("TODO codegen non-ELF const Decl pointer", .{}); + } + _ = tv; +} + fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; + + if (typed_value.val.castTag(.decl_ref)) |payload| { + return self.lowerDeclRef(typed_value, payload.data); + } + if (typed_value.val.castTag(.decl_ref_mut)) |payload| { + return self.lowerDeclRef(typed_value, payload.data.decl); + } const ptr_bits = self.target.cpu.arch.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); switch (typed_value.ty.zigTypeTag()) { .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => { @@ -2192,28 +2223,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.fail("TODO codegen for const slices", .{}); }, else => { - if (typed_value.val.castTag(.decl_ref)) |payload| { - const decl = payload.data; - decl.alive = true; - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.MachO)) |_| { - // TODO I'm hacking my way through here by repurposing .memory for storing - // index to the GOT target symbol index. - return MCValue{ .memory = decl.link.macho.local_sym_index }; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - try p9.seeDecl(decl); - const got_addr = p9.bases.data + decl.link.plan9.got_index.? 
* ptr_bytes; - return MCValue{ .memory = got_addr }; - } else { - return self.fail("TODO codegen non-ELF const Decl pointer", .{}); - } - } if (typed_value.val.tag() == .int_u64) { return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; } @@ -2290,7 +2299,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { const payload_type = typed_value.ty.errorUnionPayload(); const sub_val = typed_value.val.castTag(.eu_payload).?.data; - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. return self.genTypedValue(.{ .ty = error_type, .val = sub_val }); } @@ -2381,7 +2390,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasCodeGenBits()) { + } else if (!ret_ty.hasRuntimeBits()) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig index bd84dc20a1..f69fea0b0a 100644 --- a/src/arch/wasm/CodeGen.zig +++ b/src/arch/wasm/CodeGen.zig @@ -598,7 +598,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue { // means we must generate it from a constant. const val = self.air.value(ref).?; const ty = self.air.typeOf(ref); - if (!ty.hasCodeGenBits() and !ty.isInt()) return WValue{ .none = {} }; + if (!ty.hasRuntimeBits() and !ty.isInt()) return WValue{ .none = {} }; // When we need to pass the value by reference (such as a struct), we will // leverage `genTypedValue` to lower the constant to bytes and emit it @@ -790,13 +790,13 @@ fn genFunctype(gpa: Allocator, fn_ty: Type, target: std.Target) !wasm.Type { defer gpa.free(fn_params); fn_ty.fnParamTypes(fn_params); for (fn_params) |param_type| { - if (!param_type.hasCodeGenBits()) continue; + if (!param_type.hasRuntimeBits()) continue; try params.append(typeToValtype(param_type, target)); } } // return type - if (!want_sret and return_type.hasCodeGenBits()) { + if (!want_sret and return_type.hasRuntimeBits()) { try returns.append(typeToValtype(return_type, target)); } @@ -935,7 +935,7 @@ pub const DeclGen = struct { const abi_size = @intCast(usize, ty.abiSize(self.target())); const offset = abi_size - @intCast(usize, payload_type.abiSize(self.target())); - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { try writer.writeByteNTimes(@boolToInt(is_pl), abi_size); return Result{ .appended = {} }; } @@ -1044,7 +1044,7 @@ pub const DeclGen = struct { const field_vals = val.castTag(.@"struct").?.data; for (field_vals) |field_val, index| { const field_ty = ty.structFieldType(index); - if (!field_ty.hasCodeGenBits()) continue; + if (!field_ty.hasRuntimeBits()) continue; switch (try self.genTypedValue(field_ty, field_val, writer)) { .appended => {}, .externally_managed => |payload| try writer.writeAll(payload), @@ -1093,7 +1093,7 @@ pub const DeclGen = struct { .appended => {}, } - if (payload_ty.hasCodeGenBits()) { + if (payload_ty.hasRuntimeBits()) { const pl_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef); switch (try self.genTypedValue(payload_ty, pl_val, writer)) { .externally_managed => |data| try writer.writeAll(data), @@ -1180,7 +1180,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu .Naked => return result, .Unspecified, .C => { for (param_types) |ty, ty_index| { - if (!ty.hasCodeGenBits()) { + if (!ty.hasRuntimeBits()) { 
result.args[ty_index] = .{ .none = {} }; continue; } @@ -1243,7 +1243,7 @@ fn moveStack(self: *Self, offset: u32, local: u32) !void { /// /// Asserts Type has codegenbits fn allocStack(self: *Self, ty: Type) !WValue { - assert(ty.hasCodeGenBits()); + assert(ty.hasRuntimeBits()); // calculate needed stack space const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch { @@ -1319,22 +1319,22 @@ fn isByRef(ty: Type, target: std.Target) bool { .Struct, .Frame, .Union, - => return ty.hasCodeGenBits(), + => return ty.hasRuntimeBits(), .Int => return if (ty.intInfo(target).bits > 64) true else false, .ErrorUnion => { - const has_tag = ty.errorUnionSet().hasCodeGenBits(); - const has_pl = ty.errorUnionPayload().hasCodeGenBits(); + const has_tag = ty.errorUnionSet().hasRuntimeBits(); + const has_pl = ty.errorUnionPayload().hasRuntimeBits(); if (!has_tag or !has_pl) return false; - return ty.hasCodeGenBits(); + return ty.hasRuntimeBits(); }, .Optional => { if (ty.isPtrLikeOptional()) return false; var buf: Type.Payload.ElemType = undefined; - return ty.optionalChild(&buf).hasCodeGenBits(); + return ty.optionalChild(&buf).hasRuntimeBits(); }, .Pointer => { // Slices act like struct and will be passed by reference - if (ty.isSlice()) return ty.hasCodeGenBits(); + if (ty.isSlice()) return ty.hasRuntimeBits(); return false; }, } @@ -1563,7 +1563,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); const ret_ty = self.air.typeOf(un_op).childType(); - if (!ret_ty.hasCodeGenBits()) return WValue.none; + if (!ret_ty.hasRuntimeBits()) return WValue.none; if (!isByRef(ret_ty, self.target)) { const result = try self.load(operand, ret_ty, 0); @@ -1611,7 +1611,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const arg_val = try self.resolveInst(arg_ref); const arg_ty = self.air.typeOf(arg_ref); - if (!arg_ty.hasCodeGenBits()) continue; + if (!arg_ty.hasRuntimeBits()) continue; try self.emitWValue(arg_val); } @@ -1631,7 +1631,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue { try self.addLabel(.call_indirect, fn_type_index); } - if (self.liveness.isUnused(inst) or !ret_ty.hasCodeGenBits()) { + if (self.liveness.isUnused(inst) or !ret_ty.hasRuntimeBits()) { return WValue.none; } else if (ret_ty.isNoReturn()) { try self.addTag(.@"unreachable"); @@ -1653,7 +1653,7 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!WValue { try self.initializeStack(); } - if (!pointee_type.hasCodeGenBits()) { + if (!pointee_type.hasRuntimeBits()) { // when the pointee is zero-sized, we still want to create a pointer. // but instead use a default pointer type as storage. 
const zero_ptr = try self.allocStack(Type.usize); @@ -1678,7 +1678,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro .ErrorUnion => { const err_ty = ty.errorUnionSet(); const pl_ty = ty.errorUnionPayload(); - if (!pl_ty.hasCodeGenBits()) { + if (!pl_ty.hasRuntimeBits()) { const err_val = try self.load(rhs, err_ty, 0); return self.store(lhs, err_val, err_ty, 0); } @@ -1691,7 +1691,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro } var buf: Type.Payload.ElemType = undefined; const pl_ty = ty.optionalChild(&buf); - if (!pl_ty.hasCodeGenBits()) { + if (!pl_ty.hasRuntimeBits()) { return self.store(lhs, rhs, Type.initTag(.u8), 0); } @@ -1750,7 +1750,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const ty = self.air.getRefType(ty_op.ty); - if (!ty.hasCodeGenBits()) return WValue{ .none = {} }; + if (!ty.hasRuntimeBits()) return WValue{ .none = {} }; if (isByRef(ty, self.target)) { const new_local = try self.allocStack(ty); @@ -2146,7 +2146,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner if (operand_ty.zigTypeTag() == .Optional and !operand_ty.isPtrLikeOptional()) { var buf: Type.Payload.ElemType = undefined; const payload_ty = operand_ty.optionalChild(&buf); - if (payload_ty.hasCodeGenBits()) { + if (payload_ty.hasRuntimeBits()) { // When we hit this case, we must check the value of optionals // that are not pointers. This means first checking against non-null for // both lhs and rhs, as well as checking the payload are matching of lhs and rhs @@ -2190,7 +2190,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const block = self.blocks.get(br.block_inst).?; // if operand has codegen bits we should break with a value - if (self.air.typeOf(br.operand).hasCodeGenBits()) { + if (self.air.typeOf(br.operand).hasRuntimeBits()) { try self.emitWValue(try self.resolveInst(br.operand)); if (block.value != .none) { @@ -2282,7 +2282,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasCodeGenBits()) return WValue{ .none = {} }; + if (!field_ty.hasRuntimeBits()) return WValue{ .none = {} }; const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch { return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty}); }; @@ -2452,7 +2452,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W // load the error tag value try self.emitWValue(operand); - if (pl_ty.hasCodeGenBits()) { + if (pl_ty.hasRuntimeBits()) { try self.addMemArg(.i32_load16_u, .{ .offset = 0, .alignment = err_ty.errorUnionSet().abiAlignment(self.target), @@ -2474,7 +2474,7 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue const operand = try self.resolveInst(ty_op.operand); const err_ty = self.air.typeOf(ty_op.operand); const payload_ty = err_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) return WValue{ .none = {} }; + if (!payload_ty.hasRuntimeBits()) return WValue{ .none = {} }; const offset = @intCast(u32, err_ty.errorUnionSet().abiSize(self.target)); if (isByRef(payload_ty, self.target)) { return self.buildPointerOffset(operand, offset, .new); @@ -2489,7 +2489,7 @@ fn airUnwrapErrUnionError(self: 
*Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const err_ty = self.air.typeOf(ty_op.operand); const payload_ty = err_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { return operand; } @@ -2502,7 +2502,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const op_ty = self.air.typeOf(ty_op.operand); - if (!op_ty.hasCodeGenBits()) return operand; + if (!op_ty.hasRuntimeBits()) return operand; const err_ty = self.air.getRefType(ty_op.ty); const offset = err_ty.errorUnionSet().abiSize(self.target); @@ -2580,7 +2580,7 @@ fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode) const payload_ty = optional_ty.optionalChild(&buf); // When payload is zero-bits, we can treat operand as a value, rather than // a pointer to the stack value - if (payload_ty.hasCodeGenBits()) { + if (payload_ty.hasRuntimeBits()) { try self.addMemArg(.i32_load8_u, .{ .offset = 0, .alignment = 1 }); } } @@ -2600,7 +2600,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const operand = try self.resolveInst(ty_op.operand); const opt_ty = self.air.typeOf(ty_op.operand); const payload_ty = self.air.typeOfIndex(inst); - if (!payload_ty.hasCodeGenBits()) return WValue{ .none = {} }; + if (!payload_ty.hasRuntimeBits()) return WValue{ .none = {} }; if (opt_ty.isPtrLikeOptional()) return operand; const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target); @@ -2621,7 +2621,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue { var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasCodeGenBits() or opt_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBits() or opt_ty.isPtrLikeOptional()) { return operand; } @@ -2635,7 +2635,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue const opt_ty = self.air.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = opt_ty.optionalChild(&buf); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty}); } @@ -2659,7 +2659,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { const non_null_bit = try self.allocStack(Type.initTag(.u1)); try self.addLabel(.local_get, non_null_bit.local); try self.addImm32(1); @@ -2851,7 +2851,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue { const slice_local = try self.allocStack(slice_ty); // store the array ptr in the slice - if (array_ty.hasCodeGenBits()) { + if (array_ty.hasRuntimeBits()) { try self.store(slice_local, operand, ty, 0); } @@ -3105,7 +3105,7 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!WValue { } fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue { - assert(operand_ty.hasCodeGenBits()); + assert(operand_ty.hasRuntimeBits()); assert(op == .eq or op == .neq); var buf: Type.Payload.ElemType = undefined; const payload_ty = operand_ty.optionalChild(&buf); diff --git 
a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig index b99d6bd039..e53ff57144 100644 --- a/src/arch/x86_64/CodeGen.zig +++ b/src/arch/x86_64/CodeGen.zig @@ -1202,7 +1202,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { const err_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = err_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasCodeGenBits()) break :result mcv; + if (!payload_ty.hasRuntimeBits()) break :result mcv; return self.fail("TODO implement unwrap error union error for non-empty payloads", .{}); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1213,7 +1213,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { const err_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = err_union_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) break :result MCValue.none; + if (!payload_ty.hasRuntimeBits()) break :result MCValue.none; return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{}); }; return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); @@ -1270,7 +1270,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { const error_union_ty = self.air.getRefType(ty_op.ty); const payload_ty = error_union_ty.errorUnionPayload(); const mcv = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasCodeGenBits()) break :result mcv; + if (!payload_ty.hasRuntimeBits()) break :result mcv; return self.fail("TODO implement wrap errunion error for non-empty payloads", .{}); }; @@ -1636,7 +1636,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const elem_ty = self.air.typeOfIndex(inst); const result: MCValue = result: { - if (!elem_ty.hasCodeGenBits()) + if (!elem_ty.hasRuntimeBits()) break :result MCValue.none; const ptr = try self.resolveInst(ty_op.operand); @@ -2739,9 +2739,9 @@ fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue { fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue { const err_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!err_type.hasCodeGenBits()) { + if (!err_type.hasRuntimeBits()) { return MCValue{ .immediate = 0 }; // always false - } else if (!payload_type.hasCodeGenBits()) { + } else if (!payload_type.hasRuntimeBits()) { if (err_type.abiSize(self.target.*) <= 8) { try self.genBinMathOpMir(.cmp, err_type, .unsigned, operand, MCValue{ .immediate = 0 }); return MCValue{ .compare_flags_unsigned = .gt }; @@ -2962,7 +2962,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (self.air.typeOf(operand).hasCodeGenBits()) { + if (self.air.typeOf(operand).hasRuntimeBits()) { const operand_mcv = try self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { @@ -3913,7 +3913,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { const tv = Air.Inst.Ref.typed_value_map[ref_int]; - if (!tv.ty.hasCodeGenBits()) { + if (!tv.ty.hasRuntimeBits()) { return MCValue{ .none = {} }; } return self.genTypedValue(tv); @@ -3921,7 +3921,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { // If the type has no 
codegen bits, no need to store it. const inst_ty = self.air.typeOf(inst); - if (!inst_ty.hasCodeGenBits()) + if (!inst_ty.hasRuntimeBits()) return MCValue{ .none = {} }; const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); @@ -3977,11 +3977,45 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV return mcv; } +fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue { + const ptr_bits = self.target.cpu.arch.ptrBitWidth(); + const ptr_bytes: u64 = @divExact(ptr_bits, 8); + + decl.alive = true; + if (self.bin_file.cast(link.File.Elf)) |elf_file| { + const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; + const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.MachO)) |_| { + // TODO I'm hacking my way through here by repurposing .memory for storing + // index to the GOT target symbol index. + return MCValue{ .memory = decl.link.macho.local_sym_index }; + } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { + const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else if (self.bin_file.cast(link.File.Plan9)) |p9| { + try p9.seeDecl(decl); + const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; + return MCValue{ .memory = got_addr }; + } else { + return self.fail("TODO codegen non-ELF const Decl pointer", .{}); + } + + _ = tv; +} + fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); - const ptr_bytes: u64 = @divExact(ptr_bits, 8); + + if (typed_value.val.castTag(.decl_ref)) |payload| { + return self.lowerDeclRef(typed_value, payload.data); + } + if (typed_value.val.castTag(.decl_ref_mut)) |payload| { + return self.lowerDeclRef(typed_value, payload.data.decl); + } + switch (typed_value.ty.zigTypeTag()) { .Pointer => switch (typed_value.ty.ptrSize()) { .Slice => { @@ -3998,28 +4032,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { return self.fail("TODO codegen for const slices", .{}); }, else => { - if (typed_value.val.castTag(.decl_ref)) |payload| { - const decl = payload.data; - decl.alive = true; - if (self.bin_file.cast(link.File.Elf)) |elf_file| { - const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?]; - const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.MachO)) |_| { - // TODO I'm hacking my way through here by repurposing .memory for storing - // index to the GOT target symbol index. - return MCValue{ .memory = decl.link.macho.local_sym_index }; - } else if (self.bin_file.cast(link.File.Coff)) |coff_file| { - const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes; - return MCValue{ .memory = got_addr }; - } else if (self.bin_file.cast(link.File.Plan9)) |p9| { - try p9.seeDecl(decl); - const got_addr = p9.bases.data + decl.link.plan9.got_index.? 
* ptr_bytes; - return MCValue{ .memory = got_addr }; - } else { - return self.fail("TODO codegen non-ELF const Decl pointer", .{}); - } - } if (typed_value.val.tag() == .int_u64) { return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; } @@ -4091,7 +4103,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { const payload_type = typed_value.ty.errorUnionPayload(); if (typed_value.val.castTag(.eu_payload)) |pl| { - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. return MCValue{ .immediate = 0 }; } @@ -4099,7 +4111,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { _ = pl; return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty}); } else { - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val }); } @@ -4156,7 +4168,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { var by_reg = std.AutoHashMap(usize, usize).init(self.bin_file.allocator); defer by_reg.deinit(); for (param_types) |ty, i| { - if (!ty.hasCodeGenBits()) continue; + if (!ty.hasRuntimeBits()) continue; const param_size = @intCast(u32, ty.abiSize(self.target.*)); const pass_in_reg = switch (ty.zigTypeTag()) { .Bool => true, @@ -4178,7 +4190,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { // for (param_types) |ty, i| { const i = count - 1; const ty = param_types[i]; - if (!ty.hasCodeGenBits()) { + if (!ty.hasRuntimeBits()) { assert(cc != .C); result.args[i] = .{ .none = {} }; continue; @@ -4207,7 +4219,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { if (ret_ty.zigTypeTag() == .NoReturn) { result.return_value = .{ .unreach = {} }; - } else if (!ret_ty.hasCodeGenBits()) { + } else if (!ret_ty.hasRuntimeBits()) { result.return_value = .{ .none = {} }; } else switch (cc) { .Naked => unreachable, diff --git a/src/arch/x86_64/Emit.zig b/src/arch/x86_64/Emit.zig index 058feb56d7..ba19a6ba86 100644 --- a/src/arch/x86_64/Emit.zig +++ b/src/arch/x86_64/Emit.zig @@ -885,7 +885,7 @@ fn genArgDbgInfo(emit: *Emit, inst: Air.Inst.Index, mcv: MCValue) !void { fn addDbgInfoTypeReloc(emit: *Emit, ty: Type) !void { switch (emit.debug_output) { .dwarf => |dbg_out| { - assert(ty.hasCodeGenBits()); + assert(ty.hasRuntimeBits()); const index = dbg_out.dbg_info.items.len; try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4 diff --git a/src/codegen.zig b/src/codegen.zig index 65b0318e9e..faafe79c13 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -377,7 +377,7 @@ pub fn generateSymbol( const field_vals = typed_value.val.castTag(.@"struct").?.data; for (field_vals) |field_val, index| { const field_ty = typed_value.ty.structFieldType(index); - if (!field_ty.hasCodeGenBits()) continue; + if (!field_ty.hasRuntimeBits()) continue; switch (try generateSymbol(bin_file, src_loc, .{ .ty = field_ty, .val = field_val, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 5a13ea7914..9d6d5527e5 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -507,7 +507,7 @@ pub const DeclGen = struct { const error_type = ty.errorUnionSet(); const payload_type = ty.errorUnionPayload(); - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. 
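// Illustrative sketch (not compiler code): the rationale for "use the error
// type directly" is that a payload with no runtime bits leaves the error
// union represented by the error value alone. At the language level:
const std = @import("std");

test "zero-bit payload collapses the error union representation" {
    // anyerror!void carries no payload bits, so it is exactly an anyerror.
    try std.testing.expect(@sizeOf(anyerror!void) == @sizeOf(anyerror));
    // A payload with runtime bits widens the union.
    try std.testing.expect(@sizeOf(anyerror!u32) > @sizeOf(anyerror));
}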
const err_val = if (val.errorUnionIsPayload()) Value.initTag(.zero) else val; return dg.renderValue(writer, error_type, err_val); @@ -581,7 +581,7 @@ pub const DeclGen = struct { for (field_vals) |field_val, i| { const field_ty = ty.structFieldType(i); - if (!field_ty.hasCodeGenBits()) continue; + if (!field_ty.hasRuntimeBits()) continue; if (i != 0) try writer.writeAll(","); try dg.renderValue(writer, field_ty, field_val); @@ -611,7 +611,7 @@ pub const DeclGen = struct { const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag).?; const field_ty = ty.unionFields().values()[index].ty; const field_name = ty.unionFields().keys()[index]; - if (field_ty.hasCodeGenBits()) { + if (field_ty.hasRuntimeBits()) { try writer.print(".{} = ", .{fmtIdent(field_name)}); try dg.renderValue(writer, field_ty, union_obj.val); } @@ -652,7 +652,7 @@ pub const DeclGen = struct { } } const return_ty = dg.decl.ty.fnReturnType(); - if (return_ty.hasCodeGenBits()) { + if (return_ty.hasRuntimeBits()) { try dg.renderType(w, return_ty); } else if (return_ty.zigTypeTag() == .NoReturn) { try w.writeAll("zig_noreturn void"); @@ -784,7 +784,7 @@ pub const DeclGen = struct { var it = struct_obj.fields.iterator(); while (it.next()) |entry| { const field_ty = entry.value_ptr.ty; - if (!field_ty.hasCodeGenBits()) continue; + if (!field_ty.hasRuntimeBits()) continue; const alignment = entry.value_ptr.abi_align; const name: CValue = .{ .identifier = entry.key_ptr.* }; @@ -837,7 +837,7 @@ pub const DeclGen = struct { var it = t.unionFields().iterator(); while (it.next()) |entry| { const field_ty = entry.value_ptr.ty; - if (!field_ty.hasCodeGenBits()) continue; + if (!field_ty.hasRuntimeBits()) continue; const alignment = entry.value_ptr.abi_align; const name: CValue = .{ .identifier = entry.key_ptr.* }; try buffer.append(' '); @@ -1582,7 +1582,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue { const elem_type = inst_ty.elemType(); const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut; - if (!elem_type.hasCodeGenBits()) { + if (!elem_type.isFnOrHasRuntimeBits()) { const target = f.object.dg.module.getTarget(); const literal = switch (target.cpu.arch.ptrBitWidth()) { 32 => "(void *)0xaaaaaaaa", @@ -1683,7 +1683,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue { fn airRet(f: *Function, inst: Air.Inst.Index) !CValue { const un_op = f.air.instructions.items(.data)[inst].un_op; const writer = f.object.writer(); - if (f.air.typeOf(un_op).hasCodeGenBits()) { + if (f.air.typeOf(un_op).isFnOrHasRuntimeBits()) { const operand = try f.resolveInst(un_op); try writer.writeAll("return "); try f.writeCValue(writer, operand); @@ -1699,7 +1699,7 @@ fn airRetLoad(f: *Function, inst: Air.Inst.Index) !CValue { const writer = f.object.writer(); const ptr_ty = f.air.typeOf(un_op); const ret_ty = ptr_ty.childType(); - if (!ret_ty.hasCodeGenBits()) { + if (!ret_ty.isFnOrHasRuntimeBits()) { try writer.writeAll("return;\n"); } const ptr = try f.resolveInst(un_op); @@ -2315,7 +2315,7 @@ fn airCall(f: *Function, inst: Air.Inst.Index) !CValue { var result_local: CValue = .none; if (unused_result) { - if (ret_ty.hasCodeGenBits()) { + if (ret_ty.hasRuntimeBits()) { try writer.print("(void)", .{}); } } else { @@ -2832,7 +2832,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand_ty = f.air.typeOf(ty_op.operand); const payload_ty = operand_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { if (operand_ty.zigTypeTag() == .Pointer) { 
const local = try f.allocLocal(inst_ty, .Const); try writer.writeAll(" = *"); @@ -2864,7 +2864,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: []cons const operand_ty = f.air.typeOf(ty_op.operand); const payload_ty = operand_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { return CValue.none; } @@ -2908,7 +2908,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue { const operand = try f.resolveInst(ty_op.operand); const err_un_ty = f.air.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { return operand; } @@ -2951,7 +2951,7 @@ fn airIsErr( const operand_ty = f.air.typeOf(un_op); const local = try f.allocLocal(Type.initTag(.bool), .Const); const payload_ty = operand_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { try writer.print(" = {s}", .{deref_prefix}); try f.writeCValue(writer, operand); try writer.print(" {s} 0;\n", .{op_str}); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 00733dd34b..df64c0c912 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -176,7 +176,7 @@ pub const Object = struct { /// the compiler, but the Type/Value memory here is backed by `type_map_arena`. /// TODO we need to remove entries from this map in response to incremental compilation /// but I think the frontend won't tell us about types that get deleted because - /// hasCodeGenBits() is false for types. + /// hasRuntimeBits() is false for types. type_map: TypeMap, /// The backing memory for `type_map`. Periodically garbage collected after flush(). /// The code for doing the periodical GC is not yet implemented. @@ -463,7 +463,7 @@ pub const Object = struct { const param_offset: c_uint = @boolToInt(ret_ptr != null); for (fn_info.param_types) |param_ty| { - if (!param_ty.hasCodeGenBits()) continue; + if (!param_ty.hasRuntimeBits()) continue; const llvm_arg_i = @intCast(c_uint, args.items.len) + param_offset; try args.append(llvm_func.getParam(llvm_arg_i)); @@ -662,6 +662,7 @@ pub const DeclGen = struct { new_global.setAlignment(global.getAlignment()); new_global.setInitializer(llvm_init); global.replaceAllUsesWith(new_global); + dg.object.decl_map.putAssumeCapacity(decl, new_global); new_global.takeName(global); global.deleteGlobal(); } @@ -709,7 +710,7 @@ pub const DeclGen = struct { // Set parameter attributes. var llvm_param_i: c_uint = @boolToInt(sret); for (fn_info.param_types) |param_ty| { - if (!param_ty.hasCodeGenBits()) continue; + if (!param_ty.hasRuntimeBits()) continue; if (isByRef(param_ty)) { dg.addArgAttr(llvm_fn, llvm_param_i, "nonnull"); @@ -725,6 +726,10 @@ pub const DeclGen = struct { llvm_fn.setFunctionCallConv(toLlvmCallConv(fn_info.cc, target)); } + if (fn_info.alignment != 0) { + llvm_fn.setAlignment(fn_info.alignment); + } + // Function attributes that are independent of analysis results of the function body. 
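// Illustrative sketch (assumes only language features exercised by this
// change): an explicit `align` on a function declaration flows through
// `fn_info.alignment` into `setAlignment()` above, while unannotated
// functions fall back to `defaultFunctionAlignment()` for the target.
fn alignedEntry() align(8) void {}

comptime {
    _ = alignedEntry; // reference the decl so it gets analyzed and emitted
}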
dg.addCommonFnAttributes(llvm_fn); @@ -840,7 +845,11 @@ pub const DeclGen = struct { } const llvm_addrspace = dg.llvmAddressSpace(t.ptrAddressSpace()); const elem_ty = t.childType(); - const llvm_elem_ty = if (elem_ty.hasCodeGenBits() or elem_ty.zigTypeTag() == .Array) + const lower_elem_ty = switch (elem_ty.zigTypeTag()) { + .Opaque, .Array, .Fn => true, + else => elem_ty.hasRuntimeBits(), + }; + const llvm_elem_ty = if (lower_elem_ty) try dg.llvmType(elem_ty) else dg.context.intType(8); @@ -878,13 +887,13 @@ pub const DeclGen = struct { .Optional => { var buf: Type.Payload.ElemType = undefined; const child_type = t.optionalChild(&buf); - if (!child_type.hasCodeGenBits()) { + if (!child_type.hasRuntimeBits()) { return dg.context.intType(1); } const payload_llvm_ty = try dg.llvmType(child_type); if (t.isPtrLikeOptional()) { return payload_llvm_ty; - } else if (!child_type.hasCodeGenBits()) { + } else if (!child_type.hasRuntimeBits()) { return dg.context.intType(1); } @@ -897,7 +906,7 @@ pub const DeclGen = struct { const error_type = t.errorUnionSet(); const payload_type = t.errorUnionPayload(); const llvm_error_type = try dg.llvmType(error_type); - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { return llvm_error_type; } const llvm_payload_type = try dg.llvmType(payload_type); @@ -962,7 +971,7 @@ pub const DeclGen = struct { var big_align: u32 = 0; var running_bits: u16 = 0; for (struct_obj.fields.values()) |field| { - if (!field.ty.hasCodeGenBits()) continue; + if (!field.ty.hasRuntimeBits()) continue; const field_align = field.packedAlignment(); if (field_align == 0) { @@ -1029,7 +1038,7 @@ pub const DeclGen = struct { } } else { for (struct_obj.fields.values()) |field| { - if (!field.ty.hasCodeGenBits()) continue; + if (!field.ty.hasRuntimeBits()) continue; llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty)); } } @@ -1123,7 +1132,7 @@ pub const DeclGen = struct { const sret = firstParamSRet(fn_info, target); const return_type = fn_info.return_type; const raw_llvm_ret_ty = try dg.llvmType(return_type); - const llvm_ret_ty = if (!return_type.hasCodeGenBits() or sret) + const llvm_ret_ty = if (!return_type.hasRuntimeBits() or sret) dg.context.voidType() else raw_llvm_ret_ty; @@ -1136,7 +1145,7 @@ pub const DeclGen = struct { } for (fn_info.param_types) |param_ty| { - if (!param_ty.hasCodeGenBits()) continue; + if (!param_ty.hasRuntimeBits()) continue; const raw_llvm_ty = try dg.llvmType(param_ty); const actual_llvm_ty = if (!isByRef(param_ty)) raw_llvm_ty else raw_llvm_ty.pointerType(0); @@ -1176,29 +1185,35 @@ pub const DeclGen = struct { const llvm_type = try dg.llvmType(tv.ty); return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull(); }, - .Int => { - var bigint_space: Value.BigIntSpace = undefined; - const bigint = tv.val.toBigInt(&bigint_space); - const target = dg.module.getTarget(); - const int_info = tv.ty.intInfo(target); - const llvm_type = dg.context.intType(int_info.bits); + // TODO this duplicates code with Pointer but they should share the handling + // of the tv.val.tag() and then Int should do extra constPtrToInt on top + .Int => switch (tv.val.tag()) { + .decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl), + .decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data), + else => { + var bigint_space: Value.BigIntSpace = undefined; + const bigint = tv.val.toBigInt(&bigint_space); + const target = dg.module.getTarget(); + const int_info = 
tv.ty.intInfo(target); + const llvm_type = dg.context.intType(int_info.bits); - const unsigned_val = v: { - if (bigint.limbs.len == 1) { - break :v llvm_type.constInt(bigint.limbs[0], .False); + const unsigned_val = v: { + if (bigint.limbs.len == 1) { + break :v llvm_type.constInt(bigint.limbs[0], .False); + } + if (@sizeOf(usize) == @sizeOf(u64)) { + break :v llvm_type.constIntOfArbitraryPrecision( + @intCast(c_uint, bigint.limbs.len), + bigint.limbs.ptr, + ); + } + @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); + }; + if (!bigint.positive) { + return llvm.constNeg(unsigned_val); } - if (@sizeOf(usize) == @sizeOf(u64)) { - break :v llvm_type.constIntOfArbitraryPrecision( - @intCast(c_uint, bigint.limbs.len), - bigint.limbs.ptr, - ); - } - @panic("TODO implement bigint to llvm int for 32-bit compiler builds"); - }; - if (!bigint.positive) { - return llvm.constNeg(unsigned_val); - } - return unsigned_val; + return unsigned_val; + }, }, .Enum => { var int_buffer: Value.Payload.U64 = undefined; @@ -1370,7 +1385,7 @@ pub const DeclGen = struct { const llvm_i1 = dg.context.intType(1); const is_pl = !tv.val.isNull(); const non_null_bit = if (is_pl) llvm_i1.constAllOnes() else llvm_i1.constNull(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { return non_null_bit; } if (tv.ty.isPtrLikeOptional()) { @@ -1383,6 +1398,7 @@ pub const DeclGen = struct { return llvm_ty.constNull(); } } + assert(payload_ty.zigTypeTag() != .Fn); const fields: [2]*const llvm.Value = .{ try dg.genTypedValue(.{ .ty = payload_ty, @@ -1420,7 +1436,7 @@ pub const DeclGen = struct { const payload_type = tv.ty.errorUnionPayload(); const is_pl = tv.val.errorUnionIsPayload(); - if (!payload_type.hasCodeGenBits()) { + if (!payload_type.hasRuntimeBits()) { // We use the error type directly as the type. 
const err_val = if (!is_pl) tv.val else Value.initTag(.zero); return dg.genTypedValue(.{ .ty = error_type, .val = err_val }); @@ -1458,7 +1474,7 @@ pub const DeclGen = struct { var running_int: *const llvm.Value = llvm_struct_ty.structGetTypeAtIndex(0).constNull(); for (field_vals) |field_val, i| { const field = fields[i]; - if (!field.ty.hasCodeGenBits()) continue; + if (!field.ty.hasRuntimeBits()) continue; const field_align = field.packedAlignment(); if (field_align == 0) { @@ -1540,7 +1556,7 @@ pub const DeclGen = struct { } else { for (field_vals) |field_val, i| { const field_ty = tv.ty.structFieldType(i); - if (!field_ty.hasCodeGenBits()) continue; + if (!field_ty.hasRuntimeBits()) continue; llvm_fields.appendAssumeCapacity(try dg.genTypedValue(.{ .ty = field_ty, @@ -1572,7 +1588,7 @@ pub const DeclGen = struct { assert(union_obj.haveFieldTypes()); const field_ty = union_obj.fields.values()[field_index].ty; const payload = p: { - if (!field_ty.hasCodeGenBits()) { + if (!field_ty.hasRuntimeBits()) { const padding_len = @intCast(c_uint, layout.payload_size); break :p dg.context.intType(8).arrayType(padding_len).getUndef(); } @@ -1784,13 +1800,14 @@ pub const DeclGen = struct { return self.context.constStruct(&fields, fields.len, .False); } - if (!tv.ty.childType().hasCodeGenBits() or !decl.ty.hasCodeGenBits()) { + const is_fn_body = decl.ty.zigTypeTag() == .Fn; + if (!is_fn_body and !decl.ty.hasRuntimeBits()) { return self.lowerPtrToVoid(tv.ty); } decl.markAlive(); - const llvm_val = if (decl.ty.zigTypeTag() == .Fn) + const llvm_val = if (is_fn_body) try self.resolveLlvmFunction(decl) else try self.resolveGlobalDecl(decl); @@ -2182,7 +2199,7 @@ pub const FuncGen = struct { } else { for (args) |arg, i| { const param_ty = fn_info.param_types[i]; - if (!param_ty.hasCodeGenBits()) continue; + if (!param_ty.hasRuntimeBits()) continue; try llvm_args.append(try self.resolveInst(arg)); } @@ -2200,7 +2217,7 @@ pub const FuncGen = struct { if (return_type.isNoReturn()) { _ = self.builder.buildUnreachable(); return null; - } else if (self.liveness.isUnused(inst) or !return_type.hasCodeGenBits()) { + } else if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBits()) { return null; } else if (sret) { call.setCallSret(llvm_ret_ty); @@ -2224,7 +2241,7 @@ pub const FuncGen = struct { _ = self.builder.buildRetVoid(); return null; } - if (!ret_ty.hasCodeGenBits()) { + if (!ret_ty.hasRuntimeBits()) { _ = self.builder.buildRetVoid(); return null; } @@ -2237,7 +2254,7 @@ pub const FuncGen = struct { const un_op = self.air.instructions.items(.data)[inst].un_op; const ptr_ty = self.air.typeOf(un_op); const ret_ty = ptr_ty.childType(); - if (!ret_ty.hasCodeGenBits() or isByRef(ret_ty)) { + if (!ret_ty.hasRuntimeBits() or isByRef(ret_ty)) { _ = self.builder.buildRetVoid(); return null; } @@ -2273,7 +2290,7 @@ pub const FuncGen = struct { .Int, .Bool, .Pointer, .ErrorSet => operand_ty, .Optional => blk: { const payload_ty = operand_ty.optionalChild(&opt_buffer); - if (!payload_ty.hasCodeGenBits() or operand_ty.isPtrLikeOptional()) { + if (!payload_ty.hasRuntimeBits() or operand_ty.isPtrLikeOptional()) { break :blk operand_ty; } // We need to emit instructions to check for equality/inequality @@ -2397,7 +2414,8 @@ pub const FuncGen = struct { self.builder.positionBuilderAtEnd(parent_bb); // If the block does not return a value, we dont have to create a phi node. 
- if (!inst_ty.hasCodeGenBits()) return null; + const is_body = inst_ty.zigTypeTag() == .Fn; + if (!is_body and !inst_ty.hasRuntimeBits()) return null; const raw_llvm_ty = try self.dg.llvmType(inst_ty); @@ -2406,7 +2424,7 @@ pub const FuncGen = struct { // a pointer to it. LLVM IR allows the call instruction to use function bodies instead // of function pointers, however the phi makes it a runtime value and therefore // the LLVM type has to be wrapped in a pointer. - if (inst_ty.zigTypeTag() == .Fn or isByRef(inst_ty)) { + if (is_body or isByRef(inst_ty)) { break :ty raw_llvm_ty.pointerType(0); } break :ty raw_llvm_ty; @@ -2427,7 +2445,8 @@ pub const FuncGen = struct { // If the break doesn't break a value, then we don't have to add // the values to the lists. - if (self.air.typeOf(branch.operand).hasCodeGenBits()) { + const operand_ty = self.air.typeOf(branch.operand); + if (operand_ty.hasRuntimeBits() or operand_ty.zigTypeTag() == .Fn) { const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the @@ -2531,7 +2550,7 @@ pub const FuncGen = struct { const llvm_usize = try self.dg.llvmType(Type.usize); const len = llvm_usize.constInt(array_ty.arrayLen(), .False); const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); - if (!array_ty.hasCodeGenBits()) { + if (!array_ty.hasRuntimeBits()) { return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, ""); } const operand = try self.resolveInst(ty_op.operand); @@ -2662,7 +2681,7 @@ pub const FuncGen = struct { const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data; const ptr_ty = self.air.typeOf(bin_op.lhs); const elem_ty = ptr_ty.childType(); - if (!elem_ty.hasCodeGenBits()) return null; + if (!elem_ty.hasRuntimeBits()) return null; const base_ptr = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); @@ -2709,7 +2728,7 @@ pub const FuncGen = struct { const struct_llvm_val = try self.resolveInst(struct_field.struct_operand); const field_index = struct_field.field_index; const field_ty = struct_ty.structFieldType(field_index); - if (!field_ty.hasCodeGenBits()) { + if (!field_ty.hasRuntimeBits()) { return null; } const target = self.dg.module.getTarget(); @@ -2914,7 +2933,7 @@ pub const FuncGen = struct { var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { if (invert) { return self.builder.buildNot(operand, ""); } else { @@ -2946,7 +2965,7 @@ pub const FuncGen = struct { const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror)); const zero = err_set_ty.constNull(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand; return self.builder.buildICmp(op, loaded, zero, ""); } @@ -2969,7 +2988,7 @@ pub const FuncGen = struct { const optional_ty = self.air.typeOf(ty_op.operand).childType(); var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { // We have a pointer to a zero-bit value and we need to return // a pointer to a zero-bit value. 
return operand; @@ -2993,7 +3012,7 @@ pub const FuncGen = struct { var buf: Type.Payload.ElemType = undefined; const payload_ty = optional_ty.optionalChild(&buf); const non_null_bit = self.context.intType(1).constAllOnes(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { // We have a pointer to a i1. We need to set it to 1 and then return the same pointer. _ = self.builder.buildStore(non_null_bit, operand); return operand; @@ -3028,7 +3047,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.air.typeOf(ty_op.operand); const payload_ty = self.air.typeOfIndex(inst); - if (!payload_ty.hasCodeGenBits()) return null; + if (!payload_ty.hasRuntimeBits()) return null; if (optional_ty.isPtrLikeOptional()) { // Payload value is the same as the optional value. @@ -3049,7 +3068,7 @@ pub const FuncGen = struct { const operand = try self.resolveInst(ty_op.operand); const err_union_ty = self.air.typeOf(ty_op.operand); const payload_ty = err_union_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) return null; + if (!payload_ty.hasRuntimeBits()) return null; if (operand_is_ptr or isByRef(payload_ty)) { return self.builder.buildStructGEP(operand, 1, ""); } @@ -3069,7 +3088,7 @@ pub const FuncGen = struct { const operand_ty = self.air.typeOf(ty_op.operand); const payload_ty = operand_ty.errorUnionPayload(); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { if (!operand_is_ptr) return operand; return self.builder.buildLoad(operand, ""); } @@ -3088,7 +3107,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); const non_null_bit = self.context.intType(1).constAllOnes(); - if (!payload_ty.hasCodeGenBits()) return non_null_bit; + if (!payload_ty.hasRuntimeBits()) return non_null_bit; const operand = try self.resolveInst(ty_op.operand); const optional_ty = self.air.typeOfIndex(inst); if (optional_ty.isPtrLikeOptional()) return operand; @@ -3116,7 +3135,7 @@ pub const FuncGen = struct { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const payload_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { return operand; } const inst_ty = self.air.typeOfIndex(inst); @@ -3147,7 +3166,7 @@ pub const FuncGen = struct { const err_un_ty = self.air.typeOfIndex(inst); const payload_ty = err_un_ty.errorUnionPayload(); const operand = try self.resolveInst(ty_op.operand); - if (!payload_ty.hasCodeGenBits()) { + if (!payload_ty.hasRuntimeBits()) { return operand; } const err_un_llvm_ty = try self.dg.llvmType(err_un_ty); @@ -3836,7 +3855,7 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const ptr_ty = self.air.typeOfIndex(inst); const pointee_type = ptr_ty.childType(); - if (!pointee_type.hasCodeGenBits()) return self.dg.lowerPtrToVoid(ptr_ty); + if (!pointee_type.isFnOrHasRuntimeBits()) return self.dg.lowerPtrToVoid(ptr_ty); const pointee_llvm_ty = try self.dg.llvmType(pointee_type); const alloca_inst = self.buildAlloca(pointee_llvm_ty); @@ -3850,7 +3869,7 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; const ptr_ty = self.air.typeOfIndex(inst); const ret_ty = ptr_ty.childType(); - if (!ret_ty.hasCodeGenBits()) return null; + if (!ret_ty.isFnOrHasRuntimeBits()) return null; if (self.ret_ptr) |ret_ptr| return ret_ptr; const ret_llvm_ty 
= try self.dg.llvmType(ret_ty); const target = self.dg.module.getTarget(); @@ -4074,7 +4093,7 @@ pub const FuncGen = struct { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr_ty = self.air.typeOf(bin_op.lhs); const operand_ty = ptr_ty.childType(); - if (!operand_ty.hasCodeGenBits()) return null; + if (!operand_ty.isFnOrHasRuntimeBits()) return null; var ptr = try self.resolveInst(bin_op.lhs); var element = try self.resolveInst(bin_op.rhs); const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false); @@ -4674,7 +4693,7 @@ pub const FuncGen = struct { const union_obj = union_ty.cast(Type.Payload.Union).?.data; const field = &union_obj.fields.values()[field_index]; const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst)); - if (!field.ty.hasCodeGenBits()) { + if (!field.ty.hasRuntimeBits()) { return null; } const target = self.dg.module.getTarget(); @@ -4702,7 +4721,7 @@ pub const FuncGen = struct { fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) !?*const llvm.Value { const info = ptr_ty.ptrInfo().data; - if (!info.pointee_type.hasCodeGenBits()) return null; + if (!info.pointee_type.hasRuntimeBits()) return null; const target = self.dg.module.getTarget(); const ptr_alignment = ptr_ty.ptrAlignment(target); @@ -4757,7 +4776,7 @@ pub const FuncGen = struct { ) void { const info = ptr_ty.ptrInfo().data; const elem_ty = info.pointee_type; - if (!elem_ty.hasCodeGenBits()) { + if (!elem_ty.isFnOrHasRuntimeBits()) { return; } const target = self.dg.module.getTarget(); @@ -5087,7 +5106,7 @@ fn llvmFieldIndex( if (struct_obj.layout != .Packed) { var llvm_field_index: c_uint = 0; for (struct_obj.fields.values()) |field, i| { - if (!field.ty.hasCodeGenBits()) + if (!field.ty.hasRuntimeBits()) continue; if (field_index > i) { llvm_field_index += 1; @@ -5114,7 +5133,7 @@ fn llvmFieldIndex( var running_bits: u16 = 0; var llvm_field_index: c_uint = 0; for (struct_obj.fields.values()) |field, i| { - if (!field.ty.hasCodeGenBits()) + if (!field.ty.hasRuntimeBits()) continue; const field_align = field.packedAlignment(); @@ -5227,9 +5246,9 @@ fn isByRef(ty: Type) bool { .AnyFrame, => return false, - .Array, .Frame => return ty.hasCodeGenBits(), + .Array, .Frame => return ty.hasRuntimeBits(), .Struct => { - if (!ty.hasCodeGenBits()) return false; + if (!ty.hasRuntimeBits()) return false; if (ty.castTag(.tuple)) |tuple| { var count: usize = 0; for (tuple.data.values) |field_val, i| { @@ -5247,7 +5266,7 @@ fn isByRef(ty: Type) bool { } return true; }, - .Union => return ty.hasCodeGenBits(), + .Union => return ty.hasRuntimeBits(), .ErrorUnion => return isByRef(ty.errorUnionPayload()), .Optional => { var buf: Type.Payload.ElemType = undefined; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 39363064a7..b4f02a14a7 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -852,7 +852,7 @@ pub const DeclGen = struct { try self.beginSPIRVBlock(label_id); // If this block didn't produce a value, simply return here. - if (!ty.hasCodeGenBits()) + if (!ty.hasRuntimeBits()) return null; // Combine the result from the blocks using the Phi instruction. @@ -879,7 +879,7 @@ pub const DeclGen = struct { const block = self.blocks.get(br.block_inst).?; const operand_ty = self.air.typeOf(br.operand); - if (operand_ty.hasCodeGenBits()) { + if (operand_ty.hasRuntimeBits()) { const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body. 
try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); @@ -958,7 +958,7 @@ pub const DeclGen = struct { fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { const operand = self.air.instructions.items(.data)[inst].un_op; const operand_ty = self.air.typeOf(operand); - if (operand_ty.hasCodeGenBits()) { + if (operand_ty.hasRuntimeBits()) { const operand_id = try self.resolve(operand); try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); } else { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index bfd472161a..d83b5fde73 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2476,7 +2476,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len); const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); + const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(); if (fn_ret_has_bits) { dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); } else { diff --git a/src/link/MachO/DebugSymbols.zig b/src/link/MachO/DebugSymbols.zig index cda5077528..88a27ea48f 100644 --- a/src/link/MachO/DebugSymbols.zig +++ b/src/link/MachO/DebugSymbols.zig @@ -920,7 +920,7 @@ pub fn initDeclDebugBuffers( try dbg_info_buffer.ensureUnusedCapacity(27 + decl_name_with_null.len); const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); + const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(); if (fn_ret_has_bits) { dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); } else { diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 82c1f1f630..f0b1f75239 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -259,7 +259,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { if (build_options.have_llvm) { if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); } - if (!decl.ty.hasCodeGenBits()) return; + if (!decl.ty.hasRuntimeBits()) return; assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes() decl.link.wasm.clear(); diff --git a/src/print_zir.zig b/src/print_zir.zig index 7ce459568b..1954772e37 100644 --- a/src/print_zir.zig +++ b/src/print_zir.zig @@ -1157,7 +1157,8 @@ const Writer = struct { break :blk decls_len; } else 0; - try self.writeFlag(stream, "known_has_bits, ", small.known_has_bits); + try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv); + try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only); try stream.print("{s}, {s}, ", .{ @tagName(small.name_strategy), @tagName(small.layout), }); diff --git a/src/target.zig b/src/target.zig index 8a95e756bf..dc29129e42 100644 --- a/src/target.zig +++ b/src/target.zig @@ -637,3 +637,12 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 { else => return null, } } + +pub fn defaultFunctionAlignment(target: std.Target) u32 { + return switch (target.cpu.arch) { + .arm, .armeb => 4, + .aarch64, .aarch64_32, .aarch64_be => 4, + .riscv64 => 2, + else => 1, + }; +} diff --git a/src/type.zig b/src/type.zig index 1db6ceeb41..0020ccd7cc 100644 --- a/src/type.zig +++ b/src/type.zig @@ -5,6 +5,7 @@ const Allocator = std.mem.Allocator; const Target = std.Target; const Module = @import("Module.zig"); const log = std.log.scoped(.Type); +const target_util = @import("target.zig"); const file_struct = @This(); @@ -577,21 +578,36 @@ pub const Type = extern union { } }, .Fn => { - if 
(!a.fnReturnType().eql(b.fnReturnType())) + const a_info = a.fnInfo(); + const b_info = b.fnInfo(); + + if (!eql(a_info.return_type, b_info.return_type)) return false; - if (a.fnCallingConvention() != b.fnCallingConvention()) + + if (a_info.cc != b_info.cc) return false; - const a_param_len = a.fnParamLen(); - const b_param_len = b.fnParamLen(); - if (a_param_len != b_param_len) + + if (a_info.param_types.len != b_info.param_types.len) return false; - var i: usize = 0; - while (i < a_param_len) : (i += 1) { - if (!a.fnParamType(i).eql(b.fnParamType(i))) + + for (a_info.param_types) |a_param_ty, i| { + const b_param_ty = b_info.param_types[i]; + if (!eql(a_param_ty, b_param_ty)) + return false; + + if (a_info.comptime_params[i] != b_info.comptime_params[i]) return false; } - if (a.fnIsVarArgs() != b.fnIsVarArgs()) + + if (a_info.alignment != b_info.alignment) return false; + + if (a_info.is_var_args != b_info.is_var_args) + return false; + + if (a_info.is_generic != b_info.is_generic) + return false; + return true; }, .Optional => { @@ -686,6 +702,7 @@ pub const Type = extern union { return false; }, .Float => return a.tag() == b.tag(), + .BoundFn, .Frame, => std.debug.panic("TODO implement Type equality comparison of {} and {}", .{ a, b }), @@ -937,6 +954,7 @@ pub const Type = extern union { .return_type = try payload.return_type.copy(allocator), .param_types = param_types, .cc = payload.cc, + .alignment = payload.alignment, .is_var_args = payload.is_var_args, .is_generic = payload.is_generic, .comptime_params = comptime_params.ptr, @@ -1114,9 +1132,15 @@ pub const Type = extern union { } try writer.writeAll("..."); } - try writer.writeAll(") callconv(."); - try writer.writeAll(@tagName(payload.cc)); try writer.writeAll(") "); + if (payload.cc != .Unspecified) { + try writer.writeAll("callconv(."); + try writer.writeAll(@tagName(payload.cc)); + try writer.writeAll(") "); + } + if (payload.alignment != 0) { + try writer.print("align({d}) ", .{payload.alignment}); + } ty = payload.return_type; continue; }, @@ -1423,170 +1447,6 @@ pub const Type = extern union { } } - /// Anything that reports hasCodeGenBits() false returns false here as well. - /// `generic_poison` will return false. 
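// Illustrative consequence of the richer `.Fn` equality above: calling
// convention (and now alignment and per-parameter comptime-ness) take part
// in type equality, so prototypes differing only in those are distinct
// types. A minimal comptime check using just the calling convention:
comptime {
    const assert = @import("std").debug.assert;
    assert(fn () void != fn () callconv(.C) void);
}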
- pub fn requiresComptime(ty: Type) bool { - return switch (ty.tag()) { - .u1, - .u8, - .i8, - .u16, - .i16, - .u32, - .i32, - .u64, - .i64, - .u128, - .i128, - .usize, - .isize, - .c_short, - .c_ushort, - .c_int, - .c_uint, - .c_long, - .c_ulong, - .c_longlong, - .c_ulonglong, - .c_longdouble, - .f16, - .f32, - .f64, - .f128, - .anyopaque, - .bool, - .void, - .anyerror, - .noreturn, - .@"anyframe", - .@"null", - .@"undefined", - .atomic_order, - .atomic_rmw_op, - .calling_convention, - .address_space, - .float_mode, - .reduce_op, - .call_options, - .prefetch_options, - .export_options, - .extern_options, - .manyptr_u8, - .manyptr_const_u8, - .manyptr_const_u8_sentinel_0, - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - .const_slice_u8, - .const_slice_u8_sentinel_0, - .anyerror_void_error_union, - .empty_struct_literal, - .function, - .empty_struct, - .error_set, - .error_set_single, - .error_set_inferred, - .error_set_merged, - .@"opaque", - .generic_poison, - .array_u8, - .array_u8_sentinel_0, - .int_signed, - .int_unsigned, - .enum_simple, - => false, - - .single_const_pointer_to_comptime_int, - .type, - .comptime_int, - .comptime_float, - .enum_literal, - .type_info, - => true, - - .var_args_param => unreachable, - .inferred_alloc_mut => unreachable, - .inferred_alloc_const => unreachable, - .bound_fn => unreachable, - - .array, - .array_sentinel, - .vector, - .pointer, - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - => return requiresComptime(childType(ty)), - - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, - => { - var buf: Payload.ElemType = undefined; - return requiresComptime(optionalChild(ty, &buf)); - }, - - .tuple => { - const tuple = ty.castTag(.tuple).?.data; - for (tuple.types) |field_ty| { - if (requiresComptime(field_ty)) { - return true; - } - } - return false; - }, - - .@"struct" => { - const struct_obj = ty.castTag(.@"struct").?.data; - switch (struct_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - struct_obj.requires_comptime = .wip; - for (struct_obj.fields.values()) |field| { - if (requiresComptime(field.ty)) { - struct_obj.requires_comptime = .yes; - return true; - } - } - struct_obj.requires_comptime = .no; - return false; - }, - } - }, - - .@"union", .union_tagged => { - const union_obj = ty.cast(Payload.Union).?.data; - switch (union_obj.requires_comptime) { - .no, .wip => return false, - .yes => return true, - .unknown => { - union_obj.requires_comptime = .wip; - for (union_obj.fields.values()) |field| { - if (requiresComptime(field.ty)) { - union_obj.requires_comptime = .yes; - return true; - } - } - union_obj.requires_comptime = .no; - return false; - }, - } - }, - - .error_union => return requiresComptime(errorUnionPayload(ty)), - .anyframe_T => return ty.castTag(.anyframe_T).?.data.requiresComptime(), - .enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty.requiresComptime(), - .enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty.requiresComptime(), - }; - } - pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value { switch (self.tag()) { .u1 => return Value.initTag(.u1_type), @@ -1652,8 +1512,12 @@ pub const Type = extern union { } } - pub fn hasCodeGenBits(self: Type) bool { - return switch (self.tag()) { + /// true if and only if the type takes up space in 
memory at runtime. + /// There are two reasons a type will return false: + /// * the type is a comptime-only type. For example, the type `type` itself. + /// * the type has only one possible value, making its ABI size 0. + pub fn hasRuntimeBits(ty: Type) bool { + return switch (ty.tag()) { .u1, .u8, .i8, @@ -1682,13 +1546,9 @@ pub const Type = extern union { .f128, .bool, .anyerror, - .single_const_pointer_to_comptime_int, .const_slice_u8, .const_slice_u8_sentinel_0, .array_u8_sentinel_0, - .optional, - .optional_single_mut_pointer, - .optional_single_const_pointer, .anyerror_void_error_union, .error_set, .error_set_single, @@ -1708,100 +1568,12 @@ pub const Type = extern union { .export_options, .extern_options, .@"anyframe", - .anyframe_T, .anyopaque, .@"opaque", - .single_const_pointer, - .single_mut_pointer, - .many_const_pointer, - .many_mut_pointer, - .c_const_pointer, - .c_mut_pointer, - .const_slice, - .mut_slice, - .pointer, => true, - .function => !self.castTag(.function).?.data.is_generic, - - .fn_noreturn_no_args, - .fn_void_no_args, - .fn_naked_noreturn_no_args, - .fn_ccc_void_no_args, - => true, - - .@"struct" => { - const struct_obj = self.castTag(.@"struct").?.data; - if (struct_obj.known_has_bits) { - return true; - } - assert(struct_obj.haveFieldTypes()); - for (struct_obj.fields.values()) |value| { - if (value.ty.hasCodeGenBits()) - return true; - } else { - return false; - } - }, - .enum_full => { - const enum_full = self.castTag(.enum_full).?.data; - return enum_full.fields.count() >= 2; - }, - .enum_simple => { - const enum_simple = self.castTag(.enum_simple).?.data; - return enum_simple.fields.count() >= 2; - }, - .enum_numbered, .enum_nonexhaustive => { - var buffer: Payload.Bits = undefined; - const int_tag_ty = self.intTagType(&buffer); - return int_tag_ty.hasCodeGenBits(); - }, - .@"union" => { - const union_obj = self.castTag(.@"union").?.data; - assert(union_obj.haveFieldTypes()); - for (union_obj.fields.values()) |value| { - if (value.ty.hasCodeGenBits()) - return true; - } else { - return false; - } - }, - .union_tagged => { - const union_obj = self.castTag(.union_tagged).?.data; - if (union_obj.tag_ty.hasCodeGenBits()) { - return true; - } - assert(union_obj.haveFieldTypes()); - for (union_obj.fields.values()) |value| { - if (value.ty.hasCodeGenBits()) - return true; - } else { - return false; - } - }, - - .array, .vector => self.elemType().hasCodeGenBits() and self.arrayLen() != 0, - .array_u8 => self.arrayLen() != 0, - - .array_sentinel => self.childType().hasCodeGenBits(), - - .int_signed, .int_unsigned => self.cast(Payload.Bits).?.data != 0, - - .error_union => { - const payload = self.castTag(.error_union).?.data; - return payload.error_set.hasCodeGenBits() or payload.payload.hasCodeGenBits(); - }, - - .tuple => { - const tuple = self.castTag(.tuple).?.data; - for (tuple.types) |ty, i| { - const val = tuple.values[i]; - if (val.tag() != .unreachable_value) continue; // comptime field - if (ty.hasCodeGenBits()) return true; - } - return false; - }, - + // These are false because they are comptime-only types. + .single_const_pointer_to_comptime_int, .void, .type, .comptime_int, @@ -1814,8 +1586,109 @@ pub const Type = extern union { .empty_struct_literal, .type_info, .bound_fn, + // These are function *bodies*, not pointers. + // Special exceptions have to be made when emitting functions due to + // this returning false. 
+ .function, + .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, => false, + // These types have more than one possible value, so the result is the same as + // asking whether they are comptime-only types. + .anyframe_T, + .optional, + .optional_single_mut_pointer, + .optional_single_const_pointer, + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + .pointer, + => !ty.comptimeOnly(), + + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + switch (struct_obj.requires_comptime) { + .wip => unreachable, + .yes => return false, + .no => if (struct_obj.known_non_opv) return true, + .unknown => {}, + } + assert(struct_obj.haveFieldTypes()); + for (struct_obj.fields.values()) |value| { + if (value.ty.hasRuntimeBits()) + return true; + } else { + return false; + } + }, + + .enum_full => { + const enum_full = ty.castTag(.enum_full).?.data; + return enum_full.fields.count() >= 2; + }, + .enum_simple => { + const enum_simple = ty.castTag(.enum_simple).?.data; + return enum_simple.fields.count() >= 2; + }, + .enum_numbered, .enum_nonexhaustive => { + var buffer: Payload.Bits = undefined; + const int_tag_ty = ty.intTagType(&buffer); + return int_tag_ty.hasRuntimeBits(); + }, + + .@"union" => { + const union_obj = ty.castTag(.@"union").?.data; + assert(union_obj.haveFieldTypes()); + for (union_obj.fields.values()) |value| { + if (value.ty.hasRuntimeBits()) + return true; + } else { + return false; + } + }, + .union_tagged => { + const union_obj = ty.castTag(.union_tagged).?.data; + if (union_obj.tag_ty.hasRuntimeBits()) { + return true; + } + assert(union_obj.haveFieldTypes()); + for (union_obj.fields.values()) |value| { + if (value.ty.hasRuntimeBits()) + return true; + } else { + return false; + } + }, + + .array, .vector => ty.arrayLen() != 0 and ty.elemType().hasRuntimeBits(), + .array_u8 => ty.arrayLen() != 0, + .array_sentinel => ty.childType().hasRuntimeBits(), + + .int_signed, .int_unsigned => ty.cast(Payload.Bits).?.data != 0, + + .error_union => { + const payload = ty.castTag(.error_union).?.data; + return payload.error_set.hasRuntimeBits() or payload.payload.hasRuntimeBits(); + }, + + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + for (tuple.types) |field_ty, i| { + const val = tuple.values[i]; + if (val.tag() != .unreachable_value) continue; // comptime field + if (field_ty.hasRuntimeBits()) return true; + } + return false; + }, + .inferred_alloc_const => unreachable, .inferred_alloc_mut => unreachable, .var_args_param => unreachable, @@ -1823,6 +1696,24 @@ pub const Type = extern union { }; } + pub fn isFnOrHasRuntimeBits(ty: Type) bool { + switch (ty.zigTypeTag()) { + .Fn => { + const fn_info = ty.fnInfo(); + if (fn_info.is_generic) return false; + if (fn_info.is_var_args) return true; + switch (fn_info.cc) { + // If there was a comptime calling convention, it should also return false here. 
+ .Inline => return false, + else => {}, + } + if (fn_info.return_type.comptimeOnly()) return false; + return true; + }, + else => return ty.hasRuntimeBits(), + } + } + pub fn isNoReturn(self: Type) bool { const definitely_correct_result = self.tag_if_small_enough != .bound_fn and @@ -1918,12 +1809,13 @@ pub const Type = extern union { .fn_void_no_args, // represents machine code; not a pointer .fn_naked_noreturn_no_args, // represents machine code; not a pointer .fn_ccc_void_no_args, // represents machine code; not a pointer - .function, // represents machine code; not a pointer - => return switch (target.cpu.arch) { - .arm, .armeb => 4, - .aarch64, .aarch64_32, .aarch64_be => 4, - .riscv64 => 2, - else => 1, + => return target_util.defaultFunctionAlignment(target), + + // represents machine code; not a pointer + .function => { + const alignment = self.castTag(.function).?.data.alignment; + if (alignment != 0) return alignment; + return target_util.defaultFunctionAlignment(target); }, .i16, .u16 => return 2, @@ -1996,7 +1888,7 @@ pub const Type = extern union { .optional => { var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); - if (!child_type.hasCodeGenBits()) return 1; + if (!child_type.hasRuntimeBits()) return 1; if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr()) return @divExact(target.cpu.arch.ptrBitWidth(), 8); @@ -2006,9 +1898,9 @@ pub const Type = extern union { .error_union => { const data = self.castTag(.error_union).?.data; - if (!data.error_set.hasCodeGenBits()) { + if (!data.error_set.hasRuntimeBits()) { return data.payload.abiAlignment(target); - } else if (!data.payload.hasCodeGenBits()) { + } else if (!data.payload.hasRuntimeBits()) { return data.error_set.abiAlignment(target); } return @maximum( @@ -2028,7 +1920,7 @@ pub const Type = extern union { if (!is_packed) { var big_align: u32 = 0; for (fields.values()) |field| { - if (!field.ty.hasCodeGenBits()) continue; + if (!field.ty.hasRuntimeBits()) continue; const field_align = field.normalAlignment(target); big_align = @maximum(big_align, field_align); @@ -2042,7 +1934,7 @@ pub const Type = extern union { var running_bits: u16 = 0; for (fields.values()) |field| { - if (!field.ty.hasCodeGenBits()) continue; + if (!field.ty.hasRuntimeBits()) continue; const field_align = field.packedAlignment(); if (field_align == 0) { @@ -2080,7 +1972,7 @@ pub const Type = extern union { for (tuple.types) |field_ty, i| { const val = tuple.values[i]; if (val.tag() != .unreachable_value) continue; // comptime field - if (!field_ty.hasCodeGenBits()) continue; + if (!field_ty.hasRuntimeBits()) continue; const field_align = field_ty.abiAlignment(target); big_align = @maximum(big_align, field_align); @@ -2123,7 +2015,7 @@ pub const Type = extern union { } /// Asserts the type has the ABI size already resolved. - /// Types that return false for hasCodeGenBits() return 0. + /// Types that return false for hasRuntimeBits() return 0. 
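// Illustrative sketch of the contract documented above, via the language-
// level mirror of this query: types without runtime bits report ABI size 0,
// and a pointer-like optional stays pointer-sized.
test "types without runtime bits have zero ABI size" {
    const std = @import("std");
    try std.testing.expect(@sizeOf(void) == 0);
    try std.testing.expect(@sizeOf(u0) == 0);
    try std.testing.expect(@sizeOf(enum { only_tag }) == 0); // one possible value
    try std.testing.expect(@sizeOf(?void) == 1); // only the null flag remains
    try std.testing.expect(@sizeOf(?*u8) == @sizeOf(*u8)); // null is address 0
}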
pub fn abiSize(self: Type, target: Target) u64 { return switch (self.tag()) { .fn_noreturn_no_args => unreachable, // represents machine code; not a pointer @@ -2210,24 +2102,8 @@ pub const Type = extern union { .usize, .@"anyframe", .anyframe_T, - => return @divExact(target.cpu.arch.ptrBitWidth(), 8), - - .const_slice, - .mut_slice, - => { - return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2; - }, - .const_slice_u8, - .const_slice_u8_sentinel_0, - => return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2, - .optional_single_const_pointer, .optional_single_mut_pointer, - => { - if (!self.elemType().hasCodeGenBits()) return 1; - return @divExact(target.cpu.arch.ptrBitWidth(), 8); - }, - .single_const_pointer, .single_mut_pointer, .many_const_pointer, @@ -2239,6 +2115,12 @@ pub const Type = extern union { .manyptr_const_u8_sentinel_0, => return @divExact(target.cpu.arch.ptrBitWidth(), 8), + .const_slice, + .mut_slice, + .const_slice_u8, + .const_slice_u8_sentinel_0, + => return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2, + .pointer => switch (self.castTag(.pointer).?.data.size) { .Slice => @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2, else => @divExact(target.cpu.arch.ptrBitWidth(), 8), @@ -2276,7 +2158,7 @@ pub const Type = extern union { .optional => { var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); - if (!child_type.hasCodeGenBits()) return 1; + if (!child_type.hasRuntimeBits()) return 1; if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice()) return @divExact(target.cpu.arch.ptrBitWidth(), 8); @@ -2290,11 +2172,11 @@ pub const Type = extern union { .error_union => { const data = self.castTag(.error_union).?.data; - if (!data.error_set.hasCodeGenBits() and !data.payload.hasCodeGenBits()) { + if (!data.error_set.hasRuntimeBits() and !data.payload.hasRuntimeBits()) { return 0; - } else if (!data.error_set.hasCodeGenBits()) { + } else if (!data.error_set.hasRuntimeBits()) { return data.payload.abiSize(target); - } else if (!data.payload.hasCodeGenBits()) { + } else if (!data.payload.hasRuntimeBits()) { return data.error_set.abiSize(target); } const code_align = abiAlignment(data.error_set, target); @@ -2414,11 +2296,7 @@ pub const Type = extern union { .optional_single_const_pointer, .optional_single_mut_pointer, => { - if (ty.elemType().hasCodeGenBits()) { - return target.cpu.arch.ptrBitWidth(); - } else { - return 1; - } + return target.cpu.arch.ptrBitWidth(); }, .single_const_pointer, @@ -2428,11 +2306,7 @@ pub const Type = extern union { .c_const_pointer, .c_mut_pointer, => { - if (ty.elemType().hasCodeGenBits()) { - return target.cpu.arch.ptrBitWidth(); - } else { - return 0; - } + return target.cpu.arch.ptrBitWidth(); }, .pointer => switch (ty.castTag(.pointer).?.data.size) { @@ -2468,7 +2342,7 @@ pub const Type = extern union { .optional => { var buf: Payload.ElemType = undefined; const child_type = ty.optionalChild(&buf); - if (!child_type.hasCodeGenBits()) return 8; + if (!child_type.hasRuntimeBits()) return 8; if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice()) return target.cpu.arch.ptrBitWidth(); @@ -2482,11 +2356,11 @@ pub const Type = extern union { .error_union => { const payload = ty.castTag(.error_union).?.data; - if (!payload.error_set.hasCodeGenBits() and !payload.payload.hasCodeGenBits()) { + if (!payload.error_set.hasRuntimeBits() and !payload.payload.hasRuntimeBits()) { return 0; - } else if (!payload.error_set.hasCodeGenBits()) { + } else if 
(!payload.error_set.hasRuntimeBits()) { return payload.payload.bitSize(target); - } else if (!payload.payload.hasCodeGenBits()) { + } else if (!payload.payload.hasRuntimeBits()) { return payload.error_set.bitSize(target); } @panic("TODO bitSize error union"); @@ -2728,7 +2602,7 @@ pub const Type = extern union { var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); // optionals of zero sized pointers behave like bools - if (!child_type.hasCodeGenBits()) return false; + if (!child_type.hasRuntimeBits()) return false; if (child_type.zigTypeTag() != .Pointer) return false; const info = child_type.ptrInfo().data; @@ -2765,7 +2639,7 @@ pub const Type = extern union { var buf: Payload.ElemType = undefined; const child_type = self.optionalChild(&buf); // optionals of zero sized types behave like bools, not pointers - if (!child_type.hasCodeGenBits()) return false; + if (!child_type.hasRuntimeBits()) return false; if (child_type.zigTypeTag() != .Pointer) return false; const info = child_type.ptrInfo().data; @@ -3424,6 +3298,7 @@ pub const Type = extern union { .comptime_params = undefined, .return_type = initTag(.noreturn), .cc = .Unspecified, + .alignment = 0, .is_var_args = false, .is_generic = false, }, @@ -3432,6 +3307,7 @@ pub const Type = extern union { .comptime_params = undefined, .return_type = initTag(.void), .cc = .Unspecified, + .alignment = 0, .is_var_args = false, .is_generic = false, }, @@ -3440,6 +3316,7 @@ pub const Type = extern union { .comptime_params = undefined, .return_type = initTag(.noreturn), .cc = .Naked, + .alignment = 0, .is_var_args = false, .is_generic = false, }, @@ -3448,6 +3325,7 @@ pub const Type = extern union { .comptime_params = undefined, .return_type = initTag(.void), .cc = .C, + .alignment = 0, .is_var_args = false, .is_generic = false, }, @@ -3629,7 +3507,7 @@ pub const Type = extern union { }, .enum_nonexhaustive => { const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty; - if (!tag_ty.hasCodeGenBits()) { + if (!tag_ty.hasRuntimeBits()) { return Value.zero; } else { return null; @@ -3672,6 +3550,167 @@ pub const Type = extern union { }; } + /// During semantic analysis, instead call `Sema.typeRequiresComptime` which + /// resolves field types rather than asserting they are already resolved. + pub fn comptimeOnly(ty: Type) bool { + return switch (ty.tag()) { + .u1, + .u8, + .i8, + .u16, + .i16, + .u32, + .i32, + .u64, + .i64, + .u128, + .i128, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f128, + .anyopaque, + .bool, + .void, + .anyerror, + .noreturn, + .@"anyframe", + .@"null", + .@"undefined", + .atomic_order, + .atomic_rmw_op, + .calling_convention, + .address_space, + .float_mode, + .reduce_op, + .call_options, + .prefetch_options, + .export_options, + .extern_options, + .manyptr_u8, + .manyptr_const_u8, + .manyptr_const_u8_sentinel_0, + .const_slice_u8, + .const_slice_u8_sentinel_0, + .anyerror_void_error_union, + .empty_struct_literal, + .empty_struct, + .error_set, + .error_set_single, + .error_set_inferred, + .error_set_merged, + .@"opaque", + .generic_poison, + .array_u8, + .array_u8_sentinel_0, + .int_signed, + .int_unsigned, + .enum_simple, + => false, + + .single_const_pointer_to_comptime_int, + .type, + .comptime_int, + .comptime_float, + .enum_literal, + .type_info, + // These are function bodies, not function pointers. 
+ .fn_noreturn_no_args, + .fn_void_no_args, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .function, + => true, + + .var_args_param => unreachable, + .inferred_alloc_mut => unreachable, + .inferred_alloc_const => unreachable, + .bound_fn => unreachable, + + .array, + .array_sentinel, + .vector, + => return ty.childType().comptimeOnly(), + + .pointer, + .single_const_pointer, + .single_mut_pointer, + .many_const_pointer, + .many_mut_pointer, + .c_const_pointer, + .c_mut_pointer, + .const_slice, + .mut_slice, + => { + const child_ty = ty.childType(); + if (child_ty.zigTypeTag() == .Fn) { + return false; + } else { + return child_ty.comptimeOnly(); + } + }, + + .optional, + .optional_single_mut_pointer, + .optional_single_const_pointer, + => { + var buf: Type.Payload.ElemType = undefined; + return ty.optionalChild(&buf).comptimeOnly(); + }, + + .tuple => { + const tuple = ty.castTag(.tuple).?.data; + for (tuple.types) |field_ty| { + if (field_ty.comptimeOnly()) return true; + } + return false; + }, + + .@"struct" => { + const struct_obj = ty.castTag(.@"struct").?.data; + switch (struct_obj.requires_comptime) { + .wip, .unknown => unreachable, // This function asserts types already resolved. + .no => return false, + .yes => return true, + } + }, + + .@"union", .union_tagged => { + const union_obj = ty.cast(Type.Payload.Union).?.data; + switch (union_obj.requires_comptime) { + .wip, .unknown => unreachable, // This function asserts types already resolved. + .no => return false, + .yes => return true, + } + }, + + .error_union => return ty.errorUnionPayload().comptimeOnly(), + .anyframe_T => { + const child_ty = ty.castTag(.anyframe_T).?.data; + return child_ty.comptimeOnly(); + }, + .enum_numbered => { + const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty; + return tag_ty.comptimeOnly(); + }, + .enum_full, .enum_nonexhaustive => { + const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty; + return tag_ty.comptimeOnly(); + }, + }; + } + pub fn isIndexable(ty: Type) bool { return switch (ty.zigTypeTag()) { .Array, .Vector => true, @@ -3949,7 +3988,7 @@ pub const Type = extern union { const field = it.struct_obj.fields.values()[it.field]; defer it.field += 1; - if (!field.ty.hasCodeGenBits()) { + if (!field.ty.hasRuntimeBits()) { return PackedFieldOffset{ .field = it.field, .offset = it.offset, @@ -4018,7 +4057,7 @@ pub const Type = extern union { const field = it.struct_obj.fields.values()[it.field]; defer it.field += 1; - if (!field.ty.hasCodeGenBits()) + if (!field.ty.hasRuntimeBits()) return FieldOffset{ .field = it.field, .offset = it.offset }; const field_align = field.normalAlignment(it.target); @@ -4572,6 +4611,8 @@ pub const Type = extern union { param_types: []Type, comptime_params: [*]bool, return_type: Type, + /// If zero use default target function code alignment. + alignment: u32, cc: std.builtin.CallingConvention, is_var_args: bool, is_generic: bool, diff --git a/src/value.zig b/src/value.zig index c043bc9364..39cb1a4dbc 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1225,7 +1225,7 @@ pub const Value = extern union { /// Asserts the value is an integer and not undefined. /// Returns the number of bits the value requires to represent stored in twos complement form. 
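// Illustrative sketch of the quantity described above (a hypothetical
// standalone helper, not compiler code; builtin call syntax per the 0.9.x
// era of this patch):
fn twosCompBitCount(x: i64) u7 {
    if (x == 0) return 0; // zero requires no bits
    if (x > 0) return @intCast(u7, 64 - @clz(u64, @bitCast(u64, x)));
    // Negatives: significant bits of the complement, plus one sign bit.
    return @intCast(u7, 64 - @clz(u64, ~@bitCast(u64, x)) + 1);
}

test "two's complement bit counts" {
    const std = @import("std");
    try std.testing.expect(twosCompBitCount(255) == 8); // fits in u8
    try std.testing.expect(twosCompBitCount(-1) == 1); // a lone sign bit
    try std.testing.expect(twosCompBitCount(-128) == 8); // fits in i8
}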
- pub fn intBitCountTwosComp(self: Value) usize { + pub fn intBitCountTwosComp(self: Value, target: Target) usize { switch (self.tag()) { .zero, .bool_false, @@ -1244,6 +1244,15 @@ pub const Value = extern union { .int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(), .int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(), + .decl_ref_mut, + .extern_fn, + .decl_ref, + .function, + .variable, + .eu_payload_ptr, + .opt_payload_ptr, + => return target.cpu.arch.ptrBitWidth(), + else => { var buffer: BigIntSpace = undefined; return self.toBigInt(&buffer).bitCountTwosComp(); @@ -1333,6 +1342,20 @@ pub const Value = extern union { return true; }, + .decl_ref_mut, + .extern_fn, + .decl_ref, + .function, + .variable, + => { + const info = ty.intInfo(target); + const ptr_bits = target.cpu.arch.ptrBitWidth(); + return switch (info.signedness) { + .signed => info.bits > ptr_bits, + .unsigned => info.bits >= ptr_bits, + }; + }, + else => unreachable, } } @@ -1397,6 +1420,11 @@ pub const Value = extern union { .one, .bool_true, + .decl_ref, + .decl_ref_mut, + .extern_fn, + .function, + .variable, => .gt, .int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0), @@ -1417,10 +1445,18 @@ pub const Value = extern union { pub fn order(lhs: Value, rhs: Value) std.math.Order { const lhs_tag = lhs.tag(); const rhs_tag = rhs.tag(); - const lhs_is_zero = lhs_tag == .zero; - const rhs_is_zero = rhs_tag == .zero; - if (lhs_is_zero) return rhs.orderAgainstZero().invert(); - if (rhs_is_zero) return lhs.orderAgainstZero(); + const lhs_against_zero = lhs.orderAgainstZero(); + const rhs_against_zero = rhs.orderAgainstZero(); + switch (lhs_against_zero) { + .lt => if (rhs_against_zero != .lt) return .lt, + .eq => return rhs_against_zero.invert(), + .gt => {}, + } + switch (rhs_against_zero) { + .lt => if (lhs_against_zero != .lt) return .gt, + .eq => return lhs_against_zero, + .gt => {}, + } const lhs_float = lhs.isFloat(); const rhs_float = rhs.isFloat(); @@ -1451,6 +1487,27 @@ pub const Value = extern union { /// Asserts the value is comparable. Does not take a type parameter because it supports /// comparisons between heterogeneous types. 
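/// Decl pointers (functions, extern fns, variables) are compared by decl
/// rather than by address; a sketch (`a_ref`/`b_ref` are hypothetical
/// `decl_ref` values naming distinct decls):
///   std.debug.assert(compareHetero(a_ref, .neq, b_ref));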
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool { + if (lhs.pointerDecl()) |lhs_decl| { + if (rhs.pointerDecl()) |rhs_decl| { + switch (op) { + .eq => return lhs_decl == rhs_decl, + .neq => return lhs_decl != rhs_decl, + else => {}, + } + } else { + switch (op) { + .eq => return false, + .neq => return true, + else => {}, + } + } + } else if (rhs.pointerDecl()) |_| { + switch (op) { + .eq => return false, + .neq => return true, + else => {}, + } + } return order(lhs, rhs).compare(op); } @@ -1520,6 +1577,11 @@ pub const Value = extern union { } return true; }, + .function => { + const a_payload = a.castTag(.function).?.data; + const b_payload = b.castTag(.function).?.data; + return a_payload == b_payload; + }, else => {}, } } else if (a_tag == .null_value or b_tag == .null_value) { @@ -1573,6 +1635,7 @@ pub const Value = extern union { pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash) void { const zig_ty_tag = ty.zigTypeTag(); std.hash.autoHash(hasher, zig_ty_tag); + if (val.isUndef()) return; switch (zig_ty_tag) { .BoundFn => unreachable, // TODO remove this from the language @@ -1694,7 +1757,8 @@ pub const Value = extern union { union_obj.val.hash(active_field_ty, hasher); }, .Fn => { - @panic("TODO implement hashing function values"); + const func = val.castTag(.function).?.data; + return std.hash.autoHash(hasher, func.owner_decl); }, .Frame => { @panic("TODO implement hashing frame values"); @@ -1703,7 +1767,8 @@ pub const Value = extern union { @panic("TODO implement hashing anyframe values"); }, .EnumLiteral => { - @panic("TODO implement hashing enum literal values"); + const bytes = val.castTag(.enum_literal).?.data; + hasher.update(bytes); }, } } diff --git a/test/behavior.zig b/test/behavior.zig index cbd4bd3c69..03e1c635f3 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -2,22 +2,23 @@ const builtin = @import("builtin"); test { // Tests that pass for stage1, llvm backend, C backend, wasm backend, arm backend and x86_64 backend. + _ = @import("behavior/align.zig"); + _ = @import("behavior/array.zig"); + _ = @import("behavior/bool.zig"); + _ = @import("behavior/bugs/655.zig"); + _ = @import("behavior/bugs/679.zig"); _ = @import("behavior/bugs/1111.zig"); _ = @import("behavior/bugs/2346.zig"); - _ = @import("behavior/slice_sentinel_comptime.zig"); - _ = @import("behavior/bugs/679.zig"); _ = @import("behavior/bugs/6850.zig"); + _ = @import("behavior/cast.zig"); + _ = @import("behavior/comptime_memory.zig"); _ = @import("behavior/fn_in_struct_in_comptime.zig"); _ = @import("behavior/hasdecl.zig"); _ = @import("behavior/hasfield.zig"); _ = @import("behavior/prefetch.zig"); _ = @import("behavior/pub_enum.zig"); + _ = @import("behavior/slice_sentinel_comptime.zig"); _ = @import("behavior/type.zig"); - _ = @import("behavior/bugs/655.zig"); - _ = @import("behavior/bool.zig"); - _ = @import("behavior/align.zig"); - _ = @import("behavior/array.zig"); - _ = @import("behavior/cast.zig"); if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) { // Tests that pass for stage1, llvm backend, C backend, wasm backend. @@ -113,11 +114,7 @@ test { _ = @import("behavior/switch.zig"); _ = @import("behavior/widening.zig"); - if (builtin.zig_backend != .stage1) { - // When all comptime_memory.zig tests pass, #9646 can be closed. - // _ = @import("behavior/comptime_memory.zig"); - _ = @import("behavior/slice_stage2.zig"); - } else { + if (builtin.zig_backend == .stage1) { // Tests that only pass for the stage1 backend. 
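// Note: the "function alignment" test is no longer stage1-only; it now lives
// in align.zig (imported above) with per-backend skips.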
_ = @import("behavior/align_stage1.zig"); if (builtin.os.tag != .wasi) { diff --git a/test/behavior/align.zig b/test/behavior/align.zig index 26a914576c..1f9ebed497 100644 --- a/test/behavior/align.zig +++ b/test/behavior/align.zig @@ -165,8 +165,9 @@ fn give() anyerror!u128 { } test "page aligned array on stack" { - if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm or - builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // Large alignment value to make it hard to accidentally pass. var array align(0x1000) = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 }; @@ -181,3 +182,25 @@ test "page aligned array on stack" { try expect(number1 == 42); try expect(number2 == 43); } + +fn derp() align(@sizeOf(usize) * 2) i32 { + return 1234; +} +fn noop1() align(1) void {} +fn noop4() align(4) void {} + +test "function alignment" { + if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; + if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; + + // function alignment is a compile error on wasm32/wasm64 + if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; + + try expect(derp() == 1234); + try expect(@TypeOf(noop1) == fn () align(1) void); + try expect(@TypeOf(noop4) == fn () align(4) void); + noop1(); + noop4(); +} diff --git a/test/behavior/align_stage1.zig b/test/behavior/align_stage1.zig index 30d08abb38..71a8c87e82 100644 --- a/test/behavior/align_stage1.zig +++ b/test/behavior/align_stage1.zig @@ -3,23 +3,6 @@ const expect = std.testing.expect; const builtin = @import("builtin"); const native_arch = builtin.target.cpu.arch; -fn derp() align(@sizeOf(usize) * 2) i32 { - return 1234; -} -fn noop1() align(1) void {} -fn noop4() align(4) void {} - -test "function alignment" { - // function alignment is a compile error on wasm32/wasm64 - if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; - - try expect(derp() == 1234); - try expect(@TypeOf(noop1) == fn () align(1) void); - try expect(@TypeOf(noop4) == fn () align(4) void); - noop1(); - noop4(); -} - test "implicitly decreasing fn alignment" { // function alignment is a compile error on wasm32/wasm64 if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest; diff --git a/test/behavior/basic.zig b/test/behavior/basic.zig index 9064339877..3a7d95457e 100644 --- a/test/behavior/basic.zig +++ b/test/behavior/basic.zig @@ -259,6 +259,8 @@ fn fB() []const u8 { } test "call function pointer in struct" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + try expect(mem.eql(u8, f3(true), "a")); try expect(mem.eql(u8, f3(false), "b")); } @@ -276,7 +278,7 @@ fn f3(x: bool) []const u8 { } const FnPtrWrapper = struct { - fn_ptr: fn () []const u8, + fn_ptr: *const fn () []const u8, }; test "const ptr from var variable" { diff --git a/test/behavior/basic_llvm.zig b/test/behavior/basic_llvm.zig index 32b35bef0a..29cad01567 100644 --- a/test/behavior/basic_llvm.zig +++ b/test/behavior/basic_llvm.zig @@ -205,9 +205,11 @@ test "multiline string literal is null terminated" { } test "self reference through fn ptr field" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + const S = 
struct { const A = struct { - f: fn (A) u8, + f: *const fn (A) u8, }; fn foo(a: A) u8 { diff --git a/test/behavior/bugs/1500.zig b/test/behavior/bugs/1500.zig index 5683d53721..18fd40cef2 100644 --- a/test/behavior/bugs/1500.zig +++ b/test/behavior/bugs/1500.zig @@ -2,7 +2,7 @@ const A = struct { b: B, }; -const B = fn (A) void; +const B = *const fn (A) void; test "allow these dependencies" { var a: A = undefined; diff --git a/test/behavior/bugs/3112.zig b/test/behavior/bugs/3112.zig index 68e86c7fcb..ea2197eef1 100644 --- a/test/behavior/bugs/3112.zig +++ b/test/behavior/bugs/3112.zig @@ -1,9 +1,10 @@ +const builtin = @import("builtin"); const std = @import("std"); const expect = std.testing.expect; const State = struct { const Self = @This(); - enter: fn (previous: ?Self) void, + enter: *const fn (previous: ?Self) void, }; fn prev(p: ?State) void { @@ -11,6 +12,8 @@ fn prev(p: ?State) void { } test "zig test crash" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + var global: State = undefined; global.enter = prev; global.enter(null); diff --git a/test/behavior/cast_llvm.zig b/test/behavior/cast_llvm.zig index 1b27d10d59..79c2243a50 100644 --- a/test/behavior/cast_llvm.zig +++ b/test/behavior/cast_llvm.zig @@ -47,12 +47,14 @@ fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void { } test "compile time int to ptr of function" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO + try foobar(FUNCTION_CONSTANT); } pub const FUNCTION_CONSTANT = @intToPtr(PFN_void, maxInt(usize)); -pub const PFN_void = fn (*anyopaque) callconv(.C) void; +pub const PFN_void = *const fn (*anyopaque) callconv(.C) void; fn foobar(func: PFN_void) !void { try std.testing.expect(@ptrToInt(func) == maxInt(usize)); @@ -153,8 +155,12 @@ test "implicit cast *[0]T to E![]const u8" { } var global_array: [4]u8 = undefined; -test "cast from array reference to fn" { - const f = @ptrCast(fn () callconv(.C) void, &global_array); +test "cast from array reference to fn: comptime fn ptr" { + const f = @ptrCast(*const fn () callconv(.C) void, &global_array); + try expect(@ptrToInt(f) == @ptrToInt(&global_array)); +} +test "cast from array reference to fn: runtime fn ptr" { + var f = @ptrCast(*const fn () callconv(.C) void, &global_array); try expect(@ptrToInt(f) == @ptrToInt(&global_array)); } diff --git a/test/behavior/comptime_memory.zig b/test/behavior/comptime_memory.zig index 5547b9fd89..24a774aeb6 100644 --- a/test/behavior/comptime_memory.zig +++ b/test/behavior/comptime_memory.zig @@ -1,8 +1,15 @@ -const endian = @import("builtin").cpu.arch.endian(); +const builtin = @import("builtin"); +const endian = builtin.cpu.arch.endian(); const testing = @import("std").testing; const ptr_size = @sizeOf(usize); test "type pun signed and unsigned as single pointer" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { var x: u32 = 0; const y = @ptrCast(*i32, &x); @@ -12,6 +19,12 @@ test "type pun signed and unsigned as single pointer" { } test "type pun signed and unsigned as many pointer" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { var x: u32 = 0; const y = @ptrCast([*]i32, &x); @@ 
-21,6 +34,12 @@ test "type pun signed and unsigned as many pointer" { } test "type pun signed and unsigned as array pointer" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { var x: u32 = 0; const y = @ptrCast(*[1]i32, &x); @@ -30,6 +49,12 @@ test "type pun signed and unsigned as array pointer" { } test "type pun signed and unsigned as offset many pointer" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { var x: u32 = 0; var y = @ptrCast([*]i32, &x); @@ -40,6 +65,12 @@ test "type pun signed and unsigned as offset many pointer" { } test "type pun signed and unsigned as array pointer" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { var x: u32 = 0; const y = @ptrCast([*]i32, &x) - 10; @@ -50,6 +81,12 @@ test "type pun signed and unsigned as array pointer" { } test "type pun value and struct" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { const StructOfU32 = extern struct { x: u32 }; var inst: StructOfU32 = .{ .x = 0 }; @@ -64,6 +101,12 @@ fn bigToNativeEndian(comptime T: type, v: T) T { return if (endian == .Big) v else @byteSwap(T, v); } test "type pun endianness" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { const StructOfBytes = extern struct { x: [4]u8 }; var inst: StructOfBytes = .{ .x = [4]u8{ 0, 0, 0, 0 } }; @@ -155,6 +198,12 @@ fn doTypePunBitsTest(as_bits: *Bits) !void { } test "type pun bits" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { var v: u32 = undefined; try doTypePunBitsTest(@ptrCast(*Bits, &v)); @@ -167,6 +216,12 @@ const imports = struct { // Make sure lazy values work on their own, before getting into more complex tests test "basic pointer preservation" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { const lazy_address = @ptrToInt(&imports.global_u32); try testing.expectEqual(@ptrToInt(&imports.global_u32), lazy_address); @@ -175,6 +230,12 @@ test "basic pointer preservation" { } test "byte copy preserves linker value" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + const ct_value = comptime blk: { const lazy = &imports.global_u32; var result: *u32 = undefined; @@ -193,6 +254,12 @@ test "byte copy preserves linker value" { } test "unordered byte copy preserves linker value" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; 
+ } + const ct_value = comptime blk: { const lazy = &imports.global_u32; var result: *u32 = undefined; @@ -212,6 +279,12 @@ test "unordered byte copy preserves linker value" { } test "shuffle chunks of linker value" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + const lazy_address = @ptrToInt(&imports.global_u32); const shuffled1_rt = shuffle(lazy_address, Bits, ShuffledBits); const unshuffled1_rt = shuffle(shuffled1_rt, ShuffledBits, Bits); @@ -225,6 +298,12 @@ test "shuffle chunks of linker value" { } test "dance on linker values" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { var arr: [2]usize = undefined; arr[0] = @ptrToInt(&imports.global_u32); @@ -251,6 +330,12 @@ test "dance on linker values" { } test "offset array ptr by element size" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { const VirtualStruct = struct { x: u32 }; var arr: [4]VirtualStruct = .{ @@ -273,6 +358,12 @@ test "offset array ptr by element size" { } test "offset instance by field size" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { const VirtualStruct = struct { x: u32, y: u32, z: u32, w: u32 }; var inst = VirtualStruct{ .x = 0, .y = 1, .z = 2, .w = 3 }; @@ -293,6 +384,12 @@ test "offset instance by field size" { } test "offset field ptr by enclosing array element size" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + if (builtin.zig_backend != .stage1) { + // TODO https://github.com/ziglang/zig/issues/9646 + return error.SkipZigTest; + } + comptime { const VirtualStruct = struct { x: u32 }; var arr: [4]VirtualStruct = .{ diff --git a/test/behavior/error.zig b/test/behavior/error.zig index b7d4511fe9..3cb1bcf43b 100644 --- a/test/behavior/error.zig +++ b/test/behavior/error.zig @@ -1,3 +1,4 @@ +const builtin = @import("builtin"); const std = @import("std"); const expect = std.testing.expect; const expectError = std.testing.expectError; diff --git a/test/behavior/fn.zig b/test/behavior/fn.zig index 3a1f3e0b35..8cf9fbfe48 100644 --- a/test/behavior/fn.zig +++ b/test/behavior/fn.zig @@ -57,7 +57,7 @@ test "assign inline fn to const variable" { inline fn inlineFn() void {} -fn outer(y: u32) fn (u32) u32 { +fn outer(y: u32) *const fn (u32) u32 { const Y = @TypeOf(y); const st = struct { fn get(z: u32) u32 { @@ -68,6 +68,8 @@ fn outer(y: u32) fn (u32) u32 { } test "return inner function which references comptime variable of outer function" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + var func = outer(10); try expect(func(3) == 7); } @@ -92,6 +94,8 @@ test "discard the result of a function that returns a struct" { } test "inline function call that calls optional function pointer, return pointer at callsite interacts correctly with callsite return type" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + const S = struct { field: u32, @@ -113,7 +117,7 @@ test "inline function call that calls optional function pointer, return pointer return bar2.?(); } - var 
bar2: ?fn () u32 = null; + var bar2: ?*const fn () u32 = null; fn actualFn() u32 { return 1234; @@ -135,8 +139,10 @@ fn fnWithUnreachable() noreturn { } test "extern struct with stdcallcc fn pointer" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + const S = extern struct { - ptr: fn () callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32, + ptr: *const fn () callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32, fn foo() callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32 { return 1234; diff --git a/test/behavior/inttoptr.zig b/test/behavior/inttoptr.zig index ec26a09699..5c1acf51cd 100644 --- a/test/behavior/inttoptr.zig +++ b/test/behavior/inttoptr.zig @@ -1,14 +1,16 @@ const builtin = @import("builtin"); -test "casting random address to function pointer" { +test "casting integer address to function pointer" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO - randomAddressToFunction(); - comptime randomAddressToFunction(); + + addressToFunction(); + comptime addressToFunction(); } -fn randomAddressToFunction() void { +fn addressToFunction() void { var addr: usize = 0xdeadbeef; - _ = @intToPtr(fn () void, addr); + _ = @intToPtr(*const fn () void, addr); } test "mutate through ptr initialized with constant intToPtr value" { diff --git a/test/behavior/member_func.zig b/test/behavior/member_func.zig index 092a691901..3e4895e729 100644 --- a/test/behavior/member_func.zig +++ b/test/behavior/member_func.zig @@ -1,8 +1,10 @@ -const expect = @import("std").testing.expect; +const builtin = @import("builtin"); +const std = @import("std"); +const expect = std.testing.expect; const HasFuncs = struct { state: u32, - func_field: fn (u32) u32, + func_field: *const fn (u32) u32, fn inc(self: *HasFuncs) void { self.state += 1; @@ -25,6 +27,8 @@ const HasFuncs = struct { }; test "standard field calls" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + try expect(HasFuncs.one(0) == 1); try expect(HasFuncs.two(0) == 2); @@ -64,6 +68,8 @@ test "standard field calls" { } test "@field field calls" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + try expect(@field(HasFuncs, "one")(0) == 1); try expect(@field(HasFuncs, "two")(0) == 2); diff --git a/test/behavior/slice.zig b/test/behavior/slice.zig index 0332cff802..01ae10ee4e 100644 --- a/test/behavior/slice.zig +++ b/test/behavior/slice.zig @@ -1,3 +1,4 @@ +const builtin = @import("builtin"); const std = @import("std"); const expect = std.testing.expect; const expectEqualSlices = std.testing.expectEqualSlices; @@ -166,3 +167,15 @@ test "slicing zero length array" { try expect(mem.eql(u8, s1, "")); try expect(mem.eql(u32, s2, &[_]u32{})); } + +const x = @intToPtr([*]i32, 0x1000)[0..0x500]; +const y = x[0x100..]; +test "compile time slice of pointer to hard coded address" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + + try expect(@ptrToInt(x) == 0x1000); + try expect(x.len == 0x500); + + try expect(@ptrToInt(y) == 0x1400); + try expect(y.len == 0x400); +} diff --git a/test/behavior/slice_stage2.zig b/test/behavior/slice_stage2.zig deleted file mode 100644 index 360527e8ba..0000000000 --- a/test/behavior/slice_stage2.zig +++ /dev/null @@ -1,12 +0,0 @@ -const std = @import("std"); -const expect = std.testing.expect; - -const x = @intToPtr([*]i32, 0x1000)[0..0x500]; -const y = x[0x100..]; -test "compile time slice of 
pointer to hard coded address" { - try expect(@ptrToInt(x) == 0x1000); - try expect(x.len == 0x500); - - try expect(@ptrToInt(y) == 0x1400); - try expect(y.len == 0x400); -} diff --git a/test/behavior/union.zig b/test/behavior/union.zig index 12190e418c..3136646df5 100644 --- a/test/behavior/union.zig +++ b/test/behavior/union.zig @@ -1,3 +1,4 @@ +const builtin = @import("builtin"); const std = @import("std"); const expect = std.testing.expect; const expectEqual = std.testing.expectEqual; @@ -166,8 +167,10 @@ test "union with specified enum tag" { } test "packed union generates correctly aligned LLVM type" { + if (builtin.zig_backend == .stage1) return error.SkipZigTest; + const U = packed union { - f1: fn () error{TestUnexpectedResult}!void, + f1: *const fn () error{TestUnexpectedResult}!void, f2: u32, }; var foo = [_]U{ diff --git a/test/stage2/arm.zig b/test/stage2/arm.zig index 060d40e6e1..a0afcf9d6f 100644 --- a/test/stage2/arm.zig +++ b/test/stage2/arm.zig @@ -751,7 +751,7 @@ pub fn addCases(ctx: *TestContext) !void { { var case = ctx.exe("function pointers", linux_arm); case.addCompareOutput( - \\const PrintFn = fn () void; + \\const PrintFn = *const fn () void; \\ \\pub fn main() void { \\ var printFn: PrintFn = stopSayingThat;
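The pattern these test updates exercise, as a minimal standalone sketch (these declarations are illustrative, not part of the diff): under the new semantics a bare `fn` type is a comptime-only function body, so a runtime field or variable must hold a `*const fn` pointer, to which a function name still coerces.

    const std = @import("std");
    const Callback = *const fn (u32) u32; // stage1 spelled this `fn (u32) u32`
    fn addOne(x: u32) u32 {
        return x + 1;
    }
    test "function pointer under the new semantics" {
        var cb: Callback = addOne; // the function body coerces to *const fn
        try std.testing.expect(cb(41) == 42);
    }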