Merge pull request #10656 from ziglang/fn-ptr-type

stage2: type system treats fn ptr and body separately
Andrew Kelley 2022-01-25 12:42:52 -05:00 committed by GitHub
commit 0817d6b215
41 changed files with 2031 additions and 881 deletions

@ -730,10 +730,16 @@ pub const CompilerBackend = enum(u64) {
/// therefore must be kept in sync with the compiler implementation.
pub const TestFn = struct {
name: []const u8,
func: fn () anyerror!void,
func: testFnProto,
async_frame_size: ?usize,
};
/// stage1 is *wrong*. It is not yet updated to support the new function type semantics.
const testFnProto = switch (builtin.zig_backend) {
.stage1 => fn () anyerror!void, // wrong!
else => *const fn () anyerror!void,
};
/// This function type is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const PanicFn = fn ([]const u8, ?*StackTrace) noreturn;
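A minimal sketch of the semantics the commit title and the `TestFn` hunk above reflect, assuming the post-stage2 rules this change is moving toward: a declared function has a function *body* type, while a value that can be stored and called at runtime is a `*const fn` pointer. The example below is illustrative only and is not part of the diff.

```zig
const std = @import("std");

fn add(a: i32, b: i32) i32 {
    return a + b;
}

pub fn main() void {
    // Under the new semantics, `add` itself has a function *body* type.
    const Body = @TypeOf(add); // fn (i32, i32) i32
    // A value that can be stored in a struct field (like `TestFn.func`)
    // and called at runtime is a const function pointer.
    const ptr: *const fn (i32, i32) i32 = &add;
    std.debug.print("{s} -> {d}\n", .{ @typeName(Body), ptr(1, 2) });
}
```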

@ -3240,7 +3240,8 @@ fn fnDecl(
const doc_comment_index = try astgen.docCommentAsString(fn_proto.firstToken());
const has_section_or_addrspace = fn_proto.ast.section_expr != 0 or fn_proto.ast.addrspace_expr != 0;
wip_members.nextDecl(is_pub, is_export, fn_proto.ast.align_expr != 0, has_section_or_addrspace);
// Alignment is passed in the func instruction in this case.
wip_members.nextDecl(is_pub, is_export, false, has_section_or_addrspace);
var params_scope = &fn_gz.base;
const is_var_args = is_var_args: {
@ -3380,7 +3381,7 @@ fn fnDecl(
.param_block = block_inst,
.body_gz = null,
.cc = cc,
.align_inst = .none, // passed in the per-decl data
.align_inst = align_inst,
.lib_name = lib_name,
.is_var_args = is_var_args,
.is_inferred_error = false,
@ -3423,7 +3424,7 @@ fn fnDecl(
.ret_br = ret_br,
.body_gz = &fn_gz,
.cc = cc,
.align_inst = .none, // passed in the per-decl data
.align_inst = align_inst,
.lib_name = lib_name,
.is_var_args = is_var_args,
.is_inferred_error = is_inferred_error,
@ -3449,9 +3450,6 @@ fn fnDecl(
wip_members.appendToDecl(fn_name_str_index);
wip_members.appendToDecl(block_inst);
wip_members.appendToDecl(doc_comment_index);
if (align_inst != .none) {
wip_members.appendToDecl(@enumToInt(align_inst));
}
if (has_section_or_addrspace) {
wip_members.appendToDecl(@enumToInt(section_inst));
wip_members.appendToDecl(@enumToInt(addrspace_inst));
@ -3830,7 +3828,8 @@ fn structDeclInner(
.fields_len = 0,
.body_len = 0,
.decls_len = 0,
.known_has_bits = false,
.known_non_opv = false,
.known_comptime_only = false,
});
return indexToRef(decl_inst);
}
@ -3871,7 +3870,8 @@ fn structDeclInner(
var wip_members = try WipMembers.init(gpa, &astgen.scratch, decl_count, field_count, bits_per_field, max_field_size);
defer wip_members.deinit();
var known_has_bits = false;
var known_non_opv = false;
var known_comptime_only = false;
for (container_decl.ast.members) |member_node| {
const member = switch (try containerMember(gz, &namespace.base, &wip_members, member_node)) {
.decl => continue,
@ -3894,7 +3894,10 @@ fn structDeclInner(
const doc_comment_index = try astgen.docCommentAsString(member.firstToken());
wip_members.appendToField(doc_comment_index);
known_has_bits = known_has_bits or nodeImpliesRuntimeBits(tree, member.ast.type_expr);
known_non_opv = known_non_opv or
nodeImpliesMoreThanOnePossibleValue(tree, member.ast.type_expr);
known_comptime_only = known_comptime_only or
nodeImpliesComptimeOnly(tree, member.ast.type_expr);
const have_align = member.ast.align_expr != 0;
const have_value = member.ast.value_expr != 0;
@ -3928,7 +3931,8 @@ fn structDeclInner(
.body_len = @intCast(u32, body.len),
.fields_len = field_count,
.decls_len = decl_count,
.known_has_bits = known_has_bits,
.known_non_opv = known_non_opv,
.known_comptime_only = known_comptime_only,
});
wip_members.finishBits(bits_per_field);
@ -8197,7 +8201,9 @@ fn nodeMayEvalToError(tree: *const Ast, start_node: Ast.Node.Index) BuiltinFn.Ev
}
}
fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
/// Returns `true` if it is known the type expression has more than one possible value;
/// `false` otherwise.
fn nodeImpliesMoreThanOnePossibleValue(tree: *const Ast, start_node: Ast.Node.Index) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
@ -8243,7 +8249,6 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.multiline_string_literal,
.char_literal,
.unreachable_literal,
.identifier,
.error_set_decl,
.container_decl,
.container_decl_trailing,
@ -8357,6 +8362,11 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.builtin_call_comma,
.builtin_call_two,
.builtin_call_two_comma,
// these are function bodies, not pointers
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
=> return false,
// Forward the question to the LHS sub-expression.
@ -8368,10 +8378,6 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.unwrap_optional,
=> node = node_datas[node].lhs,
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
.ptr_type_aligned,
.ptr_type_sentinel,
.ptr_type,
@ -8380,6 +8386,301 @@ fn nodeImpliesRuntimeBits(tree: *const Ast, start_node: Ast.Node.Index) bool {
.anyframe_type,
.array_type_sentinel,
=> return true,
.identifier => {
const main_tokens = tree.nodes.items(.main_token);
const ident_bytes = tree.tokenSlice(main_tokens[node]);
if (primitives.get(ident_bytes)) |primitive| switch (primitive) {
.anyerror_type,
.anyframe_type,
.anyopaque_type,
.bool_type,
.c_int_type,
.c_long_type,
.c_longdouble_type,
.c_longlong_type,
.c_short_type,
.c_uint_type,
.c_ulong_type,
.c_ulonglong_type,
.c_ushort_type,
.comptime_float_type,
.comptime_int_type,
.f128_type,
.f16_type,
.f32_type,
.f64_type,
.i16_type,
.i32_type,
.i64_type,
.i128_type,
.i8_type,
.isize_type,
.type_type,
.u16_type,
.u32_type,
.u64_type,
.u128_type,
.u1_type,
.u8_type,
.usize_type,
=> return true,
.void_type,
.bool_false,
.bool_true,
.null_value,
.undef,
.noreturn_type,
=> return false,
else => unreachable, // that's all the values from `primitives`.
} else {
return false;
}
},
}
}
}
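A small aside on the property that `nodeImpliesMoreThanOnePossibleValue` and the new `known_non_opv` bit approximate: a type with exactly one possible value ("OPV") needs no runtime bits, while a type with more than one does. The types below are my own illustration, not code from this commit.

```zig
const std = @import("std");

// Exactly one possible value: such a type carries no information,
// so it needs no runtime bits.
const Empty = struct {};

// More than one possible value: needs runtime storage.
const Byte = u8;

comptime {
    std.debug.assert(@sizeOf(Empty) == 0);
    std.debug.assert(@sizeOf(void) == 0);
    std.debug.assert(@sizeOf(Byte) == 1);
}
```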
/// Returns `true` if it is known the expression is a type that cannot be used at runtime;
/// `false` otherwise.
fn nodeImpliesComptimeOnly(tree: *const Ast, start_node: Ast.Node.Index) bool {
const node_tags = tree.nodes.items(.tag);
const node_datas = tree.nodes.items(.data);
var node = start_node;
while (true) {
switch (node_tags[node]) {
.root,
.@"usingnamespace",
.test_decl,
.switch_case,
.switch_case_one,
.container_field_init,
.container_field_align,
.container_field,
.asm_output,
.asm_input,
.global_var_decl,
.local_var_decl,
.simple_var_decl,
.aligned_var_decl,
=> unreachable,
.@"return",
.@"break",
.@"continue",
.bit_not,
.bool_not,
.@"defer",
.@"errdefer",
.address_of,
.negation,
.negation_wrap,
.@"resume",
.array_type,
.@"suspend",
.@"anytype",
.fn_decl,
.anyframe_literal,
.integer_literal,
.float_literal,
.enum_literal,
.string_literal,
.multiline_string_literal,
.char_literal,
.unreachable_literal,
.error_set_decl,
.container_decl,
.container_decl_trailing,
.container_decl_two,
.container_decl_two_trailing,
.container_decl_arg,
.container_decl_arg_trailing,
.tagged_union,
.tagged_union_trailing,
.tagged_union_two,
.tagged_union_two_trailing,
.tagged_union_enum_tag,
.tagged_union_enum_tag_trailing,
.@"asm",
.asm_simple,
.add,
.add_wrap,
.add_sat,
.array_cat,
.array_mult,
.assign,
.assign_bit_and,
.assign_bit_or,
.assign_shl,
.assign_shl_sat,
.assign_shr,
.assign_bit_xor,
.assign_div,
.assign_sub,
.assign_sub_wrap,
.assign_sub_sat,
.assign_mod,
.assign_add,
.assign_add_wrap,
.assign_add_sat,
.assign_mul,
.assign_mul_wrap,
.assign_mul_sat,
.bang_equal,
.bit_and,
.bit_or,
.shl,
.shl_sat,
.shr,
.bit_xor,
.bool_and,
.bool_or,
.div,
.equal_equal,
.error_union,
.greater_or_equal,
.greater_than,
.less_or_equal,
.less_than,
.merge_error_sets,
.mod,
.mul,
.mul_wrap,
.mul_sat,
.switch_range,
.field_access,
.sub,
.sub_wrap,
.sub_sat,
.slice,
.slice_open,
.slice_sentinel,
.deref,
.array_access,
.error_value,
.while_simple,
.while_cont,
.for_simple,
.if_simple,
.@"catch",
.@"orelse",
.array_init_one,
.array_init_one_comma,
.array_init_dot_two,
.array_init_dot_two_comma,
.array_init_dot,
.array_init_dot_comma,
.array_init,
.array_init_comma,
.struct_init_one,
.struct_init_one_comma,
.struct_init_dot_two,
.struct_init_dot_two_comma,
.struct_init_dot,
.struct_init_dot_comma,
.struct_init,
.struct_init_comma,
.@"while",
.@"if",
.@"for",
.@"switch",
.switch_comma,
.call_one,
.call_one_comma,
.async_call_one,
.async_call_one_comma,
.call,
.call_comma,
.async_call,
.async_call_comma,
.block_two,
.block_two_semicolon,
.block,
.block_semicolon,
.builtin_call,
.builtin_call_comma,
.builtin_call_two,
.builtin_call_two_comma,
.ptr_type_aligned,
.ptr_type_sentinel,
.ptr_type,
.ptr_type_bit_range,
.optional_type,
.anyframe_type,
.array_type_sentinel,
=> return false,
// these are function bodies, not pointers
.fn_proto_simple,
.fn_proto_multi,
.fn_proto_one,
.fn_proto,
=> return true,
// Forward the question to the LHS sub-expression.
.grouped_expression,
.@"try",
.@"await",
.@"comptime",
.@"nosuspend",
.unwrap_optional,
=> node = node_datas[node].lhs,
.identifier => {
const main_tokens = tree.nodes.items(.main_token);
const ident_bytes = tree.tokenSlice(main_tokens[node]);
if (primitives.get(ident_bytes)) |primitive| switch (primitive) {
.anyerror_type,
.anyframe_type,
.anyopaque_type,
.bool_type,
.c_int_type,
.c_long_type,
.c_longdouble_type,
.c_longlong_type,
.c_short_type,
.c_uint_type,
.c_ulong_type,
.c_ulonglong_type,
.c_ushort_type,
.f128_type,
.f16_type,
.f32_type,
.f64_type,
.i16_type,
.i32_type,
.i64_type,
.i128_type,
.i8_type,
.isize_type,
.u16_type,
.u32_type,
.u64_type,
.u128_type,
.u1_type,
.u8_type,
.usize_type,
.void_type,
.bool_false,
.bool_true,
.null_value,
.undef,
.noreturn_type,
=> return false,
.comptime_float_type,
.comptime_int_type,
.type_type,
=> return true,
else => unreachable, // that's all the values from `primitives`.
} else {
return false;
}
},
}
}
}
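Likewise, a brief illustration of what `known_comptime_only` tracks (again my own example, assuming the usual comptime-only rules): values of types such as `type` or `comptime_int` can exist only at compile time, so a struct containing such a field cannot be used at runtime.

```zig
// A field of type `type` makes the whole struct comptime-only.
const ComptimeOnly = struct {
    T: type,
};

// All fields have runtime bits, so this struct can exist at runtime.
const RuntimeOk = struct {
    x: u32,
};

var runtime_value: RuntimeOk = .{ .x = 1 }; // OK: has a runtime representation

comptime {
    const c = ComptimeOnly{ .T = u8 }; // OK at compile time
    _ = c;
}

// Declaring `var v: ComptimeOnly = ...;` as a runtime variable would be a
// compile error, since `type` values have no runtime representation.
```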
@ -10120,7 +10421,8 @@ const GenZir = struct {
fields_len: u32,
decls_len: u32,
layout: std.builtin.TypeInfo.ContainerLayout,
known_has_bits: bool,
known_non_opv: bool,
known_comptime_only: bool,
}) !void {
const astgen = gz.astgen;
const gpa = astgen.gpa;
@ -10150,7 +10452,8 @@ const GenZir = struct {
.has_body_len = args.body_len != 0,
.has_fields_len = args.fields_len != 0,
.has_decls_len = args.decls_len != 0,
.known_has_bits = args.known_has_bits,
.known_non_opv = args.known_non_opv,
.known_comptime_only = args.known_comptime_only,
.name_strategy = gz.anon_name_strategy,
.layout = args.layout,
}),

@ -2703,7 +2703,6 @@ fn processOneJob(comp: *Compilation, job: Job, main_progress_node: *std.Progress
const module = comp.bin_file.options.module.?;
assert(decl.has_tv);
assert(decl.ty.hasCodeGenBits());
if (decl.alive) {
try module.linkerUpdateDecl(decl);

@ -848,9 +848,11 @@ pub const Struct = struct {
// which `have_layout` does not ensure.
fully_resolved,
},
/// If true, definitely nonzero size at runtime. If false, resolving the fields
/// is necessary to determine whether it has bits at runtime.
known_has_bits: bool,
/// If true, the type has more than one possible value. However, it may still be a
/// non-runtime type if it is comptime-only.
/// If false, resolving the fields is necessary to determine whether the type has only
/// one possible value.
known_non_opv: bool,
requires_comptime: RequiresComptime = .unknown,
pub const Fields = std.StringArrayHashMapUnmanaged(Field);
@ -898,6 +900,45 @@ pub const Struct = struct {
};
}
pub fn fieldSrcLoc(s: Struct, gpa: Allocator, query: FieldSrcQuery) SrcLoc {
@setCold(true);
const tree = s.owner_decl.getFileScope().getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
s.owner_decl.getFileScope().sub_file_path, @errorName(err),
});
return s.srcLoc();
};
const node = s.owner_decl.relativeToNodeIndex(s.node_offset);
const node_tags = tree.nodes.items(.tag);
const file = s.owner_decl.getFileScope();
switch (node_tags[node]) {
.container_decl,
.container_decl_trailing,
=> return queryFieldSrc(tree.*, query, file, tree.containerDecl(node)),
.container_decl_two, .container_decl_two_trailing => {
var buffer: [2]Ast.Node.Index = undefined;
return queryFieldSrc(tree.*, query, file, tree.containerDeclTwo(&buffer, node));
},
.container_decl_arg,
.container_decl_arg_trailing,
=> return queryFieldSrc(tree.*, query, file, tree.containerDeclArg(node)),
.tagged_union,
.tagged_union_trailing,
=> return queryFieldSrc(tree.*, query, file, tree.taggedUnion(node)),
.tagged_union_two, .tagged_union_two_trailing => {
var buffer: [2]Ast.Node.Index = undefined;
return queryFieldSrc(tree.*, query, file, tree.taggedUnionTwo(&buffer, node));
},
.tagged_union_enum_tag,
.tagged_union_enum_tag_trailing,
=> return queryFieldSrc(tree.*, query, file, tree.taggedUnionEnumTag(node)),
else => unreachable,
}
}
pub fn haveFieldTypes(s: Struct) bool {
return switch (s.status) {
.none,
@ -1063,6 +1104,33 @@ pub const Union = struct {
};
}
pub fn fieldSrcLoc(u: Union, gpa: Allocator, query: FieldSrcQuery) SrcLoc {
@setCold(true);
const tree = u.owner_decl.getFileScope().getTree(gpa) catch |err| {
// In this case we emit a warning + a less precise source location.
log.warn("unable to load {s}: {s}", .{
u.owner_decl.getFileScope().sub_file_path, @errorName(err),
});
return u.srcLoc();
};
const node = u.owner_decl.relativeToNodeIndex(u.node_offset);
const node_tags = tree.nodes.items(.tag);
const file = u.owner_decl.getFileScope();
switch (node_tags[node]) {
.container_decl,
.container_decl_trailing,
=> return queryFieldSrc(tree.*, query, file, tree.containerDecl(node)),
.container_decl_two, .container_decl_two_trailing => {
var buffer: [2]Ast.Node.Index = undefined;
return queryFieldSrc(tree.*, query, file, tree.containerDeclTwo(&buffer, node));
},
.container_decl_arg,
.container_decl_arg_trailing,
=> return queryFieldSrc(tree.*, query, file, tree.containerDeclArg(node)),
else => unreachable,
}
}
pub fn haveFieldTypes(u: Union) bool {
return switch (u.status) {
.none,
@ -1080,7 +1148,7 @@ pub const Union = struct {
pub fn hasAllZeroBitFieldTypes(u: Union) bool {
assert(u.haveFieldTypes());
for (u.fields.values()) |field| {
if (field.ty.hasCodeGenBits()) return false;
if (field.ty.hasRuntimeBits()) return false;
}
return true;
}
@ -1090,7 +1158,7 @@ pub const Union = struct {
var most_alignment: u32 = 0;
var most_index: usize = undefined;
for (u.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = a: {
if (field.abi_align.tag() == .abi_align_default) {
@ -1111,7 +1179,7 @@ pub const Union = struct {
var max_align: u32 = 0;
if (have_tag) max_align = u.tag_ty.abiAlignment(target);
for (u.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = a: {
if (field.abi_align.tag() == .abi_align_default) {
@ -1164,7 +1232,7 @@ pub const Union = struct {
var payload_size: u64 = 0;
var payload_align: u32 = 0;
for (u.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = a: {
if (field.abi_align.tag() == .abi_align_default) {
@ -3391,7 +3459,7 @@ pub fn semaFile(mod: *Module, file: *File) SemaError!void {
.zir_index = undefined, // set below
.layout = .Auto,
.status = .none,
.known_has_bits = undefined,
.known_non_opv = undefined,
.namespace = .{
.parent = null,
.ty = struct_ty,
@ -3628,7 +3696,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
var type_changed = true;
if (decl.has_tv) {
prev_type_has_bits = decl.ty.hasCodeGenBits();
prev_type_has_bits = decl.ty.isFnOrHasRuntimeBits();
type_changed = !decl.ty.eql(decl_tv.ty);
if (decl.getFunction()) |prev_func| {
prev_is_inline = prev_func.state == .inline_only;
@ -3648,8 +3716,9 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.analysis = .complete;
decl.generation = mod.generation;
const is_inline = decl_tv.ty.fnCallingConvention() == .Inline;
if (!is_inline and decl_tv.ty.hasCodeGenBits()) {
const has_runtime_bits = try sema.fnHasRuntimeBits(&block_scope, src, decl.ty);
if (has_runtime_bits) {
// We don't fully codegen the decl until later, but we do need to reserve a global
// offset table index for it. This allows us to codegen decls out of dependency
// order, increasing how many computations can be done in parallel.
@ -3662,6 +3731,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
mod.comp.bin_file.freeDecl(decl);
}
const is_inline = decl.ty.fnCallingConvention() == .Inline;
if (decl.is_exported) {
const export_src = src; // TODO make this point at `export` token
if (is_inline) {
@ -3682,6 +3752,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.owns_tv = false;
var queue_linker_work = false;
var is_extern = false;
switch (decl_tv.val.tag()) {
.variable => {
const variable = decl_tv.val.castTag(.variable).?.data;
@ -3698,6 +3769,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
if (decl == owner_decl) {
decl.owns_tv = true;
queue_linker_work = true;
is_extern = true;
}
},
@ -3723,7 +3795,10 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool {
decl.analysis = .complete;
decl.generation = mod.generation;
if (queue_linker_work and decl.ty.hasCodeGenBits()) {
const has_runtime_bits = is_extern or
(queue_linker_work and try sema.typeHasRuntimeBits(&block_scope, src, decl.ty));
if (has_runtime_bits) {
log.debug("queue linker work for {*} ({s})", .{ decl, decl.name });
try mod.comp.bin_file.allocateDeclIndexes(decl);
@ -4224,7 +4299,7 @@ pub fn clearDecl(
mod.deleteDeclExports(decl);
if (decl.has_tv) {
if (decl.ty.hasCodeGenBits()) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl);
// TODO instead of a union, put this memory trailing Decl objects,
@ -4277,7 +4352,7 @@ pub fn deleteUnusedDecl(mod: *Module, decl: *Decl) void {
switch (mod.comp.bin_file.tag) {
.c => {}, // this linker backend has already migrated to the new API
else => if (decl.has_tv) {
if (decl.ty.hasCodeGenBits()) {
if (decl.ty.isFnOrHasRuntimeBits()) {
mod.comp.bin_file.freeDecl(decl);
}
},
@ -4662,8 +4737,8 @@ pub fn createAnonymousDeclFromDeclNamed(
new_decl.src_line = src_decl.src_line;
new_decl.ty = typed_value.ty;
new_decl.val = typed_value.val;
new_decl.align_val = Value.initTag(.null_value);
new_decl.linksection_val = Value.initTag(.null_value);
new_decl.align_val = Value.@"null";
new_decl.linksection_val = Value.@"null";
new_decl.has_tv = true;
new_decl.analysis = .complete;
new_decl.generation = mod.generation;
@ -4674,7 +4749,7 @@ pub fn createAnonymousDeclFromDeclNamed(
// if the Decl is referenced by an instruction or another constant. Otherwise,
// the Decl will be garbage collected by the `codegen_decl` task instead of sent
// to the linker.
if (typed_value.ty.hasCodeGenBits()) {
if (typed_value.ty.isFnOrHasRuntimeBits()) {
try mod.comp.bin_file.allocateDeclIndexes(new_decl);
try mod.comp.anon_work_queue.writeItem(.{ .codegen_decl = new_decl });
}
@ -4905,6 +4980,55 @@ pub const PeerTypeCandidateSrc = union(enum) {
}
};
const FieldSrcQuery = struct {
index: usize,
range: enum { name, type, value, alignment },
};
fn queryFieldSrc(
tree: Ast,
query: FieldSrcQuery,
file_scope: *File,
container_decl: Ast.full.ContainerDecl,
) SrcLoc {
const node_tags = tree.nodes.items(.tag);
var field_index: usize = 0;
for (container_decl.ast.members) |member_node| {
const field = switch (node_tags[member_node]) {
.container_field_init => tree.containerFieldInit(member_node),
.container_field_align => tree.containerFieldAlign(member_node),
.container_field => tree.containerField(member_node),
else => continue,
};
if (field_index == query.index) {
return switch (query.range) {
.name => .{
.file_scope = file_scope,
.parent_decl_node = 0,
.lazy = .{ .token_abs = field.ast.name_token },
},
.type => .{
.file_scope = file_scope,
.parent_decl_node = 0,
.lazy = .{ .node_abs = field.ast.type_expr },
},
.value => .{
.file_scope = file_scope,
.parent_decl_node = 0,
.lazy = .{ .node_abs = field.ast.value_expr },
},
.alignment => .{
.file_scope = file_scope,
.parent_decl_node = 0,
.lazy = .{ .node_abs = field.ast.align_expr },
},
};
}
field_index += 1;
}
unreachable;
}
/// Called from `performAllTheWork`, after all AstGen workers have finished,
/// and before the main semantic analysis loop begins.
pub fn processOutdatedAndDeletedDecls(mod: *Module) !void {

File diff suppressed because it is too large.

@ -2599,10 +2599,11 @@ pub const Inst = struct {
has_body_len: bool,
has_fields_len: bool,
has_decls_len: bool,
known_has_bits: bool,
known_non_opv: bool,
known_comptime_only: bool,
name_strategy: NameStrategy,
layout: std.builtin.TypeInfo.ContainerLayout,
_: u7 = undefined,
_: u6 = undefined,
};
};
@ -3273,6 +3274,7 @@ fn findDeclsBody(
pub const FnInfo = struct {
param_body: []const Inst.Index,
param_body_inst: Inst.Index,
ret_ty_body: []const Inst.Index,
body: []const Inst.Index,
total_params_len: u32,
@ -3338,6 +3340,7 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
}
return .{
.param_body = param_body,
.param_body_inst = info.param_block,
.ret_ty_body = info.ret_ty_body,
.body = info.body,
.total_params_len = total_params_len,

@ -713,7 +713,7 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
@ -1279,7 +1279,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasCodeGenBits())
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@ -2155,7 +2155,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasCodeGenBits()) {
if (self.air.typeOf(operand).hasRuntimeBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@ -2608,7 +2608,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasCodeGenBits()) {
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@ -2616,7 +2616,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasCodeGenBits())
if (!inst_ty.hasRuntimeBits())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@ -2672,11 +2672,43 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
_ = tv;
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
@ -2693,28 +2725,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO codegen for const slices", .{});
},
else => {
if (typed_value.val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
}
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
}
@ -2794,7 +2804,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const payload_type = typed_value.ty.errorUnionPayload();
const sub_val = typed_value.val.castTag(.eu_payload).?.data;
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
}
@ -2888,7 +2898,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag() == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasCodeGenBits()) {
} else if (!ret_ty.hasRuntimeBits()) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,

@ -1074,7 +1074,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const error_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) break :result mcv;
if (!payload_ty.hasRuntimeBits()) break :result mcv;
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
};
@ -1086,7 +1086,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const error_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = error_union_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) break :result MCValue.none;
if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
};
@ -1135,7 +1135,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) break :result mcv;
if (!payload_ty.hasRuntimeBits()) break :result mcv;
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
};
@ -1506,7 +1506,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasCodeGenBits())
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@ -2666,9 +2666,9 @@ fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
if (!error_type.hasCodeGenBits()) {
if (!error_type.hasRuntimeBits()) {
return MCValue{ .immediate = 0 }; // always false
} else if (!payload_type.hasCodeGenBits()) {
} else if (!payload_type.hasRuntimeBits()) {
if (error_type.abiSize(self.target.*) <= 4) {
const reg_mcv: MCValue = switch (operand) {
.register => operand,
@ -2900,7 +2900,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasCodeGenBits()) {
if (self.air.typeOf(operand).hasRuntimeBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@ -3658,7 +3658,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasCodeGenBits()) {
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@ -3666,7 +3666,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasCodeGenBits())
if (!inst_ty.hasRuntimeBits())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@ -3701,11 +3701,45 @@ fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue {
}
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
_ = tv;
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
@ -3722,28 +3756,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO codegen for const slices", .{});
},
else => {
if (typed_value.val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
}
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = @intCast(u32, typed_value.val.toUnsignedInt()) };
}
@ -3812,7 +3824,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const payload_type = typed_value.ty.errorUnionPayload();
if (typed_value.val.castTag(.eu_payload)) |pl| {
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return MCValue{ .immediate = 0 };
}
@ -3820,7 +3832,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
_ = pl;
return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
} else {
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
}
@ -3918,7 +3930,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag() == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasCodeGenBits()) {
} else if (!ret_ty.hasRuntimeBits()) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,

@ -372,7 +372,7 @@ fn dbgAdvancePCAndLine(self: *Emit, line: u32, column: u32) !void {
fn addDbgInfoTypeReloc(self: *Emit, ty: Type) !void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4

@ -691,7 +691,7 @@ fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void {
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
switch (self.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4
@ -1223,7 +1223,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasCodeGenBits())
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@ -1769,7 +1769,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasCodeGenBits()) {
if (self.air.typeOf(operand).hasRuntimeBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@ -2107,7 +2107,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasCodeGenBits()) {
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@ -2115,7 +2115,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasCodeGenBits())
if (!inst_ty.hasRuntimeBits())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@ -2171,11 +2171,42 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
_ = tv;
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
@ -2192,28 +2223,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO codegen for const slices", .{});
},
else => {
if (typed_value.val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
}
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
}
@ -2290,7 +2299,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const payload_type = typed_value.ty.errorUnionPayload();
const sub_val = typed_value.val.castTag(.eu_payload).?.data;
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = sub_val });
}
@ -2381,7 +2390,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag() == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasCodeGenBits()) {
} else if (!ret_ty.hasRuntimeBits()) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,

@ -598,7 +598,7 @@ fn resolveInst(self: *Self, ref: Air.Inst.Ref) InnerError!WValue {
// means we must generate it from a constant.
const val = self.air.value(ref).?;
const ty = self.air.typeOf(ref);
if (!ty.hasCodeGenBits() and !ty.isInt()) return WValue{ .none = {} };
if (!ty.hasRuntimeBits() and !ty.isInt()) return WValue{ .none = {} };
// When we need to pass the value by reference (such as a struct), we will
// leverage `genTypedValue` to lower the constant to bytes and emit it
@ -790,13 +790,13 @@ fn genFunctype(gpa: Allocator, fn_ty: Type, target: std.Target) !wasm.Type {
defer gpa.free(fn_params);
fn_ty.fnParamTypes(fn_params);
for (fn_params) |param_type| {
if (!param_type.hasCodeGenBits()) continue;
if (!param_type.hasRuntimeBits()) continue;
try params.append(typeToValtype(param_type, target));
}
}
// return type
if (!want_sret and return_type.hasCodeGenBits()) {
if (!want_sret and return_type.hasRuntimeBits()) {
try returns.append(typeToValtype(return_type, target));
}
@ -935,7 +935,7 @@ pub const DeclGen = struct {
const abi_size = @intCast(usize, ty.abiSize(self.target()));
const offset = abi_size - @intCast(usize, payload_type.abiSize(self.target()));
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
try writer.writeByteNTimes(@boolToInt(is_pl), abi_size);
return Result{ .appended = {} };
}
@ -1044,7 +1044,7 @@ pub const DeclGen = struct {
const field_vals = val.castTag(.@"struct").?.data;
for (field_vals) |field_val, index| {
const field_ty = ty.structFieldType(index);
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
switch (try self.genTypedValue(field_ty, field_val, writer)) {
.appended => {},
.externally_managed => |payload| try writer.writeAll(payload),
@ -1093,7 +1093,7 @@ pub const DeclGen = struct {
.appended => {},
}
if (payload_ty.hasCodeGenBits()) {
if (payload_ty.hasRuntimeBits()) {
const pl_val = if (val.castTag(.eu_payload)) |pl| pl.data else Value.initTag(.undef);
switch (try self.genTypedValue(payload_ty, pl_val, writer)) {
.externally_managed => |data| try writer.writeAll(data),
@ -1180,7 +1180,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) InnerError!CallWValu
.Naked => return result,
.Unspecified, .C => {
for (param_types) |ty, ty_index| {
if (!ty.hasCodeGenBits()) {
if (!ty.hasRuntimeBits()) {
result.args[ty_index] = .{ .none = {} };
continue;
}
@ -1243,7 +1243,7 @@ fn moveStack(self: *Self, offset: u32, local: u32) !void {
///
/// Asserts Type has codegenbits
fn allocStack(self: *Self, ty: Type) !WValue {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
// calculate needed stack space
const abi_size = std.math.cast(u32, ty.abiSize(self.target)) catch {
@ -1319,22 +1319,22 @@ fn isByRef(ty: Type, target: std.Target) bool {
.Struct,
.Frame,
.Union,
=> return ty.hasCodeGenBits(),
=> return ty.hasRuntimeBits(),
.Int => return if (ty.intInfo(target).bits > 64) true else false,
.ErrorUnion => {
const has_tag = ty.errorUnionSet().hasCodeGenBits();
const has_pl = ty.errorUnionPayload().hasCodeGenBits();
const has_tag = ty.errorUnionSet().hasRuntimeBits();
const has_pl = ty.errorUnionPayload().hasRuntimeBits();
if (!has_tag or !has_pl) return false;
return ty.hasCodeGenBits();
return ty.hasRuntimeBits();
},
.Optional => {
if (ty.isPtrLikeOptional()) return false;
var buf: Type.Payload.ElemType = undefined;
return ty.optionalChild(&buf).hasCodeGenBits();
return ty.optionalChild(&buf).hasRuntimeBits();
},
.Pointer => {
// Slices act like struct and will be passed by reference
if (ty.isSlice()) return ty.hasCodeGenBits();
if (ty.isSlice()) return ty.hasRuntimeBits();
return false;
},
}
@ -1563,7 +1563,7 @@ fn airRetLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const operand = try self.resolveInst(un_op);
const ret_ty = self.air.typeOf(un_op).childType();
if (!ret_ty.hasCodeGenBits()) return WValue.none;
if (!ret_ty.hasRuntimeBits()) return WValue.none;
if (!isByRef(ret_ty, self.target)) {
const result = try self.load(operand, ret_ty, 0);
@ -1611,7 +1611,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const arg_val = try self.resolveInst(arg_ref);
const arg_ty = self.air.typeOf(arg_ref);
if (!arg_ty.hasCodeGenBits()) continue;
if (!arg_ty.hasRuntimeBits()) continue;
try self.emitWValue(arg_val);
}
@ -1631,7 +1631,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.addLabel(.call_indirect, fn_type_index);
}
if (self.liveness.isUnused(inst) or !ret_ty.hasCodeGenBits()) {
if (self.liveness.isUnused(inst) or !ret_ty.hasRuntimeBits()) {
return WValue.none;
} else if (ret_ty.isNoReturn()) {
try self.addTag(.@"unreachable");
@ -1653,7 +1653,7 @@ fn airAlloc(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
try self.initializeStack();
}
if (!pointee_type.hasCodeGenBits()) {
if (!pointee_type.hasRuntimeBits()) {
// when the pointee is zero-sized, we still want to create a pointer.
// but instead use a default pointer type as storage.
const zero_ptr = try self.allocStack(Type.usize);
@ -1678,7 +1678,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
.ErrorUnion => {
const err_ty = ty.errorUnionSet();
const pl_ty = ty.errorUnionPayload();
if (!pl_ty.hasCodeGenBits()) {
if (!pl_ty.hasRuntimeBits()) {
const err_val = try self.load(rhs, err_ty, 0);
return self.store(lhs, err_val, err_ty, 0);
}
@ -1691,7 +1691,7 @@ fn store(self: *Self, lhs: WValue, rhs: WValue, ty: Type, offset: u32) InnerErro
}
var buf: Type.Payload.ElemType = undefined;
const pl_ty = ty.optionalChild(&buf);
if (!pl_ty.hasCodeGenBits()) {
if (!pl_ty.hasRuntimeBits()) {
return self.store(lhs, rhs, Type.initTag(.u8), 0);
}
@ -1750,7 +1750,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(ty_op.operand);
const ty = self.air.getRefType(ty_op.ty);
if (!ty.hasCodeGenBits()) return WValue{ .none = {} };
if (!ty.hasRuntimeBits()) return WValue{ .none = {} };
if (isByRef(ty, self.target)) {
const new_local = try self.allocStack(ty);
@ -2146,7 +2146,7 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: std.math.CompareOperator) Inner
if (operand_ty.zigTypeTag() == .Optional and !operand_ty.isPtrLikeOptional()) {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);
if (payload_ty.hasCodeGenBits()) {
if (payload_ty.hasRuntimeBits()) {
// When we hit this case, we must check the value of optionals
// that are not pointers. This means first checking against non-null for
// both lhs and rhs, as well as checking the payload are matching of lhs and rhs
@ -2190,7 +2190,7 @@ fn airBr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const block = self.blocks.get(br.block_inst).?;
// if operand has codegen bits we should break with a value
if (self.air.typeOf(br.operand).hasCodeGenBits()) {
if (self.air.typeOf(br.operand).hasRuntimeBits()) {
try self.emitWValue(try self.resolveInst(br.operand));
if (block.value != .none) {
@ -2282,7 +2282,7 @@ fn airStructFieldVal(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasCodeGenBits()) return WValue{ .none = {} };
if (!field_ty.hasRuntimeBits()) return WValue{ .none = {} };
const offset = std.math.cast(u32, struct_ty.structFieldOffset(field_index, self.target)) catch {
return self.fail("Field type '{}' too big to fit into stack frame", .{field_ty});
};
@ -2452,7 +2452,7 @@ fn airIsErr(self: *Self, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!W
// load the error tag value
try self.emitWValue(operand);
if (pl_ty.hasCodeGenBits()) {
if (pl_ty.hasRuntimeBits()) {
try self.addMemArg(.i32_load16_u, .{
.offset = 0,
.alignment = err_ty.errorUnionSet().abiAlignment(self.target),
@ -2474,7 +2474,7 @@ fn airUnwrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue
const operand = try self.resolveInst(ty_op.operand);
const err_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) return WValue{ .none = {} };
if (!payload_ty.hasRuntimeBits()) return WValue{ .none = {} };
const offset = @intCast(u32, err_ty.errorUnionSet().abiSize(self.target));
if (isByRef(payload_ty, self.target)) {
return self.buildPointerOffset(operand, offset, .new);
@ -2489,7 +2489,7 @@ fn airUnwrapErrUnionError(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(ty_op.operand);
const err_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return operand;
}
@ -2502,7 +2502,7 @@ fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(ty_op.operand);
const op_ty = self.air.typeOf(ty_op.operand);
if (!op_ty.hasCodeGenBits()) return operand;
if (!op_ty.hasRuntimeBits()) return operand;
const err_ty = self.air.getRefType(ty_op.ty);
const offset = err_ty.errorUnionSet().abiSize(self.target);
@ -2580,7 +2580,7 @@ fn isNull(self: *Self, operand: WValue, optional_ty: Type, opcode: wasm.Opcode)
const payload_ty = optional_ty.optionalChild(&buf);
// When payload is zero-bits, we can treat operand as a value, rather than
// a pointer to the stack value
if (payload_ty.hasCodeGenBits()) {
if (payload_ty.hasRuntimeBits()) {
try self.addMemArg(.i32_load8_u, .{ .offset = 0, .alignment = 1 });
}
}
@ -2600,7 +2600,7 @@ fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const operand = try self.resolveInst(ty_op.operand);
const opt_ty = self.air.typeOf(ty_op.operand);
const payload_ty = self.air.typeOfIndex(inst);
if (!payload_ty.hasCodeGenBits()) return WValue{ .none = {} };
if (!payload_ty.hasRuntimeBits()) return WValue{ .none = {} };
if (opt_ty.isPtrLikeOptional()) return operand;
const offset = opt_ty.abiSize(self.target) - payload_ty.abiSize(self.target);
@ -2621,7 +2621,7 @@ fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
if (!payload_ty.hasCodeGenBits() or opt_ty.isPtrLikeOptional()) {
if (!payload_ty.hasRuntimeBits() or opt_ty.isPtrLikeOptional()) {
return operand;
}
@ -2635,7 +2635,7 @@ fn airOptionalPayloadPtrSet(self: *Self, inst: Air.Inst.Index) InnerError!WValue
const opt_ty = self.air.typeOf(ty_op.operand).childType();
var buf: Type.Payload.ElemType = undefined;
const payload_ty = opt_ty.optionalChild(&buf);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return self.fail("TODO: Implement OptionalPayloadPtrSet for optional with zero-sized type {}", .{payload_ty});
}
@ -2659,7 +2659,7 @@ fn airWrapOptional(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
const non_null_bit = try self.allocStack(Type.initTag(.u1));
try self.addLabel(.local_get, non_null_bit.local);
try self.addImm32(1);
@ -2851,7 +2851,7 @@ fn airArrayToSlice(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
const slice_local = try self.allocStack(slice_ty);
// store the array ptr in the slice
if (array_ty.hasCodeGenBits()) {
if (array_ty.hasRuntimeBits()) {
try self.store(slice_local, operand, ty, 0);
}
@ -3105,7 +3105,7 @@ fn airPrefetch(self: *Self, inst: Air.Inst.Index) InnerError!WValue {
}
fn cmpOptionals(self: *Self, lhs: WValue, rhs: WValue, operand_ty: Type, op: std.math.CompareOperator) InnerError!WValue {
assert(operand_ty.hasCodeGenBits());
assert(operand_ty.hasRuntimeBits());
assert(op == .eq or op == .neq);
var buf: Type.Payload.ElemType = undefined;
const payload_ty = operand_ty.optionalChild(&buf);

@ -1202,7 +1202,7 @@ fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void {
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) break :result mcv;
if (!payload_ty.hasRuntimeBits()) break :result mcv;
return self.fail("TODO implement unwrap error union error for non-empty payloads", .{});
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -1213,7 +1213,7 @@ fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void {
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) break :result MCValue.none;
if (!payload_ty.hasRuntimeBits()) break :result MCValue.none;
return self.fail("TODO implement unwrap error union payload for non-empty payloads", .{});
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -1270,7 +1270,7 @@ fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void {
const error_union_ty = self.air.getRefType(ty_op.ty);
const payload_ty = error_union_ty.errorUnionPayload();
const mcv = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) break :result mcv;
if (!payload_ty.hasRuntimeBits()) break :result mcv;
return self.fail("TODO implement wrap errunion error for non-empty payloads", .{});
};
@ -1636,7 +1636,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasCodeGenBits())
if (!elem_ty.hasRuntimeBits())
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
@ -2739,9 +2739,9 @@ fn isNonNull(self: *Self, ty: Type, operand: MCValue) !MCValue {
fn isErr(self: *Self, ty: Type, operand: MCValue) !MCValue {
const err_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
if (!err_type.hasCodeGenBits()) {
if (!err_type.hasRuntimeBits()) {
return MCValue{ .immediate = 0 }; // always false
} else if (!payload_type.hasCodeGenBits()) {
} else if (!payload_type.hasRuntimeBits()) {
if (err_type.abiSize(self.target.*) <= 8) {
try self.genBinMathOpMir(.cmp, err_type, .unsigned, operand, MCValue{ .immediate = 0 });
return MCValue{ .compare_flags_unsigned = .gt };
@ -2962,7 +2962,7 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasCodeGenBits()) {
if (self.air.typeOf(operand).hasRuntimeBits()) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
@ -3913,7 +3913,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasCodeGenBits()) {
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
@ -3921,7 +3921,7 @@ fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasCodeGenBits())
if (!inst_ty.hasRuntimeBits())
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
@ -3977,11 +3977,45 @@ fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCV
return mcv;
}
fn lowerDeclRef(self: *Self, tv: TypedValue, decl: *Module.Decl) InnerError!MCValue {
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
_ = tv;
}
fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
if (typed_value.val.isUndef())
return MCValue{ .undef = {} };
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
if (typed_value.val.castTag(.decl_ref)) |payload| {
return self.lowerDeclRef(typed_value, payload.data);
}
if (typed_value.val.castTag(.decl_ref_mut)) |payload| {
return self.lowerDeclRef(typed_value, payload.data.decl);
}
switch (typed_value.ty.zigTypeTag()) {
.Pointer => switch (typed_value.ty.ptrSize()) {
.Slice => {
@ -3998,28 +4032,6 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
return self.fail("TODO codegen for const slices", .{});
},
else => {
if (typed_value.val.castTag(.decl_ref)) |payload| {
const decl = payload.data;
decl.alive = true;
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.local_sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail("TODO codegen non-ELF const Decl pointer", .{});
}
}
if (typed_value.val.tag() == .int_u64) {
return MCValue{ .immediate = typed_value.val.toUnsignedInt() };
}
@ -4091,7 +4103,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
const payload_type = typed_value.ty.errorUnionPayload();
if (typed_value.val.castTag(.eu_payload)) |pl| {
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return MCValue{ .immediate = 0 };
}
@ -4099,7 +4111,7 @@ fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue {
_ = pl;
return self.fail("TODO implement error union const of type '{}' (non-error)", .{typed_value.ty});
} else {
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
return self.genTypedValue(.{ .ty = error_type, .val = typed_value.val });
}
@ -4156,7 +4168,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var by_reg = std.AutoHashMap(usize, usize).init(self.bin_file.allocator);
defer by_reg.deinit();
for (param_types) |ty, i| {
if (!ty.hasCodeGenBits()) continue;
if (!ty.hasRuntimeBits()) continue;
const param_size = @intCast(u32, ty.abiSize(self.target.*));
const pass_in_reg = switch (ty.zigTypeTag()) {
.Bool => true,
@ -4178,7 +4190,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
// for (param_types) |ty, i| {
const i = count - 1;
const ty = param_types[i];
if (!ty.hasCodeGenBits()) {
if (!ty.hasRuntimeBits()) {
assert(cc != .C);
result.args[i] = .{ .none = {} };
continue;
@ -4207,7 +4219,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
if (ret_ty.zigTypeTag() == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasCodeGenBits()) {
} else if (!ret_ty.hasRuntimeBits()) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,

View File

@ -885,7 +885,7 @@ fn genArgDbgInfo(emit: *Emit, inst: Air.Inst.Index, mcv: MCValue) !void {
fn addDbgInfoTypeReloc(emit: *Emit, ty: Type) !void {
switch (emit.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
assert(ty.hasRuntimeBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT.type, DW.FORM.ref4

View File

@ -377,7 +377,7 @@ pub fn generateSymbol(
const field_vals = typed_value.val.castTag(.@"struct").?.data;
for (field_vals) |field_val, index| {
const field_ty = typed_value.ty.structFieldType(index);
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
switch (try generateSymbol(bin_file, src_loc, .{
.ty = field_ty,
.val = field_val,

View File

@ -507,7 +507,7 @@ pub const DeclGen = struct {
const error_type = ty.errorUnionSet();
const payload_type = ty.errorUnionPayload();
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
const err_val = if (val.errorUnionIsPayload()) Value.initTag(.zero) else val;
return dg.renderValue(writer, error_type, err_val);
@ -581,7 +581,7 @@ pub const DeclGen = struct {
for (field_vals) |field_val, i| {
const field_ty = ty.structFieldType(i);
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
if (i != 0) try writer.writeAll(",");
try dg.renderValue(writer, field_ty, field_val);
@ -611,7 +611,7 @@ pub const DeclGen = struct {
const index = union_ty.tag_ty.enumTagFieldIndex(union_obj.tag).?;
const field_ty = ty.unionFields().values()[index].ty;
const field_name = ty.unionFields().keys()[index];
if (field_ty.hasCodeGenBits()) {
if (field_ty.hasRuntimeBits()) {
try writer.print(".{} = ", .{fmtIdent(field_name)});
try dg.renderValue(writer, field_ty, union_obj.val);
}
@ -652,7 +652,7 @@ pub const DeclGen = struct {
}
}
const return_ty = dg.decl.ty.fnReturnType();
if (return_ty.hasCodeGenBits()) {
if (return_ty.hasRuntimeBits()) {
try dg.renderType(w, return_ty);
} else if (return_ty.zigTypeTag() == .NoReturn) {
try w.writeAll("zig_noreturn void");
@ -784,7 +784,7 @@ pub const DeclGen = struct {
var it = struct_obj.fields.iterator();
while (it.next()) |entry| {
const field_ty = entry.value_ptr.ty;
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
const alignment = entry.value_ptr.abi_align;
const name: CValue = .{ .identifier = entry.key_ptr.* };
@ -837,7 +837,7 @@ pub const DeclGen = struct {
var it = t.unionFields().iterator();
while (it.next()) |entry| {
const field_ty = entry.value_ptr.ty;
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
const alignment = entry.value_ptr.abi_align;
const name: CValue = .{ .identifier = entry.key_ptr.* };
try buffer.append(' ');
@ -1582,7 +1582,7 @@ fn airAlloc(f: *Function, inst: Air.Inst.Index) !CValue {
const elem_type = inst_ty.elemType();
const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut;
if (!elem_type.hasCodeGenBits()) {
if (!elem_type.isFnOrHasRuntimeBits()) {
const target = f.object.dg.module.getTarget();
const literal = switch (target.cpu.arch.ptrBitWidth()) {
32 => "(void *)0xaaaaaaaa",
@ -1683,7 +1683,7 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
fn airRet(f: *Function, inst: Air.Inst.Index) !CValue {
const un_op = f.air.instructions.items(.data)[inst].un_op;
const writer = f.object.writer();
if (f.air.typeOf(un_op).hasCodeGenBits()) {
if (f.air.typeOf(un_op).isFnOrHasRuntimeBits()) {
const operand = try f.resolveInst(un_op);
try writer.writeAll("return ");
try f.writeCValue(writer, operand);
@ -1699,7 +1699,7 @@ fn airRetLoad(f: *Function, inst: Air.Inst.Index) !CValue {
const writer = f.object.writer();
const ptr_ty = f.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
if (!ret_ty.hasCodeGenBits()) {
if (!ret_ty.isFnOrHasRuntimeBits()) {
try writer.writeAll("return;\n");
}
const ptr = try f.resolveInst(un_op);
@ -2315,7 +2315,7 @@ fn airCall(f: *Function, inst: Air.Inst.Index) !CValue {
var result_local: CValue = .none;
if (unused_result) {
if (ret_ty.hasCodeGenBits()) {
if (ret_ty.hasRuntimeBits()) {
try writer.print("(void)", .{});
}
} else {
@ -2832,7 +2832,7 @@ fn airUnwrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const operand_ty = f.air.typeOf(ty_op.operand);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
if (operand_ty.zigTypeTag() == .Pointer) {
const local = try f.allocLocal(inst_ty, .Const);
try writer.writeAll(" = *");
@ -2864,7 +2864,7 @@ fn airUnwrapErrUnionPay(f: *Function, inst: Air.Inst.Index, maybe_addrof: []cons
const operand_ty = f.air.typeOf(ty_op.operand);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return CValue.none;
}
@ -2908,7 +2908,7 @@ fn airWrapErrUnionErr(f: *Function, inst: Air.Inst.Index) !CValue {
const operand = try f.resolveInst(ty_op.operand);
const err_un_ty = f.air.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return operand;
}
@ -2951,7 +2951,7 @@ fn airIsErr(
const operand_ty = f.air.typeOf(un_op);
const local = try f.allocLocal(Type.initTag(.bool), .Const);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
try writer.print(" = {s}", .{deref_prefix});
try f.writeCValue(writer, operand);
try writer.print(" {s} 0;\n", .{op_str});

View File

@ -176,7 +176,7 @@ pub const Object = struct {
/// the compiler, but the Type/Value memory here is backed by `type_map_arena`.
/// TODO we need to remove entries from this map in response to incremental compilation
/// but I think the frontend won't tell us about types that get deleted because
/// hasCodeGenBits() is false for types.
/// hasRuntimeBits() is false for types.
type_map: TypeMap,
/// The backing memory for `type_map`. Periodically garbage collected after flush().
/// The code for doing the periodical GC is not yet implemented.
@ -463,7 +463,7 @@ pub const Object = struct {
const param_offset: c_uint = @boolToInt(ret_ptr != null);
for (fn_info.param_types) |param_ty| {
if (!param_ty.hasCodeGenBits()) continue;
if (!param_ty.hasRuntimeBits()) continue;
const llvm_arg_i = @intCast(c_uint, args.items.len) + param_offset;
try args.append(llvm_func.getParam(llvm_arg_i));
@ -662,6 +662,7 @@ pub const DeclGen = struct {
new_global.setAlignment(global.getAlignment());
new_global.setInitializer(llvm_init);
global.replaceAllUsesWith(new_global);
dg.object.decl_map.putAssumeCapacity(decl, new_global);
new_global.takeName(global);
global.deleteGlobal();
}
@ -709,7 +710,7 @@ pub const DeclGen = struct {
// Set parameter attributes.
var llvm_param_i: c_uint = @boolToInt(sret);
for (fn_info.param_types) |param_ty| {
if (!param_ty.hasCodeGenBits()) continue;
if (!param_ty.hasRuntimeBits()) continue;
if (isByRef(param_ty)) {
dg.addArgAttr(llvm_fn, llvm_param_i, "nonnull");
@ -725,6 +726,10 @@ pub const DeclGen = struct {
llvm_fn.setFunctionCallConv(toLlvmCallConv(fn_info.cc, target));
}
if (fn_info.alignment != 0) {
llvm_fn.setAlignment(fn_info.alignment);
}
// Function attributes that are independent of analysis results of the function body.
dg.addCommonFnAttributes(llvm_fn);
@ -840,7 +845,11 @@ pub const DeclGen = struct {
}
const llvm_addrspace = dg.llvmAddressSpace(t.ptrAddressSpace());
const elem_ty = t.childType();
const llvm_elem_ty = if (elem_ty.hasCodeGenBits() or elem_ty.zigTypeTag() == .Array)
const lower_elem_ty = switch (elem_ty.zigTypeTag()) {
.Opaque, .Array, .Fn => true,
else => elem_ty.hasRuntimeBits(),
};
const llvm_elem_ty = if (lower_elem_ty)
try dg.llvmType(elem_ty)
else
dg.context.intType(8);
@ -878,13 +887,13 @@ pub const DeclGen = struct {
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const child_type = t.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) {
if (!child_type.hasRuntimeBits()) {
return dg.context.intType(1);
}
const payload_llvm_ty = try dg.llvmType(child_type);
if (t.isPtrLikeOptional()) {
return payload_llvm_ty;
} else if (!child_type.hasCodeGenBits()) {
} else if (!child_type.hasRuntimeBits()) {
return dg.context.intType(1);
}
@ -897,7 +906,7 @@ pub const DeclGen = struct {
const error_type = t.errorUnionSet();
const payload_type = t.errorUnionPayload();
const llvm_error_type = try dg.llvmType(error_type);
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
return llvm_error_type;
}
const llvm_payload_type = try dg.llvmType(payload_type);
@ -962,7 +971,7 @@ pub const DeclGen = struct {
var big_align: u32 = 0;
var running_bits: u16 = 0;
for (struct_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.packedAlignment();
if (field_align == 0) {
@ -1029,7 +1038,7 @@ pub const DeclGen = struct {
}
} else {
for (struct_obj.fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
llvm_field_types.appendAssumeCapacity(try dg.llvmType(field.ty));
}
}
@ -1123,7 +1132,7 @@ pub const DeclGen = struct {
const sret = firstParamSRet(fn_info, target);
const return_type = fn_info.return_type;
const raw_llvm_ret_ty = try dg.llvmType(return_type);
const llvm_ret_ty = if (!return_type.hasCodeGenBits() or sret)
const llvm_ret_ty = if (!return_type.hasRuntimeBits() or sret)
dg.context.voidType()
else
raw_llvm_ret_ty;
@ -1136,7 +1145,7 @@ pub const DeclGen = struct {
}
for (fn_info.param_types) |param_ty| {
if (!param_ty.hasCodeGenBits()) continue;
if (!param_ty.hasRuntimeBits()) continue;
const raw_llvm_ty = try dg.llvmType(param_ty);
const actual_llvm_ty = if (!isByRef(param_ty)) raw_llvm_ty else raw_llvm_ty.pointerType(0);
@ -1176,29 +1185,35 @@ pub const DeclGen = struct {
const llvm_type = try dg.llvmType(tv.ty);
return if (tv.val.toBool()) llvm_type.constAllOnes() else llvm_type.constNull();
},
.Int => {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = tv.val.toBigInt(&bigint_space);
const target = dg.module.getTarget();
const int_info = tv.ty.intInfo(target);
const llvm_type = dg.context.intType(int_info.bits);
// TODO this duplicates code with Pointer but they should share the handling
// of the tv.val.tag() and then Int should do extra constPtrToInt on top
.Int => switch (tv.val.tag()) {
.decl_ref_mut => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref_mut).?.data.decl),
.decl_ref => return lowerDeclRefValue(dg, tv, tv.val.castTag(.decl_ref).?.data),
else => {
var bigint_space: Value.BigIntSpace = undefined;
const bigint = tv.val.toBigInt(&bigint_space);
const target = dg.module.getTarget();
const int_info = tv.ty.intInfo(target);
const llvm_type = dg.context.intType(int_info.bits);
const unsigned_val = v: {
if (bigint.limbs.len == 1) {
break :v llvm_type.constInt(bigint.limbs[0], .False);
const unsigned_val = v: {
if (bigint.limbs.len == 1) {
break :v llvm_type.constInt(bigint.limbs[0], .False);
}
if (@sizeOf(usize) == @sizeOf(u64)) {
break :v llvm_type.constIntOfArbitraryPrecision(
@intCast(c_uint, bigint.limbs.len),
bigint.limbs.ptr,
);
}
@panic("TODO implement bigint to llvm int for 32-bit compiler builds");
};
if (!bigint.positive) {
return llvm.constNeg(unsigned_val);
}
if (@sizeOf(usize) == @sizeOf(u64)) {
break :v llvm_type.constIntOfArbitraryPrecision(
@intCast(c_uint, bigint.limbs.len),
bigint.limbs.ptr,
);
}
@panic("TODO implement bigint to llvm int for 32-bit compiler builds");
};
if (!bigint.positive) {
return llvm.constNeg(unsigned_val);
}
return unsigned_val;
return unsigned_val;
},
},
.Enum => {
var int_buffer: Value.Payload.U64 = undefined;
@ -1370,7 +1385,7 @@ pub const DeclGen = struct {
const llvm_i1 = dg.context.intType(1);
const is_pl = !tv.val.isNull();
const non_null_bit = if (is_pl) llvm_i1.constAllOnes() else llvm_i1.constNull();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return non_null_bit;
}
if (tv.ty.isPtrLikeOptional()) {
@ -1383,6 +1398,7 @@ pub const DeclGen = struct {
return llvm_ty.constNull();
}
}
assert(payload_ty.zigTypeTag() != .Fn);
const fields: [2]*const llvm.Value = .{
try dg.genTypedValue(.{
.ty = payload_ty,
@ -1420,7 +1436,7 @@ pub const DeclGen = struct {
const payload_type = tv.ty.errorUnionPayload();
const is_pl = tv.val.errorUnionIsPayload();
if (!payload_type.hasCodeGenBits()) {
if (!payload_type.hasRuntimeBits()) {
// We use the error type directly as the type.
const err_val = if (!is_pl) tv.val else Value.initTag(.zero);
return dg.genTypedValue(.{ .ty = error_type, .val = err_val });
@ -1458,7 +1474,7 @@ pub const DeclGen = struct {
var running_int: *const llvm.Value = llvm_struct_ty.structGetTypeAtIndex(0).constNull();
for (field_vals) |field_val, i| {
const field = fields[i];
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.packedAlignment();
if (field_align == 0) {
@ -1540,7 +1556,7 @@ pub const DeclGen = struct {
} else {
for (field_vals) |field_val, i| {
const field_ty = tv.ty.structFieldType(i);
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
llvm_fields.appendAssumeCapacity(try dg.genTypedValue(.{
.ty = field_ty,
@ -1572,7 +1588,7 @@ pub const DeclGen = struct {
assert(union_obj.haveFieldTypes());
const field_ty = union_obj.fields.values()[field_index].ty;
const payload = p: {
if (!field_ty.hasCodeGenBits()) {
if (!field_ty.hasRuntimeBits()) {
const padding_len = @intCast(c_uint, layout.payload_size);
break :p dg.context.intType(8).arrayType(padding_len).getUndef();
}
@ -1784,13 +1800,14 @@ pub const DeclGen = struct {
return self.context.constStruct(&fields, fields.len, .False);
}
if (!tv.ty.childType().hasCodeGenBits() or !decl.ty.hasCodeGenBits()) {
const is_fn_body = decl.ty.zigTypeTag() == .Fn;
if (!is_fn_body and !decl.ty.hasRuntimeBits()) {
return self.lowerPtrToVoid(tv.ty);
}
decl.markAlive();
const llvm_val = if (decl.ty.zigTypeTag() == .Fn)
const llvm_val = if (is_fn_body)
try self.resolveLlvmFunction(decl)
else
try self.resolveGlobalDecl(decl);
@ -2182,7 +2199,7 @@ pub const FuncGen = struct {
} else {
for (args) |arg, i| {
const param_ty = fn_info.param_types[i];
if (!param_ty.hasCodeGenBits()) continue;
if (!param_ty.hasRuntimeBits()) continue;
try llvm_args.append(try self.resolveInst(arg));
}
@ -2200,7 +2217,7 @@ pub const FuncGen = struct {
if (return_type.isNoReturn()) {
_ = self.builder.buildUnreachable();
return null;
} else if (self.liveness.isUnused(inst) or !return_type.hasCodeGenBits()) {
} else if (self.liveness.isUnused(inst) or !return_type.hasRuntimeBits()) {
return null;
} else if (sret) {
call.setCallSret(llvm_ret_ty);
@ -2224,7 +2241,7 @@ pub const FuncGen = struct {
_ = self.builder.buildRetVoid();
return null;
}
if (!ret_ty.hasCodeGenBits()) {
if (!ret_ty.hasRuntimeBits()) {
_ = self.builder.buildRetVoid();
return null;
}
@ -2237,7 +2254,7 @@ pub const FuncGen = struct {
const un_op = self.air.instructions.items(.data)[inst].un_op;
const ptr_ty = self.air.typeOf(un_op);
const ret_ty = ptr_ty.childType();
if (!ret_ty.hasCodeGenBits() or isByRef(ret_ty)) {
if (!ret_ty.hasRuntimeBits() or isByRef(ret_ty)) {
_ = self.builder.buildRetVoid();
return null;
}
@ -2273,7 +2290,7 @@ pub const FuncGen = struct {
.Int, .Bool, .Pointer, .ErrorSet => operand_ty,
.Optional => blk: {
const payload_ty = operand_ty.optionalChild(&opt_buffer);
if (!payload_ty.hasCodeGenBits() or operand_ty.isPtrLikeOptional()) {
if (!payload_ty.hasRuntimeBits() or operand_ty.isPtrLikeOptional()) {
break :blk operand_ty;
}
// We need to emit instructions to check for equality/inequality
@ -2397,7 +2414,8 @@ pub const FuncGen = struct {
self.builder.positionBuilderAtEnd(parent_bb);
        // If the block does not return a value, we don't have to create a phi node.
if (!inst_ty.hasCodeGenBits()) return null;
const is_body = inst_ty.zigTypeTag() == .Fn;
if (!is_body and !inst_ty.hasRuntimeBits()) return null;
const raw_llvm_ty = try self.dg.llvmType(inst_ty);
@ -2406,7 +2424,7 @@ pub const FuncGen = struct {
// a pointer to it. LLVM IR allows the call instruction to use function bodies instead
// of function pointers, however the phi makes it a runtime value and therefore
// the LLVM type has to be wrapped in a pointer.
if (inst_ty.zigTypeTag() == .Fn or isByRef(inst_ty)) {
if (is_body or isByRef(inst_ty)) {
break :ty raw_llvm_ty.pointerType(0);
}
break :ty raw_llvm_ty;
@ -2427,7 +2445,8 @@ pub const FuncGen = struct {
// If the break doesn't break a value, then we don't have to add
// the values to the lists.
if (self.air.typeOf(branch.operand).hasCodeGenBits()) {
const operand_ty = self.air.typeOf(branch.operand);
if (operand_ty.hasRuntimeBits() or operand_ty.zigTypeTag() == .Fn) {
const val = try self.resolveInst(branch.operand);
// For the phi node, we need the basic blocks and the values of the
@ -2531,7 +2550,7 @@ pub const FuncGen = struct {
const llvm_usize = try self.dg.llvmType(Type.usize);
const len = llvm_usize.constInt(array_ty.arrayLen(), .False);
const slice_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
if (!array_ty.hasCodeGenBits()) {
if (!array_ty.hasRuntimeBits()) {
return self.builder.buildInsertValue(slice_llvm_ty.getUndef(), len, 1, "");
}
const operand = try self.resolveInst(ty_op.operand);
@ -2662,7 +2681,7 @@ pub const FuncGen = struct {
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const elem_ty = ptr_ty.childType();
if (!elem_ty.hasCodeGenBits()) return null;
if (!elem_ty.hasRuntimeBits()) return null;
const base_ptr = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
@ -2709,7 +2728,7 @@ pub const FuncGen = struct {
const struct_llvm_val = try self.resolveInst(struct_field.struct_operand);
const field_index = struct_field.field_index;
const field_ty = struct_ty.structFieldType(field_index);
if (!field_ty.hasCodeGenBits()) {
if (!field_ty.hasRuntimeBits()) {
return null;
}
const target = self.dg.module.getTarget();
@ -2914,7 +2933,7 @@ pub const FuncGen = struct {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
if (invert) {
return self.builder.buildNot(operand, "");
} else {
@ -2946,7 +2965,7 @@ pub const FuncGen = struct {
const err_set_ty = try self.dg.llvmType(Type.initTag(.anyerror));
const zero = err_set_ty.constNull();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
const loaded = if (operand_is_ptr) self.builder.buildLoad(operand, "") else operand;
return self.builder.buildICmp(op, loaded, zero, "");
}
@ -2969,7 +2988,7 @@ pub const FuncGen = struct {
const optional_ty = self.air.typeOf(ty_op.operand).childType();
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
// We have a pointer to a zero-bit value and we need to return
// a pointer to a zero-bit value.
return operand;
@ -2993,7 +3012,7 @@ pub const FuncGen = struct {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = optional_ty.optionalChild(&buf);
const non_null_bit = self.context.intType(1).constAllOnes();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
            // We have a pointer to an i1. We need to set it to 1 and then return the same pointer.
_ = self.builder.buildStore(non_null_bit, operand);
return operand;
@ -3028,7 +3047,7 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOf(ty_op.operand);
const payload_ty = self.air.typeOfIndex(inst);
if (!payload_ty.hasCodeGenBits()) return null;
if (!payload_ty.hasRuntimeBits()) return null;
if (optional_ty.isPtrLikeOptional()) {
// Payload value is the same as the optional value.
@ -3049,7 +3068,7 @@ pub const FuncGen = struct {
const operand = try self.resolveInst(ty_op.operand);
const err_union_ty = self.air.typeOf(ty_op.operand);
const payload_ty = err_union_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) return null;
if (!payload_ty.hasRuntimeBits()) return null;
if (operand_is_ptr or isByRef(payload_ty)) {
return self.builder.buildStructGEP(operand, 1, "");
}
@ -3069,7 +3088,7 @@ pub const FuncGen = struct {
const operand_ty = self.air.typeOf(ty_op.operand);
const payload_ty = operand_ty.errorUnionPayload();
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
if (!operand_is_ptr) return operand;
return self.builder.buildLoad(operand, "");
}
@ -3088,7 +3107,7 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
const non_null_bit = self.context.intType(1).constAllOnes();
if (!payload_ty.hasCodeGenBits()) return non_null_bit;
if (!payload_ty.hasRuntimeBits()) return non_null_bit;
const operand = try self.resolveInst(ty_op.operand);
const optional_ty = self.air.typeOfIndex(inst);
if (optional_ty.isPtrLikeOptional()) return operand;
@ -3116,7 +3135,7 @@ pub const FuncGen = struct {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const payload_ty = self.air.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return operand;
}
const inst_ty = self.air.typeOfIndex(inst);
@ -3147,7 +3166,7 @@ pub const FuncGen = struct {
const err_un_ty = self.air.typeOfIndex(inst);
const payload_ty = err_un_ty.errorUnionPayload();
const operand = try self.resolveInst(ty_op.operand);
if (!payload_ty.hasCodeGenBits()) {
if (!payload_ty.hasRuntimeBits()) {
return operand;
}
const err_un_llvm_ty = try self.dg.llvmType(err_un_ty);
@ -3836,7 +3855,7 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const pointee_type = ptr_ty.childType();
if (!pointee_type.hasCodeGenBits()) return self.dg.lowerPtrToVoid(ptr_ty);
if (!pointee_type.isFnOrHasRuntimeBits()) return self.dg.lowerPtrToVoid(ptr_ty);
const pointee_llvm_ty = try self.dg.llvmType(pointee_type);
const alloca_inst = self.buildAlloca(pointee_llvm_ty);
@ -3850,7 +3869,7 @@ pub const FuncGen = struct {
if (self.liveness.isUnused(inst)) return null;
const ptr_ty = self.air.typeOfIndex(inst);
const ret_ty = ptr_ty.childType();
if (!ret_ty.hasCodeGenBits()) return null;
if (!ret_ty.isFnOrHasRuntimeBits()) return null;
if (self.ret_ptr) |ret_ptr| return ret_ptr;
const ret_llvm_ty = try self.dg.llvmType(ret_ty);
const target = self.dg.module.getTarget();
@ -4074,7 +4093,7 @@ pub const FuncGen = struct {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr_ty = self.air.typeOf(bin_op.lhs);
const operand_ty = ptr_ty.childType();
if (!operand_ty.hasCodeGenBits()) return null;
if (!operand_ty.isFnOrHasRuntimeBits()) return null;
var ptr = try self.resolveInst(bin_op.lhs);
var element = try self.resolveInst(bin_op.rhs);
const opt_abi_ty = self.dg.getAtomicAbiType(operand_ty, false);
@ -4674,7 +4693,7 @@ pub const FuncGen = struct {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field = &union_obj.fields.values()[field_index];
const result_llvm_ty = try self.dg.llvmType(self.air.typeOfIndex(inst));
if (!field.ty.hasCodeGenBits()) {
if (!field.ty.hasRuntimeBits()) {
return null;
}
const target = self.dg.module.getTarget();
@ -4702,7 +4721,7 @@ pub const FuncGen = struct {
fn load(self: *FuncGen, ptr: *const llvm.Value, ptr_ty: Type) !?*const llvm.Value {
const info = ptr_ty.ptrInfo().data;
if (!info.pointee_type.hasCodeGenBits()) return null;
if (!info.pointee_type.hasRuntimeBits()) return null;
const target = self.dg.module.getTarget();
const ptr_alignment = ptr_ty.ptrAlignment(target);
@ -4757,7 +4776,7 @@ pub const FuncGen = struct {
) void {
const info = ptr_ty.ptrInfo().data;
const elem_ty = info.pointee_type;
if (!elem_ty.hasCodeGenBits()) {
if (!elem_ty.isFnOrHasRuntimeBits()) {
return;
}
const target = self.dg.module.getTarget();
@ -5087,7 +5106,7 @@ fn llvmFieldIndex(
if (struct_obj.layout != .Packed) {
var llvm_field_index: c_uint = 0;
for (struct_obj.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits())
if (!field.ty.hasRuntimeBits())
continue;
if (field_index > i) {
llvm_field_index += 1;
@ -5114,7 +5133,7 @@ fn llvmFieldIndex(
var running_bits: u16 = 0;
var llvm_field_index: c_uint = 0;
for (struct_obj.fields.values()) |field, i| {
if (!field.ty.hasCodeGenBits())
if (!field.ty.hasRuntimeBits())
continue;
const field_align = field.packedAlignment();
@ -5227,9 +5246,9 @@ fn isByRef(ty: Type) bool {
.AnyFrame,
=> return false,
.Array, .Frame => return ty.hasCodeGenBits(),
.Array, .Frame => return ty.hasRuntimeBits(),
.Struct => {
if (!ty.hasCodeGenBits()) return false;
if (!ty.hasRuntimeBits()) return false;
if (ty.castTag(.tuple)) |tuple| {
var count: usize = 0;
for (tuple.data.values) |field_val, i| {
@ -5247,7 +5266,7 @@ fn isByRef(ty: Type) bool {
}
return true;
},
.Union => return ty.hasCodeGenBits(),
.Union => return ty.hasRuntimeBits(),
.ErrorUnion => return isByRef(ty.errorUnionPayload()),
.Optional => {
var buf: Type.Payload.ElemType = undefined;

View File

@ -852,7 +852,7 @@ pub const DeclGen = struct {
try self.beginSPIRVBlock(label_id);
// If this block didn't produce a value, simply return here.
if (!ty.hasCodeGenBits())
if (!ty.hasRuntimeBits())
return null;
// Combine the result from the blocks using the Phi instruction.
@ -879,7 +879,7 @@ pub const DeclGen = struct {
const block = self.blocks.get(br.block_inst).?;
const operand_ty = self.air.typeOf(br.operand);
if (operand_ty.hasCodeGenBits()) {
if (operand_ty.hasRuntimeBits()) {
const operand_id = try self.resolve(br.operand);
// current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body.
try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id });
@ -958,7 +958,7 @@ pub const DeclGen = struct {
fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void {
const operand = self.air.instructions.items(.data)[inst].un_op;
const operand_ty = self.air.typeOf(operand);
if (operand_ty.hasCodeGenBits()) {
if (operand_ty.hasRuntimeBits()) {
const operand_id = try self.resolve(operand);
try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id});
} else {

View File

@ -2476,7 +2476,7 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits();
if (fn_ret_has_bits) {
dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram);
} else {

View File

@ -920,7 +920,7 @@ pub fn initDeclDebugBuffers(
try dbg_info_buffer.ensureUnusedCapacity(27 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasCodeGenBits();
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits();
if (fn_ret_has_bits) {
dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram);
} else {

View File

@ -259,7 +259,7 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void {
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl);
}
if (!decl.ty.hasCodeGenBits()) return;
if (!decl.ty.hasRuntimeBits()) return;
assert(decl.link.wasm.sym_index != 0); // Must call allocateDeclIndexes()
decl.link.wasm.clear();

View File

@ -1157,7 +1157,8 @@ const Writer = struct {
break :blk decls_len;
} else 0;
try self.writeFlag(stream, "known_has_bits, ", small.known_has_bits);
try self.writeFlag(stream, "known_non_opv, ", small.known_non_opv);
try self.writeFlag(stream, "known_comptime_only, ", small.known_comptime_only);
try stream.print("{s}, {s}, ", .{
@tagName(small.name_strategy), @tagName(small.layout),
});

View File

@ -637,3 +637,12 @@ pub fn llvmMachineAbi(target: std.Target) ?[:0]const u8 {
else => return null,
}
}
pub fn defaultFunctionAlignment(target: std.Target) u32 {
return switch (target.cpu.arch) {
.arm, .armeb => 4,
.aarch64, .aarch64_32, .aarch64_be => 4,
.riscv64 => 2,
else => 1,
};
}
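
A small sketch of how this target default interacts with the new per-function alignment carried in the fn type. The declarations below are hypothetical user code, assuming the post-change semantics: an explicit `align` stores a nonzero `alignment` in the function's type and takes precedence, while functions without it fall back to this default.

const std = @import("std");

fn aligned() align(64) void {}
fn plain() void {}

test "explicit fn alignment is part of the fn type" {
    // With the new Type.eql for .Fn, differing alignment makes the types unequal.
    try std.testing.expect(@TypeOf(aligned) != @TypeOf(plain));
}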

View File

@ -5,6 +5,7 @@ const Allocator = std.mem.Allocator;
const Target = std.Target;
const Module = @import("Module.zig");
const log = std.log.scoped(.Type);
const target_util = @import("target.zig");
const file_struct = @This();
@ -577,21 +578,36 @@ pub const Type = extern union {
}
},
.Fn => {
if (!a.fnReturnType().eql(b.fnReturnType()))
const a_info = a.fnInfo();
const b_info = b.fnInfo();
if (!eql(a_info.return_type, b_info.return_type))
return false;
if (a.fnCallingConvention() != b.fnCallingConvention())
if (a_info.cc != b_info.cc)
return false;
const a_param_len = a.fnParamLen();
const b_param_len = b.fnParamLen();
if (a_param_len != b_param_len)
if (a_info.param_types.len != b_info.param_types.len)
return false;
var i: usize = 0;
while (i < a_param_len) : (i += 1) {
if (!a.fnParamType(i).eql(b.fnParamType(i)))
for (a_info.param_types) |a_param_ty, i| {
const b_param_ty = b_info.param_types[i];
if (!eql(a_param_ty, b_param_ty))
return false;
if (a_info.comptime_params[i] != b_info.comptime_params[i])
return false;
}
if (a.fnIsVarArgs() != b.fnIsVarArgs())
if (a_info.alignment != b_info.alignment)
return false;
if (a_info.is_var_args != b_info.is_var_args)
return false;
if (a_info.is_generic != b_info.is_generic)
return false;
return true;
},
.Optional => {
@ -686,6 +702,7 @@ pub const Type = extern union {
return false;
},
.Float => return a.tag() == b.tag(),
.BoundFn,
.Frame,
=> std.debug.panic("TODO implement Type equality comparison of {} and {}", .{ a, b }),
@ -937,6 +954,7 @@ pub const Type = extern union {
.return_type = try payload.return_type.copy(allocator),
.param_types = param_types,
.cc = payload.cc,
.alignment = payload.alignment,
.is_var_args = payload.is_var_args,
.is_generic = payload.is_generic,
.comptime_params = comptime_params.ptr,
@ -1114,9 +1132,15 @@ pub const Type = extern union {
}
try writer.writeAll("...");
}
try writer.writeAll(") callconv(.");
try writer.writeAll(@tagName(payload.cc));
try writer.writeAll(") ");
if (payload.cc != .Unspecified) {
try writer.writeAll("callconv(.");
try writer.writeAll(@tagName(payload.cc));
try writer.writeAll(") ");
}
if (payload.alignment != 0) {
try writer.print("align({d}) ", .{payload.alignment});
}
ty = payload.return_type;
continue;
},
@ -1423,170 +1447,6 @@ pub const Type = extern union {
}
}
/// Anything that reports hasCodeGenBits() false returns false here as well.
/// `generic_poison` will return false.
pub fn requiresComptime(ty: Type) bool {
return switch (ty.tag()) {
.u1,
.u8,
.i8,
.u16,
.i16,
.u32,
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.anyopaque,
.bool,
.void,
.anyerror,
.noreturn,
.@"anyframe",
.@"null",
.@"undefined",
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_options,
.prefetch_options,
.export_options,
.extern_options,
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
.fn_noreturn_no_args,
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.const_slice_u8,
.const_slice_u8_sentinel_0,
.anyerror_void_error_union,
.empty_struct_literal,
.function,
.empty_struct,
.error_set,
.error_set_single,
.error_set_inferred,
.error_set_merged,
.@"opaque",
.generic_poison,
.array_u8,
.array_u8_sentinel_0,
.int_signed,
.int_unsigned,
.enum_simple,
=> false,
.single_const_pointer_to_comptime_int,
.type,
.comptime_int,
.comptime_float,
.enum_literal,
.type_info,
=> true,
.var_args_param => unreachable,
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.bound_fn => unreachable,
.array,
.array_sentinel,
.vector,
.pointer,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
=> return requiresComptime(childType(ty)),
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
=> {
var buf: Payload.ElemType = undefined;
return requiresComptime(optionalChild(ty, &buf));
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
for (tuple.types) |field_ty| {
if (requiresComptime(field_ty)) {
return true;
}
}
return false;
},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (struct_obj.requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
struct_obj.requires_comptime = .wip;
for (struct_obj.fields.values()) |field| {
if (requiresComptime(field.ty)) {
struct_obj.requires_comptime = .yes;
return true;
}
}
struct_obj.requires_comptime = .no;
return false;
},
}
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Payload.Union).?.data;
switch (union_obj.requires_comptime) {
.no, .wip => return false,
.yes => return true,
.unknown => {
union_obj.requires_comptime = .wip;
for (union_obj.fields.values()) |field| {
if (requiresComptime(field.ty)) {
union_obj.requires_comptime = .yes;
return true;
}
}
union_obj.requires_comptime = .no;
return false;
},
}
},
.error_union => return requiresComptime(errorUnionPayload(ty)),
.anyframe_T => return ty.castTag(.anyframe_T).?.data.requiresComptime(),
.enum_numbered => return ty.castTag(.enum_numbered).?.data.tag_ty.requiresComptime(),
.enum_full, .enum_nonexhaustive => return ty.cast(Payload.EnumFull).?.data.tag_ty.requiresComptime(),
};
}
pub fn toValue(self: Type, allocator: Allocator) Allocator.Error!Value {
switch (self.tag()) {
.u1 => return Value.initTag(.u1_type),
@ -1652,8 +1512,12 @@ pub const Type = extern union {
}
}
pub fn hasCodeGenBits(self: Type) bool {
return switch (self.tag()) {
/// true if and only if the type takes up space in memory at runtime.
/// There are two reasons a type will return false:
/// * the type is a comptime-only type. For example, the type `type` itself.
/// * the type has only one possible value, making its ABI size 0.
pub fn hasRuntimeBits(ty: Type) bool {
return switch (ty.tag()) {
.u1,
.u8,
.i8,
@ -1682,13 +1546,9 @@ pub const Type = extern union {
.f128,
.bool,
.anyerror,
.single_const_pointer_to_comptime_int,
.const_slice_u8,
.const_slice_u8_sentinel_0,
.array_u8_sentinel_0,
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
.anyerror_void_error_union,
.error_set,
.error_set_single,
@ -1708,100 +1568,12 @@ pub const Type = extern union {
.export_options,
.extern_options,
.@"anyframe",
.anyframe_T,
.anyopaque,
.@"opaque",
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
.pointer,
=> true,
.function => !self.castTag(.function).?.data.is_generic,
.fn_noreturn_no_args,
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
=> true,
.@"struct" => {
const struct_obj = self.castTag(.@"struct").?.data;
if (struct_obj.known_has_bits) {
return true;
}
assert(struct_obj.haveFieldTypes());
for (struct_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
}
},
.enum_full => {
const enum_full = self.castTag(.enum_full).?.data;
return enum_full.fields.count() >= 2;
},
.enum_simple => {
const enum_simple = self.castTag(.enum_simple).?.data;
return enum_simple.fields.count() >= 2;
},
.enum_numbered, .enum_nonexhaustive => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = self.intTagType(&buffer);
return int_tag_ty.hasCodeGenBits();
},
.@"union" => {
const union_obj = self.castTag(.@"union").?.data;
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
}
},
.union_tagged => {
const union_obj = self.castTag(.union_tagged).?.data;
if (union_obj.tag_ty.hasCodeGenBits()) {
return true;
}
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasCodeGenBits())
return true;
} else {
return false;
}
},
.array, .vector => self.elemType().hasCodeGenBits() and self.arrayLen() != 0,
.array_u8 => self.arrayLen() != 0,
.array_sentinel => self.childType().hasCodeGenBits(),
.int_signed, .int_unsigned => self.cast(Payload.Bits).?.data != 0,
.error_union => {
const payload = self.castTag(.error_union).?.data;
return payload.error_set.hasCodeGenBits() or payload.payload.hasCodeGenBits();
},
.tuple => {
const tuple = self.castTag(.tuple).?.data;
for (tuple.types) |ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (ty.hasCodeGenBits()) return true;
}
return false;
},
// These are false because they are comptime-only types.
.single_const_pointer_to_comptime_int,
.void,
.type,
.comptime_int,
@ -1814,8 +1586,109 @@ pub const Type = extern union {
.empty_struct_literal,
.type_info,
.bound_fn,
// These are function *bodies*, not pointers.
// Special exceptions have to be made when emitting functions due to
// this returning false.
.function,
.fn_noreturn_no_args,
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
=> false,
// These types have more than one possible value, so the result is the same as
// asking whether they are comptime-only types.
.anyframe_T,
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
.pointer,
=> !ty.comptimeOnly(),
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (struct_obj.requires_comptime) {
.wip => unreachable,
.yes => return false,
.no => if (struct_obj.known_non_opv) return true,
.unknown => {},
}
assert(struct_obj.haveFieldTypes());
for (struct_obj.fields.values()) |value| {
if (value.ty.hasRuntimeBits())
return true;
} else {
return false;
}
},
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
return enum_full.fields.count() >= 2;
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return enum_simple.fields.count() >= 2;
},
.enum_numbered, .enum_nonexhaustive => {
var buffer: Payload.Bits = undefined;
const int_tag_ty = ty.intTagType(&buffer);
return int_tag_ty.hasRuntimeBits();
},
.@"union" => {
const union_obj = ty.castTag(.@"union").?.data;
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasRuntimeBits())
return true;
} else {
return false;
}
},
.union_tagged => {
const union_obj = ty.castTag(.union_tagged).?.data;
if (union_obj.tag_ty.hasRuntimeBits()) {
return true;
}
assert(union_obj.haveFieldTypes());
for (union_obj.fields.values()) |value| {
if (value.ty.hasRuntimeBits())
return true;
} else {
return false;
}
},
.array, .vector => ty.arrayLen() != 0 and ty.elemType().hasRuntimeBits(),
.array_u8 => ty.arrayLen() != 0,
.array_sentinel => ty.childType().hasRuntimeBits(),
.int_signed, .int_unsigned => ty.cast(Payload.Bits).?.data != 0,
.error_union => {
const payload = ty.castTag(.error_union).?.data;
return payload.error_set.hasRuntimeBits() or payload.payload.hasRuntimeBits();
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
for (tuple.types) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (field_ty.hasRuntimeBits()) return true;
}
return false;
},
.inferred_alloc_const => unreachable,
.inferred_alloc_mut => unreachable,
.var_args_param => unreachable,
@ -1823,6 +1696,24 @@ pub const Type = extern union {
};
}
pub fn isFnOrHasRuntimeBits(ty: Type) bool {
switch (ty.zigTypeTag()) {
.Fn => {
const fn_info = ty.fnInfo();
if (fn_info.is_generic) return false;
if (fn_info.is_var_args) return true;
switch (fn_info.cc) {
// If there was a comptime calling convention, it should also return false here.
.Inline => return false,
else => {},
}
if (fn_info.return_type.comptimeOnly()) return false;
return true;
},
else => return ty.hasRuntimeBits(),
}
}
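
For orientation, a hedged user-level sketch of the distinction `hasRuntimeBits` and `isFnOrHasRuntimeBits` now encode (hypothetical code, not part of the commit): a function *body* type is comptime-only and reports no runtime bits, while a pointer to a function is an ordinary runtime value, which is why codegen sites that may see function bodies switch to `isFnOrHasRuntimeBits`.

const std = @import("std");

fn callee() void {}

test "fn body vs fn pointer" {
    // `fn () void` is the body type (comptime-only under the new model);
    // `*const fn () void` is a runtime function pointer.
    const ptr: *const fn () void = &callee;
    ptr();
    try std.testing.expect(@TypeOf(callee) == fn () void);
}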
pub fn isNoReturn(self: Type) bool {
const definitely_correct_result =
self.tag_if_small_enough != .bound_fn and
@ -1918,12 +1809,13 @@ pub const Type = extern union {
.fn_void_no_args, // represents machine code; not a pointer
.fn_naked_noreturn_no_args, // represents machine code; not a pointer
.fn_ccc_void_no_args, // represents machine code; not a pointer
.function, // represents machine code; not a pointer
=> return switch (target.cpu.arch) {
.arm, .armeb => 4,
.aarch64, .aarch64_32, .aarch64_be => 4,
.riscv64 => 2,
else => 1,
=> return target_util.defaultFunctionAlignment(target),
// represents machine code; not a pointer
.function => {
const alignment = self.castTag(.function).?.data.alignment;
if (alignment != 0) return alignment;
return target_util.defaultFunctionAlignment(target);
},
.i16, .u16 => return 2,
@ -1996,7 +1888,7 @@ pub const Type = extern union {
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) return 1;
if (!child_type.hasRuntimeBits()) return 1;
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr())
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
@ -2006,9 +1898,9 @@ pub const Type = extern union {
.error_union => {
const data = self.castTag(.error_union).?.data;
if (!data.error_set.hasCodeGenBits()) {
if (!data.error_set.hasRuntimeBits()) {
return data.payload.abiAlignment(target);
} else if (!data.payload.hasCodeGenBits()) {
} else if (!data.payload.hasRuntimeBits()) {
return data.error_set.abiAlignment(target);
}
return @maximum(
@ -2028,7 +1920,7 @@ pub const Type = extern union {
if (!is_packed) {
var big_align: u32 = 0;
for (fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.normalAlignment(target);
big_align = @maximum(big_align, field_align);
@ -2042,7 +1934,7 @@ pub const Type = extern union {
var running_bits: u16 = 0;
for (fields.values()) |field| {
if (!field.ty.hasCodeGenBits()) continue;
if (!field.ty.hasRuntimeBits()) continue;
const field_align = field.packedAlignment();
if (field_align == 0) {
@ -2080,7 +1972,7 @@ pub const Type = extern union {
for (tuple.types) |field_ty, i| {
const val = tuple.values[i];
if (val.tag() != .unreachable_value) continue; // comptime field
if (!field_ty.hasCodeGenBits()) continue;
if (!field_ty.hasRuntimeBits()) continue;
const field_align = field_ty.abiAlignment(target);
big_align = @maximum(big_align, field_align);
@ -2123,7 +2015,7 @@ pub const Type = extern union {
}
/// Asserts the type has the ABI size already resolved.
/// Types that return false for hasCodeGenBits() return 0.
/// Types that return false for hasRuntimeBits() return 0.
pub fn abiSize(self: Type, target: Target) u64 {
return switch (self.tag()) {
.fn_noreturn_no_args => unreachable, // represents machine code; not a pointer
@ -2210,24 +2102,8 @@ pub const Type = extern union {
.usize,
.@"anyframe",
.anyframe_T,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.const_slice,
.mut_slice,
=> {
return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2;
},
.const_slice_u8,
.const_slice_u8_sentinel_0,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
.optional_single_const_pointer,
.optional_single_mut_pointer,
=> {
if (!self.elemType().hasCodeGenBits()) return 1;
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
},
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
@ -2239,6 +2115,12 @@ pub const Type = extern union {
.manyptr_const_u8_sentinel_0,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8),
.const_slice,
.mut_slice,
.const_slice_u8,
.const_slice_u8_sentinel_0,
=> return @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
.pointer => switch (self.castTag(.pointer).?.data.size) {
.Slice => @divExact(target.cpu.arch.ptrBitWidth(), 8) * 2,
else => @divExact(target.cpu.arch.ptrBitWidth(), 8),
@ -2276,7 +2158,7 @@ pub const Type = extern union {
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) return 1;
if (!child_type.hasRuntimeBits()) return 1;
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice())
return @divExact(target.cpu.arch.ptrBitWidth(), 8);
@ -2290,11 +2172,11 @@ pub const Type = extern union {
.error_union => {
const data = self.castTag(.error_union).?.data;
if (!data.error_set.hasCodeGenBits() and !data.payload.hasCodeGenBits()) {
if (!data.error_set.hasRuntimeBits() and !data.payload.hasRuntimeBits()) {
return 0;
} else if (!data.error_set.hasCodeGenBits()) {
} else if (!data.error_set.hasRuntimeBits()) {
return data.payload.abiSize(target);
} else if (!data.payload.hasCodeGenBits()) {
} else if (!data.payload.hasRuntimeBits()) {
return data.error_set.abiSize(target);
}
const code_align = abiAlignment(data.error_set, target);
@ -2414,11 +2296,7 @@ pub const Type = extern union {
.optional_single_const_pointer,
.optional_single_mut_pointer,
=> {
if (ty.elemType().hasCodeGenBits()) {
return target.cpu.arch.ptrBitWidth();
} else {
return 1;
}
return target.cpu.arch.ptrBitWidth();
},
.single_const_pointer,
@ -2428,11 +2306,7 @@ pub const Type = extern union {
.c_const_pointer,
.c_mut_pointer,
=> {
if (ty.elemType().hasCodeGenBits()) {
return target.cpu.arch.ptrBitWidth();
} else {
return 0;
}
return target.cpu.arch.ptrBitWidth();
},
.pointer => switch (ty.castTag(.pointer).?.data.size) {
@ -2468,7 +2342,7 @@ pub const Type = extern union {
.optional => {
var buf: Payload.ElemType = undefined;
const child_type = ty.optionalChild(&buf);
if (!child_type.hasCodeGenBits()) return 8;
if (!child_type.hasRuntimeBits()) return 8;
if (child_type.zigTypeTag() == .Pointer and !child_type.isCPtr() and !child_type.isSlice())
return target.cpu.arch.ptrBitWidth();
@ -2482,11 +2356,11 @@ pub const Type = extern union {
.error_union => {
const payload = ty.castTag(.error_union).?.data;
if (!payload.error_set.hasCodeGenBits() and !payload.payload.hasCodeGenBits()) {
if (!payload.error_set.hasRuntimeBits() and !payload.payload.hasRuntimeBits()) {
return 0;
} else if (!payload.error_set.hasCodeGenBits()) {
} else if (!payload.error_set.hasRuntimeBits()) {
return payload.payload.bitSize(target);
} else if (!payload.payload.hasCodeGenBits()) {
} else if (!payload.payload.hasRuntimeBits()) {
return payload.error_set.bitSize(target);
}
@panic("TODO bitSize error union");
@ -2728,7 +2602,7 @@ pub const Type = extern union {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
// optionals of zero sized pointers behave like bools
if (!child_type.hasCodeGenBits()) return false;
if (!child_type.hasRuntimeBits()) return false;
if (child_type.zigTypeTag() != .Pointer) return false;
const info = child_type.ptrInfo().data;
@ -2765,7 +2639,7 @@ pub const Type = extern union {
var buf: Payload.ElemType = undefined;
const child_type = self.optionalChild(&buf);
// optionals of zero sized types behave like bools, not pointers
if (!child_type.hasCodeGenBits()) return false;
if (!child_type.hasRuntimeBits()) return false;
if (child_type.zigTypeTag() != .Pointer) return false;
const info = child_type.ptrInfo().data;
@ -3424,6 +3298,7 @@ pub const Type = extern union {
.comptime_params = undefined,
.return_type = initTag(.noreturn),
.cc = .Unspecified,
.alignment = 0,
.is_var_args = false,
.is_generic = false,
},
@ -3432,6 +3307,7 @@ pub const Type = extern union {
.comptime_params = undefined,
.return_type = initTag(.void),
.cc = .Unspecified,
.alignment = 0,
.is_var_args = false,
.is_generic = false,
},
@ -3440,6 +3316,7 @@ pub const Type = extern union {
.comptime_params = undefined,
.return_type = initTag(.noreturn),
.cc = .Naked,
.alignment = 0,
.is_var_args = false,
.is_generic = false,
},
@ -3448,6 +3325,7 @@ pub const Type = extern union {
.comptime_params = undefined,
.return_type = initTag(.void),
.cc = .C,
.alignment = 0,
.is_var_args = false,
.is_generic = false,
},
@ -3629,7 +3507,7 @@ pub const Type = extern union {
},
.enum_nonexhaustive => {
const tag_ty = ty.castTag(.enum_nonexhaustive).?.data.tag_ty;
if (!tag_ty.hasCodeGenBits()) {
if (!tag_ty.hasRuntimeBits()) {
return Value.zero;
} else {
return null;
@ -3672,6 +3550,167 @@ pub const Type = extern union {
};
}
/// During semantic analysis, instead call `Sema.typeRequiresComptime` which
/// resolves field types rather than asserting they are already resolved.
pub fn comptimeOnly(ty: Type) bool {
return switch (ty.tag()) {
.u1,
.u8,
.i8,
.u16,
.i16,
.u32,
.i32,
.u64,
.i64,
.u128,
.i128,
.usize,
.isize,
.c_short,
.c_ushort,
.c_int,
.c_uint,
.c_long,
.c_ulong,
.c_longlong,
.c_ulonglong,
.c_longdouble,
.f16,
.f32,
.f64,
.f128,
.anyopaque,
.bool,
.void,
.anyerror,
.noreturn,
.@"anyframe",
.@"null",
.@"undefined",
.atomic_order,
.atomic_rmw_op,
.calling_convention,
.address_space,
.float_mode,
.reduce_op,
.call_options,
.prefetch_options,
.export_options,
.extern_options,
.manyptr_u8,
.manyptr_const_u8,
.manyptr_const_u8_sentinel_0,
.const_slice_u8,
.const_slice_u8_sentinel_0,
.anyerror_void_error_union,
.empty_struct_literal,
.empty_struct,
.error_set,
.error_set_single,
.error_set_inferred,
.error_set_merged,
.@"opaque",
.generic_poison,
.array_u8,
.array_u8_sentinel_0,
.int_signed,
.int_unsigned,
.enum_simple,
=> false,
.single_const_pointer_to_comptime_int,
.type,
.comptime_int,
.comptime_float,
.enum_literal,
.type_info,
// These are function bodies, not function pointers.
.fn_noreturn_no_args,
.fn_void_no_args,
.fn_naked_noreturn_no_args,
.fn_ccc_void_no_args,
.function,
=> true,
.var_args_param => unreachable,
.inferred_alloc_mut => unreachable,
.inferred_alloc_const => unreachable,
.bound_fn => unreachable,
.array,
.array_sentinel,
.vector,
=> return ty.childType().comptimeOnly(),
.pointer,
.single_const_pointer,
.single_mut_pointer,
.many_const_pointer,
.many_mut_pointer,
.c_const_pointer,
.c_mut_pointer,
.const_slice,
.mut_slice,
=> {
const child_ty = ty.childType();
if (child_ty.zigTypeTag() == .Fn) {
return false;
} else {
return child_ty.comptimeOnly();
}
},
.optional,
.optional_single_mut_pointer,
.optional_single_const_pointer,
=> {
var buf: Type.Payload.ElemType = undefined;
return ty.optionalChild(&buf).comptimeOnly();
},
.tuple => {
const tuple = ty.castTag(.tuple).?.data;
for (tuple.types) |field_ty| {
if (field_ty.comptimeOnly()) return true;
}
return false;
},
.@"struct" => {
const struct_obj = ty.castTag(.@"struct").?.data;
switch (struct_obj.requires_comptime) {
.wip, .unknown => unreachable, // This function asserts types already resolved.
.no => return false,
.yes => return true,
}
},
.@"union", .union_tagged => {
const union_obj = ty.cast(Type.Payload.Union).?.data;
switch (union_obj.requires_comptime) {
.wip, .unknown => unreachable, // This function asserts types already resolved.
.no => return false,
.yes => return true,
}
},
.error_union => return ty.errorUnionPayload().comptimeOnly(),
.anyframe_T => {
const child_ty = ty.castTag(.anyframe_T).?.data;
return child_ty.comptimeOnly();
},
.enum_numbered => {
const tag_ty = ty.castTag(.enum_numbered).?.data.tag_ty;
return tag_ty.comptimeOnly();
},
.enum_full, .enum_nonexhaustive => {
const tag_ty = ty.cast(Type.Payload.EnumFull).?.data.tag_ty;
return tag_ty.comptimeOnly();
},
};
}
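
A rough illustration of the pointer special case above, using hypothetical declarations under the new semantics: the body type `fn () void` is comptime-only, but a pointer whose child is a function body stays a runtime type, so only the first struct below is restricted to comptime.

// Comptime-only field: stores a function *body* type.
const ComptimeOnly = struct { f: fn () void };
// Runtime-capable field: stores a function *pointer*.
const Runtime = struct { f: *const fn () void };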
pub fn isIndexable(ty: Type) bool {
return switch (ty.zigTypeTag()) {
.Array, .Vector => true,
@ -3949,7 +3988,7 @@ pub const Type = extern union {
const field = it.struct_obj.fields.values()[it.field];
defer it.field += 1;
if (!field.ty.hasCodeGenBits()) {
if (!field.ty.hasRuntimeBits()) {
return PackedFieldOffset{
.field = it.field,
.offset = it.offset,
@ -4018,7 +4057,7 @@ pub const Type = extern union {
const field = it.struct_obj.fields.values()[it.field];
defer it.field += 1;
if (!field.ty.hasCodeGenBits())
if (!field.ty.hasRuntimeBits())
return FieldOffset{ .field = it.field, .offset = it.offset };
const field_align = field.normalAlignment(it.target);
@ -4572,6 +4611,8 @@ pub const Type = extern union {
param_types: []Type,
comptime_params: [*]bool,
return_type: Type,
/// If zero, the target's default function code alignment is used.
alignment: u32,
cc: std.builtin.CallingConvention,
is_var_args: bool,
is_generic: bool,

View File

@ -1225,7 +1225,7 @@ pub const Value = extern union {
/// Asserts the value is an integer and not undefined.
/// Returns the number of bits required to represent the value in two's complement form.
pub fn intBitCountTwosComp(self: Value) usize {
pub fn intBitCountTwosComp(self: Value, target: Target) usize {
switch (self.tag()) {
.zero,
.bool_false,
@ -1244,6 +1244,15 @@ pub const Value = extern union {
.int_big_positive => return self.castTag(.int_big_positive).?.asBigInt().bitCountTwosComp(),
.int_big_negative => return self.castTag(.int_big_negative).?.asBigInt().bitCountTwosComp(),
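// Pointer-backed values below (decl references, functions, variables) have no
// comptime-known integer value; the conservative bit count is presumably the
// target's pointer width, which is what this prong returns.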
.decl_ref_mut,
.extern_fn,
.decl_ref,
.function,
.variable,
.eu_payload_ptr,
.opt_payload_ptr,
=> return target.cpu.arch.ptrBitWidth(),
else => {
var buffer: BigIntSpace = undefined;
return self.toBigInt(&buffer).bitCountTwosComp();
@ -1333,6 +1342,20 @@ pub const Value = extern union {
return true;
},
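// A decl reference or function pointer is some runtime address of pointer
// width, so it only fits if the integer type can hold any ptr_bits-wide
// value (a signed type needs one extra bit for the sign).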
.decl_ref_mut,
.extern_fn,
.decl_ref,
.function,
.variable,
=> {
const info = ty.intInfo(target);
const ptr_bits = target.cpu.arch.ptrBitWidth();
return switch (info.signedness) {
.signed => info.bits > ptr_bits,
.unsigned => info.bits >= ptr_bits,
};
},
else => unreachable,
}
}
@ -1397,6 +1420,11 @@ pub const Value = extern union {
.one,
.bool_true,
.decl_ref,
.decl_ref_mut,
.extern_fn,
.function,
.variable,
=> .gt,
.int_u64 => std.math.order(lhs.castTag(.int_u64).?.data, 0),
@ -1417,10 +1445,18 @@ pub const Value = extern union {
pub fn order(lhs: Value, rhs: Value) std.math.Order {
const lhs_tag = lhs.tag();
const rhs_tag = rhs.tag();
const lhs_is_zero = lhs_tag == .zero;
const rhs_is_zero = rhs_tag == .zero;
if (lhs_is_zero) return rhs.orderAgainstZero().invert();
if (rhs_is_zero) return lhs.orderAgainstZero();
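// First decide what we can from each operand's ordering against zero; this
// also covers pointer-backed values (decl refs, functions), which have no
// concrete integer value but are known to order greater than zero.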
const lhs_against_zero = lhs.orderAgainstZero();
const rhs_against_zero = rhs.orderAgainstZero();
switch (lhs_against_zero) {
.lt => if (rhs_against_zero != .lt) return .lt,
.eq => return rhs_against_zero.invert(),
.gt => {},
}
switch (rhs_against_zero) {
.lt => if (lhs_against_zero != .lt) return .gt,
.eq => return lhs_against_zero,
.gt => {},
}
const lhs_float = lhs.isFloat();
const rhs_float = rhs.isFloat();
@ -1451,6 +1487,27 @@ pub const Value = extern union {
/// Asserts the value is comparable. Does not take a type parameter because it supports
/// comparisons between heterogeneous types.
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool {
if (lhs.pointerDecl()) |lhs_decl| {
if (rhs.pointerDecl()) |rhs_decl| {
switch (op) {
.eq => return lhs_decl == rhs_decl,
.neq => return lhs_decl != rhs_decl,
else => {},
}
} else {
switch (op) {
.eq => return false,
.neq => return true,
else => {},
}
}
} else if (rhs.pointerDecl()) |_| {
switch (op) {
.eq => return false,
.neq => return true,
else => {},
}
}
return order(lhs, rhs).compare(op);
}
@ -1520,6 +1577,11 @@ pub const Value = extern union {
}
return true;
},
.function => {
const a_payload = a.castTag(.function).?.data;
const b_payload = b.castTag(.function).?.data;
return a_payload == b_payload;
},
else => {},
}
} else if (a_tag == .null_value or b_tag == .null_value) {
@ -1573,6 +1635,7 @@ pub const Value = extern union {
pub fn hash(val: Value, ty: Type, hasher: *std.hash.Wyhash) void {
const zig_ty_tag = ty.zigTypeTag();
std.hash.autoHash(hasher, zig_ty_tag);
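// Undefined values hash by type alone, so any two undefined values of the
// same type hash identically.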
if (val.isUndef()) return;
switch (zig_ty_tag) {
.BoundFn => unreachable, // TODO remove this from the language
@ -1694,7 +1757,8 @@ pub const Value = extern union {
union_obj.val.hash(active_field_ty, hasher);
},
.Fn => {
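// A function body value is identified by its owning Decl, so hashing
// owner_decl is sufficient here.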
@panic("TODO implement hashing function values");
const func = val.castTag(.function).?.data;
return std.hash.autoHash(hasher, func.owner_decl);
},
.Frame => {
@panic("TODO implement hashing frame values");
@ -1703,7 +1767,8 @@ pub const Value = extern union {
@panic("TODO implement hashing anyframe values");
},
.EnumLiteral => {
@panic("TODO implement hashing enum literal values");
const bytes = val.castTag(.enum_literal).?.data;
hasher.update(bytes);
},
}
}

View File

@ -2,22 +2,23 @@ const builtin = @import("builtin");
test {
// Tests that pass for stage1, llvm backend, C backend, wasm backend, arm backend and x86_64 backend.
_ = @import("behavior/align.zig");
_ = @import("behavior/array.zig");
_ = @import("behavior/bool.zig");
_ = @import("behavior/bugs/655.zig");
_ = @import("behavior/bugs/679.zig");
_ = @import("behavior/bugs/1111.zig");
_ = @import("behavior/bugs/2346.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
_ = @import("behavior/bugs/679.zig");
_ = @import("behavior/bugs/6850.zig");
_ = @import("behavior/cast.zig");
_ = @import("behavior/comptime_memory.zig");
_ = @import("behavior/fn_in_struct_in_comptime.zig");
_ = @import("behavior/hasdecl.zig");
_ = @import("behavior/hasfield.zig");
_ = @import("behavior/prefetch.zig");
_ = @import("behavior/pub_enum.zig");
_ = @import("behavior/slice_sentinel_comptime.zig");
_ = @import("behavior/type.zig");
_ = @import("behavior/bugs/655.zig");
_ = @import("behavior/bool.zig");
_ = @import("behavior/align.zig");
_ = @import("behavior/array.zig");
_ = @import("behavior/cast.zig");
if (builtin.zig_backend != .stage2_arm and builtin.zig_backend != .stage2_x86_64) {
// Tests that pass for stage1, llvm backend, C backend, wasm backend.
@ -113,11 +114,7 @@ test {
_ = @import("behavior/switch.zig");
_ = @import("behavior/widening.zig");
if (builtin.zig_backend != .stage1) {
// When all comptime_memory.zig tests pass, #9646 can be closed.
// _ = @import("behavior/comptime_memory.zig");
_ = @import("behavior/slice_stage2.zig");
} else {
if (builtin.zig_backend == .stage1) {
// Tests that only pass for the stage1 backend.
_ = @import("behavior/align_stage1.zig");
if (builtin.os.tag != .wasi) {

View File

@ -165,8 +165,9 @@ fn give() anyerror!u128 {
}
test "page aligned array on stack" {
if (builtin.zig_backend == .stage2_x86_64 or builtin.zig_backend == .stage2_arm or
builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
// Use a large alignment value so the test cannot pass by accident.
var array align(0x1000) = [_]u8{ 1, 2, 3, 4, 5, 6, 7, 8 };
@ -181,3 +182,25 @@ test "page aligned array on stack" {
try expect(number1 == 42);
try expect(number2 == 43);
}
fn derp() align(@sizeOf(usize) * 2) i32 {
return 1234;
}
fn noop1() align(1) void {}
fn noop4() align(4) void {}
test "function alignment" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
// function alignment is a compile error on wasm32/wasm64
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
try expect(derp() == 1234);
try expect(@TypeOf(noop1) == fn () align(1) void);
try expect(@TypeOf(noop4) == fn () align(4) void);
noop1();
noop4();
}

View File

@ -3,23 +3,6 @@ const expect = std.testing.expect;
const builtin = @import("builtin");
const native_arch = builtin.target.cpu.arch;
fn derp() align(@sizeOf(usize) * 2) i32 {
return 1234;
}
fn noop1() align(1) void {}
fn noop4() align(4) void {}
test "function alignment" {
// function alignment is a compile error on wasm32/wasm64
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;
try expect(derp() == 1234);
try expect(@TypeOf(noop1) == fn () align(1) void);
try expect(@TypeOf(noop4) == fn () align(4) void);
noop1();
noop4();
}
test "implicitly decreasing fn alignment" {
// function alignment is a compile error on wasm32/wasm64
if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;

View File

@ -259,6 +259,8 @@ fn fB() []const u8 {
}
test "call function pointer in struct" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try expect(mem.eql(u8, f3(true), "a"));
try expect(mem.eql(u8, f3(false), "b"));
}
@ -276,7 +278,7 @@ fn f3(x: bool) []const u8 {
}
const FnPtrWrapper = struct {
fn_ptr: fn () []const u8,
fn_ptr: *const fn () []const u8,
};
test "const ptr from var variable" {

View File

@ -205,9 +205,11 @@ test "multiline string literal is null terminated" {
}
test "self reference through fn ptr field" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const S = struct {
const A = struct {
f: fn (A) u8,
f: *const fn (A) u8,
};
fn foo(a: A) u8 {

View File

@ -2,7 +2,7 @@ const A = struct {
b: B,
};
const B = fn (A) void;
const B = *const fn (A) void;
test "allow these dependencies" {
var a: A = undefined;

View File

@ -1,9 +1,10 @@
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const State = struct {
const Self = @This();
enter: fn (previous: ?Self) void,
enter: *const fn (previous: ?Self) void,
};
fn prev(p: ?State) void {
@ -11,6 +12,8 @@ fn prev(p: ?State) void {
}
test "zig test crash" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
var global: State = undefined;
global.enter = prev;
global.enter(null);

View File

@ -47,12 +47,14 @@ fn incrementVoidPtrArray(array: ?*anyopaque, len: usize) void {
}
test "compile time int to ptr of function" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO
try foobar(FUNCTION_CONSTANT);
}
pub const FUNCTION_CONSTANT = @intToPtr(PFN_void, maxInt(usize));
pub const PFN_void = fn (*anyopaque) callconv(.C) void;
pub const PFN_void = *const fn (*anyopaque) callconv(.C) void;
fn foobar(func: PFN_void) !void {
try std.testing.expect(@ptrToInt(func) == maxInt(usize));
@ -153,8 +155,12 @@ test "implicit cast *[0]T to E![]const u8" {
}
var global_array: [4]u8 = undefined;
test "cast from array reference to fn" {
const f = @ptrCast(fn () callconv(.C) void, &global_array);
test "cast from array reference to fn: comptime fn ptr" {
const f = @ptrCast(*const fn () callconv(.C) void, &global_array);
try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}
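// Same cast as above, but through a runtime-known (var) function pointer,
// which is representable because *const fn values are ordinary runtime values.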
test "cast from array reference to fn: runtime fn ptr" {
var f = @ptrCast(*const fn () callconv(.C) void, &global_array);
try expect(@ptrToInt(f) == @ptrToInt(&global_array));
}

View File

@ -1,8 +1,15 @@
const endian = @import("builtin").cpu.arch.endian();
const builtin = @import("builtin");
const endian = builtin.cpu.arch.endian();
const testing = @import("std").testing;
const ptr_size = @sizeOf(usize);
test "type pun signed and unsigned as single pointer" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
var x: u32 = 0;
const y = @ptrCast(*i32, &x);
@ -12,6 +19,12 @@ test "type pun signed and unsigned as single pointer" {
}
test "type pun signed and unsigned as many pointer" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
var x: u32 = 0;
const y = @ptrCast([*]i32, &x);
@ -21,6 +34,12 @@ test "type pun signed and unsigned as many pointer" {
}
test "type pun signed and unsigned as array pointer" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
var x: u32 = 0;
const y = @ptrCast(*[1]i32, &x);
@ -30,6 +49,12 @@ test "type pun signed and unsigned as array pointer" {
}
test "type pun signed and unsigned as offset many pointer" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
var x: u32 = 0;
var y = @ptrCast([*]i32, &x);
@ -40,6 +65,12 @@ test "type pun signed and unsigned as offset many pointer" {
}
test "type pun signed and unsigned as array pointer" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
var x: u32 = 0;
const y = @ptrCast([*]i32, &x) - 10;
@ -50,6 +81,12 @@ test "type pun signed and unsigned as array pointer" {
}
test "type pun value and struct" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
const StructOfU32 = extern struct { x: u32 };
var inst: StructOfU32 = .{ .x = 0 };
@ -64,6 +101,12 @@ fn bigToNativeEndian(comptime T: type, v: T) T {
return if (endian == .Big) v else @byteSwap(T, v);
}
test "type pun endianness" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
const StructOfBytes = extern struct { x: [4]u8 };
var inst: StructOfBytes = .{ .x = [4]u8{ 0, 0, 0, 0 } };
@ -155,6 +198,12 @@ fn doTypePunBitsTest(as_bits: *Bits) !void {
}
test "type pun bits" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
var v: u32 = undefined;
try doTypePunBitsTest(@ptrCast(*Bits, &v));
@ -167,6 +216,12 @@ const imports = struct {
// Make sure lazy values work on their own, before getting into more complex tests
test "basic pointer preservation" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
const lazy_address = @ptrToInt(&imports.global_u32);
try testing.expectEqual(@ptrToInt(&imports.global_u32), lazy_address);
@ -175,6 +230,12 @@ test "basic pointer preservation" {
}
test "byte copy preserves linker value" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
const ct_value = comptime blk: {
const lazy = &imports.global_u32;
var result: *u32 = undefined;
@ -193,6 +254,12 @@ test "byte copy preserves linker value" {
}
test "unordered byte copy preserves linker value" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
const ct_value = comptime blk: {
const lazy = &imports.global_u32;
var result: *u32 = undefined;
@ -212,6 +279,12 @@ test "unordered byte copy preserves linker value" {
}
test "shuffle chunks of linker value" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
const lazy_address = @ptrToInt(&imports.global_u32);
const shuffled1_rt = shuffle(lazy_address, Bits, ShuffledBits);
const unshuffled1_rt = shuffle(shuffled1_rt, ShuffledBits, Bits);
@ -225,6 +298,12 @@ test "shuffle chunks of linker value" {
}
test "dance on linker values" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
var arr: [2]usize = undefined;
arr[0] = @ptrToInt(&imports.global_u32);
@ -251,6 +330,12 @@ test "dance on linker values" {
}
test "offset array ptr by element size" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
const VirtualStruct = struct { x: u32 };
var arr: [4]VirtualStruct = .{
@ -273,6 +358,12 @@ test "offset array ptr by element size" {
}
test "offset instance by field size" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
const VirtualStruct = struct { x: u32, y: u32, z: u32, w: u32 };
var inst = VirtualStruct{ .x = 0, .y = 1, .z = 2, .w = 3 };
@ -293,6 +384,12 @@ test "offset instance by field size" {
}
test "offset field ptr by enclosing array element size" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend != .stage1) {
// TODO https://github.com/ziglang/zig/issues/9646
return error.SkipZigTest;
}
comptime {
const VirtualStruct = struct { x: u32 };
var arr: [4]VirtualStruct = .{

View File

@ -1,3 +1,4 @@
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const expectError = std.testing.expectError;

View File

@ -57,7 +57,7 @@ test "assign inline fn to const variable" {
inline fn inlineFn() void {}
fn outer(y: u32) fn (u32) u32 {
fn outer(y: u32) *const fn (u32) u32 {
const Y = @TypeOf(y);
const st = struct {
fn get(z: u32) u32 {
@ -68,6 +68,8 @@ fn outer(y: u32) fn (u32) u32 {
}
test "return inner function which references comptime variable of outer function" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
var func = outer(10);
try expect(func(3) == 7);
}
@ -92,6 +94,8 @@ test "discard the result of a function that returns a struct" {
}
test "inline function call that calls optional function pointer, return pointer at callsite interacts correctly with callsite return type" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const S = struct {
field: u32,
@ -113,7 +117,7 @@ test "inline function call that calls optional function pointer, return pointer
return bar2.?();
}
var bar2: ?fn () u32 = null;
var bar2: ?*const fn () u32 = null;
fn actualFn() u32 {
return 1234;
@ -135,8 +139,10 @@ fn fnWithUnreachable() noreturn {
}
test "extern struct with stdcallcc fn pointer" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const S = extern struct {
ptr: fn () callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32,
ptr: *const fn () callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32,
fn foo() callconv(if (builtin.target.cpu.arch == .i386) .Stdcall else .C) i32 {
return 1234;

View File

@ -1,14 +1,16 @@
const builtin = @import("builtin");
test "casting random address to function pointer" {
test "casting integer address to function pointer" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) return error.SkipZigTest; // TODO
randomAddressToFunction();
comptime randomAddressToFunction();
addressToFunction();
comptime addressToFunction();
}
fn randomAddressToFunction() void {
fn addressToFunction() void {
var addr: usize = 0xdeadbeef;
_ = @intToPtr(fn () void, addr);
_ = @intToPtr(*const fn () void, addr);
}
test "mutate through ptr initialized with constant intToPtr value" {

View File

@ -1,8 +1,10 @@
const expect = @import("std").testing.expect;
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const HasFuncs = struct {
state: u32,
func_field: fn (u32) u32,
func_field: *const fn (u32) u32,
fn inc(self: *HasFuncs) void {
self.state += 1;
@ -25,6 +27,8 @@ const HasFuncs = struct {
};
test "standard field calls" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try expect(HasFuncs.one(0) == 1);
try expect(HasFuncs.two(0) == 2);
@ -64,6 +68,8 @@ test "standard field calls" {
}
test "@field field calls" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try expect(@field(HasFuncs, "one")(0) == 1);
try expect(@field(HasFuncs, "two")(0) == 2);

View File

@ -1,3 +1,4 @@
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const expectEqualSlices = std.testing.expectEqualSlices;
@ -166,3 +167,15 @@ test "slicing zero length array" {
try expect(mem.eql(u8, s1, ""));
try expect(mem.eql(u32, s2, &[_]u32{}));
}
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
try expect(@ptrToInt(x) == 0x1000);
try expect(x.len == 0x500);
try expect(@ptrToInt(y) == 0x1400);
try expect(y.len == 0x400);
}

View File

@ -1,12 +0,0 @@
const std = @import("std");
const expect = std.testing.expect;
const x = @intToPtr([*]i32, 0x1000)[0..0x500];
const y = x[0x100..];
test "compile time slice of pointer to hard coded address" {
try expect(@ptrToInt(x) == 0x1000);
try expect(x.len == 0x500);
try expect(@ptrToInt(y) == 0x1400);
try expect(y.len == 0x400);
}

View File

@ -1,3 +1,4 @@
const builtin = @import("builtin");
const std = @import("std");
const expect = std.testing.expect;
const expectEqual = std.testing.expectEqual;
@ -166,8 +167,10 @@ test "union with specified enum tag" {
}
test "packed union generates correctly aligned LLVM type" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
const U = packed union {
f1: fn () error{TestUnexpectedResult}!void,
f1: *const fn () error{TestUnexpectedResult}!void,
f2: u32,
};
var foo = [_]U{

View File

@ -751,7 +751,7 @@ pub fn addCases(ctx: *TestContext) !void {
{
var case = ctx.exe("function pointers", linux_arm);
case.addCompareOutput(
\\const PrintFn = fn () void;
\\const PrintFn = *const fn () void;
\\
\\pub fn main() void {
\\ var printFn: PrintFn = stopSayingThat;