Merge pull request #15569 from ziglang/intern-pool-3

Use InternPool for all types and constant values
Andrew Kelley 2023-06-12 22:50:50 -07:00 committed by GitHub
commit 529ef75101
76 changed files with 29776 additions and 28437 deletions

View File

@ -30,6 +30,7 @@ pub fn build(b: *std.Build) !void {
const test_step = b.step("test", "Run all the tests");
const skip_install_lib_files = b.option(bool, "no-lib", "skip copying of lib/ files and langref to installation prefix. Useful for development") orelse false;
const skip_install_langref = b.option(bool, "no-langref", "skip copying of langref to the installation prefix") orelse skip_install_lib_files;
const no_bin = b.option(bool, "no-bin", "skip emitting compiler binary") orelse false;
const docgen_exe = b.addExecutable(.{
.name = "docgen",
@ -166,6 +167,7 @@ pub fn build(b: *std.Build) !void {
exe.pie = pie;
exe.sanitize_thread = sanitize_thread;
exe.entitlements = entitlements;
if (no_bin) exe.emit_bin = .no_emit;
exe.build_id = b.option(
std.Build.Step.Compile.BuildId,
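With this option wired up, `zig build -Dno-bin` runs full semantic analysis of the compiler without emitting a binary, which keeps iteration fast on a change of this size.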

View File

@ -10176,7 +10176,7 @@ pub fn main() void {
{#header_open|Invalid Error Set Cast#}
<p>At compile-time:</p>
{#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{A,C}'#}
{#code_begin|test_err|test_comptime_invalid_error_set_cast|'error.B' not a member of error set 'error{C,A}'#}
const Set1 = error{
A,
B,

View File

@ -459,6 +459,28 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
/// Resizes list if `self.capacity` is not large enough.
pub fn addManyAsSlice(self: *Self, n: usize) Allocator.Error![]T {
const prev_len = self.items.len;
try self.resize(self.items.len + n);
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// Asserts that there is already space for the new item without allocating more.
/// **Does not** invalidate element pointers.
/// The returned pointer becomes invalid when the list is resized.
pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Remove and return the last element from the list.
/// Asserts the list has at least one item.
/// Invalidates pointers to the removed element.
@ -949,6 +971,28 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
/// Resizes list if `self.capacity` is not large enough.
pub fn addManyAsSlice(self: *Self, allocator: Allocator, n: usize) Allocator.Error![]T {
const prev_len = self.items.len;
try self.resize(allocator, self.items.len + n);
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// Asserts that there is already space for the new item without allocating more.
/// **Does not** invalidate element pointers.
/// The returned pointer becomes invalid when the list is resized.
pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
assert(self.items.len + n <= self.capacity);
const prev_len = self.items.len;
self.items.len += n;
return self.items[prev_len..][0..n];
}
/// Remove and return the last element from the list.
/// Asserts the list has at least one item.
/// Invalidates pointers to last element.
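A minimal usage sketch of the new API (assuming the 0.11-dev std of this commit's era; the test name and values are illustrative):

const std = @import("std");

test "addManyAsSlice returns the newly added region" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();
    // Grows the list by 4; the returned slice aliases list.items[0..4]
    // and its contents start out undefined.
    const dest = try list.addManyAsSlice(4);
    @memcpy(dest, "abcd");
    try std.testing.expectEqualSlices(u8, "abcd", list.items);
}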

View File

@ -143,7 +143,7 @@ pub const Mode = OptimizeMode;
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const CallingConvention = enum {
pub const CallingConvention = enum(u8) {
/// This is the default Zig calling convention used when not using `export` on `fn`
/// and no other calling convention is specified.
Unspecified,
@ -190,7 +190,7 @@ pub const CallingConvention = enum {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const AddressSpace = enum {
pub const AddressSpace = enum(u5) {
generic,
gs,
fs,
@ -283,7 +283,7 @@ pub const Type = union(enum) {
/// This data structure is used by the Zig language code generation and
/// therefore must be kept in sync with the compiler implementation.
pub const Size = enum {
pub const Size = enum(u2) {
One,
Many,
Slice,
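The explicit tag types matter because these enums are now encoded directly into InternPool keys, so their integer representation has to be pinned. A hedged sketch of what the annotations guarantee (the asserts are illustrative, not part of the commit):

const std = @import("std");

comptime {
    // enum(u8), enum(u5), and enum(u2) fix the tag integer regardless of
    // how many fields each enum happens to have.
    std.debug.assert(@bitSizeOf(std.builtin.CallingConvention) == 8);
    std.debug.assert(@bitSizeOf(std.builtin.AddressSpace) == 5);
    std.debug.assert(@bitSizeOf(std.builtin.Type.Pointer.Size) == 2);
}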

View File

@ -530,7 +530,7 @@ pub const ChildProcess = struct {
// can fail between fork() and execve().
// Therefore, we do all the allocation for the execve() before the fork().
// This means we must do the null-termination of argv and env vars here.
const argv_buf = try arena.allocSentinel(?[*:0]u8, self.argv.len, null);
const argv_buf = try arena.allocSentinel(?[*:0]const u8, self.argv.len, null);
for (self.argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
const envp = m: {
@ -542,7 +542,7 @@ pub const ChildProcess = struct {
} else if (builtin.output_mode == .Exe) {
// Then we have Zig start code and this works.
// TODO type-safety for null-termination of `os.environ`.
break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr);
break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr);
} else {
// TODO come up with a solution for this.
@compileError("missing std lib enhancement: ChildProcess implementation has no way to collect the environment variables to forward to the child process");

View File

@ -1256,10 +1256,8 @@ fn limitedOverlapCopy(frag: []u8, in: usize) void {
// A single, non-overlapping memcpy suffices.
@memcpy(frag[0..first.len], first);
} else {
// Need two memcpy calls because one alone would overlap.
@memcpy(frag[0..in], first[0..in]);
const leftover = first.len - in;
@memcpy(frag[in..][0..leftover], first[in..][0..leftover]);
// One memcpy call would overlap, so just do this instead.
std.mem.copyForwards(u8, frag, first);
}
}
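std.mem.copyForwards copies from low to high addresses, which is safe precisely when the destination starts at or before the source, as in this overlap case. A small sketch (not from the commit) of that guarantee:

const std = @import("std");

test "forward copy handles dest-before-src overlap" {
    var buf = "abcdef".*;
    // Each read happens before the write that would clobber it.
    std.mem.copyForwards(u8, buf[0..4], buf[2..6]);
    try std.testing.expectEqualSlices(u8, "cdefef", &buf);
}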

View File

@ -936,6 +936,7 @@ pub const DwarfInfo = struct {
const ranges_val = compile_unit.die.getAttr(AT.ranges) orelse continue;
const ranges_offset = switch (ranges_val.*) {
.SecOffset => |off| off,
.Const => |c| try c.asUnsignedLe(),
.RangeListOffset => |idx| off: {
if (compile_unit.is_64) {
const offset_loc = @intCast(usize, compile_unit.rnglists_base + 8 * idx);

View File

@ -36,6 +36,20 @@ const xxhash = @import("hash/xxhash.zig");
pub const XxHash64 = xxhash.XxHash64;
pub const XxHash32 = xxhash.XxHash32;
/// This is handy if you have a u32 and want a u32 and don't want to take a
/// detour through many layers of abstraction elsewhere in the std.hash
/// namespace.
/// Copied from https://nullprogram.com/blog/2018/07/31/
pub fn uint32(input: u32) u32 {
var x: u32 = input;
x ^= x >> 16;
x *%= 0x7feb352d;
x ^= x >> 15;
x *%= 0x846ca68b;
x ^= x >> 16;
return x;
}
test {
_ = adler;
_ = auto_hash;
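Each step of the new mixer (xor-shift, multiply by an odd constant) is invertible on u32, so the function is a bijection. A hedged property sketch:

const std = @import("std");

test "hash.uint32 is deterministic and injective" {
    try std.testing.expectEqual(std.hash.uint32(42), std.hash.uint32(42));
    // A bijection cannot map two distinct inputs to the same output.
    try std.testing.expect(std.hash.uint32(1) != std.hash.uint32(2));
}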

View File

@ -91,15 +91,21 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
// Help the optimizer see that hashing an int is easy by inlining!
// TODO Check if the situation is better after #561 is resolved.
.Int => {
if (comptime meta.trait.hasUniqueRepresentation(Key)) {
@call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) });
} else {
// Take only the part containing the key value, the remaining
// bytes are undefined and must not be hashed!
const byte_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(Key), 8) catch unreachable;
@call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key)[0..byte_size] });
}
.Int => |int| switch (int.signedness) {
.signed => hash(hasher, @bitCast(@Type(.{ .Int = .{
.bits = int.bits,
.signedness = .unsigned,
} }), key), strat),
.unsigned => {
if (comptime meta.trait.hasUniqueRepresentation(Key)) {
@call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key) });
} else {
// Take only the part containing the key value, the remaining
// bytes are undefined and must not be hashed!
const byte_size = comptime std.math.divCeil(comptime_int, @bitSizeOf(Key), 8) catch unreachable;
@call(.always_inline, Hasher.update, .{ hasher, std.mem.asBytes(&key)[0..byte_size] });
}
},
},
.Bool => hash(hasher, @boolToInt(key), strat),
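The new `.signed` arm means a signed key hashes exactly like its unsigned bit pattern. A sketch using std.hash.autoHash (the two-argument @bitCast matches this commit's era):

const std = @import("std");

test "signed key hashes like its unsigned twin" {
    var h1 = std.hash.Wyhash.init(0);
    std.hash.autoHash(&h1, @as(i32, -1));
    var h2 = std.hash.Wyhash.init(0);
    std.hash.autoHash(&h2, @bitCast(u32, @as(i32, -1)));
    try std.testing.expectEqual(h1.final(), h2.final());
}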

View File

@ -2158,6 +2158,9 @@ pub const Const = struct {
pub fn to(self: Const, comptime T: type) ConvertError!T {
switch (@typeInfo(T)) {
.Int => |info| {
// Make sure -0 is handled correctly.
if (self.eqZero()) return 0;
const UT = std.meta.Int(.unsigned, info.bits);
if (!self.fitsInTwosComp(info.signedness, info.bits)) {
@ -2509,7 +2512,7 @@ pub const Const = struct {
return total_limb_lz + bits - total_limb_bits;
}
pub fn ctz(a: Const) Limb {
pub fn ctz(a: Const, bits: Limb) Limb {
// Limbs are stored in little-endian order.
var result: Limb = 0;
for (a.limbs) |limb| {
@ -2517,7 +2520,7 @@ pub const Const = struct {
result += limb_tz;
if (limb_tz != @sizeOf(Limb) * 8) break;
}
return result;
return @min(result, bits);
}
};

View File

@ -4226,7 +4226,8 @@ pub fn alignForwardLog2(addr: usize, log2_alignment: u8) usize {
/// The alignment must be a power of 2 and greater than 0.
/// Asserts that rounding up the address does not cause integer overflow.
pub fn alignForwardGeneric(comptime T: type, addr: T, alignment: T) T {
assert(isValidAlignGeneric(T, alignment));
assert(alignment > 0);
assert(std.math.isPowerOfTwo(alignment));
return alignBackwardGeneric(T, addr + (alignment - 1), alignment);
}

View File

@ -1131,7 +1131,7 @@ pub fn execve(
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const argv_buf = try arena.allocSentinel(?[*:0]u8, argv.len, null);
const argv_buf = try arena.allocSentinel(?[*:0]const u8, argv.len, null);
for (argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
const envp = m: {
@ -1143,7 +1143,7 @@ pub fn execve(
} else if (builtin.output_mode == .Exe) {
// Then we have Zig start code and this works.
// TODO type-safety for null-termination of `os.environ`.
break :m @ptrCast([*:null]?[*:0]u8, os.environ.ptr);
break :m @ptrCast([*:null]const ?[*:0]const u8, os.environ.ptr);
} else {
// TODO come up with a solution for this.
@compileError("missing std lib enhancement: std.process.execv implementation has no way to collect the environment variables to forward to the child process");
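Both call sites now build argv as a null-terminated array of const C strings. A sketch of the same construction in isolation (values are illustrative):

const std = @import("std");

test "argv buffer shape used by execve" {
    var arena_state = std.heap.ArenaAllocator.init(std.testing.allocator);
    defer arena_state.deinit();
    const arena = arena_state.allocator();
    const argv = [_][]const u8{ "echo", "hi" };
    const argv_buf = try arena.allocSentinel(?[*:0]const u8, argv.len, null);
    for (argv, 0..) |arg, i| argv_buf[i] = (try arena.dupeZ(u8, arg)).ptr;
    // The sentinel gives the trailing null that execve expects.
    try std.testing.expect(argv_buf[argv.len] == null);
}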

View File

@ -5,16 +5,18 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const Air = @This();
const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const assert = std.debug.assert;
const Air = @This();
const InternPool = @import("InternPool.zig");
const Module = @import("Module.zig");
instructions: std.MultiArrayList(Inst).Slice,
/// The meaning of this data is determined by `Inst.Tag` value.
/// The first few indexes are reserved. See `ExtraIndex` for the values.
extra: []const u32,
values: []const Value,
pub const ExtraIndex = enum(u32) {
/// Payload index of the main `Block` in the `extra` array.
@ -183,6 +185,18 @@ pub const Inst = struct {
/// Allocates stack local memory.
/// Uses the `ty` field.
alloc,
/// This special instruction only exists temporarily during semantic
/// analysis and is guaranteed to be unreachable in machine code
/// backends. It tracks a set of types that have been stored to an
/// inferred allocation.
/// Uses the `inferred_alloc` field.
inferred_alloc,
/// This special instruction only exists temporarily during semantic
/// analysis and is guaranteed to be unreachable in machine code
/// backends. Used to coordinate alloc_inferred, store_to_inferred_ptr,
/// and resolve_inferred_alloc instructions for comptime code.
/// Uses the `inferred_alloc_comptime` field.
inferred_alloc_comptime,
/// If the function will pass the result by-ref, this instruction returns the
/// result pointer. Otherwise it is equivalent to `alloc`.
/// Uses the `ty` field.
@ -394,11 +408,9 @@ pub const Inst = struct {
/// was executed on the operand.
/// Uses the `ty_pl` field. Payload is `TryPtr`.
try_ptr,
/// A comptime-known value. Uses the `ty_pl` field, payload is index of
/// `values` array.
constant,
/// A comptime-known type. Uses the `ty` field.
const_ty,
/// A comptime-known value via an index into the InternPool.
/// Uses the `interned` field.
interned,
/// Notes the beginning of a source code statement and marks the line and column.
/// Result type is always void.
/// Uses the `dbg_stmt` field.
@ -408,10 +420,10 @@ pub const Inst = struct {
/// Marks the end of a semantic scope for debug info variables.
dbg_block_end,
/// Marks the start of an inline call.
/// Uses `ty_pl` with the payload being the index of a Value.Function in air.values.
/// Uses the `ty_fn` field.
dbg_inline_begin,
/// Marks the end of an inline call.
/// Uses `ty_pl` with the payload being the index of a Value.Function in air.values.
/// Uses the `ty_fn` field.
dbg_inline_end,
/// Marks the beginning of a local variable. The operand is a pointer pointing
/// to the storage for the variable. The local may be a const or a var.
@ -837,7 +849,96 @@ pub const Inst = struct {
/// The position of an AIR instruction within the `Air` instructions array.
pub const Index = u32;
pub const Ref = @import("Zir.zig").Inst.Ref;
pub const Ref = enum(u32) {
u1_type = @enumToInt(InternPool.Index.u1_type),
u8_type = @enumToInt(InternPool.Index.u8_type),
i8_type = @enumToInt(InternPool.Index.i8_type),
u16_type = @enumToInt(InternPool.Index.u16_type),
i16_type = @enumToInt(InternPool.Index.i16_type),
u29_type = @enumToInt(InternPool.Index.u29_type),
u32_type = @enumToInt(InternPool.Index.u32_type),
i32_type = @enumToInt(InternPool.Index.i32_type),
u64_type = @enumToInt(InternPool.Index.u64_type),
i64_type = @enumToInt(InternPool.Index.i64_type),
u80_type = @enumToInt(InternPool.Index.u80_type),
u128_type = @enumToInt(InternPool.Index.u128_type),
i128_type = @enumToInt(InternPool.Index.i128_type),
usize_type = @enumToInt(InternPool.Index.usize_type),
isize_type = @enumToInt(InternPool.Index.isize_type),
c_char_type = @enumToInt(InternPool.Index.c_char_type),
c_short_type = @enumToInt(InternPool.Index.c_short_type),
c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type),
c_int_type = @enumToInt(InternPool.Index.c_int_type),
c_uint_type = @enumToInt(InternPool.Index.c_uint_type),
c_long_type = @enumToInt(InternPool.Index.c_long_type),
c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type),
c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type),
c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type),
c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type),
f16_type = @enumToInt(InternPool.Index.f16_type),
f32_type = @enumToInt(InternPool.Index.f32_type),
f64_type = @enumToInt(InternPool.Index.f64_type),
f80_type = @enumToInt(InternPool.Index.f80_type),
f128_type = @enumToInt(InternPool.Index.f128_type),
anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type),
bool_type = @enumToInt(InternPool.Index.bool_type),
void_type = @enumToInt(InternPool.Index.void_type),
type_type = @enumToInt(InternPool.Index.type_type),
anyerror_type = @enumToInt(InternPool.Index.anyerror_type),
comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type),
comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type),
noreturn_type = @enumToInt(InternPool.Index.noreturn_type),
anyframe_type = @enumToInt(InternPool.Index.anyframe_type),
null_type = @enumToInt(InternPool.Index.null_type),
undefined_type = @enumToInt(InternPool.Index.undefined_type),
enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type),
atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type),
atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type),
calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type),
address_space_type = @enumToInt(InternPool.Index.address_space_type),
float_mode_type = @enumToInt(InternPool.Index.float_mode_type),
reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type),
call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type),
prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type),
export_options_type = @enumToInt(InternPool.Index.export_options_type),
extern_options_type = @enumToInt(InternPool.Index.extern_options_type),
type_info_type = @enumToInt(InternPool.Index.type_info_type),
manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type),
manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type),
manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type),
single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type),
slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type),
slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type),
anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
undef = @enumToInt(InternPool.Index.undef),
zero = @enumToInt(InternPool.Index.zero),
zero_usize = @enumToInt(InternPool.Index.zero_usize),
zero_u8 = @enumToInt(InternPool.Index.zero_u8),
one = @enumToInt(InternPool.Index.one),
one_usize = @enumToInt(InternPool.Index.one_usize),
one_u8 = @enumToInt(InternPool.Index.one_u8),
four_u8 = @enumToInt(InternPool.Index.four_u8),
negative_one = @enumToInt(InternPool.Index.negative_one),
calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
void_value = @enumToInt(InternPool.Index.void_value),
unreachable_value = @enumToInt(InternPool.Index.unreachable_value),
null_value = @enumToInt(InternPool.Index.null_value),
bool_true = @enumToInt(InternPool.Index.bool_true),
bool_false = @enumToInt(InternPool.Index.bool_false),
empty_struct = @enumToInt(InternPool.Index.empty_struct),
generic_poison = @enumToInt(InternPool.Index.generic_poison),
/// This Ref does not correspond to any AIR instruction or constant
/// value. It is used to handle argument types of var args functions.
var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type),
/// This Ref does not correspond to any AIR instruction or constant
/// value and may instead be used as a sentinel to indicate null.
none = @enumToInt(InternPool.Index.none),
_,
};
/// All instructions have an 8-byte payload, which is contained within
/// this union. `Tag` determines which union field is active, as well as
@ -845,6 +946,7 @@ pub const Inst = struct {
pub const Data = union {
no_op: void,
un_op: Ref,
interned: InternPool.Index,
bin_op: struct {
lhs: Ref,
@ -864,6 +966,10 @@ pub const Inst = struct {
// Index into a different array.
payload: u32,
},
ty_fn: struct {
ty: Ref,
func: Module.Fn.Index,
},
br: struct {
block_inst: Index,
operand: Ref,
@ -896,6 +1002,19 @@ pub const Inst = struct {
// Index into a different array.
payload: u32,
},
inferred_alloc_comptime: InferredAllocComptime,
inferred_alloc: InferredAlloc,
pub const InferredAllocComptime = struct {
decl_index: Module.Decl.Index,
alignment: InternPool.Alignment,
is_const: bool,
};
pub const InferredAlloc = struct {
alignment: InternPool.Alignment,
is_const: bool,
};
// Make sure we don't accidentally add a field to make this union
// bigger than expected. Note that in Debug builds, Zig is allowed
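The truncated comment refers to a comptime size guard. A self-contained sketch of the pattern with a stand-in union (the real check lives in Air.zig and targets `Inst.Data`):

const std = @import("std");
const builtin = @import("builtin");

// Stand-in for Air.Inst.Data: payloads must fit in 8 bytes.
const Data = union {
    no_op: void,
    un_op: u32,
    bin_op: struct { lhs: u32, rhs: u32 },
};

comptime {
    // Debug/ReleaseSafe may add a hidden safety tag to untagged unions,
    // so the size is only pinned down in the release-fast/small modes.
    if (builtin.mode != .Debug and builtin.mode != .ReleaseSafe) {
        std.debug.assert(@sizeOf(Data) == 8);
    }
}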
@ -974,8 +1093,7 @@ pub const FieldParentPtr = struct {
pub const Shuffle = struct {
a: Inst.Ref,
b: Inst.Ref,
// index to air_values
mask: u32,
mask: InternPool.Index,
mask_len: u32,
};
@ -1064,15 +1182,15 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
return air.extra[extra.end..][0..extra.data.body_len];
}
pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type {
pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
return Air.Inst.Ref.typed_value_map[ref_int].ty;
if (ref_int < InternPool.static_keys.len) {
return InternPool.static_keys[ref_int].typeOf().toType();
}
return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len));
return air.typeOfIndex(ref_int - ref_start_index, ip);
}
pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type {
const datas = air.instructions.items(.data);
switch (air.instructions.items(.tag)[inst]) {
.add,
@ -1114,7 +1232,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.div_exact_optimized,
.rem_optimized,
.mod_optimized,
=> return air.typeOf(datas[inst].bin_op.lhs),
=> return air.typeOf(datas[inst].bin_op.lhs, ip),
.sqrt,
.sin,
@ -1132,7 +1250,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.trunc_float,
.neg,
.neg_optimized,
=> return air.typeOf(datas[inst].un_op),
=> return air.typeOf(datas[inst].un_op, ip),
.cmp_lt,
.cmp_lte,
@ -1159,8 +1277,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.error_set_has_value,
=> return Type.bool,
.const_ty => return Type.type,
.alloc,
.ret_ptr,
.err_return_trace,
@ -1171,7 +1287,6 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.assembly,
.block,
.constant,
.struct_field_ptr,
.struct_field_val,
.slice_elem_ptr,
@ -1194,6 +1309,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.try_ptr,
=> return air.getRefType(datas[inst].ty_pl.ty),
.interned => return ip.typeOf(datas[inst].interned).toType(),
.not,
.bitcast,
.load,
@ -1243,7 +1360,7 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.ret_load,
.unreach,
.trap,
=> return Type.initTag(.noreturn),
=> return Type.noreturn,
.breakpoint,
.dbg_stmt,
@ -1280,63 +1397,67 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
.wasm_memory_grow => return Type.i32,
.wasm_memory_size => return Type.u32,
.bool_to_int => return Type.initTag(.u1),
.bool_to_int => return Type.u1,
.tag_name, .error_name => return Type.initTag(.const_slice_u8_sentinel_0),
.tag_name, .error_name => return Type.slice_const_u8_sentinel_0,
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const callee_ty = air.typeOf(datas[inst].pl_op.operand);
switch (callee_ty.zigTypeTag()) {
.Fn => return callee_ty.fnReturnType(),
.Pointer => return callee_ty.childType().fnReturnType(),
else => unreachable,
}
const callee_ty = air.typeOf(datas[inst].pl_op.operand, ip);
return callee_ty.fnReturnTypeIp(ip);
},
.slice_elem_val, .ptr_elem_val, .array_elem_val => {
const ptr_ty = air.typeOf(datas[inst].bin_op.lhs);
return ptr_ty.elemType();
const ptr_ty = air.typeOf(datas[inst].bin_op.lhs, ip);
return ptr_ty.childTypeIp(ip);
},
.atomic_load => {
const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr);
return ptr_ty.elemType();
const ptr_ty = air.typeOf(datas[inst].atomic_load.ptr, ip);
return ptr_ty.childTypeIp(ip);
},
.atomic_rmw => {
const ptr_ty = air.typeOf(datas[inst].pl_op.operand);
return ptr_ty.elemType();
const ptr_ty = air.typeOf(datas[inst].pl_op.operand, ip);
return ptr_ty.childTypeIp(ip);
},
.reduce, .reduce_optimized => return air.typeOf(datas[inst].reduce.operand).childType(),
.reduce, .reduce_optimized => {
const operand_ty = air.typeOf(datas[inst].reduce.operand, ip);
return ip.indexToKey(operand_ty.ip_index).vector_type.child.toType();
},
.mul_add => return air.typeOf(datas[inst].pl_op.operand),
.mul_add => return air.typeOf(datas[inst].pl_op.operand, ip),
.select => {
const extra = air.extraData(Air.Bin, datas[inst].pl_op.payload).data;
return air.typeOf(extra.lhs);
return air.typeOf(extra.lhs, ip);
},
.@"try" => {
const err_union_ty = air.typeOf(datas[inst].pl_op.operand);
return err_union_ty.errorUnionPayload();
const err_union_ty = air.typeOf(datas[inst].pl_op.operand, ip);
return ip.indexToKey(err_union_ty.ip_index).error_union_type.payload_type.toType();
},
.work_item_id,
.work_group_size,
.work_group_id,
=> return Type.u32,
.inferred_alloc => unreachable,
.inferred_alloc_comptime => unreachable,
}
}
pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type {
const ref_int = @enumToInt(ref);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
var buffer: Value.ToTypeBuffer = undefined;
return Air.Inst.Ref.typed_value_map[ref_int].val.toType(&buffer);
if (ref_int < ref_start_index) {
const ip_index = @intToEnum(InternPool.Index, ref_int);
return ip_index.toType();
}
const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len;
const inst_index = ref_int - ref_start_index;
const air_tags = air.instructions.items(.tag);
const air_datas = air.instructions.items(.data);
assert(air_tags[inst_index] == .const_ty);
return air_datas[inst_index].ty;
return switch (air_tags[inst_index]) {
.interned => air_datas[inst_index].interned.toType(),
else => unreachable,
};
}
/// Returns the requested data, as well as the new index which is at the start of the
@ -1350,7 +1471,8 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
u32 => air.extra[i],
Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]),
i32 => @bitCast(i32, air.extra[i]),
else => @compileError("bad field type"),
InternPool.Index => @intToEnum(InternPool.Index, air.extra[i]),
else => @compileError("bad field type: " ++ @typeName(field.type)),
};
i += 1;
}
@ -1363,17 +1485,17 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end
pub fn deinit(air: *Air, gpa: std.mem.Allocator) void {
air.instructions.deinit(gpa);
gpa.free(air.extra);
gpa.free(air.values);
air.* = undefined;
}
const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len;
pub const ref_start_index: u32 = InternPool.static_len;
pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref {
return @intToEnum(Air.Inst.Ref, ref_start_index + inst);
pub fn indexToRef(inst: Inst.Index) Inst.Ref {
return @intToEnum(Inst.Ref, ref_start_index + inst);
}
pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index {
pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
assert(inst != .none);
const ref_int = @enumToInt(inst);
if (ref_int >= ref_start_index) {
return ref_int - ref_start_index;
@ -1382,18 +1504,23 @@ pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index {
}
}
pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index {
if (inst == .none) return null;
return refToIndex(inst);
}
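Refs below `ref_start_index` name interned constants; everything above is an instruction index shifted by `InternPool.static_len`. A sketch of the round trip (compiler-internal imports, so this only builds inside the compiler source tree):

const std = @import("std");
const Air = @import("Air.zig");

test "instruction indices round-trip through Ref" {
    const inst: Air.Inst.Index = 42;
    const ref = Air.indexToRef(inst);
    try std.testing.expectEqual(@as(?Air.Inst.Index, inst), Air.refToIndex(ref));
}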
/// Returns `null` if runtime-known.
pub fn value(air: Air, inst: Air.Inst.Ref) ?Value {
pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
return Air.Inst.Ref.typed_value_map[ref_int].val;
if (ref_int < ref_start_index) {
const ip_index = @intToEnum(InternPool.Index, ref_int);
return ip_index.toValue();
}
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
const inst_index = @intCast(Air.Inst.Index, ref_int - ref_start_index);
const air_datas = air.instructions.items(.data);
switch (air.instructions.items(.tag)[inst_index]) {
.constant => return air.values[air_datas[inst_index].ty_pl.payload],
.const_ty => unreachable,
else => return air.typeOfIndex(inst_index).onePossibleValue(),
.interned => return air_datas[inst_index].interned.toValue(),
else => return air.typeOfIndex(inst_index, &mod.intern_pool).onePossibleValue(mod),
}
}
@ -1406,10 +1533,11 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 {
return bytes[0..end :0];
}
/// Returns whether the given instruction must always be lowered, for instance because it can cause
/// side effects. If an instruction does not need to be lowered, and Liveness determines its result
/// is unused, backends should avoid lowering it.
pub fn mustLower(air: Air, inst: Air.Inst.Index) bool {
/// Returns whether the given instruction must always be lowered, for instance
/// because it can cause side effects. If an instruction does not need to be
/// lowered, and Liveness determines its result is unused, backends should
/// avoid lowering it.
pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
const data = air.instructions.items(.data)[inst];
return switch (air.instructions.items(.tag)[inst]) {
.arg,
@ -1498,6 +1626,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool {
.mul_with_overflow,
.shl_with_overflow,
.alloc,
.inferred_alloc,
.inferred_alloc_comptime,
.ret_ptr,
.bit_and,
.bit_or,
@ -1546,8 +1676,7 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool {
.cmp_neq_optimized,
.cmp_vector,
.cmp_vector_optimized,
.constant,
.const_ty,
.interned,
.is_null,
.is_non_null,
.is_null_ptr,
@ -1616,8 +1745,8 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index) bool {
=> false,
.assembly => @truncate(u1, air.extraData(Air.Asm, data.ty_pl.payload).data.flags >> 31) != 0,
.load => air.typeOf(data.ty_op.operand).isVolatilePtr(),
.slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs).isVolatilePtr(),
.atomic_load => air.typeOf(data.atomic_load.ptr).isVolatilePtr(),
.load => air.typeOf(data.ty_op.operand, ip).isVolatilePtrIp(ip),
.slice_elem_val, .ptr_elem_val => air.typeOf(data.bin_op.lhs, ip).isVolatilePtrIp(ip),
.atomic_load => air.typeOf(data.atomic_load.ptr, ip).isVolatilePtrIp(ip),
};
}

View File

@ -3934,7 +3934,7 @@ fn fnDecl(
var section_gz = decl_gz.makeSubBlock(params_scope);
defer section_gz.unstack();
const section_ref: Zir.Inst.Ref = if (fn_proto.ast.section_expr == 0) .none else inst: {
const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .const_slice_u8_type } }, fn_proto.ast.section_expr);
const inst = try expr(&decl_gz, params_scope, .{ .rl = .{ .coerced_ty = .slice_const_u8_type } }, fn_proto.ast.section_expr);
if (section_gz.instructionsSlice().len == 0) {
// In this case we will send a len=0 body which can be encoded more efficiently.
break :inst inst;
@ -4137,7 +4137,7 @@ fn globalVarDecl(
break :inst try expr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .address_space_type } }, var_decl.ast.addrspace_node);
};
const section_inst: Zir.Inst.Ref = if (var_decl.ast.section_node == 0) .none else inst: {
break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .const_slice_u8_type } }, var_decl.ast.section_node);
break :inst try comptimeExpr(&block_scope, &block_scope.base, .{ .rl = .{ .ty = .slice_const_u8_type } }, var_decl.ast.section_node);
};
const has_section_or_addrspace = section_inst != .none or addrspace_inst != .none;
wip_members.nextDecl(is_pub, is_export, align_inst != .none, has_section_or_addrspace);
@ -4497,7 +4497,7 @@ fn testDecl(
.cc_gz = null,
.align_ref = .none,
.align_gz = null,
.ret_ref = .void_type,
.ret_ref = .anyerror_void_error_union_type,
.ret_gz = null,
.section_ref = .none,
.section_gz = null,
@ -4510,7 +4510,7 @@ fn testDecl(
.body_gz = &fn_block,
.lib_name = 0,
.is_var_args = false,
.is_inferred_error = true,
.is_inferred_error = false,
.is_test = true,
.is_extern = false,
.is_noinline = false,
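Combined with the `ret_ref` change above, test declarations now carry an explicit `anyerror!void` return type instead of an inferred error set.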
@ -7878,7 +7878,7 @@ fn unionInit(
params: []const Ast.Node.Index,
) InnerError!Zir.Inst.Ref {
const union_type = try typeExpr(gz, scope, params[0]);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]);
const field_type = try gz.addPlNode(.field_type_ref, params[1], Zir.Inst.FieldTypeRef{
.container_type = union_type,
.field_name = field_name,
@ -8100,12 +8100,12 @@ fn builtinCall(
if (ri.rl == .ref) {
return gz.addPlNode(.field_ptr_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .ref }, params[0]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]),
});
}
const result = try gz.addPlNode(.field_val_named, node, Zir.Inst.FieldNamed{
.lhs = try expr(gz, scope, .{ .rl = .none }, params[0]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]),
.field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]),
});
return rvalue(gz, ri, result, node);
},
@ -8271,11 +8271,11 @@ fn builtinCall(
.align_of => return simpleUnOpType(gz, scope, ri, node, params[0], .align_of),
.ptr_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .ptr_to_int),
.compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .compile_error),
.compile_error => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .compile_error),
.set_eval_branch_quota => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .coerced_ty = .u32_type } }, params[0], .set_eval_branch_quota),
.enum_to_int => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .enum_to_int),
.bool_to_int => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .bool_to_int),
.embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .embed_file),
.embed_file => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .embed_file),
.error_name => return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .anyerror_type } }, params[0], .error_name),
.set_runtime_safety => return simpleUnOp(gz, scope, ri, node, bool_ri, params[0], .set_runtime_safety),
.sqrt => return simpleUnOp(gz, scope, ri, node, .{ .rl = .none }, params[0], .sqrt),
@ -8334,7 +8334,7 @@ fn builtinCall(
},
.panic => {
try emitDbgNode(gz, node);
return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0], .panic);
return simpleUnOp(gz, scope, ri, node, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0], .panic);
},
.trap => {
try emitDbgNode(gz, node);
@ -8450,7 +8450,7 @@ fn builtinCall(
},
.c_define => {
if (!gz.c_import) return gz.astgen.failNode(node, "C define valid only inside C import block", .{});
const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[0]);
const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[0]);
const value = try comptimeExpr(gz, scope, .{ .rl = .none }, params[1]);
const result = try gz.addExtendedPayload(.c_define, Zir.Inst.BinNode{
.node = gz.nodeIndexToRelative(node),
@ -8530,7 +8530,7 @@ fn builtinCall(
return rvalue(gz, ri, result, node);
},
.call => {
const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .modifier_type } }, params[0]);
const modifier = try comptimeExpr(gz, scope, .{ .rl = .{ .coerced_ty = .call_modifier_type } }, params[0]);
const callee = try expr(gz, scope, .{ .rl = .none }, params[1]);
const args = try expr(gz, scope, .{ .rl = .none }, params[2]);
const result = try gz.addPlNode(.builtin_call, node, Zir.Inst.BuiltinCall{
@ -8546,7 +8546,7 @@ fn builtinCall(
},
.field_parent_ptr => {
const parent_type = try typeExpr(gz, scope, params[0]);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, params[1]);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, params[1]);
const result = try gz.addPlNode(.field_parent_ptr, node, Zir.Inst.FieldParentPtr{
.parent_type = parent_type,
.field_name = field_name,
@ -8701,7 +8701,7 @@ fn hasDeclOrField(
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const container_type = try typeExpr(gz, scope, lhs_node);
const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node);
const name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node);
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
.lhs = container_type,
.rhs = name,
@ -8851,7 +8851,7 @@ fn simpleCBuiltin(
) InnerError!Zir.Inst.Ref {
const name: []const u8 = if (tag == .c_undef) "C undef" else "C include";
if (!gz.c_import) return gz.astgen.failNode(node, "{s} valid only inside C import block", .{name});
const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, operand_node);
const operand = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, operand_node);
_ = try gz.addExtendedPayload(tag, Zir.Inst.UnNode{
.node = gz.nodeIndexToRelative(node),
.operand = operand,
@ -8869,7 +8869,7 @@ fn offsetOf(
tag: Zir.Inst.Tag,
) InnerError!Zir.Inst.Ref {
const type_inst = try typeExpr(gz, scope, lhs_node);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .const_slice_u8_type } }, rhs_node);
const field_name = try comptimeExpr(gz, scope, .{ .rl = .{ .ty = .slice_const_u8_type } }, rhs_node);
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
.lhs = type_inst,
.rhs = field_name,
@ -10271,6 +10271,8 @@ fn rvalue(
as_ty | @enumToInt(Zir.Inst.Ref.i32_type),
as_ty | @enumToInt(Zir.Inst.Ref.u64_type),
as_ty | @enumToInt(Zir.Inst.Ref.i64_type),
as_ty | @enumToInt(Zir.Inst.Ref.u128_type),
as_ty | @enumToInt(Zir.Inst.Ref.i128_type),
as_ty | @enumToInt(Zir.Inst.Ref.usize_type),
as_ty | @enumToInt(Zir.Inst.Ref.isize_type),
as_ty | @enumToInt(Zir.Inst.Ref.c_char_type),
@ -10296,15 +10298,30 @@ fn rvalue(
as_ty | @enumToInt(Zir.Inst.Ref.comptime_int_type),
as_ty | @enumToInt(Zir.Inst.Ref.comptime_float_type),
as_ty | @enumToInt(Zir.Inst.Ref.noreturn_type),
as_ty | @enumToInt(Zir.Inst.Ref.anyframe_type),
as_ty | @enumToInt(Zir.Inst.Ref.null_type),
as_ty | @enumToInt(Zir.Inst.Ref.undefined_type),
as_ty | @enumToInt(Zir.Inst.Ref.fn_noreturn_no_args_type),
as_ty | @enumToInt(Zir.Inst.Ref.fn_void_no_args_type),
as_ty | @enumToInt(Zir.Inst.Ref.fn_naked_noreturn_no_args_type),
as_ty | @enumToInt(Zir.Inst.Ref.fn_ccc_void_no_args_type),
as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
as_ty | @enumToInt(Zir.Inst.Ref.const_slice_u8_type),
as_ty | @enumToInt(Zir.Inst.Ref.enum_literal_type),
as_ty | @enumToInt(Zir.Inst.Ref.atomic_order_type),
as_ty | @enumToInt(Zir.Inst.Ref.atomic_rmw_op_type),
as_ty | @enumToInt(Zir.Inst.Ref.calling_convention_type),
as_ty | @enumToInt(Zir.Inst.Ref.address_space_type),
as_ty | @enumToInt(Zir.Inst.Ref.float_mode_type),
as_ty | @enumToInt(Zir.Inst.Ref.reduce_op_type),
as_ty | @enumToInt(Zir.Inst.Ref.call_modifier_type),
as_ty | @enumToInt(Zir.Inst.Ref.prefetch_options_type),
as_ty | @enumToInt(Zir.Inst.Ref.export_options_type),
as_ty | @enumToInt(Zir.Inst.Ref.extern_options_type),
as_ty | @enumToInt(Zir.Inst.Ref.type_info_type),
as_ty | @enumToInt(Zir.Inst.Ref.manyptr_u8_type),
as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_type),
as_ty | @enumToInt(Zir.Inst.Ref.manyptr_const_u8_sentinel_0_type),
as_ty | @enumToInt(Zir.Inst.Ref.single_const_pointer_to_comptime_int_type),
as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_type),
as_ty | @enumToInt(Zir.Inst.Ref.slice_const_u8_sentinel_0_type),
as_ty | @enumToInt(Zir.Inst.Ref.anyerror_void_error_union_type),
as_ty | @enumToInt(Zir.Inst.Ref.generic_poison_type),
as_ty | @enumToInt(Zir.Inst.Ref.empty_struct_type),
as_comptime_int | @enumToInt(Zir.Inst.Ref.zero),
as_comptime_int | @enumToInt(Zir.Inst.Ref.one),
as_bool | @enumToInt(Zir.Inst.Ref.bool_true),
@ -10677,8 +10694,8 @@ fn identAsString(astgen: *AstGen, ident_token: Ast.TokenIndex) !u32 {
const string_bytes = &astgen.string_bytes;
const str_index = @intCast(u32, string_bytes.items.len);
try astgen.appendIdentStr(ident_token, string_bytes);
const key = string_bytes.items[str_index..];
const gop = try astgen.string_table.getOrPutContextAdapted(gpa, @as([]const u8, key), StringIndexAdapter{
const key: []const u8 = string_bytes.items[str_index..];
const gop = try astgen.string_table.getOrPutContextAdapted(gpa, key, StringIndexAdapter{
.bytes = string_bytes,
}, StringIndexContext{
.bytes = string_bytes,

View File

@ -8,6 +8,7 @@ const CompilationModule = @import("Module.zig");
const File = CompilationModule.File;
const Module = @import("Package.zig");
const Tokenizer = std.zig.Tokenizer;
const InternPool = @import("InternPool.zig");
const Zir = @import("Zir.zig");
const Ref = Zir.Inst.Ref;
const log = std.log.scoped(.autodoc);
@ -95,8 +96,6 @@ pub fn generateZirData(self: *Autodoc) !void {
}
}
log.debug("Ref map size: {}", .{Ref.typed_value_map.len});
const root_src_dir = self.comp_module.main_pkg.root_src_directory;
const root_src_path = self.comp_module.main_pkg.root_src_path;
const joined_src_path = try root_src_dir.join(self.arena, &.{root_src_path});
@ -108,18 +107,20 @@ pub fn generateZirData(self: *Autodoc) !void {
const file = self.comp_module.import_table.get(abs_root_src_path).?; // file is expected to be present in the import table
// Append all the types in Zir.Inst.Ref.
{
try self.types.append(self.arena, .{
.ComptimeExpr = .{ .name = "ComptimeExpr" },
});
// this skips Ref.none but it's ok because we replaced it with ComptimeExpr
var i: u32 = 1;
while (i <= @enumToInt(Ref.anyerror_void_error_union_type)) : (i += 1) {
comptime std.debug.assert(@enumToInt(InternPool.Index.first_type) == 0);
var i: u32 = 0;
while (i <= @enumToInt(InternPool.Index.last_type)) : (i += 1) {
const ip_index = @intToEnum(InternPool.Index, i);
var tmpbuf = std.ArrayList(u8).init(self.arena);
try Ref.typed_value_map[i].val.fmtDebug().format("", .{}, tmpbuf.writer());
if (ip_index == .generic_poison_type) {
// Not a real type, doesn't have a normal name
try tmpbuf.writer().writeAll("(generic poison)");
} else {
try ip_index.toType().fmt(self.comp_module).format("", .{}, tmpbuf.writer());
}
try self.types.append(
self.arena,
switch (@intToEnum(Ref, i)) {
switch (ip_index) {
else => blk: {
// TODO: map the remaining refs to a correct type
// instead of just assigning "array" to them.
@ -1040,7 +1041,7 @@ fn walkInstruction(
.ret_load => {
const un_node = data[inst_index].un_node;
const res_ptr_ref = un_node.operand;
const res_ptr_inst = @enumToInt(res_ptr_ref) - Ref.typed_value_map.len;
const res_ptr_inst = Zir.refToIndex(res_ptr_ref).?;
// TODO: this instruction doesn't let us know trivially if there's
// branching involved or not. For now here's the strat:
// We search backwards until `ret_ptr` for `store_node`,
@ -2157,11 +2158,10 @@ fn walkInstruction(
const lhs_ref = blk: {
var lhs_extra = extra;
while (true) {
if (@enumToInt(lhs_extra.data.lhs) < Ref.typed_value_map.len) {
const lhs = Zir.refToIndex(lhs_extra.data.lhs) orelse {
break :blk lhs_extra.data.lhs;
}
};
const lhs = @enumToInt(lhs_extra.data.lhs) - Ref.typed_value_map.len;
if (tags[lhs] != .field_val and
tags[lhs] != .field_ptr and
tags[lhs] != .field_type) break :blk lhs_extra.data.lhs;
@ -2188,8 +2188,7 @@ fn walkInstruction(
// TODO: double check that we really don't need type info here
const wr = blk: {
if (@enumToInt(lhs_ref) >= Ref.typed_value_map.len) {
const lhs_inst = @enumToInt(lhs_ref) - Ref.typed_value_map.len;
if (Zir.refToIndex(lhs_ref)) |lhs_inst| {
if (tags[lhs_inst] == .call or tags[lhs_inst] == .field_call) {
break :blk DocData.WalkResult{
.expr = .{
@ -4672,16 +4671,19 @@ fn walkRef(
ref: Ref,
need_type: bool, // true when the caller needs also a typeRef for the return value
) AutodocErrors!DocData.WalkResult {
const enum_value = @enumToInt(ref);
if (enum_value <= @enumToInt(Ref.anyerror_void_error_union_type)) {
if (ref == .none) {
return .{ .expr = .{ .comptimeExpr = 0 } };
} else if (@enumToInt(ref) <= @enumToInt(InternPool.Index.last_type)) {
// We can just return a type that indexes into `types` with the
// enum value because in the beginning we pre-filled `types` with
// the types that are listed in `Ref`.
return DocData.WalkResult{
.typeRef = .{ .type = @enumToInt(std.builtin.TypeId.Type) },
.expr = .{ .type = enum_value },
.expr = .{ .type = @enumToInt(ref) },
};
} else if (enum_value < Ref.typed_value_map.len) {
} else if (Zir.refToIndex(ref)) |zir_index| {
return self.walkInstruction(file, parent_scope, parent_src, zir_index, need_type);
} else {
switch (ref) {
else => {
panicWithContext(
@ -4774,9 +4776,6 @@ fn walkRef(
// } };
// },
}
} else {
const zir_index = enum_value - Ref.typed_value_map.len;
return self.walkInstruction(file, parent_scope, parent_src, zir_index, need_type);
}
}

View File

@ -87,6 +87,7 @@ clang_preprocessor_mode: ClangPreprocessorMode,
/// Whether to print clang argvs to stdout.
verbose_cc: bool,
verbose_air: bool,
verbose_intern_pool: bool,
verbose_llvm_ir: ?[]const u8,
verbose_llvm_bc: ?[]const u8,
verbose_cimport: bool,
@ -226,7 +227,7 @@ const Job = union(enum) {
/// Write the constant value for a Decl to the output file.
codegen_decl: Module.Decl.Index,
/// Write the machine code for a function to the output file.
codegen_func: *Module.Fn,
codegen_func: Module.Fn.Index,
/// Render the .h file snippet for the Decl.
emit_h_decl: Module.Decl.Index,
/// The Decl needs to be analyzed and possibly export itself.
@ -593,6 +594,7 @@ pub const InitOptions = struct {
verbose_cc: bool = false,
verbose_link: bool = false,
verbose_air: bool = false,
verbose_intern_pool: bool = false,
verbose_llvm_ir: ?[]const u8 = null,
verbose_llvm_bc: ?[]const u8 = null,
verbose_cimport: bool = false,
@ -1315,9 +1317,9 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.global_zir_cache = global_zir_cache,
.local_zir_cache = local_zir_cache,
.emit_h = emit_h,
.error_name_list = .{},
.tmp_hack_arena = std.heap.ArenaAllocator.init(gpa),
};
try module.error_name_list.append(gpa, "(no error)");
try module.init();
break :blk module;
} else blk: {
@ -1574,6 +1576,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.clang_preprocessor_mode = options.clang_preprocessor_mode,
.verbose_cc = options.verbose_cc,
.verbose_air = options.verbose_air,
.verbose_intern_pool = options.verbose_intern_pool,
.verbose_llvm_ir = options.verbose_llvm_ir,
.verbose_llvm_bc = options.verbose_llvm_bc,
.verbose_cimport = options.verbose_cimport,
@ -2026,6 +2029,13 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
try comp.performAllTheWork(main_progress_node);
if (comp.bin_file.options.module) |module| {
if (builtin.mode == .Debug and comp.verbose_intern_pool) {
std.debug.print("intern pool stats for '{s}':\n", .{
comp.bin_file.options.root_name,
});
module.intern_pool.dump();
}
if (comp.bin_file.options.is_test and comp.totalErrorCount() == 0) {
// The `test_functions` decl has been intentionally postponed until now,
// at which point we must populate it with the list of test functions that
@ -2042,7 +2052,7 @@ pub fn update(comp: *Compilation, main_progress_node: *std.Progress.Node) !void
assert(decl.deletion_flag);
assert(decl.dependants.count() == 0);
const is_anon = if (decl.zir_decl_index == 0) blk: {
break :blk decl.src_namespace.anon_decls.swapRemove(decl_index);
break :blk module.namespacePtr(decl.src_namespace).anon_decls.swapRemove(decl_index);
} else false;
try module.clearDecl(decl_index, null);
@ -2523,8 +2533,7 @@ pub fn totalErrorCount(self: *Compilation) u32 {
// the previous parse success, including compile errors, but we cannot
// emit them until the file succeeds parsing.
for (module.failed_decls.keys()) |key| {
const decl = module.declPtr(key);
if (decl.getFileScope().okToReportErrors()) {
if (module.declFileScope(key).okToReportErrors()) {
total += 1;
if (module.cimport_errors.get(key)) |errors| {
total += errors.len;
@ -2533,8 +2542,7 @@ pub fn totalErrorCount(self: *Compilation) u32 {
}
if (module.emit_h) |emit_h| {
for (emit_h.failed_decls.keys()) |key| {
const decl = module.declPtr(key);
if (decl.getFileScope().okToReportErrors()) {
if (module.declFileScope(key).okToReportErrors()) {
total += 1;
}
}
@ -2618,7 +2626,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
var it = module.failed_files.iterator();
while (it.next()) |entry| {
if (entry.value_ptr.*) |msg| {
try addModuleErrorMsg(&bundle, msg.*);
try addModuleErrorMsg(module, &bundle, msg.*);
} else {
// Must be ZIR errors. Note that this may include AST errors.
// addZirErrorMessages asserts that the tree is loaded.
@ -2631,17 +2639,17 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
var it = module.failed_embed_files.iterator();
while (it.next()) |entry| {
const msg = entry.value_ptr.*;
try addModuleErrorMsg(&bundle, msg.*);
try addModuleErrorMsg(module, &bundle, msg.*);
}
}
{
var it = module.failed_decls.iterator();
while (it.next()) |entry| {
const decl = module.declPtr(entry.key_ptr.*);
const decl_index = entry.key_ptr.*;
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (decl.getFileScope().okToReportErrors()) {
try addModuleErrorMsg(&bundle, entry.value_ptr.*.*);
if (module.declFileScope(decl_index).okToReportErrors()) {
try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*);
if (module.cimport_errors.get(entry.key_ptr.*)) |cimport_errors| for (cimport_errors) |c_error| {
try bundle.addRootErrorMessage(.{
.msg = try bundle.addString(std.mem.span(c_error.msg)),
@ -2662,16 +2670,16 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
if (module.emit_h) |emit_h| {
var it = emit_h.failed_decls.iterator();
while (it.next()) |entry| {
const decl = module.declPtr(entry.key_ptr.*);
const decl_index = entry.key_ptr.*;
// Skip errors for Decls within files that had a parse failure.
// We'll try again once parsing succeeds.
if (decl.getFileScope().okToReportErrors()) {
try addModuleErrorMsg(&bundle, entry.value_ptr.*.*);
if (module.declFileScope(decl_index).okToReportErrors()) {
try addModuleErrorMsg(module, &bundle, entry.value_ptr.*.*);
}
}
}
for (module.failed_exports.values()) |value| {
try addModuleErrorMsg(&bundle, value.*);
try addModuleErrorMsg(module, &bundle, value.*);
}
}
@ -2703,7 +2711,7 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
const values = module.compile_log_decls.values();
// First one will be the error; subsequent ones will be notes.
const err_decl = module.declPtr(keys[0]);
const src_loc = err_decl.nodeOffsetSrcLoc(values[0]);
const src_loc = err_decl.nodeOffsetSrcLoc(values[0], module);
const err_msg = Module.ErrorMsg{
.src_loc = src_loc,
.msg = "found compile log statement",
@ -2714,12 +2722,12 @@ pub fn getAllErrorsAlloc(self: *Compilation) !ErrorBundle {
for (keys[1..], 0..) |key, i| {
const note_decl = module.declPtr(key);
err_msg.notes[i] = .{
.src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1]),
.src_loc = note_decl.nodeOffsetSrcLoc(values[i + 1], module),
.msg = "also here",
};
}
try addModuleErrorMsg(&bundle, err_msg);
try addModuleErrorMsg(module, &bundle, err_msg);
}
}
@ -2775,8 +2783,9 @@ pub const ErrorNoteHashContext = struct {
}
};
pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void {
pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg) !void {
const gpa = eb.gpa;
const ip = &mod.intern_pool;
const err_source = module_err_msg.src_loc.file_scope.getSource(gpa) catch |err| {
const file_path = try module_err_msg.src_loc.file_scope.fullPath(gpa);
defer gpa.free(file_path);
@ -2802,7 +2811,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg)
.src_loc = .none,
});
break;
} else if (module_reference.decl == null) {
} else if (module_reference.decl == .none) {
try ref_traces.append(gpa, .{
.decl_name = 0,
.src_loc = .none,
@ -2815,7 +2824,7 @@ pub fn addModuleErrorMsg(eb: *ErrorBundle.Wip, module_err_msg: Module.ErrorMsg)
const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa);
defer gpa.free(rt_file_path);
try ref_traces.append(gpa, .{
.decl_name = try eb.addString(std.mem.sliceTo(module_reference.decl.?, 0)),
.decl_name = try eb.addString(ip.stringToSliceUnwrap(module_reference.decl).?),
.src_loc = try eb.addSourceLocation(.{
.src_path = try eb.addString(rt_file_path),
.span_start = span.start,
@ -3204,7 +3213,8 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
// Tests are always emitted in test binaries. The decl_refs are created by
// Module.populateTestFunctions, but this will not queue body analysis, so do
// that now.
try module.ensureFuncBodyAnalysisQueued(decl.val.castTag(.function).?.data);
const func_index = module.intern_pool.indexToFunc(decl.val.ip_index).unwrap().?;
try module.ensureFuncBodyAnalysisQueued(func_index);
}
},
.update_embed_file => |embed_file| {
@ -3228,7 +3238,7 @@ fn processOneJob(comp: *Compilation, job: Job, prog_node: *std.Progress.Node) !v
try module.failed_decls.ensureUnusedCapacity(gpa, 1);
module.failed_decls.putAssumeCapacityNoClobber(decl_index, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
decl.srcLoc(module),
"unable to update line number: {s}",
.{@errorName(err)},
));
@ -3841,7 +3851,7 @@ fn reportRetryableEmbedFileError(
const mod = comp.bin_file.options.module.?;
const gpa = mod.gpa;
const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc();
const src_loc: Module.SrcLoc = mod.declPtr(embed_file.owner_decl).srcLoc(mod);
const err_msg = if (embed_file.pkg.root_src_directory.path) |dir_path|
try Module.ErrorMsg.create(
@ -5417,6 +5427,7 @@ fn buildOutputFromZig(
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.bin_file.options.verbose_link,
.verbose_air = comp.verbose_air,
.verbose_intern_pool = comp.verbose_intern_pool,
.verbose_llvm_ir = comp.verbose_llvm_ir,
.verbose_llvm_bc = comp.verbose_llvm_bc,
.verbose_cimport = comp.verbose_cimport,
@ -5495,6 +5506,7 @@ pub fn build_crt_file(
.verbose_cc = comp.verbose_cc,
.verbose_link = comp.bin_file.options.verbose_link,
.verbose_air = comp.verbose_air,
.verbose_intern_pool = comp.verbose_intern_pool,
.verbose_llvm_ir = comp.verbose_llvm_ir,
.verbose_llvm_bc = comp.verbose_llvm_bc,
.verbose_cimport = comp.verbose_cimport,

File diff suppressed because it is too large.

View File

@ -5,15 +5,17 @@
//! Some instructions are special, such as:
//! * Conditional Branches
//! * Switch Branches
const Liveness = @This();
const std = @import("std");
const trace = @import("tracy.zig").trace;
const log = std.log.scoped(.liveness);
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Air = @import("Air.zig");
const Log2Int = std.math.Log2Int;
const Liveness = @This();
const trace = @import("tracy.zig").trace;
const Air = @import("Air.zig");
const InternPool = @import("InternPool.zig");
pub const Verify = @import("Liveness/Verify.zig");
/// This array is split into sets of 4 bits per AIR instruction.
@ -129,7 +131,7 @@ fn LivenessPassData(comptime pass: LivenessPass) type {
};
}
pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
pub fn analyze(gpa: Allocator, air: Air, intern_pool: *const InternPool) Allocator.Error!Liveness {
const tracy = trace(@src());
defer tracy.end();
@ -142,6 +144,7 @@ pub fn analyze(gpa: Allocator, air: Air) Allocator.Error!Liveness {
),
.extra = .{},
.special = .{},
.intern_pool = intern_pool,
};
errdefer gpa.free(a.tomb_bits);
errdefer a.special.deinit(gpa);
@ -222,6 +225,7 @@ pub fn categorizeOperand(
air: Air,
inst: Air.Inst.Index,
operand: Air.Inst.Index,
ip: *const InternPool,
) OperandCategory {
const air_tags = air.instructions.items(.tag);
const air_datas = air.instructions.items(.data);
@ -317,9 +321,10 @@ pub fn categorizeOperand(
.arg,
.alloc,
.inferred_alloc,
.inferred_alloc_comptime,
.ret_ptr,
.constant,
.const_ty,
.interned,
.trap,
.breakpoint,
.dbg_stmt,
@ -530,7 +535,7 @@ pub fn categorizeOperand(
.aggregate_init => {
const ty_pl = air_datas[inst].ty_pl;
const aggregate_ty = air.getRefType(ty_pl.ty);
const len = @intCast(usize, aggregate_ty.arrayLen());
const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
const elements = @ptrCast([]const Air.Inst.Ref, air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {
@ -621,7 +626,7 @@ pub fn categorizeOperand(
var operand_live: bool = true;
for (air.extra[cond_extra.end..][0..2]) |cond_inst| {
if (l.categorizeOperand(air, cond_inst, operand) == .tomb)
if (l.categorizeOperand(air, cond_inst, operand, ip) == .tomb)
operand_live = false;
switch (air_tags[cond_inst]) {
@ -818,6 +823,7 @@ pub const BigTomb = struct {
const Analysis = struct {
gpa: Allocator,
air: Air,
intern_pool: *const InternPool,
tomb_bits: []usize,
special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32),
extra: std.ArrayListUnmanaged(u32),
@ -867,6 +873,7 @@ fn analyzeInst(
data: *LivenessPassData(pass),
inst: Air.Inst.Index,
) Allocator.Error!void {
const ip = a.intern_pool;
const inst_tags = a.air.instructions.items(.tag);
const inst_datas = a.air.instructions.items(.data);
@ -967,9 +974,7 @@ fn analyzeInst(
.work_group_id,
=> return analyzeOperands(a, pass, data, inst, .{ .none, .none, .none }),
.constant,
.const_ty,
=> unreachable,
.inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
.trap,
.unreach,
@ -1134,7 +1139,7 @@ fn analyzeInst(
.aggregate_init => {
const ty_pl = inst_datas[inst].ty_pl;
const aggregate_ty = a.air.getRefType(ty_pl.ty);
const len = @intCast(usize, aggregate_ty.arrayLen());
const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
if (elements.len <= bpi - 1) {
@ -1253,19 +1258,17 @@ fn analyzeOperands(
) Allocator.Error!void {
const gpa = a.gpa;
const inst_tags = a.air.instructions.items(.tag);
const ip = a.intern_pool;
switch (pass) {
.loop_analysis => {
_ = data.live_set.remove(inst);
for (operands) |op_ref| {
const operand = Air.refToIndex(op_ref) orelse continue;
const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
// Don't compute any liveness for constants
switch (inst_tags[operand]) {
.constant, .const_ty => continue,
else => {},
}
if (inst_tags[operand] == .interned) continue;
_ = try data.live_set.put(gpa, operand, {});
}
@ -1288,20 +1291,17 @@ fn analyzeOperands(
// If our result is unused and the instruction doesn't need to be lowered, backends will
// skip the lowering of this instruction, so we don't want to record uses of operands.
// That way, we can mark as many instructions as possible unused.
if (!immediate_death or a.air.mustLower(inst)) {
if (!immediate_death or a.air.mustLower(inst, ip)) {
// Note that it's important we iterate over the operands backwards, so that if a dying
// operand is used multiple times we mark its last use as its death.
var i = operands.len;
while (i > 0) {
i -= 1;
const op_ref = operands[i];
const operand = Air.refToIndex(op_ref) orelse continue;
const operand = Air.refToIndexAllowNone(op_ref) orelse continue;
// Don't compute any liveness for constants
switch (inst_tags[operand]) {
.constant, .const_ty => continue,
else => {},
}
if (inst_tags[operand] == .interned) continue;
const mask = @as(Bpi, 1) << @intCast(OperandInt, i);
@ -1407,7 +1407,7 @@ fn analyzeInstBlock(
// If the block is noreturn, block deaths not only aren't useful, they're impossible to
// find: there could be more stuff alive after the block than before it!
if (!a.air.getRefType(ty_pl.ty).isNoReturn()) {
if (!a.intern_pool.isNoReturn(a.air.getRefType(ty_pl.ty).ip_index)) {
// The block kills the difference in the live sets
const block_scope = data.block_scopes.get(inst).?;
const num_deaths = data.live_set.count() - block_scope.live_set.count();
@ -1819,6 +1819,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
/// Must be called with operands in reverse order.
fn feed(big: *Self, op_ref: Air.Inst.Ref) !void {
const ip = big.a.intern_pool;
// Note that after this, `operands_remaining` becomes the index of the current operand
big.operands_remaining -= 1;
@ -1831,15 +1832,12 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
// Don't compute any liveness for constants
const inst_tags = big.a.air.instructions.items(.tag);
switch (inst_tags[operand]) {
.constant, .const_ty => return,
else => {},
}
if (inst_tags[operand] == .interned) return;
// If our result is unused and the instruction doesn't need to be lowered, backends will
// skip the lowering of this instruction, so we don't want to record uses of operands.
// That way, we can mark as many instructions as possible unused.
if (big.will_die_immediately and !big.a.air.mustLower(big.inst)) return;
if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return;
const extra_byte = (big.operands_remaining - (bpi - 1)) / 31;
const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31);


@ -5,6 +5,7 @@ air: Air,
liveness: Liveness,
live: LiveMap = .{},
blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, LiveMap) = .{},
intern_pool: *const InternPool,
pub const Error = error{ LivenessInvalid, OutOfMemory };
@ -27,10 +28,11 @@ pub fn verify(self: *Verify) Error!void {
const LiveMap = std.AutoHashMapUnmanaged(Air.Inst.Index, void);
fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
const ip = self.intern_pool;
const tag = self.air.instructions.items(.tag);
const data = self.air.instructions.items(.data);
for (body) |inst| {
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) {
// This instruction will not be lowered and should be ignored.
continue;
}
@ -39,9 +41,10 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
// no operands
.arg,
.alloc,
.inferred_alloc,
.inferred_alloc_comptime,
.ret_ptr,
.constant,
.const_ty,
.interned,
.breakpoint,
.dbg_stmt,
.dbg_inline_begin,
@ -58,10 +61,10 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.work_item_id,
.work_group_size,
.work_group_id,
=> try self.verifyInst(inst, .{ .none, .none, .none }),
=> try self.verifyInstOperands(inst, .{ .none, .none, .none }),
.trap, .unreach => {
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInstOperands(inst, .{ .none, .none, .none });
// This instruction terminates the function, so everything should be dead
if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
},
@ -110,7 +113,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.c_va_copy,
=> {
const ty_op = data[inst].ty_op;
try self.verifyInst(inst, .{ ty_op.operand, .none, .none });
try self.verifyInstOperands(inst, .{ ty_op.operand, .none, .none });
},
.is_null,
.is_non_null,
@ -146,13 +149,13 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.c_va_end,
=> {
const un_op = data[inst].un_op;
try self.verifyInst(inst, .{ un_op, .none, .none });
try self.verifyInstOperands(inst, .{ un_op, .none, .none });
},
.ret,
.ret_load,
=> {
const un_op = data[inst].un_op;
try self.verifyInst(inst, .{ un_op, .none, .none });
try self.verifyInstOperands(inst, .{ un_op, .none, .none });
// This instruction terminates the function, so everything should be dead
if (self.live.count() > 0) return invalid("%{}: instructions still alive", .{inst});
},
@ -161,36 +164,36 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.wasm_memory_grow,
=> {
const pl_op = data[inst].pl_op;
try self.verifyInst(inst, .{ pl_op.operand, .none, .none });
try self.verifyInstOperands(inst, .{ pl_op.operand, .none, .none });
},
.prefetch => {
const prefetch = data[inst].prefetch;
try self.verifyInst(inst, .{ prefetch.ptr, .none, .none });
try self.verifyInstOperands(inst, .{ prefetch.ptr, .none, .none });
},
.reduce,
.reduce_optimized,
=> {
const reduce = data[inst].reduce;
try self.verifyInst(inst, .{ reduce.operand, .none, .none });
try self.verifyInstOperands(inst, .{ reduce.operand, .none, .none });
},
.union_init => {
const ty_pl = data[inst].ty_pl;
const extra = self.air.extraData(Air.UnionInit, ty_pl.payload).data;
try self.verifyInst(inst, .{ extra.init, .none, .none });
try self.verifyInstOperands(inst, .{ extra.init, .none, .none });
},
.struct_field_ptr, .struct_field_val => {
const ty_pl = data[inst].ty_pl;
const extra = self.air.extraData(Air.StructField, ty_pl.payload).data;
try self.verifyInst(inst, .{ extra.struct_operand, .none, .none });
try self.verifyInstOperands(inst, .{ extra.struct_operand, .none, .none });
},
.field_parent_ptr => {
const ty_pl = data[inst].ty_pl;
const extra = self.air.extraData(Air.FieldParentPtr, ty_pl.payload).data;
try self.verifyInst(inst, .{ extra.field_ptr, .none, .none });
try self.verifyInstOperands(inst, .{ extra.field_ptr, .none, .none });
},
.atomic_load => {
const atomic_load = data[inst].atomic_load;
try self.verifyInst(inst, .{ atomic_load.ptr, .none, .none });
try self.verifyInstOperands(inst, .{ atomic_load.ptr, .none, .none });
},
// binary
@ -260,7 +263,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
.memcpy,
=> {
const bin_op = data[inst].bin_op;
try self.verifyInst(inst, .{ bin_op.lhs, bin_op.rhs, .none });
try self.verifyInstOperands(inst, .{ bin_op.lhs, bin_op.rhs, .none });
},
.add_with_overflow,
.sub_with_overflow,
@ -274,62 +277,62 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
=> {
const ty_pl = data[inst].ty_pl;
const extra = self.air.extraData(Air.Bin, ty_pl.payload).data;
try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none });
try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none });
},
.shuffle => {
const ty_pl = data[inst].ty_pl;
const extra = self.air.extraData(Air.Shuffle, ty_pl.payload).data;
try self.verifyInst(inst, .{ extra.a, extra.b, .none });
try self.verifyInstOperands(inst, .{ extra.a, extra.b, .none });
},
.cmp_vector,
.cmp_vector_optimized,
=> {
const ty_pl = data[inst].ty_pl;
const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
try self.verifyInst(inst, .{ extra.lhs, extra.rhs, .none });
try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, .none });
},
.atomic_rmw => {
const pl_op = data[inst].pl_op;
const extra = self.air.extraData(Air.AtomicRmw, pl_op.payload).data;
try self.verifyInst(inst, .{ pl_op.operand, extra.operand, .none });
try self.verifyInstOperands(inst, .{ pl_op.operand, extra.operand, .none });
},
// ternary
.select => {
const pl_op = data[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
try self.verifyInst(inst, .{ pl_op.operand, extra.lhs, extra.rhs });
try self.verifyInstOperands(inst, .{ pl_op.operand, extra.lhs, extra.rhs });
},
.mul_add => {
const pl_op = data[inst].pl_op;
const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
try self.verifyInst(inst, .{ extra.lhs, extra.rhs, pl_op.operand });
try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, pl_op.operand });
},
.vector_store_elem => {
const vector_store_elem = data[inst].vector_store_elem;
const extra = self.air.extraData(Air.Bin, vector_store_elem.payload).data;
try self.verifyInst(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs });
try self.verifyInstOperands(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs });
},
.cmpxchg_strong,
.cmpxchg_weak,
=> {
const ty_pl = data[inst].ty_pl;
const extra = self.air.extraData(Air.Cmpxchg, ty_pl.payload).data;
try self.verifyInst(inst, .{ extra.ptr, extra.expected_value, extra.new_value });
try self.verifyInstOperands(inst, .{ extra.ptr, extra.expected_value, extra.new_value });
},
// big tombs
.aggregate_init => {
const ty_pl = data[inst].ty_pl;
const aggregate_ty = self.air.getRefType(ty_pl.ty);
const len = @intCast(usize, aggregate_ty.arrayLen());
const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
var bt = self.liveness.iterateBigTomb(inst);
for (elements) |element| {
try self.verifyOperand(inst, element, bt.feed());
}
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInst(inst);
},
.call, .call_always_tail, .call_never_tail, .call_never_inline => {
const pl_op = data[inst].pl_op;
@ -344,7 +347,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
for (args) |arg| {
try self.verifyOperand(inst, arg, bt.feed());
}
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInst(inst);
},
.assembly => {
const ty_pl = data[inst].ty_pl;
@ -370,7 +373,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
for (inputs) |input| {
try self.verifyOperand(inst, input, bt.feed());
}
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInst(inst);
},
// control flow
@ -394,7 +397,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death);
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInst(inst);
},
.try_ptr => {
const ty_pl = data[inst].ty_pl;
@ -416,7 +419,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
for (cond_br_liveness.then_deaths) |death| try self.verifyDeath(inst, death);
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInst(inst);
},
.br => {
const br = data[inst].br;
@ -428,7 +431,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
} else {
gop.value_ptr.* = try self.live.clone(self.gpa);
}
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInst(inst);
},
.block => {
const ty_pl = data[inst].ty_pl;
@ -450,7 +453,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
for (block_liveness.deaths) |death| try self.verifyDeath(inst, death);
if (block_ty.isNoReturn()) {
if (ip.isNoReturn(block_ty.toIntern())) {
assert(!self.blocks.contains(inst));
} else {
var live = self.blocks.fetchRemove(inst).?.value;
@ -459,7 +462,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
try self.verifyMatchingLiveness(inst, live);
}
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInstOperands(inst, .{ .none, .none, .none });
},
.loop => {
const ty_pl = data[inst].ty_pl;
@ -474,7 +477,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
// The same stuff should be alive after the loop as before it
try self.verifyMatchingLiveness(inst, live);
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInstOperands(inst, .{ .none, .none, .none });
},
.cond_br => {
const pl_op = data[inst].pl_op;
@ -497,7 +500,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
for (cond_br_liveness.else_deaths) |death| try self.verifyDeath(inst, death);
try self.verifyBody(else_body);
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInst(inst);
},
.switch_br => {
const pl_op = data[inst].pl_op;
@ -541,7 +544,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
try self.verifyBody(else_body);
}
try self.verifyInst(inst, .{ .none, .none, .none });
try self.verifyInst(inst);
},
}
}
@ -552,20 +555,22 @@ fn verifyDeath(self: *Verify, inst: Air.Inst.Index, operand: Air.Inst.Index) Err
}
fn verifyOperand(self: *Verify, inst: Air.Inst.Index, op_ref: Air.Inst.Ref, dies: bool) Error!void {
const operand = Air.refToIndex(op_ref) orelse return;
switch (self.air.instructions.items(.tag)[operand]) {
.constant, .const_ty => {},
else => {
if (dies) {
if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
} else {
if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
}
},
const operand = Air.refToIndexAllowNone(op_ref) orelse {
assert(!dies);
return;
};
if (self.air.instructions.items(.tag)[operand] == .interned) {
assert(!dies);
return;
}
if (dies) {
if (!self.live.remove(operand)) return invalid("%{}: dead operand %{} reused and killed again", .{ inst, operand });
} else {
if (!self.live.contains(operand)) return invalid("%{}: dead operand %{} reused", .{ inst, operand });
}
}
fn verifyInst(
fn verifyInstOperands(
self: *Verify,
inst: Air.Inst.Index,
operands: [Liveness.bpi - 1]Air.Inst.Ref,
@ -574,16 +579,15 @@ fn verifyInst(
const dies = self.liveness.operandDies(inst, @intCast(Liveness.OperandInt, operand_index));
try self.verifyOperand(inst, operand, dies);
}
const tag = self.air.instructions.items(.tag);
switch (tag[inst]) {
.constant, .const_ty => unreachable,
else => {
if (self.liveness.isUnused(inst)) {
assert(!self.live.contains(inst));
} else {
try self.live.putNoClobber(self.gpa, inst, {});
}
},
try self.verifyInst(inst);
}
fn verifyInst(self: *Verify, inst: Air.Inst.Index) Error!void {
if (self.air.instructions.items(.tag)[inst] == .interned) return;
if (self.liveness.isUnused(inst)) {
assert(!self.live.contains(inst));
} else {
try self.live.putNoClobber(self.gpa, inst, {});
}
}
@ -604,4 +608,5 @@ const log = std.log.scoped(.liveness_verify);
const Air = @import("../Air.zig");
const Liveness = @import("../Liveness.zig");
const InternPool = @import("../InternPool.zig");
const Verify = @This();

File diff suppressed because it is too large

@ -1,18 +1,18 @@
const std = @import("std");
const assert = std.debug.assert;
const Order = std.math.Order;
const RangeSet = @This();
const InternPool = @import("InternPool.zig");
const Module = @import("Module.zig");
const RangeSet = @This();
const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
ranges: std.ArrayList(Range),
module: *Module,
pub const Range = struct {
first: Value,
last: Value,
first: InternPool.Index,
last: InternPool.Index,
src: SwitchProngSrc,
};
@ -29,18 +29,27 @@ pub fn deinit(self: *RangeSet) void {
pub fn add(
self: *RangeSet,
first: Value,
last: Value,
ty: Type,
first: InternPool.Index,
last: InternPool.Index,
src: SwitchProngSrc,
) !?SwitchProngSrc {
const mod = self.module;
const ip = &mod.intern_pool;
const ty = ip.typeOf(first);
assert(ty == ip.typeOf(last));
for (self.ranges.items) |range| {
if (last.compareAll(.gte, range.first, ty, self.module) and
first.compareAll(.lte, range.last, ty, self.module))
assert(ty == ip.typeOf(range.first));
assert(ty == ip.typeOf(range.last));
if (last.toValue().compareScalar(.gte, range.first.toValue(), ty.toType(), mod) and
first.toValue().compareScalar(.lte, range.last.toValue(), ty.toType(), mod))
{
return range.src; // They overlap.
}
}
try self.ranges.append(.{
.first = first,
.last = last,
@ -49,45 +58,43 @@ pub fn add(
return null;
}
const LessThanContext = struct { ty: Type, module: *Module };
/// Assumes a and b do not overlap
fn lessThan(ctx: LessThanContext, a: Range, b: Range) bool {
return a.first.compareAll(.lt, b.first, ctx.ty, ctx.module);
fn lessThan(mod: *Module, a: Range, b: Range) bool {
const ty = mod.intern_pool.typeOf(a.first).toType();
return a.first.toValue().compareScalar(.lt, b.first.toValue(), ty, mod);
}
pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
pub fn spans(self: *RangeSet, first: InternPool.Index, last: InternPool.Index) !bool {
const mod = self.module;
const ip = &mod.intern_pool;
assert(ip.typeOf(first) == ip.typeOf(last));
if (self.ranges.items.len == 0)
return false;
std.mem.sort(Range, self.ranges.items, LessThanContext{
.ty = ty,
.module = self.module,
}, lessThan);
std.mem.sort(Range, self.ranges.items, mod, lessThan);
if (!self.ranges.items[0].first.eql(first, ty, self.module) or
!self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty, self.module))
if (self.ranges.items[0].first != first or
self.ranges.items[self.ranges.items.len - 1].last != last)
{
return false;
}
var space: Value.BigIntSpace = undefined;
var space: InternPool.Key.Int.Storage.BigIntSpace = undefined;
var counter = try std.math.big.int.Managed.init(self.ranges.allocator);
defer counter.deinit();
const target = self.module.getTarget();
// look for gaps
for (self.ranges.items[1..], 0..) |cur, i| {
// i starts counting from the second item.
const prev = self.ranges.items[i];
// prev.last + 1 == cur.first
try counter.copy(prev.last.toBigInt(&space, target));
try counter.copy(prev.last.toValue().toBigInt(&space, mod));
try counter.addScalar(&counter, 1);
const cur_start_int = cur.first.toBigInt(&space, target);
const cur_start_int = cur.first.toValue().toBigInt(&space, mod);
if (!cur_start_int.eq(counter.toConst())) {
return false;
}
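(Editorial sketch, not part of this diff.) The gap check above is plain integer arithmetic: after sorting, the ranges span the whole interval only if each one starts exactly one past the previous one's end, e.g. 1..3 followed by 4..6 has no gap because 3 + 1 == 4. Stripped of the InternPool and big-int machinery, the invariant is:

    const Range = struct { first: u64, last: u64 };

    /// Assumes `ranges` is sorted by `first` and non-overlapping.
    fn isContiguous(ranges: []const Range) bool {
        for (ranges[1..], 0..) |cur, i| {
            const prev = ranges[i]; // the element just before `cur`
            if (prev.last + 1 != cur.first) return false; // found a gap
        }
        return true;
    }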

src/Sema.zig

File diff suppressed because it is too large

@ -27,13 +27,13 @@ pub const Managed = struct {
/// Assumes arena allocation. Does a recursive copy.
pub fn copy(self: TypedValue, arena: Allocator) error{OutOfMemory}!TypedValue {
return TypedValue{
.ty = try self.ty.copy(arena),
.ty = self.ty,
.val = try self.val.copy(arena),
};
}
pub fn eql(a: TypedValue, b: TypedValue, mod: *Module) bool {
if (!a.ty.eql(b.ty, mod)) return false;
if (a.ty.toIntern() != b.ty.toIntern()) return false;
return a.val.eql(b.val, a.ty, mod);
}
@ -41,8 +41,8 @@ pub fn hash(tv: TypedValue, hasher: *std.hash.Wyhash, mod: *Module) void {
return tv.val.hash(tv.ty, hasher, mod);
}
pub fn enumToInt(tv: TypedValue, buffer: *Value.Payload.U64) Value {
return tv.val.enumToInt(tv.ty, buffer);
pub fn enumToInt(tv: TypedValue, mod: *Module) Allocator.Error!Value {
return tv.val.enumToInt(tv.ty, mod);
}
const max_aggregate_items = 100;
@ -61,7 +61,10 @@ pub fn format(
) !void {
_ = options;
comptime std.debug.assert(fmt.len == 0);
return ctx.tv.print(writer, 3, ctx.mod);
return ctx.tv.print(writer, 3, ctx.mod) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // We're not allowed to return this from a format function
else => |e| return e,
};
}
/// Prints the Value according to the Type, not according to the Value Tag.
@ -70,106 +73,61 @@ pub fn print(
writer: anytype,
level: u8,
mod: *Module,
) @TypeOf(writer).Error!void {
const target = mod.getTarget();
) (@TypeOf(writer).Error || Allocator.Error)!void {
var val = tv.val;
var ty = tv.ty;
if (val.isVariable(mod))
return writer.writeAll("(variable)");
const ip = &mod.intern_pool;
while (true) switch (val.ip_index) {
.none => switch (val.tag()) {
.aggregate => return printAggregate(ty, val, writer, level, mod),
.@"union" => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const union_val = val.castTag(.@"union").?.data;
try writer.writeAll(".{ ");
while (true) switch (val.tag()) {
.u1_type => return writer.writeAll("u1"),
.u8_type => return writer.writeAll("u8"),
.i8_type => return writer.writeAll("i8"),
.u16_type => return writer.writeAll("u16"),
.i16_type => return writer.writeAll("i16"),
.u29_type => return writer.writeAll("u29"),
.u32_type => return writer.writeAll("u32"),
.i32_type => return writer.writeAll("i32"),
.u64_type => return writer.writeAll("u64"),
.i64_type => return writer.writeAll("i64"),
.u128_type => return writer.writeAll("u128"),
.i128_type => return writer.writeAll("i128"),
.isize_type => return writer.writeAll("isize"),
.usize_type => return writer.writeAll("usize"),
.c_char_type => return writer.writeAll("c_char"),
.c_short_type => return writer.writeAll("c_short"),
.c_ushort_type => return writer.writeAll("c_ushort"),
.c_int_type => return writer.writeAll("c_int"),
.c_uint_type => return writer.writeAll("c_uint"),
.c_long_type => return writer.writeAll("c_long"),
.c_ulong_type => return writer.writeAll("c_ulong"),
.c_longlong_type => return writer.writeAll("c_longlong"),
.c_ulonglong_type => return writer.writeAll("c_ulonglong"),
.c_longdouble_type => return writer.writeAll("c_longdouble"),
.f16_type => return writer.writeAll("f16"),
.f32_type => return writer.writeAll("f32"),
.f64_type => return writer.writeAll("f64"),
.f80_type => return writer.writeAll("f80"),
.f128_type => return writer.writeAll("f128"),
.anyopaque_type => return writer.writeAll("anyopaque"),
.bool_type => return writer.writeAll("bool"),
.void_type => return writer.writeAll("void"),
.type_type => return writer.writeAll("type"),
.anyerror_type => return writer.writeAll("anyerror"),
.comptime_int_type => return writer.writeAll("comptime_int"),
.comptime_float_type => return writer.writeAll("comptime_float"),
.noreturn_type => return writer.writeAll("noreturn"),
.null_type => return writer.writeAll("@Type(.Null)"),
.undefined_type => return writer.writeAll("@Type(.Undefined)"),
.fn_noreturn_no_args_type => return writer.writeAll("fn() noreturn"),
.fn_void_no_args_type => return writer.writeAll("fn() void"),
.fn_naked_noreturn_no_args_type => return writer.writeAll("fn() callconv(.Naked) noreturn"),
.fn_ccc_void_no_args_type => return writer.writeAll("fn() callconv(.C) void"),
.single_const_pointer_to_comptime_int_type => return writer.writeAll("*const comptime_int"),
.anyframe_type => return writer.writeAll("anyframe"),
.const_slice_u8_type => return writer.writeAll("[]const u8"),
.const_slice_u8_sentinel_0_type => return writer.writeAll("[:0]const u8"),
.anyerror_void_error_union_type => return writer.writeAll("anyerror!void"),
.enum_literal_type => return writer.writeAll("@Type(.EnumLiteral)"),
.manyptr_u8_type => return writer.writeAll("[*]u8"),
.manyptr_const_u8_type => return writer.writeAll("[*]const u8"),
.manyptr_const_u8_sentinel_0_type => return writer.writeAll("[*:0]const u8"),
.atomic_order_type => return writer.writeAll("std.builtin.AtomicOrder"),
.atomic_rmw_op_type => return writer.writeAll("std.builtin.AtomicRmwOp"),
.calling_convention_type => return writer.writeAll("std.builtin.CallingConvention"),
.address_space_type => return writer.writeAll("std.builtin.AddressSpace"),
.float_mode_type => return writer.writeAll("std.builtin.FloatMode"),
.reduce_op_type => return writer.writeAll("std.builtin.ReduceOp"),
.modifier_type => return writer.writeAll("std.builtin.CallModifier"),
.prefetch_options_type => return writer.writeAll("std.builtin.PrefetchOptions"),
.export_options_type => return writer.writeAll("std.builtin.ExportOptions"),
.extern_options_type => return writer.writeAll("std.builtin.ExternOptions"),
.type_info_type => return writer.writeAll("std.builtin.Type"),
.empty_struct_value, .aggregate => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
if (ty.zigTypeTag() == .Struct) {
try writer.writeAll(".{");
const max_len = std.math.min(ty.structFieldCount(), max_aggregate_items);
try print(.{
.ty = mod.unionPtr(ip.indexToKey(ty.toIntern()).union_type.index).tag_ty,
.val = union_val.tag,
}, writer, level - 1, mod);
try writer.writeAll(" = ");
try print(.{
.ty = ty.unionFieldType(union_val.tag, mod),
.val = union_val.val,
}, writer, level - 1, mod);
return writer.writeAll(" }");
},
.bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.repeated => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
var i: u32 = 0;
try writer.writeAll(".{ ");
const elem_tv = TypedValue{
.ty = ty.elemType2(mod),
.val = val.castTag(.repeated).?.data,
};
const len = ty.arrayLen(mod);
const max_len = std.math.min(len, max_aggregate_items);
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
switch (ty.tag()) {
.anon_struct, .@"struct" => try writer.print(".{s} = ", .{ty.structFieldName(i)}),
else => {},
}
try print(.{
.ty = ty.structFieldType(i),
.val = val.fieldValue(ty, i),
}, writer, level - 1, mod);
try print(elem_tv, writer, level - 1, mod);
}
if (ty.structFieldCount() > max_aggregate_items) {
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll("}");
} else {
const elem_ty = ty.elemType2();
const len = ty.arrayLen();
return writer.writeAll(" }");
},
.slice => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const payload = val.castTag(.slice).?.data;
const elem_ty = ty.elemType2(mod);
const len = payload.len.toUnsignedInt(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len = @intCast(usize, std.math.min(len, max_string_len));
@ -177,11 +135,14 @@ pub fn print(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
const elem = val.fieldValue(ty, i);
if (elem.isUndef()) break :str;
buf[i] = std.math.cast(u8, elem.toUnsignedInt(target)) orelse break :str;
const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
};
if (elem_val.isUndef(mod)) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(mod)) orelse break :str;
}
// TODO would be nice if this had a bit of unicode awareness.
const truncated = if (len > max_string_len) " (truncated)" else "";
return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
}
@ -192,315 +153,334 @@ pub fn print(
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
const elem_val = payload.ptr.elemValue(mod, i) catch |err| switch (err) {
error.OutOfMemory => @panic("OOM"), // TODO: eliminate this panic
};
try print(.{
.ty = elem_ty,
.val = val.fieldValue(ty, i),
.val = elem_val,
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
}
},
.eu_payload => {
val = val.castTag(.eu_payload).?.data;
ty = ty.errorUnionPayload(mod);
},
.opt_payload => {
val = val.castTag(.opt_payload).?.data;
ty = ty.optionalChild(mod);
},
},
.@"union" => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const union_val = val.castTag(.@"union").?.data;
try writer.writeAll(".{ ");
try print(.{
.ty = ty.cast(Type.Payload.Union).?.data.tag_ty,
.val = union_val.tag,
}, writer, level - 1, mod);
try writer.writeAll(" = ");
try print(.{
.ty = ty.unionFieldType(union_val.tag, mod),
.val = union_val.val,
}, writer, level - 1, mod);
return writer.writeAll(" }");
},
.null_value => return writer.writeAll("null"),
.undef => return writer.writeAll("undefined"),
.zero => return writer.writeAll("0"),
.one => return writer.writeAll("1"),
.void_value => return writer.writeAll("{}"),
.unreachable_value => return writer.writeAll("unreachable"),
.the_only_possible_value => return writer.writeAll("0"),
.bool_true => return writer.writeAll("true"),
.bool_false => return writer.writeAll("false"),
.ty => return val.castTag(.ty).?.data.print(writer, mod),
.int_type => {
const int_type = val.castTag(.int_type).?.data;
return writer.print("{s}{d}", .{
if (int_type.signed) "s" else "u",
int_type.bits,
});
},
.int_u64 => return std.fmt.formatIntValue(val.castTag(.int_u64).?.data, "", .{}, writer),
.int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", .{}, writer),
.int_big_positive => return writer.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}),
.int_big_negative => return writer.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}),
.lazy_align => {
const sub_ty = val.castTag(.lazy_align).?.data;
const x = sub_ty.abiAlignment(target);
return writer.print("{d}", .{x});
},
.lazy_size => {
const sub_ty = val.castTag(.lazy_size).?.data;
const x = sub_ty.abiSize(target);
return writer.print("{d}", .{x});
},
.function => return writer.print("(function '{s}')", .{
mod.declPtr(val.castTag(.function).?.data.owner_decl).name,
}),
.extern_fn => return writer.writeAll("(extern function)"),
.variable => unreachable,
.decl_ref_mut => {
const decl_index = val.castTag(.decl_ref_mut).?.data.decl_index;
const decl = mod.declPtr(decl_index);
if (level == 0) {
return writer.print("(decl ref mut '{s}')", .{decl.name});
}
return print(.{
.ty = decl.ty,
.val = decl.val,
}, writer, level - 1, mod);
},
.decl_ref => {
const decl_index = val.castTag(.decl_ref).?.data;
const decl = mod.declPtr(decl_index);
if (level == 0) {
return writer.print("(decl ref '{s}')", .{decl.name});
}
return print(.{
.ty = decl.ty,
.val = decl.val,
}, writer, level - 1, mod);
},
.comptime_field_ptr => {
const payload = val.castTag(.comptime_field_ptr).?.data;
if (level == 0) {
return writer.writeAll("(comptime field ptr)");
}
return print(.{
.ty = payload.field_ty,
.val = payload.field_val,
}, writer, level - 1, mod);
},
.elem_ptr => {
const elem_ptr = val.castTag(.elem_ptr).?.data;
try writer.writeAll("&");
if (level == 0) {
try writer.writeAll("(ptr)");
} else {
else => switch (ip.indexToKey(val.toIntern())) {
.int_type,
.ptr_type,
.array_type,
.vector_type,
.opt_type,
.anyframe_type,
.error_union_type,
.simple_type,
.struct_type,
.anon_struct_type,
.union_type,
.opaque_type,
.enum_type,
.func_type,
.error_set_type,
.inferred_error_set_type,
=> return Type.print(val.toType(), writer, mod),
.undef => return writer.writeAll("undefined"),
.runtime_value => return writer.writeAll("(runtime value)"),
.simple_value => |simple_value| switch (simple_value) {
.empty_struct => return printAggregate(ty, val, writer, level, mod),
.generic_poison => return writer.writeAll("(generic poison)"),
else => return writer.writeAll(@tagName(simple_value)),
},
.variable => return writer.writeAll("(variable)"),
.extern_func => |extern_func| return writer.print("(extern function '{}')", .{
mod.declPtr(extern_func.decl).name.fmt(ip),
}),
.func => |func| return writer.print("(function '{}')", .{
mod.declPtr(mod.funcPtr(func.index).owner_decl).name.fmt(ip),
}),
.int => |int| switch (int.storage) {
inline .u64, .i64, .big_int => |x| return writer.print("{}", .{x}),
.lazy_align => |lazy_ty| return writer.print("{d}", .{
lazy_ty.toType().abiAlignment(mod),
}),
.lazy_size => |lazy_ty| return writer.print("{d}", .{
lazy_ty.toType().abiSize(mod),
}),
},
.err => |err| return writer.print("error.{}", .{
err.name.fmt(ip),
}),
.error_union => |error_union| switch (error_union.val) {
.err_name => |err_name| return writer.print("error.{}", .{
err_name.fmt(ip),
}),
.payload => |payload| {
val = payload.toValue();
ty = ty.errorUnionPayload(mod);
},
},
.enum_literal => |enum_literal| return writer.print(".{}", .{
enum_literal.fmt(ip),
}),
.enum_tag => |enum_tag| {
if (level == 0) {
return writer.writeAll("(enum)");
}
const enum_type = ip.indexToKey(ty.toIntern()).enum_type;
if (enum_type.tagValueIndex(ip, val.toIntern())) |tag_index| {
try writer.print(".{i}", .{enum_type.names[tag_index].fmt(ip)});
return;
}
try writer.writeAll("@intToEnum(");
try print(.{
.ty = elem_ptr.elem_ty,
.val = elem_ptr.array_ptr,
.ty = Type.type,
.val = enum_tag.ty.toValue(),
}, writer, level - 1, mod);
}
return writer.print("[{}]", .{elem_ptr.index});
},
.field_ptr => {
const field_ptr = val.castTag(.field_ptr).?.data;
try writer.writeAll("&");
if (level == 0) {
try writer.writeAll("(ptr)");
} else {
try writer.writeAll(", ");
try print(.{
.ty = field_ptr.container_ty,
.val = field_ptr.container_ptr,
.ty = ip.typeOf(enum_tag.int).toType(),
.val = enum_tag.int.toValue(),
}, writer, level - 1, mod);
}
try writer.writeAll(")");
return;
},
.empty_enum_value => return writer.writeAll("(empty enum value)"),
.float => |float| switch (float.storage) {
inline else => |x| return writer.print("{d}", .{@floatCast(f64, x)}),
},
.ptr => |ptr| {
if (ptr.addr == .int) {
const i = ip.indexToKey(ptr.addr.int).int;
switch (i.storage) {
inline else => |addr| return writer.print("{x:0>8}", .{addr}),
}
}
if (field_ptr.container_ty.zigTypeTag() == .Struct) {
switch (field_ptr.container_ty.tag()) {
.tuple => return writer.print(".@\"{d}\"", .{field_ptr.field_index}),
else => {
const field_name = field_ptr.container_ty.structFieldName(field_ptr.field_index);
return writer.print(".{s}", .{field_name});
const ptr_ty = ip.indexToKey(ty.toIntern()).ptr_type;
if (ptr_ty.flags.size == .Slice) {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const elem_ty = ptr_ty.child.toType();
const len = ptr.len.toValue().toUnsignedInt(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len = @min(len, max_string_len);
var buf: [max_string_len]u8 = undefined;
for (buf[0..max_len], 0..) |*c, i| {
const elem = try val.elemValue(mod, i);
if (elem.isUndef(mod)) break :str;
c.* = @intCast(u8, elem.toUnsignedInt(mod));
}
const truncated = if (len > max_string_len) " (truncated)" else "";
return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
}
try writer.writeAll(".{ ");
const max_len = @min(len, max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
try print(.{
.ty = elem_ty,
.val = try val.elemValue(mod, i),
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
}
switch (ptr.addr) {
.decl => |decl_index| {
const decl = mod.declPtr(decl_index);
if (level == 0) return writer.print("(decl '{}')", .{decl.name.fmt(ip)});
return print(.{
.ty = decl.ty,
.val = decl.val,
}, writer, level - 1, mod);
},
.mut_decl => |mut_decl| {
const decl = mod.declPtr(mut_decl.decl);
if (level == 0) return writer.print("(mut decl '{}')", .{decl.name.fmt(ip)});
return print(.{
.ty = decl.ty,
.val = decl.val,
}, writer, level - 1, mod);
},
.comptime_field => |field_val_ip| {
return print(.{
.ty = ip.typeOf(field_val_ip).toType(),
.val = field_val_ip.toValue(),
}, writer, level - 1, mod);
},
.int => unreachable,
.eu_payload => |eu_ip| {
try writer.writeAll("(payload of ");
try print(.{
.ty = ip.typeOf(eu_ip).toType(),
.val = eu_ip.toValue(),
}, writer, level - 1, mod);
try writer.writeAll(")");
},
.opt_payload => |opt_ip| {
try print(.{
.ty = ip.typeOf(opt_ip).toType(),
.val = opt_ip.toValue(),
}, writer, level - 1, mod);
try writer.writeAll(".?");
},
.elem => |elem| {
try print(.{
.ty = ip.typeOf(elem.base).toType(),
.val = elem.base.toValue(),
}, writer, level - 1, mod);
try writer.print("[{}]", .{elem.index});
},
.field => |field| {
const container_ty = ip.typeOf(field.base).toType();
try print(.{
.ty = container_ty,
.val = field.base.toValue(),
}, writer, level - 1, mod);
switch (container_ty.zigTypeTag(mod)) {
.Struct => {
if (container_ty.isTuple(mod)) {
try writer.print("[{d}]", .{field.index});
}
const field_name = container_ty.structFieldName(@intCast(usize, field.index), mod);
try writer.print(".{i}", .{field_name.fmt(ip)});
},
.Union => {
const field_name = container_ty.unionFields(mod).keys()[@intCast(usize, field.index)];
try writer.print(".{i}", .{field_name.fmt(ip)});
},
.Pointer => {
std.debug.assert(container_ty.isSlice(mod));
try writer.writeAll(switch (field.index) {
Value.slice_ptr_index => ".ptr",
Value.slice_len_index => ".len",
else => unreachable,
});
},
else => unreachable,
}
},
}
} else if (field_ptr.container_ty.zigTypeTag() == .Union) {
const field_name = field_ptr.container_ty.unionFields().keys()[field_ptr.field_index];
return writer.print(".{s}", .{field_name});
} else if (field_ptr.container_ty.isSlice()) {
switch (field_ptr.field_index) {
Value.Payload.Slice.ptr_index => return writer.writeAll(".ptr"),
Value.Payload.Slice.len_index => return writer.writeAll(".len"),
else => unreachable,
}
}
},
.opt => |opt| switch (opt.val) {
.none => return writer.writeAll("null"),
else => |payload| {
val = payload.toValue();
ty = ty.optionalChild(mod);
},
},
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => |bytes| {
// Strip the 0 sentinel off of strings before printing
const zero_sent = blk: {
const sent = ty.sentinel(mod) orelse break :blk false;
break :blk sent.eql(Value.zero_u8, Type.u8, mod);
};
const str = if (zero_sent) bytes[0 .. bytes.len - 1] else bytes;
return writer.print("\"{}\"", .{std.zig.fmtEscapes(str)});
},
.elems, .repeated_elem => return printAggregate(ty, val, writer, level, mod),
},
.un => |un| {
try writer.writeAll(".{ ");
if (level > 0) {
try print(.{
.ty = ty.unionTagTypeHypothetical(mod),
.val = un.tag.toValue(),
}, writer, level - 1, mod);
try writer.writeAll(" = ");
try print(.{
.ty = ty.unionFieldType(un.tag.toValue(), mod),
.val = un.val.toValue(),
}, writer, level - 1, mod);
} else try writer.writeAll("...");
return writer.writeAll(" }");
},
.memoized_call => unreachable,
},
.empty_array => return writer.writeAll(".{}"),
.enum_literal => return writer.print(".{}", .{std.zig.fmtId(val.castTag(.enum_literal).?.data)}),
.enum_field_index => {
return writer.print(".{s}", .{ty.enumFieldName(val.castTag(.enum_field_index).?.data)});
},
.bytes => return writer.print("\"{}\"", .{std.zig.fmtEscapes(val.castTag(.bytes).?.data)}),
.str_lit => {
const str_lit = val.castTag(.str_lit).?.data;
const bytes = mod.string_literal_bytes.items[str_lit.index..][0..str_lit.len];
return writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)});
},
.repeated => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
var i: u32 = 0;
try writer.writeAll(".{ ");
const elem_tv = TypedValue{
.ty = ty.elemType2(),
.val = val.castTag(.repeated).?.data,
};
const len = ty.arrayLen();
const max_len = std.math.min(len, max_aggregate_items);
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(elem_tv, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
},
.empty_array_sentinel => {
if (level == 0) {
return writer.writeAll(".{ (sentinel) }");
}
try writer.writeAll(".{ ");
try print(.{
.ty = ty.elemType2(),
.val = ty.sentinel().?,
}, writer, level - 1, mod);
return writer.writeAll(" }");
},
.slice => {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
const payload = val.castTag(.slice).?.data;
const elem_ty = ty.elemType2();
const len = payload.len.toUnsignedInt(target);
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len = @intCast(usize, std.math.min(len, max_string_len));
var buf: [max_string_len]u8 = undefined;
var i: u32 = 0;
while (i < max_len) : (i += 1) {
var elem_buf: Value.ElemValueBuffer = undefined;
const elem_val = payload.ptr.elemValueBuffer(mod, i, &elem_buf);
if (elem_val.isUndef()) break :str;
buf[i] = std.math.cast(u8, elem_val.toUnsignedInt(target)) orelse break :str;
}
// TODO would be nice if this had a bit of unicode awareness.
const truncated = if (len > max_string_len) " (truncated)" else "";
return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
}
try writer.writeAll(".{ ");
const max_len = std.math.min(len, max_aggregate_items);
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
var buf: Value.ElemValueBuffer = undefined;
try print(.{
.ty = elem_ty,
.val = payload.ptr.elemValueBuffer(mod, i, &buf),
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
},
.float_16 => return writer.print("{d}", .{val.castTag(.float_16).?.data}),
.float_32 => return writer.print("{d}", .{val.castTag(.float_32).?.data}),
.float_64 => return writer.print("{d}", .{val.castTag(.float_64).?.data}),
.float_80 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_80).?.data)}),
.float_128 => return writer.print("{d}", .{@floatCast(f64, val.castTag(.float_128).?.data)}),
.@"error" => return writer.print("error.{s}", .{val.castTag(.@"error").?.data.name}),
.eu_payload => {
val = val.castTag(.eu_payload).?.data;
ty = ty.errorUnionPayload();
},
.opt_payload => {
val = val.castTag(.opt_payload).?.data;
var buf: Type.Payload.ElemType = undefined;
ty = ty.optionalChild(&buf);
return print(.{ .ty = ty, .val = val }, writer, level, mod);
},
.eu_payload_ptr => {
try writer.writeAll("&");
const data = val.castTag(.eu_payload_ptr).?.data;
var ty_val: Value.Payload.Ty = .{
.base = .{ .tag = .ty },
.data = ty,
};
try writer.writeAll("@as(");
try print(.{
.ty = Type.type,
.val = Value.initPayload(&ty_val.base),
}, writer, level - 1, mod);
try writer.writeAll(", &(payload of ");
var ptr_ty: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = data.container_ty,
};
try print(.{
.ty = Type.initPayload(&ptr_ty.base),
.val = data.container_ptr,
}, writer, level - 1, mod);
try writer.writeAll("))");
return;
},
.opt_payload_ptr => {
const data = val.castTag(.opt_payload_ptr).?.data;
var ty_val: Value.Payload.Ty = .{
.base = .{ .tag = .ty },
.data = ty,
};
try writer.writeAll("@as(");
try print(.{
.ty = Type.type,
.val = Value.initPayload(&ty_val.base),
}, writer, level - 1, mod);
try writer.writeAll(", &(payload of ");
var ptr_ty: Type.Payload.ElemType = .{
.base = .{ .tag = .single_mut_pointer },
.data = data.container_ty,
};
try print(.{
.ty = Type.initPayload(&ptr_ty.base),
.val = data.container_ptr,
}, writer, level - 1, mod);
try writer.writeAll("))");
return;
},
// TODO these should not appear in this function
.inferred_alloc => return writer.writeAll("(inferred allocation value)"),
.inferred_alloc_comptime => return writer.writeAll("(inferred comptime allocation value)"),
.generic_poison_type => return writer.writeAll("(generic poison type)"),
.generic_poison => return writer.writeAll("(generic poison)"),
.runtime_value => return writer.writeAll("[runtime value]"),
};
}
fn printAggregate(
ty: Type,
val: Value,
writer: anytype,
level: u8,
mod: *Module,
) (@TypeOf(writer).Error || Allocator.Error)!void {
if (level == 0) {
return writer.writeAll(".{ ... }");
}
if (ty.zigTypeTag(mod) == .Struct) {
try writer.writeAll(".{");
const max_len = @min(ty.structFieldCount(mod), max_aggregate_items);
for (0..max_len) |i| {
if (i != 0) try writer.writeAll(", ");
const field_name = switch (mod.intern_pool.indexToKey(ty.toIntern())) {
.struct_type => |x| mod.structPtrUnwrap(x.index).?.fields.keys()[i].toOptional(),
.anon_struct_type => |x| if (x.isTuple()) .none else x.names[i].toOptional(),
else => unreachable,
};
if (field_name.unwrap()) |name| try writer.print(".{} = ", .{name.fmt(&mod.intern_pool)});
try print(.{
.ty = ty.structFieldType(i, mod),
.val = try val.fieldValue(mod, i),
}, writer, level - 1, mod);
}
if (ty.structFieldCount(mod) > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll("}");
} else {
const elem_ty = ty.elemType2(mod);
const len = ty.arrayLen(mod);
if (elem_ty.eql(Type.u8, mod)) str: {
const max_len = @intCast(usize, std.math.min(len, max_string_len));
var buf: [max_string_len]u8 = undefined;
var i: u32 = 0;
while (i < max_len) : (i += 1) {
const elem = try val.fieldValue(mod, i);
if (elem.isUndef(mod)) break :str;
buf[i] = std.math.cast(u8, elem.toUnsignedInt(mod)) orelse break :str;
}
const truncated = if (len > max_string_len) " (truncated)" else "";
return writer.print("\"{}{s}\"", .{ std.zig.fmtEscapes(buf[0..max_len]), truncated });
}
try writer.writeAll(".{ ");
const max_len = std.math.min(len, max_aggregate_items);
var i: u32 = 0;
while (i < max_len) : (i += 1) {
if (i != 0) try writer.writeAll(", ");
try print(.{
.ty = elem_ty,
.val = try val.fieldValue(mod, i),
}, writer, level - 1, mod);
}
if (len > max_aggregate_items) {
try writer.writeAll(", ...");
}
return writer.writeAll(" }");
}
}
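(Editorial usage sketch, not part of this diff.) Since `print` can now also fail with `error.OutOfMemory`, a caller that renders into a growable buffer handles both error sets with a single `try`; `gpa`, `tv: TypedValue`, and `mod: *Module` are assumed to be in scope:

    var buf = std.ArrayList(u8).init(gpa);
    defer buf.deinit();
    try tv.print(buf.writer(), 3, mod); // writer errors or OOM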


@ -19,6 +19,7 @@ const BigIntConst = std.math.big.int.Const;
const BigIntMutable = std.math.big.int.Mutable;
const Ast = std.zig.Ast;
const InternPool = @import("InternPool.zig");
const Zir = @This();
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
@ -2041,448 +2042,103 @@ pub const Inst = struct {
/// The position of a ZIR instruction within the `Zir` instructions array.
pub const Index = u32;
/// A reference to a TypedValue or ZIR instruction.
/// A reference to a ZIR instruction, or to an InternPool index, or neither.
///
/// If the Ref has a tag in this enum, it refers to a TypedValue.
///
/// If the value of a Ref does not have a tag, it refers to a ZIR instruction.
///
/// The first values after the last tag refer to ZIR instructions which may
/// be derived by subtracting `typed_value_map.len`.
///
/// When adding a tag to this enum, consider adding a corresponding entry to
/// `primitives` in astgen.
/// If the integer tag value is < InternPool.static_len, then it
/// corresponds to an InternPool index. Otherwise, this refers to a ZIR
/// instruction.
///
/// The tag type is specified so that it is safe to bitcast between `[]u32`
/// and `[]Ref`.
pub const Ref = enum(u32) {
u1_type = @enumToInt(InternPool.Index.u1_type),
u8_type = @enumToInt(InternPool.Index.u8_type),
i8_type = @enumToInt(InternPool.Index.i8_type),
u16_type = @enumToInt(InternPool.Index.u16_type),
i16_type = @enumToInt(InternPool.Index.i16_type),
u29_type = @enumToInt(InternPool.Index.u29_type),
u32_type = @enumToInt(InternPool.Index.u32_type),
i32_type = @enumToInt(InternPool.Index.i32_type),
u64_type = @enumToInt(InternPool.Index.u64_type),
i64_type = @enumToInt(InternPool.Index.i64_type),
u80_type = @enumToInt(InternPool.Index.u80_type),
u128_type = @enumToInt(InternPool.Index.u128_type),
i128_type = @enumToInt(InternPool.Index.i128_type),
usize_type = @enumToInt(InternPool.Index.usize_type),
isize_type = @enumToInt(InternPool.Index.isize_type),
c_char_type = @enumToInt(InternPool.Index.c_char_type),
c_short_type = @enumToInt(InternPool.Index.c_short_type),
c_ushort_type = @enumToInt(InternPool.Index.c_ushort_type),
c_int_type = @enumToInt(InternPool.Index.c_int_type),
c_uint_type = @enumToInt(InternPool.Index.c_uint_type),
c_long_type = @enumToInt(InternPool.Index.c_long_type),
c_ulong_type = @enumToInt(InternPool.Index.c_ulong_type),
c_longlong_type = @enumToInt(InternPool.Index.c_longlong_type),
c_ulonglong_type = @enumToInt(InternPool.Index.c_ulonglong_type),
c_longdouble_type = @enumToInt(InternPool.Index.c_longdouble_type),
f16_type = @enumToInt(InternPool.Index.f16_type),
f32_type = @enumToInt(InternPool.Index.f32_type),
f64_type = @enumToInt(InternPool.Index.f64_type),
f80_type = @enumToInt(InternPool.Index.f80_type),
f128_type = @enumToInt(InternPool.Index.f128_type),
anyopaque_type = @enumToInt(InternPool.Index.anyopaque_type),
bool_type = @enumToInt(InternPool.Index.bool_type),
void_type = @enumToInt(InternPool.Index.void_type),
type_type = @enumToInt(InternPool.Index.type_type),
anyerror_type = @enumToInt(InternPool.Index.anyerror_type),
comptime_int_type = @enumToInt(InternPool.Index.comptime_int_type),
comptime_float_type = @enumToInt(InternPool.Index.comptime_float_type),
noreturn_type = @enumToInt(InternPool.Index.noreturn_type),
anyframe_type = @enumToInt(InternPool.Index.anyframe_type),
null_type = @enumToInt(InternPool.Index.null_type),
undefined_type = @enumToInt(InternPool.Index.undefined_type),
enum_literal_type = @enumToInt(InternPool.Index.enum_literal_type),
atomic_order_type = @enumToInt(InternPool.Index.atomic_order_type),
atomic_rmw_op_type = @enumToInt(InternPool.Index.atomic_rmw_op_type),
calling_convention_type = @enumToInt(InternPool.Index.calling_convention_type),
address_space_type = @enumToInt(InternPool.Index.address_space_type),
float_mode_type = @enumToInt(InternPool.Index.float_mode_type),
reduce_op_type = @enumToInt(InternPool.Index.reduce_op_type),
call_modifier_type = @enumToInt(InternPool.Index.call_modifier_type),
prefetch_options_type = @enumToInt(InternPool.Index.prefetch_options_type),
export_options_type = @enumToInt(InternPool.Index.export_options_type),
extern_options_type = @enumToInt(InternPool.Index.extern_options_type),
type_info_type = @enumToInt(InternPool.Index.type_info_type),
manyptr_u8_type = @enumToInt(InternPool.Index.manyptr_u8_type),
manyptr_const_u8_type = @enumToInt(InternPool.Index.manyptr_const_u8_type),
manyptr_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.manyptr_const_u8_sentinel_0_type),
single_const_pointer_to_comptime_int_type = @enumToInt(InternPool.Index.single_const_pointer_to_comptime_int_type),
slice_const_u8_type = @enumToInt(InternPool.Index.slice_const_u8_type),
slice_const_u8_sentinel_0_type = @enumToInt(InternPool.Index.slice_const_u8_sentinel_0_type),
anyerror_void_error_union_type = @enumToInt(InternPool.Index.anyerror_void_error_union_type),
generic_poison_type = @enumToInt(InternPool.Index.generic_poison_type),
empty_struct_type = @enumToInt(InternPool.Index.empty_struct_type),
undef = @enumToInt(InternPool.Index.undef),
zero = @enumToInt(InternPool.Index.zero),
zero_usize = @enumToInt(InternPool.Index.zero_usize),
zero_u8 = @enumToInt(InternPool.Index.zero_u8),
one = @enumToInt(InternPool.Index.one),
one_usize = @enumToInt(InternPool.Index.one_usize),
one_u8 = @enumToInt(InternPool.Index.one_u8),
four_u8 = @enumToInt(InternPool.Index.four_u8),
negative_one = @enumToInt(InternPool.Index.negative_one),
calling_convention_c = @enumToInt(InternPool.Index.calling_convention_c),
calling_convention_inline = @enumToInt(InternPool.Index.calling_convention_inline),
void_value = @enumToInt(InternPool.Index.void_value),
unreachable_value = @enumToInt(InternPool.Index.unreachable_value),
null_value = @enumToInt(InternPool.Index.null_value),
bool_true = @enumToInt(InternPool.Index.bool_true),
bool_false = @enumToInt(InternPool.Index.bool_false),
empty_struct = @enumToInt(InternPool.Index.empty_struct),
generic_poison = @enumToInt(InternPool.Index.generic_poison),
/// This tag is here to match Air and InternPool, however it is unused
/// for ZIR purposes.
var_args_param_type = @enumToInt(InternPool.Index.var_args_param_type),
/// This Ref does not correspond to any ZIR instruction or constant
/// value and may instead be used as a sentinel to indicate null.
none,
u1_type,
u8_type,
i8_type,
u16_type,
i16_type,
u29_type,
u32_type,
i32_type,
u64_type,
i64_type,
u128_type,
i128_type,
usize_type,
isize_type,
c_char_type,
c_short_type,
c_ushort_type,
c_int_type,
c_uint_type,
c_long_type,
c_ulong_type,
c_longlong_type,
c_ulonglong_type,
c_longdouble_type,
f16_type,
f32_type,
f64_type,
f80_type,
f128_type,
anyopaque_type,
bool_type,
void_type,
type_type,
anyerror_type,
comptime_int_type,
comptime_float_type,
noreturn_type,
anyframe_type,
null_type,
undefined_type,
enum_literal_type,
atomic_order_type,
atomic_rmw_op_type,
calling_convention_type,
address_space_type,
float_mode_type,
reduce_op_type,
modifier_type,
prefetch_options_type,
export_options_type,
extern_options_type,
type_info_type,
manyptr_u8_type,
manyptr_const_u8_type,
fn_noreturn_no_args_type,
fn_void_no_args_type,
fn_naked_noreturn_no_args_type,
fn_ccc_void_no_args_type,
single_const_pointer_to_comptime_int_type,
const_slice_u8_type,
anyerror_void_error_union_type,
generic_poison_type,
/// `undefined` (untyped)
undef,
/// `0` (comptime_int)
zero,
/// `1` (comptime_int)
one,
/// `{}`
void_value,
/// `unreachable` (noreturn type)
unreachable_value,
/// `null` (untyped)
null_value,
/// `true`
bool_true,
/// `false`
bool_false,
/// `.{}` (untyped)
empty_struct,
/// `0` (usize)
zero_usize,
/// `1` (usize)
one_usize,
/// `std.builtin.CallingConvention.C`
calling_convention_c,
/// `std.builtin.CallingConvention.Inline`
calling_convention_inline,
/// Used for generic parameters where the type and value
/// is not known until generic function instantiation.
generic_poison,
/// This is a special type for variadic parameters of a function call.
/// Casts to it will validate that the type can be passed to a c
/// calling convention function.
var_args_param,
none = @enumToInt(InternPool.Index.none),
_,
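    // (Editorial sketch, not part of this commit.) Per the doc comment on
    // `Ref`, a single comparison against `InternPool.static_len` tells the
    // two cases apart; the helper name here is hypothetical.
    pub fn refersToInternPool(ref: Ref) bool {
        return @enumToInt(ref) < InternPool.static_len;
    }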
pub const typed_value_map = std.enums.directEnumArray(Ref, TypedValue, 0, .{
.none = undefined,
.u1_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u1_type),
},
.u8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u8_type),
},
.i8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i8_type),
},
.u16_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u16_type),
},
.i16_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i16_type),
},
.u29_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u29_type),
},
.u32_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u32_type),
},
.i32_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i32_type),
},
.u64_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u64_type),
},
.i64_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i64_type),
},
.u128_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.u128_type),
},
.i128_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.i128_type),
},
.usize_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.usize_type),
},
.isize_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.isize_type),
},
.c_char_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_char_type),
},
.c_short_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_short_type),
},
.c_ushort_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_ushort_type),
},
.c_int_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_int_type),
},
.c_uint_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_uint_type),
},
.c_long_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_long_type),
},
.c_ulong_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_ulong_type),
},
.c_longlong_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_longlong_type),
},
.c_ulonglong_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_ulonglong_type),
},
.c_longdouble_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.c_longdouble_type),
},
.f16_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f16_type),
},
.f32_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f32_type),
},
.f64_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f64_type),
},
.f80_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f80_type),
},
.f128_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.f128_type),
},
.anyopaque_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.anyopaque_type),
},
.bool_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.bool_type),
},
.void_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.void_type),
},
.type_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.type_type),
},
.anyerror_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.anyerror_type),
},
.comptime_int_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.comptime_int_type),
},
.comptime_float_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.comptime_float_type),
},
.noreturn_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.noreturn_type),
},
.anyframe_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.anyframe_type),
},
.null_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.null_type),
},
.undefined_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.undefined_type),
},
.fn_noreturn_no_args_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.fn_noreturn_no_args_type),
},
.fn_void_no_args_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.fn_void_no_args_type),
},
.fn_naked_noreturn_no_args_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.fn_naked_noreturn_no_args_type),
},
.fn_ccc_void_no_args_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.fn_ccc_void_no_args_type),
},
.single_const_pointer_to_comptime_int_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.single_const_pointer_to_comptime_int_type),
},
.const_slice_u8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.const_slice_u8_type),
},
.anyerror_void_error_union_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.anyerror_void_error_union_type),
},
.generic_poison_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.generic_poison_type),
},
.enum_literal_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.enum_literal_type),
},
.manyptr_u8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.manyptr_u8_type),
},
.manyptr_const_u8_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.manyptr_const_u8_type),
},
.atomic_order_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.atomic_order_type),
},
.atomic_rmw_op_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.atomic_rmw_op_type),
},
.calling_convention_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.calling_convention_type),
},
.address_space_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.address_space_type),
},
.float_mode_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.float_mode_type),
},
.reduce_op_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.reduce_op_type),
},
.modifier_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.modifier_type),
},
.prefetch_options_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.prefetch_options_type),
},
.export_options_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.export_options_type),
},
.extern_options_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.extern_options_type),
},
.type_info_type = .{
.ty = Type.initTag(.type),
.val = Value.initTag(.type_info_type),
},
.undef = .{
.ty = Type.initTag(.undefined),
.val = Value.initTag(.undef),
},
.zero = .{
.ty = Type.initTag(.comptime_int),
.val = Value.initTag(.zero),
},
.zero_usize = .{
.ty = Type.initTag(.usize),
.val = Value.initTag(.zero),
},
.one = .{
.ty = Type.initTag(.comptime_int),
.val = Value.initTag(.one),
},
.one_usize = .{
.ty = Type.initTag(.usize),
.val = Value.initTag(.one),
},
.void_value = .{
.ty = Type.initTag(.void),
.val = Value.initTag(.void_value),
},
.unreachable_value = .{
.ty = Type.initTag(.noreturn),
.val = Value.initTag(.unreachable_value),
},
.null_value = .{
.ty = Type.initTag(.null),
.val = Value.initTag(.null_value),
},
.bool_true = .{
.ty = Type.initTag(.bool),
.val = Value.initTag(.bool_true),
},
.bool_false = .{
.ty = Type.initTag(.bool),
.val = Value.initTag(.bool_false),
},
.empty_struct = .{
.ty = Type.initTag(.empty_struct_literal),
.val = Value.initTag(.empty_struct_value),
},
.calling_convention_c = .{
.ty = Type.initTag(.calling_convention),
.val = .{ .ptr_otherwise = &calling_convention_c_payload.base },
},
.calling_convention_inline = .{
.ty = Type.initTag(.calling_convention),
.val = .{ .ptr_otherwise = &calling_convention_inline_payload.base },
},
.generic_poison = .{
.ty = Type.initTag(.generic_poison),
.val = Value.initTag(.generic_poison),
},
.var_args_param = undefined,
});
};
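With this change the leading `Ref` tags mirror `InternPool.Index` one-for-one (note that `none` above is pinned to `@enumToInt(InternPool.Index.none)`), so a `Ref` below `ref_start_index` can be reinterpreted as an interned constant directly. A minimal hypothetical helper sketching that correspondence (not part of the diff):

fn refToInterned(ref: Inst.Ref) ?InternPool.Index {
    const int = @enumToInt(ref);
    // values below ref_start_index name statically interned constants
    if (int >= ref_start_index) return null;
    return @intToEnum(InternPool.Index, int);
}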
/// We would like this to be const but `Value` wants a mutable pointer for
/// its payload field. Nothing should mutate this though.
var calling_convention_c_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @enumToInt(std.builtin.CallingConvention.C),
};
/// We would like this to be const but `Value` wants a mutable pointer for
/// its payload field. Nothing should mutate this though.
var calling_convention_inline_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @enumToInt(std.builtin.CallingConvention.Inline),
};
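The `var` on these two payloads matters; a minimal sketch of the coercion that `const` would break (stand-in struct, not the real `Value.Payload`):

const PayloadSketch = struct { tag: u8 };
var mutable_payload: PayloadSketch = .{ .tag = 0 };
const immutable_payload: PayloadSketch = .{ .tag = 0 };
// `Value.ptr_otherwise` wants a mutable `*Payload`, so this coerces:
const ok: *PayloadSketch = &mutable_payload;
// const bad: *PayloadSketch = &immutable_payload; // error: *const PayloadSketch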
/// All instructions have an 8-byte payload, which is contained within
@ -4163,13 +3819,14 @@ pub fn getFnInfo(zir: Zir, fn_inst: Inst.Index) FnInfo {
};
}
const ref_start_index: u32 = Inst.Ref.typed_value_map.len;
pub const ref_start_index: u32 = InternPool.static_len;
pub fn indexToRef(inst: Inst.Index) Inst.Ref {
return @intToEnum(Inst.Ref, ref_start_index + inst);
}
pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
assert(inst != .none);
const ref_int = @enumToInt(inst);
if (ref_int >= ref_start_index) {
return ref_int - ref_start_index;
@ -4177,3 +3834,8 @@ pub fn refToIndex(inst: Inst.Ref) ?Inst.Index {
return null;
}
}
pub fn refToIndexAllowNone(inst: Inst.Ref) ?Inst.Index {
if (inst == .none) return null;
return refToIndex(inst);
}
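A round-trip sketch for the three helpers above (a hypothetical test; assumes `std` is in scope as elsewhere in this file):

test "Inst.Ref round-trips through Inst.Index" {
    const inst: Inst.Index = 0;
    const ref = indexToRef(inst);
    // the first index lands just past the interned constants
    try std.testing.expect(@enumToInt(ref) == ref_start_index);
    try std.testing.expect(refToIndex(ref).? == inst);
    try std.testing.expect(refToIndexAllowNone(.none) == null);
}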

File diff suppressed because it is too large

View File

@ -4,6 +4,7 @@ const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
const Module = @import("../../Module.zig");
pub const Class = union(enum) {
memory,
@ -14,44 +15,44 @@ pub const Class = union(enum) {
};
/// For `float_array` the second element will be the number of floats.
pub fn classifyType(ty: Type, target: std.Target) Class {
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
pub fn classifyType(ty: Type, mod: *Module) Class {
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
var maybe_float_bits: ?u16 = null;
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Struct => {
if (ty.containerLayout() == .Packed) return .byval;
const float_count = countFloats(ty, target, &maybe_float_bits);
if (ty.containerLayout(mod) == .Packed) return .byval;
const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
const bit_size = ty.bitSize(target);
const bit_size = ty.bitSize(mod);
if (bit_size > 128) return .memory;
if (bit_size > 64) return .double_integer;
return .integer;
},
.Union => {
if (ty.containerLayout() == .Packed) return .byval;
const float_count = countFloats(ty, target, &maybe_float_bits);
if (ty.containerLayout(mod) == .Packed) return .byval;
const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= sret_float_count) return .{ .float_array = float_count };
const bit_size = ty.bitSize(target);
const bit_size = ty.bitSize(mod);
if (bit_size > 128) return .memory;
if (bit_size > 64) return .double_integer;
return .integer;
},
.Int, .Enum, .ErrorSet, .Float, .Bool => return .byval,
.Vector => {
const bit_size = ty.bitSize(target);
const bit_size = ty.bitSize(mod);
// TODO is this controlled by a cpu feature?
if (bit_size > 128) return .memory;
return .byval;
},
.Optional => {
std.debug.assert(ty.isPtrLikeOptional());
std.debug.assert(ty.isPtrLikeOptional(mod));
return .byval;
},
.Pointer => {
std.debug.assert(!ty.isSlice());
std.debug.assert(!ty.isSlice(mod));
return .byval;
},
.ErrorUnion,
@ -73,14 +74,15 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
}
const sret_float_count = 4;
fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u8 {
const target = mod.getTarget();
const invalid = std.math.maxInt(u8);
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Union => {
const fields = ty.unionFields();
const fields = ty.unionFields(mod);
var max_count: u8 = 0;
for (fields.values()) |field| {
const field_count = countFloats(field.ty, target, maybe_float_bits);
const field_count = countFloats(field.ty, mod, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > sret_float_count) return invalid;
@ -88,12 +90,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
return max_count;
},
.Struct => {
const fields_len = ty.structFieldCount();
const fields_len = ty.structFieldCount(mod);
var count: u8 = 0;
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
const field_count = countFloats(field_ty, target, maybe_float_bits);
const field_ty = ty.structFieldType(i, mod);
const field_count = countFloats(field_ty, mod, maybe_float_bits);
if (field_count == invalid) return invalid;
count += field_count;
if (count > sret_float_count) return invalid;
@ -113,21 +115,21 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u8 {
}
}
pub fn getFloatArrayType(ty: Type) ?Type {
switch (ty.zigTypeTag()) {
pub fn getFloatArrayType(ty: Type, mod: *Module) ?Type {
switch (ty.zigTypeTag(mod)) {
.Union => {
const fields = ty.unionFields();
const fields = ty.unionFields(mod);
for (fields.values()) |field| {
if (getFloatArrayType(field.ty)) |some| return some;
if (getFloatArrayType(field.ty, mod)) |some| return some;
}
return null;
},
.Struct => {
const fields_len = ty.structFieldCount();
const fields_len = ty.structFieldCount(mod);
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
if (getFloatArrayType(field_ty)) |some| return some;
const field_ty = ty.structFieldType(i, mod);
if (getFloatArrayType(field_ty, mod)) |some| return some;
}
return null;
},
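To recap the classification order above: the float-array check runs before the size checks. A standalone sketch with hypothetical inputs (the real code derives these from `countFloats` and `ty.bitSize(mod)`):

const std = @import("std");

fn classifyStructSketch(float_count: u8, bit_size: u64) enum { float_array, memory, double_integer, integer } {
    const sret_float_count = 4; // same threshold as above
    if (float_count <= sret_float_count) return .float_array;
    if (bit_size > 128) return .memory;
    if (bit_size > 64) return .double_integer;
    return .integer;
}

test "classification order" {
    try std.testing.expect(classifyStructSketch(4, 128) == .float_array);
    try std.testing.expect(classifyStructSketch(5, 160) == .memory);
    try std.testing.expect(classifyStructSketch(5, 96) == .double_integer);
    try std.testing.expect(classifyStructSketch(5, 64) == .integer);
}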

File diff suppressed because it is too large

View File

@ -1,8 +1,10 @@
const std = @import("std");
const assert = std.debug.assert;
const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
const Module = @import("../../Module.zig");
pub const Class = union(enum) {
memory,
@ -22,28 +24,28 @@ pub const Class = union(enum) {
pub const Context = enum { ret, arg };
pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
pub fn classifyType(ty: Type, mod: *Module, ctx: Context) Class {
assert(ty.hasRuntimeBitsIgnoreComptime(mod));
var maybe_float_bits: ?u16 = null;
const max_byval_size = 512;
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Struct => {
const bit_size = ty.bitSize(target);
if (ty.containerLayout() == .Packed) {
const bit_size = ty.bitSize(mod);
if (ty.containerLayout(mod) == .Packed) {
if (bit_size > 64) return .memory;
return .byval;
}
if (bit_size > max_byval_size) return .memory;
const float_count = countFloats(ty, target, &maybe_float_bits);
const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= byval_float_count) return .byval;
const fields = ty.structFieldCount();
const fields = ty.structFieldCount(mod);
var i: u32 = 0;
while (i < fields) : (i += 1) {
const field_ty = ty.structFieldType(i);
const field_alignment = ty.structFieldAlign(i, target);
const field_size = field_ty.bitSize(target);
const field_ty = ty.structFieldType(i, mod);
const field_alignment = ty.structFieldAlign(i, mod);
const field_size = field_ty.bitSize(mod);
if (field_size > 32 or field_alignment > 32) {
return Class.arrSize(bit_size, 64);
}
@ -51,17 +53,17 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
return Class.arrSize(bit_size, 32);
},
.Union => {
const bit_size = ty.bitSize(target);
if (ty.containerLayout() == .Packed) {
const bit_size = ty.bitSize(mod);
if (ty.containerLayout(mod) == .Packed) {
if (bit_size > 64) return .memory;
return .byval;
}
if (bit_size > max_byval_size) return .memory;
const float_count = countFloats(ty, target, &maybe_float_bits);
const float_count = countFloats(ty, mod, &maybe_float_bits);
if (float_count <= byval_float_count) return .byval;
for (ty.unionFields().values()) |field| {
if (field.ty.bitSize(target) > 32 or field.normalAlignment(target) > 32) {
for (ty.unionFields(mod).values()) |field| {
if (field.ty.bitSize(mod) > 32 or field.normalAlignment(mod) > 32) {
return Class.arrSize(bit_size, 64);
}
}
@ -71,28 +73,28 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
.Int => {
// TODO this is incorrect for _BitInt(128) but implementing
// this correctly makes implementing compiler-rt impossible.
// const bit_size = ty.bitSize(target);
// const bit_size = ty.bitSize(mod);
// if (bit_size > 64) return .memory;
return .byval;
},
.Enum, .ErrorSet => {
const bit_size = ty.bitSize(target);
const bit_size = ty.bitSize(mod);
if (bit_size > 64) return .memory;
return .byval;
},
.Vector => {
const bit_size = ty.bitSize(target);
const bit_size = ty.bitSize(mod);
// TODO is this controlled by a cpu feature?
if (ctx == .ret and bit_size > 128) return .memory;
if (bit_size > 512) return .memory;
return .byval;
},
.Optional => {
std.debug.assert(ty.isPtrLikeOptional());
assert(ty.isPtrLikeOptional(mod));
return .byval;
},
.Pointer => {
std.debug.assert(!ty.isSlice());
assert(!ty.isSlice(mod));
return .byval;
},
.ErrorUnion,
@ -114,14 +116,15 @@ pub fn classifyType(ty: Type, target: std.Target, ctx: Context) Class {
}
const byval_float_count = 4;
fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
fn countFloats(ty: Type, mod: *Module, maybe_float_bits: *?u16) u32 {
const target = mod.getTarget();
const invalid = std.math.maxInt(u32);
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Union => {
const fields = ty.unionFields();
const fields = ty.unionFields(mod);
var max_count: u32 = 0;
for (fields.values()) |field| {
const field_count = countFloats(field.ty, target, maybe_float_bits);
const field_count = countFloats(field.ty, mod, maybe_float_bits);
if (field_count == invalid) return invalid;
if (field_count > max_count) max_count = field_count;
if (max_count > byval_float_count) return invalid;
@ -129,12 +132,12 @@ fn countFloats(ty: Type, target: std.Target, maybe_float_bits: *?u16) u32 {
return max_count;
},
.Struct => {
const fields_len = ty.structFieldCount();
const fields_len = ty.structFieldCount(mod);
var count: u32 = 0;
var i: u32 = 0;
while (i < fields_len) : (i += 1) {
const field_ty = ty.structFieldType(i);
const field_count = countFloats(field_ty, target, maybe_float_bits);
const field_ty = ty.structFieldType(i, mod);
const field_count = countFloats(field_ty, mod, maybe_float_bits);
if (field_count == invalid) return invalid;
count += field_count;
if (count > byval_float_count) return invalid;

View File

@ -217,7 +217,7 @@ const Self = @This();
pub fn generate(
bin_file: *link.File,
src_loc: Module.SrcLoc,
module_fn: *Module.Fn,
module_fn_index: Module.Fn.Index,
air: Air,
liveness: Liveness,
code: *std.ArrayList(u8),
@ -228,6 +228,7 @@ pub fn generate(
}
const mod = bin_file.options.module.?;
const module_fn = mod.funcPtr(module_fn_index);
const fn_owner_decl = mod.declPtr(module_fn.owner_decl);
assert(fn_owner_decl.has_tv);
const fn_type = fn_owner_decl.ty;
@ -347,7 +348,8 @@ pub fn addExtraAssumeCapacity(self: *Self, extra: anytype) u32 {
}
fn gen(self: *Self) !void {
const cc = self.fn_type.fnCallingConvention();
const mod = self.bin_file.options.module.?;
const cc = self.fn_type.fnCallingConvention(mod);
if (cc != .Naked) {
// TODO Finish function prologue and epilogue for riscv64.
@ -470,13 +472,14 @@ fn gen(self: *Self) !void {
}
fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
const mod = self.bin_file.options.module.?;
const ip = &mod.intern_pool;
const air_tags = self.air.instructions.items(.tag);
for (body) |inst| {
// TODO: remove now-redundant isUnused calls from AIR handler functions
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst)) {
if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
continue;
}
const old_air_bookkeeping = self.air_bookkeeping;
try self.ensureProcessDeathCapacity(Liveness.bpi);
@ -656,8 +659,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.ptr_elem_val => try self.airPtrElemVal(inst),
.ptr_elem_ptr => try self.airPtrElemPtr(inst),
.constant => unreachable, // excluded from function bodies
.const_ty => unreachable, // excluded from function bodies
.inferred_alloc, .inferred_alloc_comptime, .interned => unreachable,
.unreach => self.finishAirBookkeeping(),
.optional_payload => try self.airOptionalPayload(inst),
@ -727,8 +729,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
/// Asserts there is already capacity to insert into top branch inst_table.
fn processDeath(self: *Self, inst: Air.Inst.Index) void {
const air_tags = self.air.instructions.items(.tag);
if (air_tags[inst] == .constant) return; // Constants are immortal.
assert(self.air.instructions.items(.tag)[inst] != .interned);
// When editing this function, note that the logic must synchronize with `reuseOperand`.
const prev_value = self.getResolvedInstValue(inst);
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
@ -755,8 +756,8 @@ fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Live
tomb_bits >>= 1;
if (!dies) continue;
const op_int = @enumToInt(op);
if (op_int < Air.Inst.Ref.typed_value_map.len) continue;
const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len);
if (op_int < Air.ref_start_index) continue;
const op_index = @intCast(Air.Inst.Index, op_int - Air.ref_start_index);
self.processDeath(op_index);
}
const is_used = @truncate(u1, tomb_bits) == 0;
@ -804,23 +805,23 @@ fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u
/// Use a pointer instruction as the basis for allocating stack memory.
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 {
const elem_ty = self.air.typeOfIndex(inst).elemType();
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
const mod = self.bin_file.options.module.?;
const elem_ty = self.typeOfIndex(inst).childType(mod);
const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
// TODO swap this for inst.ty.ptrAlign
const abi_align = elem_ty.abiAlignment(self.target.*);
const abi_align = elem_ty.abiAlignment(mod);
return self.allocMem(inst, abi_size, abi_align);
}
fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue {
const elem_ty = self.air.typeOfIndex(inst);
const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) orelse {
const mod = self.bin_file.options.module.?;
const mod = self.bin_file.options.module.?;
const elem_ty = self.typeOfIndex(inst);
const abi_size = math.cast(u32, elem_ty.abiSize(mod)) orelse {
return self.fail("type '{}' too big to fit into stack frame", .{elem_ty.fmt(mod)});
};
const abi_align = elem_ty.abiAlignment(self.target.*);
const abi_align = elem_ty.abiAlignment(mod);
if (abi_align > self.stack_align)
self.stack_align = abi_align;
@ -845,7 +846,7 @@ pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void
assert(reg == reg_mcv.register);
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1];
try branch.inst_table.put(self.gpa, inst, stack_mcv);
try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
try self.genSetStack(self.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv);
}
/// Copies a value to a register without tracking the register. The register is not considered
@ -862,7 +863,7 @@ fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register {
/// This can have a side effect of spilling instructions to the stack to free up a register.
fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue {
const reg = try self.register_manager.allocReg(reg_owner, gp);
try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv);
try self.genSetReg(self.typeOfIndex(reg_owner), reg, mcv);
return MCValue{ .register = reg };
}
@ -893,10 +894,11 @@ fn airIntCast(self: *Self, inst: Air.Inst.Index) !void {
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none });
const operand_ty = self.air.typeOf(ty_op.operand);
const mod = self.bin_file.options.module.?;
const operand_ty = self.typeOf(ty_op.operand);
const operand = try self.resolveInst(ty_op.operand);
const info_a = operand_ty.intInfo(self.target.*);
const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*);
const info_a = operand_ty.intInfo(mod);
const info_b = self.typeOfIndex(inst).intInfo(mod);
if (info_a.signedness != info_b.signedness)
return self.fail("TODO gen intcast sign safety in semantic analysis", .{});
@ -1068,18 +1070,18 @@ fn binOp(
lhs_ty: Type,
rhs_ty: Type,
) InnerError!MCValue {
const mod = self.bin_file.options.module.?;
switch (tag) {
// Arithmetic operations on integers and floats
.add,
.sub,
=> {
switch (lhs_ty.zigTypeTag()) {
switch (lhs_ty.zigTypeTag(mod)) {
.Float => return self.fail("TODO binary operations on floats", .{}),
.Vector => return self.fail("TODO binary operations on vectors", .{}),
.Int => {
const mod = self.bin_file.options.module.?;
assert(lhs_ty.eql(rhs_ty, mod));
const int_info = lhs_ty.intInfo(self.target.*);
const int_info = lhs_ty.intInfo(mod);
if (int_info.bits <= 64) {
// TODO immediate operands
return try self.binOpRegister(tag, maybe_inst, lhs, rhs, lhs_ty, rhs_ty);
@ -1093,14 +1095,14 @@ fn binOp(
.ptr_add,
.ptr_sub,
=> {
switch (lhs_ty.zigTypeTag()) {
switch (lhs_ty.zigTypeTag(mod)) {
.Pointer => {
const ptr_ty = lhs_ty;
const elem_ty = switch (ptr_ty.ptrSize()) {
.One => ptr_ty.childType().childType(), // ptr to array, so get array element type
else => ptr_ty.childType(),
const elem_ty = switch (ptr_ty.ptrSize(mod)) {
.One => ptr_ty.childType(mod).childType(mod), // ptr to array, so get array element type
else => ptr_ty.childType(mod),
};
const elem_size = elem_ty.abiSize(self.target.*);
const elem_size = elem_ty.abiSize(mod);
if (elem_size == 1) {
const base_tag: Air.Inst.Tag = switch (tag) {
@ -1125,8 +1127,8 @@ fn airBinOp(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@ -1137,8 +1139,8 @@ fn airPtrArithmetic(self: *Self, inst: Air.Inst.Index, tag: Air.Inst.Tag) !void
const bin_op = self.air.extraData(Air.Bin, ty_pl.payload).data;
const lhs = try self.resolveInst(bin_op.lhs);
const rhs = try self.resolveInst(bin_op.rhs);
const lhs_ty = self.air.typeOf(bin_op.lhs);
const rhs_ty = self.air.typeOf(bin_op.rhs);
const lhs_ty = self.typeOf(bin_op.lhs);
const rhs_ty = self.typeOf(bin_op.rhs);
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else try self.binOp(tag, inst, lhs, rhs, lhs_ty, rhs_ty);
return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
@ -1331,10 +1333,11 @@ fn airSaveErrReturnTraceIndex(self: *Self, inst: Air.Inst.Index) !void {
fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void {
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: {
const optional_ty = self.air.typeOfIndex(inst);
const mod = self.bin_file.options.module.?;
const optional_ty = self.typeOfIndex(inst);
// Optional with a zero-bit payload type is just a boolean true
if (optional_ty.abiSize(self.target.*) == 1)
if (optional_ty.abiSize(mod) == 1)
break :result MCValue{ .immediate = 1 };
return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch});
@ -1498,7 +1501,8 @@ fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_ind
}
fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!void {
const elem_ty = ptr_ty.elemType();
const mod = self.bin_file.options.module.?;
const elem_ty = ptr_ty.childType(mod);
switch (ptr) {
.none => unreachable,
.undef => unreachable,
@ -1523,14 +1527,15 @@ fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) InnerError!vo
}
fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
const mod = self.bin_file.options.module.?;
const ty_op = self.air.instructions.items(.data)[inst].ty_op;
const elem_ty = self.air.typeOfIndex(inst);
const elem_ty = self.typeOfIndex(inst);
const result: MCValue = result: {
if (!elem_ty.hasRuntimeBits())
if (!elem_ty.hasRuntimeBits(mod))
break :result MCValue.none;
const ptr = try self.resolveInst(ty_op.operand);
const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr();
const is_volatile = self.typeOf(ty_op.operand).isVolatilePtr(mod);
if (self.liveness.isUnused(inst) and !is_volatile)
break :result MCValue.dead;
@ -1542,7 +1547,7 @@ fn airLoad(self: *Self, inst: Air.Inst.Index) !void {
break :blk try self.allocRegOrMem(inst, true);
}
};
try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand));
try self.load(dst_mcv, ptr, self.typeOf(ty_op.operand));
break :result dst_mcv;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -1583,8 +1588,8 @@ fn airStore(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
const ptr = try self.resolveInst(bin_op.lhs);
const value = try self.resolveInst(bin_op.rhs);
const ptr_ty = self.air.typeOf(bin_op.lhs);
const value_ty = self.air.typeOf(bin_op.rhs);
const ptr_ty = self.typeOf(bin_op.lhs);
const value_ty = self.typeOf(bin_op.rhs);
try self.store(ptr, value, ptr_ty, value_ty);
@ -1644,7 +1649,7 @@ fn airArg(self: *Self, inst: Air.Inst.Index) !void {
const arg_index = self.arg_index;
self.arg_index += 1;
const ty = self.air.typeOfIndex(inst);
const ty = self.typeOfIndex(inst);
_ = ty;
const result = self.args[arg_index];
@ -1698,9 +1703,10 @@ fn airFence(self: *Self) !void {
}
fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier) !void {
const mod = self.bin_file.options.module.?;
if (modifier == .always_tail) return self.fail("TODO implement tail calls for riscv64", .{});
const pl_op = self.air.instructions.items(.data)[inst].pl_op;
const fn_ty = self.air.typeOf(pl_op.operand);
const fn_ty = self.typeOf(pl_op.operand);
const callee = pl_op.operand;
const extra = self.air.extraData(Air.Call, pl_op.payload);
const args = @ptrCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]);
@ -1713,7 +1719,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
for (info.args, 0..) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
const arg_ty = self.typeOf(arg);
const arg_mcv = try self.resolveInst(args[arg_i]);
switch (mc_arg) {
@ -1736,14 +1742,13 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
}
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
if (try self.air.value(callee, mod)) |func_value| {
if (mod.funcPtrUnwrap(mod.intern_pool.indexToFunc(func_value.ip_index))) |func| {
const atom_index = try elf_file.getOrCreateAtomForDecl(func.owner_decl);
const atom = elf_file.getAtom(atom_index);
_ = try atom.getOrCreateOffsetTableEntry(elf_file);
const got_addr = @intCast(u32, atom.getOffsetTableAddress(elf_file));
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
try self.genSetReg(Type.usize, .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
.tag = .jalr,
.data = .{ .i_type = .{
@ -1752,7 +1757,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.imm12 = 0,
} },
});
} else if (func_value.castTag(.extern_fn)) |_| {
} else if (mod.intern_pool.indexToKey(func_value.ip_index) == .extern_func) {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
@ -1796,7 +1801,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
}
fn ret(self: *Self, mcv: MCValue) !void {
const ret_ty = self.fn_type.fnReturnType();
const mod = self.bin_file.options.module.?;
const ret_ty = self.fn_type.fnReturnType(mod);
try self.setRegOrMem(ret_ty, self.ret_mcv, mcv);
// Just add space for an instruction, patch this later
const index = try self.addInst(.{
@ -1825,10 +1831,10 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
const bin_op = self.air.instructions.items(.data)[inst].bin_op;
if (self.liveness.isUnused(inst))
return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none });
const ty = self.air.typeOf(bin_op.lhs);
const ty = self.typeOf(bin_op.lhs);
const mod = self.bin_file.options.module.?;
assert(ty.eql(self.air.typeOf(bin_op.rhs), mod));
if (ty.zigTypeTag() == .ErrorSet)
assert(ty.eql(self.typeOf(bin_op.rhs), mod));
if (ty.zigTypeTag(mod) == .ErrorSet)
return self.fail("TODO implement cmp for errors", .{});
const lhs = try self.resolveInst(bin_op.lhs);
@ -1869,8 +1875,9 @@ fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void {
}
fn airDbgInline(self: *Self, inst: Air.Inst.Index) !void {
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const function = self.air.values[ty_pl.payload].castTag(.function).?.data;
const ty_fn = self.air.instructions.items(.data)[inst].ty_fn;
const mod = self.bin_file.options.module.?;
const function = mod.funcPtr(ty_fn.func);
// TODO emit debug info for function change
_ = function;
return self.finishAir(inst, .dead, .{ .none, .none, .none });
@ -1946,7 +1953,7 @@ fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void {
break :blk try self.allocRegOrMem(inst, true);
}
};
try self.load(operand, operand_ptr, self.air.typeOf(un_op));
try self.load(operand, operand_ptr, self.typeOf(un_op));
break :result try self.isNull(operand);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
@ -1973,7 +1980,7 @@ fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void {
break :blk try self.allocRegOrMem(inst, true);
}
};
try self.load(operand, operand_ptr, self.air.typeOf(un_op));
try self.load(operand, operand_ptr, self.typeOf(un_op));
break :result try self.isNonNull(operand);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
@ -2000,7 +2007,7 @@ fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void {
break :blk try self.allocRegOrMem(inst, true);
}
};
try self.load(operand, operand_ptr, self.air.typeOf(un_op));
try self.load(operand, operand_ptr, self.typeOf(un_op));
break :result try self.isErr(operand);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
@ -2027,7 +2034,7 @@ fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void {
break :blk try self.allocRegOrMem(inst, true);
}
};
try self.load(operand, operand_ptr, self.air.typeOf(un_op));
try self.load(operand, operand_ptr, self.typeOf(un_op));
break :result try self.isNonErr(operand);
};
return self.finishAir(inst, result, .{ un_op, .none, .none });
@ -2107,13 +2114,14 @@ fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void {
fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void {
const block_data = self.blocks.getPtr(block).?;
if (self.air.typeOf(operand).hasRuntimeBits()) {
const mod = self.bin_file.options.module.?;
if (self.typeOf(operand).hasRuntimeBits(mod)) {
const operand_mcv = try self.resolveInst(operand);
const block_mcv = block_data.mcv;
if (block_mcv == .none) {
block_data.mcv = operand_mcv;
} else {
try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv);
try self.setRegOrMem(self.typeOfIndex(block), block_mcv, operand_mcv);
}
}
return self.brVoid(block);
@ -2176,7 +2184,7 @@ fn airAsm(self: *Self, inst: Air.Inst.Index) !void {
const arg_mcv = try self.resolveInst(input);
try self.register_manager.getReg(reg, null);
try self.genSetReg(self.air.typeOf(input), reg, arg_mcv);
try self.genSetReg(self.typeOf(input), reg, arg_mcv);
}
{
@ -2372,7 +2380,7 @@ fn airBitCast(self: *Self, inst: Air.Inst.Index) !void {
defer if (operand_lock) |lock| self.register_manager.unlockReg(lock);
const dest = try self.allocRegOrMem(inst, true);
try self.setRegOrMem(self.air.typeOfIndex(inst), dest, operand);
try self.setRegOrMem(self.typeOfIndex(inst), dest, operand);
break :result dest;
};
return self.finishAir(inst, result, .{ ty_op.operand, .none, .none });
@ -2489,8 +2497,9 @@ fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
}
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
const vector_ty = self.air.typeOfIndex(inst);
const len = vector_ty.vectorLen();
const mod = self.bin_file.options.module.?;
const vector_ty = self.typeOfIndex(inst);
const len = vector_ty.vectorLen(mod);
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
const result: MCValue = res: {
@ -2533,37 +2542,32 @@ fn airMulAdd(self: *Self, inst: Air.Inst.Index) !void {
}
fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue {
// First section of indexes correspond to a set number of constant values.
const ref_int = @enumToInt(inst);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
const tv = Air.Inst.Ref.typed_value_map[ref_int];
if (!tv.ty.hasRuntimeBits()) {
return MCValue{ .none = {} };
}
return self.genTypedValue(tv);
}
const mod = self.bin_file.options.module.?;
// If the type has no codegen bits, no need to store it.
const inst_ty = self.air.typeOf(inst);
if (!inst_ty.hasRuntimeBits())
const inst_ty = self.typeOf(inst);
if (!inst_ty.hasRuntimeBits(mod))
return MCValue{ .none = {} };
const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len);
const inst_index = Air.refToIndex(inst) orelse return self.genTypedValue(.{
.ty = inst_ty,
.val = (try self.air.value(inst, mod)).?,
});
switch (self.air.instructions.items(.tag)[inst_index]) {
.constant => {
.interned => {
// Constants have static lifetimes, so they are always memoized in the outermost table.
const branch = &self.branch_stack.items[0];
const gop = try branch.inst_table.getOrPut(self.gpa, inst_index);
if (!gop.found_existing) {
const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl;
const interned = self.air.instructions.items(.data)[inst_index].interned;
gop.value_ptr.* = try self.genTypedValue(.{
.ty = inst_ty,
.val = self.air.values[ty_pl.payload],
.val = interned.toValue(),
});
}
return gop.value_ptr.*;
},
.const_ty => unreachable,
else => return self.getResolvedInstValue(inst_index),
}
}
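The `.interned` branch above is a classic memoize-on-first-use: constants live for the whole function, so the cache is the outermost branch table rather than the current branch. A standalone sketch of that shape (stand-in key and value types, not the real `MCValue`):

const std = @import("std");
const Cache = std.AutoHashMapUnmanaged(u32, u64);

fn memoize(gpa: std.mem.Allocator, cache: *Cache, key: u32) !u64 {
    const gop = try cache.getOrPut(gpa, key);
    if (!gop.found_existing) gop.value_ptr.* = lowerOnce(key); // genTypedValue stand-in
    return gop.value_ptr.*;
}

fn lowerOnce(key: u32) u64 {
    return @as(u64, key) * 2;
}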
@ -2616,12 +2620,11 @@ const CallMCValues = struct {
/// Caller must call `CallMCValues.deinit`.
fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
const cc = fn_ty.fnCallingConvention();
const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen());
defer self.gpa.free(param_types);
fn_ty.fnParamTypes(param_types);
const mod = self.bin_file.options.module.?;
const fn_info = mod.typeToFunc(fn_ty).?;
const cc = fn_info.cc;
var result: CallMCValues = .{
.args = try self.gpa.alloc(MCValue, param_types.len),
.args = try self.gpa.alloc(MCValue, fn_info.param_types.len),
// These undefined values must be populated before returning from this function.
.return_value = undefined,
.stack_byte_count = undefined,
@ -2629,7 +2632,7 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
};
errdefer self.gpa.free(result.args);
const ret_ty = fn_ty.fnReturnType();
const ret_ty = fn_ty.fnReturnType(mod);
switch (cc) {
.Naked => {
@ -2649,8 +2652,8 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
var next_stack_offset: u32 = 0;
const argument_registers = [_]Register{ .a0, .a1, .a2, .a3, .a4, .a5, .a6, .a7 };
for (param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.abiSize(self.target.*));
for (fn_info.param_types, 0..) |ty, i| {
const param_size = @intCast(u32, ty.toType().abiSize(mod));
if (param_size <= 8) {
if (next_register < argument_registers.len) {
result.args[i] = .{ .register = argument_registers[next_register] };
@ -2680,14 +2683,14 @@ fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues {
else => return self.fail("TODO implement function parameters for {} on riscv64", .{cc}),
}
if (ret_ty.zigTypeTag() == .NoReturn) {
if (ret_ty.zigTypeTag(mod) == .NoReturn) {
result.return_value = .{ .unreach = {} };
} else if (!ret_ty.hasRuntimeBits()) {
} else if (!ret_ty.hasRuntimeBits(mod)) {
result.return_value = .{ .none = {} };
} else switch (cc) {
.Naked => unreachable,
.Unspecified, .C => {
const ret_ty_size = @intCast(u32, ret_ty.abiSize(self.target.*));
const ret_ty_size = @intCast(u32, ret_ty.abiSize(mod));
if (ret_ty_size <= 8) {
result.return_value = .{ .register = .a0 };
} else if (ret_ty_size <= 16) {
@ -2731,3 +2734,13 @@ fn parseRegName(name: []const u8) ?Register {
}
return std.meta.stringToEnum(Register, name);
}
fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
const mod = self.bin_file.options.module.?;
return self.air.typeOf(inst, &mod.intern_pool);
}
fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
const mod = self.bin_file.options.module.?;
return self.air.typeOfIndex(inst, &mod.intern_pool);
}
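Recapping `resolveCallingConventionValues` above: a parameter of at most 8 bytes takes the next free register in a0..a7, and everything else goes to the stack. A standalone sketch with hypothetical sizes and a stand-in result type (it simplifies larger values straight to the stack, which is all the diff shows):

const ParamLoc = union(enum) { register: usize, stack_offset: u32 };

fn assignParamsSketch(sizes: []const u32, out: []ParamLoc) void {
    var next_register: usize = 0;
    var next_stack_offset: u32 = 0;
    for (sizes, 0..) |param_size, i| {
        if (param_size <= 8 and next_register < 8) {
            out[i] = .{ .register = next_register }; // a0..a7
            next_register += 1;
        } else {
            out[i] = .{ .stack_offset = next_stack_offset };
            next_stack_offset += param_size;
        }
    }
}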

View File

@ -3,17 +3,19 @@ const bits = @import("bits.zig");
const Register = bits.Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
const Module = @import("../../Module.zig");
pub const Class = enum { memory, byval, integer, double_integer };
pub fn classifyType(ty: Type, target: std.Target) Class {
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime());
pub fn classifyType(ty: Type, mod: *Module) Class {
const target = mod.getTarget();
std.debug.assert(ty.hasRuntimeBitsIgnoreComptime(mod));
const max_byval_size = target.ptrBitWidth() * 2;
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Struct => {
const bit_size = ty.bitSize(target);
if (ty.containerLayout() == .Packed) {
const bit_size = ty.bitSize(mod);
if (ty.containerLayout(mod) == .Packed) {
if (bit_size > max_byval_size) return .memory;
return .byval;
}
@ -23,8 +25,8 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
return .integer;
},
.Union => {
const bit_size = ty.bitSize(target);
if (ty.containerLayout() == .Packed) {
const bit_size = ty.bitSize(mod);
if (ty.containerLayout(mod) == .Packed) {
if (bit_size > max_byval_size) return .memory;
return .byval;
}
@ -36,21 +38,21 @@ pub fn classifyType(ty: Type, target: std.Target) Class {
.Bool => return .integer,
.Float => return .byval,
.Int, .Enum, .ErrorSet => {
const bit_size = ty.bitSize(target);
const bit_size = ty.bitSize(mod);
if (bit_size > max_byval_size) return .memory;
return .byval;
},
.Vector => {
const bit_size = ty.bitSize(target);
const bit_size = ty.bitSize(mod);
if (bit_size > max_byval_size) return .memory;
return .integer;
},
.Optional => {
std.debug.assert(ty.isPtrLikeOptional());
std.debug.assert(ty.isPtrLikeOptional(mod));
return .byval;
},
.Pointer => {
std.debug.assert(!ty.isSlice());
std.debug.assert(!ty.isSlice(mod));
return .byval;
},
.ErrorUnion,

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -254,7 +254,7 @@ fn fail(emit: *Emit, comptime format: []const u8, args: anytype) InnerError {
@setCold(true);
std.debug.assert(emit.error_msg == null);
const mod = emit.bin_file.base.options.module.?;
emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(), format, args);
emit.error_msg = try Module.ErrorMsg.create(emit.bin_file.base.allocator, mod.declPtr(emit.decl_index).srcLoc(mod), format, args);
return error.EmitFail;
}

View File

@ -5,9 +5,11 @@
//! Note: Above mentioned document is not an official specification, therefore called a convention.
const std = @import("std");
const Type = @import("../../type.zig").Type;
const Target = std.Target;
const Type = @import("../../type.zig").Type;
const Module = @import("../../Module.zig");
/// Defines how to pass a type as part of a function signature,
/// both for parameters as well as return values.
pub const Class = enum { direct, indirect, none };
@ -19,27 +21,28 @@ const direct: [2]Class = .{ .direct, .none };
/// Classifies a given Zig type to determine how it must be passed
/// or returned as a value within a wasm function.
/// When all elements result in `.none`, no value must be passed in or returned.
pub fn classifyType(ty: Type, target: Target) [2]Class {
if (!ty.hasRuntimeBitsIgnoreComptime()) return none;
switch (ty.zigTypeTag()) {
pub fn classifyType(ty: Type, mod: *Module) [2]Class {
const target = mod.getTarget();
if (!ty.hasRuntimeBitsIgnoreComptime(mod)) return none;
switch (ty.zigTypeTag(mod)) {
.Struct => {
if (ty.containerLayout() == .Packed) {
if (ty.bitSize(target) <= 64) return direct;
if (ty.containerLayout(mod) == .Packed) {
if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct };
}
// When the struct type is non-scalar
if (ty.structFieldCount() > 1) return memory;
if (ty.structFieldCount(mod) > 1) return memory;
// When the struct's alignment is non-natural
const field = ty.structFields().values()[0];
const field = ty.structFields(mod).values()[0];
if (field.abi_align != 0) {
if (field.abi_align > field.ty.abiAlignment(target)) {
if (field.abi_align > field.ty.abiAlignment(mod)) {
return memory;
}
}
return classifyType(field.ty, target);
return classifyType(field.ty, mod);
},
.Int, .Enum, .ErrorSet, .Vector => {
const int_bits = ty.intInfo(target).bits;
const int_bits = ty.intInfo(mod).bits;
if (int_bits <= 64) return direct;
if (int_bits <= 128) return .{ .direct, .direct };
return memory;
@ -53,22 +56,22 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
.Bool => return direct,
.Array => return memory,
.Optional => {
std.debug.assert(ty.isPtrLikeOptional());
std.debug.assert(ty.isPtrLikeOptional(mod));
return direct;
},
.Pointer => {
std.debug.assert(!ty.isSlice());
std.debug.assert(!ty.isSlice(mod));
return direct;
},
.Union => {
if (ty.containerLayout() == .Packed) {
if (ty.bitSize(target) <= 64) return direct;
if (ty.containerLayout(mod) == .Packed) {
if (ty.bitSize(mod) <= 64) return direct;
return .{ .direct, .direct };
}
const layout = ty.unionGetLayout(target);
const layout = ty.unionGetLayout(mod);
std.debug.assert(layout.tag_size == 0);
if (ty.unionFields().count() > 1) return memory;
return classifyType(ty.unionFields().values()[0].ty, target);
if (ty.unionFields(mod).count() > 1) return memory;
return classifyType(ty.unionFields(mod).values()[0].ty, mod);
},
.ErrorUnion,
.Frame,
@ -90,29 +93,29 @@ pub fn classifyType(ty: Type, target: Target) [2]Class {
/// Returns the scalar type a given type can represent.
/// Asserts the given type can be represented as a scalar, such as
/// a struct with a single scalar field.
pub fn scalarType(ty: Type, target: std.Target) Type {
switch (ty.zigTypeTag()) {
pub fn scalarType(ty: Type, mod: *Module) Type {
switch (ty.zigTypeTag(mod)) {
.Struct => {
switch (ty.containerLayout()) {
switch (ty.containerLayout(mod)) {
.Packed => {
const struct_obj = ty.castTag(.@"struct").?.data;
return scalarType(struct_obj.backing_int_ty, target);
const struct_obj = mod.typeToStruct(ty).?;
return scalarType(struct_obj.backing_int_ty, mod);
},
else => {
std.debug.assert(ty.structFieldCount() == 1);
return scalarType(ty.structFieldType(0), target);
std.debug.assert(ty.structFieldCount(mod) == 1);
return scalarType(ty.structFieldType(0, mod), mod);
},
}
},
.Union => {
if (ty.containerLayout() != .Packed) {
const layout = ty.unionGetLayout(target);
if (ty.containerLayout(mod) != .Packed) {
const layout = ty.unionGetLayout(mod);
if (layout.payload_size == 0 and layout.tag_size != 0) {
return scalarType(ty.unionTagTypeSafety().?, target);
return scalarType(ty.unionTagTypeSafety(mod).?, mod);
}
std.debug.assert(ty.unionFields().count() == 1);
std.debug.assert(ty.unionFields(mod).count() == 1);
}
return scalarType(ty.unionFields().values()[0].ty, target);
return scalarType(ty.unionFields(mod).values()[0].ty, mod);
},
else => return ty,
}
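A standalone recap of how the two-slot wasm classification above is consumed (stand-in `ClassSketch` enum; the real `none`, `memory`, and `direct` constants are the `[2]Class` values defined near the top of this file):

const std = @import("std");
const ClassSketch = enum { direct, indirect, none };

fn directSlots(classes: [2]ClassSketch) u8 {
    var n: u8 = 0;
    for (classes) |c| {
        if (c == .direct) n += 1;
    }
    return n;
}

test "slot meanings" {
    // .{ .none, .none }: no value is passed or returned at all.
    try std.testing.expect(directSlots(.{ .none, .none }) == 0);
    // .{ .direct, .direct }: one 65..128-bit value split across two scalars.
    try std.testing.expect(directSlots(.{ .direct, .direct }) == 2);
}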

File diff suppressed because it is too large

View File

@ -1,10 +1,3 @@
const std = @import("std");
const Type = @import("../../type.zig").Type;
const Target = std.Target;
const assert = std.debug.assert;
const Register = @import("bits.zig").Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
pub const Class = enum {
integer,
sse,
@ -19,7 +12,7 @@ pub const Class = enum {
float_combine,
};
pub fn classifyWindows(ty: Type, target: Target) Class {
pub fn classifyWindows(ty: Type, mod: *Module) Class {
// https://docs.microsoft.com/en-gb/cpp/build/x64-calling-convention?view=vs-2017
// "There's a strict one-to-one correspondence between a function call's arguments
// and the registers used for those arguments. Any argument that doesn't fit in 8
@ -28,7 +21,7 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
// "All floating point operations are done using the 16 XMM registers."
// "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
// as if they were integers of the same size."
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.Pointer,
.Int,
.Bool,
@ -43,12 +36,12 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
.ErrorUnion,
.AnyFrame,
.Frame,
=> switch (ty.abiSize(target)) {
=> switch (ty.abiSize(mod)) {
0 => unreachable,
1, 2, 4, 8 => return .integer,
else => switch (ty.zigTypeTag()) {
else => switch (ty.zigTypeTag(mod)) {
.Int => return .win_i128,
.Struct, .Union => if (ty.containerLayout() == .Packed) {
.Struct, .Union => if (ty.containerLayout(mod) == .Packed) {
return .win_i128;
} else {
return .memory;
@ -75,14 +68,15 @@ pub const Context = enum { ret, arg, other };
/// There are at most 8 possible return slots. Returned values are in
/// the beginning of the array; unused slots are filled with .none.
pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
pub fn classifySystemV(ty: Type, mod: *Module, ctx: Context) [8]Class {
const target = mod.getTarget();
const memory_class = [_]Class{
.memory, .none, .none, .none,
.none, .none, .none, .none,
};
var result = [1]Class{.none} ** 8;
switch (ty.zigTypeTag()) {
.Pointer => switch (ty.ptrSize()) {
switch (ty.zigTypeTag(mod)) {
.Pointer => switch (ty.ptrSize(mod)) {
.Slice => {
result[0] = .integer;
result[1] = .integer;
@ -94,7 +88,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
},
},
.Int, .Enum, .ErrorSet => {
const bits = ty.intInfo(target).bits;
const bits = ty.intInfo(mod).bits;
if (bits <= 64) {
result[0] = .integer;
return result;
@ -164,8 +158,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
else => unreachable,
},
.Vector => {
const elem_ty = ty.childType();
const bits = elem_ty.bitSize(target) * ty.arrayLen();
const elem_ty = ty.childType(mod);
const bits = elem_ty.bitSize(mod) * ty.arrayLen(mod);
if (bits <= 64) return .{
.sse, .none, .none, .none,
.none, .none, .none, .none,
@ -204,7 +198,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
return memory_class;
},
.Optional => {
if (ty.isPtrLikeOptional()) {
if (ty.isPtrLikeOptional(mod)) {
result[0] = .integer;
return result;
}
@ -215,8 +209,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const ty_size = ty.abiSize(target);
if (ty.containerLayout() == .Packed) {
const ty_size = ty.abiSize(mod);
if (ty.containerLayout(mod) == .Packed) {
assert(ty_size <= 128);
result[0] = .integer;
if (ty_size > 64) result[1] = .integer;
@ -227,15 +221,15 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
var result_i: usize = 0; // out of 8
var byte_i: usize = 0; // out of 8
const fields = ty.structFields();
const fields = ty.structFields(mod);
for (fields.values()) |field| {
if (field.abi_align != 0) {
if (field.abi_align < field.ty.abiAlignment(target)) {
if (field.abi_align < field.ty.abiAlignment(mod)) {
return memory_class;
}
}
const field_size = field.ty.abiSize(target);
const field_class_array = classifySystemV(field.ty, target, .other);
const field_size = field.ty.abiSize(mod);
const field_class_array = classifySystemV(field.ty, mod, .other);
const field_class = std.mem.sliceTo(&field_class_array, .none);
if (byte_i + field_size <= 8) {
// Combine this field with the previous one.
@ -334,8 +328,8 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
// it contains unaligned fields, it has class MEMORY"
// "If the size of the aggregate exceeds a single eightbyte, each is classified
// separately.".
const ty_size = ty.abiSize(target);
if (ty.containerLayout() == .Packed) {
const ty_size = ty.abiSize(mod);
if (ty.containerLayout(mod) == .Packed) {
assert(ty_size <= 128);
result[0] = .integer;
if (ty_size > 64) result[1] = .integer;
@ -344,15 +338,15 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
if (ty_size > 64)
return memory_class;
const fields = ty.unionFields();
const fields = ty.unionFields(mod);
for (fields.values()) |field| {
if (field.abi_align != 0) {
if (field.abi_align < field.ty.abiAlignment(target)) {
if (field.abi_align < field.ty.abiAlignment(mod)) {
return memory_class;
}
}
// Combine this field with the previous one.
const field_class = classifySystemV(field.ty, target, .other);
const field_class = classifySystemV(field.ty, mod, .other);
for (&result, 0..) |*result_item, i| {
const field_item = field_class[i];
// "If both classes are equal, this is the resulting class."
@ -426,7 +420,7 @@ pub fn classifySystemV(ty: Type, target: Target, ctx: Context) [8]Class {
return result;
},
.Array => {
const ty_size = ty.abiSize(target);
const ty_size = ty.abiSize(mod);
if (ty_size <= 64) {
result[0] = .integer;
return result;
@ -527,10 +521,17 @@ pub const RegisterClass = struct {
};
};
const testing = std.testing;
const Module = @import("../../Module.zig");
const Value = @import("../../value.zig").Value;
const builtin = @import("builtin");
const std = @import("std");
const Target = std.Target;
const assert = std.debug.assert;
const testing = std.testing;
const Module = @import("../../Module.zig");
const Register = @import("bits.zig").Register;
const RegisterManagerFn = @import("../../register_manager.zig").RegisterManager;
const Type = @import("../../type.zig").Type;
const Value = @import("../../value.zig").Value;
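The merge rules quoted piecemeal in the comments above ("If both classes are equal, this is the resulting class.", MEMORY dominates, then INTEGER) condense into one small function. A hedged sketch against this file's `Class`, covering only the cases those comments name:

fn mergeClassSketch(a: Class, b: Class) Class {
    if (a == b) return a; // "If both classes are equal, this is the resulting class."
    if (a == .none) return b; // a NONE operand yields the other class
    if (b == .none) return a;
    if (a == .memory or b == .memory) return .memory; // MEMORY dominates
    if (a == .integer or b == .integer) return .integer; // then INTEGER
    return .sse; // otherwise the SSE classes win
}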
fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field {
return .{
@ -541,34 +542,3 @@ fn _field(comptime tag: Type.Tag, offset: u32) Module.Struct.Field {
.is_comptime = false,
};
}
test "C_C_D" {
var fields = Module.Struct.Fields{};
// const C_C_D = extern struct { v1: i8, v2: i8, v3: f64 };
try fields.ensureTotalCapacity(testing.allocator, 3);
defer fields.deinit(testing.allocator);
fields.putAssumeCapacity("v1", _field(.i8, 0));
fields.putAssumeCapacity("v2", _field(.i8, 1));
fields.putAssumeCapacity("v3", _field(.f64, 4));
var C_C_D_struct = Module.Struct{
.fields = fields,
.namespace = undefined,
.owner_decl = undefined,
.zir_index = undefined,
.layout = .Extern,
.status = .fully_resolved,
.known_non_opv = true,
.is_tuple = false,
};
var C_C_D = Type.Payload.Struct{ .data = &C_C_D_struct };
try testing.expectEqual(
[_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none },
classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .ret),
);
try testing.expectEqual(
[_]Class{ .integer, .sse, .none, .none, .none, .none, .none, .none },
classifySystemV(Type.initPayload(&C_C_D.base), builtin.target, .arg),
);
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -292,19 +292,19 @@ pub const CType = extern union {
.abi = std.math.log2_int(u32, abi_alignment),
};
}
pub fn abiAlign(ty: Type, target: Target) AlignAs {
const abi_align = ty.abiAlignment(target);
pub fn abiAlign(ty: Type, mod: *Module) AlignAs {
const abi_align = ty.abiAlignment(mod);
return init(abi_align, abi_align);
}
pub fn fieldAlign(struct_ty: Type, field_i: usize, target: Target) AlignAs {
pub fn fieldAlign(struct_ty: Type, field_i: usize, mod: *Module) AlignAs {
return init(
struct_ty.structFieldAlign(field_i, target),
struct_ty.structFieldType(field_i).abiAlignment(target),
struct_ty.structFieldAlign(field_i, mod),
struct_ty.structFieldType(field_i, mod).abiAlignment(mod),
);
}
pub fn unionPayloadAlign(union_ty: Type, target: Target) AlignAs {
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const union_payload_align = union_obj.abiAlignment(target, false);
pub fn unionPayloadAlign(union_ty: Type, mod: *Module) AlignAs {
const union_obj = mod.typeToUnion(union_ty).?;
const union_payload_align = union_obj.abiAlignment(mod, false);
return init(union_payload_align, union_payload_align);
}
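`AlignAs.init` above stores alignments as their log2 (the `.abi = std.math.log2_int(u32, abi_alignment)` in the first hunk of this file); a one-line check of that encoding, assuming power-of-two alignments:

test "alignment is stored as log2" {
    // an ABI alignment of 8 bytes encodes as 3
    try std.testing.expectEqual(@as(std.math.Log2Int(u32), 3), std.math.log2_int(u32, 8));
}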
@ -344,8 +344,8 @@ pub const CType = extern union {
return self.map.entries.items(.hash)[index - Tag.no_payload_count];
}
pub fn typeToIndex(self: Set, ty: Type, target: Target, kind: Kind) ?Index {
const lookup = Convert.Lookup{ .imm = .{ .set = &self, .target = target } };
pub fn typeToIndex(self: Set, ty: Type, mod: *Module, kind: Kind) ?Index {
const lookup = Convert.Lookup{ .imm = .{ .set = &self, .mod = mod } };
var convert: Convert = undefined;
convert.initType(ty, kind, lookup) catch unreachable;
@ -405,7 +405,7 @@ pub const CType = extern union {
);
if (!gop.found_existing) {
errdefer _ = self.set.map.pop();
gop.key_ptr.* = try createFromConvert(self, ty, lookup.getTarget(), kind, convert);
gop.key_ptr.* = try createFromConvert(self, ty, lookup.getModule(), kind, convert);
}
if (std.debug.runtime_safety) {
const adapter = TypeAdapter64{
@ -1236,10 +1236,10 @@ pub const CType = extern union {
}
pub const Lookup = union(enum) {
fail: Target,
fail: *Module,
imm: struct {
set: *const Store.Set,
target: Target,
mod: *Module,
},
mut: struct {
promoted: *Store.Promoted,
@ -1254,10 +1254,14 @@ pub const CType = extern union {
}
pub fn getTarget(self: @This()) Target {
return self.getModule().getTarget();
}
pub fn getModule(self: @This()) *Module {
return switch (self) {
.fail => |target| target,
.imm => |imm| imm.target,
.mut => |mut| mut.mod.getTarget(),
.fail => |mod| mod,
.imm => |imm| imm.mod,
.mut => |mut| mut.mod,
};
}
@ -1272,7 +1276,7 @@ pub const CType = extern union {
pub fn typeToIndex(self: @This(), ty: Type, kind: Kind) !?Index {
return switch (self) {
.fail => null,
.imm => |imm| imm.set.typeToIndex(ty, imm.target, kind),
.imm => |imm| imm.set.typeToIndex(ty, imm.mod, kind),
.mut => |mut| try mut.promoted.typeToIndex(ty, mut.mod, kind),
};
}
@ -1284,7 +1288,7 @@ pub const CType = extern union {
pub fn freeze(self: @This()) @This() {
return switch (self) {
.fail, .imm => self,
.mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .target = self.getTarget() } },
.mut => |mut| .{ .imm = .{ .set = &mut.promoted.set, .mod = mut.mod } },
};
}
};
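A hedged miniature of the three `Lookup` modes above: only the mutable mode may create entries, the immutable mode can only find existing ones, and `.fail` always answers null so conversion degrades to forward declarations (stand-in index type, not the real API):

const LookupSketch = union(enum) {
    fail,
    imm: u32, // an existing index that can be found but not extended
    mut: u32, // may also allocate new indices in the real code

    fn typeToIndex(self: @This()) ?u32 {
        return switch (self) {
            .fail => null,
            .imm, .mut => |idx| idx,
        };
    }
};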
@ -1338,7 +1342,7 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "array",
.type = array_idx,
.alignas = AlignAs.abiAlign(ty, lookup.getTarget()),
.alignas = AlignAs.abiAlign(ty, lookup.getModule()),
};
self.initAnon(kind, fwd_idx, 1);
} else self.init(switch (kind) {
@ -1350,30 +1354,30 @@ pub const CType = extern union {
}
pub fn initType(self: *@This(), ty: Type, kind: Kind, lookup: Lookup) !void {
const target = lookup.getTarget();
const mod = lookup.getModule();
self.* = undefined;
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime())
if (!ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
self.init(.void)
else if (ty.isAbiInt()) switch (ty.tag()) {
.usize => self.init(.uintptr_t),
.isize => self.init(.intptr_t),
.c_char => self.init(.char),
.c_short => self.init(.short),
.c_ushort => self.init(.@"unsigned short"),
.c_int => self.init(.int),
.c_uint => self.init(.@"unsigned int"),
.c_long => self.init(.long),
.c_ulong => self.init(.@"unsigned long"),
.c_longlong => self.init(.@"long long"),
.c_ulonglong => self.init(.@"unsigned long long"),
else => switch (tagFromIntInfo(ty.intInfo(target))) {
else if (ty.isAbiInt(mod)) switch (ty.ip_index) {
.usize_type => self.init(.uintptr_t),
.isize_type => self.init(.intptr_t),
.c_char_type => self.init(.char),
.c_short_type => self.init(.short),
.c_ushort_type => self.init(.@"unsigned short"),
.c_int_type => self.init(.int),
.c_uint_type => self.init(.@"unsigned int"),
.c_long_type => self.init(.long),
.c_ulong_type => self.init(.@"unsigned long"),
.c_longlong_type => self.init(.@"long long"),
.c_ulonglong_type => self.init(.@"unsigned long long"),
else => switch (tagFromIntInfo(ty.intInfo(mod))) {
.void => unreachable,
else => |t| self.init(t),
.array => switch (kind) {
.forward, .complete, .global => {
const abi_size = ty.abiSize(target);
const abi_align = ty.abiAlignment(target);
const abi_size = ty.abiSize(mod);
const abi_align = ty.abiAlignment(mod);
self.storage = .{ .seq = .{ .base = .{ .tag = .array }, .data = .{
.len = @divExact(abi_size, abi_align),
.elem_type = tagFromIntInfo(.{
@ -1389,7 +1393,7 @@ pub const CType = extern union {
.payload => unreachable,
},
},
} else switch (ty.zigTypeTag()) {
} else switch (ty.zigTypeTag(mod)) {
.Frame => unreachable,
.AnyFrame => unreachable,
@ -1408,18 +1412,18 @@ pub const CType = extern union {
.Bool => self.init(.bool),
.Float => self.init(switch (ty.tag()) {
.f16 => .zig_f16,
.f32 => .zig_f32,
.f64 => .zig_f64,
.f80 => .zig_f80,
.f128 => .zig_f128,
.c_longdouble => .zig_c_longdouble,
.Float => self.init(switch (ty.ip_index) {
.f16_type => .zig_f16,
.f32_type => .zig_f32,
.f64_type => .zig_f64,
.f80_type => .zig_f80,
.f128_type => .zig_f128,
.c_longdouble_type => .zig_c_longdouble,
else => unreachable,
}),
.Pointer => {
const info = ty.ptrInfo().data;
const info = ty.ptrInfo(mod);
switch (info.size) {
.Slice => {
if (switch (kind) {
@ -1427,19 +1431,18 @@ pub const CType = extern union {
.complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
.payload => unreachable,
}) |fwd_idx| {
var buf: Type.SlicePtrFieldTypeBuffer = undefined;
const ptr_ty = ty.slicePtrFieldType(&buf);
const ptr_ty = ty.slicePtrFieldType(mod);
if (try lookup.typeToIndex(ptr_ty, kind)) |ptr_idx| {
self.storage = .{ .anon = undefined };
self.storage.anon.fields[0] = .{
.name = "ptr",
.type = ptr_idx,
.alignas = AlignAs.abiAlign(ptr_ty, target),
.alignas = AlignAs.abiAlign(ptr_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "len",
.type = Tag.uintptr_t.toIndex(),
.alignas = AlignAs.abiAlign(Type.usize, target),
.alignas = AlignAs.abiAlign(Type.usize, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@ -1462,16 +1465,12 @@ pub const CType = extern union {
},
};
var host_int_pl = Type.Payload.Bits{
.base = .{ .tag = .int_unsigned },
.data = info.host_size * 8,
};
const pointee_ty = if (info.host_size > 0 and info.vector_index == .none)
Type.initPayload(&host_int_pl.base)
try mod.intType(.unsigned, info.host_size * 8)
else
info.pointee_type;
if (if (info.size == .C and pointee_ty.tag() == .u8)
if (if (info.size == .C and pointee_ty.ip_index == .u8_type)
Tag.char.toIndex()
else
try lookup.typeToIndex(pointee_ty, .forward)) |child_idx|
@ -1486,26 +1485,24 @@ pub const CType = extern union {
}
},
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) {
if (ty.castTag(.@"struct")) |struct_obj| {
try self.initType(struct_obj.data.backing_int_ty, kind, lookup);
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout(mod) == .Packed) {
if (mod.typeToStruct(ty)) |struct_obj| {
try self.initType(struct_obj.backing_int_ty, kind, lookup);
} else {
var buf: Type.Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = @intCast(u16, ty.bitSize(target)),
};
try self.initType(Type.initPayload(&buf.base), kind, lookup);
const bits = @intCast(u16, ty.bitSize(mod));
const int_ty = try mod.intType(.unsigned, bits);
try self.initType(int_ty, kind, lookup);
}
} else if (ty.isTupleOrAnonStruct()) {
} else if (ty.isTupleOrAnonStruct(mod)) {
if (lookup.isMutable()) {
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
.Struct => ty.structFieldCount(mod),
.Union => ty.unionFields(mod).count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(field_ty, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
@ -1533,14 +1530,14 @@ pub const CType = extern union {
.payload => unreachable,
});
} else {
const tag_ty = ty.unionTagTypeSafety();
const tag_ty = ty.unionTagTypeSafety(mod);
const is_tagged_union_wrapper = kind != .payload and tag_ty != null;
const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper;
switch (kind) {
.forward, .forward_parameter => {
self.storage = .{ .fwd = .{
.base = .{ .tag = if (is_struct) .fwd_struct else .fwd_union },
.data = ty.getOwnerDecl(),
.data = ty.getOwnerDecl(mod),
} };
self.value = .{ .cty = initPayload(&self.storage.fwd) };
},
@ -1555,7 +1552,7 @@ pub const CType = extern union {
self.storage.anon.fields[field_count] = .{
.name = "payload",
.type = payload_idx.?,
.alignas = AlignAs.unionPayloadAlign(ty, target),
.alignas = AlignAs.unionPayloadAlign(ty, mod),
};
field_count += 1;
}
@ -1563,7 +1560,7 @@ pub const CType = extern union {
self.storage.anon.fields[field_count] = .{
.name = "tag",
.type = tag_idx.?,
.alignas = AlignAs.abiAlign(tag_ty.?, target),
.alignas = AlignAs.abiAlign(tag_ty.?, mod),
};
field_count += 1;
}
@ -1576,19 +1573,19 @@ pub const CType = extern union {
} };
self.value = .{ .cty = initPayload(&self.storage.anon.pl.complete) };
} else self.init(.@"struct");
} else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes()) {
} else if (kind == .payload and ty.unionHasAllZeroBitFieldTypes(mod)) {
self.init(.void);
} else {
var is_packed = false;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
.Struct => ty.structFieldCount(mod),
.Union => ty.unionFields(mod).count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_ty = ty.structFieldType(field_i, mod);
if (!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
const field_align = AlignAs.fieldAlign(ty, field_i, target);
const field_align = AlignAs.fieldAlign(ty, field_i, mod);
if (field_align.@"align" < field_align.abi) {
is_packed = true;
if (!lookup.isMutable()) break;
@ -1627,9 +1624,9 @@ pub const CType = extern union {
.Vector => .vector,
else => unreachable,
};
if (try lookup.typeToIndex(ty.childType(), kind)) |child_idx| {
if (try lookup.typeToIndex(ty.childType(mod), kind)) |child_idx| {
self.storage = .{ .seq = .{ .base = .{ .tag = t }, .data = .{
.len = ty.arrayLenIncludingSentinel(),
.len = ty.arrayLenIncludingSentinel(mod),
.elem_type = child_idx,
} } };
self.value = .{ .cty = initPayload(&self.storage.seq) };
@ -1641,10 +1638,9 @@ pub const CType = extern union {
},
.Optional => {
var buf: Type.Payload.ElemType = undefined;
const payload_ty = ty.optionalChild(&buf);
if (payload_ty.hasRuntimeBitsIgnoreComptime()) {
if (ty.optionalReprIsPayload()) {
const payload_ty = ty.optionalChild(mod);
if (payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
if (ty.optionalReprIsPayload(mod)) {
try self.initType(payload_ty, kind, lookup);
} else if (switch (kind) {
.forward, .forward_parameter => @as(Index, undefined),
@ -1661,12 +1657,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "payload",
.type = payload_idx,
.alignas = AlignAs.abiAlign(payload_ty, target),
.alignas = AlignAs.abiAlign(payload_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "is_null",
.type = Tag.bool.toIndex(),
.alignas = AlignAs.abiAlign(Type.bool, target),
.alignas = AlignAs.abiAlign(Type.bool, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
@ -1684,14 +1680,14 @@ pub const CType = extern union {
.complete, .parameter, .global => try lookup.typeToIndex(ty, .forward),
.payload => unreachable,
}) |fwd_idx| {
const payload_ty = ty.errorUnionPayload();
const payload_ty = ty.errorUnionPayload(mod);
if (try lookup.typeToIndex(payload_ty, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
.global => .global,
.payload => unreachable,
})) |payload_idx| {
const error_ty = ty.errorUnionSet();
const error_ty = ty.errorUnionSet(mod);
if (payload_idx == Tag.void.toIndex()) {
try self.initType(error_ty, kind, lookup);
} else if (try lookup.typeToIndex(error_ty, kind)) |error_idx| {
@ -1699,12 +1695,12 @@ pub const CType = extern union {
self.storage.anon.fields[0] = .{
.name = "payload",
.type = payload_idx,
.alignas = AlignAs.abiAlign(payload_ty, target),
.alignas = AlignAs.abiAlign(payload_ty, mod),
};
self.storage.anon.fields[1] = .{
.name = "error",
.type = error_idx,
.alignas = AlignAs.abiAlign(error_ty, target),
.alignas = AlignAs.abiAlign(error_ty, mod),
};
self.initAnon(kind, fwd_idx, 2);
} else self.init(switch (kind) {
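
Seen together, the slice, optional, and error-union hunks above all lower to two-field C structs with fixed member names. A rough Zig-side model of those layouts; the member names follow the hunks, while the element types and the u16 error integer are illustrative assumptions (real alignment comes from AlignAs.abiAlign):

// []u8: fat pointer, "ptr" then "len" (len typed as uintptr_t above).
const CSlice = extern struct { ptr: [*]u8, len: usize };
// ?u32, non-pointer-like: payload plus an is_null flag.
const COptional = extern struct { payload: u32, is_null: bool };
// anyerror!u32: payload plus the error value (width assumed here).
const CErrorUnion = extern struct { payload: u32, @"error": u16 };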
@ -1723,7 +1719,7 @@ pub const CType = extern union {
.Opaque => self.init(.void),
.Fn => {
const info = ty.fnInfo();
const info = mod.typeToFunc(ty).?;
if (!info.is_generic) {
if (lookup.isMutable()) {
const param_kind: Kind = switch (kind) {
@ -1731,10 +1727,10 @@ pub const CType = extern union {
.complete, .parameter, .global => .parameter,
.payload => unreachable,
};
_ = try lookup.typeToIndex(info.return_type, param_kind);
_ = try lookup.typeToIndex(info.return_type.toType(), param_kind);
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
_ = try lookup.typeToIndex(param_type, param_kind);
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
_ = try lookup.typeToIndex(param_type.toType(), param_kind);
}
}
self.init(if (info.is_var_args) .varargs_function else .function);
@ -1900,16 +1896,16 @@ pub const CType = extern union {
}
}
fn createFromType(store: *Store.Promoted, ty: Type, target: Target, kind: Kind) !CType {
fn createFromType(store: *Store.Promoted, ty: Type, mod: *Module, kind: Kind) !CType {
var convert: Convert = undefined;
try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .target = target } });
return createFromConvert(store, ty, target, kind, &convert);
try convert.initType(ty, kind, .{ .imm = .{ .set = &store.set, .mod = mod } });
return createFromConvert(store, ty, mod, kind, &convert);
}
fn createFromConvert(
store: *Store.Promoted,
ty: Type,
target: Target,
mod: *Module,
kind: Kind,
convert: Convert,
) !CType {
@ -1930,44 +1926,44 @@ pub const CType = extern union {
.packed_struct,
.packed_union,
=> {
const zig_ty_tag = ty.zigTypeTag();
const zig_ty_tag = ty.zigTypeTag(mod);
const fields_len = switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
.Struct => ty.structFieldCount(mod),
.Union => ty.unionFields(mod).count(),
else => unreachable,
};
var c_fields_len: usize = 0;
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
c_fields_len += 1;
}
const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
var c_field_i: usize = 0;
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1;
fields_pl[c_field_i] = .{
.name = try if (ty.isSimpleTuple())
.name = try if (ty.isSimpleTuple(mod))
std.fmt.allocPrintZ(arena, "f{}", .{field_i})
else
arena.dupeZ(u8, switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i),
.Union => ty.unionFields().keys()[field_i],
arena.dupeZ(u8, mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
.Union => ty.unionFields(mod).keys()[field_i],
else => unreachable,
}),
.type = store.set.typeToIndex(field_ty, target, switch (kind) {
})),
.type = store.set.typeToIndex(field_ty, mod, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter, .payload => .complete,
.global => .global,
}).?,
.alignas = AlignAs.fieldAlign(ty, field_i, target),
.alignas = AlignAs.fieldAlign(ty, field_i, mod),
};
}
@ -1988,8 +1984,8 @@ pub const CType = extern union {
const unnamed_pl = try arena.create(Payload.Unnamed);
unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
.owner_decl = ty.getOwnerDecl(),
.id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable,
.owner_decl = ty.getOwnerDecl(mod),
.id = if (ty.unionTagTypeSafety(mod)) |_| 0 else unreachable,
} };
return initPayload(unnamed_pl);
},
@ -2004,7 +2000,7 @@ pub const CType = extern union {
const struct_pl = try arena.create(Payload.Aggregate);
struct_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
.fwd_decl = store.set.typeToIndex(ty, target, .forward).?,
.fwd_decl = store.set.typeToIndex(ty, mod, .forward).?,
} };
return initPayload(struct_pl);
},
@ -2016,7 +2012,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
const info = ty.fnInfo();
const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const param_kind: Kind = switch (kind) {
.forward, .forward_parameter => .forward_parameter,
@ -2026,21 +2022,21 @@ pub const CType = extern union {
var c_params_len: usize = 0;
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
c_params_len += 1;
}
const params_pl = try arena.alloc(Index, c_params_len);
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
params_pl[c_param_i] = store.set.typeToIndex(param_type, target, param_kind).?;
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
params_pl[c_param_i] = store.set.typeToIndex(param_type.toType(), mod, param_kind).?;
c_param_i += 1;
}
const fn_pl = try arena.create(Payload.Function);
fn_pl.* = .{ .base = .{ .tag = t }, .data = .{
.return_type = store.set.typeToIndex(info.return_type, target, param_kind).?,
.return_type = store.set.typeToIndex(info.return_type.toType(), mod, param_kind).?,
.param_types = params_pl,
} };
return initPayload(fn_pl);
@ -2067,33 +2063,33 @@ pub const CType = extern union {
}
pub fn eql(self: @This(), ty: Type, cty: CType) bool {
const mod = self.lookup.getModule();
switch (self.convert.value) {
.cty => |c| return c.eql(cty),
.tag => |t| {
if (t != cty.tag()) return false;
const target = self.lookup.getTarget();
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
=> {
if (!ty.isTupleOrAnonStruct()) return false;
if (!ty.isTupleOrAnonStruct(mod)) return false;
var name_buf: [
std.fmt.count("f{}", .{std.math.maxInt(usize)})
]u8 = undefined;
const c_fields = cty.cast(Payload.Fields).?.data;
const zig_ty_tag = ty.zigTypeTag();
const zig_ty_tag = ty.zigTypeTag(mod);
var c_field_i: usize = 0;
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
.Struct => ty.structFieldCount(mod),
.Union => ty.unionFields(mod).count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
defer c_field_i += 1;
const c_field = &c_fields[c_field_i];
@ -2105,15 +2101,16 @@ pub const CType = extern union {
.payload => unreachable,
}) or !mem.eql(
u8,
if (ty.isSimpleTuple())
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i),
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
},
if (ty.isSimpleTuple(mod))
std.fmt.bufPrintZ(&name_buf, "f{}", .{field_i}) catch unreachable
else
mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
.Union => ty.unionFields(mod).keys()[field_i],
else => unreachable,
}),
mem.span(c_field.name),
) or AlignAs.fieldAlign(ty, field_i, target).@"align" !=
) or AlignAs.fieldAlign(ty, field_i, mod).@"align" !=
c_field.alignas.@"align") return false;
}
return true;
@ -2125,9 +2122,9 @@ pub const CType = extern union {
.packed_unnamed_union,
=> switch (self.kind) {
.forward, .forward_parameter, .complete, .parameter, .global => unreachable,
.payload => if (ty.unionTagTypeSafety()) |_| {
.payload => if (ty.unionTagTypeSafety(mod)) |_| {
const data = cty.cast(Payload.Unnamed).?.data;
return ty.getOwnerDecl() == data.owner_decl and data.id == 0;
return ty.getOwnerDecl(mod) == data.owner_decl and data.id == 0;
} else unreachable,
},
@ -2146,9 +2143,9 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
if (ty.zigTypeTag() != .Fn) return false;
if (ty.zigTypeTag(mod) != .Fn) return false;
const info = ty.fnInfo();
const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const data = cty.cast(Payload.Function).?.data;
const param_kind: Kind = switch (self.kind) {
@ -2157,18 +2154,18 @@ pub const CType = extern union {
.payload => unreachable,
};
if (!self.eqlRecurse(info.return_type, data.return_type, param_kind))
if (!self.eqlRecurse(info.return_type.toType(), data.return_type, param_kind))
return false;
var c_param_i: usize = 0;
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
if (c_param_i >= data.param_types.len) return false;
const param_cty = data.param_types[c_param_i];
c_param_i += 1;
if (!self.eqlRecurse(param_type, param_cty, param_kind))
if (!self.eqlRecurse(param_type.toType(), param_cty, param_kind))
return false;
}
return c_param_i == data.param_types.len;
@ -2202,7 +2199,7 @@ pub const CType = extern union {
.tag => |t| {
autoHash(hasher, t);
const target = self.lookup.getTarget();
const mod = self.lookup.getModule();
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
@ -2211,15 +2208,15 @@ pub const CType = extern union {
std.fmt.count("f{}", .{std.math.maxInt(usize)})
]u8 = undefined;
const zig_ty_tag = ty.zigTypeTag();
for (0..switch (ty.zigTypeTag()) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
const zig_ty_tag = ty.zigTypeTag(mod);
for (0..switch (ty.zigTypeTag(mod)) {
.Struct => ty.structFieldCount(mod),
.Union => ty.unionFields(mod).count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
const field_ty = ty.structFieldType(field_i, mod);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i, mod)) or
!field_ty.hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, field_ty, switch (self.kind) {
.forward, .forward_parameter => .forward,
@ -2227,14 +2224,15 @@ pub const CType = extern union {
.global => .global,
.payload => unreachable,
});
hasher.update(if (ty.isSimpleTuple())
hasher.update(if (ty.isSimpleTuple(mod))
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i),
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
});
autoHash(hasher, AlignAs.fieldAlign(ty, field_i, target).@"align");
else
mod.intern_pool.stringToSlice(switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i, mod),
.Union => ty.unionFields(mod).keys()[field_i],
else => unreachable,
}));
autoHash(hasher, AlignAs.fieldAlign(ty, field_i, mod).@"align");
}
},
@ -2244,8 +2242,8 @@ pub const CType = extern union {
.packed_unnamed_union,
=> switch (self.kind) {
.forward, .forward_parameter, .complete, .parameter, .global => unreachable,
.payload => if (ty.unionTagTypeSafety()) |_| {
autoHash(hasher, ty.getOwnerDecl());
.payload => if (ty.unionTagTypeSafety(mod)) |_| {
autoHash(hasher, ty.getOwnerDecl(mod));
autoHash(hasher, @as(u32, 0));
} else unreachable,
},
@ -2261,7 +2259,7 @@ pub const CType = extern union {
.function,
.varargs_function,
=> {
const info = ty.fnInfo();
const info = mod.typeToFunc(ty).?;
assert(!info.is_generic);
const param_kind: Kind = switch (self.kind) {
.forward, .forward_parameter => .forward_parameter,
@ -2269,10 +2267,10 @@ pub const CType = extern union {
.payload => unreachable,
};
self.updateHasherRecurse(hasher, info.return_type, param_kind);
self.updateHasherRecurse(hasher, info.return_type.toType(), param_kind);
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
self.updateHasherRecurse(hasher, param_type, param_kind);
if (!param_type.toType().hasRuntimeBitsIgnoreComptime(mod)) continue;
self.updateHasherRecurse(hasher, param_type.toType(), param_kind);
}
},

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -11,7 +11,8 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const ZigDecl = @import("../../Module.zig").Decl;
const ZigModule = @import("../../Module.zig");
const ZigDecl = ZigModule.Decl;
const spec = @import("spec.zig");
const Word = spec.Word;
@ -389,8 +390,8 @@ pub fn addFunction(self: *Module, decl_index: Decl.Index, func: Fn) !void {
/// Fetch the result-id of an OpString instruction that encodes the path of the source
/// file of the decl. This function may also emit an OpSource with source-level information regarding
/// the decl.
pub fn resolveSourceFileName(self: *Module, decl: *ZigDecl) !IdRef {
const path = decl.getFileScope().sub_file_path;
pub fn resolveSourceFileName(self: *Module, zig_module: *ZigModule, zig_decl: *ZigDecl) !IdRef {
const path = zig_decl.getFileScope(zig_module).sub_file_path;
const result = try self.source_file_names.getOrPut(self.gpa, path);
if (!result.found_existing) {
const file_result_id = self.allocId();
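
resolveSourceFileName above hands out one OpString result-id per source path, allocating and emitting only when getOrPut misses. A minimal sketch of that memoization shape with hypothetical names (the counter stands in for allocId plus the OpString emission; keys are assumed to outlive the map, as literals do here):

const std = @import("std");

const IdCache = struct {
    gpa: std.mem.Allocator,
    next_id: u32 = 0,
    ids: std.StringHashMapUnmanaged(u32) = .{},

    fn resolve(self: *IdCache, path: []const u8) !u32 {
        const gop = try self.ids.getOrPut(self.gpa, path);
        if (!gop.found_existing) {
            gop.value_ptr.* = self.next_id; // the real code emits OpString here
            self.next_id += 1;
        }
        return gop.value_ptr.*;
    }
};

test "one id per distinct path" {
    var cache = IdCache{ .gpa = std.testing.allocator };
    defer cache.ids.deinit(cache.gpa);
    const first = try cache.resolve("a.zig");
    try std.testing.expectEqual(first, try cache.resolve("a.zig"));
}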

View File

@ -99,7 +99,7 @@ fn dumpStatusReport() !void {
allocator,
anal.body,
anal.body_index,
block.namespace.file_scope,
mod.namespacePtr(block.namespace).file_scope,
block_src_decl.src_node,
6, // indent
stderr,
@ -108,7 +108,7 @@ fn dumpStatusReport() !void {
else => |e| return e,
};
try stderr.writeAll(" For full context, use the command\n zig ast-check -t ");
try writeFilePath(block.namespace.file_scope, stderr);
try writeFilePath(mod.namespacePtr(block.namespace).file_scope, stderr);
try stderr.writeAll("\n\n");
var parent = anal.parent;
@ -121,7 +121,7 @@ fn dumpStatusReport() !void {
print_zir.renderSingleInstruction(
allocator,
curr.body[curr.body_index],
curr.block.namespace.file_scope,
mod.namespacePtr(curr.block.namespace).file_scope,
curr_block_src_decl.src_node,
6, // indent
stderr,
@ -148,7 +148,7 @@ fn writeFilePath(file: *Module.File, stream: anytype) !void {
}
fn writeFullyQualifiedDeclWithFile(mod: *Module, decl: *Decl, stream: anytype) !void {
try writeFilePath(decl.getFileScope(), stream);
try writeFilePath(decl.getFileScope(mod), stream);
try stream.writeAll(": ");
try decl.renderFullyQualifiedDebugName(mod, stream);
}

View File

@ -502,8 +502,6 @@ pub const File = struct {
/// of the final binary.
pub fn lowerUnnamedConst(base: *File, tv: TypedValue, decl_index: Module.Decl.Index) UpdateDeclError!u32 {
if (build_options.only_c) @compileError("unreachable");
const decl = base.options.module.?.declPtr(decl_index);
log.debug("lowerUnnamedConst {*} ({s})", .{ decl, decl.name });
switch (base.tag) {
// zig fmt: off
.coff => return @fieldParentPtr(Coff, "base", base).lowerUnnamedConst(tv, decl_index),
@ -543,7 +541,6 @@ pub const File = struct {
/// May be called before or after updateDeclExports for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty.fmt(module) });
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
@ -564,34 +561,27 @@ pub const File = struct {
}
/// May be called before or after updateDeclExports for any given Decl.
pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) UpdateDeclError!void {
const owner_decl = module.declPtr(func.owner_decl);
log.debug("updateFunc {*} ({s}), type={}", .{
owner_decl, owner_decl.name, owner_decl.ty.fmt(module),
});
pub fn updateFunc(base: *File, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) UpdateDeclError!void {
if (build_options.only_c) {
assert(base.tag == .c);
return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness);
return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness);
}
switch (base.tag) {
// zig fmt: off
.coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness),
.elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness),
.macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness),
.c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness),
.nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func, air, liveness),
.coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func_index, air, liveness),
.elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func_index, air, liveness),
.macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func_index, air, liveness),
.c => return @fieldParentPtr(C, "base", base).updateFunc(module, func_index, air, liveness),
.wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func_index, air, liveness),
.spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func_index, air, liveness),
.plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func_index, air, liveness),
.nvptx => return @fieldParentPtr(NvPtx, "base", base).updateFunc(module, func_index, air, liveness),
// zig fmt: on
}
}
pub fn updateDeclLineNumber(base: *File, module: *Module, decl_index: Module.Decl.Index) UpdateDeclError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDeclLineNumber {*} ({s}), line={}", .{
decl, decl.name, decl.src_line + 1,
});
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
@ -867,7 +857,6 @@ pub const File = struct {
exports: []const *Module.Export,
) UpdateDeclExportsError!void {
const decl = module.declPtr(decl_index);
log.debug("updateDeclExports {*} ({s})", .{ decl, decl.name });
assert(decl.has_tv);
if (build_options.only_c) {
assert(base.tag == .c);
@ -1124,13 +1113,13 @@ pub const File = struct {
pub fn initDecl(kind: Kind, decl: ?Module.Decl.Index, mod: *Module) LazySymbol {
return .{ .kind = kind, .ty = if (decl) |decl_index|
mod.declPtr(decl_index).val.castTag(.ty).?.data
mod.declPtr(decl_index).val.toType()
else
Type.anyerror };
}
pub fn getDecl(self: LazySymbol) Module.Decl.OptionalIndex {
return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull());
pub fn getDecl(self: LazySymbol, mod: *Module) Module.Decl.OptionalIndex {
return Module.Decl.OptionalIndex.init(self.ty.getOwnerDeclOrNull(mod));
}
};
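
The initDecl change above swaps val.castTag(.ty).?.data for val.toType(): with the intern pool, a value that denotes a type and the type itself share one pool index, so the conversion is a rewrap of the index rather than a payload unwrap. A toy model of that bridge (the Index tag and field names are illustrative):

const Index = enum(u32) { anyerror_type, _ };

const Value = struct {
    ip_index: Index,
    fn toType(val: Value) Type {
        return .{ .ip_index = val.ip_index }; // same index, different wrapper
    }
};

const Type = struct { ip_index: Index };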

View File

@ -6,6 +6,7 @@ const fs = std.fs;
const C = @This();
const Module = @import("../Module.zig");
const InternPool = @import("../InternPool.zig");
const Compilation = @import("../Compilation.zig");
const codegen = @import("../codegen/c.zig");
const link = @import("../link.zig");
@ -87,12 +88,13 @@ pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void {
}
}
pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *C, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
const func = module.funcPtr(func_index);
const decl_index = func.owner_decl;
const gop = try self.decl_table.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
@ -111,7 +113,7 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, livenes
.value_map = codegen.CValueMap.init(gpa),
.air = air,
.liveness = liveness,
.func = func,
.func_index = func_index,
.object = .{
.dg = .{
.gpa = gpa,
@ -288,11 +290,11 @@ pub fn flushModule(self: *C, _: *Compilation, prog_node: *std.Progress.Node) !vo
}
{
var export_names = std.StringHashMapUnmanaged(void){};
var export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void) = .{};
defer export_names.deinit(gpa);
try export_names.ensureTotalCapacity(gpa, @intCast(u32, module.decl_exports.entries.len));
for (module.decl_exports.values()) |exports| for (exports.items) |@"export"|
try export_names.put(gpa, @"export".options.name, {});
try export_names.put(gpa, @"export".opts.name, {});
while (f.remaining_decls.popOrNull()) |kv| {
const decl_index = kv.key;
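
export_names above moves from string keys to InternPool.NullTerminatedString keys, so the map becomes an AutoHashMap: lookups hash and compare a 32-bit handle instead of walking name bytes. A small self-contained sketch of that shape, with the handle type reduced to a bare enum:

const std = @import("std");

const NullTerminatedString = enum(u32) { _ };

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var names: std.AutoHashMapUnmanaged(NullTerminatedString, void) = .{};
    defer names.deinit(gpa);

    const name = @intToEnum(NullTerminatedString, 42); // handle from interning
    try names.put(gpa, name, {});
    std.debug.print("{}\n", .{names.contains(name)});
}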
@ -552,10 +554,11 @@ fn flushDecl(
self: *C,
f: *Flush,
decl_index: Module.Decl.Index,
export_names: std.StringHashMapUnmanaged(void),
export_names: std.AutoHashMapUnmanaged(InternPool.NullTerminatedString, void),
) FlushDeclError!void {
const gpa = self.base.allocator;
const decl = self.base.options.module.?.declPtr(decl_index);
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
// Before flushing any particular Decl we must ensure its
// dependencies are already flushed, so that the order in the .c
// file comes out correctly.
@ -569,7 +572,7 @@ fn flushDecl(
try self.flushLazyFns(f, decl_block.lazy_fns);
try f.all_buffers.ensureUnusedCapacity(gpa, 1);
if (!(decl.isExtern() and export_names.contains(mem.span(decl.name))))
if (!(decl.isExtern(mod) and export_names.contains(decl.name)))
f.appendBufAssumeCapacity(decl_block.fwd_decl.items);
}

View File

@ -1032,20 +1032,21 @@ fn freeAtom(self: *Coff, atom_index: Atom.Index) void {
self.getAtomPtr(atom_index).sym_index = 0;
}
pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *Coff, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| {
return llvm_object.updateFunc(module, func, air, liveness);
return llvm_object.updateFunc(mod, func_index, air, liveness);
}
}
const tracy = trace(@src());
defer tracy.end();
const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
self.freeUnnamedConsts(decl_index);
@ -1056,8 +1057,8 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
const res = try codegen.generateFunction(
&self.base,
decl.srcLoc(),
func,
decl.srcLoc(mod),
func_index,
air,
liveness,
&code_buffer,
@ -1067,7 +1068,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
.ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -1076,7 +1077,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
@ -1096,8 +1097,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
const atom_index = try self.createAtom();
const sym_name = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
defer gpa.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
const index = unnamed_consts.items.len;
break :blk try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
@ -1110,7 +1110,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
sym.section_number = @intToEnum(coff.SectionNumber, self.rdata_section_index.? + 1);
}
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .none, .{
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .none, .{
.parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
var code = switch (res) {
@ -1123,7 +1123,7 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
},
};
const required_alignment = tv.ty.abiAlignment(self.base.options.target);
const required_alignment = tv.ty.abiAlignment(mod);
const atom = self.getAtomPtr(atom_index);
atom.size = @intCast(u32, code.len);
atom.getSymbolPtr(self).value = try self.allocateAtom(atom_index, atom.size, required_alignment);
@ -1141,25 +1141,24 @@ pub fn lowerUnnamedConst(self: *Coff, tv: TypedValue, decl_index: Module.Decl.In
pub fn updateDecl(
self: *Coff,
module: *Module,
mod: *Module,
decl_index: Module.Decl.Index,
) link.File.UpdateDeclError!void {
if (build_options.skip_non_native and builtin.object_format != .coff) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
}
const tracy = trace(@src());
defer tracy.end();
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
if (decl.val.tag() == .extern_fn) {
if (decl.val.getExternFunc(mod)) |_| {
return; // TODO Should we do more when the front-end has analyzed an extern decl?
}
if (decl.val.castTag(.variable)) |payload| {
const variable = payload.data;
if (decl.val.getVariable(mod)) |variable| {
if (variable.is_extern) {
return; // TODO Should we do more when the front-end has analyzed an extern decl?
}
@ -1172,8 +1171,8 @@ pub fn updateDecl(
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
@ -1183,7 +1182,7 @@ pub fn updateDecl(
.ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -1192,7 +1191,7 @@ pub fn updateDecl(
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
fn updateLazySymbolAtom(
@ -1217,8 +1216,8 @@ fn updateLazySymbolAtom(
const atom = self.getAtomPtr(atom_index);
const local_sym_index = atom.getSymbolIndex().?;
const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl|
mod.declPtr(owner_decl).srcLoc()
const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl|
mod.declPtr(owner_decl).srcLoc(mod)
else
Module.SrcLoc{
.file_scope = undefined,
@ -1262,7 +1261,8 @@ fn updateLazySymbolAtom(
}
pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Atom.Index {
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl());
const mod = self.base.options.module.?;
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};
const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) {
@ -1277,7 +1277,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Coff, sym: link.File.LazySymbol) !Ato
metadata.state.* = .pending_flush;
const atom = metadata.atom.*;
// anyerror needs to be deferred until flushModule
if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
.code => self.text_section_index.?,
.const_data => self.rdata_section_index.?,
});
@ -1299,10 +1299,11 @@ pub fn getOrCreateAtomForDecl(self: *Coff, decl_index: Module.Decl.Index) !Atom.
fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const mod = self.base.options.module.?;
const zig_ty = ty.zigTypeTag(mod);
const val = decl.val;
const index: u16 = blk: {
if (val.isUndefDeep()) {
if (val.isUndefDeep(mod)) {
// TODO in release-fast and release-small, we should put undef in .bss
break :blk self.data_section_index.?;
}
@ -1311,7 +1312,7 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
// TODO: what if this is a function pointer?
.Fn => break :blk self.text_section_index.?,
else => {
if (val.castTag(.variable)) |_| {
if (val.getVariable(mod)) |_| {
break :blk self.data_section_index.?;
}
break :blk self.rdata_section_index.?;
@ -1322,15 +1323,13 @@ fn getDeclOutputSection(self: *Coff, decl_index: Module.Decl.Index) u16 {
}
fn updateDeclCode(self: *Coff, decl_index: Module.Decl.Index, code: []u8, complex_type: coff.ComplexType) !void {
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer gpa.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
const required_alignment = decl.getAlignment(mod);
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
@ -1410,7 +1409,7 @@ pub fn freeDecl(self: *Coff, decl_index: Module.Decl.Index) void {
pub fn updateDeclExports(
self: *Coff,
module: *Module,
mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) link.File.UpdateDeclExportsError!void {
@ -1418,61 +1417,60 @@ pub fn updateDeclExports(
@panic("Attempted to compile for object format that was disabled by build configuration");
}
const ip = &mod.intern_pool;
if (build_options.have_llvm) {
// Even in the case of LLVM, we need to notice certain exported symbols in order to
// detect the default subsystem.
for (exports) |exp| {
const exported_decl = module.declPtr(exp.exported_decl);
if (exported_decl.getFunction() == null) continue;
const exported_decl = mod.declPtr(exp.exported_decl);
if (exported_decl.getOwnedFunctionIndex(mod) == .none) continue;
const winapi_cc = switch (self.base.options.target.cpu.arch) {
.x86 => std.builtin.CallingConvention.Stdcall,
else => std.builtin.CallingConvention.C,
};
const decl_cc = exported_decl.ty.fnCallingConvention();
if (decl_cc == .C and mem.eql(u8, exp.options.name, "main") and
const decl_cc = exported_decl.ty.fnCallingConvention(mod);
if (decl_cc == .C and ip.stringEqlSlice(exp.opts.name, "main") and
self.base.options.link_libc)
{
module.stage1_flags.have_c_main = true;
mod.stage1_flags.have_c_main = true;
} else if (decl_cc == winapi_cc and self.base.options.target.os.tag == .windows) {
if (mem.eql(u8, exp.options.name, "WinMain")) {
module.stage1_flags.have_winmain = true;
} else if (mem.eql(u8, exp.options.name, "wWinMain")) {
module.stage1_flags.have_wwinmain = true;
} else if (mem.eql(u8, exp.options.name, "WinMainCRTStartup")) {
module.stage1_flags.have_winmain_crt_startup = true;
} else if (mem.eql(u8, exp.options.name, "wWinMainCRTStartup")) {
module.stage1_flags.have_wwinmain_crt_startup = true;
} else if (mem.eql(u8, exp.options.name, "DllMainCRTStartup")) {
module.stage1_flags.have_dllmain_crt_startup = true;
if (ip.stringEqlSlice(exp.opts.name, "WinMain")) {
mod.stage1_flags.have_winmain = true;
} else if (ip.stringEqlSlice(exp.opts.name, "wWinMain")) {
mod.stage1_flags.have_wwinmain = true;
} else if (ip.stringEqlSlice(exp.opts.name, "WinMainCRTStartup")) {
mod.stage1_flags.have_winmain_crt_startup = true;
} else if (ip.stringEqlSlice(exp.opts.name, "wWinMainCRTStartup")) {
mod.stage1_flags.have_wwinmain_crt_startup = true;
} else if (ip.stringEqlSlice(exp.opts.name, "DllMainCRTStartup")) {
mod.stage1_flags.have_dllmain_crt_startup = true;
}
}
}
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports);
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports);
}
const tracy = trace(@src());
defer tracy.end();
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
log.debug("adding new export '{s}'", .{exp.options.name});
log.debug("adding new export '{}'", .{exp.opts.name.fmt(&mod.intern_pool)});
if (exp.options.section) |section_name| {
if (mod.intern_pool.stringToSliceUnwrap(exp.opts.section)) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
try module.failed_exports.putNoClobber(
module.gpa,
try mod.failed_exports.putNoClobber(
gpa,
exp,
try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
decl.srcLoc(mod),
"Unimplemented: ExportOptions.section",
.{},
),
@ -1481,13 +1479,13 @@ pub fn updateDeclExports(
}
}
if (exp.options.linkage == .LinkOnce) {
try module.failed_exports.putNoClobber(
module.gpa,
if (exp.opts.linkage == .LinkOnce) {
try mod.failed_exports.putNoClobber(
gpa,
exp,
try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
decl.srcLoc(mod),
"Unimplemented: GlobalLinkage.LinkOnce",
.{},
),
@ -1495,19 +1493,19 @@ pub fn updateDeclExports(
continue;
}
const sym_index = decl_metadata.getExport(self, exp.options.name) orelse blk: {
const sym_index = decl_metadata.getExport(self, mod.intern_pool.stringToSlice(exp.opts.name)) orelse blk: {
const sym_index = try self.allocateSymbol();
try decl_metadata.exports.append(gpa, sym_index);
break :blk sym_index;
};
const sym_loc = SymbolWithLoc{ .sym_index = sym_index, .file = null };
const sym = self.getSymbolPtr(sym_loc);
try self.setSymbolName(sym, exp.options.name);
try self.setSymbolName(sym, mod.intern_pool.stringToSlice(exp.opts.name));
sym.value = decl_sym.value;
sym.section_number = @intToEnum(coff.SectionNumber, self.text_section_index.? + 1);
sym.type = .{ .complex_type = .FUNCTION, .base_type = .NULL };
switch (exp.options.linkage) {
switch (exp.opts.linkage) {
.Strong => {
sym.storage_class = .EXTERNAL;
},
@ -1520,9 +1518,15 @@ pub fn updateDeclExports(
}
}
pub fn deleteDeclExport(self: *Coff, decl_index: Module.Decl.Index, name: []const u8) void {
pub fn deleteDeclExport(
self: *Coff,
decl_index: Module.Decl.Index,
name_ip: InternPool.NullTerminatedString,
) void {
if (self.llvm_object) |_| return;
const metadata = self.decls.getPtr(decl_index) orelse return;
const mod = self.base.options.module.?;
const name = mod.intern_pool.stringToSlice(name_ip);
const sym_index = metadata.getExportPtr(self, name) orelse return;
const gpa = self.base.allocator;
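
These Coff hunks round-trip names through mod.intern_pool.stringToSlice and compare them with ip.stringEqlSlice. A toy model of the storage behind that, assuming (purely for illustration) that a NullTerminatedString is a byte offset into one shared NUL-separated buffer; the real pool also deduplicates on intern:

const std = @import("std");

const NullTerminatedString = enum(u32) { _ };

const StringPool = struct {
    bytes: std.ArrayListUnmanaged(u8) = .{},

    fn intern(self: *StringPool, gpa: std.mem.Allocator, s: []const u8) !NullTerminatedString {
        const offset = @intCast(u32, self.bytes.items.len);
        try self.bytes.appendSlice(gpa, s);
        try self.bytes.append(gpa, 0);
        return @intToEnum(NullTerminatedString, offset);
    }

    // Free conversion back: slice from the offset to the terminating NUL.
    fn stringToSlice(self: *const StringPool, s: NullTerminatedString) [:0]const u8 {
        const start = @enumToInt(s);
        const end = std.mem.indexOfScalarPos(u8, self.bytes.items, start, 0).?;
        return self.bytes.items[start..end :0];
    }

    // Equality against a plain slice needs no allocation.
    fn stringEqlSlice(self: *const StringPool, s: NullTerminatedString, slice: []const u8) bool {
        return std.mem.eql(u8, self.stringToSlice(s), slice);
    }
};

test "round trip" {
    var pool = StringPool{};
    defer pool.bytes.deinit(std.testing.allocator);
    const name = try pool.intern(std.testing.allocator, "WinMain");
    try std.testing.expect(pool.stringEqlSlice(name, "WinMain"));
}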
@ -2538,6 +2542,7 @@ const ImportTable = @import("Coff/ImportTable.zig");
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Module = @import("../Module.zig");
const InternPool = @import("../InternPool.zig");
const Object = @import("Coff/Object.zig");
const Relocation = @import("Coff/Relocation.zig");
const TableSection = @import("table_section.zig").TableSection;

View File

@ -18,6 +18,7 @@ const LinkBlock = File.LinkBlock;
const LinkFn = File.LinkFn;
const LinkerLoad = @import("../codegen.zig").LinkerLoad;
const Module = @import("../Module.zig");
const InternPool = @import("../InternPool.zig");
const StringTable = @import("strtab.zig").StringTable;
const Type = @import("../type.zig").Type;
const Value = @import("../value.zig").Value;
@ -86,12 +87,7 @@ pub const DeclState = struct {
dbg_info: std.ArrayList(u8),
abbrev_type_arena: std.heap.ArenaAllocator,
abbrev_table: std.ArrayListUnmanaged(AbbrevEntry) = .{},
abbrev_resolver: std.HashMapUnmanaged(
Type,
u32,
Type.HashContext64,
std.hash_map.default_max_load_percentage,
) = .{},
abbrev_resolver: std.AutoHashMapUnmanaged(InternPool.Index, u32) = .{},
abbrev_relocs: std.ArrayListUnmanaged(AbbrevRelocation) = .{},
exprloc_relocs: std.ArrayListUnmanaged(ExprlocRelocation) = .{},
@ -141,9 +137,7 @@ pub const DeclState = struct {
/// @symbol signifies a type abbreviation positioned somewhere in the .debug_abbrev section
/// which we use as the target of the relocation.
fn addTypeRelocGlobal(self: *DeclState, atom_index: Atom.Index, ty: Type, offset: u32) !void {
const resolv = self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}) orelse blk: {
const resolv = self.abbrev_resolver.get(ty.toIntern()) orelse blk: {
const sym_index = @intCast(u32, self.abbrev_table.items.len);
try self.abbrev_table.append(self.gpa, .{
.atom_index = atom_index,
@ -151,12 +145,8 @@ pub const DeclState = struct {
.offset = undefined,
});
log.debug("%{d}: {}", .{ sym_index, ty.fmt(self.mod) });
try self.abbrev_resolver.putNoClobberContext(self.gpa, ty, sym_index, .{
.mod = self.mod,
});
break :blk self.abbrev_resolver.getContext(ty, .{
.mod = self.mod,
}).?;
try self.abbrev_resolver.putNoClobber(self.gpa, ty.toIntern(), sym_index);
break :blk sym_index;
};
log.debug("{x}: %{d} + 0", .{ offset, resolv });
try self.abbrev_relocs.append(self.gpa, .{
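
The resolver change above drops the custom hash context: ty.toIntern() yields the type's pool index, and with interned types two are equal exactly when their indices are. A reduced sketch of why a plain AutoHashMap over InternPool.Index now suffices:

const InternPoolIndex = enum(u32) { _ };

const Type = struct {
    ip_index: InternPoolIndex,

    fn toIntern(ty: Type) InternPoolIndex {
        return ty.ip_index;
    }

    fn eql(a: Type, b: Type) bool {
        return a.ip_index == b.ip_index; // no structural walk, no context
    }
};

test "type equality is index equality" {
    const a = Type{ .ip_index = @intToEnum(InternPoolIndex, 7) };
    const b = Type{ .ip_index = @intToEnum(InternPoolIndex, 7) };
    try @import("std").testing.expect(a.eql(b));
}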
@ -169,16 +159,16 @@ pub const DeclState = struct {
fn addDbgInfoType(
self: *DeclState,
module: *Module,
mod: *Module,
atom_index: Atom.Index,
ty: Type,
) error{OutOfMemory}!void {
const arena = self.abbrev_type_arena.allocator();
const dbg_info_buffer = &self.dbg_info;
const target = module.getTarget();
const target = mod.getTarget();
const target_endian = target.cpu.arch.endian();
switch (ty.zigTypeTag()) {
switch (ty.zigTypeTag(mod)) {
.NoReturn => unreachable,
.Void => {
try dbg_info_buffer.append(@enumToInt(AbbrevKind.pad1));
@ -189,12 +179,12 @@ pub const DeclState = struct {
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.boolean);
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
},
.Int => {
const info = ty.intInfo(target);
const info = ty.intInfo(mod);
try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type));
// DW.AT.encoding, DW.FORM.data1
@ -203,31 +193,30 @@ pub const DeclState = struct {
.unsigned => DW.ATE.unsigned,
});
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
},
.Optional => {
if (ty.isPtrLikeOptional()) {
if (ty.isPtrLikeOptional(mod)) {
try dbg_info_buffer.ensureUnusedCapacity(12);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.base_type));
// DW.AT.encoding, DW.FORM.data1
dbg_info_buffer.appendAssumeCapacity(DW.ATE.address);
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
} else {
// Non-pointer optionals are structs: struct { .maybe = *, .val = * }
var buf = try arena.create(Type.Payload.ElemType);
const payload_ty = ty.optionalChild(buf);
const payload_ty = ty.optionalChild(mod);
// DW.AT.structure_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
const abi_size = ty.abiSize(target);
const abi_size = ty.abiSize(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(7);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@ -251,14 +240,14 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, payload_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
const offset = abi_size - payload_ty.abiSize(target);
const offset = abi_size - payload_ty.abiSize(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), offset);
// DW.AT.structure_type delimit children
try dbg_info_buffer.append(0);
}
},
.Pointer => {
if (ty.isSlice()) {
if (ty.isSlice(mod)) {
// Slices are structs: struct { .ptr = *, .len = N }
const ptr_bits = target.ptrBitWidth();
const ptr_bytes = @intCast(u8, @divExact(ptr_bits, 8));
@ -266,9 +255,9 @@ pub const DeclState = struct {
try dbg_info_buffer.ensureUnusedCapacity(2);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(5);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@ -278,8 +267,7 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
var buf = try arena.create(Type.SlicePtrFieldTypeBuffer);
const ptr_ty = ty.slicePtrFieldType(buf);
const ptr_ty = ty.slicePtrFieldType(mod);
try self.addTypeRelocGlobal(atom_index, ptr_ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
try dbg_info_buffer.ensureUnusedCapacity(6);
@ -304,18 +292,18 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
}
},
.Array => {
// DW.AT.array_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_type));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, ty.childType(), @intCast(u32, index));
try self.addTypeRelocGlobal(atom_index, ty.childType(mod), @intCast(u32, index));
// DW.AT.subrange_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.array_dim));
// DW.AT.type, DW.FORM.ref4
@ -323,7 +311,7 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, Type.usize, @intCast(u32, index));
// DW.AT.count, DW.FORM.udata
const len = ty.arrayLenIncludingSentinel();
const len = ty.arrayLenIncludingSentinel(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), len);
// DW.AT.array_type delimit children
try dbg_info_buffer.append(0);
@ -332,15 +320,14 @@ pub const DeclState = struct {
// DW.AT.structure_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
switch (ty.tag()) {
.tuple, .anon_struct => {
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.anon_struct_type => |fields| {
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(module)});
try dbg_info_buffer.writer().print("{}\x00", .{ty.fmt(mod)});
const fields = ty.tupleFields();
for (fields.types, 0..) |field, field_index| {
for (fields.types, 0..) |field_ty, field_index| {
// DW.AT.member
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
@ -348,29 +335,32 @@ pub const DeclState = struct {
// DW.AT.type, DW.FORM.ref4
var index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, field, @intCast(u32, index));
try self.addTypeRelocGlobal(atom_index, field_ty.toType(), @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, target);
const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
else => {
.struct_type => |struct_type| s: {
const struct_obj = mod.structPtrUnwrap(struct_type.index) orelse break :s;
// DW.AT.name, DW.FORM.string
const struct_name = try ty.nameAllocArena(arena, module);
const struct_name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.ensureUnusedCapacity(struct_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(struct_name);
dbg_info_buffer.appendAssumeCapacity(0);
const struct_obj = ty.castTag(.@"struct").?.data;
if (struct_obj.layout == .Packed) {
log.debug("TODO implement .debug_info for packed structs", .{});
break :blk;
}
const fields = ty.structFields();
for (fields.keys(), 0..) |field_name, field_index| {
const field = fields.get(field_name).?;
if (!field.ty.hasRuntimeBits()) continue;
for (
struct_obj.fields.keys(),
struct_obj.fields.values(),
0..,
) |field_name_ip, field, field_index| {
if (!field.ty.hasRuntimeBits(mod)) continue;
const field_name = mod.intern_pool.stringToSlice(field_name_ip);
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
@ -382,10 +372,11 @@ pub const DeclState = struct {
try dbg_info_buffer.resize(index + 4);
try self.addTypeRelocGlobal(atom_index, field.ty, @intCast(u32, index));
// DW.AT.data_member_location, DW.FORM.udata
const field_off = ty.structFieldOffset(field_index, target);
const field_off = ty.structFieldOffset(field_index, mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), field_off);
}
},
else => unreachable,
}
// DW.AT.structure_type delimit children
@ -395,21 +386,16 @@ pub const DeclState = struct {
// DW.AT.enumeration_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(target));
try leb128.writeULEB128(dbg_info_buffer.writer(), ty.abiSize(mod));
// DW.AT.name, DW.FORM.string
const enum_name = try ty.nameAllocArena(arena, module);
const enum_name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.ensureUnusedCapacity(enum_name.len + 1);
dbg_info_buffer.appendSliceAssumeCapacity(enum_name);
dbg_info_buffer.appendAssumeCapacity(0);
const fields = ty.enumFields();
const values: ?Module.EnumFull.ValueMap = switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => ty.cast(Type.Payload.EnumFull).?.data.values,
.enum_simple => null,
.enum_numbered => ty.castTag(.enum_numbered).?.data.values,
else => unreachable,
};
for (fields.keys(), 0..) |field_name, field_i| {
const enum_type = mod.intern_pool.indexToKey(ty.ip_index).enum_type;
for (enum_type.names, 0..) |field_name_index, field_i| {
const field_name = mod.intern_pool.stringToSlice(field_name_index);
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant));
@ -417,15 +403,14 @@ pub const DeclState = struct {
dbg_info_buffer.appendSliceAssumeCapacity(field_name);
dbg_info_buffer.appendAssumeCapacity(0);
// DW.AT.const_value, DW.FORM.data8
const value: u64 = if (values) |vals| value: {
if (vals.count() == 0) break :value @intCast(u64, field_i); // auto-numbered
const value = vals.keys()[field_i];
const value: u64 = value: {
if (enum_type.values.len == 0) break :value field_i; // auto-numbered
const value = enum_type.values[field_i];
// TODO do not assume a 64bit enum value - could be bigger.
// See https://github.com/ziglang/zig/issues/645
var int_buffer: Value.Payload.U64 = undefined;
const field_int_val = value.enumToInt(ty, &int_buffer);
break :value @bitCast(u64, field_int_val.toSignedInt(target));
} else @intCast(u64, field_i);
const field_int_val = try value.toValue().enumToInt(ty, mod);
break :value @bitCast(u64, field_int_val.toSignedInt(mod));
};
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), value, target_endian);
}
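The enumerator value selection above reduces to one rule; a minimal standalone restatement (hypothetical helper, sharing the block's 64-bit assumption):
fn enumeratorConstValue(values: []const u64, field_i: usize) u64 {
    // An enum without explicit values is auto-numbered 0, 1, 2, ...
    if (values.len == 0) return @intCast(u64, field_i);
    // Otherwise the declared value is used, e.g. `enum(u8) { a = 3 }` yields 3.
    return values[field_i];
}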
@ -433,12 +418,12 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
},
.Union => {
const layout = ty.unionGetLayout(target);
const union_obj = ty.cast(Type.Payload.Union).?.data;
const layout = ty.unionGetLayout(mod);
const union_obj = mod.typeToUnion(ty).?;
const payload_offset = if (layout.tag_align >= layout.payload_align) layout.tag_size else 0;
const tag_offset = if (layout.tag_align >= layout.payload_align) 0 else layout.payload_size;
const is_tagged = layout.tag_size > 0;
const union_name = try ty.nameAllocArena(arena, module);
const union_name = try ty.nameAllocArena(arena, mod);
// TODO this is temporary to match the current state of unions in Zig - we don't yet have
// safety checks implemented, meaning the implicit tag is not yet stored and generated
@ -478,14 +463,15 @@ pub const DeclState = struct {
try dbg_info_buffer.writer().print("{s}\x00", .{union_name});
}
const fields = ty.unionFields();
const fields = ty.unionFields(mod);
for (fields.keys()) |field_name| {
const field = fields.get(field_name).?;
if (!field.ty.hasRuntimeBits()) continue;
if (!field.ty.hasRuntimeBits(mod)) continue;
// DW.AT.member
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_member));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.writer().print("{s}\x00", .{field_name});
try dbg_info_buffer.appendSlice(mod.intern_pool.stringToSlice(field_name));
try dbg_info_buffer.append(0);
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.resize(index + 4);
@ -517,30 +503,30 @@ pub const DeclState = struct {
.ErrorSet => {
try addDbgInfoErrorSet(
self.abbrev_type_arena.allocator(),
module,
mod,
ty,
target,
&self.dbg_info,
);
},
.ErrorUnion => {
const error_ty = ty.errorUnionSet();
const payload_ty = ty.errorUnionPayload();
const payload_align = if (payload_ty.isNoReturn()) 0 else payload_ty.abiAlignment(target);
const error_align = Type.anyerror.abiAlignment(target);
const abi_size = ty.abiSize(target);
const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(target) else 0;
const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(target);
const error_ty = ty.errorUnionSet(mod);
const payload_ty = ty.errorUnionPayload(mod);
const payload_align = if (payload_ty.isNoReturn(mod)) 0 else payload_ty.abiAlignment(mod);
const error_align = Type.anyerror.abiAlignment(mod);
const abi_size = ty.abiSize(mod);
const payload_off = if (error_align >= payload_align) Type.anyerror.abiSize(mod) else 0;
const error_off = if (error_align >= payload_align) 0 else payload_ty.abiSize(mod);
// DW.AT.structure_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.struct_type));
// DW.AT.byte_size, DW.FORM.udata
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
const name = try ty.nameAllocArena(arena, module);
const name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.writer().print("{s}\x00", .{name});
if (!payload_ty.isNoReturn()) {
if (!payload_ty.isNoReturn(mod)) {
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(7);
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.struct_member));
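The offset arithmetic above orders the error code and the payload by alignment; a worked example, assuming anyerror's usual 2-byte ABI size:
// anyerror!u64: payload_align = 8 > error_align = 2, so
//   payload_off = 0, error_off = abiSize(u64) = 8, abi_size = 16 (padded to align 8).
// When error_align >= payload_align the order flips: the error code sits at
// offset 0 and the payload follows at abiSize(anyerror).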
@ -685,9 +671,10 @@ pub const DeclState = struct {
const atom_index = self.di_atom_decls.get(owner_decl).?;
const name_with_null = name.ptr[0 .. name.len + 1];
try dbg_info.append(@enumToInt(AbbrevKind.variable));
const target = self.mod.getTarget();
const mod = self.mod;
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
const child_ty = if (is_ptr) ty.childType() else ty;
const child_ty = if (is_ptr) ty.childType(mod) else ty;
switch (loc) {
.register => |reg| {
@ -790,9 +777,9 @@ pub const DeclState = struct {
const fixup = dbg_info.items.len;
dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT.location, DW.FORM.exprloc
1,
if (child_ty.isSignedInt()) DW.OP.consts else DW.OP.constu,
if (child_ty.isSignedInt(mod)) DW.OP.consts else DW.OP.constu,
});
if (child_ty.isSignedInt()) {
if (child_ty.isSignedInt(mod)) {
try leb128.writeILEB128(dbg_info.writer(), @bitCast(i64, x));
} else {
try leb128.writeULEB128(dbg_info.writer(), x);
@ -805,7 +792,7 @@ pub const DeclState = struct {
// DW.AT.location, DW.FORM.exprloc
// uleb128(exprloc_len)
// DW.OP.implicit_value uleb128(len_of_bytes) bytes
const abi_size = @intCast(u32, child_ty.abiSize(target));
const abi_size = @intCast(u32, child_ty.abiSize(mod));
var implicit_value_len = std.ArrayList(u8).init(self.gpa);
defer implicit_value_len.deinit();
try leb128.writeULEB128(implicit_value_len.writer(), abi_size);
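Spelled out, the bytes assembled here form a single DW.FORM.exprloc whose value is materialized inline (a sketch of the encoding implied by the comment above):
// uleb128(1 + implicit_value_len.items.len + abi_size)   total exprloc length
// DW.OP.implicit_value
// uleb128(abi_size)                                      operand byte count
// <abi_size bytes of the constant itself>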
@ -964,8 +951,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
defer tracy.end();
const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.allocator.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
log.debug("initDeclState {s}{*}", .{ decl_name, decl });
@ -979,14 +965,14 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
assert(decl.has_tv);
switch (decl.ty.zigTypeTag()) {
switch (decl.ty.zigTypeTag(mod)) {
.Fn => {
_ = try self.getOrCreateAtomForDecl(.src_fn, decl_index);
// For functions we need to add a prologue to the debug line program.
try dbg_line_buffer.ensureTotalCapacity(26);
const func = decl.val.castTag(.function).?.data;
const func = decl.val.getFunction(mod).?;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.src_line,
func.lbrace_line,
@ -1026,8 +1012,8 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
const decl_name_with_null = decl_name[0 .. decl_name.len + 1];
try dbg_info_buffer.ensureUnusedCapacity(25 + decl_name_with_null.len);
const fn_ret_type = decl.ty.fnReturnType();
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits();
const fn_ret_type = decl.ty.fnReturnType(mod);
const fn_ret_has_bits = fn_ret_type.hasRuntimeBits(mod);
if (fn_ret_has_bits) {
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.subprogram));
} else {
@ -1059,7 +1045,7 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index)
pub fn commitDeclState(
self: *Dwarf,
module: *Module,
mod: *Module,
decl_index: Module.Decl.Index,
sym_addr: u64,
sym_size: u64,
@ -1071,12 +1057,12 @@ pub fn commitDeclState(
const gpa = self.allocator;
var dbg_line_buffer = &decl_state.dbg_line;
var dbg_info_buffer = &decl_state.dbg_info;
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
const target_endian = self.target.cpu.arch.endian();
assert(decl.has_tv);
switch (decl.ty.zigTypeTag()) {
switch (decl.ty.zigTypeTag(mod)) {
.Fn => {
// Since the Decl is a function, we need to update the .debug_line program.
// Perform the relocations based on vaddr.
@ -1271,10 +1257,11 @@ pub fn commitDeclState(
const symbol = &decl_state.abbrev_table.items[sym_index];
const ty = symbol.type;
const deferred: bool = blk: {
if (ty.isAnyError()) break :blk true;
switch (ty.tag()) {
.error_set_inferred => {
if (!ty.castTag(.error_set_inferred).?.data.is_resolved) break :blk true;
if (ty.isAnyError(mod)) break :blk true;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.inferred_error_set_type => |ies_index| {
const ies = mod.inferredErrorSetPtr(ies_index);
if (!ies.is_resolved) break :blk true;
},
else => {},
}
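Both relocation loops in commitDeclState apply the same deferral test; a condensed sketch (hypothetical helper, same calls as the hunks above):
fn isDeferred(mod: *Module, ty: Type) bool {
    // anyerror and unresolved inferred error sets cannot be lowered yet; their
    // relocations are skipped here and handled once resolution is complete.
    if (ty.isAnyError(mod)) return true;
    return switch (mod.intern_pool.indexToKey(ty.ip_index)) {
        .inferred_error_set_type => |ies_index| !mod.inferredErrorSetPtr(ies_index).is_resolved,
        else => false,
    };
}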
@ -1283,11 +1270,10 @@ pub fn commitDeclState(
if (deferred) continue;
symbol.offset = @intCast(u32, dbg_info_buffer.items.len);
try decl_state.addDbgInfoType(module, di_atom_index, ty);
try decl_state.addDbgInfoType(mod, di_atom_index, ty);
}
}
log.debug("updateDeclDebugInfoAllocation for '{s}'", .{decl.name});
try self.updateDeclDebugInfoAllocation(di_atom_index, @intCast(u32, dbg_info_buffer.items.len));
while (decl_state.abbrev_relocs.popOrNull()) |reloc| {
@ -1295,10 +1281,11 @@ pub fn commitDeclState(
const symbol = decl_state.abbrev_table.items[target];
const ty = symbol.type;
const deferred: bool = blk: {
if (ty.isAnyError()) break :blk true;
switch (ty.tag()) {
.error_set_inferred => {
if (!ty.castTag(.error_set_inferred).?.data.is_resolved) break :blk true;
if (ty.isAnyError(mod)) break :blk true;
switch (mod.intern_pool.indexToKey(ty.ip_index)) {
.inferred_error_set_type => |ies_index| {
const ies = mod.inferredErrorSetPtr(ies_index);
if (!ies.is_resolved) break :blk true;
},
else => {},
}
@ -1319,7 +1306,7 @@ pub fn commitDeclState(
reloc.offset,
value,
target,
ty.fmt(module),
ty.fmt(mod),
});
mem.writeInt(
u32,
@ -1358,7 +1345,6 @@ pub fn commitDeclState(
}
}
log.debug("writeDeclDebugInfo for '{s}", .{decl.name});
try self.writeDeclDebugInfo(di_atom_index, dbg_info_buffer.items);
}
@ -1527,7 +1513,7 @@ fn writeDeclDebugInfo(self: *Dwarf, atom_index: Atom.Index, dbg_info_buf: []cons
}
}
pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.Decl.Index) !void {
pub fn updateDeclLineNumber(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !void {
const tracy = trace(@src());
defer tracy.end();
@ -1535,8 +1521,8 @@ pub fn updateDeclLineNumber(self: *Dwarf, module: *Module, decl_index: Module.De
const atom = self.getAtom(.src_fn, atom_index);
if (atom.len == 0) return;
const decl = module.declPtr(decl_index);
const func = decl.val.castTag(.function).?.data;
const decl = mod.declPtr(decl_index);
const func = decl.val.getFunction(mod).?;
log.debug("decl.src_line={d}, func.lbrace_line={d}, func.rbrace_line={d}", .{
decl.src_line,
func.lbrace_line,
@ -2534,18 +2520,14 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
defer arena_alloc.deinit();
const arena = arena_alloc.allocator();
const error_set = try arena.create(Module.ErrorSet);
const error_ty = try Type.Tag.error_set.create(arena, error_set);
var names = Module.ErrorSet.NameMap{};
try names.ensureUnusedCapacity(arena, module.global_error_set.count());
var it = module.global_error_set.keyIterator();
while (it.next()) |key| {
names.putAssumeCapacityNoClobber(key.*, {});
}
error_set.names = names;
// TODO: don't create a zig type for this, just make the dwarf info
// without touching the zig type system.
const names = try arena.dupe(InternPool.NullTerminatedString, module.global_error_set.keys());
std.mem.sort(InternPool.NullTerminatedString, names, {}, InternPool.NullTerminatedString.indexLessThan);
const error_ty = try module.intern(.{ .error_set_type = .{ .names = names } });
var dbg_info_buffer = std.ArrayList(u8).init(arena);
try addDbgInfoErrorSet(arena, module, error_ty, self.target, &dbg_info_buffer);
try addDbgInfoErrorSet(arena, module, error_ty.toType(), self.target, &dbg_info_buffer);
const di_atom_index = try self.createAtom(.di_atom);
log.debug("updateDeclDebugInfoAllocation in flushModule", .{});
@ -2598,7 +2580,7 @@ pub fn flushModule(self: *Dwarf, module: *Module) !void {
fn addDIFile(self: *Dwarf, mod: *Module, decl_index: Module.Decl.Index) !u28 {
const decl = mod.declPtr(decl_index);
const file_scope = decl.getFileScope();
const file_scope = decl.getFileScope(mod);
const gop = try self.di_files.getOrPut(self.allocator, file_scope);
if (!gop.found_existing) {
switch (self.bin_file.tag) {
@ -2663,7 +2645,7 @@ fn genIncludeDirsAndFileNames(self: *Dwarf, arena: Allocator) !struct {
fn addDbgInfoErrorSet(
arena: Allocator,
module: *Module,
mod: *Module,
ty: Type,
target: std.Target,
dbg_info_buffer: *std.ArrayList(u8),
@ -2673,10 +2655,10 @@ fn addDbgInfoErrorSet(
// DW.AT.enumeration_type
try dbg_info_buffer.append(@enumToInt(AbbrevKind.enum_type));
// DW.AT.byte_size, DW.FORM.udata
const abi_size = Type.anyerror.abiSize(target);
const abi_size = Type.anyerror.abiSize(mod);
try leb128.writeULEB128(dbg_info_buffer.writer(), abi_size);
// DW.AT.name, DW.FORM.string
const name = try ty.nameAllocArena(arena, module);
const name = try ty.nameAllocArena(arena, mod);
try dbg_info_buffer.writer().print("{s}\x00", .{name});
// DW.AT.enumerator
@ -2689,9 +2671,10 @@ fn addDbgInfoErrorSet(
// DW.AT.const_value, DW.FORM.data8
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);
const error_names = ty.errorSetNames();
for (error_names) |error_name| {
const kv = module.getErrorValue(error_name) catch unreachable;
const error_names = ty.errorSetNames(mod);
for (error_names) |error_name_ip| {
const int = try mod.getErrorValue(error_name_ip);
const error_name = mod.intern_pool.stringToSlice(error_name_ip);
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@enumToInt(AbbrevKind.enum_variant));
@ -2699,7 +2682,7 @@ fn addDbgInfoErrorSet(
dbg_info_buffer.appendSliceAssumeCapacity(error_name);
dbg_info_buffer.appendAssumeCapacity(0);
// DW.AT.const_value, DW.FORM.data8
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), kv.value, target_endian);
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), int, target_endian);
}
// DW.AT.enumeration_type delimit children
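As with the struct case earlier, the emitted shape reads best as a tree; a sketch of what addDbgInfoErrorSet produces (assumed DWARF correspondence):
// DW_TAG_enumeration_type  DW_AT_byte_size = abiSize(anyerror), DW_AT_name = set name
//   DW_TAG_enumerator  with value 0 (its name sits outside this hunk)
//   DW_TAG_enumerator  per error name, value = mod.getErrorValue(name)
//   (a 0 byte delimits the children)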

View File

@ -28,6 +28,7 @@ const File = link.File;
const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Module = @import("../Module.zig");
const InternPool = @import("../InternPool.zig");
const Package = @import("../Package.zig");
const StringTable = @import("strtab.zig").StringTable;
const TableSection = @import("table_section.zig").TableSection;
@ -2414,7 +2415,8 @@ pub fn freeDecl(self: *Elf, decl_index: Module.Decl.Index) void {
}
pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Index {
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl());
const mod = self.base.options.module.?;
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};
const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) {
@ -2429,7 +2431,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *Elf, sym: File.LazySymbol) !Atom.Inde
metadata.state.* = .pending_flush;
const atom = metadata.atom.*;
// anyerror needs to be deferred until flushModule
if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
.code => self.text_section_index.?,
.const_data => self.rodata_section_index.?,
});
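The getOrPut/errdefer pairing used here is a small idiom worth isolating; a generic sketch (hypothetical map and key, same shape as the hunk):
const gop = try map.getOrPut(allocator, key);
// Roll the reservation back if any later step fails, so the map never keeps a
// default-initialized entry for a symbol that was never actually created.
errdefer _ = if (!gop.found_existing) map.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};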
@ -2449,12 +2451,13 @@ pub fn getOrCreateAtomForDecl(self: *Elf, decl_index: Module.Decl.Index) !Atom.I
}
fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
const decl = self.base.options.module.?.declPtr(decl_index);
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const ty = decl.ty;
const zig_ty = ty.zigTypeTag();
const zig_ty = ty.zigTypeTag(mod);
const val = decl.val;
const shdr_index: u16 = blk: {
if (val.isUndefDeep()) {
if (val.isUndefDeep(mod)) {
// TODO in release-fast and release-small, we should put undef in .bss
break :blk self.data_section_index.?;
}
@ -2463,7 +2466,7 @@ fn getDeclShdrIndex(self: *Elf, decl_index: Module.Decl.Index) u16 {
// TODO: what if this is a function pointer?
.Fn => break :blk self.text_section_index.?,
else => {
if (val.castTag(.variable)) |_| {
if (val.getVariable(mod)) |_| {
break :blk self.data_section_index.?;
}
break :blk self.rodata_section_index.?;
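The branch structure amounts to a short placement policy; condensed (a sketch naming the sections the indices refer to):
// undef value      -> .data    (per the TODO, release modes should use .bss)
// function         -> .text
// variable         -> .data    (mutable at runtime)
// any other value  -> .rodata  (immutable constant)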
@ -2478,11 +2481,10 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
const required_alignment = decl.getAlignment(self.base.options.target);
const required_alignment = decl.getAlignment(mod);
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
@ -2572,19 +2574,20 @@ fn updateDeclCode(self: *Elf, decl_index: Module.Decl.Index, code: []const u8, s
return local_sym;
}
pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *Elf, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
}
const tracy = trace(@src());
defer tracy.end();
const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
self.freeUnnamedConsts(decl_index);
@ -2593,28 +2596,28 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
const res = if (decl_state) |*ds|
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{
.dwarf = ds,
})
else
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none);
const code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_FUNC);
if (decl_state) |*ds| {
try self.dwarf.?.commitDeclState(
module,
mod,
decl_index,
local_sym.st_value,
local_sym.st_size,
@ -2624,31 +2627,30 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
pub fn updateDecl(
self: *Elf,
module: *Module,
mod: *Module,
decl_index: Module.Decl.Index,
) File.UpdateDeclError!void {
if (build_options.skip_non_native and builtin.object_format != .elf) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
}
const tracy = trace(@src());
defer tracy.end();
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
if (decl.val.tag() == .extern_fn) {
if (decl.val.getExternFunc(mod)) |_| {
return; // TODO Should we do more when front-end analyzed extern decl?
}
if (decl.val.castTag(.variable)) |payload| {
const variable = payload.data;
if (decl.val.getVariable(mod)) |variable| {
if (variable.is_extern) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@ -2661,13 +2663,13 @@ pub fn updateDecl(
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(module, decl_index) else null;
var decl_state: ?Dwarf.DeclState = if (self.dwarf) |*dw| try dw.initDeclState(mod, decl_index) else null;
defer if (decl_state) |*ds| ds.deinit();
// TODO implement .debug_info for global variables
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
@ -2676,7 +2678,7 @@ pub fn updateDecl(
.parent_atom_index = atom.getSymbolIndex().?,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
@ -2687,7 +2689,7 @@ pub fn updateDecl(
.ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -2695,7 +2697,7 @@ pub fn updateDecl(
const local_sym = try self.updateDeclCode(decl_index, code, elf.STT_OBJECT);
if (decl_state) |*ds| {
try self.dwarf.?.commitDeclState(
module,
mod,
decl_index,
local_sym.st_value,
local_sym.st_size,
@ -2705,7 +2707,7 @@ pub fn updateDecl(
// Since we updated the vaddr and the size, each corresponding export
// symbol also needs to be updated.
return self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
return self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
fn updateLazySymbolAtom(
@ -2734,8 +2736,8 @@ fn updateLazySymbolAtom(
const atom = self.getAtom(atom_index);
const local_sym_index = atom.getSymbolIndex().?;
const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl|
mod.declPtr(owner_decl).srcLoc()
const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl|
mod.declPtr(owner_decl).srcLoc(mod)
else
Module.SrcLoc{
.file_scope = undefined,
@ -2800,8 +2802,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
const decl = mod.declPtr(decl_index);
const name_str_index = blk: {
const decl_name = try decl.getFullyQualifiedName(mod);
defer gpa.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
const index = unnamed_consts.items.len;
const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
defer gpa.free(name);
@ -2811,7 +2812,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
const atom_index = try self.createAtom();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .{
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), typed_value, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
@ -2826,7 +2827,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
},
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const required_alignment = typed_value.ty.abiAlignment(mod);
const shdr_index = self.rodata_section_index.?;
const phdr_index = self.sections.items(.phdr_index)[shdr_index];
const local_sym = self.getAtom(atom_index).getSymbolPtr(self);
@ -2852,7 +2853,7 @@ pub fn lowerUnnamedConst(self: *Elf, typed_value: TypedValue, decl_index: Module
pub fn updateDeclExports(
self: *Elf,
module: *Module,
mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) File.UpdateDeclExportsError!void {
@ -2860,7 +2861,7 @@ pub fn updateDeclExports(
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(module, decl_index, exports);
if (self.llvm_object) |llvm_object| return llvm_object.updateDeclExports(mod, decl_index, exports);
}
const tracy = trace(@src());
@ -2868,7 +2869,7 @@ pub fn updateDeclExports(
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
@ -2878,40 +2879,41 @@ pub fn updateDeclExports(
try self.global_symbols.ensureUnusedCapacity(gpa, exports.len);
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);
if (exp.opts.section.unwrap()) |section_name| {
if (!mod.intern_pool.stringEqlSlice(section_name, ".text")) {
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: ExportOptions.section", .{}),
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(mod), "Unimplemented: ExportOptions.section", .{}),
);
continue;
}
}
const stb_bits: u8 = switch (exp.options.linkage) {
const stb_bits: u8 = switch (exp.opts.linkage) {
.Internal => elf.STB_LOCAL,
.Strong => blk: {
const entry_name = self.base.options.entry orelse "_start";
if (mem.eql(u8, exp.options.name, entry_name)) {
if (mem.eql(u8, exp_name, entry_name)) {
self.entry_addr = decl_sym.st_value;
}
break :blk elf.STB_GLOBAL;
},
.Weak => elf.STB_WEAK,
.LinkOnce => {
try module.failed_exports.ensureUnusedCapacity(module.gpa, 1);
module.failed_exports.putAssumeCapacityNoClobber(
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "Unimplemented: GlobalLinkage.LinkOnce", .{}),
try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(mod), "Unimplemented: GlobalLinkage.LinkOnce", .{}),
);
continue;
},
};
const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
if (decl_metadata.getExport(self, exp.options.name)) |i| {
if (decl_metadata.getExport(self, exp_name)) |i| {
const sym = &self.global_symbols.items[i];
sym.* = .{
.st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_name = try self.shstrtab.insert(gpa, exp_name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
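Condensed, the export handling above maps Zig's GlobalLinkage onto ELF symbol bindings (sketch; values straight from the hunk):
// .Internal -> STB_LOCAL
// .Strong   -> STB_GLOBAL; a name equal to the entry symbol (default "_start")
//              also records self.entry_addr
// .Weak     -> STB_WEAK
// .LinkOnce -> rejected with "Unimplemented: GlobalLinkage.LinkOnce"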
@ -2925,7 +2927,7 @@ pub fn updateDeclExports(
};
try decl_metadata.exports.append(gpa, @intCast(u32, i));
self.global_symbols.items[i] = .{
.st_name = try self.shstrtab.insert(gpa, exp.options.name),
.st_name = try self.shstrtab.insert(gpa, exp_name),
.st_info = (stb_bits << 4) | stt_bits,
.st_other = 0,
.st_shndx = shdr_index,
@ -2942,8 +2944,7 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.In
defer tracy.end();
const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
@ -2953,11 +2954,15 @@ pub fn updateDeclLineNumber(self: *Elf, mod: *Module, decl_index: Module.Decl.In
}
}
pub fn deleteDeclExport(self: *Elf, decl_index: Module.Decl.Index, name: []const u8) void {
pub fn deleteDeclExport(
self: *Elf,
decl_index: Module.Decl.Index,
name: InternPool.NullTerminatedString,
) void {
if (self.llvm_object) |_| return;
const metadata = self.decls.getPtr(decl_index) orelse return;
const sym_index = metadata.getExportPtr(self, name) orelse return;
log.debug("deleting export '{s}'", .{name});
const mod = self.base.options.module.?;
const sym_index = metadata.getExportPtr(self, mod.intern_pool.stringToSlice(name)) orelse return;
self.global_symbol_free_list.append(self.base.allocator, sym_index.*) catch {};
self.global_symbols.items[sym_index.*].st_info = 0;
sym_index.* = 0;

View File

@ -40,6 +40,7 @@ const Liveness = @import("../Liveness.zig");
const LlvmObject = @import("../codegen/llvm.zig").Object;
const Md5 = std.crypto.hash.Md5;
const Module = @import("../Module.zig");
const InternPool = @import("../InternPool.zig");
const Relocation = @import("MachO/Relocation.zig");
const StringTable = @import("strtab.zig").StringTable;
const TableSection = @import("table_section.zig").TableSection;
@ -1847,18 +1848,19 @@ fn addStubEntry(self: *MachO, target: SymbolWithLoc) !void {
self.markRelocsDirtyByTarget(target);
}
pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *MachO, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness);
if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
}
const tracy = trace(@src());
defer tracy.end();
const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
self.freeUnnamedConsts(decl_index);
@ -1868,23 +1870,23 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
defer code_buffer.deinit();
var decl_state = if (self.d_sym) |*d_sym|
try d_sym.dwarf.initDeclState(module, decl_index)
try d_sym.dwarf.initDeclState(mod, decl_index)
else
null;
defer if (decl_state) |*ds| ds.deinit();
const res = if (decl_state) |*ds|
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{
try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .{
.dwarf = ds,
})
else
try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none);
try codegen.generateFunction(&self.base, decl.srcLoc(mod), func_index, air, liveness, &code_buffer, .none);
var code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -1893,7 +1895,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
if (decl_state) |*ds| {
try self.d_sym.?.dwarf.commitDeclState(
module,
mod,
decl_index,
addr,
self.getAtom(atom_index).size,
@ -1903,7 +1905,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
try self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Module.Decl.Index) !u32 {
@ -1912,16 +1914,15 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
var code_buffer = std.ArrayList(u8).init(gpa);
defer code_buffer.deinit();
const module = self.base.options.module.?;
const mod = self.base.options.module.?;
const gop = try self.unnamed_const_atoms.getOrPut(gpa, decl_index);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
const unnamed_consts = gop.value_ptr;
const decl = module.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(module);
defer gpa.free(decl_name);
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
const name_str_index = blk: {
const index = unnamed_consts.items.len;
@ -1935,20 +1936,20 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
const atom_index = try self.createAtom();
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), typed_value, &code_buffer, .none, .{
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), typed_value, &code_buffer, .none, .{
.parent_atom_index = self.getAtom(atom_index).getSymbolIndex().?,
});
var code = switch (res) {
.ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
log.err("{s}", .{em.msg});
return error.CodegenFail;
},
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const required_alignment = typed_value.ty.abiAlignment(mod);
const atom = self.getAtomPtr(atom_index);
atom.size = code.len;
// TODO: work out logic for disambiguating functions from function pointers
@ -1971,33 +1972,32 @@ pub fn lowerUnnamedConst(self: *MachO, typed_value: TypedValue, decl_index: Modu
return atom.getSymbolIndex().?;
}
pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
pub fn updateDecl(self: *MachO, mod: *Module, decl_index: Module.Decl.Index) !void {
if (build_options.skip_non_native and builtin.object_format != .macho) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl_index);
if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(mod, decl_index);
}
const tracy = trace(@src());
defer tracy.end();
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
if (decl.val.tag() == .extern_fn) {
if (decl.val.getExternFunc(mod)) |_| {
return; // TODO Should we do more when front-end analyzed extern decl?
}
if (decl.val.castTag(.variable)) |payload| {
const variable = payload.data;
if (decl.val.getVariable(mod)) |variable| {
if (variable.is_extern) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
}
const is_threadlocal = if (decl.val.castTag(.variable)) |payload|
payload.data.is_threadlocal and !self.base.options.single_threaded
const is_threadlocal = if (decl.val.getVariable(mod)) |variable|
variable.is_threadlocal and !self.base.options.single_threaded
else
false;
if (is_threadlocal) return self.updateThreadlocalVariable(module, decl_index);
if (is_threadlocal) return self.updateThreadlocalVariable(mod, decl_index);
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
const sym_index = self.getAtom(atom_index).getSymbolIndex().?;
@ -2007,14 +2007,14 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
defer code_buffer.deinit();
var decl_state: ?Dwarf.DeclState = if (self.d_sym) |*d_sym|
try d_sym.dwarf.initDeclState(module, decl_index)
try d_sym.dwarf.initDeclState(mod, decl_index)
else
null;
defer if (decl_state) |*ds| ds.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
@ -2023,7 +2023,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
.parent_atom_index = sym_index,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
@ -2034,7 +2034,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
.ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -2042,7 +2042,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
if (decl_state) |*ds| {
try self.d_sym.?.dwarf.commitDeclState(
module,
mod,
decl_index,
addr,
self.getAtom(atom_index).size,
@ -2052,7 +2052,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl_index: Module.Decl.Index)
// Since we updated the vaddr and the size, each corresponding export symbol also
// needs to be updated.
try self.updateDeclExports(module, decl_index, module.getDeclExports(decl_index));
try self.updateDeclExports(mod, decl_index, mod.getDeclExports(decl_index));
}
fn updateLazySymbolAtom(
@ -2081,8 +2081,8 @@ fn updateLazySymbolAtom(
const atom = self.getAtomPtr(atom_index);
const local_sym_index = atom.getSymbolIndex().?;
const src = if (sym.ty.getOwnerDeclOrNull()) |owner_decl|
mod.declPtr(owner_decl).srcLoc()
const src = if (sym.ty.getOwnerDeclOrNull(mod)) |owner_decl|
mod.declPtr(owner_decl).srcLoc(mod)
else
Module.SrcLoc{
.file_scope = undefined,
@ -2126,7 +2126,8 @@ fn updateLazySymbolAtom(
}
pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.Index {
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl());
const mod = self.base.options.module.?;
const gop = try self.lazy_syms.getOrPut(self.base.allocator, sym.getDecl(mod));
errdefer _ = if (!gop.found_existing) self.lazy_syms.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};
const metadata: struct { atom: *Atom.Index, state: *LazySymbolMetadata.State } = switch (sym.kind) {
@ -2144,7 +2145,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In
metadata.state.* = .pending_flush;
const atom = metadata.atom.*;
// anyerror needs to be deferred until flushModule
if (sym.getDecl() != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
if (sym.getDecl(mod) != .none) try self.updateLazySymbolAtom(sym, atom, switch (sym.kind) {
.code => self.text_section_index.?,
.const_data => self.data_const_section_index.?,
});
@ -2152,6 +2153,7 @@ pub fn getOrCreateAtomForLazySymbol(self: *MachO, sym: File.LazySymbol) !Atom.In
}
fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.Decl.Index) !void {
const mod = self.base.options.module.?;
// Lowering a TLV on macOS involves two stages:
// 1. first we lower the initializer into the appropriate section (__thread_data or __thread_bss)
// 2. next, we create a corresponding threadlocal variable descriptor in __thread_vars
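A sketch of the two artifacts those stages produce (descriptor layout as commonly documented for Mach-O TLVs; an assumption here, not something this diff defines):
// __thread_data / __thread_bss: the initializer bytes, symbol "<decl>$tlv$init"
// __thread_vars: one descriptor per variable, roughly
//   { thunk: *const fn (...), key: usize, offset: usize }
// where the thunk slot is bound to dyld's __tlv_bootstrap and offset points
// at the $tlv$init bytes.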
@ -2175,9 +2177,9 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D
const decl = module.declPtr(decl_index);
const decl_metadata = self.decls.get(decl_index).?;
const decl_val = decl.val.castTag(.variable).?.data.init;
const decl_val = decl.val.getVariable(mod).?.init.toValue();
const res = if (decl_state) |*ds|
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{
@ -2186,7 +2188,7 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D
.parent_atom_index = init_sym_index,
})
else
try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .none, .{
@ -2202,10 +2204,9 @@ fn updateThreadlocalVariable(self: *MachO, module: *Module, decl_index: Module.D
},
};
const required_alignment = decl.getAlignment(self.base.options.target);
const required_alignment = decl.getAlignment(mod);
const decl_name = try decl.getFullyQualifiedName(module);
defer gpa.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(module));
const init_sym_name = try std.fmt.allocPrint(gpa, "{s}$tlv$init", .{decl_name});
defer gpa.free(init_sym_name);
@ -2262,12 +2263,13 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
const decl = self.base.options.module.?.declPtr(decl_index);
const ty = decl.ty;
const val = decl.val;
const zig_ty = ty.zigTypeTag();
const mod = self.base.options.module.?;
const zig_ty = ty.zigTypeTag(mod);
const mode = self.base.options.optimize_mode;
const single_threaded = self.base.options.single_threaded;
const sect_id: u8 = blk: {
// TODO finish and audit this function
if (val.isUndefDeep()) {
if (val.isUndefDeep(mod)) {
if (mode == .ReleaseFast or mode == .ReleaseSmall) {
@panic("TODO __DATA,__bss");
} else {
@ -2275,8 +2277,8 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
}
}
if (val.castTag(.variable)) |variable| {
if (variable.data.is_threadlocal and !single_threaded) {
if (val.getVariable(mod)) |variable| {
if (variable.is_threadlocal and !single_threaded) {
break :blk self.thread_data_section_index.?;
}
break :blk self.data_section_index.?;
@ -2286,7 +2288,7 @@ fn getDeclOutputSection(self: *MachO, decl_index: Module.Decl.Index) u8 {
// TODO: what if this is a function pointer?
.Fn => break :blk self.text_section_index.?,
else => {
if (val.castTag(.variable)) |_| {
if (val.getVariable(mod)) |_| {
break :blk self.data_section_index.?;
}
break :blk self.data_const_section_index.?;
@ -2301,10 +2303,9 @@ fn updateDeclCode(self: *MachO, decl_index: Module.Decl.Index, code: []u8) !u64
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const required_alignment = decl.getAlignment(self.base.options.target);
const required_alignment = decl.getAlignment(mod);
const decl_name = try decl.getFullyQualifiedName(mod);
defer gpa.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
const decl_metadata = self.decls.get(decl_index).?;
const atom_index = decl_metadata.atom;
@ -2376,7 +2377,7 @@ pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl_index: Module.De
pub fn updateDeclExports(
self: *MachO,
module: *Module,
mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) File.UpdateDeclExportsError!void {
@ -2385,7 +2386,7 @@ pub fn updateDeclExports(
}
if (build_options.have_llvm) {
if (self.llvm_object) |llvm_object|
return llvm_object.updateDeclExports(module, decl_index, exports);
return llvm_object.updateDeclExports(mod, decl_index, exports);
}
const tracy = trace(@src());
@ -2393,26 +2394,28 @@ pub fn updateDeclExports(
const gpa = self.base.allocator;
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
const atom_index = try self.getOrCreateAtomForDecl(decl_index);
const atom = self.getAtom(atom_index);
const decl_sym = atom.getSymbol(self);
const decl_metadata = self.decls.getPtr(decl_index).?;
for (exports) |exp| {
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{exp.options.name});
const exp_name = try std.fmt.allocPrint(gpa, "_{}", .{
exp.opts.name.fmt(&mod.intern_pool),
});
defer gpa.free(exp_name);
log.debug("adding new export '{s}'", .{exp_name});
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, "__text")) {
try module.failed_exports.putNoClobber(
module.gpa,
if (exp.opts.section.unwrap()) |section_name| {
if (!mod.intern_pool.stringEqlSlice(section_name, "__text")) {
try mod.failed_exports.putNoClobber(
mod.gpa,
exp,
try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
decl.srcLoc(mod),
"Unimplemented: ExportOptions.section",
.{},
),
@ -2421,13 +2424,13 @@ pub fn updateDeclExports(
}
}
if (exp.options.linkage == .LinkOnce) {
try module.failed_exports.putNoClobber(
module.gpa,
if (exp.opts.linkage == .LinkOnce) {
try mod.failed_exports.putNoClobber(
mod.gpa,
exp,
try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
decl.srcLoc(mod),
"Unimplemented: GlobalLinkage.LinkOnce",
.{},
),
@ -2450,7 +2453,7 @@ pub fn updateDeclExports(
.n_value = decl_sym.n_value,
};
switch (exp.options.linkage) {
switch (exp.opts.linkage) {
.Internal => {
// Symbol should be hidden, or in MachO lingo, private extern.
// We should also mark the symbol as Weak: n_desc == N_WEAK_DEF.
@ -2471,9 +2474,9 @@ pub fn updateDeclExports(
// TODO: this needs rethinking
const global = self.getGlobal(exp_name).?;
if (sym_loc.sym_index != global.sym_index and global.file != null) {
_ = try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
_ = try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(),
decl.srcLoc(mod),
\\LinkError: symbol '{s}' defined multiple times
,
.{exp_name},
@ -2485,12 +2488,17 @@ pub fn updateDeclExports(
}
}
pub fn deleteDeclExport(self: *MachO, decl_index: Module.Decl.Index, name: []const u8) Allocator.Error!void {
pub fn deleteDeclExport(
self: *MachO,
decl_index: Module.Decl.Index,
name: InternPool.NullTerminatedString,
) Allocator.Error!void {
if (self.llvm_object) |_| return;
const metadata = self.decls.getPtr(decl_index) orelse return;
const gpa = self.base.allocator;
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{name});
const mod = self.base.options.module.?;
const exp_name = try std.fmt.allocPrint(gpa, "_{s}", .{mod.intern_pool.stringToSlice(name)});
defer gpa.free(exp_name);
const sym_index = metadata.getExportPtr(self, exp_name) orelse return;
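The "_{s}" formatting must mirror the insertion side in updateDeclExports; a runnable sketch of the invariant (hypothetical test, not part of this commit):
const std = @import("std");

test "mach-o export name mangling (sketch)" {
    const gpa = std.testing.allocator;
    // An export declared as "main" is stored, and must be deleted, as "_main".
    const mangled = try std.fmt.allocPrint(gpa, "_{s}", .{"main"});
    defer gpa.free(mangled);
    try std.testing.expectEqualStrings("_main", mangled);
}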

View File

@ -68,9 +68,9 @@ pub fn deinit(self: *NvPtx) void {
self.base.allocator.free(self.ptx_file_name);
}
pub fn updateFunc(self: *NvPtx, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *NvPtx, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (!build_options.have_llvm) return;
try self.llvm_object.updateFunc(module, func, air, liveness);
try self.llvm_object.updateFunc(module, func_index, air, liveness);
}
pub fn updateDecl(self: *NvPtx, module: *Module, decl_index: Module.Decl.Index) !void {

View File

@ -213,14 +213,14 @@ fn putFn(self: *Plan9, decl_index: Module.Decl.Index, out: FnDeclOutput) !void {
const gpa = self.base.allocator;
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope());
const fn_map_res = try self.fn_decl_table.getOrPut(gpa, decl.getFileScope(mod));
if (fn_map_res.found_existing) {
if (try fn_map_res.value_ptr.functions.fetchPut(gpa, decl_index, out)) |old_entry| {
gpa.free(old_entry.value.code);
gpa.free(old_entry.value.lineinfo);
}
} else {
const file = decl.getFileScope();
const file = decl.getFileScope(mod);
const arena = self.path_arena.allocator();
// each file gets a symbol
fn_map_res.value_ptr.* = .{
@ -276,17 +276,17 @@ fn addPathComponents(self: *Plan9, path: []const u8, a: *std.ArrayList(u8)) !voi
}
}
pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *Plan9, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .plan9) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = module.declPtr(decl_index);
const decl = mod.declPtr(decl_index);
self.freeUnnamedConsts(decl_index);
_ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s})", .{ decl, decl.name });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
@ -298,8 +298,8 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
const res = try codegen.generateFunction(
&self.base,
decl.srcLoc(),
func,
decl.srcLoc(mod),
func_index,
air,
liveness,
&code_buffer,
@ -316,7 +316,7 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv
.ok => try code_buffer.toOwnedSlice(),
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -344,8 +344,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I
}
const unnamed_consts = gop.value_ptr;
const decl_name = try decl.getFullyQualifiedName(mod);
defer self.base.allocator.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
const index = unnamed_consts.items.len;
// name is freed when the unnamed const is freed
@ -366,7 +365,7 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I
};
self.syms.items[info.sym_index.?] = sym;
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), tv, &code_buffer, .{
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), tv, &code_buffer, .{
.none = {},
}, .{
.parent_atom_index = @enumToInt(decl_index),
@ -388,14 +387,13 @@ pub fn lowerUnnamedConst(self: *Plan9, tv: TypedValue, decl_index: Module.Decl.I
return @intCast(u32, info.got_index.?);
}
pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index) !void {
const decl = module.declPtr(decl_index);
pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: Module.Decl.Index) !void {
const decl = mod.declPtr(decl_index);
if (decl.val.tag() == .extern_fn) {
if (decl.val.getExternFunc(mod)) |_| {
return; // TODO Should we do more when front-end analyzed extern decl?
}
if (decl.val.castTag(.variable)) |payload| {
const variable = payload.data;
if (decl.val.getVariable(mod)) |variable| {
if (variable.is_extern) {
return; // TODO Should we do more when front-end analyzed extern decl?
}
@ -403,13 +401,11 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
_ = try self.seeDecl(decl_index);
log.debug("codegen decl {*} ({s}) ({d})", .{ decl, decl.name, decl_index });
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const decl_val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
// TODO we need the symbol index for the symbol in the table of locals for the containing atom
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{
const res = try codegen.generateSymbol(&self.base, decl.srcLoc(mod), .{
.ty = decl.ty,
.val = decl_val,
}, &code_buffer, .{ .none = {} }, .{
@ -419,7 +415,7 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
.ok => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl_index, em);
try mod.failed_decls.put(mod.gpa, decl_index, em);
return;
},
};
@ -432,9 +428,9 @@ pub fn updateDecl(self: *Plan9, module: *Module, decl_index: Module.Decl.Index)
}
/// called at the end of update{Decl,Func}
fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
const decl = self.base.options.module.?.declPtr(decl_index);
const is_fn = (decl.ty.zigTypeTag() == .Fn);
log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name });
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const is_fn = (decl.ty.zigTypeTag(mod) == .Fn);
const sym_t: aout.Sym.Type = if (is_fn) .t else .d;
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
@ -445,7 +441,7 @@ fn updateFinish(self: *Plan9, decl_index: Module.Decl.Index) !void {
const sym: aout.Sym = .{
.value = undefined, // the value of stuff gets filled in in flushModule
.type = decl_block.type,
.name = mem.span(decl.name),
.name = try self.base.allocator.dupe(u8, mod.intern_pool.stringToSlice(decl.name)),
};
if (decl_block.sym_index) |s| {
@ -566,10 +562,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
var it = fentry.value_ptr.functions.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const out = entry.value_ptr.*;
log.debug("write text decl {*} ({s}), lines {d} to {d}", .{ decl, decl.name, out.start_line + 1, out.end_line });
{
// connect the previous decl to the next
const delta_line = @intCast(i32, out.start_line) - @intCast(i32, linecount);
@ -615,10 +609,8 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
var it = self.data_decl_table.iterator();
while (it.next()) |entry| {
const decl_index = entry.key_ptr.*;
const decl = mod.declPtr(decl_index);
const decl_block = self.getDeclBlockPtr(self.decls.get(decl_index).?.index);
const code = entry.value_ptr.*;
log.debug("write data decl {*} ({s})", .{ decl, decl.name });
foff += code.len;
iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len };
@ -694,19 +686,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
const source_decl = mod.declPtr(source_decl_index);
for (kv.value_ptr.items) |reloc| {
const target_decl_index = reloc.target;
const target_decl = mod.declPtr(target_decl_index);
const target_decl_block = self.getDeclBlock(self.decls.get(target_decl_index).?.index);
const target_decl_offset = target_decl_block.offset.?;
const offset = reloc.offset;
const addend = reloc.addend;
log.debug("relocating the address of '{s}' + {d} into '{s}' + {d}", .{ target_decl.name, addend, source_decl.name, offset });
const code = blk: {
const is_fn = source_decl.ty.zigTypeTag() == .Fn;
const is_fn = source_decl.ty.zigTypeTag(mod) == .Fn;
if (is_fn) {
const table = self.fn_decl_table.get(source_decl.getFileScope()).?.functions;
const table = self.fn_decl_table.get(source_decl.getFileScope(mod)).?.functions;
const output = table.get(source_decl_index).?;
break :blk output.code;
} else {
@ -728,7 +717,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation, prog_node: *std.Progress.No
}
fn addDeclExports(
self: *Plan9,
module: *Module,
mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
@ -736,12 +725,13 @@ fn addDeclExports(
const decl_block = self.getDeclBlock(metadata.index);
for (exports) |exp| {
const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);
// plan9 does not support custom sections
if (exp.options.section) |section_name| {
if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) {
try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(
if (exp.opts.section.unwrap()) |section_name| {
if (!mod.intern_pool.stringEqlSlice(section_name, ".text") and !mod.intern_pool.stringEqlSlice(section_name, ".data")) {
try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create(
self.base.allocator,
module.declPtr(decl_index).srcLoc(),
mod.declPtr(decl_index).srcLoc(mod),
"plan9 does not support extra sections",
.{},
));
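
The rewritten condition above also fixes an easy-to-miss boolean slip: with `or`, `!eql(name, ".text") or !eql(name, ".data")` is true for every possible name, so even the two supported sections were rejected. A minimal standalone sketch of the intended predicate (hypothetical helper, not the linker's code):

const std = @import("std");

/// plan9 output only supports these two section names.
fn sectionSupported(name: []const u8) bool {
    return std.mem.eql(u8, name, ".text") or std.mem.eql(u8, name, ".data");
}

test "custom sections are rejected" {
    try std.testing.expect(sectionSupported(".text"));
    try std.testing.expect(sectionSupported(".data"));
    try std.testing.expect(!sectionSupported(".rodata"));
}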
@ -751,10 +741,10 @@ fn addDeclExports(
const sym = .{
.value = decl_block.offset.?,
.type = decl_block.type.toGlobal(),
.name = exp.options.name,
.name = try self.base.allocator.dupe(u8, exp_name),
};
if (metadata.getExport(self, exp.options.name)) |i| {
if (metadata.getExport(self, exp_name)) |i| {
self.syms.items[i] = sym;
} else {
try self.syms.append(self.base.allocator, sym);
@ -770,9 +760,9 @@ pub fn freeDecl(self: *Plan9, decl_index: Module.Decl.Index) void {
// in the deleteUnusedDecl function.
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
const is_fn = (decl.val.tag() == .function);
const is_fn = decl.val.getFunctionIndex(mod) != .none;
if (is_fn) {
var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope()).?;
var symidx_and_submap = self.fn_decl_table.get(decl.getFileScope(mod)).?;
var submap = symidx_and_submap.functions;
if (submap.fetchSwapRemove(decl_index)) |removed_entry| {
self.base.allocator.free(removed_entry.value.code);
@ -955,7 +945,10 @@ pub fn writeSym(self: *Plan9, w: anytype, sym: aout.Sym) !void {
try w.writeAll(sym.name);
try w.writeByte(0);
}
pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const mod = self.base.options.module.?;
const ip = &mod.intern_pool;
const writer = buf.writer();
// write the f symbols
{
@ -979,7 +972,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| {
try self.writeSym(writer, self.syms.items[exp_i]);
};
}
@ -1005,7 +998,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const sym = self.syms.items[decl_block.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.options.module.?.decl_exports.get(decl_index)) |exports| {
for (exports.items) |e| if (decl_metadata.getExport(self, e.options.name)) |exp_i| {
for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| {
const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))
self.entry_val = s.value;
@ -1031,7 +1024,7 @@ pub fn getDeclVAddr(
) !u64 {
const mod = self.base.options.module.?;
const decl = mod.declPtr(decl_index);
if (decl.ty.zigTypeTag() == .Fn) {
if (decl.ty.zigTypeTag(mod) == .Fn) {
var start = self.bases.text;
var it_file = self.fn_decl_table.iterator();
while (it_file.next()) |fentry| {

View File

@ -103,11 +103,13 @@ pub fn deinit(self: *SpirV) void {
self.decl_link.deinit();
}
pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(self: *SpirV, module: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native) {
@panic("Attempted to compile for architecture that was disabled by build configuration");
}
const func = module.funcPtr(func_index);
var decl_gen = codegen.DeclGen.init(self.base.allocator, module, &self.spv, &self.decl_link);
defer decl_gen.deinit();
@ -131,12 +133,12 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl_index: Module.Decl.Index)
pub fn updateDeclExports(
self: *SpirV,
module: *Module,
mod: *Module,
decl_index: Module.Decl.Index,
exports: []const *Module.Export,
) !void {
const decl = module.declPtr(decl_index);
if (decl.val.tag() == .function and decl.ty.fnCallingConvention() == .Kernel) {
const decl = mod.declPtr(decl_index);
if (decl.val.getFunctionIndex(mod) != .none and decl.ty.fnCallingConvention(mod) == .Kernel) {
// TODO: Unify with resolveDecl in spirv.zig.
const entry = try self.decl_link.getOrPut(decl_index);
if (!entry.found_existing) {
@ -145,7 +147,7 @@ pub fn updateDeclExports(
const spv_decl_index = entry.value_ptr.*;
for (exports) |exp| {
try self.spv.declareEntryPoint(spv_decl_index, exp.options.name);
try self.spv.declareEntryPoint(spv_decl_index, mod.intern_pool.stringToSlice(exp.opts.name));
}
}
@ -188,7 +190,8 @@ pub fn flushModule(self: *SpirV, comp: *Compilation, prog_node: *std.Progress.No
var error_info = std.ArrayList(u8).init(self.spv.arena);
try error_info.appendSlice("zig_errors");
const module = self.base.options.module.?;
for (module.error_name_list.items) |name| {
for (module.global_error_set.keys()) |name_nts| {
const name = module.intern_pool.stringToSlice(name_nts);
// Errors can contain pretty much any character - to encode them in a string we must escape
// them somehow. Easiest here is to use some established scheme; one which also preserves the
// name when it contains no strange characters is nice for debugging. URI encoding fits the bill.

View File

@ -149,7 +149,8 @@ discarded: std.AutoHashMapUnmanaged(SymbolLoc, SymbolLoc) = .{},
/// into the final binary.
resolved_symbols: std.AutoArrayHashMapUnmanaged(SymbolLoc, void) = .{},
/// Symbols that remain undefined after symbol resolution.
undefs: std.StringArrayHashMapUnmanaged(SymbolLoc) = .{},
/// Note: The key represents an offset into the string table, rather than the actual string.
undefs: std.AutoArrayHashMapUnmanaged(u32, SymbolLoc) = .{},
/// Maps a symbol's location to an atom. This can be used to find meta
/// data of a symbol, such as its size, or its offset to perform a relocation.
/// Undefined (and synthetic) symbols do not have an Atom and therefore cannot be mapped.
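
Keying `undefs` on a `u32` string-table offset works because each name is interned exactly once, so equal names always map to equal offsets. A rough sketch of that invariant, using a hypothetical `StringTable` rather than the wasm linker's real one:

const std = @import("std");

const StringTable = struct {
    buffer: std.ArrayListUnmanaged(u8) = .{},
    /// Maps a name to the offset it was interned at; equal names always
    /// yield equal offsets, which is what lets maps key on the u32 alone.
    map: std.StringHashMapUnmanaged(u32) = .{},

    fn put(st: *StringTable, gpa: std.mem.Allocator, name: []const u8) !u32 {
        if (st.map.get(name)) |offset| return offset;
        const offset = @intCast(u32, st.buffer.items.len);
        try st.buffer.appendSlice(gpa, name);
        try st.buffer.append(gpa, 0); // the table stores names 0-terminated
        const key = try gpa.dupe(u8, name); // keep the map key stable
        try st.map.put(gpa, key, offset);
        return offset;
    }

    fn getOffset(st: *const StringTable, name: []const u8) ?u32 {
        return st.map.get(name);
    }
};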
@ -514,6 +515,10 @@ pub fn createEmpty(gpa: Allocator, options: link.Options) !*Wasm {
/// Leaves index undefined and the default flags (0).
fn createSyntheticSymbol(wasm: *Wasm, name: []const u8, tag: Symbol.Tag) !SymbolLoc {
const name_offset = try wasm.string_table.put(wasm.base.allocator, name);
return wasm.createSyntheticSymbolOffset(name_offset, tag);
}
fn createSyntheticSymbolOffset(wasm: *Wasm, name_offset: u32, tag: Symbol.Tag) !SymbolLoc {
const sym_index = @intCast(u32, wasm.symbols.items.len);
const loc: SymbolLoc = .{ .index = sym_index, .file = null };
try wasm.symbols.append(wasm.base.allocator, .{
@ -691,7 +696,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
try wasm.resolved_symbols.putNoClobber(wasm.base.allocator, location, {});
if (symbol.isUndefined()) {
try wasm.undefs.putNoClobber(wasm.base.allocator, sym_name, location);
try wasm.undefs.putNoClobber(wasm.base.allocator, sym_name_index, location);
}
continue;
}
@ -801,7 +806,7 @@ fn resolveSymbolsInObject(wasm: *Wasm, object_index: u16) !void {
try wasm.resolved_symbols.put(wasm.base.allocator, location, {});
assert(wasm.resolved_symbols.swapRemove(existing_loc));
if (existing_sym.isUndefined()) {
_ = wasm.undefs.swapRemove(sym_name);
_ = wasm.undefs.swapRemove(sym_name_index);
}
}
}
@ -812,15 +817,16 @@ fn resolveSymbolsInArchives(wasm: *Wasm) !void {
log.debug("Resolving symbols in archives", .{});
var index: u32 = 0;
undef_loop: while (index < wasm.undefs.count()) {
const sym_name = wasm.undefs.keys()[index];
const sym_name_index = wasm.undefs.keys()[index];
for (wasm.archives.items) |archive| {
const sym_name = wasm.string_table.get(sym_name_index);
log.debug("Detected symbol '{s}' in archive '{s}', parsing objects..", .{ sym_name, archive.name });
const offset = archive.toc.get(sym_name) orelse {
// symbol does not exist in this archive
continue;
};
log.debug("Detected symbol '{s}' in archive '{s}', parsing objects..", .{ sym_name, archive.name });
// Symbol is found in unparsed object file within current archive.
// Parse object and resolve symbols again before we check remaining
// undefined symbols.
@ -1191,28 +1197,36 @@ fn validateFeatures(
/// if one or more undefined references exist. When none exist, the symbol will
/// not be created, ensuring we don't unnecessarily emit unreferenced symbols.
fn resolveLazySymbols(wasm: *Wasm) !void {
if (wasm.undefs.fetchSwapRemove("__heap_base")) |kv| {
const loc = try wasm.createSyntheticSymbol("__heap_base", .data);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
if (wasm.string_table.getOffset("__heap_base")) |name_offset| {
if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| {
const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc); // we don't want to emit this symbol, only use it for relocations.
}
}
if (wasm.undefs.fetchSwapRemove("__heap_end")) |kv| {
const loc = try wasm.createSyntheticSymbol("__heap_end", .data);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc);
if (wasm.string_table.getOffset("__heap_end")) |name_offset| {
if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| {
const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(loc);
}
}
if (!wasm.base.options.shared_memory) {
if (wasm.undefs.fetchSwapRemove("__tls_base")) |kv| {
const loc = try wasm.createSyntheticSymbol("__tls_base", .global);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
if (wasm.string_table.getOffset("__tls_base")) |name_offset| {
if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| {
const loc = try wasm.createSyntheticSymbolOffset(name_offset, .global);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
}
}
}
if (wasm.undefs.fetchSwapRemove("__zig_errors_len")) |kv| {
const loc = try wasm.createSyntheticSymbol("__zig_errors_len", .data);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(kv.value);
if (wasm.string_table.getOffset("__zig_errors_len")) |name_offset| {
if (wasm.undefs.fetchSwapRemove(name_offset)) |kv| {
const loc = try wasm.createSyntheticSymbolOffset(name_offset, .data);
try wasm.discarded.putNoClobber(wasm.base.allocator, kv.value, loc);
_ = wasm.resolved_symbols.swapRemove(kv.value);
}
}
}
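
The guard order here matters: `string_table.getOffset` returning null means no object ever referenced the name, so the synthetic symbol is never created and nothing new is interned for it. A condensed toy version of the flow (types and names invented for illustration):

const std = @import("std");

fn resolveLazy(
    gpa: std.mem.Allocator,
    names: *const std.StringHashMapUnmanaged(u32), // name -> string-table offset
    undefs: *std.AutoArrayHashMapUnmanaged(u32, u32), // offset -> symbol location
    synthetics: *std.ArrayListUnmanaged(u32),
    name: []const u8,
) !void {
    const offset = names.get(name) orelse return; // never referenced: do nothing
    if (undefs.fetchSwapRemove(offset)) |kv| {
        _ = kv.value; // would be redirected via `discarded` in the real linker
        try synthetics.append(gpa, offset); // create the synthetic symbol here
    }
}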
@ -1324,17 +1338,18 @@ pub fn allocateSymbol(wasm: *Wasm) !u32 {
return index;
}
pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
pub fn updateFunc(wasm: *Wasm, mod: *Module, func_index: Module.Fn.Index, air: Air, liveness: Liveness) !void {
if (build_options.skip_non_native and builtin.object_format != .wasm) {
@panic("Attempted to compile for object format that was disabled by build configuration");
}
if (build_options.have_llvm) {
if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func, air, liveness);
if (wasm.llvm_object) |llvm_object| return llvm_object.updateFunc(mod, func_index, air, liveness);
}
const tracy = trace(@src());
defer tracy.end();
const func = mod.funcPtr(func_index);
const decl_index = func.owner_decl;
const decl = mod.declPtr(decl_index);
const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
@ -1348,7 +1363,7 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
defer code_writer.deinit();
// const result = try codegen.generateFunction(
// &wasm.base,
// decl.srcLoc(),
// decl.srcLoc(mod),
// func,
// air,
// liveness,
@ -1357,8 +1372,8 @@ pub fn updateFunc(wasm: *Wasm, mod: *Module, func: *Module.Fn, air: Air, livenes
// );
const result = try codegen.generateFunction(
&wasm.base,
decl.srcLoc(),
func,
decl.srcLoc(mod),
func_index,
air,
liveness,
&code_writer,
@ -1403,9 +1418,9 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
defer tracy.end();
const decl = mod.declPtr(decl_index);
if (decl.val.castTag(.function)) |_| {
if (decl.val.getFunction(mod)) |_| {
return;
} else if (decl.val.castTag(.extern_fn)) |_| {
} else if (decl.val.getExternFunc(mod)) |_| {
return;
}
@ -1413,19 +1428,20 @@ pub fn updateDecl(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.Index) !voi
const atom = wasm.getAtomPtr(atom_index);
atom.clear();
if (decl.isExtern()) {
const variable = decl.getVariable().?;
const name = mem.sliceTo(decl.name, 0);
return wasm.addOrUpdateImport(name, atom.sym_index, variable.lib_name, null);
if (decl.isExtern(mod)) {
const variable = decl.getOwnedVariable(mod).?;
const name = mod.intern_pool.stringToSlice(decl.name);
const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name);
return wasm.addOrUpdateImport(name, atom.sym_index, lib_name, null);
}
const val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val;
const val = if (decl.val.getVariable(mod)) |variable| variable.init.toValue() else decl.val;
var code_writer = std.ArrayList(u8).init(wasm.base.allocator);
defer code_writer.deinit();
const res = try codegen.generateSymbol(
&wasm.base,
decl.srcLoc(),
decl.srcLoc(mod),
.{ .ty = decl.ty, .val = val },
&code_writer,
.none,
@ -1451,8 +1467,7 @@ pub fn updateDeclLineNumber(wasm: *Wasm, mod: *Module, decl_index: Module.Decl.I
defer tracy.end();
const decl = mod.declPtr(decl_index);
const decl_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(decl_name);
const decl_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
try dw.updateDeclLineNumber(mod, decl_index);
@ -1465,15 +1480,14 @@ fn finishUpdateDecl(wasm: *Wasm, decl_index: Module.Decl.Index, code: []const u8
const atom_index = wasm.decls.get(decl_index).?;
const atom = wasm.getAtomPtr(atom_index);
const symbol = &wasm.symbols.items[atom.sym_index];
const full_name = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(full_name);
const full_name = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
symbol.name = try wasm.string_table.put(wasm.base.allocator, full_name);
try atom.code.appendSlice(wasm.base.allocator, code);
try wasm.resolved_symbols.put(wasm.base.allocator, atom.symbolLoc(), {});
atom.size = @intCast(u32, code.len);
if (code.len == 0) return;
atom.alignment = decl.ty.abiAlignment(wasm.base.options.target);
atom.alignment = decl.ty.abiAlignment(mod);
}
/// From a given symbol location, returns its `wasm.GlobalType`.
@ -1523,9 +1537,8 @@ fn getFunctionSignature(wasm: *const Wasm, loc: SymbolLoc) std.wasm.Type {
/// Returns the symbol index of the local
/// The given `decl` is the parent decl whom owns the constant.
pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.Index) !u32 {
assert(tv.ty.zigTypeTag() != .Fn); // cannot create local symbols for functions
const mod = wasm.base.options.module.?;
assert(tv.ty.zigTypeTag(mod) != .Fn); // cannot create local symbols for functions
const decl = mod.declPtr(decl_index);
// Create and initialize a new local symbol and atom
@ -1534,16 +1547,17 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
const parent_atom = wasm.getAtomPtr(parent_atom_index);
const local_index = parent_atom.locals.items.len;
try parent_atom.locals.append(wasm.base.allocator, atom_index);
const fqdn = try decl.getFullyQualifiedName(mod);
defer wasm.base.allocator.free(fqdn);
const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{ fqdn, local_index });
const fqn = mod.intern_pool.stringToSlice(try decl.getFullyQualifiedName(mod));
const name = try std.fmt.allocPrintZ(wasm.base.allocator, "__unnamed_{s}_{d}", .{
fqn, local_index,
});
defer wasm.base.allocator.free(name);
var value_bytes = std.ArrayList(u8).init(wasm.base.allocator);
defer value_bytes.deinit();
const code = code: {
const atom = wasm.getAtomPtr(atom_index);
atom.alignment = tv.ty.abiAlignment(wasm.base.options.target);
atom.alignment = tv.ty.abiAlignment(mod);
wasm.symbols.items[atom.sym_index] = .{
.name = try wasm.string_table.put(wasm.base.allocator, name),
.flags = @enumToInt(Symbol.Flag.WASM_SYM_BINDING_LOCAL),
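
Each unnamed constant gets a local symbol named after the owning decl's fully qualified name plus a per-decl counter. A small sketch of the resulting format, assuming a hypothetical decl named `foo.bar`:

const std = @import("std");

test "unnamed constant symbol naming" {
    const gpa = std.testing.allocator;
    const fqn = "foo.bar"; // hypothetical fully qualified name of the parent decl
    const local_index: usize = 0; // index into the parent atom's locals list
    const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{s}_{d}", .{ fqn, local_index });
    defer gpa.free(name);
    try std.testing.expectEqualStrings("__unnamed_foo.bar_0", name);
}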
@ -1555,7 +1569,7 @@ pub fn lowerUnnamedConst(wasm: *Wasm, tv: TypedValue, decl_index: Module.Decl.In
const result = try codegen.generateSymbol(
&wasm.base,
decl.srcLoc(),
decl.srcLoc(mod),
tv,
&value_bytes,
.none,
@ -1611,7 +1625,7 @@ pub fn getGlobalSymbol(wasm: *Wasm, name: []const u8, lib_name: ?[]const u8) !u3
wasm.symbols.items[sym_index] = symbol;
gop.value_ptr.* = .{ .index = sym_index, .file = null };
try wasm.resolved_symbols.put(wasm.base.allocator, gop.value_ptr.*, {});
try wasm.undefs.putNoClobber(wasm.base.allocator, name, gop.value_ptr.*);
try wasm.undefs.putNoClobber(wasm.base.allocator, name_index, gop.value_ptr.*);
return sym_index;
}
@ -1632,7 +1646,7 @@ pub fn getDeclVAddr(
const atom_index = wasm.symbol_atom.get(.{ .file = null, .index = reloc_info.parent_atom_index }).?;
const atom = wasm.getAtomPtr(atom_index);
const is_wasm32 = wasm.base.options.target.cpu.arch == .wasm32;
if (decl.ty.zigTypeTag() == .Fn) {
if (decl.ty.zigTypeTag(mod) == .Fn) {
assert(reloc_info.addend == 0); // addend not allowed for function relocations
// We found a function pointer, so add it to our table,
// as function pointers are not allowed to be stored inside the data section.
@ -1689,36 +1703,37 @@ pub fn updateDeclExports(
const decl = mod.declPtr(decl_index);
const atom_index = try wasm.getOrCreateAtomForDecl(decl_index);
const atom = wasm.getAtom(atom_index);
const gpa = mod.gpa;
for (exports) |exp| {
if (exp.options.section) |section| {
try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create(
mod.gpa,
decl.srcLoc(),
if (mod.intern_pool.stringToSliceUnwrap(exp.opts.section)) |section| {
try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(mod),
"Unimplemented: ExportOptions.section '{s}'",
.{section},
));
continue;
}
const export_name = try wasm.string_table.put(wasm.base.allocator, exp.options.name);
const export_name = try wasm.string_table.put(wasm.base.allocator, mod.intern_pool.stringToSlice(exp.opts.name));
if (wasm.globals.getPtr(export_name)) |existing_loc| {
if (existing_loc.index == atom.sym_index) continue;
const existing_sym: Symbol = existing_loc.getSymbol(wasm).*;
const exp_is_weak = exp.options.linkage == .Internal or exp.options.linkage == .Weak;
const exp_is_weak = exp.opts.linkage == .Internal or exp.opts.linkage == .Weak;
// When both the to-be-exported symbol and the already existing symbol
// are strong symbols, we have a linker error.
// In the other case we replace one with the other.
if (!exp_is_weak and !existing_sym.isWeak()) {
try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create(
mod.gpa,
decl.srcLoc(),
\\LinkError: symbol '{s}' defined multiple times
try mod.failed_exports.put(gpa, exp, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(mod),
\\LinkError: symbol '{}' defined multiple times
\\ first definition in '{s}'
\\ next definition in '{s}'
,
.{ exp.options.name, wasm.name, wasm.name },
.{ exp.opts.name.fmt(&mod.intern_pool), wasm.name, wasm.name },
));
continue;
} else if (exp_is_weak) {
@ -1735,7 +1750,7 @@ pub fn updateDeclExports(
const exported_atom = wasm.getAtom(exported_atom_index);
const sym_loc = exported_atom.symbolLoc();
const symbol = sym_loc.getSymbol(wasm);
switch (exp.options.linkage) {
switch (exp.opts.linkage) {
.Internal => {
symbol.setFlag(.WASM_SYM_VISIBILITY_HIDDEN);
},
@ -1744,9 +1759,9 @@ pub fn updateDeclExports(
},
.Strong => {}, // symbols are strong by default
.LinkOnce => {
try mod.failed_exports.putNoClobber(mod.gpa, exp, try Module.ErrorMsg.create(
mod.gpa,
decl.srcLoc(),
try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(mod),
"Unimplemented: LinkOnce",
.{},
));
@ -1754,7 +1769,7 @@ pub fn updateDeclExports(
},
}
// Ensure the symbol will be exported using the given name
if (!mem.eql(u8, exp.options.name, sym_loc.getName(wasm))) {
if (!mod.intern_pool.stringEqlSlice(exp.opts.name, sym_loc.getName(wasm))) {
try wasm.export_names.put(wasm.base.allocator, sym_loc, export_name);
}
@ -1768,7 +1783,7 @@ pub fn updateDeclExports(
// if the symbol was previously undefined, remove it as an import
_ = wasm.imports.remove(sym_loc);
_ = wasm.undefs.swapRemove(exp.options.name);
_ = wasm.undefs.swapRemove(export_name);
}
}
@ -1792,7 +1807,7 @@ pub fn freeDecl(wasm: *Wasm, decl_index: Module.Decl.Index) void {
assert(wasm.symbol_atom.remove(local_atom.symbolLoc()));
}
if (decl.isExtern()) {
if (decl.isExtern(mod)) {
_ = wasm.imports.remove(atom.symbolLoc());
}
_ = wasm.resolved_symbols.swapRemove(atom.symbolLoc());
@ -1853,7 +1868,7 @@ pub fn addOrUpdateImport(
/// Symbol index that is external
symbol_index: u32,
/// Optional library name (e.g. `extern "c" fn foo() void`)
lib_name: ?[*:0]const u8,
lib_name: ?[:0]const u8,
/// The index of the type that represents the function signature
/// when the extern is a function. When this is null, a data-symbol
/// is asserted instead.
@ -1864,7 +1879,7 @@ pub fn addOrUpdateImport(
// Also mangle the name when the lib name is set and not equal to "C" so imports with the same
// name but different module can be resolved correctly.
const mangle_name = lib_name != null and
!std.mem.eql(u8, std.mem.sliceTo(lib_name.?, 0), "c");
!std.mem.eql(u8, lib_name.?, "c");
const full_name = if (mangle_name) full_name: {
break :full_name try std.fmt.allocPrint(wasm.base.allocator, "{s}|{s}", .{ name, lib_name.? });
} else name;
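
The mangling rule keeps two externs with the same symbol name but different modules distinct: any library name other than "c" is folded into the import's key as `name|lib`. A standalone sketch under those assumptions:

const std = @import("std");

fn importName(gpa: std.mem.Allocator, name: []const u8, lib_name: ?[:0]const u8) ![]const u8 {
    // only mangle when a lib name is present and it is not the default "c"
    const mangle = lib_name != null and !std.mem.eql(u8, lib_name.?, "c");
    if (mangle) return std.fmt.allocPrint(gpa, "{s}|{s}", .{ name, lib_name.? });
    return gpa.dupe(u8, name);
}

test "mangling" {
    const gpa = std.testing.allocator;
    const a = try importName(gpa, "write", "wasi_snapshot_preview1");
    defer gpa.free(a);
    try std.testing.expectEqualStrings("write|wasi_snapshot_preview1", a);
}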
@ -1884,13 +1899,13 @@ pub fn addOrUpdateImport(
const loc: SymbolLoc = .{ .file = null, .index = symbol_index };
global_gop.value_ptr.* = loc;
try wasm.resolved_symbols.put(wasm.base.allocator, loc, {});
try wasm.undefs.putNoClobber(wasm.base.allocator, full_name, loc);
try wasm.undefs.putNoClobber(wasm.base.allocator, decl_name_index, loc);
}
if (type_index) |ty_index| {
const gop = try wasm.imports.getOrPut(wasm.base.allocator, .{ .index = symbol_index, .file = null });
const module_name = if (lib_name) |l_name| blk: {
break :blk mem.sliceTo(l_name, 0);
break :blk l_name;
} else wasm.host_name;
if (!gop.found_existing) {
gop.value_ptr.* = .{
@ -2932,8 +2947,9 @@ pub fn getErrorTableSymbol(wasm: *Wasm) !u32 {
const atom_index = try wasm.createAtom();
const atom = wasm.getAtomPtr(atom_index);
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
atom.alignment = slice_ty.abiAlignment(wasm.base.options.target);
const slice_ty = Type.slice_const_u8_sentinel_0;
const mod = wasm.base.options.module.?;
atom.alignment = slice_ty.abiAlignment(mod);
const sym_index = atom.sym_index;
const sym_name = try wasm.string_table.put(wasm.base.allocator, "__zig_err_name_table");
@ -2985,10 +3001,11 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
// Addend for each relocation to the table
var addend: u32 = 0;
const mod = wasm.base.options.module.?;
for (mod.error_name_list.items) |error_name| {
for (mod.global_error_set.keys()) |error_name_nts| {
const error_name = mod.intern_pool.stringToSlice(error_name_nts);
const len = @intCast(u32, error_name.len + 1); // names are 0-terminated
const slice_ty = Type.initTag(.const_slice_u8_sentinel_0);
const slice_ty = Type.slice_const_u8_sentinel_0;
const offset = @intCast(u32, atom.code.items.len);
// first we create the data for the slice of the name
try atom.code.appendNTimes(wasm.base.allocator, 0, 4); // ptr to name, will be relocated
@ -3000,7 +3017,7 @@ fn populateErrorNameTable(wasm: *Wasm) !void {
.offset = offset,
.addend = @intCast(i32, addend),
});
atom.size += @intCast(u32, slice_ty.abiSize(wasm.base.options.target));
atom.size += @intCast(u32, slice_ty.abiSize(mod));
addend += len;
// as we updated the error name table, we now store the actual name within the names atom
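
Each error contributes one `{ ptr, len }` slice entry whose pointer bytes stay zero until relocation, while `addend` advances by the 0-terminated name length so consecutive entries point at consecutive names. A sketch of just the addend arithmetic:

const std = @import("std");

test "error name table addends" {
    const names = [_][]const u8{ "One", "Two", "Three" };
    const expected = [_]u32{ 0, 4, 8 }; // "One\x00" is 4 bytes, "Two\x00" 4 more
    var addend: u32 = 0;
    for (names, 0..) |name, i| {
        try std.testing.expectEqual(expected[i], addend);
        addend += @intCast(u32, name.len + 1); // names are 0-terminated
    }
}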
@ -3366,15 +3383,15 @@ pub fn flushModule(wasm: *Wasm, comp: *Compilation, prog_node: *std.Progress.Nod
var decl_it = wasm.decls.iterator();
while (decl_it.next()) |entry| {
const decl = mod.declPtr(entry.key_ptr.*);
if (decl.isExtern()) continue;
if (decl.isExtern(mod)) continue;
const atom_index = entry.value_ptr.*;
const atom = wasm.getAtomPtr(atom_index);
if (decl.ty.zigTypeTag() == .Fn) {
if (decl.ty.zigTypeTag(mod) == .Fn) {
try wasm.parseAtom(atom_index, .function);
} else if (decl.getVariable()) |variable| {
if (!variable.is_mutable) {
} else if (decl.getOwnedVariable(mod)) |variable| {
if (variable.is_const) {
try wasm.parseAtom(atom_index, .{ .data = .read_only });
} else if (variable.init.isUndefDeep()) {
} else if (variable.init.toValue().isUndefDeep(mod)) {
// for safe build modes, we store the atom in the data segment,
// whereas for unsafe build modes we store it in bss.
const is_initialized = wasm.base.options.optimize_mode == .Debug or

View File

@ -569,6 +569,7 @@ const usage_build_generic =
\\ --verbose-link Display linker invocations
\\ --verbose-cc Display C compiler invocations
\\ --verbose-air Enable compiler debug output for Zig AIR
\\ --verbose-intern-pool Enable compiler debug output for InternPool
\\ --verbose-llvm-ir[=path] Enable compiler debug output for unoptimized LLVM IR
\\ --verbose-llvm-bc=[path] Enable compiler debug output for unoptimized LLVM BC
\\ --verbose-cimport Enable compiler debug output for C imports
@ -735,6 +736,7 @@ fn buildOutputType(
var verbose_link = (builtin.os.tag != .wasi or builtin.link_libc) and std.process.hasEnvVarConstant("ZIG_VERBOSE_LINK");
var verbose_cc = (builtin.os.tag != .wasi or builtin.link_libc) and std.process.hasEnvVarConstant("ZIG_VERBOSE_CC");
var verbose_air = false;
var verbose_intern_pool = false;
var verbose_llvm_ir: ?[]const u8 = null;
var verbose_llvm_bc: ?[]const u8 = null;
var verbose_cimport = false;
@ -1460,6 +1462,8 @@ fn buildOutputType(
verbose_cc = true;
} else if (mem.eql(u8, arg, "--verbose-air")) {
verbose_air = true;
} else if (mem.eql(u8, arg, "--verbose-intern-pool")) {
verbose_intern_pool = true;
} else if (mem.eql(u8, arg, "--verbose-llvm-ir")) {
verbose_llvm_ir = "-";
} else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) {
@ -3156,6 +3160,7 @@ fn buildOutputType(
.verbose_cc = verbose_cc,
.verbose_link = verbose_link,
.verbose_air = verbose_air,
.verbose_intern_pool = verbose_intern_pool,
.verbose_llvm_ir = verbose_llvm_ir,
.verbose_llvm_bc = verbose_llvm_bc,
.verbose_cimport = verbose_cimport,

View File

@ -7,6 +7,7 @@ const Value = @import("value.zig").Value;
const Type = @import("type.zig").Type;
const Air = @import("Air.zig");
const Liveness = @import("Liveness.zig");
const InternPool = @import("InternPool.zig");
pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) void {
const instruction_bytes = air.instructions.len *
@ -14,12 +15,11 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo
// the debug safety tag but we want to measure release size.
(@sizeOf(Air.Inst.Tag) + 8);
const extra_bytes = air.extra.len * @sizeOf(u32);
const values_bytes = air.values.len * @sizeOf(Value);
const tomb_bytes = if (liveness) |l| l.tomb_bits.len * @sizeOf(usize) else 0;
const liveness_extra_bytes = if (liveness) |l| l.extra.len * @sizeOf(u32) else 0;
const liveness_special_bytes = if (liveness) |l| l.special.count() * 8 else 0;
const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes +
values_bytes + @sizeOf(Liveness) + liveness_extra_bytes +
@sizeOf(Liveness) + liveness_extra_bytes +
liveness_special_bytes + tomb_bytes;
// zig fmt: off
@ -27,7 +27,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo
\\# Total AIR+Liveness bytes: {}
\\# AIR Instructions: {d} ({})
\\# AIR Extra Data: {d} ({})
\\# AIR Values Bytes: {d} ({})
\\# Liveness tomb_bits: {}
\\# Liveness Extra Data: {d} ({})
\\# Liveness special table: {d} ({})
@ -36,7 +35,6 @@ pub fn write(stream: anytype, module: *Module, air: Air, liveness: ?Liveness) vo
fmtIntSizeBin(total_bytes),
air.instructions.len, fmtIntSizeBin(instruction_bytes),
air.extra.len, fmtIntSizeBin(extra_bytes),
air.values.len, fmtIntSizeBin(values_bytes),
fmtIntSizeBin(tomb_bytes),
if (liveness) |l| l.extra.len else 0, fmtIntSizeBin(liveness_extra_bytes),
if (liveness) |l| l.special.count() else 0, fmtIntSizeBin(liveness_special_bytes),
@ -92,14 +90,10 @@ const Writer = struct {
fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void {
for (w.air.instructions.items(.tag), 0..) |tag, i| {
if (tag != .interned) continue;
const inst = @intCast(Air.Inst.Index, i);
switch (tag) {
.constant, .const_ty => {
try w.writeInst(s, inst);
try s.writeByte('\n');
},
else => continue,
}
try w.writeInst(s, inst);
try s.writeByte('\n');
}
}
@ -225,7 +219,6 @@ const Writer = struct {
.save_err_return_trace_index,
=> try w.writeNoOp(s, inst),
.const_ty,
.alloc,
.ret_ptr,
.err_return_trace,
@ -304,7 +297,9 @@ const Writer = struct {
.struct_field_ptr => try w.writeStructField(s, inst),
.struct_field_val => try w.writeStructField(s, inst),
.constant => try w.writeConstant(s, inst),
.inferred_alloc => @panic("TODO"),
.inferred_alloc_comptime => @panic("TODO"),
.interned => try w.writeInterned(s, inst),
.assembly => try w.writeAssembly(s, inst),
.dbg_stmt => try w.writeDbgStmt(s, inst),
@ -364,13 +359,7 @@ const Writer = struct {
}
fn writeType(w: *Writer, s: anytype, ty: Type) !void {
const t = ty.tag();
switch (t) {
.inferred_alloc_const => try s.writeAll("(inferred_alloc_const)"),
.inferred_alloc_mut => try s.writeAll("(inferred_alloc_mut)"),
.generic_poison => try s.writeAll("(generic_poison)"),
else => try ty.print(s, w.module),
}
return ty.print(s, w.module);
}
fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
@ -432,9 +421,10 @@ const Writer = struct {
}
fn writeAggregateInit(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const mod = w.module;
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const vector_ty = w.air.getRefType(ty_pl.ty);
const len = @intCast(usize, vector_ty.arrayLen());
const len = @intCast(usize, vector_ty.arrayLen(mod));
const elements = @ptrCast([]const Air.Inst.Ref, w.air.extra[ty_pl.payload..][0..len]);
try w.writeType(s, vector_ty);
@ -511,10 +501,11 @@ const Writer = struct {
}
fn writeSelect(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const mod = w.module;
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
const extra = w.air.extraData(Air.Bin, pl_op.payload).data;
const elem_ty = w.air.typeOfIndex(inst).childType();
const elem_ty = w.typeOfIndex(inst).childType(mod);
try w.writeType(s, elem_ty);
try s.writeAll(", ");
try w.writeOperand(s, inst, 0, pl_op.operand);
@ -605,12 +596,12 @@ const Writer = struct {
try s.print(", {d}", .{extra.field_index});
}
fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const val = w.air.values[ty_pl.payload];
const ty = w.air.getRefType(ty_pl.ty);
fn writeInterned(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const mod = w.module;
const ip_index = w.air.instructions.items(.data)[inst].interned;
const ty = mod.intern_pool.indexToKey(ip_index).typeOf().toType();
try w.writeType(s, ty);
try s.print(", {}", .{val.fmtValue(ty, w.module)});
try s.print(", {}", .{ip_index.toValue().fmtValue(ty, mod)});
}
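
Because constants are interned, a single `InternPool.Index` is enough to recover both the value and its type, which is why the old `air.values` side table (and `writeConstant`) can disappear. A toy model of that lookup, not the real `InternPool` API:

const std = @import("std");

// The pool stores type and value side by side, so one index recovers both.
const ToyPool = struct {
    types: []const []const u8,
    values: []const i64,

    fn typeOf(p: ToyPool, index: usize) []const u8 {
        return p.types[index];
    }
    fn valueOf(p: ToyPool, index: usize) i64 {
        return p.values[index];
    }
};

test "index recovers type and value" {
    const pool = ToyPool{
        .types = &.{ "u32", "i8" },
        .values = &.{ 42, -1 },
    };
    try std.testing.expectEqualStrings("u32", pool.typeOf(0));
    try std.testing.expectEqual(@as(i64, 42), pool.valueOf(0));
}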
fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
@ -621,7 +612,7 @@ const Writer = struct {
var extra_i: usize = extra.end;
var op_index: usize = 0;
const ret_ty = w.air.typeOfIndex(inst);
const ret_ty = w.typeOfIndex(inst);
try w.writeType(s, ret_ty);
if (is_volatile) {
@ -692,17 +683,17 @@ const Writer = struct {
}
fn writeDbgInline(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const ty_pl = w.air.instructions.items(.data)[inst].ty_pl;
const function = w.air.values[ty_pl.payload].castTag(.function).?.data;
const owner_decl = w.module.declPtr(function.owner_decl);
try s.print("{s}", .{owner_decl.name});
const ty_fn = w.air.instructions.items(.data)[inst].ty_fn;
const func_index = ty_fn.func;
const owner_decl = w.module.declPtr(w.module.funcPtr(func_index).owner_decl);
try s.print("{}", .{owner_decl.name.fmt(&w.module.intern_pool)});
}
fn writeDbgVar(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
const pl_op = w.air.instructions.items(.data)[inst].pl_op;
try w.writeOperand(s, inst, 0, pl_op.operand);
const name = w.air.nullTerminatedString(pl_op.payload);
try s.print(", {s}", .{name});
try s.print(", \"{}\"", .{std.zig.fmtEscapes(name)});
}
fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
@ -965,14 +956,13 @@ const Writer = struct {
operand: Air.Inst.Ref,
dies: bool,
) @TypeOf(s).Error!void {
var i: usize = @enumToInt(operand);
const i = @enumToInt(operand);
if (i < Air.Inst.Ref.typed_value_map.len) {
if (i < InternPool.static_len) {
return s.print("@{}", .{operand});
}
i -= Air.Inst.Ref.typed_value_map.len;
return w.writeInstIndex(s, @intCast(Air.Inst.Index, i), dies);
return w.writeInstIndex(s, i - InternPool.static_len, dies);
}
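
`Ref` values below `InternPool.static_len` name statically interned constants; anything at or above it is an instruction index biased by that length. A sketch of the decode step, with `static_len` as an assumed placeholder value:

const std = @import("std");

const static_len: u32 = 84; // placeholder; the real value is InternPool.static_len

const Decoded = union(enum) {
    interned: u32, // one of the statically interned constants
    instruction: u32, // AIR/ZIR instruction index
};

fn decodeRef(ref: u32) Decoded {
    if (ref < static_len) return .{ .interned = ref };
    return .{ .instruction = ref - static_len };
}

test "refs round-trip through the bias" {
    try std.testing.expectEqual(Decoded{ .interned = 7 }, decodeRef(7));
    try std.testing.expectEqual(Decoded{ .instruction = 0 }, decodeRef(static_len));
}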
fn writeInstIndex(
@ -985,4 +975,9 @@ const Writer = struct {
try s.print("%{d}", .{inst});
if (dies) try s.writeByte('!');
}
fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type {
const mod = w.module;
return w.air.typeOfIndex(inst, &mod.intern_pool);
}
};

View File

@ -3,6 +3,7 @@ const mem = std.mem;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Ast = std.zig.Ast;
const InternPool = @import("InternPool.zig");
const Zir = @import("Zir.zig");
const Module = @import("Module.zig");
@ -1191,7 +1192,7 @@ const Writer = struct {
.field => {
const field_name = self.code.nullTerminatedString(extra.data.field_name_start);
try self.writeInstRef(stream, extra.data.obj_ptr);
try stream.print(", {}", .{std.zig.fmtId(field_name)});
try stream.print(", \"{}\"", .{std.zig.fmtEscapes(field_name)});
},
}
try stream.writeAll(", [");
@ -2468,14 +2469,9 @@ const Writer = struct {
}
fn writeInstRef(self: *Writer, stream: anytype, ref: Zir.Inst.Ref) !void {
var i: usize = @enumToInt(ref);
if (i < Zir.Inst.Ref.typed_value_map.len) {
return stream.print("@{}", .{ref});
}
i -= Zir.Inst.Ref.typed_value_map.len;
return self.writeInstIndex(stream, @intCast(Zir.Inst.Index, i));
const i = @enumToInt(ref);
if (i < InternPool.static_len) return stream.print("@{}", .{@intToEnum(InternPool.Index, i)});
return self.writeInstIndex(stream, i - InternPool.static_len);
}
fn writeInstIndex(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {

View File

@ -512,134 +512,6 @@ pub fn needUnwindTables(target: std.Target) bool {
return target.os.tag == .windows;
}
pub const AtomicPtrAlignmentError = error{
FloatTooBig,
IntTooBig,
BadType,
};
pub const AtomicPtrAlignmentDiagnostics = struct {
bits: u16 = undefined,
max_bits: u16 = undefined,
};
/// If ABI alignment of `ty` is OK for atomic operations, returns 0.
/// Otherwise returns the alignment required on a pointer for the target
/// to perform atomic operations.
// TODO this function does not take into account CPU features, which can affect
// this value. Audit this!
pub fn atomicPtrAlignment(
target: std.Target,
ty: Type,
diags: *AtomicPtrAlignmentDiagnostics,
) AtomicPtrAlignmentError!u32 {
const max_atomic_bits: u16 = switch (target.cpu.arch) {
.avr,
.msp430,
.spu_2,
=> 16,
.arc,
.arm,
.armeb,
.hexagon,
.m68k,
.le32,
.mips,
.mipsel,
.nvptx,
.powerpc,
.powerpcle,
.r600,
.riscv32,
.sparc,
.sparcel,
.tce,
.tcele,
.thumb,
.thumbeb,
.x86,
.xcore,
.amdil,
.hsail,
.spir,
.kalimba,
.lanai,
.shave,
.wasm32,
.renderscript32,
.csky,
.spirv32,
.dxil,
.loongarch32,
.xtensa,
=> 32,
.amdgcn,
.bpfel,
.bpfeb,
.le64,
.mips64,
.mips64el,
.nvptx64,
.powerpc64,
.powerpc64le,
.riscv64,
.sparc64,
.s390x,
.amdil64,
.hsail64,
.spir64,
.wasm64,
.renderscript64,
.ve,
.spirv64,
.loongarch64,
=> 64,
.aarch64,
.aarch64_be,
.aarch64_32,
=> 128,
.x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .cx16)) 128 else 64,
};
var buffer: Type.Payload.Bits = undefined;
const int_ty = switch (ty.zigTypeTag()) {
.Int => ty,
.Enum => ty.intTagType(&buffer),
.Float => {
const bit_count = ty.floatBits(target);
if (bit_count > max_atomic_bits) {
diags.* = .{
.bits = bit_count,
.max_bits = max_atomic_bits,
};
return error.FloatTooBig;
}
return 0;
},
.Bool => return 0,
else => {
if (ty.isPtrAtRuntime()) return 0;
return error.BadType;
},
};
const bit_count = int_ty.intInfo(target).bits;
if (bit_count > max_atomic_bits) {
diags.* = .{
.bits = bit_count,
.max_bits = max_atomic_bits,
};
return error.IntTooBig;
}
return 0;
}
pub fn defaultAddressSpace(
target: std.Target,
context: enum {
@ -777,3 +649,14 @@ pub fn compilerRtIntAbbrev(bits: u16) []const u8 {
else => "o", // Non-standard
};
}
pub fn fnCallConvAllowsZigTypes(target: std.Target, cc: std.builtin.CallingConvention) bool {
return switch (cc) {
.Unspecified, .Async, .Inline => true,
// For now we want to authorize PTX kernel to use zig objects, even if
// we end up exposing the ABI. The goal is to experiment with more
// integrated CPU/GPU code.
.Kernel => target.cpu.arch == .nvptx or target.cpu.arch == .nvptx64,
else => false,
};
}
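
This predicate gates which calling conventions may traffic in Zig-only types across a function boundary. A standalone mirror of the logic for experimentation (same switch, a local copy rather than the real target_util function):

const std = @import("std");

fn allowsZigTypes(arch: std.Target.Cpu.Arch, cc: std.builtin.CallingConvention) bool {
    return switch (cc) {
        .Unspecified, .Async, .Inline => true,
        .Kernel => arch == .nvptx or arch == .nvptx64,
        else => false,
    };
}

test "only unexposed conventions allow Zig types" {
    try std.testing.expect(allowsZigTypes(.x86_64, .Unspecified));
    try std.testing.expect(!allowsZigTypes(.x86_64, .C));
    try std.testing.expect(allowsZigTypes(.nvptx64, .Kernel));
    try std.testing.expect(!allowsZigTypes(.x86_64, .Kernel));
}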

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -17,6 +17,7 @@ test "union that needs padding bytes inside an array" {
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_spirv64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
var as = [_]A{
A{ .B = B{ .D = 1 } },

View File

@ -24,7 +24,7 @@ test "issue 6456" {
.alignment = 0,
.name = name,
.type = usize,
.default_value = &@as(?usize, null),
.default_value = null,
.is_comptime = false,
}};
}

View File

@ -746,8 +746,8 @@ test "peer type resolution: disjoint error sets" {
try expect(error_set_info == .ErrorSet);
try expect(error_set_info.ErrorSet.?.len == 3);
try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
}
{
@ -756,8 +756,8 @@ test "peer type resolution: disjoint error sets" {
try expect(error_set_info == .ErrorSet);
try expect(error_set_info.ErrorSet.?.len == 3);
try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
}
}
@ -778,8 +778,8 @@ test "peer type resolution: error union and error set" {
const error_set_info = @typeInfo(info.ErrorUnion.error_set);
try expect(error_set_info.ErrorSet.?.len == 3);
try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
}
{
@ -790,8 +790,8 @@ test "peer type resolution: error union and error set" {
const error_set_info = @typeInfo(info.ErrorUnion.error_set);
try expect(error_set_info.ErrorSet.?.len == 3);
try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
}
}

View File

@ -214,8 +214,8 @@ test "type info: error set merged" {
try expect(error_set_info == .ErrorSet);
try expect(error_set_info.ErrorSet.?.len == 3);
try expect(mem.eql(u8, error_set_info.ErrorSet.?[0].name, "One"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Three"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[1].name, "Two"));
try expect(mem.eql(u8, error_set_info.ErrorSet.?[2].name, "Three"));
}
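
These expectation updates all reflect the same behavior change: merged error sets now list members in the order the errors enter the global error set rather than sorted by name. A sketch of what the updated tests assert (ordering assumed to follow declaration order, as in the tests above):

const std = @import("std");
const mem = std.mem;

test "merged error sets keep declaration order" {
    const Set1 = error{One};
    const Set2 = error{ Two, Three };
    const errors = @typeInfo(Set1 || Set2).ErrorSet.?;
    try std.testing.expect(errors.len == 3);
    try std.testing.expect(mem.eql(u8, errors[0].name, "One"));
    try std.testing.expect(mem.eql(u8, errors[1].name, "Two"));
    try std.testing.expect(mem.eql(u8, errors[2].name, "Three"));
}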
test "type info: enum info" {

View File

@ -9,4 +9,3 @@ comptime {
// target=native
//
// :3:18: error: no error named 'Bar' in 'error{A}'
// :1:13: note: error set declared here

View File

@ -14,4 +14,4 @@ export fn entry() void {
// :2:5: error: found compile log statement
//
// Compile Log Output:
// @as(*const [3:0]u8, "i32\x00")
// @as(*const [3:0]u8, "i32")

View File

@ -1,5 +1,5 @@
const Set1 = error {A, B};
const Set2 = error {A, C};
const Set1 = error{ A, B };
const Set2 = error{ A, C };
comptime {
var x = Set1.B;
var y = @errSetCast(Set2, x);
@ -10,5 +10,4 @@ comptime {
// backend=stage2
// target=native
//
// :5:13: error: 'error.B' not a member of error set 'error{A,C}'
// :2:14: note: error set declared here
// :5:13: error: 'error.B' not a member of error set 'error{C,A}'

View File

@ -1,5 +1,5 @@
const Set1 = error{A, B};
const Set2 = error{A, C};
const Set1 = error{ A, B };
const Set2 = error{ A, C };
export fn entry() void {
foo(Set1.B);
}
@ -12,5 +12,5 @@ fn foo(set1: Set1) void {
// backend=stage2
// target=native
//
// :7:19: error: expected type 'error{A,C}', found 'error{A,B}'
// :7:19: error: expected type 'error{C,A}', found 'error{A,B}'
// :7:19: note: 'error.B' not a member of destination error set

View File

@ -16,5 +16,4 @@ comptime {
// backend=llvm
// target=native
//
// :11:13: error: 'error.B' not a member of error set 'error{A,C}'
// :5:14: note: error set declared here
// :11:13: error: 'error.B' not a member of error set 'error{C,A}'

View File

@ -24,5 +24,5 @@ export fn bar() void {
//
// :12:16: error: runtime coercion to union 'tmp.U' from non-exhaustive enum
// :1:11: note: enum declared here
// :17:16: error: union 'tmp.U' has no tag with value '15'
// :17:16: error: union 'tmp.U' has no tag with value '@intToEnum(tmp.E, 15)'
// :6:11: note: union declared here

View File

@ -16,9 +16,9 @@ comptime {
// backend=stage2
// target=native
//
// :2:29: error: expected type '[][]const u8', found '*const tuple{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}'
// :2:29: error: expected type '[][]const u8', found '*const struct{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}'
// :2:29: note: cast discards const qualifier
// :6:31: error: expected type '*[2][]const u8', found '*const tuple{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}'
// :6:31: error: expected type '*[2][]const u8', found '*const struct{comptime *const [5:0]u8 = "hello", comptime *const [5:0]u8 = "world"}'
// :6:31: note: cast discards const qualifier
// :11:19: error: expected type '*tmp.S', found '*const struct{comptime a: comptime_int = 2}'
// :11:19: note: cast discards const qualifier

View File

@ -1,8 +1,10 @@
test "example" { return 1; }
test "example" {
return 1;
}
// error
// backend=stage2
// target=native
// is_test=1
//
// :1:25: error: expected type '@typeInfo(@typeInfo(@TypeOf(tmp.test.example)).Fn.return_type.?).ErrorUnion.error_set!void', found 'comptime_int'
// :2:12: error: expected type 'anyerror!void', found 'comptime_int'

View File

@ -1,5 +1,5 @@
test "enum" {
const E = enum(u8) {A, B, _};
const E = enum(u8) { A, B, _ };
_ = @tagName(@intToEnum(E, 5));
}
@ -8,5 +8,5 @@ test "enum" {
// target=native
// is_test=1
//
// :3:9: error: no field with value '5' in enum 'test.enum.E'
// :3:9: error: no field with value '@intToEnum(tmp.test.enum.E, 5)' in enum 'test.enum.E'
// :2:15: note: declared here

View File

@ -41,4 +41,4 @@ pub export fn entry5() void {
// :12:14: error: missing tuple field with index 1
// :17:14: error: missing tuple field with index 1
// :29:14: error: expected at most 2 tuple fields; found 3
// :34:30: error: index '2' out of bounds of tuple 'tuple{comptime comptime_int = 123, u32}'
// :34:30: error: index '2' out of bounds of tuple 'struct{comptime comptime_int = 123, u32}'

View File

@ -7,4 +7,4 @@ export fn entry() void {
// backend=stage2
// target=native
//
// :3:11: error: expected type '@TypeOf(.{})', found 'tuple{comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3}'
// :3:11: error: expected type '@TypeOf(.{})', found 'struct{comptime comptime_int = 1, comptime comptime_int = 2, comptime comptime_int = 3}'

View File

@ -115,7 +115,7 @@ class zig_Slice_SynthProvider:
try: return int(name.removeprefix('[').removesuffix(']'))
except: return -1
def get_child_at_index(self, index):
if index < 0 or index >= self.len: return None
if index not in range(self.len): return None
try: return self.ptr.CreateChildAtOffset('[%d]' % index, index * self.elem_size, self.elem_type)
except: return None
@ -176,7 +176,7 @@ class zig_TaggedUnion_SynthProvider:
def get_child_index(self, name):
try: return ('tag', 'payload').index(name)
except: return -1
def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index >= 0 and index < 2 else None
def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None
# Define Zig Standard Library
@ -196,7 +196,7 @@ class std_SegmentedList_SynthProvider:
except: return -1
def get_child_at_index(self, index):
try:
if index < 0 or index >= self.len: return None
if index not in range(self.len): return None
prealloc_item_count = len(self.prealloc_segment)
if index < prealloc_item_count: return self.prealloc_segment.child[index]
prealloc_exp = prealloc_item_count.bit_length() - 1
@ -231,7 +231,7 @@ class std_MultiArrayList_SynthProvider:
except: return -1
def get_child_at_index(self, index):
try:
if index < 0 or index >= self.len: return None
if index not in range(self.len): return None
offset = 0
data = lldb.SBData()
for field in self.entry_type.fields:
@ -266,7 +266,7 @@ class std_MultiArrayList_Slice_SynthProvider:
except: return -1
def get_child_at_index(self, index):
try:
if index < 0 or index >= self.len: return None
if index not in range(self.len): return None
data = lldb.SBData()
for field in self.entry_type.fields:
field_type = field.type.GetPointeeType()
@ -328,7 +328,7 @@ class std_Entry_SynthProvider:
def has_children(self): return self.num_children() != 0
def num_children(self): return len(self.children)
def get_child_index(self, name): return self.indices.get(name)
def get_child_at_index(self, index): return self.children[index].deref if index >= 0 and index < len(self.children) else None
def get_child_at_index(self, index): return self.children[index].deref if index in range(len(self.children)) else None
# Define Zig Stage2 Compiler
@ -345,11 +345,17 @@ class TagAndPayload_SynthProvider:
def get_child_index(self, name):
try: return ('tag', 'payload').index(name)
except: return -1
def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index >= 0 and index < 2 else None
def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None
def Inst_Ref_SummaryProvider(value, _=None):
def Zir_Inst__Zir_Inst_Ref_SummaryProvider(value, _=None):
members = value.type.enum_members
return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned - len(members))
# ignore .var_args_param_type and .none
return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 2 - len(members))
def Air_Inst__Air_Inst_Ref_SummaryProvider(value, _=None):
members = value.type.enum_members
# ignore .var_args_param_type and .none
return value if any(value.unsigned == member.unsigned for member in members) else 'instructions[%d]' % (value.unsigned + 2 - len(members))
class Module_Decl__Module_Decl_Index_SynthProvider:
def __init__(self, value, _=None): self.value = value
@ -359,7 +365,7 @@ class Module_Decl__Module_Decl_Index_SynthProvider:
mod = frame.FindVariable('mod') or frame.FindVariable('module')
if mod: break
else: return
self.ptr = mod.GetChildMemberWithName('allocated_decls').GetChildAtIndex(self.value.unsigned).Clone('decl')
self.ptr = mod.GetChildMemberWithName('allocated_decls').GetChildAtIndex(self.value.unsigned).address_of.Clone('decl')
except: pass
def has_children(self): return True
def num_children(self): return 1
@ -392,7 +398,7 @@ class TagOrPayloadPtr_SynthProvider:
def get_child_index(self, name):
try: return ('tag', 'payload').index(name)
except: return -1
def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index >= 0 and index < 2 else None
def get_child_at_index(self, index): return (self.tag, self.payload)[index] if index in range(2) else None
def Module_Decl_name(decl):
error = lldb.SBError()
@ -407,6 +413,89 @@ def Module_Decl_RenderFullyQualifiedName(decl): return '.'.join((Module_Namespac
def OwnerDecl_RenderFullyQualifiedName(payload): return Module_Decl_RenderFullyQualifiedName(payload.GetChildMemberWithName('owner_decl').GetChildMemberWithName('decl'))
def InternPool_Find(thread):
for frame in thread:
ip = frame.FindVariable('ip') or frame.FindVariable('intern_pool')
if ip: return ip
mod = frame.FindVariable('mod') or frame.FindVariable('module')
if mod:
ip = mod.GetChildMemberWithName('intern_pool')
if ip: return ip
class InternPool_Index_SynthProvider:
def __init__(self, value, _=None): self.value = value
def update(self):
try:
index_type = self.value.type
for helper in self.value.target.FindFunctions('%s.dbHelper' % index_type.name, lldb.eFunctionNameTypeFull):
ptr_self_type, ptr_tag_to_encoding_map_type = helper.function.type.GetFunctionArgumentTypes()
if ptr_self_type.GetPointeeType() == index_type: break
else: return
tag_to_encoding_map = {field.name: field.type for field in ptr_tag_to_encoding_map_type.GetPointeeType().fields}
ip = InternPool_Find(self.value.thread)
if not ip: return
self.item = ip.GetChildMemberWithName('items').GetChildAtIndex(self.value.unsigned)
extra = ip.GetChildMemberWithName('extra').GetChildMemberWithName('items')
self.tag = self.item.GetChildMemberWithName('tag').Clone('tag')
self.data = None
self.trailing = None
data = self.item.GetChildMemberWithName('data')
encoding_type = tag_to_encoding_map[self.tag.value]
dynamic_values = {}
for encoding_field in encoding_type.fields:
if encoding_field.name == 'data':
if encoding_field.type.IsPointerType():
data_type = encoding_field.type.GetPointeeType()
extra_index = data.unsigned
self.data = extra.GetChildAtIndex(extra_index).Cast(data_type).Clone('data')
extra_index += data_type.num_fields
else:
self.data = data.Cast(encoding_field.type).Clone('data')
elif encoding_field.name == 'trailing':
trailing_data = lldb.SBData()
for trailing_field in encoding_field.type.fields:
trailing_data.Append(extra.GetChildAtIndex(extra_index).address_of.data)
trailing_len = dynamic_values['trailing.%s.len' % trailing_field.name].unsigned
trailing_data.Append(lldb.SBData.CreateDataFromInt(trailing_len, trailing_data.GetAddressByteSize()))
extra_index += trailing_len
self.trailing = self.data.CreateValueFromData('trailing', trailing_data, encoding_field.type)
else:
for path in encoding_field.type.GetPointeeType().name.removeprefix('%s::' % encoding_type.name).removeprefix('%s.' % encoding_type.name).partition('__')[0].split(' orelse '):
if path.startswith('data.'):
root = self.data
path = path[len('data'):]
else: return
dynamic_value = root.GetValueForExpressionPath(path)
if dynamic_value:
dynamic_values[encoding_field.name] = dynamic_value
break
except: pass
def has_children(self): return True
def num_children(self): return 2 + (self.trailing is not None)
def get_child_index(self, name):
try: return ('tag', 'data', 'trailing').index(name)
except: return -1
def get_child_at_index(self, index): return (self.tag, self.data, self.trailing)[index] if index in range(3) else None
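
The provider locates a `dbHelper` function and reads the layout of each tag's payload from the helper's second parameter type. A hypothetical Zig-side shape of such a helper (field names invented for illustration; the real map is defined in InternPool.zig):

const Index = enum(u32) { _ };
const ArrayInfo = struct { len: u32, child: u32 };

/// Never called at runtime; it exists so a debugger can recover, purely
/// from this parameter's type information, how each tag's `data` is
/// encoded: a plain value, or (when the field is a pointer) an index
/// into `extra` where a struct of the pointee type begins.
pub fn dbHelper(self: *Index, tag_to_encoding_map: *struct {
    type_int_signed: struct { data: u32 },
    type_array: struct { data: *ArrayInfo },
}) void {
    _ = self;
    _ = tag_to_encoding_map;
}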
def InternPool_NullTerminatedString_SummaryProvider(value, _=None):
try:
ip = InternPool_Find(value.thread)
if not ip: return
items = ip.GetChildMemberWithName('string_bytes').GetChildMemberWithName('items')
b = bytearray()
i = 0
while True:
x = items.GetChildAtIndex(value.unsigned + i).GetValueAsUnsigned()
if x == 0: break
b.append(x)
i += 1
s = b.decode(encoding='utf8', errors='backslashreplace')
s1 = s if s.isprintable() else ''.join((c if c.isprintable() else '\\x%02x' % ord(c) for c in s))
return '"%s"' % s1
except:
pass
def type_Type_pointer(payload):
pointee_type = payload.GetChildMemberWithName('pointee_type')
sentinel = payload.GetChildMemberWithName('sentinel').GetChildMemberWithName('child')
@ -468,8 +557,8 @@ type_tag_handlers = {
'empty_struct_literal': lambda payload: '@TypeOf(.{})',
'anyerror_void_error_union': lambda payload: 'anyerror!void',
'const_slice_u8': lambda payload: '[]const u8',
'const_slice_u8_sentinel_0': lambda payload: '[:0]const u8',
'slice_const_u8': lambda payload: '[]const u8',
'slice_const_u8_sentinel_0': lambda payload: '[:0]const u8',
'fn_noreturn_no_args': lambda payload: 'fn() noreturn',
'fn_void_no_args': lambda payload: 'fn() void',
'fn_naked_noreturn_no_args': lambda payload: 'fn() callconv(.Naked) noreturn',
@ -495,7 +584,7 @@ type_tag_handlers = {
'many_mut_pointer': lambda payload: '[*]%s' % type_Type_SummaryProvider(payload),
'c_const_pointer': lambda payload: '[*c]const %s' % type_Type_SummaryProvider(payload),
'c_mut_pointer': lambda payload: '[*c]%s' % type_Type_SummaryProvider(payload),
'const_slice': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload),
'slice_const': lambda payload: '[]const %s' % type_Type_SummaryProvider(payload),
'mut_slice': lambda payload: '[]%s' % type_Type_SummaryProvider(payload),
'int_signed': lambda payload: 'i%d' % payload.unsigned,
'int_unsigned': lambda payload: 'u%d' % payload.unsigned,
@ -611,13 +700,19 @@ def __lldb_init_module(debugger, _=None):
add(debugger, category='zig.stage2', type='Zir.Inst', identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Zir\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', regex=True, type='^Zir\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
add(debugger, category='zig.stage2', type='Zir.Inst::Zir.Inst.Ref', identifier='Inst_Ref', summary=True)
add(debugger, category='zig.stage2', type='Zir.Inst::Zir.Inst.Ref', summary=True)
add(debugger, category='zig.stage2', type='Air.Inst', identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', type='Air.Inst::Air.Inst.Ref', summary=True)
add(debugger, category='zig.stage2', regex=True, type=MultiArrayList_Entry('Air\\.Inst'), identifier='TagAndPayload', synth=True, inline_children=True, summary=True)
add(debugger, category='zig.stage2', regex=True, type='^Air\\.Inst\\.Data\\.Data__struct_[1-9][0-9]*$', inline_children=True, summary=True)
add(debugger, category='zig.stage2', type='Module.Decl::Module.Decl.Index', synth=True)
add(debugger, category='zig.stage2', type='type.Type', identifier='TagOrPayloadPtr', synth=True)
add(debugger, category='zig.stage2', type='type.Type', summary=True)
add(debugger, category='zig.stage2', type='value.Value', identifier='TagOrPayloadPtr', synth=True)
add(debugger, category='zig.stage2', type='value.Value', summary=True)
add(debugger, category='zig.stage2', type='Module.LazySrcLoc', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='InternPool.Index', synth=True)
add(debugger, category='zig.stage2', type='InternPool.NullTerminatedString', summary=True)
add(debugger, category='zig.stage2', type='InternPool.Key', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='InternPool.Key.Int.Storage', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='InternPool.Key.ErrorUnion.Value', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='InternPool.Key.Float.Storage', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='InternPool.Key.Ptr.Addr', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='InternPool.Key.Aggregate.Storage', identifier='zig_TaggedUnion', synth=True)
add(debugger, category='zig.stage2', type='arch.x86_64.CodeGen.MCValue', identifier='zig_TaggedUnion', synth=True, inline_children=True, summary=True)

View File

@ -18,7 +18,7 @@ class TypePrinter:
'many_mut_pointer': 'Type.Payload.ElemType',
'c_const_pointer': 'Type.Payload.ElemType',
'c_mut_pointer': 'Type.Payload.ElemType',
'const_slice': 'Type.Payload.ElemType',
'slice_const': 'Type.Payload.ElemType',
'mut_slice': 'Type.Payload.ElemType',
'optional': 'Type.Payload.ElemType',
'optional_single_mut_pointer': 'Type.Payload.ElemType',