From 5d6f7b44c19b064a543b0c1eecb6ef5c671b612e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 8 Jul 2021 20:42:47 -0700 Subject: [PATCH 01/53] stage2: rework AIR memory layout This commit changes the AIR file and the documentation of the memory layout. The actual work of modifying the surrounding code (in Sema and codegen) is not yet done. --- src/air.zig => BRANCH_TODO | 548 +++---------------------------------- CMakeLists.txt | 2 +- src/Air.zig | 335 +++++++++++++++++++++++ src/Module.zig | 2 +- src/Sema.zig | 2 +- src/Zir.zig | 3 +- src/codegen.zig | 2 +- src/codegen/c.zig | 3 +- src/codegen/llvm.zig | 2 +- src/codegen/spirv.zig | 3 +- src/codegen/wasm.zig | 3 +- src/link/Elf.zig | 2 +- src/liveness.zig | 2 +- src/register_manager.zig | 2 +- src/value.zig | 2 +- 15 files changed, 392 insertions(+), 521 deletions(-) rename src/air.zig => BRANCH_TODO (68%) create mode 100644 src/Air.zig diff --git a/src/air.zig b/BRANCH_TODO similarity index 68% rename from src/air.zig rename to BRANCH_TODO index e73367945b..ed77d6bd03 100644 --- a/src/air.zig +++ b/BRANCH_TODO @@ -1,18 +1,6 @@ -const std = @import("std"); -const Value = @import("value.zig").Value; -const Type = @import("type.zig").Type; -const Module = @import("Module.zig"); -const assert = std.debug.assert; -const codegen = @import("codegen.zig"); -const ast = std.zig.ast; + * be sure to test debug info of parameters + -/// These are in-memory, analyzed instructions. See `zir.Inst` for the representation -/// of instructions that correspond to the ZIR text format. -/// This struct owns the `Value` and `Type` memory. When the struct is deallocated, -/// so are the `Value` and `Type`. The value of a constant must be copied into -/// a memory location for the value to survive after a const instruction. -pub const Inst = struct { - tag: Tag, /// Each bit represents the index of an `Inst` parameter in the `args` field. /// If a bit is set, it marks the end of the lifetime of the corresponding /// instruction parameter. For example, 0b101 means that the first and @@ -24,8 +12,7 @@ pub const Inst = struct { /// If bit 14 (0bx1xx_xxxx_xxxx_xxxx) is set, it means this is a special case and the /// lifetimes of operands are encoded elsewhere. deaths: DeathsInt = undefined, - ty: Type, - src: Module.LazySrcLoc, + pub const DeathsInt = u16; pub const DeathsBitIndex = std.math.Log2Int(DeathsInt); @@ -50,96 +37,25 @@ pub const Inst = struct { return (self.deaths & (1 << deaths_bits)) != 0; } - pub const Tag = enum { - add, - addwrap, - alloc, - arg, - assembly, - bit_and, - bitcast, - bit_or, - block, - br, - /// Same as `br` except the operand is a list of instructions to be treated as - /// a flat block; that is there is only 1 break instruction from the block, and - /// it is implied to be after the last instruction, and the last instruction is - /// the break operand. - /// This instruction exists for late-stage semantic analysis patch ups, to - /// replace one br operand with multiple instructions, without moving anything else around. 
- br_block_flat, - breakpoint, - br_void, - call, - cmp_lt, - cmp_lte, - cmp_eq, - cmp_gte, - cmp_gt, - cmp_neq, - condbr, - constant, - dbg_stmt, - /// ?T => bool - is_null, - /// ?T => bool (inverted logic) - is_non_null, - /// *?T => bool - is_null_ptr, - /// *?T => bool (inverted logic) - is_non_null_ptr, - /// E!T => bool - is_err, - /// E!T => bool (inverted logic) - is_non_err, - /// *E!T => bool - is_err_ptr, - /// *E!T => bool (inverted logic) - is_non_err_ptr, - bool_and, - bool_or, - /// Read a value from a pointer. - load, - /// A labeled block of code that loops forever. At the end of the body it is implied - /// to repeat; no explicit "repeat" instruction terminates loop bodies. - loop, - ptrtoint, - ref, - ret, - retvoid, - varptr, - /// Write a value to a pointer. LHS is pointer, RHS is value. - store, - sub, - subwrap, - unreach, - mul, - mulwrap, - div, - not, - floatcast, - intcast, - /// ?T => T - optional_payload, - /// *?T => *T - optional_payload_ptr, - wrap_optional, - /// E!T -> T - unwrap_errunion_payload, - /// E!T -> E - unwrap_errunion_err, - /// *(E!T) -> *T - unwrap_errunion_payload_ptr, - /// *(E!T) -> E - unwrap_errunion_err_ptr, - /// wrap from T to E!T - wrap_errunion_payload, - /// wrap from E to E!T - wrap_errunion_err, - xor, - switchbr, - /// Given a pointer to a struct and a field index, returns a pointer to the field. - struct_field_ptr, + pub fn operandCount(base: *Inst) usize { + inline for (@typeInfo(Tag).Enum.fields) |field| { + const tag = @intToEnum(Tag, field.value); + if (tag == base.tag) { + return @fieldParentPtr(tag.Type(), "base", base).operandCount(); + } + } + unreachable; + } + + pub fn getOperand(base: *Inst, index: usize) ?*Inst { + inline for (@typeInfo(Tag).Enum.fields) |field| { + const tag = @intToEnum(Tag, field.value); + if (tag == base.tag) { + return @fieldParentPtr(tag.Type(), "base", base).getOperand(index); + } + } + unreachable; + } pub fn Type(tag: Tag) type { return switch (tag) { @@ -214,42 +130,6 @@ pub const Inst = struct { }; } - pub fn fromCmpOp(op: std.math.CompareOperator) Tag { - return switch (op) { - .lt => .cmp_lt, - .lte => .cmp_lte, - .eq => .cmp_eq, - .gte => .cmp_gte, - .gt => .cmp_gt, - .neq => .cmp_neq, - }; - } - }; - - /// Prefer `castTag` to this. 
- pub fn cast(base: *Inst, comptime T: type) ?*T { - if (@hasField(T, "base_tag")) { - return base.castTag(T.base_tag); - } - inline for (@typeInfo(Tag).Enum.fields) |field| { - const tag = @intToEnum(Tag, field.value); - if (base.tag == tag) { - if (T == tag.Type()) { - return @fieldParentPtr(T, "base", base); - } - return null; - } - } - unreachable; - } - - pub fn castTag(base: *Inst, comptime tag: Tag) ?*tag.Type() { - if (base.tag == tag) { - return @fieldParentPtr(tag.Type(), "base", base); - } - return null; - } - pub fn Args(comptime T: type) type { return std.meta.fieldInfo(T, .args).field_type; } @@ -265,38 +145,6 @@ pub const Inst = struct { return inst.val; } - pub fn cmpOperator(base: *Inst) ?std.math.CompareOperator { - return switch (base.tag) { - .cmp_lt => .lt, - .cmp_lte => .lte, - .cmp_eq => .eq, - .cmp_gte => .gte, - .cmp_gt => .gt, - .cmp_neq => .neq, - else => null, - }; - } - - pub fn operandCount(base: *Inst) usize { - inline for (@typeInfo(Tag).Enum.fields) |field| { - const tag = @intToEnum(Tag, field.value); - if (tag == base.tag) { - return @fieldParentPtr(tag.Type(), "base", base).operandCount(); - } - } - unreachable; - } - - pub fn getOperand(base: *Inst, index: usize) ?*Inst { - inline for (@typeInfo(Tag).Enum.fields) |field| { - const tag = @intToEnum(Tag, field.value); - if (tag == base.tag) { - return @fieldParentPtr(tag.Type(), "base", base).getOperand(index); - } - } - unreachable; - } - pub fn breakBlock(base: *Inst) ?*Block { return switch (base.tag) { .br => base.castTag(.br).?.block, @@ -306,115 +154,6 @@ pub const Inst = struct { }; } - pub const NoOp = struct { - base: Inst, - - pub fn operandCount(self: *const NoOp) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const NoOp, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - - pub const UnOp = struct { - base: Inst, - operand: *Inst, - - pub fn operandCount(self: *const UnOp) usize { - _ = self; - return 1; - } - pub fn getOperand(self: *const UnOp, index: usize) ?*Inst { - if (index == 0) - return self.operand; - return null; - } - }; - - pub const BinOp = struct { - base: Inst, - lhs: *Inst, - rhs: *Inst, - - pub fn operandCount(self: *const BinOp) usize { - _ = self; - return 2; - } - pub fn getOperand(self: *const BinOp, index: usize) ?*Inst { - var i = index; - - if (i < 1) - return self.lhs; - i -= 1; - - if (i < 1) - return self.rhs; - i -= 1; - - return null; - } - }; - - pub const Arg = struct { - pub const base_tag = Tag.arg; - - base: Inst, - /// This exists to be emitted into debug info. 
- name: [*:0]const u8, - - pub fn operandCount(self: *const Arg) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const Arg, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - - pub const Assembly = struct { - pub const base_tag = Tag.assembly; - - base: Inst, - asm_source: []const u8, - is_volatile: bool, - output_constraint: ?[]const u8, - inputs: []const []const u8, - clobbers: []const []const u8, - args: []const *Inst, - - pub fn operandCount(self: *const Assembly) usize { - return self.args.len; - } - pub fn getOperand(self: *const Assembly, index: usize) ?*Inst { - if (index < self.args.len) - return self.args[index]; - return null; - } - }; - - pub const Block = struct { - pub const base_tag = Tag.block; - - base: Inst, - body: Body, - - pub fn operandCount(self: *const Block) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const Block, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - pub const convertable_br_size = std.math.max(@sizeOf(BrBlockFlat), @sizeOf(Br)); pub const convertable_br_align = std.math.max(@alignOf(BrBlockFlat), @alignOf(Br)); comptime { @@ -439,241 +178,42 @@ pub const Inst = struct { } }; - pub const Br = struct { - pub const base_tag = Tag.br; + /// Same as `br` except the operand is a list of instructions to be treated as + /// a flat block; that is there is only 1 break instruction from the block, and + /// it is implied to be after the last instruction, and the last instruction is + /// the break operand. + /// This instruction exists for late-stage semantic analysis patch ups, to + /// replace one br operand with multiple instructions, without moving anything else around. + br_block_flat, + + + + pub const Assembly = struct { + pub const base_tag = Tag.assembly; base: Inst, - block: *Block, - operand: *Inst, - - pub fn operandCount(self: *const Br) usize { - _ = self; - return 1; - } - pub fn getOperand(self: *const Br, index: usize) ?*Inst { - _ = self; - if (index == 0) - return self.operand; - return null; - } - }; - - pub const BrVoid = struct { - pub const base_tag = Tag.br_void; - - base: Inst, - block: *Block, - - pub fn operandCount(self: *const BrVoid) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const BrVoid, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - - pub const Call = struct { - pub const base_tag = Tag.call; - - base: Inst, - func: *Inst, + asm_source: []const u8, + is_volatile: bool, + output_constraint: ?[]const u8, + inputs: []const []const u8, + clobbers: []const []const u8, args: []const *Inst, - pub fn operandCount(self: *const Call) usize { - return self.args.len + 1; + pub fn operandCount(self: *const Assembly) usize { + return self.args.len; } - pub fn getOperand(self: *const Call, index: usize) ?*Inst { - var i = index; - - if (i < 1) - return self.func; - i -= 1; - - if (i < self.args.len) - return self.args[i]; - i -= self.args.len; - - return null; - } - }; - - pub const CondBr = struct { - pub const base_tag = Tag.condbr; - - base: Inst, - condition: *Inst, - then_body: Body, - else_body: Body, - /// Set of instructions whose lifetimes end at the start of one of the branches. - /// The `then` branch is first: `deaths[0..then_death_count]`. - /// The `else` branch is next: `(deaths + then_death_count)[0..else_death_count]`. 
- deaths: [*]*Inst = undefined, - then_death_count: u32 = 0, - else_death_count: u32 = 0, - - pub fn operandCount(self: *const CondBr) usize { - _ = self; - return 1; - } - pub fn getOperand(self: *const CondBr, index: usize) ?*Inst { - var i = index; - - if (i < 1) - return self.condition; - i -= 1; - - return null; - } - pub fn thenDeaths(self: *const CondBr) []*Inst { - return self.deaths[0..self.then_death_count]; - } - pub fn elseDeaths(self: *const CondBr) []*Inst { - return (self.deaths + self.then_death_count)[0..self.else_death_count]; - } - }; - - pub const Constant = struct { - pub const base_tag = Tag.constant; - - base: Inst, - val: Value, - - pub fn operandCount(self: *const Constant) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const Constant, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - - pub const Loop = struct { - pub const base_tag = Tag.loop; - - base: Inst, - body: Body, - - pub fn operandCount(self: *const Loop) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const Loop, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - - pub const VarPtr = struct { - pub const base_tag = Tag.varptr; - - base: Inst, - variable: *Module.Var, - - pub fn operandCount(self: *const VarPtr) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const VarPtr, index: usize) ?*Inst { - _ = self; - _ = index; + pub fn getOperand(self: *const Assembly, index: usize) ?*Inst { + if (index < self.args.len) + return self.args[index]; return null; } }; pub const StructFieldPtr = struct { - pub const base_tag = Tag.struct_field_ptr; - - base: Inst, struct_ptr: *Inst, field_index: usize, - - pub fn operandCount(self: *const StructFieldPtr) usize { - _ = self; - return 1; - } - pub fn getOperand(self: *const StructFieldPtr, index: usize) ?*Inst { - _ = self; - _ = index; - var i = index; - - if (i < 1) - return self.struct_ptr; - i -= 1; - - return null; - } }; - pub const SwitchBr = struct { - pub const base_tag = Tag.switchbr; - - base: Inst, - target: *Inst, - cases: []Case, - /// Set of instructions whose lifetimes end at the start of one of the cases. - /// In same order as cases, deaths[0..case_0_count, case_0_count .. case_1_count, ... ]. - deaths: [*]*Inst = undefined, - else_index: u32 = 0, - else_deaths: u32 = 0, - else_body: Body, - - pub const Case = struct { - item: Value, - body: Body, - index: u32 = 0, - deaths: u32 = 0, - }; - - pub fn operandCount(self: *const SwitchBr) usize { - _ = self; - return 1; - } - pub fn getOperand(self: *const SwitchBr, index: usize) ?*Inst { - var i = index; - - if (i < 1) - return self.target; - i -= 1; - - return null; - } - pub fn caseDeaths(self: *const SwitchBr, case_index: usize) []*Inst { - const case = self.cases[case_index]; - return (self.deaths + case.index)[0..case.deaths]; - } - pub fn elseDeaths(self: *const SwitchBr) []*Inst { - return (self.deaths + self.else_index)[0..self.else_deaths]; - } - }; - - pub const DbgStmt = struct { - pub const base_tag = Tag.dbg_stmt; - - base: Inst, - line: u32, - column: u32, - - pub fn operandCount(self: *const DbgStmt) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const DbgStmt, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; -}; - -pub const Body = struct { - instructions: []*Inst, -}; /// For debugging purposes, prints a function representation to stderr. 
pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void { diff --git a/CMakeLists.txt b/CMakeLists.txt index 44417e4159..39db11773c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -564,7 +564,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/src/codegen/x86_64.zig" "${CMAKE_SOURCE_DIR}/src/glibc.zig" "${CMAKE_SOURCE_DIR}/src/introspect.zig" - "${CMAKE_SOURCE_DIR}/src/air.zig" + "${CMAKE_SOURCE_DIR}/src/Air.zig" "${CMAKE_SOURCE_DIR}/src/libc_installation.zig" "${CMAKE_SOURCE_DIR}/src/libcxx.zig" "${CMAKE_SOURCE_DIR}/src/libtsan.zig" diff --git a/src/Air.zig b/src/Air.zig new file mode 100644 index 0000000000..97a5824abc --- /dev/null +++ b/src/Air.zig @@ -0,0 +1,335 @@ +//! Analyzed Intermediate Representation. +//! Sema inputs ZIR and outputs AIR. + +const std = @import("std"); +const Value = @import("value.zig").Value; +const Type = @import("type.zig").Type; +const Module = @import("Module.zig"); +const assert = std.debug.assert; +const Air = @This(); + +instructions: std.MultiArrayList(Inst).Slice, +/// The meaning of this data is determined by `Inst.Tag` value. +extra: []u32, +values: []Value, +variables: []*Module.Var, + +pub const Inst = struct { + tag: Tag, + data: Data, + + pub const Tag = enum(u8) { + /// Float or integer addition. For integers, wrapping is undefined behavior. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + add, + /// Integer addition. Wrapping is defined to be twos complement wrapping. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + addwrap, + /// Float or integer subtraction. For integers, wrapping is undefined behavior. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + sub, + /// Integer subtraction. Wrapping is defined to be twos complement wrapping. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + subwrap, + /// Float or integer multiplication. For integers, wrapping is undefined behavior. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + mul, + /// Integer multiplication. Wrapping is defined to be twos complement wrapping. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + mulwrap, + /// Integer or float division. For integers, wrapping is undefined behavior. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + div, + /// Allocates stack local memory. + /// Uses the `ty` field. + alloc, + /// TODO + assembly, + /// Bitwise AND. `&`. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + bit_and, + /// Bitwise OR. `|`. + /// Result type is the same as both operands. + /// Uses the `bin_op` field. + bit_or, + /// Bitwise XOR. `^` + /// Uses the `bin_op` field. + xor, + /// Boolean or binary NOT. + /// Uses the `ty_op` field. + not, + /// Reinterpret the memory representation of a value as a different type. + /// Uses the `ty_op` field. + bitcast, + /// Uses the `ty_pl` field with payload `Block`. + block, + /// Return from a block with a result. + /// Result type is always noreturn. + /// Uses the `br` field. + br, + /// Lowers to a hardware trap instruction, or the next best thing. + /// Result type is always void. + breakpoint, + /// Function call. + /// Result type is the return type of the function being called. + /// Uses the `pl_op` field with the `Call` payload. operand is the callee. + call, + /// `<`. Result type is always bool. + /// Uses the `bin_op` field. + cmp_lt, + /// `<=`. 
Result type is always bool. + /// Uses the `bin_op` field. + cmp_lte, + /// `==`. Result type is always bool. + /// Uses the `bin_op` field. + cmp_eq, + /// `>=`. Result type is always bool. + /// Uses the `bin_op` field. + cmp_gte, + /// `>`. Result type is always bool. + /// Uses the `bin_op` field. + cmp_gt, + /// `!=`. Result type is always bool. + /// Uses the `bin_op` field. + cmp_neq, + /// Conditional branch. + /// Result type is always noreturn. + /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`. + cond_br, + /// Switch branch. + /// Result type is always noreturn. + /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`. + switch_br, + /// A comptime-known value. Uses the `ty_pl` field, payload is index of + /// `values` array. + constant, + /// Notes the beginning of a source code statement and marks the line and column. + /// Result type is always void. + /// Uses the `dbg_stmt` field. + dbg_stmt, + /// ?T => bool + /// Result type is always bool. + /// Uses the `un_op` field. + is_null, + /// ?T => bool (inverted logic) + /// Result type is always bool. + /// Uses the `un_op` field. + is_non_null, + /// *?T => bool + /// Result type is always bool. + /// Uses the `un_op` field. + is_null_ptr, + /// *?T => bool (inverted logic) + /// Result type is always bool. + /// Uses the `un_op` field. + is_non_null_ptr, + /// E!T => bool + /// Result type is always bool. + /// Uses the `un_op` field. + is_err, + /// E!T => bool (inverted logic) + /// Result type is always bool. + /// Uses the `un_op` field. + is_non_err, + /// *E!T => bool + /// Result type is always bool. + /// Uses the `un_op` field. + is_err_ptr, + /// *E!T => bool (inverted logic) + /// Result type is always bool. + /// Uses the `un_op` field. + is_non_err_ptr, + /// Result type is always bool. + /// Uses the `bin_op` field. + bool_and, + /// Result type is always bool. + /// Uses the `bin_op` field. + bool_or, + /// Read a value from a pointer. + /// Uses the `ty_op` field. + load, + /// A labeled block of code that loops forever. At the end of the body it is implied + /// to repeat; no explicit "repeat" instruction terminates loop bodies. + /// Result type is always noreturn. + /// Uses the `ty_pl` field. Payload is `Block`. + loop, + /// Converts a pointer to its address. Result type is always `usize`. + /// Uses the `un_op` field. + ptrtoint, + /// Stores a value onto the stack and returns a pointer to it. + /// TODO audit where this AIR instruction is emitted, maybe it should instead be emitting + /// alloca instruction and storing to the alloca. + /// Uses the `ty_op` field. + ref, + /// Return a value from a function. + /// Result type is always noreturn. + /// Uses the `un_op` field. + ret, + /// Returns a pointer to a global variable. + /// Uses the `ty_pl` field. Index is into the `variables` array. + varptr, + /// Write a value to a pointer. LHS is pointer, RHS is value. + /// Result type is always void. + /// Uses the `bin_op` field. + store, + /// Indicates the program counter will never get to this instruction. + /// Result type is always noreturn. + unreach, + /// Convert from one float type to another. + /// Uses the `ty_op` field. + floatcast, + /// TODO audit uses of this. We should have explicit instructions for integer + /// widening and truncating. + /// Uses the `ty_op` field. + intcast, + /// ?T => T. If the value is null, undefined behavior. + /// Uses the `ty_op` field. + optional_payload, + /// *?T => *T. If the value is null, undefined behavior. 
+ /// Uses the `ty_op` field. + optional_payload_ptr, + /// Given a payload value, wraps it in an optional type. + /// Uses the `ty_op` field. + wrap_optional, + /// E!T -> T. If the value is an error, undefined behavior. + /// Uses the `ty_op` field. + unwrap_errunion_payload, + /// E!T -> E. If the value is not an error, undefined behavior. + /// Uses the `ty_op` field. + unwrap_errunion_err, + /// *(E!T) -> *T. If the value is an error, undefined behavior. + /// Uses the `ty_op` field. + unwrap_errunion_payload_ptr, + /// *(E!T) -> E. If the value is not an error, undefined behavior. + /// Uses the `ty_op` field. + unwrap_errunion_err_ptr, + /// wrap from T to E!T + /// Uses the `ty_op` field. + wrap_errunion_payload, + /// wrap from E to E!T + /// Uses the `ty_op` field. + wrap_errunion_err, + /// Given a pointer to a struct and a field index, returns a pointer to the field. + /// Uses the `ty_pl` field, payload is `StructField`. + struct_field_ptr, + + pub fn fromCmpOp(op: std.math.CompareOperator) Tag { + return switch (op) { + .lt => .cmp_lt, + .lte => .cmp_lte, + .eq => .cmp_eq, + .gte => .cmp_gte, + .gt => .cmp_gt, + .neq => .cmp_neq, + }; + } + }; + + /// The position of an AIR instruction within the `Air` instructions array. + pub const Index = u32; + + /// All instructions have an 8-byte payload, which is contained within + /// this union. `Tag` determines which union field is active, as well as + /// how to interpret the data within. + pub const Data = union { + un_op: Ref, + bin_op: struct { + lhs: Ref, + rhs: Ref, + }, + ty: Type, + ty_op: struct { + ty: Ref, + operand: Ref, + }, + ty_pl: struct { + ty: Ref, + // Index into a different array. + payload: u32, + }, + br: struct { + block_inst: Index, + operand: Ref, + }, + pl_op: struct { + operand: Ref, + payload: u32, + }, + constant: struct { + ty: Type, + val: Value, + }, + dbg_stmt: struct { + line: u32, + column: u32, + }, + + // Make sure we don't accidentally add a field to make this union + // bigger than expected. Note that in Debug builds, Zig is allowed + // to insert a secret field for safety checks. + comptime { + if (std.builtin.mode != .Debug) { + assert(@sizeOf(Data) == 8); + } + } + }; + + pub fn cmpOperator(base: *Inst) ?std.math.CompareOperator { + return switch (base.tag) { + .cmp_lt => .lt, + .cmp_lte => .lte, + .cmp_eq => .eq, + .cmp_gte => .gte, + .cmp_gt => .gt, + .cmp_neq => .neq, + else => null, + }; + } + + /// Trailing is a list of instruction indexes for every `body_len`. + pub const Block = struct { + body_len: u32, + }; + + /// Trailing is a list of `Ref` for every `args_len`. + pub const Call = struct { + args_len: u32, + }; + + /// This data is stored inside extra, with two sets of trailing `Ref`: + /// * 0. the then body, according to `then_body_len`. + /// * 1. the else body, according to `else_body_len`. + pub const CondBr = struct { + condition: Ref, + then_body_len: u32, + else_body_len: u32, + }; + + /// Trailing: + /// * 0. `Case` for each `cases_len` + /// * 1. the else body, according to `else_body_len`. + pub const SwitchBr = struct { + cases_len: u32, + else_body_len: u32, + + /// Trailing: + /// * instruction index for each `body_len`. 
+ pub const Case = struct { + item: Ref, + body_len: u32, + }; + }; + + pub const StructField = struct { + struct_ptr: Ref, + field_index: u32, + }; +}; diff --git a/src/Module.zig b/src/Module.zig index a1f6887fbd..2f1dc0b33b 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -21,7 +21,7 @@ const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); -const ir = @import("air.zig"); +const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const trace = @import("tracy.zig").trace; const AstGen = @import("AstGen.zig"); diff --git a/src/Sema.zig b/src/Sema.zig index d7ce9fdf4f..85cb4aa423 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -52,7 +52,7 @@ const Sema = @This(); const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const TypedValue = @import("TypedValue.zig"); -const ir = @import("air.zig"); +const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); const Inst = ir.Inst; diff --git a/src/Zir.zig b/src/Zir.zig index db851cfa4b..b975500e2f 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -22,7 +22,6 @@ const Zir = @This(); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; const TypedValue = @import("TypedValue.zig"); -const ir = @import("air.zig"); const Module = @import("Module.zig"); const LazySrcLoc = Module.LazySrcLoc; @@ -214,7 +213,7 @@ pub const Inst = struct { as_node, /// Bitwise AND. `&` bit_and, - /// Bitcast a value to a different type. + /// Reinterpret the memory representation of a value as a different type. /// Uses the pl_node field with payload `Bin`. bitcast, /// A typed result location pointer is bitcasted to a new result location pointer. 
diff --git a/src/codegen.zig b/src/codegen.zig index 6050fe0ed8..205bab755a 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -2,7 +2,7 @@ const std = @import("std"); const mem = std.mem; const math = std.math; const assert = std.debug.assert; -const ir = @import("air.zig"); +const Air = @import("Air.zig"); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; const TypedValue = @import("TypedValue.zig"); diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 391375c709..e3f2423746 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6,8 +6,7 @@ const log = std.log.scoped(.c); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const ir = @import("../air.zig"); -const Inst = ir.Inst; +const Air = @import("../Air.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index b8f96891f4..45ee2d9bb8 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -9,7 +9,7 @@ const math = std.math; const Module = @import("../Module.zig"); const TypedValue = @import("../TypedValue.zig"); -const ir = @import("../air.zig"); +const Air = @import("../Air.zig"); const Inst = ir.Inst; const Value = @import("../value.zig").Value; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 7fa813e565..60e9a96275 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -12,8 +12,7 @@ const Decl = Module.Decl; const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const LazySrcLoc = Module.LazySrcLoc; -const ir = @import("../air.zig"); -const Inst = ir.Inst; +const Air = @import("../Air.zig"); pub const Word = u32; pub const ResultId = u32; diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 3476ab2ce6..45b00ddfad 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -9,8 +9,7 @@ const wasm = std.wasm; const Module = @import("../Module.zig"); const Decl = Module.Decl; -const ir = @import("../air.zig"); -const Inst = ir.Inst; +const Air = @import("../Air.zig"); const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const Compilation = @import("../Compilation.zig"); diff --git a/src/link/Elf.zig b/src/link/Elf.zig index d754b478b9..90224866ba 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -10,7 +10,7 @@ const log = std.log.scoped(.link); const DW = std.dwarf; const leb128 = std.leb; -const ir = @import("../air.zig"); +const Air = @import("../Air.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const codegen = @import("../codegen.zig"); diff --git a/src/liveness.zig b/src/liveness.zig index d115af77ed..e6692e4fc3 100644 --- a/src/liveness.zig +++ b/src/liveness.zig @@ -1,5 +1,5 @@ const std = @import("std"); -const ir = @import("air.zig"); +const Air = @import("Air.zig"); const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; diff --git a/src/register_manager.zig b/src/register_manager.zig index 96cf4f17b7..9c61423706 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -3,7 +3,7 @@ const math = std.math; const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; -const ir = @import("air.zig"); +const Air = @import("Air.zig"); const Type = @import("type.zig").Type; const Module = 
@import("Module.zig"); const LazySrcLoc = Module.LazySrcLoc; diff --git a/src/value.zig b/src/value.zig index b4cd63b8d3..48cd6fffc4 100644 --- a/src/value.zig +++ b/src/value.zig @@ -7,7 +7,7 @@ const BigIntMutable = std.math.big.int.Mutable; const Target = std.Target; const Allocator = std.mem.Allocator; const Module = @import("Module.zig"); -const ir = @import("air.zig"); +const Air = @import("Air.zig"); /// This is the raw data, with no bookkeeping, no memory awareness, /// no de-duplication, and no type system awareness. From 3c3abaf3907e344305620fb4565e7c1acb0a9c88 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 10 Jul 2021 16:24:35 -0700 Subject: [PATCH 02/53] stage2: update liveness analysis to new AIR memory layout It's pretty compact, with each AIR instruction only taking up 4 bits, plus a sparse table for special instructions such as conditional branch, switch branch, and function calls with more than 2 arguments. --- BRANCH_TODO | 73 -------- src/Air.zig | 124 ++++++++----- src/Liveness.zig | 457 +++++++++++++++++++++++++++++++++++++++++++++++ src/codegen.zig | 13 +- src/liveness.zig | 254 -------------------------- 5 files changed, 542 insertions(+), 379 deletions(-) create mode 100644 src/Liveness.zig delete mode 100644 src/liveness.zig diff --git a/BRANCH_TODO b/BRANCH_TODO index ed77d6bd03..5bc4d2a2f5 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -57,79 +57,6 @@ unreachable; } - pub fn Type(tag: Tag) type { - return switch (tag) { - .alloc, - .retvoid, - .unreach, - .breakpoint, - => NoOp, - - .ref, - .ret, - .bitcast, - .not, - .is_non_null, - .is_non_null_ptr, - .is_null, - .is_null_ptr, - .is_err, - .is_non_err, - .is_err_ptr, - .is_non_err_ptr, - .ptrtoint, - .floatcast, - .intcast, - .load, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .unwrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - .wrap_errunion_payload, - .wrap_errunion_err, - => UnOp, - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .store, - .bool_and, - .bool_or, - .bit_and, - .bit_or, - .xor, - => BinOp, - - .arg => Arg, - .assembly => Assembly, - .block => Block, - .br => Br, - .br_block_flat => BrBlockFlat, - .br_void => BrVoid, - .call => Call, - .condbr => CondBr, - .constant => Constant, - .loop => Loop, - .varptr => VarPtr, - .struct_field_ptr => StructFieldPtr, - .switchbr => SwitchBr, - .dbg_stmt => DbgStmt, - }; - } - pub fn Args(comptime T: type) type { return std.meta.fieldInfo(T, .args).field_type; } diff --git a/src/Air.zig b/src/Air.zig index 97a5824abc..c57232fba0 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -10,10 +10,18 @@ const Air = @This(); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. +/// The first few indexes are reserved. See `ExtraIndex` for the values. extra: []u32, values: []Value, variables: []*Module.Var, +pub const ExtraIndex = enum(u32) { + /// Payload index of the main `Block` in the `extra` array. + main_block, + + _, +}; + pub const Inst = struct { tag: Tag, data: Data, @@ -231,11 +239,25 @@ pub const Inst = struct { .neq => .cmp_neq, }; } + + pub fn toCmpOp(tag: Tag) ?std.math.CompareOperator { + return switch (tag) { + .cmp_lt => .lt, + .cmp_lte => .lte, + .cmp_eq => .eq, + .cmp_gte => .gte, + .cmp_gt => .gt, + .cmp_neq => .neq, + else => null, + }; + } }; /// The position of an AIR instruction within the `Air` instructions array. 
pub const Index = u32; + pub const Ref = @import("Zir.zig").Inst.Ref; + /// All instructions have an 8-byte payload, which is contained within /// this union. `Tag` determines which union field is active, as well as /// how to interpret the data within. @@ -281,55 +303,69 @@ pub const Inst = struct { } } }; +}; - pub fn cmpOperator(base: *Inst) ?std.math.CompareOperator { - return switch (base.tag) { - .cmp_lt => .lt, - .cmp_lte => .lte, - .cmp_eq => .eq, - .cmp_gte => .gte, - .cmp_gt => .gt, - .cmp_neq => .neq, - else => null, - }; - } +/// Trailing is a list of instruction indexes for every `body_len`. +pub const Block = struct { + body_len: u32, +}; - /// Trailing is a list of instruction indexes for every `body_len`. - pub const Block = struct { - body_len: u32, - }; +/// Trailing is a list of `Ref` for every `args_len`. +pub const Call = struct { + args_len: u32, +}; - /// Trailing is a list of `Ref` for every `args_len`. - pub const Call = struct { - args_len: u32, - }; +/// This data is stored inside extra, with two sets of trailing `Ref`: +/// * 0. the then body, according to `then_body_len`. +/// * 1. the else body, according to `else_body_len`. +pub const CondBr = struct { + then_body_len: u32, + else_body_len: u32, +}; - /// This data is stored inside extra, with two sets of trailing `Ref`: - /// * 0. the then body, according to `then_body_len`. - /// * 1. the else body, according to `else_body_len`. - pub const CondBr = struct { - condition: Ref, - then_body_len: u32, - else_body_len: u32, - }; +/// Trailing: +/// * 0. `Case` for each `cases_len` +/// * 1. the else body, according to `else_body_len`. +pub const SwitchBr = struct { + cases_len: u32, + else_body_len: u32, /// Trailing: - /// * 0. `Case` for each `cases_len` - /// * 1. the else body, according to `else_body_len`. - pub const SwitchBr = struct { - cases_len: u32, - else_body_len: u32, - - /// Trailing: - /// * instruction index for each `body_len`. - pub const Case = struct { - item: Ref, - body_len: u32, - }; - }; - - pub const StructField = struct { - struct_ptr: Ref, - field_index: u32, + /// * instruction index for each `body_len`. + pub const Case = struct { + item: Ref, + body_len: u32, }; }; + +pub const StructField = struct { + struct_ptr: Ref, + field_index: u32, +}; + +pub fn getMainBody(air: Air) []const Air.Inst.Index { + const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; + const body_len = air.extra[body_index]; + return air.extra[body_index..][0..body_len]; +} + +/// Returns the requested data, as well as the new index which is at the start of the +/// trailers for the object. +pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } { + const fields = std.meta.fields(T); + var i: usize = index; + var result: T = undefined; + inline for (fields) |field| { + @field(result, field.name) = switch (field.field_type) { + u32 => air.extra[i], + Inst.Ref => @intToEnum(Inst.Ref, air.extra[i]), + i32 => @bitCast(i32, air.extra[i]), + else => @compileError("bad field type"), + }; + i += 1; + } + return .{ + .data = result, + .end = i, + }; +} diff --git a/src/Liveness.zig b/src/Liveness.zig new file mode 100644 index 0000000000..828614dcbb --- /dev/null +++ b/src/Liveness.zig @@ -0,0 +1,457 @@ +//! For each AIR instruction, we want to know: +//! * Is the instruction unreferenced (e.g. dies immediately)? +//! * For each of its operands, does the operand die with this instruction (e.g. is +//! this the last reference to it)? +//! 
Some instructions are special, such as: +//! * Conditional Branches +//! * Switch Branches +const Liveness = @This(); +const std = @import("std"); +const Air = @import("Air.zig"); +const trace = @import("tracy.zig").trace; +const log = std.log.scoped(.liveness); +const assert = std.debug.assert; +const Allocator = std.mem.Allocator; + +/// This array is split into sets of 4 bits per AIR instruction. +/// The MSB (0bX000) is whether the instruction is unreferenced. +/// The LSB (0b000X) is the first operand, and so on, up to 3 operands. A set bit means the +/// operand dies after this instruction. +/// Instructions which need more data to track liveness have special handling via the +/// `special` table. +tomb_bits: []const usize, +/// Sparse table of specially handled instructions. The value is an index into the `extra` +/// array. The meaning of the data depends on the AIR tag. +special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), +/// Auxilliary data. The way this data is interpreted is determined contextually. +extra: []const u32, + +/// Trailing is the set of instructions whose lifetimes end at the start of the then branch, +/// followed by the set of instructions whose lifetimes end at the start of the else branch. +pub const CondBr = struct { + then_death_count: u32, + else_death_count: u32, +}; + +/// Trailing is: +/// * For each case in the same order as in the AIR: +/// - case_death_count: u32 +/// - Air.Inst.Index for each `case_death_count`: set of instructions whose lifetimes +/// end at the start of this case. +/// * Air.Inst.Index for each `else_death_count`: set of instructions whose lifetimes +/// end at the start of the else case. +pub const SwitchBr = struct { + else_death_count: u32, +}; + +pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { + const tracy = trace(@src()); + defer tracy.end(); + + var a: Analysis = .{ + .gpa = gpa, + .air = &air, + .table = .{}, + .tomb_bits = try gpa.alloc( + usize, + (air.instructions.len * bpi + @bitSizeOf(usize) - 1) / @bitSizeOf(usize), + ), + .extra = .{}, + .special = .{}, + }; + errdefer gpa.free(a.tomb_bits); + errdefer a.special.deinit(gpa); + defer a.extra.deinit(gpa); + defer a.table.deinit(gpa); + + const main_body = air.getMainBody(); + try a.table.ensureTotalCapacity(main_body.len); + try analyzeWithContext(&a, null, main_body); + return Liveness{ + .tomb_bits = a.tomb_bits, + .special = a.special, + .extra = a.extra.toOwnedSlice(gpa), + }; +} + +pub fn deinit(l: *Liveness, gpa: *Allocator) void { + gpa.free(l.tomb_bits); + gpa.free(l.extra); + l.special.deinit(gpa); +} + +/// How many tomb bits per AIR instruction. +const bpi = 4; +const Bpi = std.meta.Int(.unsigned, bpi); + +/// In-progress data; on successful analysis converted into `Liveness`. 
+const Analysis = struct { + gpa: *Allocator, + air: *const Air, + table: std.AutoHashMapUnmanaged(Air.Inst.Index, void), + tomb_bits: []usize, + extra: std.ArrayListUnmanaged(u32), + + fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { + const usize_index = (inst * bpi) / @bitSizeOf(usize); + a.tomb_bits[usize_index] |= tomb_bits << (inst % (@bitSizeOf(usize) / bpi)) * bpi; + } + + fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { + const fields = std.meta.fields(@TypeOf(extra)); + try a.extra.ensureUnusedCapacity(a.gpa, fields.len); + return addExtraAssumeCapacity(a, extra); + } + + fn addExtraAssumeCapacity(a: *Analysis, extra: anytype) u32 { + const fields = std.meta.fields(@TypeOf(extra)); + const result = @intCast(u32, a.extra.items.len); + inline for (fields) |field| { + a.extra.appendAssumeCapacity(switch (field.field_type) { + u32 => @field(extra, field.name), + else => @compileError("bad field type"), + }); + } + return result; + } +}; + +fn analyzeWithContext( + a: *Analysis, + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), + body: []const Air.Inst.Index, +) Allocator.Error!void { + var i: usize = body.len; + + if (new_set) |ns| { + // We are only interested in doing this for instructions which are born + // before a conditional branch, so after obtaining the new set for + // each branch we prune the instructions which were born within. + while (i != 0) { + i -= 1; + const inst = body[i]; + _ = ns.remove(inst); + try analyzeInst(a, new_set, inst); + } + } else { + while (i != 0) { + i -= 1; + const inst = body[i]; + try analyzeInst(a, new_set, inst); + } + } +} + +fn analyzeInst( + a: *Analysis, + new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + inst: Air.Inst.Index, +) Allocator.Error!void { + const gpa = a.gpa; + const table = &a.table; + const inst_tags = a.air.instructions.items(.tag); + + // No tombstone for this instruction means it is never referenced, + // and its birth marks its own death. 
Very metal 🤘 + const main_tomb = !table.contains(inst); + + switch (inst_tags[inst]) { + .add, + .addwrap, + .sub, + .subwrap, + .mul, + .mulwrap, + .div, + .bit_and, + .bit_or, + .xor, + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .bool_and, + .bool_or, + .store, + => { + const o = inst_datas[inst].bin_op; + return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); + }, + + .alloc, + .br, + .constant, + .breakpoint, + .dbg_stmt, + .varptr, + .unreach, + => return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }), + + .not, + .bitcast, + .load, + .ref, + .floatcast, + .intcast, + .optional_payload, + .optional_payload_ptr, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .wrap_errunion_payload, + .wrap_errunion_err, + => { + const o = inst_datas[inst].ty_op; + return trackOperands(a, new_set, inst, main_tomb, .{ o.operand, .none, .none }); + }, + + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_non_err, + .is_err_ptr, + .is_non_err_ptr, + .ptrtoint, + .ret, + => { + const operand = inst_datas[inst].un_op; + return trackOperands(a, new_set, inst, main_tomb, .{ operand, .none, .none }); + }, + + .call => { + const inst_data = inst_datas[inst].pl_op; + const callee = inst_data.operand; + const extra = a.air.extraData(Air.Call, inst_data.payload); + const args = a.air.extra[extra.end..][0..extra.data.args_len]; + if (args.len <= bpi - 2) { + var buf: [bpi - 1]Air.Inst.Ref = undefined; + buf[0] = callee; + std.mem.copy(&buf, buf[1..], args); + return trackOperands(a, new_set, inst, main_tomb, buf); + } + @panic("TODO: liveness analysis for function with many args"); + }, + .struct_field_ptr => { + const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; + return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none }); + }, + .block => { + const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = a.air.extra[extra.end..][0..extra.data.body_len]; + try analyzeWithContext(a, new_set, body); + // We let this continue so that it can possibly mark the block as + // unreferenced below. + return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }); + }, + .loop => { + const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = a.air.extra[extra.end..][0..extra.data.body_len]; + try analyzeWithContext(a, new_set, body); + return; // Loop has no operands and it is always unreferenced. + }, + .cond_br => { + // Each death that occurs inside one branch, but not the other, needs + // to be added as a death immediately upon entering the other branch. + const inst_data = inst_datas[inst].pl_op; + const condition = inst_data.operand; + const extra = a.air.extraData(Air.CondBr, inst_data.payload); + const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + + var then_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); + defer then_table.deinit(); + try analyzeWithContext(a, &then_table, then_body); + + // Reset the table back to its state from before the branch. 
+ { + var it = then_table.keyIterator(); + while (it.next()) |key| { + assert(table.remove(key.*)); + } + } + + var else_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); + defer else_table.deinit(); + try analyzeWithContext(a, &else_table, else_body); + + var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); + defer then_entry_deaths.deinit(); + var else_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); + defer else_entry_deaths.deinit(); + + { + var it = else_table.keyIterator(); + while (it.next()) |key| { + const else_death = key.*; + if (!then_table.contains(else_death)) { + try then_entry_deaths.append(else_death); + } + } + } + // This loop is the same, except it's for the then branch, and it additionally + // has to put its items back into the table to undo the reset. + { + var it = then_table.keyIterator(); + while (it.next()) |key| { + const then_death = key.*; + if (!else_table.contains(then_death)) { + try else_entry_deaths.append(then_death); + } + try table.put(gpa, then_death, {}); + } + } + // Now we have to correctly populate new_set. + if (new_set) |ns| { + try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count())); + var it = then_table.keyIterator(); + while (it.next()) |key| { + _ = ns.putAssumeCapacity(key.*, {}); + } + it = else_table.keyIterator(); + while (it.next()) |key| { + _ = ns.putAssumeCapacity(key.*, {}); + } + } + const then_death_count = @intCast(u32, then_entry_deaths.items.len); + const else_death_count = @intCast(u32, else_entry_deaths.items.len); + + try a.extra.ensureUnusedCapacity(std.meta.fields(@TypeOf(CondBr)).len + + then_death_count + else_death_count); + const extra_index = a.addExtraAssumeCapacity(CondBr{ + .then_death_count = then_death_count, + .else_death_count = else_death_count, + }); + a.extra.appendSliceAssumeCapacity(then_entry_deaths.items); + a.extra.appendSliceAssumeCapacity(else_entry_deaths.items); + try a.special.put(inst, extra_index); + + // Continue on with the instruction analysis. The following code will find the condition + // instruction, and the deaths flag for the CondBr instruction will indicate whether the + // condition's lifetime ends immediately before entering any branch. + return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); + }, + .switch_br => { + const inst_data = inst_datas[inst].pl_op; + const condition = inst_data.operand; + const switch_br = a.air.extraData(Air.SwitchBr, inst_data.payload); + + const Table = std.AutoHashMapUnmanaged(Air.Inst.Index, void); + const case_tables = try gpa.alloc(Table, switch_br.data.cases_len + 1); // +1 for else + defer gpa.free(case_tables); + + std.mem.set(Table, case_tables, .{}); + defer for (case_tables) |*ct| ct.deinit(gpa); + + var air_extra_index: usize = switch_br.end; + for (case_tables[0..switch_br.data.cases_len]) |*case_table| { + const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index); + const case_body = a.air.extra[case.end..][0..case.data.body_len]; + air_extra_index = case.end + case_body.len; + try analyzeWithContext(a, case_table, case_body); + + // Reset the table back to its state from before the case. 
+ var it = case_table.keyIterator(); + while (it.next()) |key| { + assert(table.remove(key.*)); + } + } + { // else + const else_table = &case_tables[case_tables.len - 1]; + const else_body = a.air.extra[air_extra_index..][0..switch_br.data.else_body_len]; + try analyzeWithContext(a, else_table, else_body); + + // Reset the table back to its state from before the case. + var it = else_table.keyIterator(); + while (it.next()) |key| { + assert(table.remove(key.*)); + } + } + + const List = std.ArrayListUnmanaged(Air.Inst.Index); + const case_deaths = try gpa.alloc(List, case_tables.len); // includes else + defer gpa.free(case_deaths); + + std.mem.set(List, case_deaths, .{}); + defer for (case_deaths) |*cd| cd.deinit(gpa); + + var total_deaths: u32 = 0; + for (case_tables) |*ct, i| { + total_deaths += ct.count(); + var it = ct.keyIterator(); + while (it.next()) |key| { + const case_death = key.*; + for (case_tables) |*ct_inner, j| { + if (i == j) continue; + if (!ct_inner.contains(case_death)) { + // instruction is not referenced in this case + try case_deaths[j].append(gpa, case_death); + } + } + // undo resetting the table + try table.put(gpa, case_death, {}); + } + } + + // Now we have to correctly populate new_set. + if (new_set) |ns| { + try ns.ensureUnusedCapacity(gpa, total_deaths); + for (case_tables) |*ct| { + var it = ct.keyIterator(); + while (it.next()) |key| { + _ = ns.putAssumeCapacity(key.*, {}); + } + } + } + + const else_death_count = @intCast(u32, case_deaths[case_deaths.len - 1].items.len); + const extra_index = try a.addExtra(SwitchBr{ + .else_death_count = else_death_count, + }); + for (case_deaths[0 .. case_deaths.len - 1]) |*cd| { + const case_death_count = @intCast(u32, cd.items.len); + try a.extra.ensureUnusedCapacity(1 + case_death_count + else_death_count); + a.extra.appendAssumeCapacity(case_death_count); + a.extra.appendSliceAssumeCapacity(cd.items); + } + a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items); + try a.special.put(inst, extra_index); + + return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); + }, + } +} + +fn trackOperands( + a: *Analysis, + new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + inst: Air.Inst.Index, + main_tomb: bool, + operands: [bpi - 1]Air.Inst.Ref, +) Allocator.Error!void { + const table = &a.table; + const gpa = a.gpa; + + var tomb_bits: Bpi = @boolToInt(main_tomb); + var i = operands.len; + + while (i > 0) { + i -= 1; + tomb_bits <<= 1; + const op_int = @enumToInt(operands[i]); + if (op_int < Air.Inst.Ref.typed_value_map.len) continue; + const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len; + const prev = try table.fetchPut(gpa, operand, {}); + if (prev == null) { + // Death. + tomb_bits |= 1; + if (new_set) |ns| try ns.putNoClobber(operand, {}); + } + } + a.storeTombBits(inst, tomb_bits); +} diff --git a/src/codegen.zig b/src/codegen.zig index 205bab755a..91b0401291 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -297,7 +297,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// across each runtime branch upon joining. branch_stack: *std.ArrayList(Branch), - blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{}, + // Key is the block instruction + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, register_manager: RegisterManager(Self, Register, &callee_preserved_regs) = .{}, /// Maps offset to what is stored there. 
@@ -383,7 +384,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; const Branch = struct { - inst_table: std.AutoArrayHashMapUnmanaged(*ir.Inst, MCValue) = .{}, + inst_table: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, MCValue) = .{}, fn deinit(self: *Branch, gpa: *Allocator) void { self.inst_table.deinit(gpa); @@ -392,7 +393,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; const StackAllocation = struct { - inst: *ir.Inst, + inst: Air.Inst.Index, /// TODO do we need size? should be determined by inst.ty.abiSize() size: u32, }; @@ -720,7 +721,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgAdvancePCAndLine(self.end_di_line, self.end_di_column); } - fn genBody(self: *Self, body: ir.Body) InnerError!void { + fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { for (body.instructions) |inst| { try self.ensureProcessDeathCapacity(@popCount(@TypeOf(inst.deaths), inst.deaths)); @@ -2824,10 +2825,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genDbgStmt(self: *Self, inst: *ir.Inst.DbgStmt) !MCValue { - // TODO when reworking AIR memory layout, rework source locations here as - // well to be more efficient, as well as support inlined function calls correctly. - // For now we convert LazySrcLoc to absolute byte offset, to match what the - // existing codegen code expects. try self.dbgAdvancePCAndLine(inst.line, inst.column); assert(inst.base.isUnused()); return MCValue.dead; diff --git a/src/liveness.zig b/src/liveness.zig deleted file mode 100644 index e6692e4fc3..0000000000 --- a/src/liveness.zig +++ /dev/null @@ -1,254 +0,0 @@ -const std = @import("std"); -const Air = @import("Air.zig"); -const trace = @import("tracy.zig").trace; -const log = std.log.scoped(.liveness); -const assert = std.debug.assert; - -/// Perform Liveness Analysis over the `Body`. Each `Inst` will have its `deaths` field populated. -pub fn analyze( - /// Used for temporary storage during the analysis. - gpa: *std.mem.Allocator, - /// Used to tack on extra allocations in the same lifetime as the existing instructions. - arena: *std.mem.Allocator, - body: ir.Body, -) error{OutOfMemory}!void { - const tracy = trace(@src()); - defer tracy.end(); - - var table = std.AutoHashMap(*ir.Inst, void).init(gpa); - defer table.deinit(); - try table.ensureCapacity(@intCast(u32, body.instructions.len)); - try analyzeWithTable(arena, &table, null, body); -} - -fn analyzeWithTable( - arena: *std.mem.Allocator, - table: *std.AutoHashMap(*ir.Inst, void), - new_set: ?*std.AutoHashMap(*ir.Inst, void), - body: ir.Body, -) error{OutOfMemory}!void { - var i: usize = body.instructions.len; - - if (new_set) |ns| { - // We are only interested in doing this for instructions which are born - // before a conditional branch, so after obtaining the new set for - // each branch we prune the instructions which were born within. - while (i != 0) { - i -= 1; - const base = body.instructions[i]; - _ = ns.remove(base); - try analyzeInst(arena, table, new_set, base); - } - } else { - while (i != 0) { - i -= 1; - const base = body.instructions[i]; - try analyzeInst(arena, table, new_set, base); - } - } -} - -fn analyzeInst( - arena: *std.mem.Allocator, - table: *std.AutoHashMap(*ir.Inst, void), - new_set: ?*std.AutoHashMap(*ir.Inst, void), - base: *ir.Inst, -) error{OutOfMemory}!void { - if (table.contains(base)) { - base.deaths = 0; - } else { - // No tombstone for this instruction means it is never referenced, - // and its birth marks its own death. 
Very metal 🤘 - base.deaths = 1 << ir.Inst.unreferenced_bit_index; - } - - switch (base.tag) { - .constant => return, - .block => { - const inst = base.castTag(.block).?; - try analyzeWithTable(arena, table, new_set, inst.body); - // We let this continue so that it can possibly mark the block as - // unreferenced below. - }, - .loop => { - const inst = base.castTag(.loop).?; - try analyzeWithTable(arena, table, new_set, inst.body); - return; // Loop has no operands and it is always unreferenced. - }, - .condbr => { - const inst = base.castTag(.condbr).?; - - // Each death that occurs inside one branch, but not the other, needs - // to be added as a death immediately upon entering the other branch. - - var then_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator); - defer then_table.deinit(); - try analyzeWithTable(arena, table, &then_table, inst.then_body); - - // Reset the table back to its state from before the branch. - { - var it = then_table.keyIterator(); - while (it.next()) |key| { - assert(table.remove(key.*)); - } - } - - var else_table = std.AutoHashMap(*ir.Inst, void).init(table.allocator); - defer else_table.deinit(); - try analyzeWithTable(arena, table, &else_table, inst.else_body); - - var then_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator); - defer then_entry_deaths.deinit(); - var else_entry_deaths = std.ArrayList(*ir.Inst).init(table.allocator); - defer else_entry_deaths.deinit(); - - { - var it = else_table.keyIterator(); - while (it.next()) |key| { - const else_death = key.*; - if (!then_table.contains(else_death)) { - try then_entry_deaths.append(else_death); - } - } - } - // This loop is the same, except it's for the then branch, and it additionally - // has to put its items back into the table to undo the reset. - { - var it = then_table.keyIterator(); - while (it.next()) |key| { - const then_death = key.*; - if (!else_table.contains(then_death)) { - try else_entry_deaths.append(then_death); - } - try table.put(then_death, {}); - } - } - // Now we have to correctly populate new_set. - if (new_set) |ns| { - try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count())); - var it = then_table.keyIterator(); - while (it.next()) |key| { - _ = ns.putAssumeCapacity(key.*, {}); - } - it = else_table.keyIterator(); - while (it.next()) |key| { - _ = ns.putAssumeCapacity(key.*, {}); - } - } - inst.then_death_count = std.math.cast(@TypeOf(inst.then_death_count), then_entry_deaths.items.len) catch return error.OutOfMemory; - inst.else_death_count = std.math.cast(@TypeOf(inst.else_death_count), else_entry_deaths.items.len) catch return error.OutOfMemory; - const allocated_slice = try arena.alloc(*ir.Inst, then_entry_deaths.items.len + else_entry_deaths.items.len); - inst.deaths = allocated_slice.ptr; - std.mem.copy(*ir.Inst, inst.thenDeaths(), then_entry_deaths.items); - std.mem.copy(*ir.Inst, inst.elseDeaths(), else_entry_deaths.items); - - // Continue on with the instruction analysis. The following code will find the condition - // instruction, and the deaths flag for the CondBr instruction will indicate whether the - // condition's lifetime ends immediately before entering any branch. 
- }, - .switchbr => { - const inst = base.castTag(.switchbr).?; - - const Table = std.AutoHashMap(*ir.Inst, void); - const case_tables = try table.allocator.alloc(Table, inst.cases.len + 1); // +1 for else - defer table.allocator.free(case_tables); - - std.mem.set(Table, case_tables, Table.init(table.allocator)); - defer for (case_tables) |*ct| ct.deinit(); - - for (inst.cases) |case, i| { - try analyzeWithTable(arena, table, &case_tables[i], case.body); - - // Reset the table back to its state from before the case. - var it = case_tables[i].keyIterator(); - while (it.next()) |key| { - assert(table.remove(key.*)); - } - } - { // else - try analyzeWithTable(arena, table, &case_tables[case_tables.len - 1], inst.else_body); - - // Reset the table back to its state from before the case. - var it = case_tables[case_tables.len - 1].keyIterator(); - while (it.next()) |key| { - assert(table.remove(key.*)); - } - } - - const List = std.ArrayList(*ir.Inst); - const case_deaths = try table.allocator.alloc(List, case_tables.len); // +1 for else - defer table.allocator.free(case_deaths); - - std.mem.set(List, case_deaths, List.init(table.allocator)); - defer for (case_deaths) |*cd| cd.deinit(); - - var total_deaths: u32 = 0; - for (case_tables) |*ct, i| { - total_deaths += ct.count(); - var it = ct.keyIterator(); - while (it.next()) |key| { - const case_death = key.*; - for (case_tables) |*ct_inner, j| { - if (i == j) continue; - if (!ct_inner.contains(case_death)) { - // instruction is not referenced in this case - try case_deaths[j].append(case_death); - } - } - // undo resetting the table - try table.put(case_death, {}); - } - } - - // Now we have to correctly populate new_set. - if (new_set) |ns| { - try ns.ensureCapacity(@intCast(u32, ns.count() + total_deaths)); - for (case_tables) |*ct| { - var it = ct.keyIterator(); - while (it.next()) |key| { - _ = ns.putAssumeCapacity(key.*, {}); - } - } - } - - total_deaths = 0; - for (case_deaths[0 .. case_deaths.len - 1]) |*ct, i| { - inst.cases[i].index = total_deaths; - const len = std.math.cast(@TypeOf(inst.else_deaths), ct.items.len) catch return error.OutOfMemory; - inst.cases[i].deaths = len; - total_deaths += len; - } - { // else - const else_deaths = std.math.cast(@TypeOf(inst.else_deaths), case_deaths[case_deaths.len - 1].items.len) catch return error.OutOfMemory; - inst.else_index = total_deaths; - inst.else_deaths = else_deaths; - total_deaths += else_deaths; - } - - const allocated_slice = try arena.alloc(*ir.Inst, total_deaths); - inst.deaths = allocated_slice.ptr; - for (case_deaths[0 .. case_deaths.len - 1]) |*cd, i| { - std.mem.copy(*ir.Inst, inst.caseDeaths(i), cd.items); - } - std.mem.copy(*ir.Inst, inst.elseDeaths(), case_deaths[case_deaths.len - 1].items); - }, - else => {}, - } - - const needed_bits = base.operandCount(); - if (needed_bits <= ir.Inst.deaths_bits) { - var bit_i: ir.Inst.DeathsBitIndex = 0; - while (base.getOperand(bit_i)) |operand| : (bit_i += 1) { - const prev = try table.fetchPut(operand, {}); - if (prev == null) { - // Death. 
- base.deaths |= @as(ir.Inst.DeathsInt, 1) << bit_i; - if (new_set) |ns| try ns.putNoClobber(operand, {}); - } - } - } else { - @panic("Handle liveness analysis for instructions with many parameters"); - } - - log.debug("analyze {}: 0b{b}\n", .{ base.tag, base.deaths }); -} From 9918a5fbe3dc910f90f2c60ad74edb51de53e0cf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 10 Jul 2021 16:27:23 -0700 Subject: [PATCH 03/53] AstGen: remove unneeded field ref_start_index Previously, this field was used because the Zir.Inst.Ref encoding supported the concept of references to function parameters. However now thanks to whole-file-astgen, the implementations of indexToRef and refToIndex are trivial addition/subtraction of a comptime const integer. --- src/AstGen.zig | 117 +++++++++++++++++++++++-------------------------- 1 file changed, 56 insertions(+), 61 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index 3fdc097042..19906c94d3 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -989,7 +989,7 @@ fn suspendExpr( } try suspend_scope.setBlockBody(suspend_inst); - return gz.indexToRef(suspend_inst); + return indexToRef(suspend_inst); } fn awaitExpr( @@ -1300,7 +1300,7 @@ fn arrayInitExprRlPtr( .lhs = result_ptr, .rhs = index_inst, }); - elem_ptr_list[i] = gz.refToIndex(elem_ptr).?; + elem_ptr_list[i] = refToIndex(elem_ptr).?; _ = try expr(gz, scope, .{ .ptr = elem_ptr }, elem_init); } _ = try gz.addPlNode(.validate_array_init_ptr, node, Zir.Inst.Block{ @@ -1455,7 +1455,7 @@ fn structInitExprRlPtr( .lhs = result_ptr, .field_name_start = str_index, }); - field_ptr_list[i] = gz.refToIndex(field_ptr).?; + field_ptr_list[i] = refToIndex(field_ptr).?; _ = try expr(gz, scope, .{ .ptr = field_ptr }, field_init); } _ = try gz.addPlNode(.validate_struct_init_ptr, node, Zir.Inst.Block{ @@ -1489,7 +1489,7 @@ fn structInitExprRlTy( .name_start = str_index, }); fields_list[i] = .{ - .field_type = gz.refToIndex(field_ty_inst).?, + .field_type = refToIndex(field_ty_inst).?, .init = try expr(gz, scope, .{ .ty = field_ty_inst }, field_init), }; } @@ -1786,7 +1786,7 @@ fn labeledBlockExpr( } try block_scope.setBlockBody(block_inst); - return gz.indexToRef(block_inst); + return indexToRef(block_inst); }, .break_operand => { // All break operands are values that did not use the result location pointer. @@ -1800,7 +1800,7 @@ fn labeledBlockExpr( } else { try block_scope.setBlockBody(block_inst); } - const block_ref = gz.indexToRef(block_inst); + const block_ref = indexToRef(block_inst); switch (rl) { .ref => return block_ref, else => return rvalue(gz, rl, block_ref, block_node), @@ -1878,7 +1878,7 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: ast.Node.Index) Inner // we want to avoid adding the ZIR instruction if possible for performance. const maybe_unused_result = try expr(gz, scope, .none, statement); var noreturn_src_node: ast.Node.Index = 0; - const elide_check = if (gz.refToIndex(maybe_unused_result)) |inst| b: { + const elide_check = if (refToIndex(maybe_unused_result)) |inst| b: { // Note that this array becomes invalid after appending more items to it // in the above while loop. const zir_tags = gz.astgen.instructions.items(.tag); @@ -2440,7 +2440,7 @@ fn varDecl( // the alloc instruction and the store_to_block_ptr instruction. 
try parent_zir.ensureUnusedCapacity(gpa, init_scope.instructions.items.len); for (init_scope.instructions.items) |src_inst| { - if (gz.indexToRef(src_inst) == init_scope.rl_ptr) continue; + if (indexToRef(src_inst) == init_scope.rl_ptr) continue; if (zir_tags[src_inst] == .store_to_block_ptr) { if (zir_datas[src_inst].bin.lhs == init_scope.rl_ptr) continue; } @@ -2743,7 +2743,7 @@ fn ptrType( } const new_index = @intCast(Zir.Inst.Index, gz.astgen.instructions.len); - const result = gz.indexToRef(new_index); + const result = indexToRef(new_index); gz.astgen.instructions.appendAssumeCapacity(.{ .tag = .ptr_type, .data = .{ .ptr_type = .{ .flags = .{ @@ -3473,7 +3473,7 @@ fn structDeclInner( .body_len = 0, .decls_len = 0, }); - return gz.indexToRef(decl_inst); + return indexToRef(decl_inst); } const astgen = gz.astgen; @@ -3492,7 +3492,6 @@ fn structDeclInner( .astgen = astgen, .force_comptime = true, .in_defer = false, - .ref_start_index = gz.ref_start_index, }; defer block_scope.instructions.deinit(gpa); @@ -3730,7 +3729,7 @@ fn structDeclInner( } astgen.extra.appendSliceAssumeCapacity(fields_data.items); - return gz.indexToRef(decl_inst); + return indexToRef(decl_inst); } fn unionDeclInner( @@ -3758,7 +3757,6 @@ fn unionDeclInner( .astgen = astgen, .force_comptime = true, .in_defer = false, - .ref_start_index = gz.ref_start_index, }; defer block_scope.instructions.deinit(gpa); @@ -4006,7 +4004,7 @@ fn unionDeclInner( astgen.extra.appendAssumeCapacity(cur_bit_bag); astgen.extra.appendSliceAssumeCapacity(fields_data.items); - return gz.indexToRef(decl_inst); + return indexToRef(decl_inst); } fn containerDecl( @@ -4170,7 +4168,6 @@ fn containerDecl( .astgen = astgen, .force_comptime = true, .in_defer = false, - .ref_start_index = gz.ref_start_index, }; defer block_scope.instructions.deinit(gpa); @@ -4398,7 +4395,7 @@ fn containerDecl( astgen.extra.appendAssumeCapacity(cur_bit_bag); astgen.extra.appendSliceAssumeCapacity(fields_data.items); - return rvalue(gz, rl, gz.indexToRef(decl_inst), node); + return rvalue(gz, rl, indexToRef(decl_inst), node); }, .keyword_opaque => { var namespace: Scope.Namespace = .{ .parent = scope }; @@ -4559,7 +4556,7 @@ fn containerDecl( } astgen.extra.appendSliceAssumeCapacity(wip_decls.payload.items); - return rvalue(gz, rl, gz.indexToRef(decl_inst), node); + return rvalue(gz, rl, indexToRef(decl_inst), node); }, else => unreachable, } @@ -4797,7 +4794,7 @@ fn finishThenElseBlock( } assert(!strat.elide_store_to_block_ptr_instructions); try setCondBrPayload(condbr, cond, then_scope, else_scope); - return parent_gz.indexToRef(main_block); + return indexToRef(main_block); }, .break_operand => { if (!parent_gz.refIsNoReturn(then_result)) { @@ -4815,7 +4812,7 @@ fn finishThenElseBlock( } else { try setCondBrPayload(condbr, cond, then_scope, else_scope); } - const block_ref = parent_gz.indexToRef(main_block); + const block_ref = indexToRef(main_block); switch (rl) { .ref => return block_ref, else => return rvalue(parent_gz, rl, block_ref, node), @@ -4937,7 +4934,7 @@ fn boolBinOp( } try rhs_scope.setBoolBrBody(bool_br); - const block_ref = gz.indexToRef(bool_br); + const block_ref = indexToRef(bool_br); return rvalue(gz, rl, block_ref, node); } @@ -5959,7 +5956,7 @@ fn switchExpr( if (!strat.elide_store_to_block_ptr_instructions) { astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items); astgen.extra.appendSliceAssumeCapacity(multi_cases_payload.items); - return parent_gz.indexToRef(switch_block); + return indexToRef(switch_block); } // There will 
necessarily be a store_to_block_ptr for @@ -6003,7 +6000,7 @@ fn switchExpr( .lhs = block_scope.rl_ty_inst, .rhs = zir_datas[break_inst].@"break".operand, }; - zir_datas[break_inst].@"break".operand = parent_gz.indexToRef(store_inst); + zir_datas[break_inst].@"break".operand = indexToRef(store_inst); } else { scalar_cases_payload.items[body_len_index] -= 1; astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[0..extra_index]); @@ -6045,7 +6042,7 @@ fn switchExpr( .lhs = block_scope.rl_ty_inst, .rhs = zir_datas[break_inst].@"break".operand, }; - zir_datas[break_inst].@"break".operand = parent_gz.indexToRef(store_inst); + zir_datas[break_inst].@"break".operand = indexToRef(store_inst); } else { scalar_cases_payload.items[body_len_index] -= 1; astgen.extra.appendSliceAssumeCapacity(scalar_cases_payload.items[start_index..extra_index]); @@ -6091,7 +6088,7 @@ fn switchExpr( .lhs = block_scope.rl_ty_inst, .rhs = zir_datas[break_inst].@"break".operand, }; - zir_datas[break_inst].@"break".operand = parent_gz.indexToRef(store_inst); + zir_datas[break_inst].@"break".operand = indexToRef(store_inst); } else { assert(zir_datas[store_inst].bin.lhs == block_scope.rl_ptr); multi_cases_payload.items[body_len_index] -= 1; @@ -6102,7 +6099,7 @@ fn switchExpr( } } - const block_ref = parent_gz.indexToRef(switch_block); + const block_ref = indexToRef(switch_block); switch (rl) { .ref => return block_ref, else => return rvalue(parent_gz, rl, block_ref, switch_node), @@ -6162,7 +6159,7 @@ fn switchExpr( } } - return parent_gz.indexToRef(switch_block); + return indexToRef(switch_block); }, } } @@ -6861,7 +6858,7 @@ fn asRlPtr( const zir_datas = astgen.instructions.items(.data); try parent_zir.ensureUnusedCapacity(astgen.gpa, as_scope.instructions.items.len); for (as_scope.instructions.items) |src_inst| { - if (parent_gz.indexToRef(src_inst) == as_scope.rl_ptr) continue; + if (indexToRef(src_inst) == as_scope.rl_ptr) continue; if (zir_tags[src_inst] == .store_to_block_ptr) { if (zir_datas[src_inst].bin.lhs == as_scope.rl_ptr) continue; } @@ -6992,10 +6989,10 @@ fn builtinCall( const str_lit_token = main_tokens[operand_node]; const str = try astgen.strLitAsString(str_lit_token); const result = try gz.addStrTok(.import, str.index, str_lit_token); - const gop = try astgen.imports.getOrPut(astgen.gpa, str.index); - if (!gop.found_existing) { - gop.value_ptr.* = str_lit_token; - } + const gop = try astgen.imports.getOrPut(astgen.gpa, str.index); + if (!gop.found_existing) { + gop.value_ptr.* = str_lit_token; + } return rvalue(gz, rl, result, node); }, .compile_log => { @@ -8705,9 +8702,6 @@ const GenZir = struct { in_defer: bool, /// How decls created in this scope should be named. anon_name_strategy: Zir.Inst.NameStrategy = .anon, - /// The end of special indexes. `Zir.Inst.Ref` subtracts against this number to convert - /// to `Zir.Inst.Index`. The default here is correct if there are 0 parameters. - ref_start_index: u32 = Zir.Inst.Ref.typed_value_map.len, /// The containing decl AST node. decl_node_index: ast.Node.Index, /// The containing decl line index, absolute. 
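With ref_start_index hoisted to a file-scope comptime constant (see the helpers added at the bottom of this file), converting between Zir.Inst.Ref and Zir.Inst.Index is the same offset arithmetic in every scope: refs numbered below the constant denote the reserved typed values, and instruction index i becomes ref ref_start_index + i. A hypothetical test sketch of the resulting invariants:

    test "Ref/Index conversion round-trips" {
        const index: Zir.Inst.Index = 42;
        const ref = indexToRef(index);
        // An instruction index survives the round trip...
        try std.testing.expectEqual(@as(?Zir.Inst.Index, index), refToIndex(ref));
        // ...while reserved refs such as `.void_type` name no instruction at all.
        try std.testing.expectEqual(@as(?Zir.Inst.Index, null), refToIndex(.void_type));
    }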
@@ -8751,7 +8745,6 @@ const GenZir = struct { return .{ .force_comptime = gz.force_comptime, .in_defer = gz.in_defer, - .ref_start_index = gz.ref_start_index, .decl_node_index = gz.decl_node_index, .decl_line = gz.decl_line, .parent = scope, @@ -8769,7 +8762,7 @@ const GenZir = struct { fn refIsNoReturn(gz: GenZir, inst_ref: Zir.Inst.Ref) bool { if (inst_ref == .unreachable_value) return true; - if (gz.refToIndex(inst_ref)) |inst_index| { + if (refToIndex(inst_ref)) |inst_index| { return gz.astgen.instructions.items(.tag)[inst_index].isNoReturn(); } return false; @@ -8807,19 +8800,6 @@ const GenZir = struct { return gz.astgen.tree.firstToken(gz.decl_node_index); } - fn indexToRef(gz: GenZir, inst: Zir.Inst.Index) Zir.Inst.Ref { - return @intToEnum(Zir.Inst.Ref, gz.ref_start_index + inst); - } - - fn refToIndex(gz: GenZir, inst: Zir.Inst.Ref) ?Zir.Inst.Index { - const ref_int = @enumToInt(inst); - if (ref_int >= gz.ref_start_index) { - return ref_int - gz.ref_start_index; - } else { - return null; - } - } - fn setBreakResultLoc(gz: *GenZir, parent_rl: AstGen.ResultLoc) void { // Depending on whether the result location is a pointer or value, different // ZIR needs to be generated. In the former case we rely on storing to the @@ -8998,7 +8978,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } else { try gz.astgen.extra.ensureUnusedCapacity( gpa, @@ -9025,7 +9005,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } } @@ -9079,7 +9059,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } fn addCall( @@ -9113,7 +9093,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } /// Note that this returns a `Zir.Inst.Index` not a ref. 
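Helpers documented as returning a Zir.Inst.Index rather than a ref all follow the same pattern: the raw index is kept so the instruction's trailing payload can be patched once its body is known, and only the finished instruction is handed out as a ref. A sketch of that flow (addBlock is named here for illustration; setBlockBody and indexToRef are the calls actually used throughout this file):

    fn blockExprSketch(gz: *GenZir, block_scope: *GenZir, node: ast.Node.Index) !Zir.Inst.Ref {
        // Reserve the block instruction; its payload is not written yet.
        const block_inst = try gz.addBlock(.block, node); // Zir.Inst.Index
        // ... generate the body into `block_scope`, a child of `gz` ...
        try block_scope.setBlockBody(block_inst); // writes the body into astgen.extra
        return indexToRef(block_inst); // expose a Ref only once the payload is complete
    }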
@@ -9164,7 +9144,7 @@ const GenZir = struct { }); gz.instructions.appendAssumeCapacity(new_index); astgen.string_bytes.appendSliceAssumeCapacity(mem.sliceAsBytes(limbs)); - return gz.indexToRef(new_index); + return indexToRef(new_index); } fn addFloat(gz: *GenZir, number: f32, src_node: ast.Node.Index) !Zir.Inst.Ref { @@ -9215,7 +9195,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } fn addExtendedPayload( @@ -9239,7 +9219,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } fn addExtendedMultiOp( @@ -9272,7 +9252,7 @@ const GenZir = struct { }); gz.instructions.appendAssumeCapacity(new_index); astgen.appendRefsAssumeCapacity(operands); - return gz.indexToRef(new_index); + return indexToRef(new_index); } fn addArrayTypeSentinel( @@ -9298,7 +9278,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } fn addUnTok( @@ -9457,7 +9437,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } fn addAsm( @@ -9515,7 +9495,7 @@ const GenZir = struct { } }, }); gz.instructions.appendAssumeCapacity(new_index); - return gz.indexToRef(new_index); + return indexToRef(new_index); } /// Note that this returns a `Zir.Inst.Index` not a ref. @@ -9693,7 +9673,7 @@ const GenZir = struct { } fn add(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Ref { - return gz.indexToRef(try gz.addAsIndex(inst)); + return indexToRef(try gz.addAsIndex(inst)); } fn addAsIndex(gz: *GenZir, inst: Zir.Inst) !Zir.Inst.Index { @@ -9840,3 +9820,18 @@ fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void { astgen.source_line = line; astgen.source_column = column; } + +const ref_start_index = Zir.Inst.Ref.typed_value_map.len; + +fn indexToRef(inst: Zir.Inst.Index) Zir.Inst.Ref { + return @intToEnum(Zir.Inst.Ref, ref_start_index + inst); +} + +fn refToIndex(inst: Zir.Inst.Ref) ?Zir.Inst.Index { + const ref_int = @enumToInt(inst); + if (ref_int >= ref_start_index) { + return ref_int - ref_start_index; + } else { + return null; + } +} From ef7080aed1a1a4dc54cb837938e462b4e6720734 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Jul 2021 16:32:11 -0700 Subject: [PATCH 04/53] stage2: update Liveness, SPIR-V for new AIR memory layout also do the inline assembly instruction --- BRANCH_TODO | 44 ---- src/Air.zig | 60 +++-- src/Compilation.zig | 57 +++-- src/Liveness.zig | 1 + src/Module.zig | 36 ++- src/Sema.zig | 563 +++++++++++++++++++++--------------------- src/codegen/spirv.zig | 405 +++++++++++++++--------------- 7 files changed, 592 insertions(+), 574 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 5bc4d2a2f5..3b946edbbd 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,24 +1,6 @@ * be sure to test debug info of parameters - /// Each bit represents the index of an `Inst` parameter in the `args` field. - /// If a bit is set, it marks the end of the lifetime of the corresponding - /// instruction parameter. For example, 0b101 means that the first and - /// third `Inst` parameters' lifetimes end after this instruction, and will - /// not have any more following references. 
- /// The most significant bit being set means that the instruction itself is - /// never referenced, in other words its lifetime ends as soon as it finishes. - /// If bit 15 (0b1xxx_xxxx_xxxx_xxxx) is set, it means this instruction itself is unreferenced. - /// If bit 14 (0bx1xx_xxxx_xxxx_xxxx) is set, it means this is a special case and the - /// lifetimes of operands are encoded elsewhere. - deaths: DeathsInt = undefined, - - - pub const DeathsInt = u16; - pub const DeathsBitIndex = std.math.Log2Int(DeathsInt); - pub const unreferenced_bit_index = @typeInfo(DeathsInt).Int.bits - 1; - pub const deaths_bits = unreferenced_bit_index - 1; - pub fn isUnused(self: Inst) bool { return (self.deaths & (1 << unreferenced_bit_index)) != 0; } @@ -115,32 +97,6 @@ - pub const Assembly = struct { - pub const base_tag = Tag.assembly; - - base: Inst, - asm_source: []const u8, - is_volatile: bool, - output_constraint: ?[]const u8, - inputs: []const []const u8, - clobbers: []const []const u8, - args: []const *Inst, - - pub fn operandCount(self: *const Assembly) usize { - return self.args.len; - } - pub fn getOperand(self: *const Assembly, index: usize) ?*Inst { - if (index < self.args.len) - return self.args[index]; - return null; - } - }; - - pub const StructFieldPtr = struct { - struct_ptr: *Inst, - field_index: usize, - }; - /// For debugging purposes, prints a function representation to stderr. pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void { diff --git a/src/Air.zig b/src/Air.zig index c57232fba0..112845559d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -1,5 +1,7 @@ //! Analyzed Intermediate Representation. -//! Sema inputs ZIR and outputs AIR. +//! This data is produced by Sema and consumed by codegen. +//! Unlike ZIR where there is one instance for an entire source file, each function +//! gets its own `Air` instance. const std = @import("std"); const Value = @import("value.zig").Value; @@ -27,38 +29,48 @@ pub const Inst = struct { data: Data, pub const Tag = enum(u8) { + /// The first N instructions in Air must be one arg instruction per function parameter. + /// Uses the `ty` field. + arg, /// Float or integer addition. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. add, /// Integer addition. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. addwrap, /// Float or integer subtraction. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. sub, /// Integer subtraction. Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. subwrap, /// Float or integer multiplication. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. mul, /// Integer multiplication. 
Wrapping is defined to be twos complement wrapping. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. mulwrap, /// Integer or float division. For integers, wrapping is undefined behavior. - /// Result type is the same as both operands. + /// Both operands are guaranteed to be the same type, and the result type + /// is the same as both operands. /// Uses the `bin_op` field. div, /// Allocates stack local memory. /// Uses the `ty` field. alloc, - /// TODO + /// Inline assembly. Uses the `ty_pl` field. Payload is `Asm`. assembly, /// Bitwise AND. `&`. /// Result type is the same as both operands. @@ -80,7 +92,7 @@ pub const Inst = struct { /// Uses the `ty_pl` field with payload `Block`. block, /// Return from a block with a result. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `br` field. br, /// Lowers to a hardware trap instruction, or the next best thing. @@ -109,11 +121,11 @@ pub const Inst = struct { /// Uses the `bin_op` field. cmp_neq, /// Conditional branch. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `CondBr`. cond_br, /// Switch branch. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `pl_op` field. Operand is the condition. Payload is `SwitchBr`. switch_br, /// A comptime-known value. Uses the `ty_pl` field, payload is index of @@ -166,7 +178,7 @@ pub const Inst = struct { load, /// A labeled block of code that loops forever. At the end of the body it is implied /// to repeat; no explicit "repeat" instruction terminates loop bodies. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `ty_pl` field. Payload is `Block`. loop, /// Converts a pointer to its address. Result type is always `usize`. @@ -178,7 +190,7 @@ pub const Inst = struct { /// Uses the `ty_op` field. ref, /// Return a value from a function. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `un_op` field. ret, /// Returns a pointer to a global variable. @@ -189,7 +201,7 @@ pub const Inst = struct { /// Uses the `bin_op` field. store, /// Indicates the program counter will never get to this instruction. - /// Result type is always noreturn. + /// Result type is always noreturn; no instructions in a block follow this one. unreach, /// Convert from one float type to another. /// Uses the `ty_op` field. @@ -343,6 +355,16 @@ pub const StructField = struct { field_index: u32, }; +/// Trailing: +/// 0. `Ref` for every outputs_len +/// 1. `Ref` for every inputs_len +pub const Asm = struct { + /// Index to the corresponding ZIR instruction. + /// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and + /// clobbers are found via here. 
+ zir_index: u32, +}; + pub fn getMainBody(air: Air) []const Air.Inst.Index { const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; const body_len = air.extra[body_index]; @@ -369,3 +391,11 @@ pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end .end = i, }; } + +pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void { + air.instructions.deinit(gpa); + gpa.free(air.extra); + gpa.free(air.values); + gpa.free(air.variables); + air.* = undefined; +} diff --git a/src/Compilation.zig b/src/Compilation.zig index b9055eceed..74ad7b2aae 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -13,7 +13,7 @@ const target_util = @import("target.zig"); const Package = @import("Package.zig"); const link = @import("link.zig"); const trace = @import("tracy.zig").trace; -const liveness = @import("liveness.zig"); +const Liveness = @import("Liveness.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const glibc = @import("glibc.zig"); @@ -1922,6 +1922,7 @@ pub fn getCompileLogOutput(self: *Compilation) []const u8 { } pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemory }!void { + const gpa = self.gpa; // If the terminal is dumb, we dont want to show the user all the // output. var progress: std.Progress = .{ .dont_print_on_dumb = true }; @@ -2005,7 +2006,8 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor assert(decl.has_tv); if (decl.val.castTag(.function)) |payload| { const func = payload.data; - switch (func.state) { + + var air = switch (func.state) { .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { error.AnalysisFail => { assert(func.state != .in_progress); @@ -2016,18 +2018,39 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .in_progress => unreachable, .inline_only => unreachable, // don't queue work for this .sema_failure, .dependency_failure => continue, - .success => {}, - } - // Here we tack on additional allocations to the Decl's arena. The allocations - // are lifetime annotations in the ZIR. 
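The extraData helper added to Air.zig above decodes a comptime-known struct field by field out of the flat extra array and also reports the end offset, which is how trailing data is recovered. Assuming Block carries a body_len: u32 as the tag comments indicate, a block's body can be sliced out like this (a usage sketch with a hypothetical blockBody helper, not code from this patch):

    fn blockBody(air: Air, inst: Air.Inst.Index) []const Air.Inst.Index {
        const ty_pl = air.instructions.items(.data)[inst].ty_pl;
        const extra = air.extraData(Air.Block, ty_pl.payload);
        // Air.Inst.Index is a u32 index, so the extra array slices directly.
        return air.extra[extra.end..][0..extra.data.body_len];
    }

The same pattern serves CondBr, SwitchBr, and the new Asm payload, whose trailing Refs for outputs and inputs begin at extra.end.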
- var decl_arena = decl.value_arena.?.promote(module.gpa); - defer decl.value_arena.?.* = decl_arena.state; + .success => unreachable, // don't queue it twice + }; + defer air.deinit(gpa); + log.debug("analyze liveness of {s}", .{decl.name}); - try liveness.analyze(module.gpa, &decl_arena.allocator, func.body); + var liveness = try Liveness.analyze(gpa, air); + defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { func.dump(module.*); } + + assert(decl.ty.hasCodeGenBits()); + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + continue; + }, + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; } assert(decl.ty.hasCodeGenBits()); @@ -2039,9 +2062,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor continue; }, else => { - try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - module.gpa, + gpa, decl.srcLoc(), "unable to codegen: {s}", .{@errorName(err)}, @@ -2070,7 +2093,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; const emit_h = module.emit_h.?; - _ = try emit_h.decl_table.getOrPut(module.gpa, decl); + _ = try emit_h.decl_table.getOrPut(gpa, decl); const decl_emit_h = decl.getEmitH(module); const fwd_decl = &decl_emit_h.fwd_decl; fwd_decl.shrinkRetainingCapacity(0); @@ -2079,7 +2102,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor .module = module, .error_msg = null, .decl = decl, - .fwd_decl = fwd_decl.toManaged(module.gpa), + .fwd_decl = fwd_decl.toManaged(gpa), // we don't want to emit optionals and error unions to headers since they have no ABI .typedefs = undefined, }; @@ -2087,14 +2110,14 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor c_codegen.genHeader(&dg) catch |err| switch (err) { error.AnalysisFail => { - try emit_h.failed_decls.put(module.gpa, decl, dg.error_msg.?); + try emit_h.failed_decls.put(gpa, decl, dg.error_msg.?); continue; }, else => |e| return e, }; fwd_decl.* = dg.fwd_decl.moveToUnmanaged(); - fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len); + fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len); }, }, .analyze_decl => |decl| { @@ -2111,9 +2134,9 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; self.bin_file.updateDeclLineNumber(module, decl) catch |err| { - try module.failed_decls.ensureCapacity(module.gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - module.gpa, + gpa, decl.srcLoc(), "unable to update line number: {s}", .{@errorName(err)}, diff --git 
a/src/Liveness.zig b/src/Liveness.zig index 828614dcbb..84e2495054 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -150,6 +150,7 @@ fn analyzeInst( const gpa = a.gpa; const table = &a.table; const inst_tags = a.air.instructions.items(.tag); + const inst_datas = a.air.instructions.items(.data); // No tombstone for this instruction means it is never referenced, // and its birth marks its own death. Very metal 🤘 diff --git a/src/Module.zig b/src/Module.zig index 2f1dc0b33b..6273243ee2 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -739,8 +739,6 @@ pub const Union = struct { pub const Fn = struct { /// The Decl that corresponds to the function itself. owner_decl: *Decl, - /// undefined unless analysis state is `success`. - body: ir.Body, /// The ZIR instruction that is a function instruction. Use this to find /// the body. We store this rather than the body directly so that when ZIR /// is regenerated on update(), we can map this to the new corresponding @@ -3585,17 +3583,19 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { +pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { const tracy = trace(@src()); defer tracy.end(); + const gpa = mod.gpa; + // Use the Decl's arena for function memory. - var arena = decl.value_arena.?.promote(mod.gpa); + var arena = decl.value_arena.?.promote(gpa); defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try mod.gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); - defer mod.gpa.free(param_inst_list); + const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); + defer gpa.free(param_inst_list); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); @@ -3615,7 +3615,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { var sema: Sema = .{ .mod = mod, - .gpa = mod.gpa, + .gpa = gpa, .arena = &arena.allocator, .code = zir, .owner_decl = decl, @@ -3626,6 +3626,11 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { }; defer sema.deinit(); + // First few indexes of extra are reserved and set at the end. + const reserved_count = @typeInfo(Air.ExtraIndex).Enum.fields.len; + try sema.air_extra.ensureTotalCapacity(gpa, reserved_count); + sema.air_extra.items.len += reserved_count; + var inner_block: Scope.Block = .{ .parent = null, .sema = &sema, @@ -3634,20 +3639,29 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !void { .inlining = null, .is_comptime = false, }; - defer inner_block.instructions.deinit(mod.gpa); + defer inner_block.instructions.deinit(gpa); // AIR currently requires the arg parameters to be the first N instructions - try inner_block.instructions.appendSlice(mod.gpa, param_inst_list); + try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); try sema.analyzeFnBody(&inner_block, func.zir_body_inst); - const instructions = try arena.allocator.dupe(*ir.Inst, inner_block.instructions.items); + // Copy the block into place and mark that as the main block. 
+    sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len;
+    try sema.air_extra.appendSlice(inner_block.instructions.items);
+
     func.state = .success;
-    func.body = .{ .instructions = instructions };
     log.debug("set {s} to success", .{decl.name});
+
+    return Air{
+        .instructions = sema.air_instructions.toOwnedSlice(),
+        .extra = sema.air_extra.toOwnedSlice(),
+        .values = sema.air_values.toOwnedSlice(),
+        .variables = sema.air_variables.toOwnedSlice(),
+    };
 }
 
 fn markOutdatedDecl(mod: *Module, decl: *Decl) !void {
diff --git a/src/Sema.zig b/src/Sema.zig
index 85cb4aa423..b4e10837af 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -1,6 +1,6 @@
 //! Semantic analysis of ZIR instructions.
 //! Shared to every Block. Stored on the stack.
-//! State used for compiling a `Zir` into AIR.
+//! State used for compiling a ZIR into AIR.
 //! Transforms untyped ZIR instructions into semantically-analyzed AIR instructions.
 //! Does type checking, comptime control flow, and safety-check generation.
 //! This is the heart of the Zig compiler.
@@ -11,6 +11,10 @@ gpa: *Allocator,
 /// Points to the arena allocator of the Decl.
 arena: *Allocator,
 code: Zir,
+air_instructions: std.MultiArrayList(Air.Inst) = .{},
+air_extra: ArrayListUnmanaged(u32) = .{},
+air_values: ArrayListUnmanaged(Value) = .{},
+air_variables: ArrayListUnmanaged(Module.Var) = .{},
 /// Maps ZIR to AIR.
 inst_map: InstMap = .{},
 /// When analyzing an inline function call, owner_decl is the Decl of the caller
@@ -32,7 +36,7 @@ func: ?*Module.Fn,
 /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function,
 /// > otherwise it is the number of parameters of the function.
 /// > param_count: u32
-param_inst_list: []const *ir.Inst,
+param_inst_list: []const Air.Inst.Index,
 branch_quota: u32 = 1000,
 branch_count: u32 = 0,
 /// This field is updated when a new source location becomes active, so that
@@ -65,10 +69,15 @@ const LazySrcLoc = Module.LazySrcLoc;
 const RangeSet = @import("RangeSet.zig");
 const target_util = @import("target.zig");
 
-pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, *ir.Inst);
+pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index);
 
 pub fn deinit(sema: *Sema) void {
-    sema.inst_map.deinit(sema.gpa);
+    const gpa = sema.gpa;
+    sema.air_instructions.deinit(gpa);
+    sema.air_extra.deinit(gpa);
+    sema.air_values.deinit(gpa);
+    sema.air_variables.deinit(gpa);
+    sema.inst_map.deinit(gpa);
     sema.* = undefined;
 }
 
@@ -108,7 +117,7 @@ pub fn analyzeFnBody(
 /// Returns only the result from the body that is specified.
 /// Only appropriate to call when it is determined at comptime that this body
 /// has no peers.
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!*Inst { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -533,7 +542,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -569,7 +578,7 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } /// TODO when we rework AIR memory layout, this function will no longer have a possible error. -pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!*ir.Inst { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. @@ -618,19 +627,19 @@ pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Z return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: *ir.Inst) !Type { +fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !Value { +fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value { return (try sema.resolveDefinedValue(block, src, base)) orelse return sema.failWithNeededComptime(block, src); } -fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: *ir.Inst) !?Value { +fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); @@ -644,7 +653,7 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: *ir.Inst, + base: Air.Inst.Index, ) !?Value { if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { return opv; @@ -708,13 +717,13 @@ pub fn resolveInstConst( }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -749,7 +758,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) 
InnerError!Air.Inst.Index { const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -820,7 +829,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1017,7 +1026,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1081,7 +1090,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1101,7 +1110,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1141,7 +1150,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1153,7 +1162,7 @@ fn zirRetPtr( return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1166,7 +1175,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1191,7 +1200,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1213,7 +1222,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1247,7 +1256,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1269,13 +1278,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1298,13 +1307,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1317,7 +1326,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!* return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1336,7 +1345,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1589,7 +1598,7 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1625,7 +1634,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1653,7 +1662,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1662,7 +1671,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1680,7 +1689,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
}); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1693,7 +1702,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!* }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1722,7 +1731,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1772,7 +1781,7 @@ fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Z return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1832,12 +1841,12 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE // Loop repetition is implied so the last instruction may or may not be a noreturn instruction. try child_block.instructions.append(sema.gpa, &loop_inst.base); - loop_inst.body = .{ .instructions = try sema.arena.dupe(*Inst, loop_block.instructions.items) }; + loop_inst.body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, loop_block.instructions.items) }; return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1847,13 +1856,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1911,7 +1920,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1922,7 +1931,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = 
trace(@src()); defer tracy.end(); @@ -1933,7 +1942,7 @@ fn analyzeBlockBody( if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. - const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items); + const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); return copied_instructions[copied_instructions.len - 1]; } @@ -1944,7 +1953,7 @@ fn analyzeBlockBody( if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. - const copied_instructions = try sema.arena.dupe(*Inst, child_block.instructions.items[0..last_inst_index]); + const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); return merges.results.items[0]; } @@ -1959,7 +1968,7 @@ fn analyzeBlockBody( const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items); merges.block_inst.base.ty = resolved_ty; merges.block_inst.body = .{ - .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), }; // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. @@ -1991,7 +2000,7 @@ fn analyzeBlockBody( }, .block = merges.block_inst, .body = .{ - .instructions = try sema.arena.dupe(*Inst, coerce_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, coerce_block.instructions.items), }, }; } @@ -2130,7 +2139,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2138,7 +2147,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2192,7 +2201,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2204,7 +2213,7 @@ fn zirCall( const func = try sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(*Inst, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. 
resolved_args[i] = try sema.resolveInst(zir_arg); @@ -2216,13 +2225,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: *ir.Inst, + func: Air.Inst.Index, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const *ir.Inst, -) InnerError!*ir.Inst { + args: []const Air.Inst.Index, +) InnerError!Air.Inst.Index { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2279,7 +2288,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: *Inst = if (is_inline_call) res: { + const result: Air.Inst.Index = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2377,7 +2386,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2389,7 +2398,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2401,7 +2410,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2409,7 +2418,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2424,7 +2433,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2437,7 +2446,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2452,7 +2461,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2465,7 +2474,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2486,7 +2495,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.constType(sema.arena, src, err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2505,7 +2514,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2535,7 +2544,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, result_ty, .bitcast, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2568,7 +2577,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2658,7 +2667,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2672,7 +2681,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; @@ -2680,7 +2689,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = try sema.resolveInst(inst_data.operand); - const enum_tag: 
*Inst = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand.ty.unionHasTag()) { @@ -2754,7 +2763,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2815,7 +2824,7 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2858,7 +2867,7 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2896,7 +2905,7 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2930,7 +2939,7 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2969,7 +2978,7 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2995,7 +3004,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3042,7 +3051,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3093,7 +3102,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3234,7 +3243,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3242,7 +3251,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Ins return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3258,13 +3267,13 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: 
Zir.Inst.Ref, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const dest_type = try sema.resolveType(block, src, zir_dest_type); const operand = try sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3281,7 +3290,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, ty, .ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3299,7 +3308,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3312,7 +3321,7 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3327,7 +3336,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3340,7 +3349,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3383,7 +3392,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3396,7 +3405,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.bitcast(block, dest_type, operand); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3439,7 +3448,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten 
float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3454,7 +3463,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3472,7 +3481,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3482,7 +3491,7 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3495,7 +3504,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3508,7 +3517,7 @@ fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3522,7 +3531,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3544,7 +3553,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3563,7 +3572,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3582,7 +3591,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -3615,7 +3624,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = 
trace(@src()); defer tracy.end(); @@ -3645,14 +3654,14 @@ fn zirSwitchBlockMulti( fn analyzeSwitch( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, extra_end: usize, special_prong: Zir.SpecialProng, scalar_cases_len: usize, multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const gpa = sema.gpa; const mod = sema.mod; @@ -4187,7 +4196,7 @@ fn analyzeSwitch( cases[scalar_i] = .{ .item = item_val, - .body = .{ .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items) }, + .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, }; } @@ -4207,7 +4216,7 @@ fn analyzeSwitch( case_block.instructions.shrinkRetainingCapacity(0); - var any_ok: ?*Inst = null; + var any_ok: ?Air.Inst.Index = null; const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { @@ -4280,7 +4289,7 @@ fn analyzeSwitch( try case_block.instructions.append(gpa, &new_condbr.base); const cond_body: Body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; case_block.instructions.shrinkRetainingCapacity(0); @@ -4288,7 +4297,7 @@ fn analyzeSwitch( extra_index += body_len; _ = try sema.analyzeBody(&case_block, body); new_condbr.then_body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; if (prev_condbr) |condbr| { condbr.else_body = cond_body; @@ -4303,7 +4312,7 @@ fn analyzeSwitch( case_block.instructions.shrinkRetainingCapacity(0); _ = try sema.analyzeBody(&case_block, special.body); const else_body: Body = .{ - .instructions = try sema.arena.dupe(*Inst, case_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), }; if (prev_condbr) |condbr| { condbr.else_body = else_body; @@ -4507,7 +4516,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4516,7 +4525,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -4541,7 +4550,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return mod.constBool(arena, src, false); } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4566,13 +4575,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return mod.constType(sema.arena, src, file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4581,7 +4590,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*In return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4594,7 +4603,7 @@ fn zirBitwise( block: *Scope.Block, inst: Zir.Inst.Index, ir_tag: ir.Inst.Tag, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4606,7 +4615,7 @@ fn zirBitwise( const lhs = try sema.resolveInst(extra.lhs); const rhs = try sema.resolveInst(extra.rhs); - const instructions = &[_]*Inst{ lhs, rhs }; + const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -4652,7 +4661,7 @@ fn zirBitwise( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4660,7 +4669,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
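    // The implemented binary operators in this file (`zirBitwise`, `zirCmp`,
    // `analyzeArithmetic`) all follow one shape, now expressed over indices:
    // resolve a peer type across the operand list, coerce both operands to
    // it, then append a single binary AIR instruction. A condensed sketch of
    // that pattern, assuming the signatures visible in the surrounding hunks:
    //
    //     const operands = [_]Air.Inst.Index{ lhs, rhs };
    //     const resolved_ty = try sema.resolvePeerTypes(block, src, &operands);
    //     const casted_lhs = try sema.coerce(block, resolved_ty, lhs, lhs_src);
    //     const casted_rhs = try sema.coerce(block, resolved_ty, rhs, rhs_src);
    //     // (the real code first unwraps vector types to a scalar_type; elided here)
    //     return block.addBinOp(src, resolved_ty, ir_tag, casted_lhs, casted_rhs);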
return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4668,7 +4677,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4681,7 +4690,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4695,7 +4704,7 @@ fn zirNegate( return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4715,7 +4724,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4729,13 +4738,13 @@ fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, zir_tag: Zir.Inst.Tag, - lhs: *Inst, - rhs: *Inst, + lhs: Air.Inst.Index, + rhs: Air.Inst.Index, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!*Inst { - const instructions = &[_]*Inst{ lhs, rhs }; +) InnerError!Air.Inst.Index { + const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -4844,7 +4853,7 @@ fn analyzeArithmetic( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4859,7 +4868,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -4899,7 +4908,7 @@ fn zirAsm( }; }; - const args = try sema.arena.alloc(*Inst, inputs_len); + const args = try sema.arena.alloc(Air.Inst.Index, inputs_len); const inputs = try sema.arena.alloc([]const u8, inputs_len); for (args) |*arg, arg_i| { @@ -4943,7 +4952,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5009,7 +5018,7 @@ fn zirCmp( return mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) 
== (op == .eq)); } - const instructions = &[_]*Inst{ lhs, rhs }; + const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); if (!resolved_type.isSelfComparable(is_equality_cmp)) { return mod.fail(&block.base, src, "operator not allowed for type '{}'", .{resolved_type}); @@ -5041,7 +5050,7 @@ fn zirCmp( return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5051,7 +5060,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5065,7 +5074,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5074,7 +5083,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5083,12 +5092,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5131,7 +5140,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; @@ -5140,7 +5149,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.constType(sema.arena, src, operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -5149,13 +5158,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5165,7 +5174,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5173,7 +5182,7 @@ fn zirTypeofPeer( const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const args = sema.code.refSlice(extra.end, extended.small); - const inst_list = try sema.gpa.alloc(*ir.Inst, args.len); + const inst_list = try sema.gpa.alloc(Air.Inst.Index, args.len); defer sema.gpa.free(inst_list); for (args) |arg_ref, i| { @@ -5184,7 +5193,7 @@ fn zirTypeofPeer( return sema.mod.constType(sema.arena, src, result_type); } -fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5206,7 +5215,7 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, comptime is_bool_or: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5237,7 +5246,7 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5292,12 +5301,12 @@ fn zirBoolBr( const rhs_result = try sema.resolveBody(rhs_block, body); _ = try rhs_block.addBr(src, block_inst, rhs_result); - const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, then_block.instructions.items) }; - const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, else_block.instructions.items) }; + const air_then_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, then_block.instructions.items) }; + const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, else_block.instructions.items) }; _ = try child_block.addCondBr(src, lhs, air_then_body, air_else_body); block_inst.body = .{ - .instructions = try sema.arena.dupe(*Inst, child_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), }; try parent_block.instructions.append(sema.gpa, 
&block_inst.base); return &block_inst.base; @@ -5307,7 +5316,7 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5321,7 +5330,7 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5332,7 +5341,7 @@ fn zirIsNonNullPtr( return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5341,7 +5350,7 @@ fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5385,14 +5394,14 @@ fn zirCondbr( _ = try sema.analyzeBody(&sub_block, then_body); const air_then_body: ir.Body = .{ - .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), }; sub_block.instructions.shrinkRetainingCapacity(0); _ = try sema.analyzeBody(&sub_block, else_body); const air_else_body: ir.Body = .{ - .instructions = try sema.arena.dupe(*Inst, sub_block.instructions.items), + .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), }; _ = try parent_block.addCondBr(src, cond, air_then_body, air_else_body); @@ -5470,7 +5479,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError fn analyzeRet( sema: *Sema, block: *Scope.Block, - operand: *Inst, + operand: Air.Inst.Index, src: LazySrcLoc, need_coercion: bool, ) InnerError!Zir.Inst.Index { @@ -5505,7 +5514,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5526,7 +5535,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.mod.constType(sema.arena, .unneeded, ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5580,7 +5589,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5594,13 +5603,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const 
inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5622,7 +5631,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: mem.set(Zir.Inst.Index, found_fields, 0); // The init values to use for the struct instance. - const field_inits = try gpa.alloc(*ir.Inst, struct_obj.fields.count()); + const field_inits = try gpa.alloc(Air.Inst.Index, struct_obj.fields.count()); defer gpa.free(field_inits); var field_i: u32 = 0; @@ -5713,7 +5722,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5721,7 +5730,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5729,7 +5738,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!*Inst { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5737,13 +5746,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5765,7 +5774,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: 
LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5774,7 +5783,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5783,84 +5792,84 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = 
sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5923,199 +5932,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, type_res, .bitcast, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn 
zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return 
sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); } -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!*Inst { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6126,7 +6135,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const inst_data = 
sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6138,7 +6147,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6204,7 +6213,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -6271,7 +6280,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6281,7 +6290,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6291,7 +6300,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6301,7 +6310,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6311,7 +6320,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6321,7 +6330,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6355,7 +6364,7 @@ pub const PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: PanicId) !void { +fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void { const block_inst = try sema.arena.create(Inst.Block); block_inst.* = .{ .base = .{ @@ -6364,12 +6373,12 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: .src = ok.src, }, .body = .{ - .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the condbr. 
+ .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the condbr. }, }; const ok_body: ir.Body = .{ - .instructions = try sema.arena.alloc(*Inst, 1), // Only need space for the br_void. + .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the br_void. }; const br_void = try sema.arena.create(Inst.BrVoid); br_void.* = .{ @@ -6395,7 +6404,7 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: *Inst, panic_id: _ = try sema.safetyPanic(&fail_block, ok.src, panic_id); - const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(*Inst, fail_block.instructions.items) }; + const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, fail_block.instructions.items) }; const condbr = try sema.arena.create(Inst.CondBr); condbr.* = .{ @@ -6417,7 +6426,7 @@ fn panicWithMsg( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - msg_inst: *ir.Inst, + msg_inst: Air.Inst.Index, ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6438,7 +6447,7 @@ fn panicWithMsg( .ty = try mod.optionalType(arena, ptr_stack_trace_ty), .val = Value.initTag(.null_value), }); - const args = try arena.create([2]*ir.Inst); + const args = try arena.create([2]Air.Inst.Index); args.* = .{ msg_inst, null_stack_trace }; _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); return always_noreturn; @@ -6494,10 +6503,10 @@ fn namedFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - object_ptr: *Inst, + object_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6647,7 +6656,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?*Inst { +) InnerError!?Air.Inst.Index { const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6671,11 +6680,11 @@ fn analyzeStructFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - struct_ptr: *Inst, + struct_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6706,11 +6715,11 @@ fn analyzeUnionFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - union_ptr: *Inst, + union_ptr: Air.Inst.Index, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6743,10 +6752,10 @@ fn elemPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - elem_index: *Inst, + array_ptr: Air.Inst.Index, + elem_index: Air.Inst.Index, elem_index_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6770,10 +6779,10 @@ fn elemPtrArray( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - elem_index: *Inst, + array_ptr: Air.Inst.Index, + elem_index: Air.Inst.Index, elem_index_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { if (array_ptr.value()) |array_ptr_val| { if 
(elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. @@ -6798,9 +6807,9 @@ fn coerce( sema: *Sema, block: *Scope.Block, dest_type: Type, - inst: *Inst, + inst: Air.Inst.Index, inst_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { if (dest_type.tag() == .var_args_param) { return sema.coerceVarArgParam(block, inst); } @@ -6976,7 +6985,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) InnerError!?*Inst { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7014,7 +7023,7 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) Inn return null; } -fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: *Inst) !*Inst { +fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index { switch (inst.ty.zigTypeTag()) { .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}), else => {}, @@ -7027,8 +7036,8 @@ fn storePtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: *Inst, - uncasted_value: *Inst, + ptr: Air.Inst.Index, + uncasted_value: Air.Inst.Index, ) !void { if (ptr.ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); @@ -7076,7 +7085,7 @@ fn storePtr( _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); } -fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // Keep the comptime Value representation; take the new type. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7086,7 +7095,7 @@ fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Ins return block.addUnOp(inst.src, dest_type, .bitcast, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7094,7 +7103,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7102,12 +7111,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!*Inst { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7128,7 +7137,7 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl }); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!*Inst { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index { const variable = tv.val.castTag(.variable).?.data; const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); @@ -7157,8 +7166,8 @@ fn analyzeRef( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, -) InnerError!*Inst { + operand: Air.Inst.Index, +) InnerError!Air.Inst.Index { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7176,9 +7185,9 @@ fn analyzeLoad( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: *Inst, + ptr: Air.Inst.Index, ptr_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const elem_ty = switch (ptr.ty.zigTypeTag()) { .Pointer => ptr.ty.elemType(), else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}), @@ -7201,9 +7210,9 @@ fn analyzeIsNull( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, + operand: Air.Inst.Index, invert_logic: bool, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7222,8 +7231,8 @@ fn analyzeIsNonErr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: *Inst, -) InnerError!*Inst { + operand: Air.Inst.Index, +) InnerError!Air.Inst.Index { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); @@ -7243,12 +7252,12 @@ fn analyzeSlice( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: *Inst, - start: *Inst, - end_opt: ?*Inst, - sentinel_opt: ?*Inst, + array_ptr: Air.Inst.Index, + start: Air.Inst.Index, + end_opt: ?Air.Inst.Index, + sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7319,10 +7328,10 @@ fn cmpNumeric( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - lhs: *Inst, - rhs: 
*Inst, + lhs: Air.Inst.Index, + rhs: Air.Inst.Index, op: std.math.CompareOperator, -) InnerError!*Inst { +) InnerError!Air.Inst.Index { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -7488,7 +7497,7 @@ fn cmpNumeric( return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { if (inst.value()) |val| { return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); } @@ -7497,7 +7506,7 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); } -fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst) !*Inst { +fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { const err_union = dest_type.castTag(.error_union).?; if (inst.value()) |val| { if (inst.ty.zigTypeTag() != .ErrorSet) { @@ -7568,7 +7577,7 @@ fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: *Inst } } -fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []*Inst) !Type { +fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []Air.Inst.Index) !Type { if (instructions.len == 0) return Type.initTag(.noreturn); @@ -7704,7 +7713,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!*ir.Inst { +) InnerError!Air.Inst.Index { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 60e9a96275..4a9087d7f5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -18,14 +18,14 @@ pub const Word = u32; pub const ResultId = u32; pub const TypeMap = std.HashMap(Type, u32, Type.HashContext64, std.hash_map.default_max_load_percentage); -pub const InstMap = std.AutoHashMap(*Inst, ResultId); +pub const InstMap = std.AutoHashMap(Air.Inst.Index, ResultId); const IncomingBlock = struct { src_label_id: ResultId, break_value_id: ResultId, }; -pub const BlockMap = std.AutoHashMap(*Inst.Block, struct { +pub const BlockMap = std.AutoHashMap(Air.Inst.Index, struct { label_id: ResultId, incoming_blocks: *std.ArrayListUnmanaged(IncomingBlock), }); @@ -279,16 +279,17 @@ pub const DeclGen = struct { return self.spv.module.getTarget(); } - fn fail(self: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) Error { + fn fail(self: *DeclGen, comptime format: []const u8, args: anytype) Error { @setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(self.decl); self.error_msg = try Module.ErrorMsg.create(self.spv.module.gpa, src_loc, format, args); return error.AnalysisFail; } - fn resolve(self: *DeclGen, inst: *Inst) !ResultId { + fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId { if (inst.value()) |val| { - return self.genConstant(inst.src, inst.ty, val); + return self.genConstant(inst.ty, val); } return self.inst_results.get(inst).?; // Instruction does not dominate all uses! @@ -313,7 +314,7 @@ pub const DeclGen = struct { const target = self.getTarget(); // The backend will never be asked to compiler a 0-bit integer, so we won't have to handle those in this function. 
- std.debug.assert(bits != 0); + assert(bits != 0); // 8, 16 and 64-bit integers require the Int8, Int16 and Inr64 capabilities respectively. // 32-bit integers are always supported (see spec, 2.16.1, Data rules). @@ -387,19 +388,19 @@ pub const DeclGen = struct { .composite_integer }; }, // As of yet, there is no vector support in the self-hosted compiler. - .Vector => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), + .Vector => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for Vector", .{}), // TODO: For which types is this the case? - else => self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), + else => self.fail("TODO: SPIR-V backend: implement arithmeticTypeInfo for {}", .{ty}), }; } /// Generate a constant representing `val`. /// TODO: Deduplication? - fn genConstant(self: *DeclGen, src: LazySrcLoc, ty: Type, val: Value) Error!ResultId { + fn genConstant(self: *DeclGen, ty: Type, val: Value) Error!ResultId { const target = self.getTarget(); const code = &self.spv.binary.types_globals_constants; const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(src, ty); + const result_type_id = try self.genType(ty); if (val.isUndef()) { try writeInstruction(code, .OpUndef, &[_]Word{ result_type_id, result_id }); @@ -411,13 +412,13 @@ pub const DeclGen = struct { const int_info = ty.intInfo(target); const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. - return self.fail(src, "TODO: SPIR-V backend: implement composite int constants for {}", .{ty}); + return self.fail("TODO: SPIR-V backend: implement composite int constants for {}", .{ty}); }; // We can just use toSignedInt/toUnsignedInt here as it returns u64 - a type large enough to hold any // SPIR-V native type (up to i/u64 with Int64). If SPIR-V ever supports native ints of a larger size, this // might need to be updated. - std.debug.assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64)); + assert(self.largestSupportedIntBits() <= std.meta.bitCount(u64)); var int_bits = if (ty.isSignedInt()) @bitCast(u64, val.toSignedInt()) else val.toUnsignedInt(); // Mask the low bits which make up the actual integer. This is to make sure that negative values @@ -469,13 +470,13 @@ pub const DeclGen = struct { } }, .Void => unreachable, - else => return self.fail(src, "TODO: SPIR-V backend: constant generation of type {}", .{ty}), + else => return self.fail("TODO: SPIR-V backend: constant generation of type {}", .{ty}), } return result_id; } - fn genType(self: *DeclGen, src: LazySrcLoc, ty: Type) Error!ResultId { + fn genType(self: *DeclGen, ty: Type) Error!ResultId { // We can't use getOrPut here so we can recursively generate types. if (self.spv.types.get(ty)) |already_generated| { return already_generated; @@ -492,7 +493,7 @@ pub const DeclGen = struct { const int_info = ty.intInfo(target); const backing_bits = self.backingIntBits(int_info.bits) orelse { // Integers too big for any native type are represented as "composite integers": An array of largestSupportedIntBits. - return self.fail(src, "TODO: SPIR-V backend: implement composite int {}", .{ty}); + return self.fail("TODO: SPIR-V backend: implement composite int {}", .{ty}); }; // TODO: If backing_bits != int_info.bits, a duplicate type might be generated here. 
@@ -518,7 +519,7 @@ pub const DeclGen = struct { }; if (!supported) { - return self.fail(src, "Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); + return self.fail("Floating point width of {} bits is not supported for the current SPIR-V feature set", .{bits}); } try writeInstruction(code, .OpTypeFloat, &[_]Word{ result_id, bits }); @@ -526,19 +527,19 @@ pub const DeclGen = struct { .Fn => { // We only support zig-calling-convention functions, no varargs. if (ty.fnCallingConvention() != .Unspecified) - return self.fail(src, "Unsupported calling convention for SPIR-V", .{}); + return self.fail("Unsupported calling convention for SPIR-V", .{}); if (ty.fnIsVarArgs()) - return self.fail(src, "VarArgs unsupported for SPIR-V", .{}); + return self.fail("VarArgs unsupported for SPIR-V", .{}); // In order to avoid a temporary here, first generate all the required types and then simply look them up // when generating the function type. const params = ty.fnParamLen(); var i: usize = 0; while (i < params) : (i += 1) { - _ = try self.genType(src, ty.fnParamType(i)); + _ = try self.genType(ty.fnParamType(i)); } - const return_type_id = try self.genType(src, ty.fnReturnType()); + const return_type_id = try self.genType(ty.fnReturnType()); // result id + result type id + parameter type ids. try writeOpcode(code, .OpTypeFunction, 2 + @intCast(u16, ty.fnParamLen())); @@ -551,7 +552,7 @@ pub const DeclGen = struct { } }, // When recursively generating a type, we cannot infer the pointer's storage class. See genPointerType. - .Pointer => return self.fail(src, "Cannot create pointer with unkown storage class", .{}), + .Pointer => return self.fail("Cannot create pointer with unkown storage class", .{}), .Vector => { // Although not 100% the same, Zig vectors map quite neatly to SPIR-V vectors (including many integer and float operations // which work on them), so simply use those. @@ -561,7 +562,7 @@ pub const DeclGen = struct { // is adequate at all for this. // TODO: Vectors are not yet supported by the self-hosted compiler itself it seems. - return self.fail(src, "TODO: SPIR-V backend: implement type Vector", .{}); + return self.fail("TODO: SPIR-V backend: implement type Vector", .{}); }, .Null, .Undefined, @@ -573,7 +574,7 @@ pub const DeclGen = struct { .BoundFn => unreachable, // this type will be deleted from the language. - else => |tag| return self.fail(src, "TODO: SPIR-V backend: implement type {}s", .{tag}), + else => |tag| return self.fail("TODO: SPIR-V backend: implement type {}s", .{tag}), } try self.spv.types.putNoClobber(ty, result_id); @@ -582,8 +583,8 @@ pub const DeclGen = struct { /// SPIR-V requires pointers to have a storage class (address space), and so we have a special function for that. /// TODO: The result of this needs to be cached. - fn genPointerType(self: *DeclGen, src: LazySrcLoc, ty: Type, storage_class: spec.StorageClass) !ResultId { - std.debug.assert(ty.zigTypeTag() == .Pointer); + fn genPointerType(self: *DeclGen, ty: Type, storage_class: spec.StorageClass) !ResultId { + assert(ty.zigTypeTag() == .Pointer); const code = &self.spv.binary.types_globals_constants; const result_id = self.spv.allocResultId(); @@ -591,7 +592,7 @@ pub const DeclGen = struct { // TODO: There are many constraints which are ignored for now: We may only create pointers to certain types, and to other types // if more capabilities are enabled. For example, we may only create pointers to f16 if Float16Buffer is enabled. 
// These also relates to the pointer's address space. - const child_id = try self.genType(src, ty.elemType()); + const child_id = try self.genType(ty.elemType()); try writeInstruction(code, .OpTypePointer, &[_]Word{ result_id, @enumToInt(storage_class), child_id }); @@ -602,9 +603,9 @@ pub const DeclGen = struct { const decl = self.decl; const result_id = decl.fn_link.spirv.id; - if (decl.val.castTag(.function)) |func_payload| { - std.debug.assert(decl.ty.zigTypeTag() == .Fn); - const prototype_id = try self.genType(.{ .node_offset = 0 }, decl.ty); + if (decl.val.castTag(.function)) |_| { + assert(decl.ty.zigTypeTag() == .Fn); + const prototype_id = try self.genType(decl.ty); try writeInstruction(&self.spv.binary.fn_decls, .OpFunction, &[_]Word{ self.spv.types.get(decl.ty.fnReturnType()).?, // This type should be generated along with the prototype. result_id, @@ -631,189 +632,167 @@ pub const DeclGen = struct { try writeInstruction(&self.spv.binary.fn_decls, .OpLabel, &[_]Word{root_block_id}); self.current_block_label_id = root_block_id; - try self.genBody(func_payload.data.body); + const main_body = self.air.getMainBody(); + try self.genBody(main_body); // Append the actual code into the fn_decls section. try self.spv.binary.fn_decls.appendSlice(self.code.items); try writeInstruction(&self.spv.binary.fn_decls, .OpFunctionEnd, &[_]Word{}); } else { - return self.fail(.{ .node_offset = 0 }, "TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()}); + return self.fail("TODO: SPIR-V backend: generate decl type {}", .{decl.ty.zigTypeTag()}); } } - fn genBody(self: *DeclGen, body: ir.Body) Error!void { - for (body.instructions) |inst| { + fn genBody(self: *DeclGen, body: []const Air.Inst.Index) Error!void { + for (body) |inst| { try self.genInst(inst); } } - fn genInst(self: *DeclGen, inst: *Inst) !void { - const result_id = switch (inst.tag) { - .add, .addwrap => try self.genBinOp(inst.castTag(.add).?), - .sub, .subwrap => try self.genBinOp(inst.castTag(.sub).?), - .mul, .mulwrap => try self.genBinOp(inst.castTag(.mul).?), - .div => try self.genBinOp(inst.castTag(.div).?), - .bit_and => try self.genBinOp(inst.castTag(.bit_and).?), - .bit_or => try self.genBinOp(inst.castTag(.bit_or).?), - .xor => try self.genBinOp(inst.castTag(.xor).?), - .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?), - .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?), - .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?), - .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?), - .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?), - .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?), - .bool_and => try self.genBinOp(inst.castTag(.bool_and).?), - .bool_or => try self.genBinOp(inst.castTag(.bool_or).?), - .not => try self.genUnOp(inst.castTag(.not).?), - .alloc => try self.genAlloc(inst.castTag(.alloc).?), - .arg => self.genArg(), - .block => (try self.genBlock(inst.castTag(.block).?)) orelse return, - .br => return try self.genBr(inst.castTag(.br).?), - .br_void => return try self.genBrVoid(inst.castTag(.br_void).?), - // TODO: Breakpoints won't be supported in SPIR-V, but the compiler seems to insert them - // throughout the IR. 
+ fn genInst(self: *DeclGen, inst: Air.Inst.Index) !void { + const air_tags = self.air.instructions.items(.tag); + const result_id = switch (air_tags[inst]) { + // zig fmt: off + .add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), + .sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), + .mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), + .div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), + + .bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd), + .bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr), + .xor => try self.genBinOpSimple(inst, .OpBitwiseXor), + .bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd), + .bool_or => try self.genBinOpSimple(inst, .OpLogicalOr), + + .not => try self.genNot(inst), + + .cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), + .cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), + .cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), + .cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), + .cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), + .cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), + + .arg => self.genArg(), + .alloc => try self.genAlloc(inst), + .block => (try self.genBlock(inst)) orelse return, + .load => try self.genLoad(inst), + + .br => return self.genBr(inst), .breakpoint => return, - .condbr => return try self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, - .dbg_stmt => return try self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .load => try self.genLoad(inst.castTag(.load).?), - .loop => return try self.genLoop(inst.castTag(.loop).?), - .ret => return try self.genRet(inst.castTag(.ret).?), - .retvoid => return try self.genRetVoid(), - .store => return try self.genStore(inst.castTag(.store).?), - .unreach => return try self.genUnreach(), - else => return self.fail(inst.src, "TODO: SPIR-V backend: implement inst {s}", .{@tagName(inst.tag)}), + .condbr => return self.genCondBr(inst), + .constant => unreachable, + .dbg_stmt => return self.genDbgStmt(inst), + .loop => return self.genLoop(inst), + .ret => return self.genRet(inst), + .store => return self.genStore(inst), + .unreach => return self.genUnreach(), + // zig fmt: on }; try self.inst_results.putNoClobber(inst, result_id); } - fn genBinOp(self: *DeclGen, inst: *Inst.BinOp) !ResultId { - // TODO: Will lhs and rhs have the same type? - const lhs_id = try self.resolve(inst.lhs); - const rhs_id = try self.resolve(inst.rhs); + fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); + const result_id = self.spv.allocResultId(); + try writeInstruction(&self.code, opcode, &[_]Word{ + result_type_id, result_id, lhs_id, rhs_id, + }); + return result_id; + } + + fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + // LHS and RHS are guaranteed to have the same type, and AIR guarantees + // the result to be the same as the LHS and RHS, which matches SPIR-V. 
+ const ty = self.air.getType(inst); + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); - // TODO: Is the result the same as the argument types? - // This is supposed to be the case for SPIR-V. - std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty)); - std.debug.assert(inst.base.ty.tag() == .bool or inst.base.ty.eql(inst.lhs.ty)); + assert(self.air.getType(bin_op.lhs).eql(ty)); + assert(self.air.getType(bin_op.rhs).eql(ty)); - // Binary operations are generally applicable to both scalar and vector operations in SPIR-V, but int and float - // versions of operations require different opcodes. - // For operations which produce bools, the information of inst.base.ty is not useful, so just pick either operand - // instead. - const info = try self.arithmeticTypeInfo(inst.lhs.ty); + // Binary operations are generally applicable to both scalar and vector operations + // in SPIR-V, but int and float versions of operations require different opcodes. + const info = try self.arithmeticTypeInfo(ty); - if (info.class == .composite_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - } else if (info.class == .strange_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for strange integers", .{}); - } - - const is_float = info.class == .float; - const is_signed = info.signedness == .signed; - // **Note**: All these operations must be valid for vectors as well! - const opcode = switch (inst.base.tag) { - // The regular integer operations are all defined for wrapping. Since theyre only relevant for integers, - // we can just switch on both cases here. - .add, .addwrap => if (is_float) Opcode.OpFAdd else Opcode.OpIAdd, - .sub, .subwrap => if (is_float) Opcode.OpFSub else Opcode.OpISub, - .mul, .mulwrap => if (is_float) Opcode.OpFMul else Opcode.OpIMul, - // TODO: Trap if divisor is 0? - // TODO: Figure out of OpSDiv for unsigned/OpUDiv for signed does anything useful. - // => Those are probably for divTrunc and divFloor, though the compiler does not yet generate those. - // => TODO: Figure out how those work on the SPIR-V side. - // => TODO: Test these. - .div => if (is_float) Opcode.OpFDiv else if (is_signed) Opcode.OpSDiv else Opcode.OpUDiv, - // Only integer versions for these. - .bit_and => Opcode.OpBitwiseAnd, - .bit_or => Opcode.OpBitwiseOr, - .xor => Opcode.OpBitwiseXor, - // Bool -> bool operations. - .bool_and => Opcode.OpLogicalAnd, - .bool_or => Opcode.OpLogicalOr, + const opcode_index: usize = switch (info.class) { + .composite_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{}); + }, + .strange_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{}); + }, + .integer => switch (info.signedness) { + .signed => 1, + .unsigned => 2, + }, + .float => 0, else => unreachable, }; - + const opcode = ops[opcode_index]; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id }); // TODO: Trap on overflow? Probably going to be annoying. // TODO: Look into SPV_KHR_no_integer_wrap_decoration which provides NoSignedWrap/NoUnsignedWrap. 
- if (info.class != .strange_integer) - return result_id; - - return self.fail(inst.base.src, "TODO: SPIR-V backend: strange integer operation mask", .{}); + return result_id; } - fn genCmp(self: *DeclGen, inst: *Inst.BinOp) !ResultId { - const lhs_id = try self.resolve(inst.lhs); - const rhs_id = try self.resolve(inst.rhs); - + fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs_id = try self.resolve(bin_op.lhs); + const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(Type.initTag(.bool)); + const op_ty = self.air.getType(bin_op.lhs); + assert(op_ty.eql(self.air.getType(bin_op.rhs))); - // All of these operations should be 2 equal types -> bool - std.debug.assert(inst.rhs.ty.eql(inst.lhs.ty)); - std.debug.assert(inst.base.ty.tag() == .bool); + // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, + // but int and float versions of operations require different opcodes. + const info = try self.arithmeticTypeInfo(op_ty); - // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, but int and float - // versions of operations require different opcodes. - // Since inst.base.ty is always bool and so not very useful, and because both arguments must be the same, just get the info - // from either of the operands. - const info = try self.arithmeticTypeInfo(inst.lhs.ty); - - if (info.class == .composite_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: binary operations for composite integers", .{}); - } else if (info.class == .strange_integer) { - return self.fail(inst.base.src, "TODO: SPIR-V backend: comparison for strange integers", .{}); - } - - const is_bool = info.class == .bool; - const is_float = info.class == .float; - const is_signed = info.signedness == .signed; - - // **Note**: All these operations must be valid for vectors as well! - // For floating points, we generally want ordered operations (which return false if either operand is nan). - const opcode = switch (inst.base.tag) { - .cmp_eq => if (is_float) Opcode.OpFOrdEqual else if (is_bool) Opcode.OpLogicalEqual else Opcode.OpIEqual, - .cmp_neq => if (is_float) Opcode.OpFOrdNotEqual else if (is_bool) Opcode.OpLogicalNotEqual else Opcode.OpINotEqual, - // TODO: Verify that these OpFOrd type operations produce the right value. - // TODO: Is there a more fundamental difference between OpU and OpS operations here than just the type? 
- .cmp_gt => if (is_float) Opcode.OpFOrdGreaterThan else if (is_signed) Opcode.OpSGreaterThan else Opcode.OpUGreaterThan, - .cmp_gte => if (is_float) Opcode.OpFOrdGreaterThanEqual else if (is_signed) Opcode.OpSGreaterThanEqual else Opcode.OpUGreaterThanEqual, - .cmp_lt => if (is_float) Opcode.OpFOrdLessThan else if (is_signed) Opcode.OpSLessThan else Opcode.OpULessThan, - .cmp_lte => if (is_float) Opcode.OpFOrdLessThanEqual else if (is_signed) Opcode.OpSLessThanEqual else Opcode.OpULessThanEqual, + const opcode_index: usize = switch (info.class) { + .composite_integer => { + return self.fail("TODO: SPIR-V backend: binary operations for composite integers", .{}); + }, + .strange_integer => { + return self.fail("TODO: SPIR-V backend: comparison for strange integers", .{}); + }, + .float => 0, + .bool => 1, + .integer => switch (info.signedness) { + .signed => 1, + .unsigned => 2, + }, else => unreachable, }; + const opcode = ops[opcode_index]; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id }); return result_id; } - fn genUnOp(self: *DeclGen, inst: *Inst.UnOp) !ResultId { - const operand_id = try self.resolve(inst.operand); - + fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_id = try self.resolve(ty_op.operand); const result_id = self.spv.allocResultId(); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); - - const opcode = switch (inst.base.tag) { - // Bool -> bool - .not => Opcode.OpLogicalNot, - else => unreachable, - }; - + const result_type_id = try self.genType(Type.initTag(.bool)); + const opcode: Opcode = .OpLogicalNot; try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, operand_id }); - return result_id; } - fn genAlloc(self: *DeclGen, inst: *Inst.NoOp) !ResultId { + fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty = self.air.getType(inst); const storage_class = spec.StorageClass.Function; - const result_type_id = try self.genPointerType(inst.base.src, inst.base.ty, storage_class); + const result_type_id = try self.genPointerType(ty, storage_class); const result_id = self.spv.allocResultId(); // Rather than generating into code here, we're just going to generate directly into the fn_decls section so that @@ -828,7 +807,7 @@ pub const DeclGen = struct { return self.args.items[self.next_arg_index]; } - fn genBlock(self: *DeclGen, inst: *Inst.Block) !?ResultId { + fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up // the current block by first generating the code of the block, then a label, and then generate the rest of the current @@ -848,11 +827,16 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.spv.gpa); } - try self.genBody(inst.body); + const ty = self.air.getType(inst); + const inst_datas = self.air.instructions.items(.data); + const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; + + try self.genBody(body); try self.beginSPIRVBlock(label_id); // If this block didn't produce a value, simply return here. 
- if (!inst.base.ty.hasCodeGenBits()) + if (!ty.hasCodeGenBits()) return null; // Combine the result from the blocks using the Phi instruction. @@ -862,7 +846,7 @@ pub const DeclGen = struct { // TODO: OpPhi is limited in the types that it may produce, such as pointers. Figure out which other types // are not allowed to be created from a phi node, and throw an error for those. For now, genType already throws // an error for pointers. - const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); _ = result_type_id; try writeOpcode(&self.code, .OpPhi, 2 + @intCast(u16, incoming_blocks.items.len * 2)); // result type + result + variable/parent... @@ -874,30 +858,26 @@ pub const DeclGen = struct { return result_id; } - fn genBr(self: *DeclGen, inst: *Inst.Br) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const target = self.blocks.get(inst.block).?; + fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void { + const br = self.air.instructions.items(.data)[inst].br; + const block = self.blocks.get(br.block_inst).?; + const operand_ty = self.air.getType(br.operand); - // TODO: For some reason, br is emitted with void parameters. - if (inst.operand.ty.hasCodeGenBits()) { - const operand_id = try self.resolve(inst.operand); + if (operand_ty.hasCodeGenBits()) { + const operand_id = try self.resolve(br.operand); // current_block_label_id should not be undefined here, lest there is a br or br_void in the function's body. - try target.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); + try block.incoming_blocks.append(self.spv.gpa, .{ .src_label_id = self.current_block_label_id, .break_value_id = operand_id }); } - try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id}); - } - - fn genBrVoid(self: *DeclGen, inst: *Inst.BrVoid) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const target = self.blocks.get(inst.block).?; - // Don't need to add this to the incoming block list, as there is no value to insert in the phi node anyway. - try writeInstruction(&self.code, .OpBranch, &[_]Word{target.label_id}); + try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id}); } fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - const condition_id = try self.resolve(inst.condition); + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const cond_br = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]; + const else_body = self.air.extra[cond_br.end + then_body.len ..][0..cond_br.data.else_body_len]; + const condition_id = try self.resolve(pl_op.operand); // These will always generate a new SPIR-V block, since they are ir.Body and not ir.Block. 
const then_label_id = self.spv.allocResultId(); @@ -913,23 +893,26 @@ pub const DeclGen = struct { }); try self.beginSPIRVBlock(then_label_id); - try self.genBody(inst.then_body); + try self.genBody(then_body); try self.beginSPIRVBlock(else_label_id); - try self.genBody(inst.else_body); + try self.genBody(else_body); } - fn genDbgStmt(self: *DeclGen, inst: *Inst.DbgStmt) !void { + fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const src_fname_id = try self.spv.resolveSourceFileName(self.decl); - try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, inst.line, inst.column }); + try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column }); } - fn genLoad(self: *DeclGen, inst: *Inst.UnOp) !ResultId { - const operand_id = try self.resolve(inst.operand); + fn genLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_id = try self.resolve(ty_op.operand); + const ty = self.air.getType(inst); - const result_type_id = try self.genType(inst.base.src, inst.base.ty); + const result_type_id = try self.genType(ty); const result_id = self.spv.allocResultId(); - const operands = if (inst.base.ty.isVolatilePtr()) + const operands = if (ty.isVolatilePtr()) &[_]Word{ result_type_id, result_id, operand_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } else &[_]Word{ result_type_id, result_id, operand_id }; @@ -939,8 +922,9 @@ pub const DeclGen = struct { return result_id; } - fn genLoop(self: *DeclGen, inst: *Inst.Loop) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? + fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void { + const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_label_id = self.spv.allocResultId(); // Jump to the loop entry point @@ -949,27 +933,29 @@ pub const DeclGen = struct { // TODO: Look into OpLoopMerge. try self.beginSPIRVBlock(loop_label_id); - try self.genBody(inst.body); + try self.genBody(body); try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id}); } - fn genRet(self: *DeclGen, inst: *Inst.UnOp) !void { - const operand_id = try self.resolve(inst.operand); - // TODO: This instruction needs to be the last in a block. Is that guaranteed? - try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); + fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const operand = inst_datas[inst].un_op; + const operand_ty = self.air.getType(operand); + if (operand_ty.hasCodeGenBits()) { + const operand_id = try self.resolve(operand); + try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); + } else { + try writeInstruction(&self.code, .OpReturn, &[_]Word{}); + } } - fn genRetVoid(self: *DeclGen) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? 
- try writeInstruction(&self.code, .OpReturn, &[_]Word{}); - } + fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const dst_ptr_id = try self.resolve(bin_op.lhs); + const src_val_id = try self.resolve(bin_op.rhs); + const lhs_ty = self.air.getType(bin_op.lhs); - fn genStore(self: *DeclGen, inst: *Inst.BinOp) !void { - const dst_ptr_id = try self.resolve(inst.lhs); - const src_val_id = try self.resolve(inst.rhs); - - const operands = if (inst.lhs.ty.isVolatilePtr()) + const operands = if (lhs_ty.isVolatilePtr()) &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } else &[_]Word{ dst_ptr_id, src_val_id }; @@ -978,7 +964,6 @@ pub const DeclGen = struct { } fn genUnreach(self: *DeclGen) !void { - // TODO: This instruction needs to be the last in a block. Is that guaranteed? try writeInstruction(&self.code, .OpUnreachable, &[_]Word{}); } }; From ee6432537ee29485c5de6c8b0911ef1482d752a7 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 11 Jul 2021 23:38:29 -0700 Subject: [PATCH 05/53] stage2: first pass over codegen.zig for AIR memory layout --- BRANCH_TODO | 38 -- src/Liveness.zig | 21 + src/codegen.zig | 1383 +++++++++++++++++++++----------------- src/register_manager.zig | 11 +- 4 files changed, 778 insertions(+), 675 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 3b946edbbd..be3959e035 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -1,48 +1,10 @@ * be sure to test debug info of parameters - pub fn isUnused(self: Inst) bool { - return (self.deaths & (1 << unreferenced_bit_index)) != 0; - } - - pub fn operandDies(self: Inst, index: DeathsBitIndex) bool { - assert(index < deaths_bits); - return @truncate(u1, self.deaths >> index) != 0; - } - - pub fn clearOperandDeath(self: *Inst, index: DeathsBitIndex) void { - assert(index < deaths_bits); - self.deaths &= ~(@as(DeathsInt, 1) << index); - } - pub fn specialOperandDeaths(self: Inst) bool { return (self.deaths & (1 << deaths_bits)) != 0; } - pub fn operandCount(base: *Inst) usize { - inline for (@typeInfo(Tag).Enum.fields) |field| { - const tag = @intToEnum(Tag, field.value); - if (tag == base.tag) { - return @fieldParentPtr(tag.Type(), "base", base).operandCount(); - } - } - unreachable; - } - - pub fn getOperand(base: *Inst, index: usize) ?*Inst { - inline for (@typeInfo(Tag).Enum.fields) |field| { - const tag = @intToEnum(Tag, field.value); - if (tag == base.tag) { - return @fieldParentPtr(tag.Type(), "base", base).getOperand(index); - } - } - unreachable; - } - - pub fn Args(comptime T: type) type { - return std.meta.fieldInfo(T, .args).field_type; - } - /// Returns `null` if runtime-known. /// Should be called by codegen, not by Sema. Sema functions should call /// `resolvePossiblyUndefinedValue` or `resolveDefinedValue` instead. 
diff --git a/src/Liveness.zig b/src/Liveness.zig index 84e2495054..0cbac61118 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -74,6 +74,26 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { }; } +pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { + const usize_index = (inst * bpi) / @bitSizeOf(usize); + const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + return (l.tomb_bits[usize_index] & mask) != 0; +} + +pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { + assert(operand < bpi - 1); + const usize_index = (inst * bpi) / @bitSizeOf(usize); + const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + return (l.tomb_bits[usize_index] & mask) != 0; +} + +pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { + assert(operand < bpi - 1); + const usize_index = (inst * bpi) / @bitSizeOf(usize); + const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + l.tomb_bits[usize_index] |= mask; +} + pub fn deinit(l: *Liveness, gpa: *Allocator) void { gpa.free(l.tomb_bits); gpa.free(l.extra); @@ -83,6 +103,7 @@ pub fn deinit(l: *Liveness, gpa: *Allocator) void { /// How many tomb bits per AIR instruction. const bpi = 4; const Bpi = std.meta.Int(.unsigned, bpi); +const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. const Analysis = struct { diff --git a/src/codegen.zig b/src/codegen.zig index 91b0401291..65e85702e5 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -722,16 +722,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { - for (body.instructions) |inst| { - try self.ensureProcessDeathCapacity(@popCount(@TypeOf(inst.deaths), inst.deaths)); + for (body) |inst| { + const tomb_bits = self.liveness.getTombBits(inst); + try self.ensureProcessDeathCapacity(@popCount(@TypeOf(tomb_bits), tomb_bits)); const mcv = try self.genFuncInst(inst); - if (!inst.isUnused()) { - log.debug("{*} => {}", .{ inst, mcv }); + if (!self.liveness.isUnused(inst)) { + log.debug("{} => {}", .{ inst, mcv }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.putNoClobber(self.gpa, inst, mcv); } + // TODO inline this logic into every instruction var i: ir.Inst.DeathsBitIndex = 0; while (inst.getOperand(i)) |operand| : (i += 1) { if (inst.operandDies(i)) @@ -785,8 +787,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } /// Asserts there is already capacity to insert into top branch inst_table. - fn processDeath(self: *Self, inst: *ir.Inst) void { - if (inst.tag == .constant) return; // Constants are immortal. + fn processDeath(self: *Self, inst: Air.Inst.Index) void { + const air_tags = self.air.instructions.items(.tag); + if (air_tags[inst] == .constant) return; // Constants are immortal. // When editing this function, note that the logic must synchronize with `reuseOperand`. 
const prev_value = self.getResolvedInstValue(inst); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -827,74 +830,82 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue { - switch (inst.tag) { - .add => return self.genAdd(inst.castTag(.add).?), + fn genFuncInst(self: *Self, inst: Air.Inst.Index) !MCValue { + const air_tags = self.air.instructions.items(.tag); + switch (air_tags[inst]) { + // zig fmt: off + .add => return self.genAdd(inst.castTag(.add).?), .addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - .alloc => return self.genAlloc(inst.castTag(.alloc).?), - .arg => return self.genArg(inst.castTag(.arg).?), - .assembly => return self.genAsm(inst.castTag(.assembly).?), - .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - .block => return self.genBlock(inst.castTag(.block).?), - .br => return self.genBr(inst.castTag(.br).?), - .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - .breakpoint => return self.genBreakpoint(inst.src), - .br_void => return self.genBrVoid(inst.castTag(.br_void).?), - .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - .call => return self.genCall(inst.castTag(.call).?), - .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .condbr => return self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, // excluded from function bodies - .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - .intcast => return self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), - .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - .is_null => return self.genIsNull(inst.castTag(.is_null).?), - .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - .is_err => return self.genIsErr(inst.castTag(.is_err).?), - .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - .load => return self.genLoad(inst.castTag(.load).?), - .loop => return self.genLoop(inst.castTag(.loop).?), - .not => return self.genNot(inst.castTag(.not).?), - .mul => return self.genMul(inst.castTag(.mul).?), - .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - .div => return self.genDiv(inst.castTag(.div).?), - .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - .ref => return self.genRef(inst.castTag(.ref).?), - .ret => return self.genRet(inst.castTag(.ret).?), - .retvoid => return self.genRetVoid(inst.castTag(.retvoid).?), - .store => return self.genStore(inst.castTag(.store).?), - .struct_field_ptr => return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .sub => return self.genSub(inst.castTag(.sub).?), + .sub => return 
self.genSub(inst.castTag(.sub).?), .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), - .unreach => return MCValue{ .unreach = {} }, - .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - .unwrap_errunion_payload_ptr => return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), + .mul => return self.genMul(inst.castTag(.mul).?), + .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), + .div => return self.genDiv(inst.castTag(.div).?), + + .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), + .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), + .cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), + .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), + .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), + .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), + + .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), + .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), + .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), + .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), + .xor => return self.genXor(inst.castTag(.xor).?), + + .alloc => return self.genAlloc(inst.castTag(.alloc).?), + .arg => return self.genArg(inst.castTag(.arg).?), + .assembly => return self.genAsm(inst.castTag(.assembly).?), + .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), + .block => return self.genBlock(inst.castTag(.block).?), + .br => return self.genBr(inst.castTag(.br).?), + .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), + .breakpoint => return self.genBreakpoint(inst.src), + .call => return self.genCall(inst.castTag(.call).?), + .cond_br => return self.genCondBr(inst.castTag(.condbr).?), + .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), + .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), + .intcast => return self.genIntCast(inst.castTag(.intcast).?), + .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), + .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), + .is_null => return self.genIsNull(inst.castTag(.is_null).?), + .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), + .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), + .is_err => return self.genIsErr(inst.castTag(.is_err).?), + .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), + .load => return self.genLoad(inst.castTag(.load).?), + .loop => return self.genLoop(inst.castTag(.loop).?), + .not => return self.genNot(inst.castTag(.not).?), + .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), + .ref => return self.genRef(inst.castTag(.ref).?), + .ret => return self.genRet(inst.castTag(.ret).?), + .store => return self.genStore(inst.castTag(.store).?), + .struct_field_ptr=> 
return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + .switchbr => return self.genSwitch(inst.castTag(.switchbr).?), + .varptr => return self.genVarPtr(inst.castTag(.varptr).?), + + .constant => unreachable, // excluded from function bodies + .unreach => return MCValue{ .unreach = {} }, + + .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), + .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), + .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), + .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), + .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), + .unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), + + .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), - .varptr => return self.genVarPtr(inst.castTag(.varptr).?), - .xor => return self.genXor(inst.castTag(.xor).?), + .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), + + // zig fmt: on } } - fn allocMem(self: *Self, inst: *ir.Inst, abi_size: u32, abi_align: u32) !u32 { + fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 { if (abi_align > self.stack_align) self.stack_align = abi_align; // TODO find a free slot instead of always appending @@ -910,20 +921,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } /// Use a pointer instruction as the basis for allocating stack memory. 
- fn allocMemPtr(self: *Self, inst: *ir.Inst) !u32 { - const elem_ty = inst.ty.elemType(); + fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { + const elem_ty = self.air.getType(inst).elemType(); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail(inst.src, "type '{}' too big to fit into stack frame", .{elem_ty}); + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; // TODO swap this for inst.ty.ptrAlign const abi_align = elem_ty.abiAlignment(self.target.*); return self.allocMem(inst, abi_size, abi_align); } - fn allocRegOrMem(self: *Self, inst: *ir.Inst, reg_ok: bool) !MCValue { + fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { const elem_ty = inst.ty; const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { - return self.fail(inst.src, "type '{}' too big to fit into stack frame", .{elem_ty}); + return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; const abi_align = elem_ty.abiAlignment(self.target.*); if (abi_align > self.stack_align) @@ -943,72 +954,75 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue{ .stack_offset = stack_offset }; } - pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void { + pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { const stack_mcv = try self.allocRegOrMem(inst, false); log.debug("spilling {*} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); assert(reg == toCanonicalReg(reg_mcv.register)); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(src, inst.ty, stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(inst.ty, stack_mcv.stack_offset, reg_mcv); } /// Copies a value to a register without tracking the register. The register is not considered /// allocated. A second call to `copyToTmpRegister` may return the same register. /// This can have a side effect of spilling instructions to the stack to free up a register. - fn copyToTmpRegister(self: *Self, src: LazySrcLoc, ty: Type, mcv: MCValue) !Register { + fn copyToTmpRegister(self: *Self, ty: Type, mcv: MCValue) !Register { const reg = try self.register_manager.allocReg(null, &.{}); - try self.genSetReg(src, ty, reg, mcv); + try self.genSetReg(ty, reg, mcv); return reg; } /// Allocates a new register and copies `mcv` into it. /// `reg_owner` is the instruction that gets associated with the register in the register table. /// This can have a side effect of spilling instructions to the stack to free up a register. 
- fn copyToNewRegister(self: *Self, reg_owner: *ir.Inst, mcv: MCValue) !MCValue { + fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const reg = try self.register_manager.allocReg(reg_owner, &.{}); - try self.genSetReg(reg_owner.src, reg_owner.ty, reg, mcv); + try self.genSetReg(reg_owner.ty, reg, mcv); return MCValue{ .register = reg }; } - fn genAlloc(self: *Self, inst: *ir.Inst.NoOp) !MCValue { - const stack_offset = try self.allocMemPtr(&inst.base); + fn genAlloc(self: *Self, inst: Air.Inst.Index) !MCValue { + const stack_offset = try self.allocMemPtr(inst); return MCValue{ .ptr_stack_offset = stack_offset }; } - fn genFloatCast(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genFloatCast(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement floatCast for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}), } } - fn genIntCast(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genIntCast(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; - const operand = try self.resolveInst(inst.operand); - const info_a = inst.operand.ty.intInfo(self.target.*); - const info_b = inst.base.ty.intInfo(self.target.*); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_ty = self.air.getType(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + const info_a = operand_ty.intInfo(self.target.*); + const info_b = self.air.getType(inst).intInfo(self.target.*); if (info_a.signedness != info_b.signedness) - return self.fail(inst.base.src, "TODO gen intcast sign safety in semantic analysis", .{}); + return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); if (info_a.bits == info_b.bits) return operand; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement intCast for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch}), } } - fn genNot(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genNot(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. 
- if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; - const operand = try self.resolveInst(inst.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); switch (operand) { .dead => unreachable, .unreach => unreachable, @@ -1037,216 +1051,209 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (arch) { .x86_64 => { - var imm = ir.Inst.Constant{ - .base = .{ - .tag = .constant, - .deaths = 0, - .ty = inst.operand.ty, - .src = inst.operand.src, - }, - .val = Value.initTag(.bool_true), - }; - return try self.genX8664BinMath(&inst.base, inst.operand, &imm.base); + return try self.genX8664BinMath(inst, ty_op.operand, .bool_true); }, .arm, .armeb => { - var imm = ir.Inst.Constant{ - .base = .{ - .tag = .constant, - .deaths = 0, - .ty = inst.operand.ty, - .src = inst.operand.src, - }, - .val = Value.initTag(.bool_true), - }; - return try self.genArmBinOp(&inst.base, inst.operand, &imm.base, .not); + return try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not); }, - else => return self.fail(inst.base.src, "TODO implement NOT for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}), } } - fn genAdd(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genAdd(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; switch (arch) { .x86_64 => { - return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs); + return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs); }, - .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .add), - else => return self.fail(inst.base.src, "TODO implement add for {}", .{self.target.cpu.arch}), + .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add), + else => return self.fail("TODO implement add for {}", .{self.target.cpu.arch}), } } - fn genAddWrap(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genAddWrap(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + _ = bin_op; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement addwrap for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch}), } } - fn genMul(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genMul(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. 
- if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; switch (arch) { - .x86_64 => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs), - .arm, .armeb => return try self.genArmMul(&inst.base, inst.lhs, inst.rhs), - else => return self.fail(inst.base.src, "TODO implement mul for {}", .{self.target.cpu.arch}), + .x86_64 => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => return try self.genArmMul(inst, bin_op.lhs, bin_op.rhs), + else => return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}), } } - fn genMulWrap(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genMulWrap(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + _ = bin_op; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement mulwrap for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}), } } - fn genDiv(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genDiv(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + _ = bin_op; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement div for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement div for {}", .{self.target.cpu.arch}), } } - fn genBitAnd(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genBitAnd(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; switch (arch) { - .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bit_and), - else => return self.fail(inst.base.src, "TODO implement bitwise and for {}", .{self.target.cpu.arch}), + .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and), + else => return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}), } } - fn genBitOr(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genBitOr(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; switch (arch) { - .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bit_or), - else => return self.fail(inst.base.src, "TODO implement bitwise or for {}", .{self.target.cpu.arch}), + .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or), + else => return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}), } } - fn genXor(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genXor(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. 
- if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; switch (arch) { - .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .xor), - else => return self.fail(inst.base.src, "TODO implement xor for {}", .{self.target.cpu.arch}), + .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor), + else => return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}), } } - fn genOptionalPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genOptionalPayload(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement .optional_payload for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}), } } - fn genOptionalPayloadPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}), } } - fn genUnwrapErrErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement unwrap error union error for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch}), } } - fn genUnwrapErrPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}), } } // *(E!T) -> E - fn genUnwrapErrErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}), } } // *(E!T) -> *T - fn genUnwrapErrPayloadPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. 
- if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}), } } - fn genWrapOptional(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - const optional_ty = inst.base.ty; - + fn genWrapOptional(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const optional_ty = self.air.getType(inst); + // Optional type is just a boolean true if (optional_ty.abiSize(self.target.*) == 1) return MCValue{ .immediate = 1 }; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement wrap optional for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}), } } /// T to E!T - fn genWrapErrUnionPayload(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}), } } /// E to E!T - fn genWrapErrUnionErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn genWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement wrap errunion error for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch}), } } - fn genVarPtr(self: *Self, inst: *ir.Inst.VarPtr) !MCValue { + fn genVarPtr(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement varptr for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement varptr for {}", .{self.target.cpu.arch}), } } - fn reuseOperand(self: *Self, inst: *ir.Inst, op_index: ir.Inst.DeathsBitIndex, mcv: MCValue) bool { - if (!inst.operandDies(op_index)) + fn reuseOperand(self: *Self, inst: Air.Inst.Index, op_index: u2, mcv: MCValue) bool { + if (!self.liveness.operandDies(inst, op_index)) return false; switch (mcv) { @@ -1258,16 +1265,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.register_manager.registers[index] = inst; } } - log.debug("reusing {} => {*}", .{ reg, inst }); + log.debug("reusing {} => {}", .{ reg, inst }); }, .stack_offset => |off| { - log.debug("reusing stack offset {} => {*}", .{ off, inst }); + log.debug("reusing stack offset {} => {}", .{ off, inst }); }, else => return false, } // Prevent the operand deaths processing code from deallocating it. - inst.clearOperandDeath(op_index); + self.liveness.clearOperandDeath(inst, op_index); // That makes us responsible for doing the rest of the stuff that processDeath would have done. 
const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -1276,22 +1283,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return true; } - fn genLoad(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - const elem_ty = inst.base.ty; - if (!elem_ty.hasCodeGenBits()) - return MCValue.none; - const ptr = try self.resolveInst(inst.operand); - const is_volatile = inst.operand.ty.isVolatilePtr(); - if (inst.base.isUnused() and !is_volatile) - return MCValue.dead; - const dst_mcv: MCValue = blk: { - if (self.reuseOperand(&inst.base, 0, ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk ptr; - } else { - break :blk try self.allocRegOrMem(&inst.base, true); - } - }; + fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue) !void { switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1299,31 +1291,51 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .dead => unreachable, .compare_flags_unsigned => unreachable, .compare_flags_signed => unreachable, - .immediate => |imm| try self.setRegOrMem(inst.base.src, elem_ty, dst_mcv, .{ .memory = imm }), - .ptr_stack_offset => |off| try self.setRegOrMem(inst.base.src, elem_ty, dst_mcv, .{ .stack_offset = off }), + .immediate => |imm| try self.setRegOrMem(elem_ty, dst_mcv, .{ .memory = imm }), + .ptr_stack_offset => |off| try self.setRegOrMem(elem_ty, dst_mcv, .{ .stack_offset = off }), .ptr_embedded_in_code => |off| { - try self.setRegOrMem(inst.base.src, elem_ty, dst_mcv, .{ .embedded_in_code = off }); + try self.setRegOrMem(elem_ty, dst_mcv, .{ .embedded_in_code = off }); }, .embedded_in_code => { - return self.fail(inst.base.src, "TODO implement loading from MCValue.embedded_in_code", .{}); + return self.fail("TODO implement loading from MCValue.embedded_in_code", .{}); }, .register => { - return self.fail(inst.base.src, "TODO implement loading from MCValue.register", .{}); + return self.fail("TODO implement loading from MCValue.register", .{}); }, .memory => { - return self.fail(inst.base.src, "TODO implement loading from MCValue.memory", .{}); + return self.fail("TODO implement loading from MCValue.memory", .{}); }, .stack_offset => { - return self.fail(inst.base.src, "TODO implement loading from MCValue.stack_offset", .{}); + return self.fail("TODO implement loading from MCValue.stack_offset", .{}); }, } + } + + fn genLoad(self: *Self, inst: Air.Inst.Index) !MCValue { + const elem_ty = self.air.getType(inst); + if (!elem_ty.hasCodeGenBits()) + return MCValue.none; + const ptr = try self.resolveInst(inst.operand); + const is_volatile = inst.operand.ty.isVolatilePtr(); + if (self.liveness.isUnused(inst) and !is_volatile) + return MCValue.dead; + const dst_mcv: MCValue = blk: { + if (self.reuseOperand(inst, 0, ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
+ break :blk ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + self.load(dst_mcv, ptr); return dst_mcv; } - fn genStore(self: *Self, inst: *ir.Inst.BinOp) !MCValue { - const ptr = try self.resolveInst(inst.lhs); - const value = try self.resolveInst(inst.rhs); - const elem_ty = inst.rhs.ty; + fn genStore(self: *Self, inst: Air.Inst.Index) !MCValue { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const ptr = try self.resolveInst(bin_op.lhs); + const value = try self.resolveInst(bin_op.rhs); + const elem_ty = self.getType(bin_op.rhs); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1332,57 +1344,60 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .compare_flags_signed => unreachable, .immediate => |imm| { - try self.setRegOrMem(inst.base.src, elem_ty, .{ .memory = imm }, value); + try self.setRegOrMem(elem_ty, .{ .memory = imm }, value); }, .ptr_stack_offset => |off| { - try self.genSetStack(inst.base.src, elem_ty, off, value); + try self.genSetStack(elem_ty, off, value); }, .ptr_embedded_in_code => |off| { - try self.setRegOrMem(inst.base.src, elem_ty, .{ .embedded_in_code = off }, value); + try self.setRegOrMem(elem_ty, .{ .embedded_in_code = off }, value); }, .embedded_in_code => { - return self.fail(inst.base.src, "TODO implement storing to MCValue.embedded_in_code", .{}); + return self.fail("TODO implement storing to MCValue.embedded_in_code", .{}); }, .register => { - return self.fail(inst.base.src, "TODO implement storing to MCValue.register", .{}); + return self.fail("TODO implement storing to MCValue.register", .{}); }, .memory => { - return self.fail(inst.base.src, "TODO implement storing to MCValue.memory", .{}); + return self.fail("TODO implement storing to MCValue.memory", .{}); }, .stack_offset => { - return self.fail(inst.base.src, "TODO implement storing to MCValue.stack_offset", .{}); + return self.fail("TODO implement storing to MCValue.stack_offset", .{}); }, } return .none; } - fn genStructFieldPtr(self: *Self, inst: *ir.Inst.StructFieldPtr) !MCValue { - return self.fail(inst.base.src, "TODO implement codegen struct_field_ptr", .{}); + fn genStructFieldPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + const struct_field_ptr = self.air.instructions.items(.data)[inst].struct_field_ptr; + _ = struct_field_ptr; + return self.fail("TODO implement codegen struct_field_ptr", .{}); } - fn genSub(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genSub(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; switch (arch) { - .x86_64 => { - return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs); - }, - .arm, .armeb => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .sub), - else => return self.fail(inst.base.src, "TODO implement sub for {}", .{self.target.cpu.arch}), + .x86_64 => return self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => return self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub), + else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}), } } - fn genSubWrap(self: *Self, inst: *ir.Inst.BinOp) !MCValue { + fn genSubWrap(self: *Self, inst: Air.Inst.Index) !MCValue { // No side effects, so if it's unreferenced, do nothing. 
- if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + _ = bin_op; switch (arch) { - else => return self.fail(inst.base.src, "TODO implement subwrap for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}), } } - fn armOperandShouldBeRegister(self: *Self, src: LazySrcLoc, mcv: MCValue) !bool { + fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool { return switch (mcv) { .none => unreachable, .undef => unreachable, @@ -1392,7 +1407,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, .immediate => |imm| blk: { - if (imm > std.math.maxInt(u32)) return self.fail(src, "TODO ARM binary arithmetic immediate larger than u32", .{}); + if (imm > std.math.maxInt(u32)) return self.fail("TODO ARM binary arithmetic immediate larger than u32", .{}); // Load immediate into register if it doesn't fit // in an operand @@ -1406,14 +1421,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn genArmBinOp(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst, op: ir.Inst.Tag) !MCValue { + fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: ir.Inst.Tag) !MCValue { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; - const lhs_should_be_register = try self.armOperandShouldBeRegister(op_lhs.src, lhs); - const rhs_should_be_register = try self.armOperandShouldBeRegister(op_rhs.src, rhs); + const lhs_should_be_register = try self.armOperandShouldBeRegister(lhs); + const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs); const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); @@ -1486,14 +1501,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Move the operands to the newly allocated registers if (lhs_mcv == .register and !lhs_is_register) { - try self.genSetReg(op_lhs.src, op_lhs.ty, lhs_mcv.register, lhs); + try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs); } if (rhs_mcv == .register and !rhs_is_register) { - try self.genSetReg(op_rhs.src, op_rhs.ty, rhs_mcv.register, rhs); + try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs); } try self.genArmBinOpCode( - inst.src, dst_mcv.register, lhs_mcv, rhs_mcv, @@ -1505,14 +1519,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genArmBinOpCode( self: *Self, - src: LazySrcLoc, dst_reg: Register, lhs_mcv: MCValue, rhs_mcv: MCValue, swap_lhs_and_rhs: bool, op: ir.Inst.Tag, ) !void { - _ = src; assert(lhs_mcv == .register or rhs_mcv == .register); const op1 = if (swap_lhs_and_rhs) rhs_mcv.register else lhs_mcv.register; @@ -1561,7 +1573,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArmMul(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst) !MCValue { + fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Index, op_rhs: Air.Inst.Index) !MCValue { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); @@ -1618,10 +1630,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Move the operands to the newly allocated registers if (!lhs_is_register) { - try self.genSetReg(op_lhs.src, op_lhs.ty, lhs_mcv.register, lhs); 
+ try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs); } if (!rhs_is_register) { - try self.genSetReg(op_rhs.src, op_rhs.ty, rhs_mcv.register, rhs); + try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs); } writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32()); @@ -1631,7 +1643,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Perform "binary" operators, excluding comparisons. /// Currently, the following ops are supported: /// ADD, SUB, XOR, OR, AND - fn genX8664BinMath(self: *Self, inst: *ir.Inst, op_lhs: *ir.Inst, op_rhs: *ir.Inst) !MCValue { + fn genX8664BinMath(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { // We'll handle these ops in two steps. // 1) Prepare an output location (register or memory) // This location will be the location of the operand that dies (if one exists) @@ -1654,7 +1666,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // as the result MCValue. var dst_mcv: MCValue = undefined; var src_mcv: MCValue = undefined; - var src_inst: *ir.Inst = undefined; + var src_inst: Air.Inst.Index = undefined; if (self.reuseOperand(inst, 0, lhs)) { // LHS dies; use it as the destination. // Both operands cannot be memory. @@ -1696,20 +1708,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (src_mcv) { .immediate => |imm| { if (imm > math.maxInt(u31)) { - src_mcv = MCValue{ .register = try self.copyToTmpRegister(src_inst.src, Type.initTag(.u64), src_mcv) }; + src_mcv = MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.u64), src_mcv) }; } }, else => {}, } // Now for step 2, we perform the actual op - switch (inst.tag) { + const air_tags = self.air.instructions.items(.tag); + switch (air_tags[inst]) { // TODO: Generate wrapping and non-wrapping versions separately - .add, .addwrap => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 0, 0x00), - .bool_or, .bit_or => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 1, 0x08), - .bool_and, .bit_and => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 4, 0x20), - .sub, .subwrap => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 5, 0x28), - .xor, .not => try self.genX8664BinMathCode(inst.src, inst.ty, dst_mcv, src_mcv, 6, 0x30), + .add, .addwrap => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 0, 0x00), + .bool_or, .bit_or => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 1, 0x08), + .bool_and, .bit_and => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 4, 0x20), + .sub, .subwrap => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 5, 0x28), + .xor, .not => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 6, 0x30), .mul, .mulwrap => try self.genX8664Imul(inst.src, inst.ty, dst_mcv, src_mcv), else => unreachable, @@ -1719,16 +1732,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } /// Wrap over Instruction.encodeInto to translate errors - fn encodeX8664Instruction( - self: *Self, - src: LazySrcLoc, - inst: Instruction, - ) !void { + fn encodeX8664Instruction(self: *Self, inst: Instruction) !void { inst.encodeInto(self.code) catch |err| { if (err == error.OutOfMemory) return error.OutOfMemory else - return self.fail(src, "Instruction.encodeInto failed because {s}", .{@errorName(err)}); + return self.fail("Instruction.encodeInto failed because {s}", .{@errorName(err)}); }; } @@ -1800,7 +1809,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// 
d3 /opx | *r/m16/32/64*, CL (for context, CL is register 1) fn genX8664BinMathCode( self: *Self, - src: LazySrcLoc, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue, @@ -1818,7 +1826,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .register => |dst_reg| { switch (src_mcv) { .none => unreachable, - .undef => try self.genSetReg(src, dst_ty, dst_reg, .undef), + .undef => try self.genSetReg(dst_ty, dst_reg, .undef), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, @@ -1872,7 +1880,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } }, .embedded_in_code, .memory => { - return self.fail(src, "TODO implement x86 ADD/SUB/CMP source memory", .{}); + return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{}); }, .stack_offset => |off| { // register, indirect use mr + 3 @@ -1880,7 +1888,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const abi_size = dst_ty.abiSize(self.target.*); const adj_off = off + abi_size; if (off > math.maxInt(i32)) { - return self.fail(src, "stack offset too large", .{}); + return self.fail("stack offset too large", .{}); } const encoder = try X8664Encoder.init(self.code, 7); encoder.rex(.{ @@ -1903,17 +1911,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } }, .compare_flags_unsigned => { - return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{}); + return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{}); }, .compare_flags_signed => { - return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{}); + return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{}); }, } }, .stack_offset => |off| { switch (src_mcv) { .none => unreachable, - .undef => return self.genSetStack(src, dst_ty, off, .undef), + .undef => return self.genSetStack(dst_ty, off, .undef), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, @@ -1922,21 +1930,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, .immediate => |imm| { _ = imm; - return self.fail(src, "TODO implement x86 ADD/SUB/CMP source immediate", .{}); + return self.fail("TODO implement x86 ADD/SUB/CMP source immediate", .{}); }, .embedded_in_code, .memory, .stack_offset => { - return self.fail(src, "TODO implement x86 ADD/SUB/CMP source memory", .{}); + return self.fail("TODO implement x86 ADD/SUB/CMP source memory", .{}); }, .compare_flags_unsigned => { - return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{}); + return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (unsigned)", .{}); }, .compare_flags_signed => { - return self.fail(src, "TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{}); + return self.fail("TODO implement x86 ADD/SUB/CMP source compare flag (signed)", .{}); }, } }, .embedded_in_code, .memory => { - return self.fail(src, "TODO implement x86 ADD/SUB/CMP destination memory", .{}); + return self.fail("TODO implement x86 ADD/SUB/CMP destination memory", .{}); }, } } @@ -1960,7 +1968,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .register => |dst_reg| { switch (src_mcv) { .none => unreachable, - .undef => try self.genSetReg(src, dst_ty, dst_reg, .undef), + .undef => try self.genSetReg(dst_ty, dst_reg, .undef), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, @@ -2026,31 +2034,31 @@ fn Function(comptime arch: 
std.Target.Cpu.Arch) type { ); encoder.imm32(@intCast(i32, imm)); } else { - const src_reg = try self.copyToTmpRegister(src, dst_ty, src_mcv); + const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv); return self.genX8664Imul(src, dst_ty, dst_mcv, MCValue{ .register = src_reg }); } }, .embedded_in_code, .memory, .stack_offset => { - return self.fail(src, "TODO implement x86 multiply source memory", .{}); + return self.fail("TODO implement x86 multiply source memory", .{}); }, .compare_flags_unsigned => { - return self.fail(src, "TODO implement x86 multiply source compare flag (unsigned)", .{}); + return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{}); }, .compare_flags_signed => { - return self.fail(src, "TODO implement x86 multiply source compare flag (signed)", .{}); + return self.fail("TODO implement x86 multiply source compare flag (signed)", .{}); }, } }, .stack_offset => |off| { switch (src_mcv) { .none => unreachable, - .undef => return self.genSetStack(src, dst_ty, off, .undef), + .undef => return self.genSetStack(dst_ty, off, .undef), .dead, .unreach => unreachable, .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, .register => |src_reg| { // copy dst to a register - const dst_reg = try self.copyToTmpRegister(src, dst_ty, dst_mcv); + const dst_reg = try self.copyToTmpRegister(dst_ty, dst_mcv); // multiply into dst_reg // register, register // Use the following imul opcode @@ -2068,34 +2076,34 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { src_reg.low_id(), ); // copy dst_reg back out - return self.genSetStack(src, dst_ty, off, MCValue{ .register = dst_reg }); + return self.genSetStack(dst_ty, off, MCValue{ .register = dst_reg }); }, .immediate => |imm| { _ = imm; - return self.fail(src, "TODO implement x86 multiply source immediate", .{}); + return self.fail("TODO implement x86 multiply source immediate", .{}); }, .embedded_in_code, .memory, .stack_offset => { - return self.fail(src, "TODO implement x86 multiply source memory", .{}); + return self.fail("TODO implement x86 multiply source memory", .{}); }, .compare_flags_unsigned => { - return self.fail(src, "TODO implement x86 multiply source compare flag (unsigned)", .{}); + return self.fail("TODO implement x86 multiply source compare flag (unsigned)", .{}); }, .compare_flags_signed => { - return self.fail(src, "TODO implement x86 multiply source compare flag (signed)", .{}); + return self.fail("TODO implement x86 multiply source compare flag (signed)", .{}); }, } }, .embedded_in_code, .memory => { - return self.fail(src, "TODO implement x86 multiply destination memory", .{}); + return self.fail("TODO implement x86 multiply destination memory", .{}); }, } } - fn genX8664ModRMRegToStack(self: *Self, src: LazySrcLoc, ty: Type, off: u32, reg: Register, opcode: u8) !void { + fn genX8664ModRMRegToStack(self: *Self, ty: Type, off: u32, reg: Register, opcode: u8) !void { const abi_size = ty.abiSize(self.target.*); const adj_off = off + abi_size; if (off > math.maxInt(i32)) { - return self.fail(src, "stack offset too large", .{}); + return self.fail("stack offset too large", .{}); } const i_adj_off = -@intCast(i32, adj_off); @@ -2122,8 +2130,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArgDbgInfo(self: *Self, inst: *ir.Inst.Arg, mcv: MCValue) !void { + fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void { const name_with_null = inst.name[0 .. 
mem.lenZ(inst.name) + 1]; + const ty = self.air.getType(inst); switch (mcv) { .register => |reg| { @@ -2136,7 +2145,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { reg.dwarfLocOp(), }); try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len); - try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4 + try self.addDbgInfoTypeReloc(ty); // DW.AT_type, DW.FORM_ref4 dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string }, .none => {}, @@ -2147,12 +2156,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .dwarf => |dbg_out| { switch (arch) { .arm, .armeb => { - const ty = inst.base.ty; const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch { - return self.fail(inst.base.src, "type '{}' too big to fit into stack frame", .{ty}); + return self.fail("type '{}' too big to fit into stack frame", .{ty}); }; const adjusted_stack_offset = math.negateCast(offset + abi_size) catch { - return self.fail(inst.base.src, "Stack offset too large for arguments", .{}); + return self.fail("Stack offset too large for arguments", .{}); }; try dbg_out.dbg_info.append(link.File.Elf.abbrev_parameter); @@ -2168,7 +2176,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try leb128.writeILEB128(dbg_out.dbg_info.writer(), adjusted_stack_offset); try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 5 + name_with_null.len); - try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4 + try self.addDbgInfoTypeReloc(ty); // DW.AT_type, DW.FORM_ref4 dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string }, else => {}, @@ -2181,23 +2189,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArg(self: *Self, inst: *ir.Inst.Arg) !MCValue { + fn genArg(self: *Self, inst: Air.Inst.Index) !MCValue { const arg_index = self.arg_index; self.arg_index += 1; + const ty = self.air.getType(inst); + const result = self.args[arg_index]; const mcv = switch (arch) { // TODO support stack-only arguments on all target architectures .arm, .armeb, .aarch64, .aarch64_32, .aarch64_be => switch (result) { // Copy registers to the stack .register => |reg| blk: { - const ty = inst.base.ty; const abi_size = math.cast(u32, ty.abiSize(self.target.*)) catch { - return self.fail(inst.base.src, "type '{}' too big to fit into stack frame", .{ty}); + return self.fail("type '{}' too big to fit into stack frame", .{ty}); }; const abi_align = ty.abiAlignment(self.target.*); - const stack_offset = try self.allocMem(&inst.base, abi_size, abi_align); - try self.genSetStack(inst.base.src, ty, stack_offset, MCValue{ .register = reg }); + const stack_offset = try self.allocMem(inst, abi_size, abi_align); + try self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); break :blk MCValue{ .stack_offset = stack_offset }; }, @@ -2207,12 +2216,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; try self.genArgDbgInfo(inst, mcv); - if (inst.base.isUnused()) + if (self.liveness.isUnused(inst)) return MCValue.dead; switch (mcv) { .register => |reg| { - self.register_manager.getRegAssumeFree(toCanonicalReg(reg), &inst.base); + self.register_manager.getRegAssumeFree(toCanonicalReg(reg), inst); }, else => {}, } @@ -2220,7 +2229,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return mcv; } - fn genBreakpoint(self: *Self, src: LazySrcLoc) !MCValue { + fn genBreakpoint(self: *Self) !MCValue { switch (arch) { .i386, .x86_64 => { try self.code.append(0xcc); 
// int3 @@ -2234,13 +2243,20 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.brk(1).toU32()); }, - else => return self.fail(src, "TODO implement @breakpoint() for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement @breakpoint() for {}", .{self.target.cpu.arch}), } return .none; } - fn genCall(self: *Self, inst: *ir.Inst.Call) !MCValue { - var info = try self.resolveCallingConventionValues(inst.base.src, inst.func.ty); + fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue { + const inst_datas = self.air.instructions.items(.data); + const pl_op = inst_datas[inst].pl_op; + const fn_ty = self.air.getType(pl_op.operand); + const callee = pl_op.operand; + const extra = self.air.extraData(Air.Call, inst_data.payload); + const args = self.air.extra[extra.end..][0..extra.data.args_len]; + + var info = try self.resolveCallingConventionValues(fn_ty); defer info.deinit(self); // Due to incremental compilation, how function calls are generated depends @@ -2249,26 +2265,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { switch (arch) { .x86_64 => { for (info.args) |mc_arg, arg_i| { - const arg = inst.args[arg_i]; - const arg_mcv = try self.resolveInst(inst.args[arg_i]); + const arg = args[arg_i]; + const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. switch (mc_arg) { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); }, .stack_offset => |off| { // Here we need to emit instructions like this: // mov qword ptr [rsp + stack_offset], x - try self.genSetStack(arg.src, arg.ty, off, arg_mcv); + try self.genSetStack(arg.ty, off, arg_mcv); }, .ptr_stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, .ptr_embedded_in_code => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); }, .undef => unreachable, .immediate => unreachable, @@ -2281,7 +2297,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -2300,18 +2316,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 }); mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr); } else if (func_value.castTag(.extern_fn)) |_| { - return self.fail(inst.base.src, "TODO implement calling extern functions", .{}); + return self.fail("TODO implement calling extern functions", .{}); } else { - return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); + return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{}); + return self.fail("TODO implement calling runtime known function pointer", .{}); } }, .riscv64 => { - if (info.args.len > 0) return self.fail(inst.base.src, "TODO 
implement fn args for {}", .{self.target.cpu.arch}); + if (info.args.len > 0) return self.fail("TODO implement fn args for {}", .{self.target.cpu.arch}); - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; @@ -2325,21 +2341,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else unreachable; - try self.genSetReg(inst.base.src, Type.initTag(.usize), .ra, .{ .memory = got_addr }); + try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32()); } else if (func_value.castTag(.extern_fn)) |_| { - return self.fail(inst.base.src, "TODO implement calling extern functions", .{}); + return self.fail("TODO implement calling extern functions", .{}); } else { - return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); + return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{}); + return self.fail("TODO implement calling runtime known function pointer", .{}); } }, .arm, .armeb => { for (info.args) |mc_arg, arg_i| { - const arg = inst.args[arg_i]; - const arg_mcv = try self.resolveInst(inst.args[arg_i]); + const arg = args[arg_i]; + const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { .none => continue, @@ -2353,21 +2369,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); }, .stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{}); + return self.fail("TODO implement calling with parameters in memory", .{}); }, .ptr_stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, .ptr_embedded_in_code => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); }, } } - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); @@ -2380,7 +2396,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else unreachable; - try self.genSetReg(inst.base.src, Type.initTag(.usize), .lr, .{ .memory = got_addr }); + try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr }); // TODO: add Instruction.supportedOn // function for ARM @@ -2391,18 +2407,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.bx(.al, .lr).toU32()); } } else if (func_value.castTag(.extern_fn)) |_| { - return self.fail(inst.base.src, "TODO implement calling extern functions", .{}); + return self.fail("TODO implement calling extern functions", .{}); } else { - return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); + return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - return self.fail(inst.base.src, "TODO implement calling 
runtime known function pointer", .{}); + return self.fail("TODO implement calling runtime known function pointer", .{}); } }, .aarch64 => { for (info.args) |mc_arg, arg_i| { - const arg = inst.args[arg_i]; - const arg_mcv = try self.resolveInst(inst.args[arg_i]); + const arg = args[arg_i]; + const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { .none => continue, @@ -2416,21 +2432,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); }, .stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{}); + return self.fail("TODO implement calling with parameters in memory", .{}); }, .ptr_stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, .ptr_embedded_in_code => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); }, } } - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); @@ -2443,24 +2459,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else unreachable; - try self.genSetReg(inst.base.src, Type.initTag(.usize), .x30, .{ .memory = got_addr }); + try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr }); writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32()); } else if (func_value.castTag(.extern_fn)) |_| { - return self.fail(inst.base.src, "TODO implement calling extern functions", .{}); + return self.fail("TODO implement calling extern functions", .{}); } else { - return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); + return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{}); + return self.fail("TODO implement calling runtime known function pointer", .{}); } }, - else => return self.fail(inst.base.src, "TODO implement call for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement call for {}", .{self.target.cpu.arch}), } } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { for (info.args) |mc_arg, arg_i| { - const arg = inst.args[arg_i]; - const arg_mcv = try self.resolveInst(inst.args[arg_i]); + const arg = args[arg_i]; + const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. 
switch (mc_arg) { @@ -2471,18 +2487,18 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64, .aarch64 => try self.register_manager.getReg(reg, null), else => unreachable, } - try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); }, .stack_offset => { // Here we need to emit instructions like this: // mov qword ptr [rsp + stack_offset], x - return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{}); + return self.fail("TODO implement calling with parameters in memory", .{}); }, .ptr_stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, .ptr_embedded_in_code => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); }, .undef => unreachable, .immediate => unreachable, @@ -2495,7 +2511,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const func = func_payload.data; const got_addr = blk: { @@ -2506,13 +2522,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("got_addr = 0x{x}", .{got_addr}); switch (arch) { .x86_64 => { - try self.genSetReg(inst.base.src, Type.initTag(.u64), .rax, .{ .memory = got_addr }); + try self.genSetReg(Type.initTag(.u64), .rax, .{ .memory = got_addr }); // callq *%rax try self.code.ensureCapacity(self.code.items.len + 2); self.code.appendSliceAssumeCapacity(&[2]u8{ 0xff, 0xd0 }); }, .aarch64 => { - try self.genSetReg(inst.base.src, Type.initTag(.u64), .x30, .{ .memory = got_addr }); + try self.genSetReg(Type.initTag(.u64), .x30, .{ .memory = got_addr }); // blr x30 writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32()); }, @@ -2552,35 +2568,35 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); // We mark the space and fix it up later. } else { - return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); + return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{}); + return self.fail("TODO implement calling runtime known function pointer", .{}); } } else if (self.bin_file.cast(link.File.Plan9)) |p9| { switch (arch) { .x86_64 => { for (info.args) |mc_arg, arg_i| { - const arg = inst.args[arg_i]; - const arg_mcv = try self.resolveInst(inst.args[arg_i]); + const arg = args[arg_i]; + const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. 
switch (mc_arg) { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); }, .stack_offset => { // Here we need to emit instructions like this: // mov qword ptr [rsp + stack_offset], x - return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{}); + return self.fail("TODO implement calling with parameters in memory", .{}); }, .ptr_stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, .ptr_embedded_in_code => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); }, .undef => unreachable, .immediate => unreachable, @@ -2592,7 +2608,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, } } - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -2603,9 +2619,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 }); const fn_got_addr = got_addr + got_index * ptr_bytes; mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), @intCast(u32, fn_got_addr)); - } else return self.fail(inst.base.src, "TODO implement calling extern fn on plan9", .{}); + } else return self.fail("TODO implement calling extern fn on plan9", .{}); } else { - return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{}); + return self.fail("TODO implement calling runtime known function pointer", .{}); } }, .aarch64 => { @@ -2628,13 +2644,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); }, .stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{}); + return self.fail("TODO implement calling with parameters in memory", .{}); }, .ptr_stack_offset => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); }, .ptr_embedded_in_code => { - return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); + return self.fail("TODO implement calling with MCValue.ptr_embedded_in_code arg", .{}); }, } } @@ -2650,15 +2666,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32()); } else if (func_value.castTag(.extern_fn)) |_| { - return self.fail(inst.base.src, "TODO implement calling extern functions", .{}); + return self.fail("TODO implement calling extern functions", .{}); } else { - return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{}); + return self.fail("TODO implement calling bitcasted functions", .{}); } } else { - return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{}); + return self.fail("TODO implement calling runtime known function pointer", .{}); } }, - else => return self.fail(inst.base.src, "TODO implement call on plan9 for {}", 
.{self.target.cpu.arch}), + else => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}), } } else unreachable; @@ -2666,7 +2682,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .register => |reg| { if (Register.allocIndex(reg) == null) { // Save function return value in a callee saved register - return try self.copyToNewRegister(&inst.base, info.return_value); + return try self.copyToNewRegister(inst, info.return_value); } }, else => {}, @@ -2675,8 +2691,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return info.return_value; } - fn genRef(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - const operand = try self.resolveInst(inst.operand); + fn genRef(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand_ty = self.air.getType(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); switch (operand) { .unreach => unreachable, .dead => unreachable, @@ -2689,8 +2709,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned, .compare_flags_signed, => { - const stack_offset = try self.allocMemPtr(&inst.base); - try self.genSetStack(inst.base.src, inst.operand.ty, stack_offset, operand); + const stack_offset = try self.allocMemPtr(inst); + try self.genSetStack(operand_ty, stack_offset, operand); return MCValue{ .ptr_stack_offset = stack_offset }; }, @@ -2698,13 +2718,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .embedded_in_code => |offset| return MCValue{ .ptr_embedded_in_code = offset }, .memory => |vaddr| return MCValue{ .immediate = vaddr }, - .undef => return self.fail(inst.base.src, "TODO implement ref on an undefined value", .{}), + .undef => return self.fail("TODO implement ref on an undefined value", .{}), } } - fn ret(self: *Self, src: LazySrcLoc, mcv: MCValue) !MCValue { + fn ret(self: *Self, mcv: MCValue) !MCValue { const ret_ty = self.fn_type.fnReturnType(); - try self.setRegOrMem(src, ret_ty, self.ret_mcv, mcv); + try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); switch (arch) { .i386 => { try self.code.append(0xc3); // ret @@ -2730,58 +2750,54 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.code.resize(self.code.items.len + 4); try self.exitlude_jump_relocs.append(self.gpa, self.code.items.len - 4); }, - else => return self.fail(src, "TODO implement return for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement return for {}", .{self.target.cpu.arch}), } return .unreach; } - fn genRet(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - const operand = try self.resolveInst(inst.operand); + fn genRet(self: *Self, inst: Air.Inst.Index) !MCValue { + const operand = try self.resolveInst(self.air.instructions.items(.data)[inst].un_op); return self.ret(inst.base.src, operand); } - fn genRetVoid(self: *Self, inst: *ir.Inst.NoOp) !MCValue { - return self.ret(inst.base.src, .none); - } - - fn genCmp(self: *Self, inst: *ir.Inst.BinOp, op: math.CompareOperator) !MCValue { + fn genCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !MCValue { // No side effects, so if it's unreferenced, do nothing. 
- if (inst.base.isUnused()) - return MCValue{ .dead = {} }; - if (inst.lhs.ty.zigTypeTag() == .ErrorSet or inst.rhs.ty.zigTypeTag() == .ErrorSet) - return self.fail(inst.base.src, "TODO implement cmp for errors", .{}); + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const ty = self.air.getType(bin_op.lhs); + assert(ty.eql(self.air.getType(bin_op.rhs))); + if (ty.zigTypeTag() == .ErrorSet) + return self.fail("TODO implement cmp for errors", .{}); + + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); switch (arch) { .x86_64 => { try self.code.ensureCapacity(self.code.items.len + 8); - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); - // There are 2 operands, destination and source. // Either one, but not both, can be a memory operand. // Source operand can be an immediate, 8 bits or 32 bits. const dst_mcv = if (lhs.isImmediate() or (lhs.isMemory() and rhs.isMemory())) - try self.copyToNewRegister(&inst.base, lhs) + try self.copyToNewRegister(inst, lhs) else lhs; // This instruction supports only signed 32-bit immediates at most. - const src_mcv = try self.limitImmediateType(inst.rhs, i32); + const src_mcv = try self.limitImmediateType(bin_op.rhs, i32); - try self.genX8664BinMathCode(inst.base.src, inst.base.ty, dst_mcv, src_mcv, 7, 0x38); - const info = inst.lhs.ty.intInfo(self.target.*); + try self.genX8664BinMathCode(Type.initTag(.bool), dst_mcv, src_mcv, 7, 0x38); + const info = ty.intInfo(self.target.*); return switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, .arm, .armeb => { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); - const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; // lhs should always be a register - const rhs_should_be_register = try self.armOperandShouldBeRegister(inst.rhs.src, rhs); + const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs); var lhs_mcv = lhs; var rhs_mcv = rhs; @@ -2789,49 +2805,55 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate registers if (rhs_should_be_register) { if (!lhs_is_register and !rhs_is_register) { - const regs = try self.register_manager.allocRegs(2, .{ inst.rhs, inst.lhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ bin_op.rhs, bin_op.lhs }, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; } else if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(inst.rhs, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.rhs, &.{}) }; } } if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(inst.lhs, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.lhs, &.{}) }; } // Move the operands to the newly allocated registers const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; if (lhs_mcv == .register and !lhs_is_register) { - try self.genSetReg(inst.lhs.src, inst.lhs.ty, lhs_mcv.register, lhs); - branch.inst_table.putAssumeCapacity(inst.lhs, lhs); + try self.genSetReg(ty, lhs_mcv.register, lhs); + branch.inst_table.putAssumeCapacity(bin_op.lhs, lhs); } if (rhs_mcv == .register and !rhs_is_register) { - try self.genSetReg(inst.rhs.src, inst.rhs.ty, rhs_mcv.register, rhs); - 
branch.inst_table.putAssumeCapacity(inst.rhs, rhs); + try self.genSetReg(ty, rhs_mcv.register, rhs); + branch.inst_table.putAssumeCapacity(bin_op.rhs, rhs); } // The destination register is not present in the cmp instruction - try self.genArmBinOpCode(inst.base.src, undefined, lhs_mcv, rhs_mcv, false, .cmp_eq); + try self.genArmBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq); - const info = inst.lhs.ty.intInfo(self.target.*); + const info = ty.intInfo(self.target.*); return switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, - else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch}), } } - fn genDbgStmt(self: *Self, inst: *ir.Inst.DbgStmt) !MCValue { - try self.dbgAdvancePCAndLine(inst.line, inst.column); - assert(inst.base.isUnused()); + fn genDbgStmt(self: *Self, inst: Air.Inst.Index) !MCValue { + const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; + try self.dbgAdvancePCAndLine(dbg_stmt.line, dbg_stmt.column); + assert(self.liveness.isUnused(inst)); return MCValue.dead; } - fn genCondBr(self: *Self, inst: *ir.Inst.CondBr) !MCValue { - const cond = try self.resolveInst(inst.condition); + fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue { + const inst_datas = self.air.instructions.items(.data); + const pl_op = inst_datas[inst].pl_op; + const cond = try self.resolveInst(pl_op.operand); + const extra = self.air.extraData(Air.CondBr, inst_data.payload); + const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const reloc: Reloc = switch (arch) { .i386, .x86_64 => reloc: { @@ -2880,7 +2902,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { encoder.disp8(1); break :blk 0x84; }, - else => return self.fail(inst.base.src, "TODO implement condbr {s} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), + else => return self.fail("TODO implement condbr {s} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), }; self.code.appendSliceAssumeCapacity(&[_]u8{ 0x0f, opcode }); const reloc = Reloc{ .rel32 = self.code.items.len }; @@ -2906,7 +2928,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.cmp(.al, reg, op).toU32()); break :blk .ne; }, - else => return self.fail(inst.base.src, "TODO implement condbr {} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), + else => return self.fail("TODO implement condbr {} when condition is {s}", .{ self.target.cpu.arch, @tagName(cond) }), }; const reloc = Reloc{ @@ -2918,7 +2940,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.code.resize(self.code.items.len + 4); break :reloc reloc; }, - else => return self.fail(inst.base.src, "TODO implement condbr {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement condbr {}", .{self.target.cpu.arch}), }; // Capture the state of register and stack allocation state so that we can revert to it. 
@@ -2930,12 +2952,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.branch_stack.append(.{}); - const then_deaths = inst.thenDeaths(); + const then_deaths = self.liveness.thenDeaths(inst); try self.ensureProcessDeathCapacity(then_deaths.len); for (then_deaths) |operand| { self.processDeath(operand); } - try self.genBody(inst.then_body); + try self.genBody(then_body); // Revert to the previous register and stack allocation state. @@ -2951,16 +2973,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.next_stack_offset = parent_next_stack_offset; self.register_manager.free_registers = parent_free_registers; - try self.performReloc(inst.base.src, reloc); + try self.performReloc(reloc); const else_branch = self.branch_stack.addOneAssumeCapacity(); else_branch.* = .{}; - const else_deaths = inst.elseDeaths(); + const else_deaths = self.liveness.elseDeaths(inst); try self.ensureProcessDeathCapacity(else_deaths.len); for (else_deaths) |operand| { self.processDeath(operand); } - try self.genBody(inst.else_body); + try self.genBody(else_body); // At this point, each branch will possibly have conflicting values for where // each instruction is stored. They agree, however, on which instructions are alive/dead. @@ -3003,7 +3025,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating else_entry {*} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(inst.base.src, else_key.ty, canon_mcv, else_value); + try self.setRegOrMem(else_key.ty, canon_mcv, else_value); // TODO track the new register / stack allocation } try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() + @@ -3031,7 +3053,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating then_entry {*} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(inst.base.src, then_key.ty, parent_mcv, then_value); + try self.setRegOrMem(then_key.ty, parent_mcv, then_value); // TODO track the new register / stack allocation } @@ -3040,58 +3062,155 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue.unreach; } - fn genIsNull(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - switch (arch) { - else => return self.fail(inst.base.src, "TODO implement isnull for {}", .{self.target.cpu.arch}), - } - } - - fn genIsNullPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - return self.fail(inst.base.src, "TODO load the operand and call genIsNull", .{}); - } - - fn genIsNonNull(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn isNull(self: *Self, operand: MCValue) !MCValue { + _ = operand; // Here you can specialize this instruction if it makes sense to, otherwise the default - // will call genIsNull and invert the result. + // will call isNonNull and invert the result. 
switch (arch) { - else => return self.fail(inst.base.src, "TODO call genIsNull and invert the result ", .{}), + else => return self.fail("TODO call isNonNull and invert the result", .{}), } } - fn genIsNonNullPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - return self.fail(inst.base.src, "TODO load the operand and call genIsNonNull", .{}); - } - - fn genIsErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn isNonNull(self: *Self, operand: MCValue) !MCValue { + _ = operand; + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call isNull and invert the result. switch (arch) { - else => return self.fail(inst.base.src, "TODO implement iserr for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO call isNull and invert the result", .{}), } } - fn genIsErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - return self.fail(inst.base.src, "TODO load the operand and call genIsErr", .{}); - } - - fn genIsNonErr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { + fn isErr(self: *Self, operand: MCValue) !MCValue { + _ = operand; + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call isNonNull and invert the result. switch (arch) { - else => return self.fail(inst.base.src, "TODO implement is_non_err for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO call isNonErr and invert the result", .{}), } } - fn genIsNonErrPtr(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - return self.fail(inst.base.src, "TODO load the operand and call genIsNonErr", .{}); + fn isNonErr(self: *Self, operand: MCValue) !MCValue { + _ = operand; + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call isNull and invert the result. + switch (arch) { + else => return self.fail("TODO call isErr and invert the result", .{}), + } } - fn genLoop(self: *Self, inst: *ir.Inst.Loop) !MCValue { + fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand = try self.resolveInst(inst_datas[inst].un_op); + return self.isNull(operand); + } + + fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, ptr); + return self.isNull(operand); + } + + fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand = try self.resolveInst(inst_datas[inst].un_op); + return self.isNonNull(operand); + } + + fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
+ break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, ptr); + return self.isNonNull(operand); + } + + fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand = try self.resolveInst(inst_datas[inst].un_op); + return self.isErr(operand); + } + + fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, ptr); + return self.isErr(operand); + } + + fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand = try self.resolveInst(inst_datas[inst].un_op); + return self.isNonErr(operand); + } + + fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) + return MCValue.dead; + const inst_datas = self.air.instructions.items(.data); + const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, ptr); + return self.isNonErr(operand); + } + + fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue { // A loop is a setup to be able to jump back to the beginning. + const inst_datas = self.air.instructions.items(.data); + const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const start_index = self.code.items.len; - try self.genBody(inst.body); - try self.jump(inst.base.src, start_index); + try self.genBody(body); + try self.jump(start_index); return MCValue.unreach; } /// Send control flow to the `index` of `self.code`. 
- fn jump(self: *Self, src: LazySrcLoc, index: usize) !void { + fn jump(self: *Self, index: usize) !void { switch (arch) { .i386, .x86_64 => { try self.code.ensureCapacity(self.code.items.len + 5); @@ -3108,21 +3227,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (math.cast(i26, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| { writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(.al, delta).toU32()); } else |_| { - return self.fail(src, "TODO: enable larger branch offset", .{}); + return self.fail("TODO: enable larger branch offset", .{}); } }, .aarch64, .aarch64_be, .aarch64_32 => { if (math.cast(i28, @intCast(i32, index) - @intCast(i32, self.code.items.len + 8))) |delta| { writeInt(u32, try self.code.addManyAsArray(4), Instruction.b(delta).toU32()); } else |_| { - return self.fail(src, "TODO: enable larger branch offset", .{}); + return self.fail("TODO: enable larger branch offset", .{}); } }, - else => return self.fail(src, "TODO implement jump for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement jump for {}", .{self.target.cpu.arch}), } } - fn genBlock(self: *Self, inst: *ir.Inst.Block) !MCValue { + fn genBlock(self: *Self, inst: Air.Inst.Index) !MCValue { try self.blocks.putNoClobber(self.gpa, inst, .{ // A block is a setup to be able to jump to the end. .relocs = .{}, @@ -3136,20 +3255,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const block_data = self.blocks.getPtr(inst).?; defer block_data.relocs.deinit(self.gpa); - try self.genBody(inst.body); + const ty_pl = self.air.instructions.items(.data).ty_pl; + const extra = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; + try self.genBody(body); - for (block_data.relocs.items) |reloc| try self.performReloc(inst.base.src, reloc); + for (block_data.relocs.items) |reloc| try self.performReloc(reloc); return @bitCast(MCValue, block_data.mcv); } - fn genSwitch(self: *Self, inst: *ir.Inst.SwitchBr) !MCValue { + fn genSwitch(self: *Self, inst: Air.Inst.Index) !MCValue { + _ = inst; switch (arch) { - else => return self.fail(inst.base.src, "TODO genSwitch for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO genSwitch for {}", .{self.target.cpu.arch}), } } - fn performReloc(self: *Self, src: LazySrcLoc, reloc: Reloc) !void { + fn performReloc(self: *Self, reloc: Reloc) !void { switch (reloc) { .rel32 => |pos| { const amt = self.code.items.len - (pos + 4); @@ -3160,7 +3283,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // best place to elide jumps will be in semantic analysis, by inlining blocks that only // only have 1 break instruction. 
const s32_amt = math.cast(i32, amt) catch - return self.fail(src, "unable to perform relocation: jump too far", .{}); + return self.fail("unable to perform relocation: jump too far", .{}); mem.writeIntLittle(i32, self.code.items[pos..][0..4], s32_amt); }, .arm_branch => |info| { @@ -3170,7 +3293,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (math.cast(i26, amt)) |delta| { writeInt(u32, self.code.items[info.pos..][0..4], Instruction.b(info.cond, delta).toU32()); } else |_| { - return self.fail(src, "TODO: enable larger branch offset", .{}); + return self.fail("TODO: enable larger branch offset", .{}); } }, else => unreachable, // attempting to perfrom an ARM relocation on a non-ARM target arch @@ -3179,41 +3302,39 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBrBlockFlat(self: *Self, inst: *ir.Inst.BrBlockFlat) !MCValue { + fn genBrBlockFlat(self: *Self, inst: Air.Inst.Index) !MCValue { try self.genBody(inst.body); const last = inst.body.instructions[inst.body.instructions.len - 1]; - return self.br(inst.base.src, inst.block, last); + return self.br(inst.block, last); } - fn genBr(self: *Self, inst: *ir.Inst.Br) !MCValue { - return self.br(inst.base.src, inst.block, inst.operand); + fn genBr(self: *Self, inst: Air.Inst.Index) !MCValue { + return self.br(inst.block, inst.operand); } - fn genBrVoid(self: *Self, inst: *ir.Inst.BrVoid) !MCValue { - return self.brVoid(inst.base.src, inst.block); - } - - fn genBoolOp(self: *Self, inst: *ir.Inst.BinOp) !MCValue { - if (inst.base.isUnused()) + fn genBoolOp(self: *Self, inst: Air.Inst.Index) !MCValue { + if (self.liveness.isUnused(inst)) return MCValue.dead; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const air_tags = self.air.instructions.items(.tag); switch (arch) { - .x86_64 => switch (inst.base.tag) { + .x86_64 => switch (air_tags[inst]) { // lhs AND rhs - .bool_and => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs), + .bool_and => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), // lhs OR rhs - .bool_or => return try self.genX8664BinMath(&inst.base, inst.lhs, inst.rhs), + .bool_or => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), else => unreachable, // Not a boolean operation }, - .arm, .armeb => switch (inst.base.tag) { - .bool_and => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bool_and), - .bool_or => return try self.genArmBinOp(&inst.base, inst.lhs, inst.rhs, .bool_or), + .arm, .armeb => switch (air_tags[inst]) { + .bool_and => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and), + .bool_or => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or), else => unreachable, // Not a boolean operation }, - else => return self.fail(inst.base.src, "TODO implement boolean operations for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}), } } - fn br(self: *Self, src: LazySrcLoc, block: *ir.Inst.Block, operand: *ir.Inst) !MCValue { + fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Index) !MCValue { const block_data = self.blocks.getPtr(block).?; if (operand.ty.hasCodeGenBits()) { @@ -3222,13 +3343,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setRegOrMem(src, block.base.ty, block_mcv, operand_mcv); + try self.setRegOrMem(block.base.ty, block_mcv, operand_mcv); } } - return self.brVoid(src, block); + return 
self.brVoid(block); } - fn brVoid(self: *Self, src: LazySrcLoc, block: *ir.Inst.Block) !MCValue { + fn brVoid(self: *Self, block: Air.Inst.Index) !MCValue { const block_data = self.blocks.getPtr(block).?; // Emit a jump with a relocation. It will be patched up after the block ends. @@ -3252,43 +3373,43 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, }); }, - else => return self.fail(src, "TODO implement brvoid for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch}), } return .none; } - fn genAsm(self: *Self, inst: *ir.Inst.Assembly) !MCValue { - if (!inst.is_volatile and inst.base.isUnused()) + fn genAsm(self: *Self, inst: Air.Inst.Index) !MCValue { + if (!inst.is_volatile and self.liveness.isUnused(inst)) return MCValue.dead; switch (arch) { .arm, .armeb => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); + return self.fail("unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); } if (mem.eql(u8, inst.asm_source, "svc #0")) { writeInt(u32, try self.code.addManyAsArray(4), Instruction.svc(.al, 0).toU32()); } else { - return self.fail(inst.base.src, "TODO implement support for more arm assembly instructions", .{}); + return self.fail("TODO implement support for more arm assembly instructions", .{}); } if (inst.output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -3297,16 +3418,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); + return self.fail("unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. 
input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); } if (mem.eql(u8, inst.asm_source, "svc #0")) { @@ -3314,16 +3435,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (mem.eql(u8, inst.asm_source, "svc #0x80")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x80).toU32()); } else { - return self.fail(inst.base.src, "TODO implement support for more aarch64 assembly instructions", .{}); + return self.fail("TODO implement support for more aarch64 assembly instructions", .{}); } if (inst.output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -3332,31 +3453,31 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .riscv64 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); + return self.fail("unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); } if (mem.eql(u8, inst.asm_source, "ecall")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ecall.toU32()); } else { - return self.fail(inst.base.src, "TODO implement support for more riscv64 assembly instructions", .{}); + return self.fail("TODO implement support for more riscv64 assembly instructions", .{}); } if (inst.output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; @@ -3365,16 +3486,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64, .i386 => { for (inst.inputs) |input, i| { if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm input constraint: '{s}'", .{input}); + return self.fail("unrecognized asm input constraint: '{s}'", .{input}); } const reg_name = input[1 .. input.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(inst.base.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg.ty, reg, arg_mcv); } { @@ -3385,68 +3506,68 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (mem.indexOf(u8, ins, "push")) |_| { const arg = ins[4..]; if (mem.indexOf(u8, arg, "$")) |l| { - const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch return self.fail(inst.base.src, "TODO implement more inline asm int parsing", .{}); + const n = std.fmt.parseInt(u8, ins[4 + l + 1 ..], 10) catch return self.fail("TODO implement more inline asm int parsing", .{}); try self.code.appendSlice(&.{ 0x6a, n }); } else if (mem.indexOf(u8, arg, "%%")) |l| { const reg_name = ins[4 + l + 2 ..]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const low_id: u8 = reg.low_id(); if (reg.isExtended()) { try self.code.appendSlice(&.{ 0x41, 0b1010000 | low_id }); } else { try self.code.append(0b1010000 | low_id); } - } else return self.fail(inst.base.src, "TODO more push operands", .{}); + } else return self.fail("TODO more push operands", .{}); } else if (mem.indexOf(u8, ins, "pop")) |_| { const arg = ins[3..]; if (mem.indexOf(u8, arg, "%%")) |l| { const reg_name = ins[3 + l + 2 ..]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); const low_id: u8 = reg.low_id(); if (reg.isExtended()) { try self.code.appendSlice(&.{ 0x41, 0b1011000 | low_id }); } else { try self.code.append(0b1011000 | low_id); } - } else return self.fail(inst.base.src, "TODO more pop operands", .{}); + } else return self.fail("TODO more pop operands", .{}); } else { - return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{}); + return self.fail("TODO implement support for more x86 assembly instructions", .{}); } } } if (inst.output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { - return self.fail(inst.base.src, "unrecognized asm output constraint: '{s}'", .{output}); + return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse - return self.fail(inst.base.src, "unrecognized register: '{s}'", .{reg_name}); + return self.fail("unrecognized register: '{s}'", .{reg_name}); return MCValue{ .register = reg }; } else { return MCValue.none; } }, - else => return self.fail(inst.base.src, "TODO implement inline asm support for more architectures", .{}), + else => return self.fail("TODO implement inline asm support for more architectures", .{}), } } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. - fn setRegOrMem(self: *Self, src: LazySrcLoc, ty: Type, loc: MCValue, val: MCValue) !void { + fn setRegOrMem(self: *Self, ty: Type, loc: MCValue, val: MCValue) !void { switch (loc) { .none => return, - .register => |reg| return self.genSetReg(src, ty, reg, val), - .stack_offset => |off| return self.genSetStack(src, ty, off, val), + .register => |reg| return self.genSetReg(ty, reg, val), + .stack_offset => |off| return self.genSetStack(ty, off, val), .memory => { - return self.fail(src, "TODO implement setRegOrMem for memory", .{}); + return self.fail("TODO implement setRegOrMem for memory", .{}); }, else => unreachable, } } - fn genSetStack(self: *Self, src: LazySrcLoc, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { + fn genSetStack(self: *Self, ty: Type, stack_offset: u32, mcv: MCValue) InnerError!void { switch (arch) { .arm, .armeb => switch (mcv) { .dead => unreachable, @@ -3458,28 +3579,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. switch (ty.abiSize(self.target.*)) { - 1 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaa }), - 2 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaa }), - 4 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), - 8 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), - else => return self.fail(src, "TODO implement memset", .{}), + 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), + 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), + 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), + 8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + else => return self.fail("TODO implement memset", .{}), } }, .compare_flags_unsigned => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (unsigned)", .{}); }, .compare_flags_signed => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (signed)", .{}); }, .immediate => { - const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .embedded_in_code => |code_offset| { _ = code_offset; - return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{}); + return self.fail("TODO implement set stack variable from embedded_in_code", .{}); }, .register => |reg| { const abi_size = 
ty.abiSize(self.target.*); @@ -3489,7 +3610,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 1, 4 => { const offset = if (math.cast(u12, adj_off)) |imm| blk: { break :blk Instruction.Offset.imm(imm); - } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); + } else |_| Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); const str = switch (abi_size) { 1 => Instruction.strb, 4 => Instruction.str, @@ -3504,26 +3625,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 2 => { const offset = if (adj_off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off })); writeInt(u32, try self.code.addManyAsArray(4), Instruction.strh(.al, reg, .fp, .{ .offset = offset, .positive = false, }).toU32()); }, - else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}), + else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}), } }, .memory => |vaddr| { _ = vaddr; - return self.fail(src, "TODO implement set stack variable from memory vaddr", .{}); + return self.fail("TODO implement set stack variable from memory vaddr", .{}); }, .stack_offset => |off| { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. - const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, }, .x86_64 => switch (mcv) { @@ -3536,34 +3657,34 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. 
switch (ty.abiSize(self.target.*)) { - 1 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaa }), - 2 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaa }), - 4 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), - 8 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), - else => return self.fail(src, "TODO implement memset", .{}), + 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), + 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), + 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), + 8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + else => return self.fail("TODO implement memset", .{}), } }, .compare_flags_unsigned => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (unsigned)", .{}); }, .compare_flags_signed => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (signed)", .{}); }, .immediate => |x_big| { const abi_size = ty.abiSize(self.target.*); const adj_off = stack_offset + abi_size; if (adj_off > 128) { - return self.fail(src, "TODO implement set stack variable with large stack offset", .{}); + return self.fail("TODO implement set stack variable with large stack offset", .{}); } try self.code.ensureCapacity(self.code.items.len + 8); switch (abi_size) { 1 => { - return self.fail(src, "TODO implement set abi_size=1 stack variable with immediate", .{}); + return self.fail("TODO implement set abi_size=1 stack variable with immediate", .{}); }, 2 => { - return self.fail(src, "TODO implement set abi_size=2 stack variable with immediate", .{}); + return self.fail("TODO implement set abi_size=2 stack variable with immediate", .{}); }, 4 => { const x = @intCast(u32, x_big); @@ -3596,22 +3717,22 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.appendSliceAssumeCapacity(buf[0..4]); }, else => { - return self.fail(src, "TODO implement set abi_size=large stack variable with immediate", .{}); + return self.fail("TODO implement set abi_size=large stack variable with immediate", .{}); }, } }, .embedded_in_code => { // TODO this and `.stack_offset` below need to get improved to support types greater than // register size, and do general memcpy - const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .register => |reg| { try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89); }, .memory => |vaddr| { _ = vaddr; - return self.fail(src, "TODO implement set stack variable from memory vaddr", .{}); + return self.fail("TODO implement set stack variable from memory vaddr", .{}); }, .stack_offset => |off| { // TODO this and `.embedded_in_code` above need to get improved to support types greater than @@ -3620,8 +3741,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. 
- const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, }, .aarch64, .aarch64_be, .aarch64_32 => switch (mcv) { @@ -3634,28 +3755,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // TODO Upgrade this to a memset call when we have that available. switch (ty.abiSize(self.target.*)) { - 1 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaa }), - 2 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaa }), - 4 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), - 8 => return self.genSetStack(src, ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), - else => return self.fail(src, "TODO implement memset", .{}), + 1 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaa }), + 2 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaa }), + 4 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaa }), + 8 => return self.genSetStack(ty, stack_offset, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + else => return self.fail("TODO implement memset", .{}), } }, .compare_flags_unsigned => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (unsigned)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (unsigned)", .{}); }, .compare_flags_signed => |op| { _ = op; - return self.fail(src, "TODO implement set stack variable with compare flags value (signed)", .{}); + return self.fail("TODO implement set stack variable with compare flags value (signed)", .{}); }, .immediate => { - const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .embedded_in_code => |code_offset| { _ = code_offset; - return self.fail(src, "TODO implement set stack variable from embedded_in_code", .{}); + return self.fail("TODO implement set stack variable from embedded_in_code", .{}); }, .register => |reg| { const abi_size = ty.abiSize(self.target.*); @@ -3666,7 +3787,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const offset = if (math.cast(i9, adj_off)) |imm| Instruction.LoadStoreOffset.imm_post_index(-imm) else |_| - Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u64), MCValue{ .immediate = adj_off })); + Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off })); const rn: Register = switch (arch) { .aarch64, .aarch64_be => .x29, .aarch64_32 => .w29, @@ -3683,26 +3804,26 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .offset = offset, }).toU32()); }, - else => return self.fail(src, "TODO implement storing other types abi_size={}", .{abi_size}), + else => return self.fail("TODO implement storing other types abi_size={}", .{abi_size}), } }, .memory => |vaddr| { _ = vaddr; - return self.fail(src, "TODO implement set stack variable from memory vaddr", .{}); + return self.fail("TODO implement set stack variable from memory vaddr", .{}); }, .stack_offset => |off| { if (stack_offset == off) return; // Copy stack variable to itself; nothing to do. 
- const reg = try self.copyToTmpRegister(src, ty, mcv); - return self.genSetStack(src, ty, stack_offset, MCValue{ .register = reg }); + const reg = try self.copyToTmpRegister(ty, mcv); + return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, }, - else => return self.fail(src, "TODO implement getSetStack for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement getSetStack for {}", .{self.target.cpu.arch}), } } - fn genSetReg(self: *Self, src: LazySrcLoc, ty: Type, reg: Register, mcv: MCValue) InnerError!void { + fn genSetReg(self: *Self, ty: Type, reg: Register, mcv: MCValue) InnerError!void { switch (arch) { .arm, .armeb => switch (mcv) { .dead => unreachable, @@ -3713,7 +3834,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (!self.wantSafety()) return; // The already existing value will do just fine. // Write the debug undefined value. - return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }); + return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }); }, .compare_flags_unsigned, .compare_flags_signed, @@ -3732,7 +3853,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(condition, reg, one).toU32()); }, .immediate => |x| { - if (x > math.maxInt(u32)) return self.fail(src, "ARM registers are 32-bit wide", .{}); + if (x > math.maxInt(u32)) return self.fail("ARM registers are 32-bit wide", .{}); if (Instruction.Operand.fromU32(@intCast(u32, x))) |op| { writeInt(u32, try self.code.addManyAsArray(4), Instruction.mov(.al, reg, op).toU32()); @@ -3778,7 +3899,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .memory => |addr| { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
- try self.genSetReg(src, ty, reg, .{ .immediate = addr }); + try self.genSetReg(ty, reg, .{ .immediate = addr }); writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldr(.al, reg, reg, .{ .offset = Instruction.Offset.none }).toU32()); }, .stack_offset => |unadjusted_off| { @@ -3790,7 +3911,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 1, 4 => { const offset = if (adj_off <= math.maxInt(u12)) blk: { break :blk Instruction.Offset.imm(@intCast(u12, adj_off)); - } else Instruction.Offset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); + } else Instruction.Offset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off }), 0); const ldr = switch (abi_size) { 1 => Instruction.ldrb, 4 => Instruction.ldr, @@ -3805,17 +3926,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { 2 => { const offset = if (adj_off <= math.maxInt(u8)) blk: { break :blk Instruction.ExtraLoadStoreOffset.imm(@intCast(u8, adj_off)); - } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u32), MCValue{ .immediate = adj_off })); + } else Instruction.ExtraLoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u32), MCValue{ .immediate = adj_off })); writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldrh(.al, reg, .fp, .{ .offset = offset, .positive = false, }).toU32()); }, - else => return self.fail(src, "TODO a type of size {} is not allowed in a register", .{abi_size}), + else => return self.fail("TODO a type of size {} is not allowed in a register", .{abi_size}), } }, - else => return self.fail(src, "TODO implement getSetReg for arm {}", .{mcv}), + else => return self.fail("TODO implement getSetReg for arm {}", .{mcv}), }, .aarch64 => switch (mcv) { .dead => unreachable, @@ -3827,8 +3948,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // Write the debug undefined value. switch (reg.size()) { - 32 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }), - 64 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + 32 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }), + 64 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), else => unreachable, // unexpected register size } }, @@ -3876,7 +3997,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .size = 4, }); } else { - return self.fail(src, "TODO implement genSetReg for PIE GOT indirection on this platform", .{}); + return self.fail("TODO implement genSetReg for PIE GOT indirection on this platform", .{}); } mem.writeIntLittle( u32, @@ -3893,7 +4014,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. 
- try self.genSetReg(src, Type.initTag(.usize), reg, .{ .immediate = addr }); + try self.genSetReg(Type.initTag(.usize), reg, .{ .immediate = addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{ .register = .{ .rn = reg } }).toU32()); } }, @@ -3911,7 +4032,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const offset = if (math.cast(i9, adj_off)) |imm| Instruction.LoadStoreOffset.imm_post_index(-imm) else |_| - Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(src, Type.initTag(.u64), MCValue{ .immediate = adj_off })); + Instruction.LoadStoreOffset.reg(try self.copyToTmpRegister(Type.initTag(.u64), MCValue{ .immediate = adj_off })); switch (abi_size) { 1, 2 => { @@ -3931,10 +4052,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .offset = offset, } }).toU32()); }, - else => return self.fail(src, "TODO implement genSetReg other types abi_size={}", .{abi_size}), + else => return self.fail("TODO implement genSetReg other types abi_size={}", .{abi_size}), } }, - else => return self.fail(src, "TODO implement genSetReg for aarch64 {}", .{mcv}), + else => return self.fail("TODO implement genSetReg for aarch64 {}", .{mcv}), }, .riscv64 => switch (mcv) { .dead => unreachable, @@ -3945,7 +4066,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (!self.wantSafety()) return; // The already existing value will do just fine. // Write the debug undefined value. - return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); + return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }); }, .immediate => |unsigned_x| { const x = @bitCast(i64, unsigned_x); @@ -3965,19 +4086,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // li rd, immediate // "Myriad sequences" - return self.fail(src, "TODO genSetReg 33-64 bit immediates for riscv64", .{}); // glhf + return self.fail("TODO genSetReg 33-64 bit immediates for riscv64", .{}); // glhf }, .memory => |addr| { // The value is in memory at a hard-coded address. // If the type is a pointer, it means the pointer address is at this memory location. - try self.genSetReg(src, ty, reg, .{ .immediate = addr }); + try self.genSetReg(ty, reg, .{ .immediate = addr }); mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ld(reg, 0, reg).toU32()); // LOAD imm=[i12 offset = 0], rs1 = // return self.fail("TODO implement genSetReg memory for riscv64"); }, - else => return self.fail(src, "TODO implement getSetReg for riscv64 {}", .{mcv}), + else => return self.fail("TODO implement getSetReg for riscv64 {}", .{mcv}), }, .x86_64 => switch (mcv) { .dead => unreachable, @@ -3989,10 +4110,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return; // The already existing value will do just fine. // Write the debug undefined value. 
switch (reg.size()) { - 8 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaa }), - 16 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaa }), - 32 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaa }), - 64 => return self.genSetReg(src, ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), + 8 => return self.genSetReg(ty, reg, .{ .immediate = 0xaa }), + 16 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaa }), + 32 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaa }), + 64 => return self.genSetReg(ty, reg, .{ .immediate = 0xaaaaaaaaaaaaaaaa }), else => unreachable, } }, @@ -4019,7 +4140,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, .compare_flags_signed => |op| { _ = op; - return self.fail(src, "TODO set register with compare flags value (signed)", .{}); + return self.fail("TODO set register with compare flags value (signed)", .{}); }, .immediate => |x| { // 32-bit moves zero-extend to 64-bit, so xoring the 32-bit @@ -4152,7 +4273,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .size = 4, }); } else { - return self.fail(src, "TODO implement genSetReg for PIE GOT indirection on this platform", .{}); + return self.fail("TODO implement genSetReg for PIE GOT indirection on this platform", .{}); } // MOV reg, [reg] @@ -4208,7 +4329,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { assert(id3 != 4 and id3 != 5); // Rather than duplicate the logic used for the move, we just use a self-call with a new MCValue. - try self.genSetReg(src, ty, reg, MCValue{ .immediate = x }); + try self.genSetReg(ty, reg, MCValue{ .immediate = x }); // Now, the register contains the address of the value to load into it // Currently, we're only allowing 64-bit registers, so we need the `REX.W 8B /r` variant. @@ -4231,7 +4352,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const abi_size = ty.abiSize(self.target.*); const off = unadjusted_off + abi_size; if (off < std.math.minInt(i32) or off > std.math.maxInt(i32)) { - return self.fail(src, "stack offset too large", .{}); + return self.fail("stack offset too large", .{}); } const ioff = -@intCast(i32, off); const encoder = try X8664Encoder.init(self.code, 3); @@ -4251,21 +4372,21 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } }, }, - else => return self.fail(src, "TODO implement getSetReg for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement getSetReg for {}", .{self.target.cpu.arch}), } } - fn genPtrToInt(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - // no-op - return self.resolveInst(inst.operand); + fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue { + const inst_datas = self.air.instructions.items(.data); + return self.resolveInst(inst_datas[inst].un_op); } - fn genBitCast(self: *Self, inst: *ir.Inst.UnOp) !MCValue { - const operand = try self.resolveInst(inst.operand); - return operand; + fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue { + const inst_datas = self.air.instructions.items(.data); + return self.resolveInst(inst_datas[inst].ty_op.operand); } - fn resolveInst(self: *Self, inst: *ir.Inst) !MCValue { + fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue { // If the type has no codegen bits, no need to store it. 
if (!inst.ty.hasCodeGenBits()) return MCValue.none; @@ -4283,7 +4404,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.getResolvedInstValue(inst); } - fn getResolvedInstValue(self: *Self, inst: *ir.Inst) MCValue { + fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { // Treat each stack item as a "layer" on top of the previous one. var i: usize = self.branch_stack.items.len; while (true) { @@ -4300,7 +4421,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// A potential opportunity for future optimization here would be keeping track /// of the fact that the instruction is available both as an immediate /// and as a register. - fn limitImmediateType(self: *Self, inst: *ir.Inst, comptime T: type) !MCValue { + fn limitImmediateType(self: *Self, inst: Air.Inst.Index, comptime T: type) !MCValue { const mcv = try self.resolveInst(inst); const ti = @typeInfo(T).Int; switch (mcv) { @@ -4308,7 +4429,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // This immediate is unsigned. const U = std.meta.Int(.unsigned, ti.bits - @boolToInt(ti.signedness == .signed)); if (imm >= math.maxInt(U)) { - return MCValue{ .register = try self.copyToTmpRegister(inst.src, Type.initTag(.usize), mcv) }; + return MCValue{ .register = try self.copyToTmpRegister(Type.initTag(.usize), mcv) }; } }, else => {}, @@ -4334,7 +4455,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { _ = slice_len; _ = ptr_imm; // We need more general support for const data being stored in memory to make this work. - return self.fail(src, "TODO codegen for const slices", .{}); + return self.fail("TODO codegen for const slices", .{}); }, else => { if (typed_value.val.castTag(.decl_ref)) |payload| { @@ -4360,19 +4481,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes; return MCValue{ .memory = got_addr }; } else { - return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{}); + return self.fail("TODO codegen non-ELF const Decl pointer", .{}); } } if (typed_value.val.tag() == .int_u64) { return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; } - return self.fail(src, "TODO codegen more kinds of const pointers", .{}); + return self.fail("TODO codegen more kinds of const pointers", .{}); }, }, .Int => { const info = typed_value.ty.intInfo(self.target.*); if (info.bits > ptr_bits or info.signedness == .signed) { - return self.fail(src, "TODO const int bigger than ptr and signed int", .{}); + return self.fail("TODO const int bigger than ptr and signed int", .{}); } return MCValue{ .immediate = typed_value.val.toUnsignedInt() }; }, @@ -4394,9 +4515,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (typed_value.ty.abiSize(self.target.*) == 1) { return MCValue{ .immediate = @boolToInt(typed_value.val.isNull()) }; } - return self.fail(src, "TODO non pointer optionals", .{}); + return self.fail("TODO non pointer optionals", .{}); }, - else => return self.fail(src, "TODO implement const of type '{}'", .{typed_value.ty}), + else => return self.fail("TODO implement const of type '{}'", .{typed_value.ty}), } } @@ -4413,7 +4534,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; /// Caller must call `CallMCValues.deinit`. 
- fn resolveCallingConventionValues(self: *Self, src: LazySrcLoc, fn_ty: Type) !CallMCValues { + fn resolveCallingConventionValues(self: *Self, fn_ty: Type) !CallMCValues { const cc = fn_ty.fnCallingConvention(); const param_types = try self.gpa.alloc(Type, fn_ty.fnParamLen()); defer self.gpa.free(param_types); @@ -4482,7 +4603,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.stack_byte_count = next_stack_offset; result.stack_align = 16; }, - else => return self.fail(src, "TODO implement function parameters for {} on x86_64", .{cc}), + else => return self.fail("TODO implement function parameters for {} on x86_64", .{cc}), } }, .arm, .armeb => { @@ -4509,10 +4630,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; ncrn += 1; } else { - return self.fail(src, "TODO MCValues with multiple registers", .{}); + return self.fail("TODO MCValues with multiple registers", .{}); } } else if (ncrn < 4 and nsaa == 0) { - return self.fail(src, "TODO MCValues split between registers and stack", .{}); + return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 4; if (ty.abiAlignment(self.target.*) == 8) @@ -4526,7 +4647,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.stack_byte_count = nsaa; result.stack_align = 4; }, - else => return self.fail(src, "TODO implement function parameters for {} on arm", .{cc}), + else => return self.fail("TODO implement function parameters for {} on arm", .{cc}), } }, .aarch64 => { @@ -4557,10 +4678,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.args[i] = .{ .register = c_abi_int_param_regs[ncrn] }; ncrn += 1; } else { - return self.fail(src, "TODO MCValues with multiple registers", .{}); + return self.fail("TODO MCValues with multiple registers", .{}); } } else if (ncrn < 8 and nsaa == 0) { - return self.fail(src, "TODO MCValues split between registers and stack", .{}); + return self.fail("TODO MCValues split between registers and stack", .{}); } else { ncrn = 8; // TODO Apple allows the arguments on the stack to be non-8-byte aligned provided @@ -4579,11 +4700,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { result.stack_byte_count = nsaa; result.stack_align = 16; }, - else => return self.fail(src, "TODO implement function parameters for {} on aarch64", .{cc}), + else => return self.fail("TODO implement function parameters for {} on aarch64", .{cc}), } }, else => if (param_types.len != 0) - return self.fail(src, "TODO implement codegen parameters for {}", .{self.target.cpu.arch}), + return self.fail("TODO implement codegen parameters for {}", .{self.target.cpu.arch}), } if (ret_ty.zigTypeTag() == .NoReturn) { @@ -4598,7 +4719,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const aliased_reg = registerAlias(c_abi_int_return_regs[0], ret_ty_size); result.return_value = .{ .register = aliased_reg }; }, - else => return self.fail(src, "TODO implement function return values for {}", .{cc}), + else => return self.fail("TODO implement function return values for {}", .{cc}), }, .arm, .armeb => switch (cc) { .Naked => unreachable, @@ -4607,10 +4728,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (ret_ty_size <= 4) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; } else { - return self.fail(src, "TODO support more return types for ARM backend", .{}); + return self.fail("TODO support more return types for ARM backend", .{}); } }, - else => return self.fail(src, "TODO 
implement function return values for {}", .{cc}), + else => return self.fail("TODO implement function return values for {}", .{cc}), }, .aarch64 => switch (cc) { .Naked => unreachable, @@ -4619,12 +4740,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (ret_ty_size <= 8) { result.return_value = .{ .register = c_abi_int_return_regs[0] }; } else { - return self.fail(src, "TODO support more return types for ARM backend", .{}); + return self.fail("TODO support more return types for ARM backend", .{}); } }, - else => return self.fail(src, "TODO implement function return values for {}", .{cc}), + else => return self.fail("TODO implement function return values for {}", .{cc}), }, - else => return self.fail(src, "TODO implement codegen return values for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO implement codegen return values for {}", .{self.target.cpu.arch}), } return result; } diff --git a/src/register_manager.zig b/src/register_manager.zig index 9c61423706..8aca7fcc3d 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -147,14 +147,14 @@ pub fn RegisterManager( self.markRegUsed(reg); } else { const spilled_inst = self.registers[index].?; - try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); + try self.getFunction().spillInstruction(reg, spilled_inst); } self.registers[index] = inst; } else { // Don't track the register if (!self.isRegFree(reg)) { const spilled_inst = self.registers[index].?; - try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); + try self.getFunction().spillInstruction(reg, spilled_inst); self.freeReg(reg); } } @@ -184,7 +184,7 @@ pub fn RegisterManager( // stack allocation. const spilled_inst = self.registers[index].?; self.registers[index] = tracked_inst; - try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); + try self.getFunction().spillInstruction(reg, spilled_inst); } else { self.getRegAssumeFree(reg, tracked_inst); } @@ -193,7 +193,7 @@ pub fn RegisterManager( // Move the instruction that was previously there to a // stack allocation. 
const spilled_inst = self.registers[index].?; - try self.getFunction().spillInstruction(spilled_inst.src, reg, spilled_inst); + try self.getFunction().spillInstruction(reg, spilled_inst); self.freeReg(reg); } } @@ -264,8 +264,7 @@ fn MockFunction(comptime Register: type) type { self.spilled.deinit(self.allocator); } - pub fn spillInstruction(self: *Self, src: LazySrcLoc, reg: Register, inst: *ir.Inst) !void { - _ = src; + pub fn spillInstruction(self: *Self, reg: Register, inst: *ir.Inst) !void { _ = inst; try self.spilled.append(self.allocator, reg); } From 913393fd3b986dd262a8419341dced9ad5d9620d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jul 2021 15:30:30 -0700 Subject: [PATCH 06/53] stage2: first pass over Module.zig for AIR memory layout --- BRANCH_TODO | 122 +++++++++++++ src/Air.zig | 14 +- src/AstGen.zig | 2 +- src/Module.zig | 359 ++++----------------------------------- src/Sema.zig | 114 ++++++++++++- src/codegen.zig | 196 +++++++++++---------- src/codegen/spirv.zig | 57 ++++--- src/link/SpirV.zig | 4 + src/register_manager.zig | 16 +- 9 files changed, 429 insertions(+), 455 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index be3959e035..585c8adf44 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -568,3 +568,125 @@ const DumpAir = struct { } } }; + +pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst { + _ = mod; + const const_inst = try arena.create(ir.Inst.Constant); + const_inst.* = .{ + .base = .{ + .tag = ir.Inst.Constant.base_tag, + .ty = typed_value.ty, + .src = src, + }, + .val = typed_value.val, + }; + return &const_inst.base; +} + +pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.type), + .val = try ty.toValue(arena), + }); +} + +pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.void), + .val = Value.initTag(.void_value), + }); +} + +pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.noreturn), + .val = Value.initTag(.unreachable_value), + }); +} + +pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = Value.initTag(.undef), + }); +} + +pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.bool), + .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], + }); +} + +pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_u64.create(arena, int), + }); +} + +pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_i64.create(arena, int), + }); +} + +pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst { + if (big_int.positive) { + if (big_int.to(u64)) |x| { + return mod.constIntUnsigned(arena, src, ty, x); + } else |err| switch (err) { + error.NegativeIntoUnsigned => unreachable, + error.TargetTooSmall => {}, // handled below + } + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try 
Value.Tag.int_big_positive.create(arena, big_int.limbs), + }); + } else { + if (big_int.to(i64)) |x| { + return mod.constIntSigned(arena, src, ty, x); + } else |err| switch (err) { + error.NegativeIntoUnsigned => unreachable, + error.TargetTooSmall => {}, // handled below + } + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs), + }); + } +} + +pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { + const zir_module = scope.namespace(); + const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); + const loc = std.zig.findLineColumn(source, inst.src); + if (inst.tag == .constant) { + std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ + inst.ty, + inst.castTag(.constant).?.val, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } else if (inst.deaths == 0) { + std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ + @tagName(inst.tag), + inst.ty, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } else { + std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ + @tagName(inst.tag), + inst.ty, + inst.deaths, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } +} + diff --git a/src/Air.zig b/src/Air.zig index 112845559d..e85f2e5c43 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -29,8 +29,11 @@ pub const Inst = struct { data: Data, pub const Tag = enum(u8) { - /// The first N instructions in Air must be one arg instruction per function parameter. - /// Uses the `ty` field. + /// The first N instructions in the main block must be one arg instruction per + /// function parameter. This makes function parameters participate in + /// liveness analysis without any special handling. + /// Uses the `ty_str` field. + /// The string is the parameter name. arg, /// Float or integer addition. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type @@ -131,6 +134,8 @@ pub const Inst = struct { /// A comptime-known value. Uses the `ty_pl` field, payload is index of /// `values` array. constant, + /// A comptime-known type. Uses the `ty` field. + const_ty, /// Notes the beginning of a source code statement and marks the line and column. /// Result type is always void. /// Uses the `dbg_stmt` field. @@ -289,6 +294,11 @@ pub const Inst = struct { // Index into a different array. payload: u32, }, + ty_str: struct { + ty: Ref, + // ZIR string table index. + str: u32, + }, br: struct { block_inst: Index, operand: Ref, diff --git a/src/AstGen.zig b/src/AstGen.zig index 19906c94d3..24766aaf60 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -9821,7 +9821,7 @@ fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void { astgen.source_column = column; } -const ref_start_index = Zir.Inst.Ref.typed_value_map.len; +const ref_start_index: u32 = Zir.Inst.Ref.typed_value_map.len; fn indexToRef(inst: Zir.Inst.Index) Zir.Inst.Ref { return @intToEnum(Zir.Inst.Ref, ref_start_index + inst); diff --git a/src/Module.zig b/src/Module.zig index 6273243ee2..8971a57487 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1155,7 +1155,7 @@ pub const Scope = struct { /// This can vary during inline or comptime function calls. See `Sema.owner_decl` /// for the one that will be the same for all Block instances. 
src_decl: *Decl, - instructions: ArrayListUnmanaged(*ir.Inst), + instructions: ArrayListUnmanaged(Air.Inst.Index), label: ?*Label = null, inlining: ?*Inlining, /// If runtime_index is not 0 then one of these is guaranteed to be non null. @@ -1187,14 +1187,14 @@ pub const Scope = struct { }; pub const Merges = struct { - block_inst: *ir.Inst.Block, + block_inst: Air.Inst.Index, /// Separate array list from break_inst_list so that it can be passed directly /// to resolvePeerTypes. - results: ArrayListUnmanaged(*ir.Inst), + results: ArrayListUnmanaged(Air.Inst.Index), /// Keeps track of the break instructions so that the operand can be replaced /// if we need to add type coercion at the end of block analysis. /// Same indexes, capacity, length as `results`. - br_list: ArrayListUnmanaged(*ir.Inst.Br), + br_list: ArrayListUnmanaged(Air.Inst.Index), }; /// For debugging purposes. @@ -1230,187 +1230,6 @@ pub const Scope = struct { pub fn getFileScope(block: *Block) *Scope.File { return block.src_decl.namespace.file_scope; } - - pub fn addNoOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - comptime tag: ir.Inst.Tag, - ) !*ir.Inst { - const inst = try block.sema.arena.create(tag.Type()); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addUnOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - tag: ir.Inst.Tag, - operand: *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.UnOp); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - .operand = operand, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addBinOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - tag: ir.Inst.Tag, - lhs: *ir.Inst, - rhs: *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.BinOp); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - .lhs = lhs, - .rhs = rhs, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addBr( - scope_block: *Scope.Block, - src: LazySrcLoc, - target_block: *ir.Inst.Block, - operand: *ir.Inst, - ) !*ir.Inst.Br { - const inst = try scope_block.sema.arena.create(ir.Inst.Br); - inst.* = .{ - .base = .{ - .tag = .br, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .operand = operand, - .block = target_block, - }; - try scope_block.instructions.append(scope_block.sema.gpa, &inst.base); - return inst; - } - - pub fn addCondBr( - block: *Scope.Block, - src: LazySrcLoc, - condition: *ir.Inst, - then_body: ir.Body, - else_body: ir.Body, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.CondBr); - inst.* = .{ - .base = .{ - .tag = .condbr, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .condition = condition, - .then_body = then_body, - .else_body = else_body, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addCall( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - func: *ir.Inst, - args: []const *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.Call); - inst.* = .{ - .base = .{ - .tag = .call, - .ty = ty, - .src = src, - }, - .func = func, - .args = args, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addSwitchBr( - block: *Scope.Block, - src: LazySrcLoc, - operand: *ir.Inst, - cases: 
[]ir.Inst.SwitchBr.Case, - else_body: ir.Body, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.SwitchBr); - inst.* = .{ - .base = .{ - .tag = .switchbr, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .target = operand, - .cases = cases, - .else_body = else_body, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addDbgStmt(block: *Scope.Block, src: LazySrcLoc, line: u32, column: u32) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.DbgStmt); - inst.* = .{ - .base = .{ - .tag = .dbg_stmt, - .ty = Type.initTag(.void), - .src = src, - }, - .line = line, - .column = column, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addStructFieldPtr( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - struct_ptr: *ir.Inst, - field_index: u32, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.StructFieldPtr); - inst.* = .{ - .base = .{ - .tag = .struct_field_ptr, - .ty = ty, - .src = src, - }, - .struct_ptr = struct_ptr, - .field_index = field_index, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } }; }; @@ -3594,30 +3413,14 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); + const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen()); defer gpa.free(param_inst_list); - for (param_inst_list) |*param_inst, param_index| { - const param_type = fn_ty.fnParamType(param_index); - const arg_inst = try arena.allocator.create(ir.Inst.Arg); - arg_inst.* = .{ - .base = .{ - .tag = .arg, - .ty = param_type, - .src = .unneeded, - }, - .name = undefined, // Set in the semantic analysis of the arg instruction. - }; - param_inst.* = &arg_inst.base; - } - - const zir = decl.namespace.file_scope.zir; - var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = &arena.allocator, - .code = zir, + .code = decl.namespace.file_scope.zir, .owner_decl = decl, .namespace = decl.namespace, .func = func, @@ -3641,7 +3444,21 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { }; defer inner_block.instructions.deinit(gpa); - // AIR currently requires the arg parameters to be the first N instructions + // AIR requires the arg parameters to be the first N instructions. + for (param_inst_list) |*param_inst, param_index| { + const param_type = fn_ty.fnParamType(param_index); + const ty_ref = try sema.addType(param_type); + param_inst.* = @intCast(u32, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .arg, + .data = .{ + .ty_str = .{ + .ty = ty_ref, + .str = undefined, // Set in the semantic analysis of the arg instruction. + }, + }, + }); + } try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; @@ -3650,17 +3467,21 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { try sema.analyzeFnBody(&inner_block, func.zir_body_inst); // Copy the block into place and mark that as the main block. 
- sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len; - try sema.air_extra.appendSlice(inner_block.instructions.items); + try sema.air_extra.ensureUnusedCapacity(gpa, inner_block.instructions.items.len + 1); + const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(u32, inner_block.instructions.items.len), + }); + sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); + sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index; func.state = .success; log.debug("set {s} to success", .{decl.name}); return Air{ .instructions = sema.air_instructions.toOwnedSlice(), - .extra = sema.air_extra.toOwnedSlice(), - .values = sema.air_values.toOwnedSlice(), - .variables = sema.air_variables.toOwnedSlice(), + .extra = sema.air_extra.toOwnedSlice(gpa), + .values = sema.air_values.toOwnedSlice(gpa), + .variables = sema.air_variables.toOwnedSlice(gpa), }; } @@ -3815,94 +3636,6 @@ pub fn analyzeExport( de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export; errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1); } -pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst { - _ = mod; - const const_inst = try arena.create(ir.Inst.Constant); - const_inst.* = .{ - .base = .{ - .tag = ir.Inst.Constant.base_tag, - .ty = typed_value.ty, - .src = src, - }, - .val = typed_value.val, - }; - return &const_inst.base; -} - -pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.type), - .val = try ty.toValue(arena), - }); -} - -pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), - }); -} - -pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.noreturn), - .val = Value.initTag(.unreachable_value), - }); -} - -pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = Value.initTag(.undef), - }); -} - -pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.bool), - .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], - }); -} - -pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_u64.create(arena, int), - }); -} - -pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_i64.create(arena, int), - }); -} - -pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst { - if (big_int.positive) { - if (big_int.to(u64)) |x| { - return mod.constIntUnsigned(arena, src, ty, x); - } else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs), - }); - } else { - if (big_int.to(i64)) |x| { - return mod.constIntSigned(arena, src, ty, x); - } 
else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs), - }); - } -} pub fn deleteAnonDecl(mod: *Module, scope: *Scope, decl: *Decl) void { const scope_decl = scope.ownerDecl().?; @@ -4438,38 +4171,6 @@ pub fn errorUnionType( }); } -pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { - const zir_module = scope.namespace(); - const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); - const loc = std.zig.findLineColumn(source, inst.src); - if (inst.tag == .constant) { - std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ - inst.ty, - inst.castTag(.constant).?.val, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else if (inst.deaths == 0) { - std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else { - std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - inst.deaths, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } -} - pub fn getTarget(mod: Module) Target { return mod.comp.bin_file.options.target; } diff --git a/src/Sema.zig b/src/Sema.zig index b4e10837af..d7ec01696f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -12,9 +12,9 @@ gpa: *Allocator, arena: *Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, -air_extra: ArrayListUnmanaged(u32) = .{}, -air_values: ArrayListUnmanaged(Value) = .{}, -air_variables: ArrayListUnmanaged(Module.Var) = .{}, +air_extra: std.ArrayListUnmanaged(u32) = .{}, +air_values: std.ArrayListUnmanaged(Value) = .{}, +air_variables: std.ArrayListUnmanaged(*Module.Var) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -1263,15 +1263,16 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air sema.next_arg_index += 1; // TODO check if arg_name shadows a Decl + _ = arg_name; if (block.inlining) |_| { return sema.param_inst_list[arg_index]; } - // Need to set the name of the Air.Arg instruction. - const air_arg = sema.param_inst_list[arg_index].castTag(.arg).?; - air_arg.name = arg_name; - return &air_arg.base; + // Set the name of the Air.Arg instruction for use by codegen debug info. 
+    const air_arg = sema.param_inst_list[arg_index];
+    sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start;
+    return air_arg;
 }
 
 fn zirAllocExtended(
@@ -7940,3 +7941,102 @@ fn enumFieldSrcLoc(
         }
     } else unreachable;
 }
+
+pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
+    switch (ty.tag()) {
+        .u8 => return .u8_type,
+        .i8 => return .i8_type,
+        .u16 => return .u16_type,
+        .i16 => return .i16_type,
+        .u32 => return .u32_type,
+        .i32 => return .i32_type,
+        .u64 => return .u64_type,
+        .i64 => return .i64_type,
+        .u128 => return .u128_type,
+        .i128 => return .i128_type,
+        .usize => return .usize_type,
+        .isize => return .isize_type,
+        .c_short => return .c_short_type,
+        .c_ushort => return .c_ushort_type,
+        .c_int => return .c_int_type,
+        .c_uint => return .c_uint_type,
+        .c_long => return .c_long_type,
+        .c_ulong => return .c_ulong_type,
+        .c_longlong => return .c_longlong_type,
+        .c_ulonglong => return .c_ulonglong_type,
+        .c_longdouble => return .c_longdouble_type,
+        .f16 => return .f16_type,
+        .f32 => return .f32_type,
+        .f64 => return .f64_type,
+        .f128 => return .f128_type,
+        .c_void => return .c_void_type,
+        .bool => return .bool_type,
+        .void => return .void_type,
+        .type => return .type_type,
+        .anyerror => return .anyerror_type,
+        .comptime_int => return .comptime_int_type,
+        .comptime_float => return .comptime_float_type,
+        .noreturn => return .noreturn_type,
+        .@"anyframe" => return .anyframe_type,
+        .@"null" => return .null_type,
+        .@"undefined" => return .undefined_type,
+        .enum_literal => return .enum_literal_type,
+        .atomic_ordering => return .atomic_ordering_type,
+        .atomic_rmw_op => return .atomic_rmw_op_type,
+        .calling_convention => return .calling_convention_type,
+        .float_mode => return .float_mode_type,
+        .reduce_op => return .reduce_op_type,
+        .call_options => return .call_options_type,
+        .export_options => return .export_options_type,
+        .extern_options => return .extern_options_type,
+        .manyptr_u8 => return .manyptr_u8_type,
+        .manyptr_const_u8 => return .manyptr_const_u8_type,
+        .fn_noreturn_no_args => return .fn_noreturn_no_args_type,
+        .fn_void_no_args => return .fn_void_no_args_type,
+        .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type,
+        .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type,
+        .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type,
+        .const_slice_u8 => return .const_slice_u8_type,
+        else => {},
+    }
+    try sema.air_instructions.append(sema.gpa, .{
+        .tag = .const_ty,
+        .data = .{ .ty = ty },
+    });
+    return indexToRef(@intCast(u32, sema.air_instructions.len - 1));
+}
+
+const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len;
+
+fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref {
+    return @intToEnum(Air.Inst.Ref, ref_start_index + inst);
+}
+
+fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index {
+    const ref_int = @enumToInt(inst);
+    if (ref_int >= ref_start_index) {
+        return ref_int - ref_start_index;
+    } else {
+        return null;
+    }
+}
+
+pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 {
+    const fields = std.meta.fields(@TypeOf(extra));
+    try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len);
+    return addExtraAssumeCapacity(sema, extra);
+}
+
+pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
+    const fields = std.meta.fields(@TypeOf(extra));
+    const result = @intCast(u32, sema.air_extra.items.len);
+    inline for (fields) |field| {
+        sema.air_extra.appendAssumeCapacity(switch (field.field_type) {
+            u32 => @field(extra, field.name),
Air.Inst.Ref => @enumToInt(@field(extra, field.name)), + i32 => @bitCast(u32, @field(extra, field.name)), + else => @compileError("bad field type"), + }); + } + return result; +} diff --git a/src/codegen.zig b/src/codegen.zig index 65e85702e5..eaf910977e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3,6 +3,7 @@ const mem = std.mem; const math = std.math; const assert = std.debug.assert; const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; const TypedValue = @import("TypedValue.zig"); @@ -45,6 +46,71 @@ pub const DebugInfoOutput = union(enum) { none, }; +pub fn generateFunction( + bin_file: *link.File, + src_loc: Module.SrcLoc, + func: *Module.Fn, + air: Air, + liveness: Liveness, + code: *std.ArrayList(u8), + debug_output: DebugInfoOutput, +) GenerateSymbolError!Result { + switch (bin_file.options.target.cpu.arch) { + .wasm32 => unreachable, // has its own code path + .wasm64 => unreachable, // has its own code path + .arm => return Function(.arm).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .armeb => return Function(.armeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64 => return Function(.aarch64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64_be => return Function(.aarch64_be).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64_32 => return Function(.aarch64_32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.arc => return Function(.arc).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.avr => return Function(.avr).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.bpfel => return Function(.bpfel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.bpfeb => return Function(.bpfeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hexagon => return Function(.hexagon).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips => return Function(.mips).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mipsel => return Function(.mipsel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips64 => return Function(.mips64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips64el => return Function(.mips64el).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.msp430 => return Function(.msp430).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc => return Function(.powerpc).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc64 => return Function(.powerpc64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc64le => return Function(.powerpc64le).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.r600 => return Function(.r600).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdgcn => return Function(.amdgcn).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.riscv32 => return Function(.riscv32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .riscv64 => return Function(.riscv64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.sparc => return Function(.sparc).generate(bin_file, src_loc, func, air, liveness, 
code, debug_output), + //.sparcv9 => return Function(.sparcv9).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.sparcel => return Function(.sparcel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.s390x => return Function(.s390x).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.tce => return Function(.tce).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.tcele => return Function(.tcele).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.thumb => return Function(.thumb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.thumbeb => return Function(.thumbeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.i386 => return Function(.i386).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .x86_64 => return Function(.x86_64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.xcore => return Function(.xcore).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.nvptx => return Function(.nvptx).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.nvptx64 => return Function(.nvptx64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.le32 => return Function(.le32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.le64 => return Function(.le64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdil => return Function(.amdil).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdil64 => return Function(.amdil64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hsail => return Function(.hsail).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hsail64 => return Function(.hsail64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.spir => return Function(.spir).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.spir64 => return Function(.spir64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.kalimba => return Function(.kalimba).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.shave => return Function(.shave).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.lanai => return Function(.lanai).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.renderscript32 => return Function(.renderscript32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.renderscript64 => return Function(.renderscript64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.ve => return Function(.ve).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. 
Eventually these will be completed, but stage1 is slow and a memory hog."), + } +} + pub fn generateSymbol( bin_file: *link.File, src_loc: Module.SrcLoc, @@ -57,60 +123,14 @@ pub fn generateSymbol( switch (typed_value.ty.zigTypeTag()) { .Fn => { - switch (bin_file.options.target.cpu.arch) { - .wasm32 => unreachable, // has its own code path - .wasm64 => unreachable, // has its own code path - .arm => return Function(.arm).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .armeb => return Function(.armeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64 => return Function(.aarch64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.arc => return Function(.arc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.avr => return Function(.avr).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips => return Function(.mips).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips64 => return Function(.mips64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.msp430 => return Function(.msp430).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.r600 => return Function(.r600).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparc => return Function(.sparc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.s390x => return Function(.s390x).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.tce => return Function(.tce).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.tcele => return Function(.tcele).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.thumb => return Function(.thumb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.thumbeb => return 
Function(.thumbeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.i386 => return Function(.i386).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.xcore => return Function(.xcore).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.le32 => return Function(.le32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.le64 => return Function(.le64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdil => return Function(.amdil).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hsail => return Function(.hsail).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.spir => return Function(.spir).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.spir64 => return Function(.spir64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.shave => return Function(.shave).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.lanai => return Function(.lanai).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.ve => return Function(.ve).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. 
Eventually these will be completed, but stage1 is slow and a memory hog."),
-            }
+            return Result{
+                .fail = try ErrorMsg.create(
+                    bin_file.allocator,
+                    src_loc,
+                    "TODO implement generateSymbol function pointers",
+                    .{},
+                ),
+            };
         },
         .Array => {
             // TODO populate .debug_info for the array
@@ -262,6 +282,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
     return struct {
         gpa: *Allocator,
+        air: *const Air,
         bin_file: *link.File,
         target: *const std.Target,
         mod_fn: *const Module.Fn,
@@ -421,10 +442,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
         const Self = @This();
 
-        fn generateSymbol(
+        fn generate(
            bin_file: *link.File,
            src_loc: Module.SrcLoc,
-           typed_value: TypedValue,
+           module_fn: *Module.Fn,
+           air: Air,
+           liveness: Liveness,
            code: *std.ArrayList(u8),
            debug_output: DebugInfoOutput,
        ) GenerateSymbolError!Result {
                @panic("Attempted to compile for architecture that was disabled by build configuration");
            }
 
-           const module_fn = typed_value.val.castTag(.function).?.data;
-
            assert(module_fn.owner_decl.has_tv);
            const fn_type = module_fn.owner_decl.ty;
@@ -447,6 +468,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
            var function = Self{
                .gpa = bin_file.allocator,
+               .air = &air,
+               .liveness = &liveness,
                .target = &bin_file.options.target,
                .bin_file = bin_file,
                .mod_fn = module_fn,
@@ -2131,8 +2154,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        }
 
        fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void {
-           const name_with_null = inst.name[0 .. mem.lenZ(inst.name) + 1];
-           const ty = self.air.getType(inst);
+           const ty_str = self.air.instructions.items(.data)[inst].ty_str;
+           const zir = &self.mod_fn.owner_decl.namespace.file_scope.zir;
+           const name = zir.nullTerminatedString(ty_str.str);
+           const name_with_null = name.ptr[0 .. name.len + 1];
+           const ty = self.air.getRefType(ty_str.ty);
 
            switch (mcv) {
                .register => |reg| {
@@ -2249,8 +2275,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        }
 
        fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue {
-           const inst_datas = self.air.instructions.items(.data);
-           const pl_op = inst_datas[inst].pl_op;
+           const pl_op = self.air.instructions.items(.data)[inst].pl_op;
            const fn_ty = self.air.getType(pl_op.operand);
            const callee = pl_op.operand;
            const extra = self.air.extraData(Air.Call, inst_data.payload);
@@ -2848,8 +2873,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
        }
 
        fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue {
-           const inst_datas = self.air.instructions.items(.data);
-           const pl_op = inst_datas[inst].pl_op;
+           const pl_op = self.air.instructions.items(.data)[inst].pl_op;
            const cond = try self.resolveInst(pl_op.operand);
            const extra = self.air.extraData(Air.CondBr, inst_data.payload);
            const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len];
@@ -3101,16 +3125,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
        fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue {
            if (self.liveness.isUnused(inst))
                return MCValue.dead;
-           const inst_datas = self.air.instructions.items(.data);
-           const operand = try self.resolveInst(inst_datas[inst].un_op);
+           const un_op = self.air.instructions.items(.data)[inst].un_op;
+           const operand = try self.resolveInst(un_op);
            return self.isNull(operand);
        }
 
        fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
            if (self.liveness.isUnused(inst))
                return MCValue.dead;
-           const inst_datas = self.air.instructions.items(.data);
-           const operand_ptr = try self.resolveInst(inst_datas[inst].un_op);
+           const un_op = self.air.instructions.items(.data)[inst].un_op;
+           const operand_ptr = try self.resolveInst(un_op);
            const operand: MCValue = blk: {
                if (self.reuseOperand(inst, 0, operand_ptr)) {
                    // The MCValue that holds the pointer can be re-used as the value.
@@ -3126,16 +3150,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
 
        fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue {
            if (self.liveness.isUnused(inst))
                return MCValue.dead;
-           const inst_datas = self.air.instructions.items(.data);
-           const operand = try self.resolveInst(inst_datas[inst].un_op);
+           const un_op = self.air.instructions.items(.data)[inst].un_op;
+           const operand = try self.resolveInst(un_op);
            return self.isNonNull(operand);
        }
 
        fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue {
            if (self.liveness.isUnused(inst))
                return MCValue.dead;
-           const inst_datas = self.air.instructions.items(.data);
-           const operand_ptr = try self.resolveInst(inst_datas[inst].un_op);
+           const un_op = self.air.instructions.items(.data)[inst].un_op;
+           const operand_ptr = try self.resolveInst(un_op);
            const operand: MCValue = blk: {
                if (self.reuseOperand(inst, 0, operand_ptr)) {
                    // The MCValue that holds the pointer can be re-used as the value.
@@ -3151,16 +3175,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isErr(operand); } fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -3176,16 +3200,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isNonErr(operand); } fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -3200,8 +3224,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue { // A loop is a setup to be able to jump back to the beginning. - const inst_datas = self.air.instructions.items(.data); - const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; const start_index = self.code.items.len; try self.genBody(body); @@ -4377,13 +4401,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - return self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + return self.resolveInst(un_op); } fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - return self.resolveInst(inst_datas[inst].ty_op.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4a9087d7f5..3d704a8dc5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,10 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - /// An array of function argument result-ids. 
Each index corresponds with the function argument of the same index. + air: *const Air, + + /// An array of function argument result-ids. Each index corresponds with the + /// function argument of the same index. args: std.ArrayList(ResultId), /// A counter to keep track of how many `arg` instructions we've seen yet. @@ -168,33 +171,35 @@ pub const DeclGen = struct { /// A map keeping track of which instruction generated which result-id. inst_results: InstMap, - /// We need to keep track of result ids for block labels, as well as the 'incoming' blocks for a block. + /// We need to keep track of result ids for block labels, as well as the 'incoming' + /// blocks for a block. blocks: BlockMap, /// The label of the SPIR-V block we are currently generating. current_block_label_id: ResultId, - /// The actual instructions for this function. We need to declare all locals in the first block, and because we don't - /// know which locals there are going to be, we're just going to generate everything after the locals-section in this array. - /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the initial OpLabel. These will be generated - /// into spv.binary.fn_decls directly. + /// The actual instructions for this function. We need to declare all locals in + /// the first block, and because we don't know which locals there are going to be, + /// we're just going to generate everything after the locals-section in this array. + /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the + /// initial OpLabel. These will be generated into spv.binary.fn_decls directly. code: std.ArrayList(Word), /// The decl we are currently generating code for. decl: *Decl, - /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. Memory is owned by - /// `module.gpa`. + /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. + /// Memory is owned by `module.gpa`. error_msg: ?*Module.ErrorMsg, /// Possible errors the `gen` function may return. const Error = error{ AnalysisFail, OutOfMemory }; - /// This structure is used to return information about a type typically used for arithmetic operations. - /// These types may either be integers, floats, or a vector of these. Most scalar operations also work on vectors, - /// so we can easily represent those as arithmetic types. - /// If the type is a scalar, 'inner type' refers to the scalar type. Otherwise, if its a vector, it refers - /// to the vector's element type. + /// This structure is used to return information about a type typically used for + /// arithmetic operations. These types may either be integers, floats, or a vector + /// of these. Most scalar operations also work on vectors, so we can easily represent + /// those as arithmetic types. If the type is a scalar, 'inner type' refers to the + /// scalar type. Otherwise, if its a vector, it refers to the vector's element type. const ArithmeticTypeInfo = struct { /// A classification of the inner type. const Class = enum { @@ -206,13 +211,14 @@ pub const DeclGen = struct { /// the relevant capability is enabled). integer, - /// A regular float. These are all required to be natively supported. Floating points for - /// which the relevant capability is not enabled are not emulated. + /// A regular float. These are all required to be natively supported. Floating points + /// for which the relevant capability is not enabled are not emulated. 
float, - /// An integer of a 'strange' size (which' bit size is not the same as its backing type. **Note**: this - /// may **also** include power-of-2 integers for which the relevant capability is not enabled), but still - /// within the limits of the largest natively supported integer type. + /// An integer of a 'strange' size (which' bit size is not the same as its backing + /// type. **Note**: this may **also** include power-of-2 integers for which the + /// relevant capability is not enabled), but still within the limits of the largest + /// natively supported integer type. strange_integer, /// An integer with more bits than the largest natively supported integer type. @@ -220,7 +226,7 @@ pub const DeclGen = struct { }; /// The number of bits in the inner type. - /// Note: this is the actual number of bits of the type, not the size of the backing integer. + /// This is the actual number of bits of the type, not the size of the backing integer. bits: u16, /// Whether the type is a vector. @@ -234,10 +240,12 @@ pub const DeclGen = struct { class: Class, }; - /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, only set when `gen` is called. + /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, + /// only set when `gen` is called. pub fn init(spv: *SPIRVModule) DeclGen { return .{ .spv = spv, + .air = undefined, .args = std.ArrayList(ResultId).init(spv.gpa), .next_arg_index = undefined, .inst_results = InstMap.init(spv.gpa), @@ -252,8 +260,9 @@ pub const DeclGen = struct { /// Generate the code for `decl`. If a reportable error occured during code generation, /// a message is returned by this function. Callee owns the memory. If this function returns such /// a reportable error, it is valid to be called again for a different decl. - pub fn gen(self: *DeclGen, decl: *Decl) !?*Module.ErrorMsg { + pub fn gen(self: *DeclGen, decl: *Decl, air: Air) !?*Module.ErrorMsg { // Reset internal resources, we don't want to re-allocate these. + self.air = &air; self.args.items.len = 0; self.next_arg_index = 0; self.inst_results.clearRetainingCapacity(); @@ -680,7 +689,7 @@ pub const DeclGen = struct { .br => return self.genBr(inst), .breakpoint => return, - .condbr => return self.genCondBr(inst), + .cond_br => return self.genCondBr(inst), .constant => unreachable, .dbg_stmt => return self.genDbgStmt(inst), .loop => return self.genLoop(inst), @@ -688,6 +697,10 @@ pub const DeclGen = struct { .store => return self.genStore(inst), .unreach => return self.genUnreach(), // zig fmt: on + + else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{ + @tagName(tag), + }), }; try self.inst_results.putNoClobber(inst, result_id); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index bfae799462..8a2e877d42 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -135,6 +135,10 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { const tracy = trace(@src()); defer tracy.end(); + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/register_manager.zig b/src/register_manager.zig index 8aca7fcc3d..f0d128e7f9 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -20,7 +20,7 @@ pub fn RegisterManager( ) type { return struct { /// The key must be canonical register. 
- registers: [callee_preserved_regs.len]?*ir.Inst = [_]?*ir.Inst{null} ** callee_preserved_regs.len, + registers: [callee_preserved_regs.len]?Air.Inst.Index = [_]?Air.Inst.Index{null} ** callee_preserved_regs.len, free_registers: FreeRegInt = math.maxInt(FreeRegInt), /// Tracks all registers allocated in the course of this function allocated_registers: FreeRegInt = 0, @@ -75,7 +75,7 @@ pub fn RegisterManager( pub fn tryAllocRegs( self: *Self, comptime count: comptime_int, - insts: [count]?*ir.Inst, + insts: [count]?Air.Inst.Index, exceptions: []const Register, ) ?[count]Register { comptime if (callee_preserved_regs.len == 0) return null; @@ -113,7 +113,7 @@ pub fn RegisterManager( /// Allocates a register and optionally tracks it with a /// corresponding instruction. Returns `null` if all registers /// are allocated. - pub fn tryAllocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) ?Register { + pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) ?Register { return if (tryAllocRegs(self, 1, .{inst}, exceptions)) |regs| regs[0] else null; } @@ -123,7 +123,7 @@ pub fn RegisterManager( pub fn allocRegs( self: *Self, comptime count: comptime_int, - insts: [count]?*ir.Inst, + insts: [count]?Air.Inst.Index, exceptions: []const Register, ) ![count]Register { comptime assert(count > 0 and count <= callee_preserved_regs.len); @@ -168,14 +168,14 @@ pub fn RegisterManager( /// Allocates a register and optionally tracks it with a /// corresponding instruction. - pub fn allocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) !Register { + pub fn allocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) !Register { return (try self.allocRegs(1, .{inst}, exceptions))[0]; } /// Spills the register if it is currently allocated. If a /// corresponding instruction is passed, will also track this /// register. - pub fn getReg(self: *Self, reg: Register, inst: ?*ir.Inst) !void { + pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) !void { const index = reg.allocIndex() orelse return; if (inst) |tracked_inst| @@ -202,7 +202,7 @@ pub fn RegisterManager( /// Allocates the specified register with the specified /// instruction. Asserts that the register is free and no /// spilling is necessary. 
- pub fn getRegAssumeFree(self: *Self, reg: Register, inst: *ir.Inst) void { + pub fn getRegAssumeFree(self: *Self, reg: Register, inst: Air.Inst.Index) void { const index = reg.allocIndex() orelse return; assert(self.registers[index] == null); @@ -264,7 +264,7 @@ fn MockFunction(comptime Register: type) type { self.spilled.deinit(self.allocator); } - pub fn spillInstruction(self: *Self, reg: Register, inst: *ir.Inst) !void { + pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { _ = inst; try self.spilled.append(self.allocator, reg); } From 0ffc6b5cc300e750029c9ff22f6a1ed0596496d6 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jul 2021 15:42:22 -0700 Subject: [PATCH 07/53] cmake: fix Liveness.zig file path --- CMakeLists.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 39db11773c..0a8da2dd49 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -597,7 +597,7 @@ set(ZIG_STAGE2_SOURCES "${CMAKE_SOURCE_DIR}/src/link/tapi/yaml.zig" "${CMAKE_SOURCE_DIR}/src/link/C/zig.h" "${CMAKE_SOURCE_DIR}/src/link/msdos-stub.bin" - "${CMAKE_SOURCE_DIR}/src/liveness.zig" + "${CMAKE_SOURCE_DIR}/src/Liveness.zig" "${CMAKE_SOURCE_DIR}/src/main.zig" "${CMAKE_SOURCE_DIR}/src/mingw.zig" "${CMAKE_SOURCE_DIR}/src/musl.zig" From 0f38f686964664f68e013ec3c63cfe655001f165 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 12 Jul 2021 19:51:31 -0700 Subject: [PATCH 08/53] stage2: Air and Liveness are passed ephemerally to the link infrastructure, instead of being stored with Module.Fn. This moves towards a strategy to make more efficient use of memory by not storing Air or Liveness data in the Fn struct, but computing it on demand, immediately sending it to the backend, and then immediately freeing it. Backends which want to defer codegen until flush() such as SPIR-V must move the Air/Liveness data upon `updateFunc` being called and keep track of that data in the backend implementation itself. --- BRANCH_TODO | 5 + src/Compilation.zig | 2 +- src/Liveness.zig | 9 +- src/Module.zig | 5 - src/Sema.zig | 752 +++++++++++++++++++++--------------------- src/codegen.zig | 7 +- src/codegen/c.zig | 9 +- src/codegen/llvm.zig | 3 + src/codegen/spirv.zig | 3 +- src/codegen/wasm.zig | 88 ++--- src/link.zig | 34 +- src/link/C.zig | 28 +- src/link/Coff.zig | 56 +++- src/link/Elf.zig | 558 +++++++++++++++++-------------- src/link/MachO.zig | 55 +++ src/link/Plan9.zig | 29 +- src/link/SpirV.zig | 24 +- src/link/Wasm.zig | 59 +++- 18 files changed, 1018 insertions(+), 708 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index 585c8adf44..c7f3923559 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -690,3 +690,8 @@ pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { } } + /// For debugging purposes. 
+ pub fn dump(func: *Fn, mod: Module) void { + ir.dumpFn(mod, func); + } + diff --git a/src/Compilation.zig b/src/Compilation.zig index 74ad7b2aae..90224a77d1 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2027,7 +2027,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { - func.dump(module.*); + @panic("TODO implement dumping AIR and liveness"); } assert(decl.ty.hasCodeGenBits()); diff --git a/src/Liveness.zig b/src/Liveness.zig index 0cbac61118..1402a5997b 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -50,7 +50,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { var a: Analysis = .{ .gpa = gpa, - .air = &air, + .air = air, .table = .{}, .tomb_bits = try gpa.alloc( usize, @@ -65,7 +65,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { defer a.table.deinit(gpa); const main_body = air.getMainBody(); - try a.table.ensureTotalCapacity(main_body.len); + try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len)); try analyzeWithContext(&a, null, main_body); return Liveness{ .tomb_bits = a.tomb_bits, @@ -108,9 +108,10 @@ const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. const Analysis = struct { gpa: *Allocator, - air: *const Air, + air: Air, table: std.AutoHashMapUnmanaged(Air.Inst.Index, void), tomb_bits: []usize, + special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { @@ -165,7 +166,7 @@ fn analyzeWithContext( fn analyzeInst( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, ) Allocator.Error!void { const gpa = a.gpa; diff --git a/src/Module.zig b/src/Module.zig index 8971a57487..5972c2bdcf 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -769,11 +769,6 @@ pub const Fn = struct { success, }; - /// For debugging purposes. 
- pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - pub fn deinit(func: *Fn, gpa: *Allocator) void { if (func.getInferredErrorSet()) |map| { map.deinit(gpa); diff --git a/src/Sema.zig b/src/Sema.zig index d7ec01696f..54c42a482d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -69,7 +69,7 @@ const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); const target_util = @import("target.zig"); -pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Index); +pub const InstMap = std.AutoHashMapUnmanaged(Zir.Inst.Index, Air.Inst.Ref); pub fn deinit(sema: *Sema) void { const gpa = sema.gpa; @@ -158,344 +158,344 @@ pub fn analyzeBody( var i: usize = 0; while (true) { const inst = body[i]; - const air_inst = switch (tags[inst]) { + const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - .alloc => try sema.zirAlloc(block, inst), - .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - .alloc_mut => try sema.zirAllocMut(block, inst), - .alloc_comptime => try sema.zirAllocComptime(block, inst), - .anyframe_type => try sema.zirAnyframeType(block, inst), - .array_cat => try sema.zirArrayCat(block, inst), - .array_mul => try sema.zirArrayMul(block, inst), - .array_type => try sema.zirArrayType(block, inst), - .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - .vector_type => try sema.zirVectorType(block, inst), - .as => try sema.zirAs(block, inst), - .as_node => try sema.zirAsNode(block, inst), - .bit_and => try sema.zirBitwise(block, inst, .bit_and), - .bit_not => try sema.zirBitNot(block, inst), - .bit_or => try sema.zirBitwise(block, inst, .bit_or), - .bitcast => try sema.zirBitcast(block, inst), - .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - .block => try sema.zirBlock(block, inst), - .suspend_block => try sema.zirSuspendBlock(block, inst), - .bool_not => try sema.zirBoolNot(block, inst), - .bool_and => try sema.zirBoolOp(block, inst, false), - .bool_or => try sema.zirBoolOp(block, inst, true), - .bool_br_and => try sema.zirBoolBr(block, inst, false), - .bool_br_or => try sema.zirBoolBr(block, inst, true), - .c_import => try sema.zirCImport(block, inst), - .call => try sema.zirCall(block, inst, .auto, false), - .call_chkused => try sema.zirCall(block, inst, .auto, true), - .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), - .call_async => try sema.zirCall(block, inst, .async_kw, false), - .cmp_eq => try sema.zirCmp(block, inst, .eq), - .cmp_gt => try sema.zirCmp(block, inst, .gt), - .cmp_gte => try sema.zirCmp(block, inst, .gte), - .cmp_lt => try sema.zirCmp(block, inst, .lt), - .cmp_lte => try sema.zirCmp(block, inst, .lte), - .cmp_neq => try sema.zirCmp(block, inst, .neq), - .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), - .decl_ref => try sema.zirDeclRef(block, inst), - .decl_val => try sema.zirDeclVal(block, inst), - .load => try sema.zirLoad(block, inst), - .elem_ptr => try sema.zirElemPtr(block, inst), - .elem_ptr_node => try sema.zirElemPtrNode(block, inst), - .elem_val => try sema.zirElemVal(block, inst), - .elem_val_node => try sema.zirElemValNode(block, inst), - .elem_type => try sema.zirElemType(block, inst), - 
.enum_literal => try sema.zirEnumLiteral(block, inst), - .enum_to_int => try sema.zirEnumToInt(block, inst), - .int_to_enum => try sema.zirIntToEnum(block, inst), - .err_union_code => try sema.zirErrUnionCode(block, inst), - .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), - .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), - .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), - .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), - .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), - .error_union_type => try sema.zirErrorUnionType(block, inst), - .error_value => try sema.zirErrorValue(block, inst), - .error_to_int => try sema.zirErrorToInt(block, inst), - .int_to_error => try sema.zirIntToError(block, inst), - .field_ptr => try sema.zirFieldPtr(block, inst), - .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), - .field_val => try sema.zirFieldVal(block, inst), - .field_val_named => try sema.zirFieldValNamed(block, inst), - .func => try sema.zirFunc(block, inst, false), - .func_inferred => try sema.zirFunc(block, inst, true), - .import => try sema.zirImport(block, inst), - .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), - .int => try sema.zirInt(block, inst), - .int_big => try sema.zirIntBig(block, inst), - .float => try sema.zirFloat(block, inst), - .float128 => try sema.zirFloat128(block, inst), - .int_type => try sema.zirIntType(block, inst), - .is_non_err => try sema.zirIsNonErr(block, inst), - .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), - .is_non_null => try sema.zirIsNonNull(block, inst), - .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), - .loop => try sema.zirLoop(block, inst), - .merge_error_sets => try sema.zirMergeErrorSets(block, inst), - .negate => try sema.zirNegate(block, inst, .sub), - .negate_wrap => try sema.zirNegate(block, inst, .subwrap), - .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), - .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), - .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), - .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), - .optional_type => try sema.zirOptionalType(block, inst), - .param_type => try sema.zirParamType(block, inst), - .ptr_type => try sema.zirPtrType(block, inst), - .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), - .ref => try sema.zirRef(block, inst), - .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), - .shl => try sema.zirShl(block, inst), - .shr => try sema.zirShr(block, inst), - .slice_end => try sema.zirSliceEnd(block, inst), - .slice_sentinel => try sema.zirSliceSentinel(block, inst), - .slice_start => try sema.zirSliceStart(block, inst), - .str => try sema.zirStr(block, inst), - .switch_block => try sema.zirSwitchBlock(block, inst, false, .none), - .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), - .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), - .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), - .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), - .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), - .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), - .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, 
.none), - .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), - .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), - .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), - .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), - .switch_capture => try sema.zirSwitchCapture(block, inst, false, false), - .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), - .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), - .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), - .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), - .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), - .type_info => try sema.zirTypeInfo(block, inst), - .size_of => try sema.zirSizeOf(block, inst), - .bit_size_of => try sema.zirBitSizeOf(block, inst), - .typeof => try sema.zirTypeof(block, inst), - .typeof_elem => try sema.zirTypeofElem(block, inst), - .log2_int_type => try sema.zirLog2IntType(block, inst), - .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), - .xor => try sema.zirBitwise(block, inst, .xor), - .struct_init_empty => try sema.zirStructInitEmpty(block, inst), - .struct_init => try sema.zirStructInit(block, inst, false), - .struct_init_ref => try sema.zirStructInit(block, inst, true), - .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), - .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), - .array_init => try sema.zirArrayInit(block, inst, false), - .array_init_ref => try sema.zirArrayInit(block, inst, true), - .array_init_anon => try sema.zirArrayInitAnon(block, inst, false), - .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), - .union_init_ptr => try sema.zirUnionInitPtr(block, inst), - .field_type => try sema.zirFieldType(block, inst), - .field_type_ref => try sema.zirFieldTypeRef(block, inst), - .ptr_to_int => try sema.zirPtrToInt(block, inst), - .align_of => try sema.zirAlignOf(block, inst), - .bool_to_int => try sema.zirBoolToInt(block, inst), - .embed_file => try sema.zirEmbedFile(block, inst), - .error_name => try sema.zirErrorName(block, inst), - .tag_name => try sema.zirTagName(block, inst), - .reify => try sema.zirReify(block, inst), - .type_name => try sema.zirTypeName(block, inst), - .frame_type => try sema.zirFrameType(block, inst), - .frame_size => try sema.zirFrameSize(block, inst), - .float_to_int => try sema.zirFloatToInt(block, inst), - .int_to_float => try sema.zirIntToFloat(block, inst), - .int_to_ptr => try sema.zirIntToPtr(block, inst), - .float_cast => try sema.zirFloatCast(block, inst), - .int_cast => try sema.zirIntCast(block, inst), - .err_set_cast => try sema.zirErrSetCast(block, inst), - .ptr_cast => try sema.zirPtrCast(block, inst), - .truncate => try sema.zirTruncate(block, inst), - .align_cast => try sema.zirAlignCast(block, inst), - .has_decl => try sema.zirHasDecl(block, inst), - .has_field => try sema.zirHasField(block, inst), - .clz => try sema.zirClz(block, inst), - .ctz => try sema.zirCtz(block, inst), - .pop_count => try sema.zirPopCount(block, inst), - .byte_swap => try sema.zirByteSwap(block, inst), - .bit_reverse => try sema.zirBitReverse(block, inst), - .div_exact => try sema.zirDivExact(block, inst), - .div_floor => try sema.zirDivFloor(block, inst), - .div_trunc => try sema.zirDivTrunc(block, inst), - .mod => try sema.zirMod(block, inst), - 
.rem => try sema.zirRem(block, inst), - .shl_exact => try sema.zirShlExact(block, inst), - .shr_exact => try sema.zirShrExact(block, inst), - .bit_offset_of => try sema.zirBitOffsetOf(block, inst), - .offset_of => try sema.zirOffsetOf(block, inst), - .cmpxchg_strong => try sema.zirCmpxchg(block, inst), - .cmpxchg_weak => try sema.zirCmpxchg(block, inst), - .splat => try sema.zirSplat(block, inst), - .reduce => try sema.zirReduce(block, inst), - .shuffle => try sema.zirShuffle(block, inst), - .atomic_load => try sema.zirAtomicLoad(block, inst), - .atomic_rmw => try sema.zirAtomicRmw(block, inst), - .atomic_store => try sema.zirAtomicStore(block, inst), - .mul_add => try sema.zirMulAdd(block, inst), - .builtin_call => try sema.zirBuiltinCall(block, inst), - .field_ptr_type => try sema.zirFieldPtrType(block, inst), - .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), - .memcpy => try sema.zirMemcpy(block, inst), - .memset => try sema.zirMemset(block, inst), - .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), - .@"resume" => try sema.zirResume(block, inst), - .@"await" => try sema.zirAwait(block, inst, false), - .await_nosuspend => try sema.zirAwait(block, inst, true), - .extended => try sema.zirExtended(block, inst), + //.alloc => try sema.zirAlloc(block, inst), + //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), + //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + //.alloc_mut => try sema.zirAllocMut(block, inst), + //.alloc_comptime => try sema.zirAllocComptime(block, inst), + //.anyframe_type => try sema.zirAnyframeType(block, inst), + //.array_cat => try sema.zirArrayCat(block, inst), + //.array_mul => try sema.zirArrayMul(block, inst), + //.array_type => try sema.zirArrayType(block, inst), + //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + //.vector_type => try sema.zirVectorType(block, inst), + //.as => try sema.zirAs(block, inst), + //.as_node => try sema.zirAsNode(block, inst), + //.bit_and => try sema.zirBitwise(block, inst, .bit_and), + //.bit_not => try sema.zirBitNot(block, inst), + //.bit_or => try sema.zirBitwise(block, inst, .bit_or), + //.bitcast => try sema.zirBitcast(block, inst), + //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + //.block => try sema.zirBlock(block, inst), + //.suspend_block => try sema.zirSuspendBlock(block, inst), + //.bool_not => try sema.zirBoolNot(block, inst), + //.bool_and => try sema.zirBoolOp(block, inst, false), + //.bool_or => try sema.zirBoolOp(block, inst, true), + //.bool_br_and => try sema.zirBoolBr(block, inst, false), + //.bool_br_or => try sema.zirBoolBr(block, inst, true), + //.c_import => try sema.zirCImport(block, inst), + //.call => try sema.zirCall(block, inst, .auto, false), + //.call_chkused => try sema.zirCall(block, inst, .auto, true), + //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + //.call_async => try sema.zirCall(block, inst, .async_kw, false), + //.cmp_eq => try sema.zirCmp(block, inst, .eq), + //.cmp_gt => try sema.zirCmp(block, inst, .gt), + //.cmp_gte => try sema.zirCmp(block, inst, .gte), + //.cmp_lt => try sema.zirCmp(block, inst, .lt), + //.cmp_lte => try sema.zirCmp(block, inst, .lte), + //.cmp_neq => try sema.zirCmp(block, inst, .neq), + //.coerce_result_ptr => try 
sema.zirCoerceResultPtr(block, inst), + //.decl_ref => try sema.zirDeclRef(block, inst), + //.decl_val => try sema.zirDeclVal(block, inst), + //.load => try sema.zirLoad(block, inst), + //.elem_ptr => try sema.zirElemPtr(block, inst), + //.elem_ptr_node => try sema.zirElemPtrNode(block, inst), + //.elem_val => try sema.zirElemVal(block, inst), + //.elem_val_node => try sema.zirElemValNode(block, inst), + //.elem_type => try sema.zirElemType(block, inst), + //.enum_literal => try sema.zirEnumLiteral(block, inst), + //.enum_to_int => try sema.zirEnumToInt(block, inst), + //.int_to_enum => try sema.zirIntToEnum(block, inst), + //.err_union_code => try sema.zirErrUnionCode(block, inst), + //.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), + //.err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), + //.err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), + //.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), + //.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), + //.error_union_type => try sema.zirErrorUnionType(block, inst), + //.error_value => try sema.zirErrorValue(block, inst), + //.error_to_int => try sema.zirErrorToInt(block, inst), + //.int_to_error => try sema.zirIntToError(block, inst), + //.field_ptr => try sema.zirFieldPtr(block, inst), + //.field_ptr_named => try sema.zirFieldPtrNamed(block, inst), + //.field_val => try sema.zirFieldVal(block, inst), + //.field_val_named => try sema.zirFieldValNamed(block, inst), + //.func => try sema.zirFunc(block, inst, false), + //.func_inferred => try sema.zirFunc(block, inst, true), + //.import => try sema.zirImport(block, inst), + //.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), + //.int => try sema.zirInt(block, inst), + //.int_big => try sema.zirIntBig(block, inst), + //.float => try sema.zirFloat(block, inst), + //.float128 => try sema.zirFloat128(block, inst), + //.int_type => try sema.zirIntType(block, inst), + //.is_non_err => try sema.zirIsNonErr(block, inst), + //.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), + //.is_non_null => try sema.zirIsNonNull(block, inst), + //.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), + //.loop => try sema.zirLoop(block, inst), + //.merge_error_sets => try sema.zirMergeErrorSets(block, inst), + //.negate => try sema.zirNegate(block, inst, .sub), + //.negate_wrap => try sema.zirNegate(block, inst, .subwrap), + //.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), + //.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), + //.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), + //.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), + //.optional_type => try sema.zirOptionalType(block, inst), + //.param_type => try sema.zirParamType(block, inst), + //.ptr_type => try sema.zirPtrType(block, inst), + //.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), + //.ref => try sema.zirRef(block, inst), + //.ret_err_value_code => try sema.zirRetErrValueCode(block, inst), + //.shl => try sema.zirShl(block, inst), + //.shr => try sema.zirShr(block, inst), + //.slice_end => try sema.zirSliceEnd(block, inst), + //.slice_sentinel => try sema.zirSliceSentinel(block, inst), + //.slice_start => try sema.zirSliceStart(block, inst), + //.str => try sema.zirStr(block, inst), + //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), + 
//.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), + //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), + //.switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), + //.switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), + //.switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), + //.switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), + //.switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), + //.switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), + //.switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), + //.switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), + //.switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), + //.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), + //.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), + //.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), + //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), + //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), + //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), + //.type_info => try sema.zirTypeInfo(block, inst), + //.size_of => try sema.zirSizeOf(block, inst), + //.bit_size_of => try sema.zirBitSizeOf(block, inst), + //.typeof => try sema.zirTypeof(block, inst), + //.typeof_elem => try sema.zirTypeofElem(block, inst), + //.log2_int_type => try sema.zirLog2IntType(block, inst), + //.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), + //.xor => try sema.zirBitwise(block, inst, .xor), + //.struct_init_empty => try sema.zirStructInitEmpty(block, inst), + //.struct_init => try sema.zirStructInit(block, inst, false), + //.struct_init_ref => try sema.zirStructInit(block, inst, true), + //.struct_init_anon => try sema.zirStructInitAnon(block, inst, false), + //.struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), + //.array_init => try sema.zirArrayInit(block, inst, false), + //.array_init_ref => try sema.zirArrayInit(block, inst, true), + //.array_init_anon => try sema.zirArrayInitAnon(block, inst, false), + //.array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), + //.union_init_ptr => try sema.zirUnionInitPtr(block, inst), + //.field_type => try sema.zirFieldType(block, inst), + //.field_type_ref => try sema.zirFieldTypeRef(block, inst), + //.ptr_to_int => try sema.zirPtrToInt(block, inst), + //.align_of => try sema.zirAlignOf(block, inst), + //.bool_to_int => try sema.zirBoolToInt(block, inst), + //.embed_file => try sema.zirEmbedFile(block, inst), + //.error_name => try sema.zirErrorName(block, inst), + //.tag_name => try sema.zirTagName(block, inst), + //.reify => try sema.zirReify(block, inst), + //.type_name => try sema.zirTypeName(block, inst), + //.frame_type => try sema.zirFrameType(block, inst), + //.frame_size => try sema.zirFrameSize(block, inst), + //.float_to_int => try sema.zirFloatToInt(block, inst), + //.int_to_float => try sema.zirIntToFloat(block, inst), + //.int_to_ptr => try sema.zirIntToPtr(block, inst), + //.float_cast => try sema.zirFloatCast(block, inst), + //.int_cast => try sema.zirIntCast(block, inst), + //.err_set_cast => try sema.zirErrSetCast(block, inst), + 
//.ptr_cast => try sema.zirPtrCast(block, inst), + //.truncate => try sema.zirTruncate(block, inst), + //.align_cast => try sema.zirAlignCast(block, inst), + //.has_decl => try sema.zirHasDecl(block, inst), + //.has_field => try sema.zirHasField(block, inst), + //.clz => try sema.zirClz(block, inst), + //.ctz => try sema.zirCtz(block, inst), + //.pop_count => try sema.zirPopCount(block, inst), + //.byte_swap => try sema.zirByteSwap(block, inst), + //.bit_reverse => try sema.zirBitReverse(block, inst), + //.div_exact => try sema.zirDivExact(block, inst), + //.div_floor => try sema.zirDivFloor(block, inst), + //.div_trunc => try sema.zirDivTrunc(block, inst), + //.mod => try sema.zirMod(block, inst), + //.rem => try sema.zirRem(block, inst), + //.shl_exact => try sema.zirShlExact(block, inst), + //.shr_exact => try sema.zirShrExact(block, inst), + //.bit_offset_of => try sema.zirBitOffsetOf(block, inst), + //.offset_of => try sema.zirOffsetOf(block, inst), + //.cmpxchg_strong => try sema.zirCmpxchg(block, inst), + //.cmpxchg_weak => try sema.zirCmpxchg(block, inst), + //.splat => try sema.zirSplat(block, inst), + //.reduce => try sema.zirReduce(block, inst), + //.shuffle => try sema.zirShuffle(block, inst), + //.atomic_load => try sema.zirAtomicLoad(block, inst), + //.atomic_rmw => try sema.zirAtomicRmw(block, inst), + //.atomic_store => try sema.zirAtomicStore(block, inst), + //.mul_add => try sema.zirMulAdd(block, inst), + //.builtin_call => try sema.zirBuiltinCall(block, inst), + //.field_ptr_type => try sema.zirFieldPtrType(block, inst), + //.field_parent_ptr => try sema.zirFieldParentPtr(block, inst), + //.memcpy => try sema.zirMemcpy(block, inst), + //.memset => try sema.zirMemset(block, inst), + //.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), + //.@"resume" => try sema.zirResume(block, inst), + //.@"await" => try sema.zirAwait(block, inst, false), + //.await_nosuspend => try sema.zirAwait(block, inst, true), + //.extended => try sema.zirExtended(block, inst), - .sqrt => try sema.zirUnaryMath(block, inst), - .sin => try sema.zirUnaryMath(block, inst), - .cos => try sema.zirUnaryMath(block, inst), - .exp => try sema.zirUnaryMath(block, inst), - .exp2 => try sema.zirUnaryMath(block, inst), - .log => try sema.zirUnaryMath(block, inst), - .log2 => try sema.zirUnaryMath(block, inst), - .log10 => try sema.zirUnaryMath(block, inst), - .fabs => try sema.zirUnaryMath(block, inst), - .floor => try sema.zirUnaryMath(block, inst), - .ceil => try sema.zirUnaryMath(block, inst), - .trunc => try sema.zirUnaryMath(block, inst), - .round => try sema.zirUnaryMath(block, inst), + //.sqrt => try sema.zirUnaryMath(block, inst), + //.sin => try sema.zirUnaryMath(block, inst), + //.cos => try sema.zirUnaryMath(block, inst), + //.exp => try sema.zirUnaryMath(block, inst), + //.exp2 => try sema.zirUnaryMath(block, inst), + //.log => try sema.zirUnaryMath(block, inst), + //.log2 => try sema.zirUnaryMath(block, inst), + //.log10 => try sema.zirUnaryMath(block, inst), + //.fabs => try sema.zirUnaryMath(block, inst), + //.floor => try sema.zirUnaryMath(block, inst), + //.ceil => try sema.zirUnaryMath(block, inst), + //.trunc => try sema.zirUnaryMath(block, inst), + //.round => try sema.zirUnaryMath(block, inst), - .opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), - .opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), - .opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), - .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), - 
.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), - .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), + //.opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), + //.opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), + //.opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), + //.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), + //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), + //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - .add => try sema.zirArithmetic(block, inst), - .addwrap => try sema.zirArithmetic(block, inst), - .div => try sema.zirArithmetic(block, inst), - .mod_rem => try sema.zirArithmetic(block, inst), - .mul => try sema.zirArithmetic(block, inst), - .mulwrap => try sema.zirArithmetic(block, inst), - .sub => try sema.zirArithmetic(block, inst), - .subwrap => try sema.zirArithmetic(block, inst), + //.add => try sema.zirArithmetic(block, inst), + //.addwrap => try sema.zirArithmetic(block, inst), + //.div => try sema.zirArithmetic(block, inst), + //.mod_rem => try sema.zirArithmetic(block, inst), + //.mul => try sema.zirArithmetic(block, inst), + //.mulwrap => try sema.zirArithmetic(block, inst), + //.sub => try sema.zirArithmetic(block, inst), + //.subwrap => try sema.zirArithmetic(block, inst), - // Instructions that we know to *always* be noreturn based solely on their tag. - // These functions match the return type of analyzeBody so that we can - // tail call them here. - .break_inline => return inst, - .condbr => return sema.zirCondbr(block, inst), - .@"break" => return sema.zirBreak(block, inst), - .compile_error => return sema.zirCompileError(block, inst), - .ret_coerce => return sema.zirRetCoerce(block, inst, true), - .ret_node => return sema.zirRetNode(block, inst), - .ret_err_value => return sema.zirRetErrValue(block, inst), - .@"unreachable" => return sema.zirUnreachable(block, inst), - .repeat => return sema.zirRepeat(block, inst), - .panic => return sema.zirPanic(block, inst), - // zig fmt: on + //// Instructions that we know to *always* be noreturn based solely on their tag. + //// These functions match the return type of analyzeBody so that we can + //// tail call them here. + //.break_inline => return inst, + //.condbr => return sema.zirCondbr(block, inst), + //.@"break" => return sema.zirBreak(block, inst), + //.compile_error => return sema.zirCompileError(block, inst), + //.ret_coerce => return sema.zirRetCoerce(block, inst, true), + //.ret_node => return sema.zirRetNode(block, inst), + //.ret_err_value => return sema.zirRetErrValue(block, inst), + //.@"unreachable" => return sema.zirUnreachable(block, inst), + //.repeat => return sema.zirRepeat(block, inst), + //.panic => return sema.zirPanic(block, inst), + //// zig fmt: on - // Instructions that we know can *never* be noreturn based solely on - // their tag. We avoid needlessly checking if they are noreturn and - // continue the loop. - // We also know that they cannot be referenced later, so we avoid - // putting them into the map. 
- .breakpoint => { - try sema.zirBreakpoint(block, inst); - i += 1; - continue; - }, - .fence => { - try sema.zirFence(block, inst); - i += 1; - continue; - }, - .dbg_stmt => { - try sema.zirDbgStmt(block, inst); - i += 1; - continue; - }, - .ensure_err_payload_void => { - try sema.zirEnsureErrPayloadVoid(block, inst); - i += 1; - continue; - }, - .ensure_result_non_error => { - try sema.zirEnsureResultNonError(block, inst); - i += 1; - continue; - }, - .ensure_result_used => { - try sema.zirEnsureResultUsed(block, inst); - i += 1; - continue; - }, - .set_eval_branch_quota => { - try sema.zirSetEvalBranchQuota(block, inst); - i += 1; - continue; - }, - .store => { - try sema.zirStore(block, inst); - i += 1; - continue; - }, - .store_node => { - try sema.zirStoreNode(block, inst); - i += 1; - continue; - }, - .store_to_block_ptr => { - try sema.zirStoreToBlockPtr(block, inst); - i += 1; - continue; - }, - .store_to_inferred_ptr => { - try sema.zirStoreToInferredPtr(block, inst); - i += 1; - continue; - }, - .resolve_inferred_alloc => { - try sema.zirResolveInferredAlloc(block, inst); - i += 1; - continue; - }, - .validate_struct_init_ptr => { - try sema.zirValidateStructInitPtr(block, inst); - i += 1; - continue; - }, - .validate_array_init_ptr => { - try sema.zirValidateArrayInitPtr(block, inst); - i += 1; - continue; - }, - .@"export" => { - try sema.zirExport(block, inst); - i += 1; - continue; - }, - .set_align_stack => { - try sema.zirSetAlignStack(block, inst); - i += 1; - continue; - }, - .set_cold => { - try sema.zirSetCold(block, inst); - i += 1; - continue; - }, - .set_float_mode => { - try sema.zirSetFloatMode(block, inst); - i += 1; - continue; - }, - .set_runtime_safety => { - try sema.zirSetRuntimeSafety(block, inst); - i += 1; - continue; - }, + //// Instructions that we know can *never* be noreturn based solely on + //// their tag. We avoid needlessly checking if they are noreturn and + //// continue the loop. + //// We also know that they cannot be referenced later, so we avoid + //// putting them into the map. 
+ //.breakpoint => { + // try sema.zirBreakpoint(block, inst); + // i += 1; + // continue; + //}, + //.fence => { + // try sema.zirFence(block, inst); + // i += 1; + // continue; + //}, + //.dbg_stmt => { + // try sema.zirDbgStmt(block, inst); + // i += 1; + // continue; + //}, + //.ensure_err_payload_void => { + // try sema.zirEnsureErrPayloadVoid(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_non_error => { + // try sema.zirEnsureResultNonError(block, inst); + // i += 1; + // continue; + //}, + //.ensure_result_used => { + // try sema.zirEnsureResultUsed(block, inst); + // i += 1; + // continue; + //}, + //.set_eval_branch_quota => { + // try sema.zirSetEvalBranchQuota(block, inst); + // i += 1; + // continue; + //}, + //.store => { + // try sema.zirStore(block, inst); + // i += 1; + // continue; + //}, + //.store_node => { + // try sema.zirStoreNode(block, inst); + // i += 1; + // continue; + //}, + //.store_to_block_ptr => { + // try sema.zirStoreToBlockPtr(block, inst); + // i += 1; + // continue; + //}, + //.store_to_inferred_ptr => { + // try sema.zirStoreToInferredPtr(block, inst); + // i += 1; + // continue; + //}, + //.resolve_inferred_alloc => { + // try sema.zirResolveInferredAlloc(block, inst); + // i += 1; + // continue; + //}, + //.validate_struct_init_ptr => { + // try sema.zirValidateStructInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.validate_array_init_ptr => { + // try sema.zirValidateArrayInitPtr(block, inst); + // i += 1; + // continue; + //}, + //.@"export" => { + // try sema.zirExport(block, inst); + // i += 1; + // continue; + //}, + //.set_align_stack => { + // try sema.zirSetAlignStack(block, inst); + // i += 1; + // continue; + //}, + //.set_cold => { + // try sema.zirSetCold(block, inst); + // i += 1; + // continue; + //}, + //.set_float_mode => { + // try sema.zirSetFloatMode(block, inst); + // i += 1; + // continue; + //}, + //.set_runtime_safety => { + // try sema.zirSetRuntimeSafety(block, inst); + // i += 1; + // continue; + //}, // Special case instructions to handle comptime control flow. .repeat_inline => { @@ -505,37 +505,38 @@ pub fn analyzeBody( i = 0; continue; }, - .block_inline => blk: { - // Directly analyze the block body without introducing a new block. 
- const inst_data = datas[inst].pl_node; - const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, - .condbr_inline => blk: { - const inst_data = datas[inst].pl_node; - const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; - const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); - const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; - const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); - const inline_body = if (cond.val.toBool()) then_body else else_body; - const break_inst = try sema.analyzeBody(block, inline_body); - const break_data = datas[break_inst].@"break"; - if (inst == break_data.block_inst) { - break :blk try sema.resolveInst(break_data.operand); - } else { - return break_inst; - } - }, + //.block_inline => blk: { + // // Directly analyze the block body without introducing a new block. + // const inst_data = datas[inst].pl_node; + // const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + // const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + //.condbr_inline => blk: { + // const inst_data = datas[inst].pl_node; + // const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; + // const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); + // const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; + // const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + // const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); + // const inline_body = if (cond.val.toBool()) then_body else else_body; + // const break_inst = try sema.analyzeBody(block, inline_body); + // const break_data = datas[break_inst].@"break"; + // if (inst == break_data.block_inst) { + // break :blk try sema.resolveInst(break_data.operand); + // } else { + // return break_inst; + // } + //}, + else => @panic("TODO remove else prong"), }; - if (air_inst.ty.isNoReturn()) + if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; @@ -577,18 +578,13 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -/// TODO when we rework AIR memory layout, this function will no longer have a possible error. -pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) error{OutOfMemory}!Air.Inst.Index { +pub fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref { var i: usize = @enumToInt(zir_ref); // First section of indexes correspond to a set number of constant values. 
if (i < Zir.Inst.Ref.typed_value_map.len) { - // TODO when we rework AIR memory layout, this function can be as simple as: - // if (zir_ref < Zir.const_inst_list.len + sema.param_count) - // return zir_ref; - // Until then we allocate memory for a new, mutable `ir.Inst` to match what - // AIR expects. - return sema.mod.constInst(sema.arena, .unneeded, Zir.Inst.Ref.typed_value_map[i]); + // We intentionally map the same indexes to the same values between ZIR and AIR. + return zir_ref; } i -= Zir.Inst.Ref.typed_value_map.len; @@ -1256,7 +1252,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1271,7 +1267,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air.instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; return air_arg; } @@ -7942,6 +7938,18 @@ fn enumFieldSrcLoc( } else unreachable; } +fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = sema.air_instructions.items(.tag); + const air_datas = sema.air_instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { switch (ty.tag()) { .u8 => return .u8_type, diff --git a/src/codegen.zig b/src/codegen.zig index eaf910977e..a6c4b5ad3c 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -282,7 +282,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return struct { gpa: *Allocator, - air: *const Air, + air: Air, + liveness: Liveness, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, @@ -468,8 +469,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var function = Self{ .gpa = bin_file.allocator, - .air = &air, - .liveness = &liveness, + .air = air, + .liveness = liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, diff --git a/src/codegen/c.zig b/src/codegen/c.zig index e3f2423746..4743494f35 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -6,7 +6,6 @@ const log = std.log.scoped(.c); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const Air = @import("../Air.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; const TypedValue = @import("../TypedValue.zig"); @@ -14,6 +13,8 @@ const C = link.File.C; const Decl = Module.Decl; const trace = @import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const Mutability = enum { Const, Mut }; @@ -37,7 +38,7 @@ const BlockData = struct { result: CValue, }; -pub const CValueMap = std.AutoHashMap(*Inst, 
CValue); +pub const CValueMap = std.AutoHashMap(Air.Inst.Index, CValue); pub const TypedefMap = std.ArrayHashMap( Type, struct { name: []const u8, rendered: []u8 }, @@ -93,6 +94,8 @@ pub fn fmtIdent(ident: []const u8) std.fmt.Formatter(formatIdent) { /// It is not available when generating .h file. pub const Object = struct { dg: DeclGen, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, @@ -102,7 +105,7 @@ pub const Object = struct { next_block_index: usize = 0, indent_writer: IndentWriter(std.ArrayList(u8).Writer), - fn resolveInst(o: *Object, inst: *Inst) !CValue { + fn resolveInst(o: *Object, inst: Air.Inst.Index) !CValue { if (inst.value()) |_| { return CValue{ .constant = inst }; } diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 45ee2d9bb8..ddf2883259 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -277,6 +277,9 @@ pub const Object = struct { } pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + var dg: DeclGen = .{ .object = self, .module = module, diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 3d704a8dc5..4da320b087 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,8 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - air: *const Air, + air: Air, + liveness: Liveness, /// An array of function argument result-ids. Each index corresponds with the /// function argument of the same index. diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 45b00ddfad..912577a358 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -9,13 +9,14 @@ const wasm = std.wasm; const Module = @import("../Module.zig"); const Decl = Module.Decl; -const Air = @import("../Air.zig"); const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const Compilation = @import("../Compilation.zig"); const LazySrcLoc = Module.LazySrcLoc; const link = @import("../link.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); /// Wasm Value, created when generating an instruction const WValue = union(enum) { @@ -491,6 +492,8 @@ pub const Context = struct { /// Reference to the function declaration the code /// section belongs to decl: *Decl, + air: Air, + liveness: Liveness, gpa: *mem.Allocator, /// Table to save `WValue`'s generated by an `Inst` values: ValueTable, @@ -710,52 +713,53 @@ pub const Context = struct { } } + pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + try self.genFunctype(); + + // Write instructions + // TODO: check for and handle death of instructions + + // Reserve space to write the size after generating the code as well as space for locals count + try self.code.resize(10); + + try self.genBody(func.body); + + // finally, write our local types at the 'offset' position + { + leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); + + // offset into 'code' section where we will put our locals types + var local_offset: usize = 10; + + // emit the actual locals amount + for (self.locals.items) |local| { + var buf: [6]u8 = undefined; + leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); + buf[5] = local; + try self.code.insertSlice(local_offset, &buf); + local_offset += 6; + } + } + + const writer = self.code.writer(); + try writer.writeByte(wasm.opcode(.end)); + + // Fill in the 
size of the generated code to the reserved space at the + // beginning of the buffer. + const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; + leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); + + // codegen data has been appended to `code` + return Result.appended; + } + /// Generates the wasm bytecode for the function declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { try self.genFunctype(); - - // Write instructions - // TODO: check for and handle death of instructions - const mod_fn = blk: { - if (typed_value.val.castTag(.function)) |func| break :blk func.data; - if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions - unreachable; - }; - - // Reserve space to write the size after generating the code as well as space for locals count - try self.code.resize(10); - - try self.genBody(mod_fn.body); - - // finally, write our local types at the 'offset' position - { - leb.writeUnsignedFixed(5, self.code.items[5..10], @intCast(u32, self.locals.items.len)); - - // offset into 'code' section where we will put our locals types - var local_offset: usize = 10; - - // emit the actual locals amount - for (self.locals.items) |local| { - var buf: [6]u8 = undefined; - leb.writeUnsignedFixed(5, buf[0..5], @as(u32, 1)); - buf[5] = local; - try self.code.insertSlice(local_offset, &buf); - local_offset += 6; - } - } - - const writer = self.code.writer(); - try writer.writeByte(wasm.opcode(.end)); - - // Fill in the size of the generated code to the reserved space at the - // beginning of the buffer. - const size = self.code.items.len - 5 + self.decl.fn_link.wasm.idx_refs.items.len * 5; - leb.writeUnsignedFixed(5, self.code.items[0..5], @intCast(u32, size)); - - // codegen data has been appended to `code` - return Result.appended; + if (typed_value.val.castTag(.extern_fn)) |_| return Result.appended; // don't need code body for extern functions + return self.fail("TODO implement wasm codegen for function pointers", .{}); }, .Array => { if (typed_value.val.castTag(.bytes)) |payload| { diff --git a/src/link.zig b/src/link.zig index 02d9afaf07..2403180ec8 100644 --- a/src/link.zig +++ b/src/link.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const fs = std.fs; @@ -14,8 +15,10 @@ const Cache = @import("Cache.zig"); const build_options = @import("build_options"); const LibCInstallation = @import("libc_installation.zig").LibCInstallation; const wasi_libc = @import("wasi_libc.zig"); +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); -pub const producer_string = if (std.builtin.is_test) "zig test" else "zig " ++ build_options.version; +pub const producer_string = if (builtin.is_test) "zig test" else "zig " ++ build_options.version; pub const Emit = struct { /// Where the output will go. 
@@ -313,13 +316,34 @@ pub const File = struct { log.debug("updateDecl {*} ({s}), type={}", .{ decl, decl.name, decl.ty }); assert(decl.has_tv); switch (base.tag) { - .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), - .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl), + .elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl), .macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl), - .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), - .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), + .c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateDecl(module, decl), .spirv => return @fieldParentPtr(SpirV, "base", base).updateDecl(module, decl), .plan9 => return @fieldParentPtr(Plan9, "base", base).updateDecl(module, decl), + // zig fmt: on + } + } + + /// May be called before or after updateDeclExports but must be called + /// after allocateDeclIndexes for any given Decl. + pub fn updateFunc(base: *File, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + log.debug("updateFunc {*} ({s}), type={}", .{ + func.owner_decl, func.owner_decl.name, func.owner_decl.ty, + }); + switch (base.tag) { + // zig fmt: off + .coff => return @fieldParentPtr(Coff, "base", base).updateFunc(module, func, air, liveness), + .elf => return @fieldParentPtr(Elf, "base", base).updateFunc(module, func, air, liveness), + .macho => return @fieldParentPtr(MachO, "base", base).updateFunc(module, func, air, liveness), + .c => return @fieldParentPtr(C, "base", base).updateFunc(module, func, air, liveness), + .wasm => return @fieldParentPtr(Wasm, "base", base).updateFunc(module, func, air, liveness), + .spirv => return @fieldParentPtr(SpirV, "base", base).updateFunc(module, func, air, liveness), + .plan9 => return @fieldParentPtr(Plan9, "base", base).updateFunc(module, func, air, liveness), + // zig fmt: on } } diff --git a/src/link/C.zig b/src/link/C.zig index 53561d16cd..09f789f7d1 100644 --- a/src/link/C.zig +++ b/src/link/C.zig @@ -2,14 +2,17 @@ const std = @import("std"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const fs = std.fs; + +const C = @This(); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); -const fs = std.fs; const codegen = @import("../codegen/c.zig"); const link = @import("../link.zig"); const trace = @import("../tracy.zig").trace; -const C = @This(); const Type = @import("../type.zig").Type; +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag: link.File.Tag = .c; pub const zig_h = @embedFile("C/zig.h"); @@ -95,10 +98,7 @@ fn deinitDecl(gpa: *Allocator, decl: *Module.Decl) void { decl.fn_link.c.typedefs.deinit(gpa); } -pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - +pub fn finishUpdateDecl(self: *C, module: *Module, decl: *Module.Decl, air: Air, liveness: Liveness) !void { // Keep track of all decls so we can iterate over them on flush(). 
_ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -126,6 +126,8 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { .code = code.toManaged(module.gpa), .value_map = codegen.CValueMap.init(module.gpa), .indent_writer = undefined, // set later so we can get a pointer to object.code + .air = air, + .liveness = liveness, }; object.indent_writer = .{ .underlying_writer = object.code.writer() }; defer { @@ -157,6 +159,20 @@ pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { code.shrinkAndFree(module.gpa, code.items.len); } +pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + const tracy = trace(@src()); + defer tracy.end(); + + return self.finishUpdateDecl(module, func.owner_decl, air, liveness); +} + +pub fn updateDecl(self: *C, module: *Module, decl: *Module.Decl) !void { + const tracy = trace(@src()); + defer tracy.end(); + + return self.finishUpdateDecl(module, decl, undefined, undefined); +} + pub fn updateDeclLineNumber(self: *C, module: *Module, decl: *Module.Decl) !void { // The C backend does not have the ability to fix line numbers without re-generating // the entire Decl. diff --git a/src/link/Coff.zig b/src/link/Coff.zig index b466cf9136..44442b73a3 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -1,6 +1,7 @@ const Coff = @This(); const std = @import("std"); +const builtin = @import("builtin"); const log = std.log.scoped(.link); const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -17,6 +18,8 @@ const build_options = @import("build_options"); const Cache = @import("../Cache.zig"); const mingw = @import("../mingw.zig"); const llvm_backend = @import("../codegen/llvm.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const allocation_padding = 4 / 3; const minimum_text_block_size = 64 * allocation_padding; @@ -653,19 +656,58 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { } } -pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { - // TODO COFF/PE debug information - // TODO Implement exports +pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } const tracy = trace(@src()); defer tracy.end(); - if (build_options.have_llvm) - if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl); + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + const res = try codegen.generateFunction( + &self.base, + decl.srcLoc(), + func, + air, + liveness, + &code_buffer, + .none, + ); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + return self.finishUpdateDecl(module, func.owner_decl, code); +} + +pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + @panic("Attempted to compile for object format that was disabled by build configuration"); 
+ } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + const tracy = trace(@src()); + defer tracy.end(); if (decl.val.tag() == .extern_fn) { return; // TODO Should we do more when front-end analyzed extern decl? } + // TODO COFF/PE debug information + // TODO Implement exports + var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); @@ -683,6 +725,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { }, }; + return self.finishUpdateDecl(module, func.owner_decl, code); +} + +fn finishUpdateDecl(self: *Coff, decl: *Module.Decl, code: []const u8) !void { const required_alignment = decl.ty.abiAlignment(self.base.options.target); const curr_size = decl.link.coff.size; if (curr_size != 0) { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 90224866ba..0d05b97846 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -1,6 +1,7 @@ const Elf = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const assert = std.debug.assert; const Allocator = std.mem.Allocator; @@ -10,7 +11,6 @@ const log = std.log.scoped(.link); const DW = std.dwarf; const leb128 = std.leb; -const Air = @import("../Air.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const codegen = @import("../codegen.zig"); @@ -26,6 +26,8 @@ const glibc = @import("../glibc.zig"); const musl = @import("../musl.zig"); const Cache = @import("../Cache.zig"); const llvm_backend = @import("../codegen/llvm.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const default_entry_addr = 0x8000000; @@ -2155,138 +2157,17 @@ pub fn freeDecl(self: *Elf, decl: *Module.Decl) void { } } -pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); - - if (build_options.have_llvm) - if (self.llvm_object) |llvm_object| return try llvm_object.updateDecl(module, decl); - - if (decl.val.tag() == .extern_fn) { - return; // TODO Should we do more when front-end analyzed extern decl? - } - if (decl.val.castTag(.variable)) |payload| { - const variable = payload.data; - if (variable.is_extern) { - return; // TODO Should we do more when front-end analyzed extern decl? - } +fn deinitRelocs(gpa: *Allocator, table: *File.DbgInfoTypeRelocsTable) void { + var it = table.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(gpa); } + table.deinit(gpa); +} - var code_buffer = std.ArrayList(u8).init(self.base.allocator); - defer code_buffer.deinit(); - - var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); - defer dbg_line_buffer.deinit(); - - var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); - defer dbg_info_buffer.deinit(); - - var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; - defer { - var it = dbg_info_type_relocs.valueIterator(); - while (it.next()) |value| { - value.relocs.deinit(self.base.allocator); - } - dbg_info_type_relocs.deinit(self.base.allocator); - } - - const is_fn: bool = switch (decl.ty.zigTypeTag()) { - .Fn => true, - else => false, - }; - if (is_fn) { - // For functions we need to add a prologue to the debug line program. 
- try dbg_line_buffer.ensureCapacity(26); - - const func = decl.val.castTag(.function).?.data; - const line_off = @intCast(u28, decl.src_line + func.lbrace_line); - - const ptr_width_bytes = self.ptrWidthBytes(); - dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ - DW.LNS_extended_op, - ptr_width_bytes + 1, - DW.LNE_set_address, - }); - // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. - assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); - dbg_line_buffer.items.len += ptr_width_bytes; - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); - // This is the "relocatable" relative line offset from the previous function's end curly - // to this function's begin curly. - assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); - // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); - - dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); - assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); - // Once we support more than one source file, this will have the ability to be more - // than one possible value. - const file_index = 1; - leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); - - // Emit a line for the begin curly with prologue_end=false. The codegen will - // do the work of setting prologue_end=true and epilogue_begin=true. - dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); - - // .debug_info subprogram - const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; - try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); - - const fn_ret_type = decl.ty.fnReturnType(); - const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); - if (fn_ret_has_bits) { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); - } else { - dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); - } - // These get overwritten after generating the machine code. These values are - // "relocations" and have to be in this fixed place so that functions can be - // moved in virtual address space. 
- assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr - assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); - dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 - if (fn_ret_has_bits) { - const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); - if (!gop.found_existing) { - gop.value_ptr.* = .{ - .off = undefined, - .relocs = .{}, - }; - } - try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); - dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 - } - dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string - } else { - // TODO implement .debug_info for global variables - } - const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ - .ty = decl.ty, - .val = decl_val, - }, &code_buffer, .{ - .dwarf = .{ - .dbg_line = &dbg_line_buffer, - .dbg_info = &dbg_info_buffer, - .dbg_info_type_relocs = &dbg_info_type_relocs, - }, - }); - const code = switch (res) { - .externally_managed => |x| x, - .appended => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); - return; - }, - }; - +fn updateDeclCode(self: *Elf, decl: *Module.Decl, code: []const u8, stt_bits: u8) !*elf.Elf64_Sym { const required_alignment = decl.ty.abiAlignment(self.base.options.target); - const stt_bits: u8 = if (is_fn) elf.STT_FUNC else elf.STT_OBJECT; - assert(decl.link.elf.local_sym_index != 0); // Caller forgot to allocateDeclIndexes() const local_sym = &self.local_symbols.items[decl.link.elf.local_sym_index]; if (local_sym.st_size != 0) { @@ -2338,128 +2219,16 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { const file_offset = self.sections.items[self.text_section_index.?].sh_offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - const target_endian = self.base.options.target.cpu.arch.endian(); - - const text_block = &decl.link.elf; - - // If the Decl is a function, we need to update the .debug_line program. - if (is_fn) { - // Perform the relocations based on vaddr. - switch (self.ptr_width) { - .p32 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); - } - }, - .p64 => { - { - const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - { - const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; - mem.writeInt(u64, ptr, local_sym.st_value, target_endian); - } - }, - } - { - const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; - mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); - } - - try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); - - // Now we have the full contents and may allocate a region to store it. - - // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for - // `TextBlock` and the .debug_info. If you are editing this logic, you - // probably need to edit that logic too. 
- - const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; - const src_fn = &decl.fn_link.elf; - src_fn.len = @intCast(u32, dbg_line_buffer.items.len); - if (self.dbg_line_fn_last) |last| not_first: { - if (src_fn.next) |next| { - // Update existing function - non-last item. - if (src_fn.off + src_fn.len + min_nop_size > next.off) { - // It grew too big, so we move it to a new location. - if (src_fn.prev) |prev| { - self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; - prev.next = src_fn.next; - } - assert(src_fn.prev != next); - next.prev = src_fn.prev; - src_fn.next = null; - // Populate where it used to be with NOPs. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); - // TODO Look at the free list before appending at the end. - src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else if (src_fn.prev == null) { - if (src_fn == last) { - // Special case: there is only 1 function and it is being updated. - // In this case there is nothing to do. The function's length has - // already been updated, and the logic below takes care of - // resizing the .debug_line section. - break :not_first; - } - // Append new function. - // TODO Look at the free list before appending at the end. - src_fn.prev = last; - last.next = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = last.off + padToIdeal(last.len); - } - } else { - // This is the first function of the Line Number Program. - self.dbg_line_fn_first = src_fn; - self.dbg_line_fn_last = src_fn; - - src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); - } - - const last_src_fn = self.dbg_line_fn_last.?; - const needed_size = last_src_fn.off + last_src_fn.len; - if (needed_size != debug_line_sect.sh_size) { - if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { - const new_offset = self.findFreeSpace(needed_size, 1); - const existing_size = last_src_fn.off; - log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ - existing_size, - debug_line_sect.sh_offset, - new_offset, - }); - const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); - if (amt != existing_size) return error.InputOutput; - debug_line_sect.sh_offset = new_offset; - } - debug_line_sect.sh_size = needed_size; - self.shdr_table_dirty = true; // TODO look into making only the one section dirty - self.debug_line_header_dirty = true; - } - const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; - const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; - - // We only have support for one compilation unit so far, so the offsets are directly - // from the .debug_line section. - const file_pos = debug_line_sect.sh_offset + src_fn.off; - try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); - - // .debug_info - End the TAG_subprogram children. - try dbg_info_buffer.append(0); - } + return local_sym; +} +fn finishUpdateDecl( + self: *Elf, + module: *Module, + decl: *Module.Decl, + dbg_info_type_relocs: *File.DbgInfoTypeRelocsTable, + dbg_info_buffer: *std.ArrayList(u8), +) !void { // Now we emit the .debug_info types of the Decl. 
These will count towards the size of // the buffer, so we have to do it before computing the offset, and we can't perform the actual // relocations yet. @@ -2467,12 +2236,15 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var it = dbg_info_type_relocs.iterator(); while (it.next()) |entry| { entry.value_ptr.off = @intCast(u32, dbg_info_buffer.items.len); - try self.addDbgInfoType(entry.key_ptr.*, &dbg_info_buffer); + try self.addDbgInfoType(entry.key_ptr.*, dbg_info_buffer); } } + const text_block = &decl.link.elf; try self.updateDeclDebugInfoAllocation(text_block, @intCast(u32, dbg_info_buffer.items.len)); + const target_endian = self.base.options.target.cpu.arch.endian(); + { // Now that we have the offset assigned we can finally perform type relocations. var it = dbg_info_type_relocs.valueIterator(); @@ -2495,6 +2267,290 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { return self.updateDeclExports(module, decl, decl_exports); } +pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + + const tracy = trace(@src()); + defer tracy.end(); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // For functions we need to add a prologue to the debug line program. + try dbg_line_buffer.ensureCapacity(26); + + const decl = func.owner_decl; + const line_off = @intCast(u28, decl.src_line + func.lbrace_line); + + const ptr_width_bytes = self.ptrWidthBytes(); + dbg_line_buffer.appendSliceAssumeCapacity(&[_]u8{ + DW.LNS_extended_op, + ptr_width_bytes + 1, + DW.LNE_set_address, + }); + // This is the "relocatable" vaddr, corresponding to `code_buffer` index `0`. + assert(dbg_line_vaddr_reloc_index == dbg_line_buffer.items.len); + dbg_line_buffer.items.len += ptr_width_bytes; + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_advance_line); + // This is the "relocatable" relative line offset from the previous function's end curly + // to this function's begin curly. + assert(self.getRelocDbgLineOff() == dbg_line_buffer.items.len); + // Here we use a ULEB128-fixed-4 to make sure this field can be overwritten later. + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), line_off); + + dbg_line_buffer.appendAssumeCapacity(DW.LNS_set_file); + assert(self.getRelocDbgFileIndex() == dbg_line_buffer.items.len); + // Once we support more than one source file, this will have the ability to be more + // than one possible value. + const file_index = 1; + leb128.writeUnsignedFixed(4, dbg_line_buffer.addManyAsArrayAssumeCapacity(4), file_index); + + // Emit a line for the begin curly with prologue_end=false. The codegen will + // do the work of setting prologue_end=true and epilogue_begin=true. 
+ dbg_line_buffer.appendAssumeCapacity(DW.LNS_copy); + + // .debug_info subprogram + const decl_name_with_null = decl.name[0 .. mem.lenZ(decl.name) + 1]; + try dbg_info_buffer.ensureCapacity(dbg_info_buffer.items.len + 25 + decl_name_with_null.len); + + const fn_ret_type = decl.ty.fnReturnType(); + const fn_ret_has_bits = fn_ret_type.hasCodeGenBits(); + if (fn_ret_has_bits) { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram); + } else { + dbg_info_buffer.appendAssumeCapacity(abbrev_subprogram_retvoid); + } + // These get overwritten after generating the machine code. These values are + // "relocations" and have to be in this fixed place so that functions can be + // moved in virtual address space. + assert(dbg_info_low_pc_reloc_index == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += ptr_width_bytes; // DW.AT_low_pc, DW.FORM_addr + assert(self.getRelocDbgInfoSubprogramHighPC() == dbg_info_buffer.items.len); + dbg_info_buffer.items.len += 4; // DW.AT_high_pc, DW.FORM_data4 + if (fn_ret_has_bits) { + const gop = try dbg_info_type_relocs.getOrPut(self.base.allocator, fn_ret_type); + if (!gop.found_existing) { + gop.value_ptr.* = .{ + .off = undefined, + .relocs = .{}, + }; + } + try gop.value_ptr.relocs.append(self.base.allocator, @intCast(u32, dbg_info_buffer.items.len)); + dbg_info_buffer.items.len += 4; // DW.AT_type, DW.FORM_ref4 + } + dbg_info_buffer.appendSliceAssumeCapacity(decl_name_with_null); // DW.AT_name, DW.FORM_string + + const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + const local_sym = try self.updateDeclCode(decl, code, elf.STT_FUNC); + + const target_endian = self.base.options.target.cpu.arch.endian(); + + // Since the Decl is a function, we need to update the .debug_line program. + // Perform the relocations based on vaddr. + switch (self.ptr_width) { + .p32 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_value), target_endian); + } + }, + .p64 => { + { + const ptr = dbg_line_buffer.items[dbg_line_vaddr_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + { + const ptr = dbg_info_buffer.items[dbg_info_low_pc_reloc_index..][0..8]; + mem.writeInt(u64, ptr, local_sym.st_value, target_endian); + } + }, + } + { + const ptr = dbg_info_buffer.items[self.getRelocDbgInfoSubprogramHighPC()..][0..4]; + mem.writeInt(u32, ptr, @intCast(u32, local_sym.st_size), target_endian); + } + + try dbg_line_buffer.appendSlice(&[_]u8{ DW.LNS_extended_op, 1, DW.LNE_end_sequence }); + + // Now we have the full contents and may allocate a region to store it. + + // This logic is nearly identical to the logic below in `updateDeclDebugInfoAllocation` for + // `TextBlock` and the .debug_info. If you are editing this logic, you + // probably need to edit that logic too. 
+ + const debug_line_sect = &self.sections.items[self.debug_line_section_index.?]; + const src_fn = &decl.fn_link.elf; + src_fn.len = @intCast(u32, dbg_line_buffer.items.len); + if (self.dbg_line_fn_last) |last| not_first: { + if (src_fn.next) |next| { + // Update existing function - non-last item. + if (src_fn.off + src_fn.len + min_nop_size > next.off) { + // It grew too big, so we move it to a new location. + if (src_fn.prev) |prev| { + self.dbg_line_fn_free_list.put(self.base.allocator, prev, {}) catch {}; + prev.next = src_fn.next; + } + assert(src_fn.prev != next); + next.prev = src_fn.prev; + src_fn.next = null; + // Populate where it used to be with NOPs. + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(0, &[0]u8{}, src_fn.len, file_pos); + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else if (src_fn.prev == null) { + if (src_fn == last) { + // Special case: there is only 1 function and it is being updated. + // In this case there is nothing to do. The function's length has + // already been updated, and the logic below takes care of + // resizing the .debug_line section. + break :not_first; + } + // Append new function. + // TODO Look at the free list before appending at the end. + src_fn.prev = last; + last.next = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = last.off + padToIdeal(last.len); + } + } else { + // This is the first function of the Line Number Program. + self.dbg_line_fn_first = src_fn; + self.dbg_line_fn_last = src_fn; + + src_fn.off = padToIdeal(self.dbgLineNeededHeaderBytes()); + } + + const last_src_fn = self.dbg_line_fn_last.?; + const needed_size = last_src_fn.off + last_src_fn.len; + if (needed_size != debug_line_sect.sh_size) { + if (needed_size > self.allocatedSize(debug_line_sect.sh_offset)) { + const new_offset = self.findFreeSpace(needed_size, 1); + const existing_size = last_src_fn.off; + log.debug("moving .debug_line section: {d} bytes from 0x{x} to 0x{x}", .{ + existing_size, + debug_line_sect.sh_offset, + new_offset, + }); + const amt = try self.base.file.?.copyRangeAll(debug_line_sect.sh_offset, self.base.file.?, new_offset, existing_size); + if (amt != existing_size) return error.InputOutput; + debug_line_sect.sh_offset = new_offset; + } + debug_line_sect.sh_size = needed_size; + self.shdr_table_dirty = true; // TODO look into making only the one section dirty + self.debug_line_header_dirty = true; + } + const prev_padding_size: u32 = if (src_fn.prev) |prev| src_fn.off - (prev.off + prev.len) else 0; + const next_padding_size: u32 = if (src_fn.next) |next| next.off - (src_fn.off + src_fn.len) else 0; + + // We only have support for one compilation unit so far, so the offsets are directly + // from the .debug_line section. + const file_pos = debug_line_sect.sh_offset + src_fn.off; + try self.pwriteDbgLineNops(prev_padding_size, dbg_line_buffer.items, next_padding_size, file_pos); + + // .debug_info - End the TAG_subprogram children. 
+ try dbg_info_buffer.append(0); + + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + +pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .elf) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + + const tracy = trace(@src()); + defer tracy.end(); + + if (decl.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + if (decl.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.is_extern) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + } + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_info_buffer.deinit(); + + var dbg_info_type_relocs: File.DbgInfoTypeRelocsTable = .{}; + defer deinitRelocs(self.base.allocator, &dbg_info_type_relocs); + + // TODO implement .debug_info for global variables + const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + .ty = decl.ty, + .val = decl_val, + }, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg_line_buffer, + .dbg_info = &dbg_info_buffer, + .dbg_info_type_relocs = &dbg_info_type_relocs, + }, + }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + + _ = try self.updateDeclCode(decl, code, elf.STT_OBJECT); + return self.finishUpdateDecl(module, decl, &dbg_info_type_relocs, &dbg_info_buffer); +} + /// Asserts the type has codegen bits. 
fn addDbgInfoType(self: *Elf, ty: Type, dbg_info_buffer: *std.ArrayList(u8)) !void { switch (ty.zigTypeTag()) { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index df2e0134e4..cd020c1b27 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1,6 +1,7 @@ const MachO = @This(); const std = @import("std"); +const builtin = @import("builtin"); const Allocator = std.mem.Allocator; const assert = std.debug.assert; const fmt = std.fmt; @@ -22,6 +23,8 @@ const link = @import("../link.zig"); const File = link.File; const Cache = @import("../Cache.zig"); const target_util = @import("../target.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); const DebugSymbols = @import("MachO/DebugSymbols.zig"); const Trie = @import("MachO/Trie.zig"); @@ -1132,7 +1135,55 @@ pub fn allocateDeclIndexes(self: *MachO, decl: *Module.Decl) !void { }; } +pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const tracy = trace(@src()); + defer tracy.end(); + + const decl = func.owner_decl; + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + + var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + defer { + if (debug_buffers) |*dbg| { + dbg.dbg_line_buffer.deinit(); + dbg.dbg_info_buffer.deinit(); + var it = dbg.dbg_info_type_relocs.valueIterator(); + while (it.next()) |value| { + value.relocs.deinit(self.base.allocator); + } + dbg.dbg_info_type_relocs.deinit(self.base.allocator); + } + } + + const res = if (debug_buffers) |*dbg| + try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ + .dwarf = .{ + .dbg_line = &dbg.dbg_line_buffer, + .dbg_info = &dbg.dbg_info_buffer, + .dbg_info_type_relocs = &dbg.dbg_info_type_relocs, + }, + }) + else + try codegen.generateSymbol(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + + return self.finishUpdateDecl(module, decl, res); +} + pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native and builtin.object_format != .macho) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } const tracy = trace(@src()); defer tracy.end(); @@ -1173,6 +1224,10 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); + return self.finishUpdateDecl(module, decl, res); +} + +fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 80a92f9cdb..bc044ce414 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -2,18 +2,21 @@ //! would be to add incremental linking in a similar way as ELF does. 
const Plan9 = @This(); - -const std = @import("std"); const link = @import("../link.zig"); const Module = @import("../Module.zig"); const Compilation = @import("../Compilation.zig"); const aout = @import("Plan9/aout.zig"); const codegen = @import("../codegen.zig"); const trace = @import("../tracy.zig").trace; -const mem = std.mem; const File = link.File; -const Allocator = std.mem.Allocator; +const build_options = @import("build_options"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); +const std = @import("std"); +const builtin = @import("builtin"); +const mem = std.mem; +const Allocator = std.mem.Allocator; const log = std.log.scoped(.link); const assert = std.debug.assert; @@ -120,6 +123,19 @@ pub fn createEmpty(gpa: *Allocator, options: link.Options) !*Plan9 { return self; } +pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). + _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO Plan9 needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { _ = module; _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -138,6 +154,9 @@ pub fn flush(self: *Plan9, comp: *Compilation) !void { } pub fn flushModule(self: *Plan9, comp: *Compilation) !void { + if (build_options.skip_non_native and builtin.object_format != .plan9) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } _ = comp; const tracy = trace(@src()); defer tracy.end(); @@ -199,7 +218,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { } } if (std.mem.eql(u8, exp.options.name, "_start")) { - std.debug.assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry + assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry self.entry_decl = decl; } if (exp.link.plan9) |i| { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index 8a2e877d42..bc9e560582 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -36,6 +36,8 @@ const ResultId = codegen.ResultId; const trace = @import("../tracy.zig").trace; const build_options = @import("build_options"); const spec = @import("../codegen/spirv/spec.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); // TODO: Should this struct be used at all rather than just a hashmap of aux data for every decl? pub const FnData = struct { @@ -101,7 +103,23 @@ pub fn deinit(self: *SpirV) void { self.decl_table.deinit(self.base.allocator); } +pub fn updateFunc(self: *SpirV, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + _ = module; + // Keep track of all decls so we can iterate over them on flush(). 
+ _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); + + _ = air; + _ = liveness; + @panic("TODO SPIR-V needs to keep track of Air and Liveness so it can use them later"); +} + pub fn updateDecl(self: *SpirV, module: *Module, decl: *Module.Decl) !void { + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } _ = module; // Keep track of all decls so we can iterate over them on flush(). _ = try self.decl_table.getOrPut(self.base.allocator, decl); @@ -132,13 +150,13 @@ pub fn flush(self: *SpirV, comp: *Compilation) !void { } pub fn flushModule(self: *SpirV, comp: *Compilation) !void { - const tracy = trace(@src()); - defer tracy.end(); - if (build_options.skip_non_native) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } + const tracy = trace(@src()); + defer tracy.end(); + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 15a36a4bcc..be6ad78701 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -1,6 +1,7 @@ const Wasm = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -18,6 +19,8 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = @import("../Cache.zig"); const TypedValue = @import("../TypedValue.zig"); +const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const base_tag = link.File.Tag.wasm; @@ -186,11 +189,60 @@ pub fn allocateDeclIndexes(self: *Wasm, decl: *Module.Decl) !void { } } +pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + } + const decl = func.owner_decl; + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + + const fn_data = &decl.fn_link.wasm; + fn_data.functype.items.len = 0; + fn_data.code.items.len = 0; + fn_data.idx_refs.items.len = 0; + + var context = codegen.Context{ + .gpa = self.base.allocator, + .air = air, + .liveness = liveness, + .values = .{}, + .code = fn_data.code.toManaged(self.base.allocator), + .func_type_data = fn_data.functype.toManaged(self.base.allocator), + .decl = decl, + .err_msg = undefined, + .locals = .{}, + .target = self.base.options.target, + .global_error_set = self.base.options.module.?.global_error_set, + }; + defer context.deinit(); + + // generate the 'code' section for the function declaration + const result = context.genFunc(func) catch |err| switch (err) { + error.CodegenFail => { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, context.err_msg); + return; + }, + else => |e| return e, + }; + return self.finishUpdateDecl(decl, result); +} + // Generate code for the Decl, storing it in memory to be later written to // the file on flush(). 
pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { - std.debug.assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + if (build_options.skip_non_native and builtin.object_format != .wasm) { + @panic("Attempted to compile for object format that was disabled by build configuration"); + } + if (build_options.have_llvm) { + if (self.llvm_object) |llvm_object| return llvm_object.updateDecl(module, decl); + } + assert(decl.link.wasm.init); // Must call allocateDeclIndexes() + // TODO don't use this for non-functions const fn_data = &decl.fn_link.wasm; fn_data.functype.items.len = 0; fn_data.code.items.len = 0; @@ -218,7 +270,10 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { }, else => |e| return e, }; + return self.finishUpdateDecl(decl, result); +} +fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void { const code: []const u8 = switch (result) { .appended => @as([]const u8, context.code.items), .externally_managed => |payload| payload, @@ -521,7 +576,7 @@ pub fn flushModule(self: *Wasm, comp: *Compilation) !void { var data_offset = offset_table_size; while (cur) |cur_block| : (cur = cur_block.next) { if (cur_block.size == 0) continue; - std.debug.assert(cur_block.init); + assert(cur_block.init); const offset = (cur_block.offset_index) * ptr_width; var buf: [4]u8 = undefined; From c09b973ec25f328f5e15e9e6eed4da7f5e4634af Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jul 2021 15:45:08 -0700 Subject: [PATCH 09/53] stage2: compile error fixes for AIR memory layout branch Now the branch is compiling again, provided that one uses `-Dskip-non-native`, but many code paths are disabled. The code paths can now be re-enabled one at a time and updated to conform to the new AIR memory layout. --- src/Air.zig | 30 +- src/Compilation.zig | 2 +- src/Liveness.zig | 71 +++- src/Module.zig | 34 +- src/Sema.zig | 986 +++++++++++++++++++++++++------------------- src/codegen.zig | 147 ++++--- src/codegen/c.zig | 198 ++++----- src/link/Elf.zig | 3 + src/value.zig | 2 +- 9 files changed, 842 insertions(+), 631 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index e85f2e5c43..1f294c43f3 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -332,12 +332,12 @@ pub const Block = struct { body_len: u32, }; -/// Trailing is a list of `Ref` for every `args_len`. +/// Trailing is a list of `Inst.Ref` for every `args_len`. pub const Call = struct { args_len: u32, }; -/// This data is stored inside extra, with two sets of trailing `Ref`: +/// This data is stored inside extra, with two sets of trailing `Inst.Ref`: /// * 0. the then body, according to `then_body_len`. /// * 1. the else body, according to `else_body_len`. pub const CondBr = struct { @@ -355,19 +355,19 @@ pub const SwitchBr = struct { /// Trailing: /// * instruction index for each `body_len`. pub const Case = struct { - item: Ref, + item: Inst.Ref, body_len: u32, }; }; pub const StructField = struct { - struct_ptr: Ref, + struct_ptr: Inst.Ref, field_index: u32, }; /// Trailing: -/// 0. `Ref` for every outputs_len -/// 1. `Ref` for every inputs_len +/// 0. `Inst.Ref` for every outputs_len +/// 1. `Inst.Ref` for every inputs_len pub const Asm = struct { /// Index to the corresponding ZIR instruction. 
/// `asm_source`, `outputs_len`, `inputs_len`, `clobbers_len`, `is_volatile`, and @@ -381,6 +381,24 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[body_index..][0..body_len]; } +pub fn getType(air: Air, inst: Air.Inst.Index) Type { + _ = air; + _ = inst; + @panic("TODO Air getType"); +} + +pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { + var i: usize = @enumToInt(ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_tags = air.instructions.items(.tag); + const air_datas = air.instructions.items(.data); + assert(air_tags[i] == .const_ty); + return air_datas[i].ty; +} + /// Returns the requested data, as well as the new index which is at the start of the /// trailers for the object. pub fn extraData(air: Air, comptime T: type, index: usize) struct { data: T, end: usize } { diff --git a/src/Compilation.zig b/src/Compilation.zig index 90224a77d1..4a442a8b67 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2023,7 +2023,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor defer air.deinit(gpa); log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); defer liveness.deinit(gpa); if (std.builtin.mode == .Debug and self.verbose_air) { diff --git a/src/Liveness.zig b/src/Liveness.zig index 1402a5997b..838f19d4a1 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -7,11 +7,13 @@ //! * Switch Branches const Liveness = @This(); const std = @import("std"); -const Air = @import("Air.zig"); const trace = @import("tracy.zig").trace; const log = std.log.scoped(.liveness); const assert = std.debug.assert; const Allocator = std.mem.Allocator; +const Air = @import("Air.zig"); +const Zir = @import("Zir.zig"); +const Log2Int = std.math.Log2Int; /// This array is split into sets of 4 bits per AIR instruction. /// The MSB (0bX000) is whether the instruction is unreferenced. 
@@ -44,7 +46,7 @@ pub const SwitchBr = struct { else_death_count: u32, }; -pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { +pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness { const tracy = trace(@src()); defer tracy.end(); @@ -58,6 +60,7 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { ), .extra = .{}, .special = .{}, + .zir = &zir, }; errdefer gpa.free(a.tomb_bits); errdefer a.special.deinit(gpa); @@ -74,23 +77,32 @@ pub fn analyze(gpa: *Allocator, air: Air) Allocator.Error!Liveness { }; } +pub fn getTombBits(l: Liveness, inst: Air.Inst.Index) Bpi { + const usize_index = (inst * bpi) / @bitSizeOf(usize); + return @truncate(Bpi, l.tomb_bits[usize_index] >> + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi)); +} + pub fn isUnused(l: Liveness, inst: Air.Inst.Index) bool { const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + (bpi - 1)); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); return (l.tomb_bits[usize_index] & mask) != 0; } pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); - const mask = @as(usize, 1) << ((inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); + const mask = @as(usize, 1) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); l.tomb_bits[usize_index] |= mask; } @@ -113,10 +125,12 @@ const Analysis = struct { tomb_bits: []usize, special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), extra: std.ArrayListUnmanaged(u32), + zir: *const Zir, fn storeTombBits(a: *Analysis, inst: Air.Inst.Index, tomb_bits: Bpi) void { const usize_index = (inst * bpi) / @bitSizeOf(usize); - a.tomb_bits[usize_index] |= tomb_bits << (inst % (@bitSizeOf(usize) / bpi)) * bpi; + a.tomb_bits[usize_index] |= @as(usize, tomb_bits) << + @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi); } fn addExtra(a: *Analysis, extra: anytype) Allocator.Error!u32 { @@ -203,9 +217,11 @@ fn analyzeInst( return trackOperands(a, new_set, inst, main_tomb, .{ o.lhs, o.rhs, .none }); }, + .arg, .alloc, .br, .constant, + .const_ty, .breakpoint, .dbg_stmt, .varptr, @@ -255,15 +271,30 @@ fn analyzeInst( if (args.len <= bpi - 2) { var buf: [bpi - 1]Air.Inst.Ref = undefined; buf[0] = callee; - std.mem.copy(&buf, buf[1..], args); + std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args)); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function with many args"); + @panic("TODO: liveness analysis for function with greater than 2 args"); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none }); }, + .assembly => { + const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); + const extended = 
a.zir.instructions.items(.data)[extra.data.zir_index].extended; + const outputs_len = @truncate(u5, extended.small); + const inputs_len = @truncate(u5, extended.small >> 5); + const outputs = a.air.extra[extra.end..][0..outputs_len]; + const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len]; + if (outputs.len + inputs.len <= bpi - 1) { + var buf: [bpi - 1]Air.Inst.Ref = undefined; + std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs)); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs)); + return trackOperands(a, new_set, inst, main_tomb, buf); + } + @panic("TODO: liveness analysis for asm with greater than 3 args"); + }, .block => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; @@ -287,8 +318,8 @@ fn analyzeInst( const then_body = a.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = a.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - var then_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer then_table.deinit(); + var then_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer then_table.deinit(gpa); try analyzeWithContext(a, &then_table, then_body); // Reset the table back to its state from before the branch. @@ -299,8 +330,8 @@ fn analyzeInst( } } - var else_table = std.AutoHashMap(Air.Inst.Index, void).init(gpa); - defer else_table.deinit(); + var else_table: std.AutoHashMapUnmanaged(Air.Inst.Index, void) = .{}; + defer else_table.deinit(gpa); try analyzeWithContext(a, &else_table, else_body); var then_entry_deaths = std.ArrayList(Air.Inst.Index).init(gpa); @@ -331,7 +362,7 @@ fn analyzeInst( } // Now we have to correctly populate new_set. if (new_set) |ns| { - try ns.ensureCapacity(@intCast(u32, ns.count() + then_table.count() + else_table.count())); + try ns.ensureCapacity(gpa, @intCast(u32, ns.count() + then_table.count() + else_table.count())); var it = then_table.keyIterator(); while (it.next()) |key| { _ = ns.putAssumeCapacity(key.*, {}); @@ -344,7 +375,7 @@ fn analyzeInst( const then_death_count = @intCast(u32, then_entry_deaths.items.len); const else_death_count = @intCast(u32, else_entry_deaths.items.len); - try a.extra.ensureUnusedCapacity(std.meta.fields(@TypeOf(CondBr)).len + + try a.extra.ensureUnusedCapacity(gpa, std.meta.fields(Air.CondBr).len + then_death_count + else_death_count); const extra_index = a.addExtraAssumeCapacity(CondBr{ .then_death_count = then_death_count, @@ -352,7 +383,7 @@ fn analyzeInst( }); a.extra.appendSliceAssumeCapacity(then_entry_deaths.items); a.extra.appendSliceAssumeCapacity(else_entry_deaths.items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); // Continue on with the instruction analysis. The following code will find the condition // instruction, and the deaths flag for the CondBr instruction will indicate whether the @@ -438,12 +469,12 @@ fn analyzeInst( }); for (case_deaths[0 .. 
case_deaths.len - 1]) |*cd| { const case_death_count = @intCast(u32, cd.items.len); - try a.extra.ensureUnusedCapacity(1 + case_death_count + else_death_count); + try a.extra.ensureUnusedCapacity(gpa, 1 + case_death_count + else_death_count); a.extra.appendAssumeCapacity(case_death_count); a.extra.appendSliceAssumeCapacity(cd.items); } a.extra.appendSliceAssumeCapacity(case_deaths[case_deaths.len - 1].items); - try a.special.put(inst, extra_index); + try a.special.put(gpa, inst, extra_index); return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, @@ -452,7 +483,7 @@ fn analyzeInst( fn trackOperands( a: *Analysis, - new_set: ?*std.AutoHashMap(Air.Inst.Index, void), + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), inst: Air.Inst.Index, main_tomb: bool, operands: [bpi - 1]Air.Inst.Ref, @@ -468,12 +499,12 @@ fn trackOperands( tomb_bits <<= 1; const op_int = @enumToInt(operands[i]); if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const operand: Air.Inst.Index = op_int - Air.Inst.Ref.typed_value_map.len; + const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); const prev = try table.fetchPut(gpa, operand, {}); if (prev == null) { // Death. tomb_bits |= 1; - if (new_set) |ns| try ns.putNoClobber(operand, {}); + if (new_set) |ns| try ns.putNoClobber(gpa, operand, {}); } } a.storeTombBits(inst, tomb_bits); diff --git a/src/Module.zig b/src/Module.zig index 5972c2bdcf..7ec9c7e93d 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1225,6 +1225,30 @@ pub const Scope = struct { pub fn getFileScope(block: *Block) *Scope.File { return block.src_decl.namespace.file_scope; } + + pub fn addTyOp( + block: *Block, + tag: Air.Inst.Tag, + ty: Type, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + const sema = block.sema; + const gpa = sema.gpa; + + try sema.air_instructions.ensureUnusedCapacity(gpa, 1); + try block.instructions.ensureUnusedCapacity(gpa, 1); + + const inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = tag, + .data = .{ .ty_op = .{ + .ty = try sema.addType(ty), + .operand = operand, + } }, + }); + block.instructions.appendAssumeCapacity(inst); + return Sema.indexToRef(inst); + } }; }; @@ -3408,7 +3432,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen()); + const param_inst_list = try gpa.alloc(Air.Inst.Ref, fn_ty.fnParamLen()); defer gpa.free(param_inst_list); var sema: Sema = .{ @@ -3440,10 +3464,13 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer inner_block.instructions.deinit(gpa); // AIR requires the arg parameters to be the first N instructions. 
+ try inner_block.instructions.ensureTotalCapacity(gpa, param_inst_list.len); for (param_inst_list) |*param_inst, param_index| { const param_type = fn_ty.fnParamType(param_index); const ty_ref = try sema.addType(param_type); - param_inst.* = @intCast(u32, sema.air_instructions.len); + const arg_index = @intCast(u32, sema.air_instructions.len); + inner_block.instructions.appendAssumeCapacity(arg_index); + param_inst.* = Sema.indexToRef(arg_index); try sema.air_instructions.append(gpa, .{ .tag = .arg, .data = .{ @@ -3454,7 +3481,6 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { }, }); } - try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; log.debug("set {s} to in_progress", .{decl.name}); @@ -4043,13 +4069,11 @@ pub fn floatMul( } pub fn simplePtrType( - mod: *Module, arena: *Allocator, elem_ty: Type, mutable: bool, size: std.builtin.TypeInfo.Pointer.Size, ) Allocator.Error!Type { - _ = mod; if (!mutable and size == .Slice and elem_ty.eql(Type.initTag(.u8))) { return Type.initTag(.const_slice_u8); } diff --git a/src/Sema.zig b/src/Sema.zig index 54c42a482d..fc130cd4a4 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -36,7 +36,7 @@ func: ?*Module.Fn, /// > Denormalized data to make `resolveInst` faster. This is 0 if not inside a function, /// > otherwise it is the number of parameters of the function. /// > param_count: u32 -param_inst_list: []const Air.Inst.Index, +param_inst_list: []const Air.Inst.Ref, branch_quota: u32 = 1000, branch_count: u32 = 0, /// This field is updated when a new source location becomes active, so that @@ -59,8 +59,6 @@ const TypedValue = @import("TypedValue.zig"); const Air = @import("Air.zig"); const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); -const Inst = ir.Inst; -const Body = ir.Body; const trace = @import("tracy.zig").trace; const Scope = Module.Scope; const InnerError = Module.InnerError; @@ -117,7 +115,7 @@ pub fn analyzeFnBody( /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. 
-fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Index { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -513,7 +511,7 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } @@ -529,12 +527,12 @@ pub fn analyzeBody( // const break_inst = try sema.analyzeBody(block, inline_body); // const break_data = datas[break_inst].@"break"; // if (inst == break_data.block_inst) { - // break :blk try sema.resolveInst(break_data.operand); + // break :blk sema.resolveInst(break_data.operand); // } else { // return break_inst; // } //}, - else => @panic("TODO remove else prong"), + else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), }; if (sema.getAirType(air_inst).isNoReturn()) return always_noreturn; @@ -543,7 +541,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -598,7 +596,7 @@ fn resolveConstBool( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) !bool { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.bool); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -611,7 +609,7 @@ fn resolveConstString( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) ![]u8 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const wanted_type = Type.initTag(.const_slice_u8); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); @@ -619,24 +617,39 @@ fn resolveConstString( } pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); return sema.resolveAirAsType(block, src, air_inst); } -fn resolveAirAsType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, air_inst: Air.Inst.Index) !Type { +fn resolveAirAsType( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_inst: Air.Inst.Ref, +) !Type { const wanted_type = Type.initTag(.@"type"); const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced_inst); return val.toType(sema.arena); } -fn resolveConstValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !Value { - return (try sema.resolveDefinedValue(block, src, base)) orelse +fn resolveConstValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !Value { + return (try sema.resolveDefinedValue(block, src, air_ref)) orelse return sema.failWithNeededComptime(block, src); } 
-fn resolveDefinedValue(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, base: Air.Inst.Index) !?Value { - if (try sema.resolvePossiblyUndefinedValue(block, src, base)) |val| { +fn resolveDefinedValue( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + air_ref: Air.Inst.Ref, +) !?Value { + if (try sema.resolvePossiblyUndefinedValue(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); } @@ -649,13 +662,29 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - base: Air.Inst.Index, + air_ref: Air.Inst.Ref, ) !?Value { - if (try sema.typeHasOnePossibleValue(block, src, base.ty)) |opv| { + const ty = sema.getTypeOfAirRef(air_ref); + if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; } - const inst = base.castTag(.constant) orelse return null; - return inst.val; + // First section of indexes correspond to a set number of constant values. + var i: usize = @enumToInt(air_ref); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val; + } + i -= Air.Inst.Ref.typed_value_map.len; + + switch (sema.air_instructions.items(.tag)[i]) { + .constant => { + const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; + return sema.air_values.items[ty_pl.payload]; + }, + .const_ty => { + return sema.air_instructions.items(.data)[i].ty.toValue(undefined) catch unreachable; + }, + else => return null, + } } fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { @@ -677,7 +706,7 @@ fn resolveAlreadyCoercedInt( comptime Int: type, ) !Int { comptime assert(@typeInfo(Int).Int.bits <= 64); - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_inst); switch (@typeInfo(Int).Int.signedness) { .signed => return @intCast(Int, val.toSignedInt()), @@ -692,7 +721,7 @@ fn resolveInt( zir_ref: Zir.Inst.Ref, dest_type: Type, ) !u64 { - const air_inst = try sema.resolveInst(zir_ref); + const air_inst = sema.resolveInst(zir_ref); const coerced = try sema.coerce(block, dest_type, air_inst, src); const val = try sema.resolveConstValue(block, src, coerced); @@ -705,21 +734,21 @@ pub fn resolveInstConst( src: LazySrcLoc, zir_ref: Zir.Inst.Ref, ) InnerError!TypedValue { - const air_inst = try sema.resolveInst(zir_ref); - const val = try sema.resolveConstValue(block, src, air_inst); + const air_ref = sema.resolveInst(zir_ref); + const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ - .ty = air_inst.ty, + .ty = sema.getTypeOfAirRef(air_ref), .val = val, }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -754,7 +783,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const small = 
@bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -825,7 +854,7 @@ fn zirEnumDecl( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1022,7 +1051,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1086,7 +1115,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1106,7 +1135,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1146,7 +1175,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1154,16 +1183,16 @@ fn zirRetPtr( try sema.requireFunctionBlock(block, src); const fn_ty = sema.func.?.owner_decl.ty; const ret_type = fn_ty.fnReturnType(); - const ptr_type = try sema.mod.simplePtrType(sema.arena, ret_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, ret_type, true, .One); return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeRef(block, inst_data.src(), operand); } @@ -1171,7 +1200,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1187,7 +1216,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.ensureResultUsed(block, operand, src); @@ -1196,7 +1225,7 @@ fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) I fn ensureResultUsed( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, ) InnerError!void { switch (operand.ty.zigTypeTag()) { @@ -1210,7 +1239,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); switch (operand.ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => return sema.mod.fail(&block.base, src, "error is discarded", .{}), @@ -1218,13 +1247,13 @@ fn zirEnsureResultNonError(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const array_ptr = try sema.resolveInst(inst_data.operand); + const array_ptr = sema.resolveInst(inst_data.operand); const elem_ty = array_ptr.ty.elemType(); if (!elem_ty.isIndexable()) { @@ -1267,7 +1296,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air // Set the name of the Air.Arg instruction for use by codegen debug info. const air_arg = sema.param_inst_list[arg_index]; - sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[refToIndex(air_arg).?].ty_str.str = inst_data.start; return air_arg; } @@ -1275,13 +1304,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1289,7 +1318,7 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); const val_payload = try sema.arena.create(Value.Payload.ComptimeAlloc); val_payload.* = .{ @@ -1304,13 +1333,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1318,12 +1347,12 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_decl_src = inst_data.src(); const var_type = try sema.resolveType(block, ty_src, inst_data.operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn 
zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1332,7 +1361,7 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); try sema.validateVarType(block, ty_src, var_type); - const ptr_type = try sema.mod.simplePtrType(sema.arena, var_type, true, .One); + const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); return block.addNoOp(var_decl_src, ptr_type, .alloc); } @@ -1342,7 +1371,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1372,7 +1401,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const ptr_val = ptr.castTag(.constant).?.val; const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; const peer_inst_list = inferred_alloc.data.stored_inst_list.items; @@ -1385,7 +1414,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde if (var_is_mut) { try sema.validateVarType(block, ty_src, final_elem_ty); } - const final_ptr_ty = try sema.mod.simplePtrType(sema.arena, final_elem_ty, true, .One); + const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One); // Change it to a normal alloc. ptr.ty = final_ptr_ty; @@ -1406,7 +1435,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind const struct_obj: *Module.Struct = s: { const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; - const object_ptr = try sema.resolveInst(field_ptr_extra.lhs); + const object_ptr = sema.resolveInst(field_ptr_extra.lhs); break :s object_ptr.ty.elemType().castTag(.@"struct").?.data; }; @@ -1535,9 +1564,9 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In // to omit it. return; } - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. 
const src: LazySrcLoc = .unneeded; @@ -1552,14 +1581,14 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const src: LazySrcLoc = .unneeded; const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); const inferred_alloc = ptr.castTag(.constant).?.val.castTag(.inferred_alloc).?; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, value); // Create a runtime bitcast instruction with exactly the type the pointer wants. - const ptr_ty = try sema.mod.simplePtrType(sema.arena, value.ty, true, .One); + const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); @@ -1578,8 +1607,8 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!v defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const ptr = try sema.resolveInst(bin_inst.lhs); - const value = try sema.resolveInst(bin_inst.rhs); + const ptr = sema.resolveInst(bin_inst.lhs); + const value = sema.resolveInst(bin_inst.rhs); return sema.storePtr(block, sema.src, ptr, value); } @@ -1590,18 +1619,18 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const ptr = try sema.resolveInst(extra.lhs); - const value = try sema.resolveInst(extra.rhs); + const ptr = sema.resolveInst(extra.lhs); + const value = sema.resolveInst(extra.rhs); return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const inst_data = sema.code.instructions.items(.data)[inst].param_type; - const fn_inst = try sema.resolveInst(inst_data.callee); + const fn_inst = sema.resolveInst(inst_data.callee); const param_index = inst_data.param_index; const fn_ty: Type = switch (fn_inst.ty.zigTypeTag()) { @@ -1631,7 +1660,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, src, param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1659,7 +1688,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1668,7 +1697,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return 
sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1686,7 +1715,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! }); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1699,7 +1728,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1728,7 +1757,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1741,7 +1770,7 @@ fn zirCompileLog( for (args) |arg_ref, i| { if (i != 0) try writer.print(", ", .{}); - const arg = try sema.resolveInst(arg_ref); + const arg = sema.resolveInst(arg_ref); if (try sema.resolvePossiblyUndefinedValue(block, src, arg)) |val| { try writer.print("@as({}, {})", .{ arg.ty, val }); } else { @@ -1773,12 +1802,12 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); - const msg_inst = try sema.resolveInst(inst_data.operand); + const msg_inst = sema.resolveInst(inst_data.operand); return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1843,7 +1872,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1853,13 +1882,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1917,7 +1946,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1928,7 +1957,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2088,7 +2117,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerE const inst_data = sema.code.instructions.items(.data)[inst].@"break"; const src = sema.src; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const zir_block = inst_data.block_inst; var block = start_block; @@ -2136,7 +2165,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2144,7 +2173,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2198,7 +2227,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2208,12 +2237,12 @@ fn zirCall( const extra = sema.code.extraData(Zir.Inst.Call, inst_data.payload_index); const args = sema.code.refSlice(extra.end, extra.data.args_len); - const func = try sema.resolveInst(extra.data.callee); + const func = sema.resolveInst(extra.data.callee); // TODO handle function calls of generic functions - const resolved_args = try sema.arena.alloc(Air.Inst.Index, args.len); + const resolved_args = try sema.arena.alloc(Air.Inst.Ref, args.len); for (args) |zir_arg, i| { // the args are already casted to the result of a param type instruction. - resolved_args[i] = try sema.resolveInst(zir_arg); + resolved_args[i] = sema.resolveInst(zir_arg); } return sema.analyzeCall(block, func, func_src, call_src, modifier, ensure_result_used, resolved_args); @@ -2222,13 +2251,13 @@ fn zirCall( fn analyzeCall( sema: *Sema, block: *Scope.Block, - func: Air.Inst.Index, + func: Air.Inst.Ref, func_src: LazySrcLoc, call_src: LazySrcLoc, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, - args: []const Air.Inst.Index, -) InnerError!Air.Inst.Index { + args: []const Air.Inst.Ref, +) InnerError!Air.Inst.Ref { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2285,7 +2314,7 @@ fn analyzeCall( const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or func.ty.fnCallingConvention() == .Inline; - const result: Air.Inst.Index = if (is_inline_call) res: { + const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { .function => func_val.castTag(.function).?.data, @@ -2383,7 +2412,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2395,7 +2424,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2407,7 +2436,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2415,7 +2444,7 
@@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.constType(sema.arena, src, elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -2430,7 +2459,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.mod.constType(sema.arena, src, vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2443,7 +2472,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2458,7 +2487,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.mod.constType(sema.arena, .unneeded, array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2471,7 +2500,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.mod.constType(sema.arena, src, anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2492,7 +2521,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.constType(sema.arena, src, err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2511,14 +2540,14 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); const op_coerced = try sema.coerce(block, Type.initTag(.anyerror), op, operand_src); const result_ty = Type.initTag(.u16); @@ -2541,7 +2570,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, result_ty, 
.bitcast, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2549,7 +2578,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const op = try sema.resolveInst(inst_data.operand); + const op = sema.resolveInst(inst_data.operand); if (try sema.resolveDefinedValue(block, operand_src, op)) |value| { const int = value.toUnsignedInt(); @@ -2574,7 +2603,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2583,8 +2612,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); if (rhs.ty.zigTypeTag() == .Bool and lhs.ty.zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 'bool'", .{}); @@ -2664,7 +2693,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2678,15 +2707,15 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); - const enum_tag: Air.Inst.Index = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand.ty.zigTypeTag()) { .Enum => operand, .Union => { //if (!operand.ty.unionHasTag()) { @@ -2760,7 +2789,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2770,7 +2799,7 @@ fn zirIntToEnum(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const dest_ty = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); if (dest_ty.zigTypeTag() != .Enum) { return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty}); @@ -2821,12 +2850,12 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const optional_ptr = try sema.resolveInst(inst_data.operand); + const optional_ptr = sema.resolveInst(inst_data.operand); assert(optional_ptr.ty.zigTypeTag() == .Pointer); const src = inst_data.src(); @@ -2836,7 +2865,7 @@ fn zirOptionalPayloadPtr( } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try sema.mod.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); + const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); if (optional_ptr.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2864,13 +2893,13 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const opt_type = operand.ty; if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); @@ -2902,13 +2931,13 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); @@ -2936,19 +2965,19 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); - const operand_pointer_ty = try sema.mod.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); + const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, 
!operand.ty.isConstPtr(), .One); if (operand.value()) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2975,13 +3004,13 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); @@ -3001,13 +3030,13 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); assert(operand.ty.zigTypeTag() == .Pointer); if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) @@ -3035,7 +3064,7 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); if (operand.ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { @@ -3048,7 +3077,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3099,7 +3128,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3240,7 +3269,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3248,7 +3277,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air. 
return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3264,18 +3293,18 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const dest_type = try sema.resolveType(block, src, zir_dest_type); - const operand = try sema.resolveInst(zir_operand); + const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); if (ptr.ty.zigTypeTag() != .Pointer) { const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}); @@ -3287,7 +3316,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, ty, .ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3296,7 +3325,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const object_ptr = if (object.ty.zigTypeTag() == .Pointer) object else @@ -3305,7 +3334,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3314,11 +3343,11 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const field_name_src: LazySrcLoc = .{ .node_offset_field_name = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3326,14 +3355,14 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object = try sema.resolveInst(extra.lhs); + const object = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); const object_ptr = try sema.analyzeRef(block, src, object); const result_ptr = try sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3341,12 +3370,12 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const field_name_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.FieldNamed, inst_data.payload_index).data; - const object_ptr = try sema.resolveInst(extra.lhs); + const object_ptr = sema.resolveInst(extra.lhs); const field_name = try sema.resolveConstString(block, field_name_src, extra.field_name); return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3357,7 +3386,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_int = switch (dest_type.zigTypeTag()) { .ComptimeInt => true, @@ -3389,20 +3418,21 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const dest_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; + const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); - return sema.bitcast(block, dest_type, operand); + const operand = sema.resolveInst(extra.rhs); + return sema.bitcast(block, dest_type, operand, operand_src); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3413,7 +3443,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const extra = 
sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const dest_type = try sema.resolveType(block, dest_ty_src, extra.lhs); - const operand = try sema.resolveInst(extra.rhs); + const operand = sema.resolveInst(extra.rhs); const dest_is_comptime_float = switch (dest_type.zigTypeTag()) { .ComptimeFloat => true, @@ -3445,22 +3475,22 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array = try sema.resolveInst(bin_inst.lhs); + const array = sema.resolveInst(bin_inst.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, sema.src, array); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const elem_index = sema.resolveInst(bin_inst.rhs); const result_ptr = try sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3468,27 +3498,27 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array = try sema.resolveInst(extra.lhs); + const array = sema.resolveInst(extra.lhs); const array_ptr = if (array.ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, src, array); - const elem_index = try sema.resolveInst(extra.rhs); + const elem_index = sema.resolveInst(extra.rhs); const result_ptr = try sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const array_ptr = try sema.resolveInst(bin_inst.lhs); - const elem_index = try sema.resolveInst(bin_inst.rhs); + const array_ptr = sema.resolveInst(bin_inst.lhs); + const elem_index = sema.resolveInst(bin_inst.rhs); return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3496,39 +3526,39 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE const src = inst_data.src(); const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const elem_index = try 
sema.resolveInst(extra.rhs); + const array_ptr = sema.resolveInst(extra.lhs); + const elem_index = sema.resolveInst(extra.rhs); return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceStart, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.SliceEnd, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3536,10 +3566,10 @@ fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne const src = inst_data.src(); const sentinel_src: LazySrcLoc = .{ .node_offset_slice_sentinel = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SliceSentinel, inst_data.payload_index).data; - const array_ptr = try sema.resolveInst(extra.lhs); - const start = try sema.resolveInst(extra.start); - const end = try sema.resolveInst(extra.end); - const sentinel = try sema.resolveInst(extra.sentinel); + const array_ptr = sema.resolveInst(extra.lhs); + const start = sema.resolveInst(extra.start); + const end = sema.resolveInst(extra.end); + const sentinel = sema.resolveInst(extra.sentinel); return sema.analyzeSlice(block, src, array_ptr, start, end, sentinel, sentinel_src); } @@ -3550,7 +3580,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3569,7 +3599,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3588,7 +3618,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3597,7 +3627,7 @@ fn zirSwitchBlock( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = 
sema.code.extraData(Zir.Inst.SwitchBlock, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3621,7 +3651,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3630,7 +3660,7 @@ fn zirSwitchBlockMulti( const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.SwitchBlockMulti, inst_data.payload_index); - const operand_ptr = try sema.resolveInst(extra.data.operand); + const operand_ptr = sema.resolveInst(extra.data.operand); const operand = if (is_ref) try sema.analyzeLoad(block, src, operand_ptr, operand_src) else @@ -3651,14 +3681,14 @@ fn zirSwitchBlockMulti( fn analyzeSwitch( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, extra_end: usize, special_prong: Zir.SpecialProng, scalar_cases_len: usize, multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const gpa = sema.gpa; const mod = sema.mod; @@ -4217,7 +4247,7 @@ fn analyzeSwitch( const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); _ = try sema.resolveConstValue(&child_block, item.src, item); const cmp_ok = try case_block.addBinOp(item.src, bool_ty, .cmp_eq, operand, item); @@ -4235,8 +4265,8 @@ fn analyzeSwitch( const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); extra_index += 1; - const item_first = try sema.resolveInst(first_ref); - const item_last = try sema.resolveInst(last_ref); + const item_first = sema.resolveInst(first_ref); + const item_last = sema.resolveInst(last_ref); _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); @@ -4334,7 +4364,7 @@ fn resolveSwitchItemVal( switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, ) InnerError!TypedValue { - const item = try sema.resolveInst(item_ref); + const item = sema.resolveInst(item_ref); // We have to avoid the other helper functions here because we cannot construct a LazySrcLoc // because we only have the switch AST node. Only if we know for sure we need to report // a compile error do we resolve the full source locations. 
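(The change repeated throughout these hunks is that `sema.resolveInst` is now infallible: it returns `Air.Inst.Ref` rather than `InnerError!Air.Inst.Index`, so every `try` in front of it is dropped. A plausible sketch of its new shape, assuming ZIR refs share the typed_value_map convention and that Sema keeps a ZIR-to-AIR instruction map populated during analysis; `inst_map` is an assumed name, not shown in this patch.)

fn resolveInst(sema: *Sema, zir_ref: Zir.Inst.Ref) Air.Inst.Ref {
    var i: usize = @enumToInt(zir_ref);
    // Well-known constant refs occupy the same leading indices in both
    // ZIR and AIR, so they map across unchanged.
    if (i < Zir.Inst.Ref.typed_value_map.len) {
        return @intToEnum(Air.Inst.Ref, @intCast(u32, i));
    }
    i -= Zir.Inst.Ref.typed_value_map.len;
    // Anything else was already analyzed; return the AIR ref recorded
    // for that ZIR instruction. No error path remains.
    return sema.inst_map.get(@intCast(u32, i)).?; // hypothetical map
}
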
@@ -4513,7 +4543,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4522,7 +4552,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -4547,7 +4577,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return mod.constBool(arena, src, false); } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4572,13 +4602,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return mod.constType(sema.arena, src, file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4587,7 +4617,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4599,8 +4629,8 @@ fn zirBitwise( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, - ir_tag: ir.Inst.Tag, -) InnerError!Air.Inst.Index { + air_tag: Air.Inst.Tag, +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4609,8 +4639,8 @@ fn zirBitwise( const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); @@ -4655,10 +4685,10 @@ fn zirBitwise( } try sema.requireRuntimeBlock(block, src); - return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); + return 
block.addBinOp(src, scalar_type, air_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4666,7 +4696,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4674,7 +4704,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4687,7 +4717,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4695,13 +4725,13 @@ fn zirNegate( const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(.zero); - const rhs = try sema.resolveInst(inst_data.operand); + const lhs = sema.resolveInst(.zero); + const rhs = sema.resolveInst(inst_data.operand); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4711,8 +4741,8 @@ fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); return sema.analyzeArithmetic(block, tag_override, lhs, rhs, sema.src, lhs_src, rhs_src); } @@ -4721,7 +4751,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4735,12 +4765,12 @@ fn analyzeArithmetic( sema: *Sema, block: *Scope.Block, zir_tag: Zir.Inst.Tag, - lhs: Air.Inst.Index, - rhs: Air.Inst.Index, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -4850,14 +4880,14 @@ fn 
analyzeArithmetic( return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ptr_src: LazySrcLoc = .{ .node_offset_deref_ptr = inst_data.src_node }; - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); return sema.analyzeLoad(block, src, ptr, ptr_src); } @@ -4865,7 +4895,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4915,7 +4945,7 @@ fn zirAsm( const name = sema.code.nullTerminatedString(input.data.name); _ = name; // TODO: use the name - arg.* = try sema.resolveInst(input.data.operand); + arg.* = sema.resolveInst(input.data.operand); inputs[arg_i] = sema.code.nullTerminatedString(input.data.constraint); } @@ -4949,7 +4979,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4960,8 +4990,8 @@ fn zirCmp( const src: LazySrcLoc = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; - const lhs = try sema.resolveInst(extra.lhs); - const rhs = try sema.resolveInst(extra.rhs); + const lhs = sema.resolveInst(extra.lhs); + const rhs = sema.resolveInst(extra.rhs); const is_equality_cmp = switch (op) { .eq, .neq => true, @@ -5047,7 +5077,7 @@ fn zirCmp( return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5057,7 +5087,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5071,7 +5101,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5080,7 +5110,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5089,12 +5119,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5137,31 +5167,31 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.mod.constType(sema.arena, src, operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand_ptr = try sema.resolveInst(inst_data.operand); + const operand_ptr = sema.resolveInst(inst_data.operand); const elem_ty = operand_ptr.ty.elemType(); return sema.mod.constType(sema.arena, src, elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5171,7 +5201,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5183,20 +5213,20 @@ fn zirTypeofPeer( defer sema.gpa.free(inst_list); for (args) |arg_ref, i| { - inst_list[i] = try sema.resolveInst(arg_ref); + inst_list[i] = sema.resolveInst(arg_ref); } const result_type = try sema.resolvePeerTypes(block, src, inst_list); return sema.mod.constType(sema.arena, src, result_type); } -fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const uncasted_operand = try sema.resolveInst(inst_data.operand); + const uncasted_operand = sema.resolveInst(inst_data.operand); const bool_type = Type.initTag(.bool); const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src); @@ -5212,16 +5242,16 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, comptime is_bool_or: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const src: LazySrcLoc = .unneeded; const bool_type = Type.initTag(.bool); const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const uncasted_lhs = try sema.resolveInst(bin_inst.lhs); + const uncasted_lhs = sema.resolveInst(bin_inst.lhs); const lhs = try sema.coerce(block, bool_type, uncasted_lhs, uncasted_lhs.src); - const uncasted_rhs = try sema.resolveInst(bin_inst.rhs); + const uncasted_rhs = sema.resolveInst(bin_inst.rhs); const rhs = try sema.coerce(block, bool_type, uncasted_rhs, uncasted_rhs.src); if (lhs.value()) |lhs_val| { @@ -5234,7 +5264,7 @@ fn zirBoolOp( } } try sema.requireRuntimeBlock(block, src); - const tag: ir.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; + const tag: Air.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; return block.addBinOp(src, bool_type, tag, lhs, rhs); } @@ -5243,14 +5273,14 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const datas = sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; const src: LazySrcLoc = .unneeded; - const lhs = try sema.resolveInst(inst_data.lhs); + const lhs = sema.resolveInst(inst_data.lhs); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; @@ -5313,13 +5343,13 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNull(block, src, operand, true); } @@ -5327,33 +5357,33 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) 
InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); - const ptr = try sema.resolveInst(inst_data.operand); + const ptr = sema.resolveInst(inst_data.operand); const loaded = try sema.analyzeLoad(block, src, ptr, src); return sema.analyzeIsNonErr(block, src, loaded); } @@ -5374,7 +5404,7 @@ fn zirCondbr( const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - const uncasted_cond = try sema.resolveInst(extra.data.condition); + const uncasted_cond = sema.resolveInst(extra.data.condition); const cond = try sema.coerce(parent_block, Type.initTag(.bool), uncasted_cond, cond_src); if (try sema.resolveDefinedValue(parent_block, src, cond)) |cond_val| { @@ -5456,7 +5486,7 @@ fn zirRetCoerce( defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_tok; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src, need_coercion); @@ -5467,7 +5497,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const operand = try sema.resolveInst(inst_data.operand); + const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); return sema.analyzeRet(block, operand, src, false); @@ -5476,7 +5506,7 @@ fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError fn analyzeRet( sema: *Sema, block: *Scope.Block, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, src: LazySrcLoc, need_coercion: bool, ) InnerError!Zir.Inst.Index { @@ -5511,7 +5541,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5532,7 +5562,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.mod.constType(sema.arena, .unneeded, ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { 
+fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5586,7 +5616,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.constType(sema.arena, src, ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5600,13 +5630,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5657,7 +5687,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.failWithOwnedErrorMsg(&block.base, msg); } found_fields[field_index] = item.data.field_type; - field_inits[field_index] = try sema.resolveInst(item.data.init); + field_inits[field_index] = sema.resolveInst(item.data.init); } var root_msg: ?*Module.ErrorMsg = null; @@ -5719,7 +5749,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5727,7 +5757,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5735,7 +5765,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Index { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5743,13 +5773,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, 
inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5771,7 +5801,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5780,7 +5810,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5789,91 +5819,91 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn 
zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; - const operand_res = try sema.resolveInst(extra.rhs); + const operand_res = sema.resolveInst(extra.rhs); const operand_coerced = try sema.coerce(block, Type.initTag(.usize), operand_res, operand_src); const type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -5929,199 +5959,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(src, type_res, .bitcast, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = 
inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); 
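 // A sketch of what the eventual lowering will need (nothing here is
 // implemented by this change): given a pointer to a field, the parent
 // pointer is recovered by subtracting the field's byte offset, roughly
 //   @intToPtr(*Parent, @ptrToInt(field_ptr) - @offsetOf(Parent, "field"))
 // where `Parent` and `"field"` would come from the instruction's operands.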
} -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Index { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6132,7 +6162,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6144,7 +6174,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6210,7 +6240,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -6277,7 +6307,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6287,7 +6317,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6297,7 +6327,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6307,7 +6337,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: 
Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6317,7 +6347,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6327,7 +6357,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6361,7 +6391,7 @@ pub const PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Index, panic_id: PanicId) !void { +fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, panic_id: PanicId) !void { const block_inst = try sema.arena.create(Inst.Block); block_inst.* = .{ .base = .{ @@ -6423,7 +6453,7 @@ fn panicWithMsg( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - msg_inst: Air.Inst.Index, + msg_inst: Air.Inst.Ref, ) !Zir.Inst.Index { const mod = sema.mod; const arena = sema.arena; @@ -6439,7 +6469,7 @@ fn panicWithMsg( const panic_fn = try sema.getBuiltin(block, src, "panic"); const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace"); const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty); - const ptr_stack_trace_ty = try mod.simplePtrType(arena, stack_trace_ty, true, .One); + const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One); const null_stack_trace = try mod.constInst(arena, src, .{ .ty = try mod.optionalType(arena, ptr_stack_trace_ty), .val = Value.initTag(.null_value), @@ -6500,10 +6530,10 @@ fn namedFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - object_ptr: Air.Inst.Index, + object_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; @@ -6579,7 +6609,7 @@ fn namedFieldPtr( } else (try mod.getErrorValue(field_name)).key; return mod.constInst(arena, src, .{ - .ty = try mod.simplePtrType(arena, child_type, false, .One), + .ty = try Module.simplePtrType(arena, child_type, false, .One), .val = try Value.Tag.ref_val.create( arena, try Value.Tag.@"error".create(arena, .{ @@ -6633,7 +6663,7 @@ fn namedFieldPtr( const field_index_u32 = @intCast(u32, field_index); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32); return mod.constInst(arena, src, .{ - .ty = try mod.simplePtrType(arena, child_type, false, .One), + .ty = try Module.simplePtrType(arena, child_type, false, .One), .val = try Value.Tag.ref_val.create(arena, enum_val), }); }, @@ -6653,7 +6683,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?Air.Inst.Index { +) InnerError!?Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; if (try 
sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6677,11 +6707,11 @@ fn analyzeStructFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - struct_ptr: Air.Inst.Index, + struct_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6692,7 +6722,7 @@ fn analyzeStructFieldPtr( const field_index = struct_obj.fields.getIndex(field_name) orelse return sema.failWithBadFieldAccess(block, struct_obj, field_name_src, field_name); const field = struct_obj.fields.values()[field_index]; - const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { return mod.constInst(arena, src, .{ @@ -6712,11 +6742,11 @@ fn analyzeUnionFieldPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - union_ptr: Air.Inst.Index, + union_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6728,7 +6758,7 @@ fn analyzeUnionFieldPtr( return sema.failWithBadUnionFieldAccess(block, union_obj, field_name_src, field_name); const field = union_obj.fields.values()[field_index]; - const ptr_field_ty = try mod.simplePtrType(arena, field.ty, true, .One); + const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { // TODO detect inactive union field and emit compile error @@ -6749,10 +6779,10 @@ fn elemPtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - elem_index: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6776,10 +6806,10 @@ fn elemPtrArray( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - elem_index: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { if (array_ptr.value()) |array_ptr_val| { if (elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. @@ -6804,35 +6834,41 @@ fn coerce( sema: *Sema, block: *Scope.Block, dest_type: Type, - inst: Air.Inst.Index, + inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { if (dest_type.tag() == .var_args_param) { - return sema.coerceVarArgParam(block, inst); + return sema.coerceVarArgParam(block, inst, inst_src); } + + const inst_ty = sema.getTypeOfAirRef(inst); // If the types are the same, we can return the operand. 
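 // (`inst` is an `Air.Inst.Ref` under the reworked layout, not a pointer with
 // its own `ty` field, so the type must be recovered through `getTypeOfAirRef`;
 // see the helper near the bottom of this file. A sketch of the mapping it
 // relies on, per `indexToRef`/`refToIndex` below:
 //   ref < typed_value_map.len   => a well-known type/value with a fixed Type
 //   ref >= typed_value_map.len  => instruction at index ref - typed_value_map.len)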
- if (dest_type.eql(inst.ty))
+ if (dest_type.eql(inst_ty))
 return inst;
- const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty);
+ const in_memory_result = coerceInMemoryAllowed(dest_type, inst_ty);
 if (in_memory_result == .ok) {
- return sema.bitcast(block, dest_type, inst);
+ return sema.bitcast(block, dest_type, inst, inst_src);
 }
 const mod = sema.mod;
 const arena = sema.arena;
 // undefined to anything
- if (inst.value()) |val| {
- if (val.isUndef() or inst.ty.zigTypeTag() == .Undefined) {
- return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = val });
+ if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| {
+ if (val.isUndef() or inst_ty.zigTypeTag() == .Undefined) {
+ return sema.addConstant(dest_type, val);
 }
 }
- assert(inst.ty.zigTypeTag() != .Undefined);
+ assert(inst_ty.zigTypeTag() != .Undefined);
+
+ if (true) {
+ @panic("TODO finish AIR memory layout rework");
+ }
 // T to E!T or E to E!T
 if (dest_type.tag() == .error_union) {
- return try sema.wrapErrorUnion(block, dest_type, inst);
+ return try sema.wrapErrorUnion(block, dest_type, inst, inst_src);
 }
 // comptime known number to other number
@@ -6844,14 +6880,14 @@ fn coerce(
 switch (dest_type.zigTypeTag()) {
 .Optional => {
 // null to ?T
- if (inst.ty.zigTypeTag() == .Null) {
+ if (inst_ty.zigTypeTag() == .Null) {
 return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) });
 }
 // T to ?T
 var buf: Type.Payload.ElemType = undefined;
 const child_type = dest_type.optionalChild(&buf);
- if (child_type.eql(inst.ty)) {
+ if (child_type.eql(inst_ty)) {
 return sema.wrapOptional(block, dest_type, inst);
 } else if (try sema.coerceNum(block, child_type, inst)) |some| {
 return sema.wrapOptional(block, dest_type, some);
 }
 },
 .Pointer => {
 // Coercions where the source is a single pointer to an array.
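 // (For example, *[3]u8 coercing to []u8 or [*]u8. The checks below only
 // allow this when the element types are in-memory compatible and the
 // destination does not drop const/volatile qualifiers.)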
src_array_ptr: { - if (!inst.ty.isSinglePointer()) break :src_array_ptr; - const array_type = inst.ty.elemType(); + if (!inst_ty.isSinglePointer()) break :src_array_ptr; + const array_type = inst_ty.elemType(); if (array_type.zigTypeTag() != .Array) break :src_array_ptr; const array_elem_type = array_type.elemType(); - if (inst.ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; - if (inst.ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; + if (inst_ty.isConstPtr() and !dest_type.isConstPtr()) break :src_array_ptr; + if (inst_ty.isVolatilePtr() and !dest_type.isVolatilePtr()) break :src_array_ptr; const dst_elem_type = dest_type.elemType(); switch (coerceInMemoryAllowed(dst_elem_type, array_elem_type)) { @@ -6904,11 +6940,11 @@ fn coerce( }, .Int => { // integer widening - if (inst.ty.zigTypeTag() == .Int) { + if (inst_ty.zigTypeTag() == .Int) { assert(inst.value() == null); // handled above const dst_info = dest_type.intInfo(target); - const src_info = inst.ty.intInfo(target); + const src_info = inst_ty.intInfo(target); if ((src_info.signedness == dst_info.signedness and dst_info.bits >= src_info.bits) or // small enough unsigned ints can get casted to large enough signed ints (src_info.signedness == .signed and dst_info.signedness == .unsigned and dst_info.bits > src_info.bits)) @@ -6920,10 +6956,10 @@ fn coerce( }, .Float => { // float widening - if (inst.ty.zigTypeTag() == .Float) { + if (inst_ty.zigTypeTag() == .Float) { assert(inst.value() == null); // handled above - const src_bits = inst.ty.floatBits(target); + const src_bits = inst_ty.floatBits(target); const dst_bits = dest_type.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src); @@ -6933,7 +6969,7 @@ fn coerce( }, .Enum => { // enum literal to enum - if (inst.ty.zigTypeTag() == .EnumLiteral) { + if (inst_ty.zigTypeTag() == .EnumLiteral) { const val = try sema.resolveConstValue(block, inst_src, inst); const bytes = val.castTag(.enum_literal).?.data; const resolved_dest_type = try sema.resolveTypeFields(block, inst_src, dest_type); @@ -6965,7 +7001,7 @@ fn coerce( else => {}, } - return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst.ty }); + return mod.fail(&block.base, inst_src, "expected {}, found {}", .{ dest_type, inst_ty }); } const InMemoryCoercionResult = enum { @@ -6982,7 +7018,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) InnerError!?Air.Inst.Index { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7020,9 +7056,15 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.I return null; } -fn coerceVarArgParam(sema: *Sema, block: *Scope.Block, inst: Air.Inst.Index) !Air.Inst.Index { - switch (inst.ty.zigTypeTag()) { - .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst.src, "integer and float literals in var args function must be casted", .{}), +fn coerceVarArgParam( + sema: *Sema, + block: *Scope.Block, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Ref { + const inst_ty = sema.getTypeOfAirRef(inst); + switch (inst_ty.zigTypeTag()) { + .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst_src, "integer 
and float literals in var args function must be casted", .{}), else => {}, } // TODO implement more of this function. @@ -7033,8 +7075,8 @@ fn storePtr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: Air.Inst.Index, - uncasted_value: Air.Inst.Index, + ptr: Air.Inst.Ref, + uncasted_value: Air.Inst.Ref, ) !void { if (ptr.ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); @@ -7082,17 +7124,23 @@ fn storePtr( _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); } -fn bitcast(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { - if (inst.value()) |val| { +fn bitcast( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) InnerError!Air.Inst.Ref { + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } // TODO validate the type size and other compile errors - try sema.requireRuntimeBlock(block, inst.src); - return block.addUnOp(inst.src, dest_type, .bitcast, inst); + try sema.requireRuntimeBlock(block, inst_src); + return block.addTyOp(.bitcast, dest_type, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7100,7 +7148,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
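 // (The Value only identifies the pointed-at data; the pointer "shape" lives
 // in the Type stored alongside it, so re-wrapping the same Value at
 // dest_type is sufficient here.)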
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7108,12 +7156,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Index { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7128,43 +7176,41 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl if (decl_tv.val.tag() == .variable) { return sema.analyzeVarRef(block, src, decl_tv); } - return sema.mod.constInst(sema.arena, src, .{ - .ty = try sema.mod.simplePtrType(sema.arena, decl_tv.ty, false, .One), - .val = try Value.Tag.decl_ref.create(sema.arena, decl), - }); + return sema.addConstant( + try Module.simplePtrType(sema.arena, decl_tv.ty, false, .One), + try Value.Tag.decl_ref.create(sema.arena, decl), + ); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Index { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Ref { const variable = tv.val.castTag(.variable).?.data; - const ty = try sema.mod.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); + const ty = try Module.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); if (!variable.is_mutable and !variable.is_extern) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = ty, - .val = try Value.Tag.ref_val.create(sema.arena, variable.init), - }); + return sema.addConstant(ty, try Value.Tag.ref_val.create(sema.arena, variable.init)); } + const gpa = sema.gpa; try sema.requireRuntimeBlock(block, src); - const inst = try sema.arena.create(Inst.VarPtr); - inst.* = .{ - .base = .{ - .tag = .varptr, - .ty = ty, - .src = src, - }, - .variable = variable, - }; - try block.instructions.append(sema.gpa, &inst.base); - return &inst.base; + try sema.air_variables.append(gpa, variable); + const result_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .varptr, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(ty), + .payload = @intCast(u32, sema.air_variables.items.len - 1), + } }, + }); + try block.instructions.append(gpa, result_inst); + return indexToRef(result_inst); } fn analyzeRef( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, -) InnerError!Air.Inst.Index { + operand: Air.Inst.Ref, +) InnerError!Air.Inst.Ref { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7182,34 +7228,32 @@ fn analyzeLoad( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - ptr: Air.Inst.Index, + ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, -) InnerError!Air.Inst.Index { - const elem_ty = switch (ptr.ty.zigTypeTag()) { - .Pointer => ptr.ty.elemType(), - else => return 
sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}), +) InnerError!Air.Inst.Ref { + const ptr_ty = sema.getTypeOfAirRef(ptr); + const elem_ty = switch (ptr_ty.zigTypeTag()) { + .Pointer => ptr_ty.elemType(), + else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), }; if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| blk: { if (ptr_val.tag() == .int_u64) break :blk; // do it at runtime - return sema.mod.constInst(sema.arena, src, .{ - .ty = elem_ty, - .val = try ptr_val.pointerDeref(sema.arena), - }); + return sema.addConstant(elem_ty, try ptr_val.pointerDeref(sema.arena)); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, elem_ty, .load, ptr); + return block.addTyOp(.load, elem_ty, ptr); } fn analyzeIsNull( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, + operand: Air.Inst.Ref, invert_logic: bool, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7228,8 +7272,8 @@ fn analyzeIsNonErr( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - operand: Air.Inst.Index, -) InnerError!Air.Inst.Index { + operand: Air.Inst.Ref, +) InnerError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); @@ -7249,12 +7293,12 @@ fn analyzeSlice( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - array_ptr: Air.Inst.Index, - start: Air.Inst.Index, + array_ptr: Air.Inst.Ref, + start: Air.Inst.Ref, end_opt: ?Air.Inst.Index, sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7325,10 +7369,10 @@ fn cmpNumeric( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - lhs: Air.Inst.Index, - rhs: Air.Inst.Index, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, op: std.math.CompareOperator, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { assert(lhs.ty.isNumeric()); assert(rhs.ty.isNumeric()); @@ -7494,7 +7538,7 @@ fn cmpNumeric( return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { if (inst.value()) |val| { return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); } @@ -7503,9 +7547,15 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Ins return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); } -fn wrapErrorUnion(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Index) !Air.Inst.Index { +fn wrapErrorUnion( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Index { const err_union = dest_type.castTag(.error_union).?; - if (inst.value()) |val| { + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { if (inst.ty.zigTypeTag() != .ErrorSet) { _ = try sema.coerce(block, err_union.data.payload, inst, 
inst.src); } else switch (err_union.data.error_set.tag()) { @@ -7710,7 +7760,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Air.Inst.Index { +) InnerError!Air.Inst.Ref { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; @@ -7938,6 +7988,68 @@ fn enumFieldSrcLoc( } else unreachable; } +/// Returns the type of the AIR instruction. +fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type { + switch (air_ref) { + .none => unreachable, + .u8_type => return Type.initTag(.u8), + .i8_type => return Type.initTag(.i8), + .u16_type => return Type.initTag(.u16), + .i16_type => return Type.initTag(.i16), + .u32_type => return Type.initTag(.u32), + .i32_type => return Type.initTag(.i32), + .u64_type => return Type.initTag(.u64), + .i64_type => return Type.initTag(.i64), + .u128_type => return Type.initTag(.u128), + .i128_type => return Type.initTag(.i128), + .usize_type => return Type.initTag(.usize), + .isize_type => return Type.initTag(.isize), + .c_short_type => return Type.initTag(.c_short), + .c_ushort_type => return Type.initTag(.c_ushort), + .c_int_type => return Type.initTag(.c_int), + .c_uint_type => return Type.initTag(.c_uint), + .c_long_type => return Type.initTag(.c_long), + .c_ulong_type => return Type.initTag(.c_ulong), + .c_longlong_type => return Type.initTag(.c_longlong), + .c_ulonglong_type => return Type.initTag(.c_ulonglong), + .c_longdouble_type => return Type.initTag(.c_longdouble), + .f16_type => return Type.initTag(.f16), + .f32_type => return Type.initTag(.f32), + .f64_type => return Type.initTag(.f64), + .f128_type => return Type.initTag(.f128), + .c_void_type => return Type.initTag(.c_void), + .bool_type => return Type.initTag(.bool), + .void_type => return Type.initTag(.void), + .type_type => return Type.initTag(.type), + .anyerror_type => return Type.initTag(.anyerror), + .comptime_int_type => return Type.initTag(.comptime_int), + .comptime_float_type => return Type.initTag(.comptime_float), + .noreturn_type => return Type.initTag(.noreturn), + .anyframe_type => return Type.initTag(.@"anyframe"), + .null_type => return Type.initTag(.@"null"), + .undefined_type => return Type.initTag(.@"undefined"), + .enum_literal_type => return Type.initTag(.enum_literal), + .atomic_ordering_type => return Type.initTag(.atomic_ordering), + .atomic_rmw_op_type => return Type.initTag(.atomic_rmw_op), + .calling_convention_type => return Type.initTag(.calling_convention), + .float_mode_type => return Type.initTag(.float_mode), + .reduce_op_type => return Type.initTag(.reduce_op), + .call_options_type => return Type.initTag(.call_options), + .export_options_type => return Type.initTag(.export_options), + .extern_options_type => return Type.initTag(.extern_options), + .manyptr_u8_type => return Type.initTag(.manyptr_u8), + .manyptr_const_u8_type => return Type.initTag(.manyptr_const_u8), + .fn_noreturn_no_args_type => return Type.initTag(.fn_noreturn_no_args), + .fn_void_no_args_type => return Type.initTag(.fn_void_no_args), + .fn_naked_noreturn_no_args_type => return Type.initTag(.fn_naked_noreturn_no_args), + .fn_ccc_void_no_args_type => return Type.initTag(.fn_ccc_void_no_args), + .single_const_pointer_to_comptime_int_type => return Type.initTag(.single_const_pointer_to_comptime_int), + .const_slice_u8_type => return Type.initTag(.const_slice_u8), + else => return sema.getAirType(air_ref), + } +} + +/// Asserts the AIR instruction is a `const_ty` and returns 
the type. fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type { var i: usize = @enumToInt(air_ref); if (i < Air.Inst.Ref.typed_value_map.len) { @@ -8014,13 +8126,27 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } +pub fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { + const gpa = sema.gpa; + const ty_inst = try sema.addType(ty); + try sema.air_values.append(gpa, val); + try sema.air_instructions.append(gpa, .{ + .tag = .constant, + .data = .{ .ty_pl = .{ + .ty = ty_inst, + .payload = @intCast(u32, sema.air_values.items.len - 1), + } }, + }); + return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); +} + const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; -fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { +pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { return @intToEnum(Air.Inst.Ref, ref_start_index + inst); } -fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { +pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { const ref_int = @enumToInt(inst); if (ref_int >= ref_start_index) { return ref_int - ref_start_index; diff --git a/src/codegen.zig b/src/codegen.zig index a6c4b5ad3c..c27a1444ef 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -494,7 +494,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { defer function.blocks.deinit(bin_file.allocator); defer function.exitlude_jump_relocs.deinit(bin_file.allocator); - var call_info = function.resolveCallingConventionValues(src_loc.lazy, fn_type) catch |err| switch (err) { + var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { error.CodegenFail => return Result{ .fail = function.err_msg.? }, else => |e| return e, }; @@ -537,7 +537,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.code.items.len += 4; try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); const stack_end = self.max_end_stack; if (stack_end > math.maxInt(i32)) @@ -578,7 +578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, @@ -758,11 +758,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // TODO inline this logic into every instruction - var i: ir.Inst.DeathsBitIndex = 0; - while (inst.getOperand(i)) |operand| : (i += 1) { - if (inst.operandDies(i)) - self.processDeath(operand); - } + @panic("TODO rework AIR memory layout codegen for processing deaths"); + //var i: ir.Inst.DeathsBitIndex = 0; + //while (inst.getOperand(i)) |operand| : (i += 1) { + // if (inst.operandDies(i)) + // self.processDeath(operand); + //} } } @@ -858,74 +859,76 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { // zig fmt: off - .add => return self.genAdd(inst.castTag(.add).?), - .addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - .sub => return self.genSub(inst.castTag(.sub).?), - .subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - .mul => return self.genMul(inst.castTag(.mul).?), - .mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - .div => return self.genDiv(inst.castTag(.div).?), + //.add => return self.genAdd(inst.castTag(.add).?), + //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), + //.sub => return 
self.genSub(inst.castTag(.sub).?), + //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), + //.mul => return self.genMul(inst.castTag(.mul).?), + //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), + //.div => return self.genDiv(inst.castTag(.div).?), - .cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), + //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), + //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), + //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), + //.cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), + //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), + //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - .bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - .bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - .bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - .xor => return self.genXor(inst.castTag(.xor).?), + //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), + //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), + //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), + //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?), + //.xor => return self.genXor(inst.castTag(.xor).?), - .alloc => return self.genAlloc(inst.castTag(.alloc).?), - .arg => return self.genArg(inst.castTag(.arg).?), - .assembly => return self.genAsm(inst.castTag(.assembly).?), - .bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - .block => return self.genBlock(inst.castTag(.block).?), - .br => return self.genBr(inst.castTag(.br).?), - .br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - .breakpoint => return self.genBreakpoint(inst.src), - .call => return self.genCall(inst.castTag(.call).?), - .cond_br => return self.genCondBr(inst.castTag(.condbr).?), - .dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - .floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - .intcast => return self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), - .is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - .is_null => return self.genIsNull(inst.castTag(.is_null).?), - .is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - .is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - .is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - .is_err => return self.genIsErr(inst.castTag(.is_err).?), - .is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - .load => return self.genLoad(inst.castTag(.load).?), - .loop => return self.genLoop(inst.castTag(.loop).?), - .not => return self.genNot(inst.castTag(.not).?), - .ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - .ref => return self.genRef(inst.castTag(.ref).?), - .ret => return self.genRet(inst.castTag(.ret).?), - .store => return self.genStore(inst.castTag(.store).?), - .struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .switchbr => return 
self.genSwitch(inst.castTag(.switchbr).?), - .varptr => return self.genVarPtr(inst.castTag(.varptr).?), + //.alloc => return self.genAlloc(inst.castTag(.alloc).?), + //.arg => return self.genArg(inst.castTag(.arg).?), + //.assembly => return self.genAsm(inst.castTag(.assembly).?), + //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?), + //.block => return self.genBlock(inst.castTag(.block).?), + //.br => return self.genBr(inst.castTag(.br).?), + //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), + //.breakpoint => return self.genBreakpoint(inst.src), + //.call => return self.genCall(inst.castTag(.call).?), + //.cond_br => return self.genCondBr(inst.castTag(.condbr).?), + //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), + //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), + //.intcast => return self.genIntCast(inst.castTag(.intcast).?), + //.is_non_null => return self.genIsNonNull(inst.castTag(.is_non_null).?), + //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), + //.is_null => return self.genIsNull(inst.castTag(.is_null).?), + //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), + //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), + //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), + //.is_err => return self.genIsErr(inst.castTag(.is_err).?), + //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), + //.load => return self.genLoad(inst.castTag(.load).?), + //.loop => return self.genLoop(inst.castTag(.loop).?), + //.not => return self.genNot(inst.castTag(.not).?), + //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), + //.ref => return self.genRef(inst.castTag(.ref).?), + //.ret => return self.genRet(inst.castTag(.ret).?), + //.store => return self.genStore(inst.castTag(.store).?), + //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?), + //.varptr => return self.genVarPtr(inst.castTag(.varptr).?), - .constant => unreachable, // excluded from function bodies - .unreach => return MCValue{ .unreach = {} }, + //.constant => unreachable, // excluded from function bodies + //.unreach => return MCValue{ .unreach = {} }, - .optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - .optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - .unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - .unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), + //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), + //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), + //.unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), + //.unwrap_errunion_payload_ptr=> return 
self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - .wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), - .wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), + //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), + //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), // zig fmt: on + + else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"), } } @@ -4785,14 +4788,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn fail(self: *Self, src: LazySrcLoc, comptime format: []const u8, args: anytype) InnerError { + fn fail(self: *Self, comptime format: []const u8, args: anytype) InnerError { @setCold(true); assert(self.err_msg == null); - const src_loc = if (src != .unneeded) - src.toSrcLocWithDecl(self.mod_fn.owner_decl) - else - self.src_loc; - self.err_msg = try ErrorMsg.create(self.bin_file.allocator, src_loc, format, args); + self.err_msg = try ErrorMsg.create(self.bin_file.allocator, self.src_loc, format, args); return error.CodegenFail; } diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 4743494f35..0ee6972654 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -25,7 +25,7 @@ pub const CValue = union(enum) { /// Index into local_names, but take the address. local_ref: usize, /// A constant instruction, to be rendered inline. - constant: *Inst, + constant: Air.Inst.Index, /// Index into the parameters arg: usize, /// By-value @@ -99,7 +99,7 @@ pub const Object = struct { gpa: *mem.Allocator, code: std.ArrayList(u8), value_map: CValueMap, - blocks: std.AutoHashMapUnmanaged(*ir.Inst.Block, BlockData) = .{}, + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, BlockData) = .{}, next_arg_index: usize = 0, next_local_index: usize = 0, next_block_index: usize = 0, @@ -133,7 +133,12 @@ pub const Object = struct { .none => unreachable, .local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), - .constant => |inst| return o.dg.renderValue(w, inst.ty, inst.value().?), + .constant => |inst| { + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const ty = o.air.getRefType(ty_pl.ty); + const val = o.air.values[ty_pl.payload]; + return o.dg.renderValue(w, ty, val); + }, .arg => |i| return w.print("a{d}", .{i}), .decl => |decl| return w.writeAll(mem.span(decl.name)), .decl_ref => |decl| return w.print("&{s}", .{decl.name}), @@ -213,8 +218,9 @@ pub const DeclGen = struct { error_msg: ?*Module.ErrorMsg, typedefs: TypedefMap, - fn fail(dg: *DeclGen, src: LazySrcLoc, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { + fn fail(dg: *DeclGen, comptime format: []const u8, args: anytype) error{ AnalysisFail, OutOfMemory } { @setCold(true); + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(dg.decl); dg.error_msg = try Module.ErrorMsg.create(dg.module.gpa, src_loc, format, args); return error.AnalysisFail; @@ -230,7 +236,7 @@ pub const DeclGen = struct { // This should lower to 0xaa bytes in safe modes, and for unsafe modes should // lower to leaving variables uninitialized (that might need to be implemented // outside of this function). 
- return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement renderValue undef", .{}); + return dg.fail("TODO: C backend: implement renderValue undef", .{}); } switch (t.zigTypeTag()) { .Int => { @@ -440,7 +446,7 @@ pub const DeclGen = struct { }, else => unreachable, }, - else => |e| return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement value {s}", .{ + else => |e| return dg.fail("TODO: C backend: implement value {s}", .{ @tagName(e), }), } @@ -519,14 +525,14 @@ pub const DeclGen = struct { break; } } else { - return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement integer types larger than 128 bits", .{}); + return dg.fail("TODO: C backend: implement integer types larger than 128 bits", .{}); } }, else => unreachable, } }, - .Float => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Float", .{}), + .Float => return dg.fail("TODO: C backend: implement type Float", .{}), .Pointer => { if (t.isSlice()) { @@ -681,7 +687,7 @@ pub const DeclGen = struct { try dg.renderType(w, int_tag_ty); }, - .Union => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Union", .{}), + .Union => return dg.fail("TODO: C backend: implement type Union", .{}), .Fn => { try dg.renderType(w, t.fnReturnType()); try w.writeAll(" (*)("); @@ -704,10 +710,10 @@ pub const DeclGen = struct { } try w.writeByte(')'); }, - .Opaque => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Opaque", .{}), - .Frame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Frame", .{}), - .AnyFrame => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type AnyFrame", .{}), - .Vector => return dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement type Vector", .{}), + .Opaque => return dg.fail("TODO: C backend: implement type Opaque", .{}), + .Frame => return dg.fail("TODO: C backend: implement type Frame", .{}), + .AnyFrame => return dg.fail("TODO: C backend: implement type AnyFrame", .{}), + .Vector => return dg.fail("TODO: C backend: implement type Vector", .{}), .Null, .Undefined, @@ -760,7 +766,8 @@ pub fn genDecl(o: *Object) !void { try o.dg.renderFunctionSignature(o.writer(), is_global); try o.writer().writeByte(' '); - try genBody(o, func.body); + const main_body = o.air.getMainBody(); + try genBody(o, main_body); try o.indent_writer.insertNewline(); return; @@ -833,9 +840,9 @@ pub fn genHeader(dg: *DeclGen) error{ AnalysisFail, OutOfMemory }!void { } } -pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!void { +fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfMemory }!void { const writer = o.writer(); - if (body.instructions.len == 0) { + if (body.len == 0) { try writer.writeAll("{}"); return; } @@ -843,82 +850,85 @@ pub fn genBody(o: *Object, body: ir.Body) error{ AnalysisFail, OutOfMemory }!voi try writer.writeAll("{\n"); o.indent_writer.pushIndent(); - for (body.instructions) |inst| { - const result_value = switch (inst.tag) { - // TODO use a different strategy for add that communicates to the optimizer - // that wrapping is UB. - .add => try genBinOp(o, inst.castTag(.add).?, " + "), - .addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), - // TODO use a different strategy for sub that communicates to the optimizer - // that wrapping is UB. 
- .sub => try genBinOp(o, inst.castTag(.sub).?, " - "), - .subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), - // TODO use a different strategy for mul that communicates to the optimizer - // that wrapping is UB. - .mul => try genBinOp(o, inst.castTag(.sub).?, " * "), - .mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), - // TODO use a different strategy for div that communicates to the optimizer - // that wrapping is UB. - .div => try genBinOp(o, inst.castTag(.div).?, " / "), + const air_tags = o.air.instructions.items(.tag); - .constant => unreachable, // excluded from function bodies - .alloc => try genAlloc(o, inst.castTag(.alloc).?), - .arg => genArg(o), - .assembly => try genAsm(o, inst.castTag(.assembly).?), - .block => try genBlock(o, inst.castTag(.block).?), - .bitcast => try genBitcast(o, inst.castTag(.bitcast).?), - .breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), - .call => try genCall(o, inst.castTag(.call).?), - .cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), - .cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), - .cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), - .cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), - .cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), - .cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), - .dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), - .intcast => try genIntCast(o, inst.castTag(.intcast).?), - .load => try genLoad(o, inst.castTag(.load).?), - .ret => try genRet(o, inst.castTag(.ret).?), - .retvoid => try genRetVoid(o), - .store => try genStore(o, inst.castTag(.store).?), - .unreach => try genUnreach(o, inst.castTag(.unreach).?), - .loop => try genLoop(o, inst.castTag(.loop).?), - .condbr => try genCondBr(o, inst.castTag(.condbr).?), - .br => try genBr(o, inst.castTag(.br).?), - .br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), - .switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), - // bool_and and bool_or are non-short-circuit operations - .bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), - .bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), - .bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "), - .bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), - .xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), - .not => try genUnOp(o, inst.castTag(.not).?, "!"), - .is_null => try genIsNull(o, inst.castTag(.is_null).?), - .is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), - .is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), - .is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), - .wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), - .optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), - .optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), - .ref => try genRef(o, inst.castTag(.ref).?), - .struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), + for (body) |inst| { + const result_value = switch (air_tags[inst]) { + //// TODO use a different strategy for add that communicates to the optimizer + //// that wrapping is UB. + //.add => try genBinOp(o, inst.castTag(.add).?, " + "), + //.addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), + //// TODO use a different strategy for sub that communicates to the optimizer + //// that wrapping is UB. 
+ //.sub => try genBinOp(o, inst.castTag(.sub).?, " - "), + //.subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), + //// TODO use a different strategy for mul that communicates to the optimizer + //// that wrapping is UB. + //.mul => try genBinOp(o, inst.castTag(.sub).?, " * "), + //.mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), + //// TODO use a different strategy for div that communicates to the optimizer + //// that wrapping is UB. + //.div => try genBinOp(o, inst.castTag(.div).?, " / "), - .is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), - .is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), - .is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), - .is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), + //.constant => unreachable, // excluded from function bodies + //.alloc => try genAlloc(o, inst.castTag(.alloc).?), + //.arg => genArg(o), + //.assembly => try genAsm(o, inst.castTag(.assembly).?), + //.block => try genBlock(o, inst.castTag(.block).?), + //.bitcast => try genBitcast(o, inst.castTag(.bitcast).?), + //.breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), + //.call => try genCall(o, inst.castTag(.call).?), + //.cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), + //.cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), + //.cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), + //.cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), + //.cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), + //.cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), + //.dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), + //.intcast => try genIntCast(o, inst.castTag(.intcast).?), + //.load => try genLoad(o, inst.castTag(.load).?), + //.ret => try genRet(o, inst.castTag(.ret).?), + //.retvoid => try genRetVoid(o), + //.store => try genStore(o, inst.castTag(.store).?), + //.unreach => try genUnreach(o, inst.castTag(.unreach).?), + //.loop => try genLoop(o, inst.castTag(.loop).?), + //.condbr => try genCondBr(o, inst.castTag(.condbr).?), + //.br => try genBr(o, inst.castTag(.br).?), + //.br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), + //.switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), + //// bool_and and bool_or are non-short-circuit operations + //.bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), + //.bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), + //.bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "), + //.bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), + //.xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), + //.not => try genUnOp(o, inst.castTag(.not).?, "!"), + //.is_null => try genIsNull(o, inst.castTag(.is_null).?), + //.is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), + //.is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), + //.is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), + //.wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), + //.optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), + //.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), + //.ref => try genRef(o, inst.castTag(.ref).?), + //.struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), - .unwrap_errunion_payload => try genUnwrapErrUnionPay(o, 
inst.castTag(.unwrap_errunion_payload).?), - .unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), - .unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), - .unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), - .wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), - .wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), - .br_block_flat => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for br_block_flat", .{}), - .ptrtoint => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for ptrtoint", .{}), - .varptr => try genVarPtr(o, inst.castTag(.varptr).?), - .floatcast => return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement codegen for floatcast", .{}), + //.is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), + //.is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), + //.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), + //.is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), + + //.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), + //.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), + //.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), + //.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), + //.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), + //.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), + //.br_block_flat => return o.dg.fail("TODO: C backend: implement codegen for br_block_flat", .{}), + //.ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}), + //.varptr => try genVarPtr(o, inst.castTag(.varptr).?), + //.floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}), + else => return o.dg.fail("TODO: C backend: rework AIR memory layout", .{}), }; switch (result_value) { .none => {}, @@ -1060,7 +1070,7 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c } if (bits > 64) { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: genWrapOp for large integers", .{}); + return o.dg.fail("TODO: C backend: genWrapOp for large integers", .{}); } var min_buf: [80]u8 = undefined; @@ -1227,7 +1237,7 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue { try writer.writeAll(");\n"); return result_local; } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: implement function pointers", .{}); + return o.dg.fail("TODO: C backend: implement function pointers", .{}); } } @@ -1390,13 +1400,13 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { try o.writeCValue(writer, arg_c_value); try writer.writeAll(";\n"); } else { - return o.dg.fail(.{ .node_offset = 0 }, "TODO non-explicit inline asm regs", .{}); + return o.dg.fail("TODO non-explicit inline asm regs", .{}); } } const volatile_string: []const u8 = if (as.is_volatile) "volatile " else ""; try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source }); if (as.output_constraint) |_| { - return o.dg.fail(.{ .node_offset = 0 }, "TODO: CBE inline asm output", .{}); + return o.dg.fail("TODO: CBE inline 
asm output", .{}); } if (as.inputs.len > 0) { if (as.output_constraint == null) { @@ -1421,7 +1431,7 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { if (as.base.isUnused()) return CValue.none; - return o.dg.fail(.{ .node_offset = 0 }, "TODO: C backend: inline asm expression result used", .{}); + return o.dg.fail("TODO: C backend: inline asm expression result used", .{}); } fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue { diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 0d05b97846..c93f04f618 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2519,6 +2519,9 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); + var dbg_line_buffer = std.ArrayList(u8).init(self.base.allocator); + defer dbg_line_buffer.deinit(); + var dbg_info_buffer = std.ArrayList(u8).init(self.base.allocator); defer dbg_info_buffer.deinit(); diff --git a/src/value.zig b/src/value.zig index 48cd6fffc4..0f7194d8c1 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1700,7 +1700,7 @@ pub const Value = extern union { /// peer type resolution. This is stored in a separate list so that /// the items are contiguous in memory and thus can be passed to /// `Module.resolvePeerTypes`. - stored_inst_list: std.ArrayListUnmanaged(*ir.Inst) = .{}, + stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, }, }; From 3a41e4430eae16e5aa739b7a71b1fded1f1029e3 Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Tue, 13 Jul 2021 20:38:55 -0400 Subject: [PATCH 10/53] codegen: add FnResult type which is a Result that removes externally_managed --- src/codegen.zig | 17 +++++++++++------ src/link/Elf.zig | 1 - 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/src/codegen.zig b/src/codegen.zig index c27a1444ef..1495b19673 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -23,6 +23,11 @@ const RegisterManager = @import("register_manager.zig").RegisterManager; const X8664Encoder = @import("codegen/x86_64.zig").Encoder; +pub const FnResult = union(enum) { + /// The `code` parameter passed to `generateSymbol` has the value appended. + appended: void, + fail: *ErrorMsg, +}; pub const Result = union(enum) { /// The `code` parameter passed to `generateSymbol` has the value appended. appended: void, @@ -54,7 +59,7 @@ pub fn generateFunction( liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, -) GenerateSymbolError!Result { +) GenerateSymbolError!FnResult { switch (bin_file.options.target.cpu.arch) { .wasm32 => unreachable, // has its own code path .wasm64 => unreachable, // has its own code path @@ -451,7 +456,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, - ) GenerateSymbolError!Result { + ) GenerateSymbolError!FnResult { if (build_options.skip_non_native and std.Target.current.cpu.arch != arch) { @panic("Attempted to compile for architecture that was disabled by build configuration"); } @@ -495,7 +500,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { defer function.exitlude_jump_relocs.deinit(bin_file.allocator); var call_info = function.resolveCallingConventionValues(fn_type) catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, + error.CodegenFail => return FnResult{ .fail = function.err_msg.? 
}, else => |e| return e, }; defer call_info.deinit(&function); @@ -506,14 +511,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { function.max_end_stack = call_info.stack_byte_count; function.gen() catch |err| switch (err) { - error.CodegenFail => return Result{ .fail = function.err_msg.? }, + error.CodegenFail => return FnResult{ .fail = function.err_msg.? }, else => |e| return e, }; if (function.err_msg) |em| { - return Result{ .fail = em }; + return FnResult{ .fail = em }; } else { - return Result{ .appended = {} }; + return FnResult{ .appended = {} }; } } diff --git a/src/link/Elf.zig b/src/link/Elf.zig index c93f04f618..815c0c9f23 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -2363,7 +2363,6 @@ pub fn updateFunc(self: *Elf, module: *Module, func: *Module.Fn, air: Air, liven }, }); const code = switch (res) { - .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; From 91b1896184cc89e21d12dd246ce7d658b6d3f365 Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Tue, 13 Jul 2021 20:40:29 -0400 Subject: [PATCH 11/53] plan9 linker: make more incremental The incrementalness is now roughly the same as the c backend rather than the spirv backend before. --- src/link/Plan9.zig | 323 +++++++++++++++++++++++---------------------- 1 file changed, 167 insertions(+), 156 deletions(-) diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index bc044ce414..9b123f56aa 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -25,20 +25,22 @@ sixtyfour_bit: bool, error_flags: File.ErrorFlags = File.ErrorFlags{}, bases: Bases, -decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{}, -/// is just casted down when 32 bit +/// A symbol's value is just casted down when compiling +/// for a 32 bit target. syms: std.ArrayListUnmanaged(aout.Sym) = .{}, -text_buf: std.ArrayListUnmanaged(u8) = .{}, -data_buf: std.ArrayListUnmanaged(u8) = .{}, + +fn_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{}, +data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{}, hdr: aout.ExecHdr = undefined, entry_decl: ?*Module.Decl = null, -got: std.ArrayListUnmanaged(u64) = .{}, +got_len: u64 = 0, + const Bases = struct { text: u64, - /// the addr of the got + /// the Global Offset Table starts at the beginning of the data section data: u64, }; @@ -49,14 +51,6 @@ fn getAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 { else => unreachable, }; } -/// opposite of getAddr -fn takeAddr(self: Plan9, addr: u64, t: aout.Sym.Type) u64 { - return addr - switch (t) { - .T, .t, .l, .L => self.bases.text, - .D, .d, .B, .b => self.bases.data, - else => unreachable, - }; -} fn getSymAddr(self: Plan9, s: aout.Sym) u64 { return self.getAddr(s.value, s.type); @@ -127,18 +121,80 @@ pub fn updateFunc(self: *Plan9, module: *Module, func: *Module.Fn, air: Air, liv if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } - _ = module; - // Keep track of all decls so we can iterate over them on flush(). 
- _ = try self.decl_table.getOrPut(self.base.allocator, func.owner_decl); - _ = air; - _ = liveness; - @panic("TODO Plan9 needs to keep track of Air and Liveness so it can use them later"); + const decl = func.owner_decl; + log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + const res = try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .none = .{} }); + const code = switch (res) { + .appended => code_buffer.toOwnedSlice(), + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + try self.fn_decl_table.put(self.base.allocator, decl, code); + return self.updateFinish(decl); } pub fn updateDecl(self: *Plan9, module: *Module, decl: *Module.Decl) !void { - _ = module; - _ = try self.decl_table.getOrPut(self.base.allocator, decl); + if (decl.val.tag() == .extern_fn) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + if (decl.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.is_extern) { + return; // TODO Should we do more when front-end analyzed extern decl? + } + } + + log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); + + var code_buffer = std.ArrayList(u8).init(self.base.allocator); + defer code_buffer.deinit(); + const decl_val = if (decl.val.castTag(.variable)) |payload| payload.data.init else decl.val; + const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ + .ty = decl.ty, + .val = decl_val, + }, &code_buffer, .{ .none = .{} }); + const code = switch (res) { + .externally_managed => |x| x, + .appended => code_buffer.items, + .fail => |em| { + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + }; + var duped_code = try std.mem.dupe(self.base.allocator, u8, code); + errdefer self.base.allocator.free(duped_code); + try self.data_decl_table.put(self.base.allocator, decl, duped_code); + return self.updateFinish(decl); +} +/// called at the end of update{Decl,Func} +fn updateFinish(self: *Plan9, decl: *Module.Decl) !void { + const is_fn = (decl.ty.zigTypeTag() == .Fn); + log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); + const sym_t: aout.Sym.Type = if (is_fn) .t else .d; + // write the internal linker metadata + decl.link.plan9.type = sym_t; + // write the symbol + // we already have the got index because that got allocated in allocateDeclIndexes + const sym: aout.Sym = .{ + .value = undefined, // the value of stuff gets filled in in flushModule + .type = decl.link.plan9.type, + .name = mem.span(decl.name), + }; + + if (decl.link.plan9.sym_index) |s| { + self.syms.items[s] = sym; + } else { + try self.syms.append(self.base.allocator, sym); + decl.link.plan9.sym_index = self.syms.items.len - 1; + } } pub fn flush(self: *Plan9, comp: *Compilation) !void { @@ -165,160 +221,107 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { defer assert(self.hdr.entry != 0x0); - const module = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; + _ = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; - self.text_buf.items.len = 0; - self.data_buf.items.len = 0; - // ensure space to write the got later - assert(self.got.items.len == self.decl_table.count()); - try self.data_buf.appendNTimes(self.base.allocator, 0x69, self.got.items.len * if 
(!self.sixtyfour_bit) @as(u32, 4) else 8); - // temporary buffer - var code_buffer = std.ArrayList(u8).init(self.base.allocator); - defer code_buffer.deinit(); + assert(self.got_len == self.fn_decl_table.count() + self.data_decl_table.count()); + const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8; + var got_table = try self.base.allocator.alloc(u8, got_size); + defer self.base.allocator.free(got_table); + + // + 2 for header, got, symbols + var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.fn_decl_table.count() + self.data_decl_table.count() + 3); + + const file = self.base.file.?; + + var hdr_buf: [40]u8 = undefined; + // account for the fat header + const hdr_size = if (self.sixtyfour_bit) @as(usize, 40) else 32; + const hdr_slice: []u8 = hdr_buf[0..hdr_size]; + var foff = hdr_size; + iovecs[0] = .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_slice.len }; + var iovecs_i: u64 = 1; + var text_i: u64 = 0; + // text { - for (self.decl_table.keys()) |decl| { - if (!decl.has_tv) continue; - const is_fn = (decl.ty.zigTypeTag() == .Fn); - - log.debug("update the symbol table and got for decl {*} ({s})", .{ decl, decl.name }); - decl.link.plan9 = if (is_fn) .{ - .offset = self.getAddr(self.text_buf.items.len, .t), - .type = .t, - .sym_index = decl.link.plan9.sym_index, - .got_index = decl.link.plan9.got_index, - } else .{ - .offset = self.getAddr(self.data_buf.items.len, .d), - .type = .d, - .sym_index = decl.link.plan9.sym_index, - .got_index = decl.link.plan9.got_index, - }; - self.got.items[decl.link.plan9.got_index.?] = decl.link.plan9.offset.?; - if (decl.link.plan9.sym_index) |s| { - self.syms.items[s] = .{ - .value = decl.link.plan9.offset.?, - .type = decl.link.plan9.type, - .name = mem.span(decl.name), - }; + var it = self.fn_decl_table.iterator(); + while (it.next()) |entry| { + const decl = entry.key_ptr.*; + const code = entry.value_ptr.*; + foff += code.len; + text_i += code.len; + iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; + iovecs_i += 1; + const off = self.getAddr(text_i, .t); + decl.link.plan9.offset = off; + if (!self.sixtyfour_bit) { + mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off)); + mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); } else { - try self.syms.append(self.base.allocator, .{ - .value = decl.link.plan9.offset.?, - .type = decl.link.plan9.type, - .name = mem.span(decl.name), - }); - decl.link.plan9.sym_index = self.syms.items.len - 1; + mem.writeInt(u64, got_table[decl.link.plan9.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } - - if (module.decl_exports.get(decl)) |exports| { - for (exports) |exp| { - // plan9 does not support custom sections - if (exp.options.section) |section_name| { - if (!mem.eql(u8, section_name, ".text") or !mem.eql(u8, section_name, ".data")) { - try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{})); - break; - } - } - if (std.mem.eql(u8, exp.options.name, "_start")) { - assert(decl.link.plan9.type == .t); // we tried to link a non-function as the entry - self.entry_decl = decl; - } - if (exp.link.plan9) |i| { - self.syms.items[i] = .{ - .value = decl.link.plan9.offset.?, - .type = decl.link.plan9.type.toGlobal(), - .name = exp.options.name, - }; - } else { - try self.syms.append(self.base.allocator, .{ - .value = decl.link.plan9.offset.?, - .type = decl.link.plan9.type.toGlobal(), - .name = exp.options.name, - }); - exp.link.plan9 = self.syms.items.len - 1; - } - } - } - - log.debug("codegen decl {*} ({s})", .{ decl, decl.name }); - const res = try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ - .ty = decl.ty, - .val = decl.val, - }, &code_buffer, .{ .none = {} }); - const code = switch (res) { - .externally_managed => |x| x, - .appended => code_buffer.items, - .fail => |em| { - decl.analysis = .codegen_failure; - try module.failed_decls.put(module.gpa, decl, em); - // TODO try to do more decls - return; - }, - }; - if (is_fn) { - try self.text_buf.appendSlice(self.base.allocator, code); - code_buffer.items.len = 0; + self.syms.items[decl.link.plan9.sym_index.?].value = off; + } + // etext symbol + self.syms.items[2].value = self.getAddr(text_i, .t); + } + // data + var data_i: u64 = got_size; + { + var it = self.data_decl_table.iterator(); + while (it.next()) |entry| { + const decl = entry.key_ptr.*; + const code = entry.value_ptr.*; + foff += code.len; + data_i += code.len; + iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; + iovecs_i += 1; + const off = self.getAddr(data_i, .d); + decl.link.plan9.offset = off; + if (!self.sixtyfour_bit) { + mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); } else { - try self.data_buf.appendSlice(self.base.allocator, code); - code_buffer.items.len = 0; + mem.writeInt(u64, got_table[decl.link.plan9.got_index.? 
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } + self.syms.items[decl.link.plan9.sym_index.?].value = off; } + // edata symbol + self.syms.items[0].value = self.getAddr(data_i, .b); } - - // write the got - if (!self.sixtyfour_bit) { - for (self.got.items) |p, i| { - mem.writeInt(u32, self.data_buf.items[i * 4 ..][0..4], @intCast(u32, p), self.base.options.target.cpu.arch.endian()); - } - } else { - for (self.got.items) |p, i| { - mem.writeInt(u64, self.data_buf.items[i * 8 ..][0..8], p, self.base.options.target.cpu.arch.endian()); - } - } - - self.hdr.entry = @truncate(u32, self.entry_decl.?.link.plan9.offset.?); - - // edata, end, etext - self.syms.items[0].value = self.getAddr(0x0, .b); + // edata self.syms.items[1].value = self.getAddr(0x0, .b); - self.syms.items[2].value = self.getAddr(self.text_buf.items.len, .t); - var sym_buf = std.ArrayList(u8).init(self.base.allocator); defer sym_buf.deinit(); try self.writeSyms(&sym_buf); - + iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len }; + iovecs_i += 1; + assert(2 + self.fn_decl_table.count() + self.data_decl_table.count() == iovecs_i); // we didn't write all the decls + iovecs[iovecs_i] = .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len }; + iovecs_i += 1; // generate the header self.hdr = .{ .magic = try aout.magicFromArch(self.base.options.target.cpu.arch), - .text = @intCast(u32, self.text_buf.items.len), - .data = @intCast(u32, self.data_buf.items.len), + .text = @intCast(u32, text_i), + .data = @intCast(u32, data_i), .syms = @intCast(u32, sym_buf.items.len), .bss = 0, .pcsz = 0, .spsz = 0, - .entry = self.hdr.entry, + .entry = @intCast(u32, self.entry_decl.?.link.plan9.offset.?), }; - - const file = self.base.file.?; - - var hdr_buf = self.hdr.toU8s(); - const hdr_slice: []const u8 = &hdr_buf; - // account for the fat header - const hdr_size: u8 = if (!self.sixtyfour_bit) 32 else 40; + std.mem.copy(u8, hdr_slice, self.hdr.toU8s()[0..hdr_size]); // write the fat header for 64 bit entry points if (self.sixtyfour_bit) { - mem.writeIntSliceBig(u64, hdr_buf[32..40], self.hdr.entry); + mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_decl.?.link.plan9.offset.?); } // write it all! 
- var vectors: [4]std.os.iovec_const = .{ - .{ .iov_base = hdr_slice.ptr, .iov_len = hdr_size }, - .{ .iov_base = self.text_buf.items.ptr, .iov_len = self.text_buf.items.len }, - .{ .iov_base = self.data_buf.items.ptr, .iov_len = self.data_buf.items.len }, - .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len }, - // TODO spsz, pcsz - }; - try file.pwritevAll(&vectors, 0); + try file.pwritevAll(iovecs, 0); } pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void { - assert(self.decl_table.swapRemove(decl)); + const is_fn = (decl.ty.zigTypeTag() == .Fn); + if (is_fn) + assert(self.fn_decl_table.swapRemove(decl)) + else + assert(self.data_decl_table.swapRemove(decl)); } pub fn updateDeclExports( @@ -334,11 +337,17 @@ pub fn updateDeclExports( _ = exports; } pub fn deinit(self: *Plan9) void { - self.decl_table.deinit(self.base.allocator); + var itf = self.fn_decl_table.iterator(); + while (itf.next()) |entry| { + self.base.allocator.free(entry.value_ptr.*); + } + self.fn_decl_table.deinit(self.base.allocator); + var itd = self.data_decl_table.iterator(); + while (itd.next()) |entry| { + self.base.allocator.free(entry.value_ptr.*); + } + self.data_decl_table.deinit(self.base.allocator); self.syms.deinit(self.base.allocator); - self.text_buf.deinit(self.base.allocator); - self.data_buf.deinit(self.base.allocator); - self.got.deinit(self.base.allocator); } pub const Export = ?usize; @@ -397,6 +406,8 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { } pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void { - try self.got.append(self.base.allocator, 0xdeadbeef); - decl.link.plan9.got_index = self.got.items.len - 1; + if (decl.link.plan9.got_index != null) { + self.got_len += 1; + decl.link.plan9.got_index = self.got_len - 1; + } } From f17a05bfb7ca0ff010fef9654264eed7342298d2 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jul 2021 17:37:46 -0700 Subject: [PATCH 12/53] CLI: add plan9 -ofmt help text --- src/main.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main.zig b/src/main.zig index 2b961bb64c..3b62bba410 100644 --- a/src/main.zig +++ b/src/main.zig @@ -365,6 +365,7 @@ const usage_build_generic = \\ coff Common Object File Format (Windows) \\ macho macOS relocatables \\ spirv Standard, Portable Intermediate Representation V (SPIR-V) + \\ plan9 Plan 9 from Bell Labs object format \\ hex (planned) Intel IHEX \\ raw (planned) Dump machine code directly \\ -dirafter [dir] Add directory to AFTER include search path From dbd3529d1fa02d5e720df0fbf2436d646f5a4f57 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 13 Jul 2021 21:49:22 -0700 Subject: [PATCH 13/53] Sema: first pass reworking for AIR memory layout --- BRANCH_TODO | 89 ---------- src/Module.zig | 50 ++++-- src/Sema.zig | 444 +++++++++++++++++++++++++++---------------------- 3 files changed, 286 insertions(+), 297 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index c7f3923559..aaba8b70b3 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -569,95 +569,6 @@ const DumpAir = struct { } }; -pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst { - _ = mod; - const const_inst = try arena.create(ir.Inst.Constant); - const_inst.* = .{ - .base = .{ - .tag = ir.Inst.Constant.base_tag, - .ty = typed_value.ty, - .src = src, - }, - .val = typed_value.val, - }; - return &const_inst.base; -} - -pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = 
Type.initTag(.type), - .val = try ty.toValue(arena), - }); -} - -pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), - }); -} - -pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.noreturn), - .val = Value.initTag(.unreachable_value), - }); -} - -pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = Value.initTag(.undef), - }); -} - -pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.bool), - .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], - }); -} - -pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_u64.create(arena, int), - }); -} - -pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_i64.create(arena, int), - }); -} - -pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst { - if (big_int.positive) { - if (big_int.to(u64)) |x| { - return mod.constIntUnsigned(arena, src, ty, x); - } else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs), - }); - } else { - if (big_int.to(i64)) |x| { - return mod.constIntSigned(arena, src, ty, x); - } else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs), - }); - } -} - pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { const zir_module = scope.namespace(); const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); diff --git a/src/Module.zig b/src/Module.zig index 7ec9c7e93d..3ce3c47f14 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1232,22 +1232,52 @@ pub const Scope = struct { ty: Type, operand: Air.Inst.Ref, ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .{ .ty_op = .{ + .ty = try block.sema.addType(ty), + .operand = operand, + } }, + }); + } + + pub fn addUnOp( + block: *Block, + tag: Air.Inst.Tag, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .{ .un_op = operand }, + }); + } + + pub fn addBinOp( + block: *Block, + tag: Air.Inst.Tag, + lhs: Air.Inst.Ref, + rhs: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .{ .bin_op = .{ + .lhs = lhs, + .rhs = rhs, + } }, + }); + } + + pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { const sema = block.sema; const gpa = sema.gpa; try sema.air_instructions.ensureUnusedCapacity(gpa, 1); try block.instructions.ensureUnusedCapacity(gpa, 1); - const inst = @intCast(Air.Inst.Index, sema.air_instructions.len); - 
sema.air_instructions.appendAssumeCapacity(.{ - .tag = tag, - .data = .{ .ty_op = .{ - .ty = try sema.addType(ty), - .operand = operand, - } }, - }); - block.instructions.appendAssumeCapacity(inst); - return Sema.indexToRef(inst); + const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); + sema.air_instructions.appendAssumeCapacity(inst); + block.instructions.appendAssumeCapacity(result_index); + return Sema.indexToRef(result_index); } }; }; diff --git a/src/Sema.zig b/src/Sema.zig index fc130cd4a4..829dd843cc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -534,7 +534,7 @@ pub fn analyzeBody( //}, else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), }; - if (sema.getAirType(air_inst).isNoReturn()) + if (sema.getTypeOf(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, air_inst); i += 1; @@ -664,7 +664,7 @@ fn resolvePossiblyUndefinedValue( src: LazySrcLoc, air_ref: Air.Inst.Ref, ) !?Value { - const ty = sema.getTypeOfAirRef(air_ref); + const ty = sema.getTypeOf(air_ref); if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; } @@ -737,7 +737,7 @@ pub fn resolveInstConst( const air_ref = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ - .ty = sema.getTypeOfAirRef(air_ref), + .ty = sema.getTypeOf(air_ref), .val = val, }; } @@ -1208,7 +1208,7 @@ fn zirRetType( try sema.requireFunctionBlock(block, src); const fn_ty = sema.func.?.owner_decl.ty; const ret_type = fn_ty.fnReturnType(); - return sema.mod.constType(sema.arena, src, ret_type); + return sema.addType(ret_type); } fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { @@ -1571,7 +1571,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In // if expressions should force it when the condition is compile-time known. const src: LazySrcLoc = .unneeded; try sema.requireRuntimeBlock(block, src); - const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); + const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); } @@ -1590,7 +1590,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) // Create a runtime bitcast instruction with exactly the type the pointer wants. 
const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); try sema.requireRuntimeBlock(block, src); - const bitcasted_ptr = try block.addUnOp(src, ptr_ty, .bitcast, ptr); + const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); } @@ -1646,7 +1646,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const param_count = fn_ty.fnParamLen(); if (param_index >= param_count) { if (fn_ty.fnIsVarArgs()) { - return sema.mod.constType(sema.arena, src, Type.initTag(.var_args_param)); + return sema.addType(Type.initTag(.var_args_param)); } return sema.mod.fail(&block.base, src, "arg index {d} out of bounds; '{}' has {d} argument(s)", .{ param_index, @@ -1657,7 +1657,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr // TODO support generic functions const param_type = fn_ty.fnParamType(param_index); - return sema.mod.constType(sema.arena, src, param_type); + return sema.addType(param_type); } fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -1694,7 +1694,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air defer tracy.end(); const int = sema.code.instructions.items(.data)[inst].int; - return sema.mod.constIntUnsigned(sema.arena, .unneeded, Type.initTag(.comptime_int), int); + return sema.addIntUnsigned(Type.initTag(.comptime_int), int); } fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2418,10 +2418,9 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError defer tracy.end(); const int_type = sema.code.instructions.items(.data)[inst].int_type; - const src = int_type.src(); const ty = try Module.makeIntType(sema.arena, int_type.signedness, int_type.bit_count); - return sema.mod.constType(sema.arena, src, ty); + return sema.addType(ty); } fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2433,7 +2432,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner const child_type = try sema.resolveType(block, src, inst_data.operand); const opt_type = try sema.mod.optionalType(sema.arena, child_type); - return sema.mod.constType(sema.arena, src, opt_type); + return sema.addType(opt_type); } fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2441,12 +2440,11 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); const elem_type = array_type.elemType(); - return sema.mod.constType(sema.arena, src, elem_type); + return sema.addType(elem_type); } fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; - const src = inst_data.src(); const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; @@ -2456,7 +2454,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr .len = len, .elem_type = elem_type, }); - return sema.mod.constType(sema.arena, src, vector_type); + return sema.addType(vector_type); } fn zirArrayType(sema: 
*Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2469,7 +2467,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const elem_type = try sema.resolveType(block, .unneeded, bin_inst.rhs); const array_ty = try sema.mod.arrayType(sema.arena, len.val.toUnsignedInt(), null, elem_type); - return sema.mod.constType(sema.arena, .unneeded, array_ty); + return sema.addType(array_ty); } fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2484,7 +2482,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const elem_type = try sema.resolveType(block, .unneeded, extra.elem_type); const array_ty = try sema.mod.arrayType(sema.arena, len.val.toUnsignedInt(), sentinel.val, elem_type); - return sema.mod.constType(sema.arena, .unneeded, array_ty); + return sema.addType(array_ty); } fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2492,12 +2490,11 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_anyframe_type = inst_data.src_node }; const return_type = try sema.resolveType(block, operand_src, inst_data.operand); const anyframe_type = try Type.Tag.anyframe_T.create(sema.arena, return_type); - return sema.mod.constType(sema.arena, src, anyframe_type); + return sema.addType(anyframe_type); } fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2506,7 +2503,6 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const src: LazySrcLoc = .{ .node_offset_bin_op = inst_data.src_node }; const lhs_src: LazySrcLoc = .{ .node_offset_bin_lhs = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const error_union = try sema.resolveType(block, lhs_src, extra.lhs); @@ -2518,7 +2514,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } const err_union_ty = try sema.mod.errorUnionType(sema.arena, error_union, payload); - return sema.mod.constType(sema.arena, src, err_union_ty); + return sema.addType(err_union_ty); } fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2553,7 +2549,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr if (try sema.resolvePossiblyUndefinedValue(block, src, op_coerced)) |val| { if (val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, result_ty); + return sema.addConstUndef(result_ty); } const payload = try sema.arena.create(Value.Payload.U64); payload.* = .{ @@ -2567,7 +2563,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .bitcast, op_coerced); + return block.addTyOp(.bitcast, result_ty, op_coerced); } fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2600,7 +2596,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr // const is_gt_max = @panic("TODO get max errors in compilation"); // try 
sema.addSafetyCheck(block, is_gt_max, .invalid_error_code); } - return block.addUnOp(src, Type.initTag(.anyerror), .bitcast, op); + return block.addTyOp(.bitcast, Type.initTag(.anyerror), op); } fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2614,7 +2610,9 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - if (rhs.ty.zigTypeTag() == .Bool and lhs.ty.zigTypeTag() == .Bool) { + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); + if (rhs_ty.zigTypeTag() == .Bool and lhs_ty.zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); @@ -2623,8 +2621,6 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }; return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } - const rhs_ty = try sema.resolveAirAsType(block, rhs_src, rhs); - const lhs_ty = try sema.resolveAirAsType(block, lhs_src, lhs); if (rhs_ty.zigTypeTag() != .ErrorSet) return sema.mod.fail(&block.base, rhs_src, "expected error set type, found {}", .{rhs_ty}); if (lhs_ty.zigTypeTag() != .ErrorSet) @@ -2786,7 +2782,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, int_tag_ty, .bitcast, enum_tag); + return block.addTyOp(.bitcast, int_tag_ty, enum_tag); } fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -2841,7 +2837,7 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, dest_ty, .bitcast, operand); + return block.addTyOp(.bitcast, dest_ty, operand); } /// Pointer in, pointer out. @@ -2881,10 +2877,10 @@ fn zirOptionalPayloadPtr( try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { - const is_non_null = try block.addUnOp(src, Type.initTag(.bool), .is_non_null_ptr, optional_ptr); + const is_non_null = try block.addUnOp(.is_non_null_ptr, optional_ptr); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } - return block.addUnOp(src, child_pointer, .optional_payload_ptr, optional_ptr); + return block.addTyOp(.optional_payload_ptr, child_pointer, optional_ptr); } /// Value in, value out. 
@@ -2919,10 +2915,10 @@ fn zirOptionalPayload( try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { - const is_non_null = try block.addUnOp(src, Type.initTag(.bool), .is_non_null, operand); + const is_non_null = try block.addUnOp(.is_non_null, operand); try sema.addSafetyCheck(block, is_non_null, .unwrap_null); } - return block.addUnOp(src, child_type, .optional_payload, operand); + return block.addTyOp(.optional_payload, child_type, operand); } /// Value in, value out @@ -2953,10 +2949,11 @@ fn zirErrUnionPayload( } try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { - const is_non_err = try block.addUnOp(src, Type.initTag(.bool), .is_err, operand); + const is_non_err = try block.addUnOp(.is_err, operand); try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); } - return block.addUnOp(src, operand.ty.castTag(.error_union).?.data.payload, .unwrap_errunion_payload, operand); + const result_ty = operand.ty.castTag(.error_union).?.data.payload; + return block.addTyOp(.unwrap_errunion_payload, result_ty, operand); } /// Pointer in, pointer out. @@ -2997,10 +2994,10 @@ fn zirErrUnionPayloadPtr( try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { - const is_non_err = try block.addUnOp(src, Type.initTag(.bool), .is_err, operand); + const is_non_err = try block.addUnOp(.is_err, operand); try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); } - return block.addUnOp(src, operand_pointer_ty, .unwrap_errunion_payload_ptr, operand); + return block.addTyOp(.unwrap_errunion_payload_ptr, operand_pointer_ty, operand); } /// Value in, value out @@ -3026,7 +3023,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .unwrap_errunion_err, operand); + return block.addTyOp(.unwrap_errunion_err, result_ty, operand); } /// Pointer in, value out @@ -3055,7 +3052,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .unwrap_errunion_err_ptr, operand); + return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand); } fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { @@ -3241,7 +3238,7 @@ fn funcCommon( } if (body_inst == 0) { - return mod.constType(sema.arena, src, fn_ty); + return sema.addType(fn_ty); } const is_inline = fn_ty.fnCallingConvention() == .Inline; @@ -3312,8 +3309,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro // TODO handle known-pointer-address const src = inst_data.src(); try sema.requireRuntimeBlock(block, src); - const ty = Type.initTag(.usize); - return block.addUnOp(src, ty, .ptrtoint, ptr); + return block.addUnOp(.ptrtoint, ptr); } fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4244,15 +4240,14 @@ fn analyzeSwitch( case_block.instructions.shrinkRetainingCapacity(0); var any_ok: ?Air.Inst.Index = null; - const bool_ty = comptime Type.initTag(.bool); for (items) |item_ref| { const item = sema.resolveInst(item_ref); _ = try sema.resolveConstValue(&child_block, item.src, item); - const cmp_ok = try case_block.addBinOp(item.src, bool_ty, .cmp_eq, operand, item); + const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); if (any_ok) |some| { - any_ok = try case_block.addBinOp(item.src, bool_ty, .bool_or, some, 
cmp_ok); + any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok); } else { any_ok = cmp_ok; } @@ -4271,32 +4266,24 @@ fn analyzeSwitch( _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); - const range_src = item_first.src; - // operand >= first and operand <= last const range_first_ok = try case_block.addBinOp( - item_first.src, - bool_ty, .cmp_gte, operand, item_first, ); const range_last_ok = try case_block.addBinOp( - item_last.src, - bool_ty, .cmp_lte, operand, item_last, ); const range_ok = try case_block.addBinOp( - range_src, - bool_ty, .bool_and, range_first_ok, range_last_ok, ); if (any_ok) |some| { - any_ok = try case_block.addBinOp(range_src, bool_ty, .bool_or, some, range_ok); + any_ok = try case_block.addBinOp(.bool_or, some, range_ok); } else { any_ok = range_ok; } @@ -4555,13 +4542,11 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; - const src = inst_data.src(); const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const rhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; const container_type = try sema.resolveType(block, lhs_src, extra.lhs); const decl_name = try sema.resolveConstString(block, rhs_src, extra.rhs); const mod = sema.mod; - const arena = sema.arena; const namespace = container_type.getNamespace() orelse return mod.fail( &block.base, @@ -4571,10 +4556,10 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError ); if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { if (decl.is_pub or decl.namespace.file_scope == block.base.namespace().file_scope) { - return mod.constBool(arena, src, true); + return Air.Inst.Ref.bool_true; } } - return mod.constBool(arena, src, false); + return Air.Inst.Ref.bool_false; } fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4599,7 +4584,7 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
try mod.semaFile(result.file); const file_root_decl = result.file.root_decl.?; try sema.mod.declareDeclDependency(sema.owner_decl, file_root_decl); - return mod.constType(sema.arena, src, file_root_decl.ty); + return sema.addType(file_root_decl.ty); } fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4641,6 +4626,8 @@ fn zirBitwise( const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); @@ -4654,38 +4641,38 @@ fn zirBitwise( const scalar_tag = scalar_type.zigTypeTag(); - if (lhs.ty.zigTypeTag() == .Vector and rhs.ty.zigTypeTag() == .Vector) { - if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) { + if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ - lhs.ty.arrayLen(), - rhs.ty.arrayLen(), + lhs_ty.arrayLen(), + rhs_ty.arrayLen(), }); } return sema.mod.fail(&block.base, src, "TODO implement support for vectors in zirBitwise", .{}); - } else if (lhs.ty.zigTypeTag() == .Vector or rhs.ty.zigTypeTag() == .Vector) { + } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to binary expression: '{}' and '{}'", .{ - lhs.ty, - rhs.ty, + lhs_ty, + rhs_ty, }); } const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; if (!is_int) { - return sema.mod.fail(&block.base, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs.ty.zigTypeTag()), @tagName(rhs.ty.zigTypeTag()) }); + return sema.mod.fail(&block.base, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); } if (casted_lhs.value()) |lhs_val| { if (casted_rhs.value()) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, resolved_type); + return sema.addConstUndef(resolved_type); } return sema.mod.fail(&block.base, src, "TODO implement comptime bitwise operations", .{}); } } try sema.requireRuntimeBlock(block, src); - return block.addBinOp(src, scalar_type, air_tag, casted_lhs, casted_rhs); + return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4783,18 +4770,18 @@ fn analyzeArithmetic( const scalar_tag = scalar_type.zigTypeTag(); - if (lhs.ty.zigTypeTag() == .Vector and rhs.ty.zigTypeTag() == .Vector) { - if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) { + if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { + if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ - lhs.ty.arrayLen(), - rhs.ty.arrayLen(), + lhs_ty.arrayLen(), + rhs_ty.arrayLen(), }); } return sema.mod.fail(&block.base, src, "TODO implement support for vectors in zirBinOp", .{}); - } else if (lhs.ty.zigTypeTag() == .Vector or rhs.ty.zigTypeTag() == .Vector) { + } else if (lhs_ty.zigTypeTag() == .Vector or rhs_ty.zigTypeTag() == .Vector) { return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to binary 
expression: '{}' and '{}'", .{ - lhs.ty, - rhs.ty, + lhs_ty, + rhs_ty, }); } @@ -4802,13 +4789,13 @@ fn analyzeArithmetic( const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; if (!is_int and !(is_float and floatOpAllowed(zir_tag))) { - return sema.mod.fail(&block.base, src, "invalid operands to binary expression: '{s}' and '{s}'", .{ @tagName(lhs.ty.zigTypeTag()), @tagName(rhs.ty.zigTypeTag()) }); + return sema.mod.fail(&block.base, src, "invalid operands to binary expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); } if (casted_lhs.value()) |lhs_val| { if (casted_rhs.value()) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, resolved_type); + return sema.addConstUndef(resolved_type); } // incase rhs is 0, simply return lhs without doing any calculations // TODO Once division is implemented we should throw an error when dividing by 0. @@ -4866,7 +4853,7 @@ fn analyzeArithmetic( } try sema.requireRuntimeBlock(block, src); - const ir_tag: Inst.Tag = switch (zir_tag) { + const air_tag: Air.Inst.Tag = switch (zir_tag) { .add => .add, .addwrap => .addwrap, .sub => .sub, @@ -4877,7 +4864,7 @@ fn analyzeArithmetic( else => return sema.mod.fail(&block.base, src, "TODO implement arithmetic for operand '{s}''", .{@tagName(zir_tag)}), }; - return block.addBinOp(src, scalar_type, ir_tag, casted_lhs, casted_rhs); + return block.addBinOp(air_tag, casted_lhs, casted_rhs); } fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -4997,11 +4984,17 @@ fn zirCmp( .eq, .neq => true, else => false, }; - const lhs_ty_tag = lhs.ty.zigTypeTag(); - const rhs_ty_tag = rhs.ty.zigTypeTag(); + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); + const lhs_ty_tag = lhs_ty.zigTypeTag(); + const rhs_ty_tag = rhs_ty.zigTypeTag(); if (is_equality_cmp and lhs_ty_tag == .Null and rhs_ty_tag == .Null) { // null == null, null != null - return mod.constBool(sema.arena, src, op == .eq); + if (op == .eq) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } else if (is_equality_cmp and ((lhs_ty_tag == .Null and rhs_ty_tag == .Optional) or rhs_ty_tag == .Null and lhs_ty_tag == .Optional)) @@ -5010,11 +5003,11 @@ fn zirCmp( const opt_operand = if (lhs_ty_tag == .Optional) lhs else rhs; return sema.analyzeIsNull(block, src, opt_operand, op == .neq); } else if (is_equality_cmp and - ((lhs_ty_tag == .Null and rhs.ty.isCPtr()) or (rhs_ty_tag == .Null and lhs.ty.isCPtr()))) + ((lhs_ty_tag == .Null and rhs_ty.isCPtr()) or (rhs_ty_tag == .Null and lhs_ty.isCPtr()))) { return mod.fail(&block.base, src, "TODO implement C pointer cmp", .{}); } else if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { - const non_null_type = if (lhs_ty_tag == .Null) rhs.ty else lhs.ty; + const non_null_type = if (lhs_ty_tag == .Null) rhs_ty else lhs_ty; return mod.fail(&block.base, src, "comparison of '{}' with null", .{non_null_type}); } else if (is_equality_cmp and ((lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) or @@ -5025,27 +5018,45 @@ fn zirCmp( if (!is_equality_cmp) { return mod.fail(&block.base, src, "{s} operator not allowed for errors", .{@tagName(op)}); } - if (rhs.value()) |rval| { - if (lhs.value()) |lval| { - // TODO optimisation oppurtunity: evaluate if std.mem.eql is faster with the names, or calling to Module.getErrorValue to get the values and then compare them is faster - return mod.constBool(sema.arena, src, std.mem.eql(u8, 
lval.castTag(.@"error").?.data.name, rval.castTag(.@"error").?.data.name) == (op == .eq)); + if (try sema.resolvePossiblyUndefinedValue(block, lhs_src, lhs)) |lval| { + if (try sema.resolvePossiblyUndefinedValue(block, rhs_src, rhs)) |rval| { + if (lval.isUndef() or rval.isUndef()) { + return sema.addConstUndef(Type.initTag(.bool)); + } + // TODO optimisation opportunity: evaluate if mem.eql is faster with the names, + // or calling to Module.getErrorValue to get the values and then compare them is + // faster. + const lhs_name = lval.castTag(.@"error").?.data.name; + const rhs_name = rval.castTag(.@"error").?.data.name; + if (mem.eql(u8, lhs_name, rhs_name) == (op == .eq)) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } } try sema.requireRuntimeBlock(block, src); - return block.addBinOp(src, Type.initTag(.bool), if (op == .eq) .cmp_eq else .cmp_neq, lhs, rhs); - } else if (lhs.ty.isNumeric() and rhs.ty.isNumeric()) { + const tag: Air.Inst.Tag = if (op == .eq) .cmp_eq else .cmp_neq; + return block.addBinOp(tag, lhs, rhs); + } else if (lhs_ty.isNumeric() and rhs_ty.isNumeric()) { // This operation allows any combination of integer and float types, regardless of the // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for // numeric types. - return sema.cmpNumeric(block, src, lhs, rhs, op); + return sema.cmpNumeric(block, src, lhs, rhs, op, lhs_src, rhs_src); } else if (lhs_ty_tag == .Type and rhs_ty_tag == .Type) { if (!is_equality_cmp) { return mod.fail(&block.base, src, "{s} operator not allowed for types", .{@tagName(op)}); } - return mod.constBool(sema.arena, src, lhs.value().?.eql(rhs.value().?) == (op == .eq)); + const lhs_as_type = try sema.resolveAirAsType(block, lhs_src, lhs); + const rhs_as_type = try sema.resolveAirAsType(block, rhs_src, rhs); + if (lhs_as_type.eql(rhs_as_type) == (op == .eq)) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } - const instructions = &[_]Air.Inst.Index{ lhs, rhs }; + const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); if (!resolved_type.isSelfComparable(is_equality_cmp)) { return mod.fail(&block.base, src, "operator not allowed for type '{}'", .{resolved_type}); @@ -5057,15 +5068,18 @@ fn zirCmp( if (casted_lhs.value()) |lhs_val| { if (casted_rhs.value()) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, resolved_type); + return sema.addConstUndef(resolved_type); + } + if (lhs_val.compare(op, rhs_val)) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; } - const result = lhs_val.compare(op, rhs_val); - return sema.mod.constBool(sema.arena, src, result); } } try sema.requireRuntimeBlock(block, src); - const tag: Inst.Tag = switch (op) { + const tag: Air.Inst.Tag = switch (op) { .lt => .cmp_lt, .lte => .cmp_lte, .eq => .cmp_eq, @@ -5073,28 +5087,26 @@ fn zirCmp( .gt => .cmp_gt, .neq => .cmp_neq, }; - const bool_type = Type.initTag(.bool); // TODO handle vectors - return block.addBinOp(src, bool_type, tag, casted_lhs, casted_rhs); + // TODO handle vectors + return block.addBinOp(tag, casted_lhs, casted_rhs); } fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const 
operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); const target = sema.mod.getTarget(); const abi_size = operand_ty.abiSize(target); - return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), abi_size); + return sema.addIntUnsigned(Type.initTag(.comptime_int), abi_size); } fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); const target = sema.mod.getTarget(); const bit_size = operand_ty.bitSize(target); - return sema.mod.constIntUnsigned(sema.arena, src, Type.initTag(.comptime_int), bit_size); + return sema.addIntUnsigned(Type.initTag(.comptime_int), bit_size); } fn zirThis( @@ -5171,18 +5183,16 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; - const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - return sema.mod.constType(sema.arena, src, operand.ty); + return sema.addType(operand.ty); } fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const operand_ptr = sema.resolveInst(inst_data.operand); const elem_ty = operand_ptr.ty.elemType(); - return sema.mod.constType(sema.arena, src, elem_ty); + return sema.addType(elem_ty); } fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -5217,7 +5227,7 @@ fn zirTypeofPeer( } const result_type = try sema.resolvePeerTypes(block, src, inst_list); - return sema.mod.constType(sema.arena, src, result_type); + return sema.addType(result_type); } fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -5231,17 +5241,21 @@ fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError const bool_type = Type.initTag(.bool); const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src); if (try sema.resolveDefinedValue(block, src, operand)) |val| { - return sema.mod.constBool(sema.arena, src, !val.toBool()); + if (val.toBool()) { + return Air.Inst.Ref.bool_false; + } else { + return Air.Inst.Ref.bool_true; + } } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, bool_type, .not, operand); + return block.addTyOp(.not, bool_type, operand); } fn zirBoolOp( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, - comptime is_bool_or: bool, + is_bool_or: bool, ) InnerError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5257,15 +5271,23 @@ fn zirBoolOp( if (lhs.value()) |lhs_val| { if (rhs.value()) |rhs_val| { if (is_bool_or) { - return sema.mod.constBool(sema.arena, src, lhs_val.toBool() or rhs_val.toBool()); + if (lhs_val.toBool() or rhs_val.toBool()) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } else { - return sema.mod.constBool(sema.arena, src, lhs_val.toBool() and rhs_val.toBool()); + if (lhs_val.toBool() and rhs_val.toBool()) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } } } try sema.requireRuntimeBlock(block, src); 
const tag: Air.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; - return block.addBinOp(src, bool_type, tag, lhs, rhs); + return block.addBinOp(tag, lhs, rhs); } fn zirBoolBr( @@ -5286,7 +5308,11 @@ fn zirBoolBr( if (try sema.resolveDefinedValue(parent_block, src, lhs)) |lhs_val| { if (lhs_val.toBool() == is_bool_or) { - return sema.mod.constBool(sema.arena, src, is_bool_or); + if (is_bool_or) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } // comptime-known left-hand side. No need for a block here; the result // is simply the rhs expression. Here we rely on there only being 1 @@ -5522,14 +5548,11 @@ fn analyzeRet( const fn_ty = func.owner_decl.ty; const fn_ret_ty = fn_ty.fnReturnType(); const casted_operand = try sema.coerce(block, fn_ret_ty, operand, src); - if (fn_ret_ty.zigTypeTag() == .Void) - _ = try block.addNoOp(src, Type.initTag(.noreturn), .retvoid) - else - _ = try block.addUnOp(src, Type.initTag(.noreturn), .ret, casted_operand); + _ = try block.addUnOp(.ret, casted_operand); return always_noreturn; } } - _ = try block.addUnOp(src, Type.initTag(.noreturn), .ret, operand); + _ = try block.addUnOp(.ret, operand); return always_noreturn; } @@ -5559,7 +5582,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne inst_data.is_volatile, inst_data.size, ); - return sema.mod.constType(sema.arena, .unneeded, ty); + return sema.addType(ty); } fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -5613,7 +5636,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError inst_data.flags.is_volatile, inst_data.size, ); - return sema.mod.constType(sema.arena, src, ty); + return sema.addType(ty); } fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -5794,7 +5817,7 @@ fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr const struct_obj = struct_ty.castTag(.@"struct").?.data; const field = struct_obj.fields.get(field_name) orelse return sema.failWithBadFieldAccess(block, struct_obj, src, field_name); - return sema.mod.constType(sema.arena, src, field.ty); + return sema.addType(field.ty); } fn zirErrorReturnTrace( @@ -5937,7 +5960,7 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro .val = Value.initTag(.zero), }); if (!type_res.isAllowzeroPtr()) { - const is_non_zero = try block.addBinOp(src, Type.initTag(.bool), .cmp_neq, operand_coerced, zero); + const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, zero); try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); } @@ -5951,12 +5974,12 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro .ty = Type.initTag(.u64), .val = Value.initPayload(&val_payload.base), }); - const remainder = try block.addBinOp(src, Type.initTag(.u64), .bit_and, operand_coerced, align_minus_1); - const is_aligned = try block.addBinOp(src, Type.initTag(.bool), .cmp_eq, remainder, zero); + const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); + const is_aligned = try block.addBinOp(.cmp_eq, remainder, zero); try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment); } } - return block.addUnOp(src, type_res, .bitcast, operand_coerced); + return block.addTyOp(.bitcast, type_res, operand_coerced); } fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { @@ -6841,7 +6864,7 @@ fn coerce( return 
sema.coerceVarArgParam(block, inst, inst_src); } - const inst_ty = sema.getTypeOfAirRef(inst); + const inst_ty = sema.getTypeOf(inst); // If the types are the same, we can return the operand. if (dest_type.eql(inst_ty)) return inst; @@ -6950,7 +6973,7 @@ fn coerce( (src_info.signedness == .signed and dst_info.signedness == .unsigned and dst_info.bits > src_info.bits)) { try sema.requireRuntimeBlock(block, inst_src); - return block.addUnOp(inst_src, dest_type, .intcast, inst); + return block.addTyOp(.intcast, dest_type, inst); } } }, @@ -6963,7 +6986,7 @@ fn coerce( const dst_bits = dest_type.floatBits(target); if (dst_bits >= src_bits) { try sema.requireRuntimeBlock(block, inst_src); - return block.addUnOp(inst_src, dest_type, .floatcast, inst); + return block.addTyOp(.floatcast, dest_type, inst); } } }, @@ -7062,7 +7085,7 @@ fn coerceVarArgParam( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const inst_ty = sema.getTypeOfAirRef(inst); + const inst_ty = sema.getTypeOf(inst); switch (inst_ty.zigTypeTag()) { .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst_src, "integer and float literals in var args function must be casted", .{}), else => {}, @@ -7121,7 +7144,7 @@ fn storePtr( // TODO handle if the element type requires comptime try sema.requireRuntimeBlock(block, src); - _ = try block.addBinOp(src, Type.initTag(.void), .store, ptr, value); + _ = try block.addBinOp(.store, ptr, value); } fn bitcast( @@ -7221,7 +7244,7 @@ fn analyzeRef( } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, ptr_type, .ref, operand); + return block.addTyOp(.ref, ptr_type, operand); } fn analyzeLoad( @@ -7231,7 +7254,7 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) InnerError!Air.Inst.Ref { - const ptr_ty = sema.getTypeOfAirRef(ptr); + const ptr_ty = sema.getTypeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag()) { .Pointer => ptr_ty.elemType(), else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), @@ -7257,15 +7280,19 @@ fn analyzeIsNull( const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { - return sema.mod.constUndef(sema.arena, src, result_ty); + return sema.addConstUndef(result_ty); } const is_null = opt_val.isNull(); const bool_value = if (invert_logic) !is_null else is_null; - return sema.mod.constBool(sema.arena, src, bool_value); + if (bool_value) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } try sema.requireRuntimeBlock(block, src); - const inst_tag: Inst.Tag = if (invert_logic) .is_non_null else .is_null; - return block.addUnOp(src, result_ty, inst_tag, operand); + const air_tag: Air.Inst.Tag = if (invert_logic) .is_non_null else .is_null; + return block.addUnOp(air_tag, operand); } fn analyzeIsNonErr( @@ -7275,18 +7302,22 @@ fn analyzeIsNonErr( operand: Air.Inst.Ref, ) InnerError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); - if (ot != .ErrorSet and ot != .ErrorUnion) return sema.mod.constBool(sema.arena, src, true); - if (ot == .ErrorSet) return sema.mod.constBool(sema.arena, src, false); + if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; + if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |err_union| { if (err_union.isUndef()) { - return sema.mod.constUndef(sema.arena, src, result_ty); + return 
sema.addConstUndef(result_ty); + } + if (err_union.getError() == null) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; } - return sema.mod.constBool(sema.arena, src, err_union.getError() == null); } try sema.requireRuntimeBlock(block, src); - return block.addUnOp(src, result_ty, .is_non_err, operand); + return block.addUnOp(.is_non_err, operand); } fn analyzeSlice( @@ -7372,31 +7403,43 @@ fn cmpNumeric( lhs: Air.Inst.Ref, rhs: Air.Inst.Ref, op: std.math.CompareOperator, + lhs_src: LazySrcLoc, + rhs_src: LazySrcLoc, ) InnerError!Air.Inst.Ref { - assert(lhs.ty.isNumeric()); - assert(rhs.ty.isNumeric()); + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); - const lhs_ty_tag = lhs.ty.zigTypeTag(); - const rhs_ty_tag = rhs.ty.zigTypeTag(); + assert(lhs_ty.isNumeric()); + assert(rhs_ty.isNumeric()); + + const lhs_ty_tag = lhs_ty.zigTypeTag(); + const rhs_ty_tag = rhs_ty.zigTypeTag(); if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) { - if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) { + if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ - lhs.ty.arrayLen(), - rhs.ty.arrayLen(), + lhs_ty.arrayLen(), + rhs_ty.arrayLen(), }); } return sema.mod.fail(&block.base, src, "TODO implement support for vectors in cmpNumeric", .{}); } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) { return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{ - lhs.ty, - rhs.ty, + lhs_ty, + rhs_ty, }); } - if (lhs.value()) |lhs_val| { - if (rhs.value()) |rhs_val| { - return sema.mod.constBool(sema.arena, src, Value.compare(lhs_val, op, rhs_val)); + if (try sema.resolvePossiblyUndefinedValue(block, lhs_src, lhs)) |lhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, rhs_src, rhs)) |rhs_val| { + if (lhs_val.isUndef() or rhs_val.isUndef()) { + return sema.addConstUndef(Type.initTag(.bool)); + } + if (Value.compare(lhs_val, op, rhs_val)) { + return Air.Inst.Ref.bool_true; + } else { + return Air.Inst.Ref.bool_false; + } } } @@ -7422,19 +7465,19 @@ fn cmpNumeric( // Implicit cast the smaller one to the larger one. const dest_type = x: { if (lhs_ty_tag == .ComptimeFloat) { - break :x rhs.ty; + break :x rhs_ty; } else if (rhs_ty_tag == .ComptimeFloat) { - break :x lhs.ty; + break :x lhs_ty; } - if (lhs.ty.floatBits(target) >= rhs.ty.floatBits(target)) { - break :x lhs.ty; + if (lhs_ty.floatBits(target) >= rhs_ty.floatBits(target)) { + break :x lhs_ty; } else { - break :x rhs.ty; + break :x rhs_ty; } }; - const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs.src); - const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs.src); - return block.addBinOp(src, dest_type, Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); + const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs_src); + const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs_src); + return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } // For mixed unsigned integer sizes, implicit cast both operands to the larger integer. 
// For mixed signed and unsigned integers, implicit cast both operands to a signed @@ -7445,11 +7488,11 @@ fn cmpNumeric( const lhs_is_signed = if (lhs.value()) |lhs_val| lhs_val.compareWithZero(.lt) else - (lhs.ty.isFloat() or lhs.ty.isSignedInt()); + (lhs_ty.isFloat() or lhs_ty.isSignedInt()); const rhs_is_signed = if (rhs.value()) |rhs_val| rhs_val.compareWithZero(.lt) else - (rhs.ty.isFloat() or rhs.ty.isSignedInt()); + (rhs_ty.isFloat() or rhs_ty.isSignedInt()); const dest_int_is_signed = lhs_is_signed or rhs_is_signed; var dest_float_type: ?Type = null; @@ -7457,7 +7500,7 @@ fn cmpNumeric( var lhs_bits: usize = undefined; if (lhs.value()) |lhs_val| { if (lhs_val.isUndef()) - return sema.mod.constUndef(sema.arena, src, Type.initTag(.bool)); + return sema.addConstUndef(Type.initTag(.bool)); const is_unsigned = if (lhs_is_float) x: { var bigint_space: Value.BigIntSpace = undefined; var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(sema.gpa); @@ -7465,8 +7508,8 @@ fn cmpNumeric( const zcmp = lhs_val.orderAgainstZero(); if (lhs_val.floatHasFraction()) { switch (op) { - .eq => return sema.mod.constBool(sema.arena, src, false), - .neq => return sema.mod.constBool(sema.arena, src, true), + .eq => return Air.Inst.Ref.bool_false, + .neq => return Air.Inst.Ref.bool_true, else => {}, } if (zcmp == .lt) { @@ -7483,16 +7526,16 @@ fn cmpNumeric( }; lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); } else if (lhs_is_float) { - dest_float_type = lhs.ty; + dest_float_type = lhs_ty; } else { - const int_info = lhs.ty.intInfo(target); + const int_info = lhs_ty.intInfo(target); lhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } var rhs_bits: usize = undefined; if (rhs.value()) |rhs_val| { if (rhs_val.isUndef()) - return sema.mod.constUndef(sema.arena, src, Type.initTag(.bool)); + return sema.addConstUndef(Type.initTag(.bool)); const is_unsigned = if (rhs_is_float) x: { var bigint_space: Value.BigIntSpace = undefined; var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(sema.gpa); @@ -7500,8 +7543,8 @@ fn cmpNumeric( const zcmp = rhs_val.orderAgainstZero(); if (rhs_val.floatHasFraction()) { switch (op) { - .eq => return sema.mod.constBool(sema.arena, src, false), - .neq => return sema.mod.constBool(sema.arena, src, true), + .eq => return Air.Inst.Ref.bool_false, + .neq => return Air.Inst.Ref.bool_true, else => {}, } if (zcmp == .lt) { @@ -7518,9 +7561,9 @@ fn cmpNumeric( }; rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); } else if (rhs_is_float) { - dest_float_type = rhs.ty; + dest_float_type = rhs_ty; } else { - const int_info = rhs.ty.intInfo(target); + const int_info = rhs_ty.intInfo(target); rhs_bits = int_info.bits + @boolToInt(int_info.signedness == .unsigned and dest_int_is_signed); } @@ -7532,10 +7575,10 @@ fn cmpNumeric( const signedness: std.builtin.Signedness = if (dest_int_is_signed) .signed else .unsigned; break :blk try Module.makeIntType(sema.arena, signedness, casted_bits); }; - const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs.src); - const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs.src); + const casted_lhs = try sema.coerce(block, dest_type, lhs, lhs_src); + const casted_rhs = try sema.coerce(block, dest_type, rhs, rhs_src); - return block.addBinOp(src, Type.initTag(.bool), Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); + return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: 
Air.Inst.Ref) !Air.Inst.Index { @@ -7544,7 +7587,7 @@ fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Ins } try sema.requireRuntimeBlock(block, inst.src); - return block.addUnOp(inst.src, dest_type, .wrap_optional, inst); + return block.addTyOp(.wrap_optional, dest_type, inst); } fn wrapErrorUnion( @@ -7617,19 +7660,24 @@ fn wrapErrorUnion( // we are coercing from E to E!T if (inst.ty.zigTypeTag() == .ErrorSet) { var coerced = try sema.coerce(block, err_union.data.error_set, inst, inst.src); - return block.addUnOp(inst.src, dest_type, .wrap_errunion_err, coerced); + return block.addTyOp(.wrap_errunion_err, dest_type, coerced); } else { var coerced = try sema.coerce(block, err_union.data.payload, inst, inst.src); - return block.addUnOp(inst.src, dest_type, .wrap_errunion_payload, coerced); + return block.addTyOp(.wrap_errunion_payload, dest_type, coerced); } } -fn resolvePeerTypes(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, instructions: []Air.Inst.Index) !Type { +fn resolvePeerTypes( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + instructions: []Air.Inst.Ref, +) !Type { if (instructions.len == 0) return Type.initTag(.noreturn); if (instructions.len == 1) - return instructions[0].ty; + return sema.getTypeOf(instructions[0]); const target = sema.mod.getTarget(); @@ -7989,7 +8037,7 @@ fn enumFieldSrcLoc( } /// Returns the type of the AIR instruction. -fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type { +fn getTypeOf(sema: *Sema, air_ref: Air.Inst.Ref) Type { switch (air_ref) { .none => unreachable, .u8_type => return Type.initTag(.u8), @@ -8045,21 +8093,13 @@ fn getTypeOfAirRef(sema: *Sema, air_ref: Air.Inst.Ref) Type { .fn_ccc_void_no_args_type => return Type.initTag(.fn_ccc_void_no_args), .single_const_pointer_to_comptime_int_type => return Type.initTag(.single_const_pointer_to_comptime_int), .const_slice_u8_type => return Type.initTag(.const_slice_u8), - else => return sema.getAirType(air_ref), + else => {}, } -} - -/// Asserts the AIR instruction is a `const_ty` and returns the type. 
-fn getAirType(sema: *Sema, air_ref: Air.Inst.Ref) Type {
-    var i: usize = @enumToInt(air_ref);
-    if (i < Air.Inst.Ref.typed_value_map.len) {
-        return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable;
-    }
-    i -= Air.Inst.Ref.typed_value_map.len;
+    const air_index = @as(usize, @enumToInt(air_ref)) - Air.Inst.Ref.typed_value_map.len;
     const air_tags = sema.air_instructions.items(.tag);
     const air_datas = sema.air_instructions.items(.data);
-    assert(air_tags[i] == .const_ty);
-    return air_datas[i].ty;
+    assert(air_tags[air_index] == .const_ty);
+    return air_datas[air_index].ty;
 }
 
 pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
@@ -8126,7 +8166,15 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref {
     return indexToRef(@intCast(u32, sema.air_instructions.len - 1));
 }
 
-pub fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref {
+fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) InnerError!Air.Inst.Ref {
+    return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int));
+}
+
+fn addConstUndef(sema: *Sema, ty: Type) InnerError!Air.Inst.Ref {
+    return sema.addConstant(ty, Value.initTag(.undef));
+}
+
+fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref {
     const gpa = sema.gpa;
     const ty_inst = try sema.addType(ty);
     try sema.air_values.append(gpa, val);

From 3c5927fb87034affd6af56ecd5d9ae07fe23d690 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 14 Jul 2021 12:16:48 -0700
Subject: [PATCH 14/53] Sema: add a strategy for handling costly source locations

Now you can pass `.unneeded` for a `LazySrcLoc`, and if a compile error
ends up needing it, you'll get `error.NeededSourceLocation`. Callsites
can now exploit this error to do the expensive computation that produces
a source location object and then repeat the operation.
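As an illustration, here is a minimal sketch of the callsite pattern this
enables inside src/Sema.zig, using the `coerce` and `CompileError`
declarations from this series. The wrapper name `coerceWithLazySrc` and the
helper `computeExpensiveSrcLoc` are hypothetical stand-ins for whatever
costly work produces the real `LazySrcLoc`:

    fn coerceWithLazySrc(
        sema: *Sema,
        block: *Scope.Block,
        dest_type: Type,
        inst: Air.Inst.Ref,
    ) CompileError!Air.Inst.Ref {
        // First attempt: pass `.unneeded` and skip computing a source location.
        return sema.coerce(block, dest_type, inst, .unneeded) catch |err| switch (err) {
            error.NeededSourceLocation => {
                // A compile error actually needs the source location, so pay
                // for computing one and repeat the operation with it.
                const src: LazySrcLoc = computeExpensiveSrcLoc(); // hypothetical helper
                return sema.coerce(block, dest_type, inst, src);
            },
            else => |e| return e,
        };
    }

This keeps the happy path free of source-location bookkeeping; the expensive
computation runs only when a compile error is actually being emitted.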
--- src/Compilation.zig | 6 +- src/Module.zig | 32 +-- src/Sema.zig | 585 ++++++++++++++++++++++---------------------- 3 files changed, 317 insertions(+), 306 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 4a442a8b67..f241ae6b10 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -148,7 +148,7 @@ emit_docs: ?EmitLoc, work_queue_wait_group: WaitGroup, astgen_wait_group: WaitGroup, -pub const InnerError = Module.InnerError; +pub const SemaError = Module.SemaError; pub const CRTFile = struct { lock: Cache.Lock, @@ -3170,7 +3170,7 @@ pub fn addCCArgs( try argv.appendSlice(comp.clang_argv); } -fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8, args: anytype) InnerError { +fn failCObj(comp: *Compilation, c_object: *CObject, comptime format: []const u8, args: anytype) SemaError { @setCold(true); const err_msg = blk: { const msg = try std.fmt.allocPrint(comp.gpa, format, args); @@ -3191,7 +3191,7 @@ fn failCObjWithOwnedErrorMsg( comp: *Compilation, c_object: *CObject, err_msg: *CObject.ErrorMsg, -) InnerError { +) SemaError { @setCold(true); { const lock = comp.mutex.acquire(); diff --git a/src/Module.zig b/src/Module.zig index 3ce3c47f14..0a082313b3 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1996,7 +1996,8 @@ pub const LazySrcLoc = union(enum) { } }; -pub const InnerError = error{ OutOfMemory, AnalysisFail }; +pub const SemaError = error{ OutOfMemory, AnalysisFail }; +pub const CompileError = error{ OutOfMemory, AnalysisFail, NeededSourceLocation }; pub fn deinit(mod: *Module) void { const gpa = mod.gpa; @@ -2635,7 +2636,7 @@ pub fn mapOldZirToNew( } } -pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) InnerError!void { +pub fn ensureDeclAnalyzed(mod: *Module, decl: *Decl) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2735,7 +2736,7 @@ pub fn semaPkg(mod: *Module, pkg: *Package) !void { /// Regardless of the file status, will create a `Decl` so that we /// can track dependencies and re-analyze when the file becomes outdated. 
-pub fn semaFile(mod: *Module, file: *Scope.File) InnerError!void { +pub fn semaFile(mod: *Module, file: *Scope.File) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3150,7 +3151,7 @@ pub fn scanNamespace( extra_start: usize, decls_len: u32, parent_decl: *Decl, -) InnerError!usize { +) SemaError!usize { const tracy = trace(@src()); defer tracy.end(); @@ -3197,7 +3198,7 @@ const ScanDeclIter = struct { unnamed_test_index: usize = 0, }; -fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) InnerError!void { +fn scanDecl(iter: *ScanDeclIter, decl_sub_index: usize, flags: u4) SemaError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3451,7 +3452,7 @@ fn deleteDeclExports(mod: *Module, decl: *Decl) void { mod.gpa.free(kv.value); } -pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { +pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { const tracy = trace(@src()); defer tracy.end(); @@ -3804,7 +3805,7 @@ pub fn fail( src: LazySrcLoc, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const err_msg = try mod.errMsg(scope, src, format, args); return mod.failWithOwnedErrorMsg(scope, err_msg); } @@ -3817,7 +3818,7 @@ pub fn failTok( token_index: ast.TokenIndex, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const src = scope.srcDecl().?.tokSrcLoc(token_index); return mod.fail(scope, src, format, args); } @@ -3830,18 +3831,21 @@ pub fn failNode( node_index: ast.Node.Index, comptime format: []const u8, args: anytype, -) InnerError { +) CompileError { const src = scope.srcDecl().?.nodeSrcLoc(node_index); return mod.fail(scope, src, format, args); } -pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) InnerError { +pub fn failWithOwnedErrorMsg(mod: *Module, scope: *Scope, err_msg: *ErrorMsg) CompileError { @setCold(true); { errdefer err_msg.destroy(mod.gpa); - try mod.failed_decls.ensureCapacity(mod.gpa, mod.failed_decls.count() + 1); - try mod.failed_files.ensureCapacity(mod.gpa, mod.failed_files.count() + 1); + if (err_msg.src_loc.lazy == .unneeded) { + return error.NeededSourceLocation; + } + try mod.failed_decls.ensureUnusedCapacity(mod.gpa, 1); + try mod.failed_files.ensureUnusedCapacity(mod.gpa, 1); } switch (scope.tag) { .block => { @@ -4340,7 +4344,7 @@ pub const SwitchProngSrc = union(enum) { } }; -pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void { +pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -4490,7 +4494,7 @@ pub fn analyzeStructFields(mod: *Module, struct_obj: *Struct) InnerError!void { } } -pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) InnerError!void { +pub fn analyzeUnionFields(mod: *Module, union_obj: *Union) CompileError!void { const tracy = trace(@src()); defer tracy.end(); diff --git a/src/Sema.zig b/src/Sema.zig index 829dd843cc..91f81ffeed 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -61,7 +61,8 @@ const Zir = @import("Zir.zig"); const Module = @import("Module.zig"); const trace = @import("tracy.zig").trace; const Scope = Module.Scope; -const InnerError = Module.InnerError; +const CompileError = Module.CompileError; +const SemaError = Module.SemaError; const Decl = Module.Decl; const LazySrcLoc = Module.LazySrcLoc; const RangeSet = @import("RangeSet.zig"); @@ -83,7 +84,7 @@ pub fn analyzeFnBody( sema: *Sema, block: *Scope.Block, fn_body_inst: Zir.Inst.Index, -) InnerError!void { +) 
SemaError!void { const tags = sema.code.instructions.items(.tag); const datas = sema.code.instructions.items(.data); const body: []const Zir.Inst.Index = switch (tags[fn_body_inst]) { @@ -109,13 +110,16 @@ pub fn analyzeFnBody( }, else => unreachable, }; - _ = try sema.analyzeBody(block, body); + _ = sema.analyzeBody(block, body) catch |err| switch (err) { + error.NeededSourceLocation => unreachable, + else => |e| return e, + }; } /// Returns only the result from the body that is specified. /// Only appropriate to call when it is determined at comptime that this body /// has no peers. -fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) CompileError!Air.Inst.Ref { const break_inst = try sema.analyzeBody(block, body); const operand_ref = sema.code.instructions.items(.data)[break_inst].@"break".operand; return sema.resolveInst(operand_ref); @@ -125,7 +129,7 @@ fn resolveBody(sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index) I /// return type of `analyzeBody` so that we can tail call them. /// Only appropriate to return when the instruction is known to be NoReturn /// solely based on the ZIR tag. -const always_noreturn: InnerError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); +const always_noreturn: CompileError!Zir.Inst.Index = @as(Zir.Inst.Index, undefined); /// This function is the main loop of `Sema` and it can be used in two different ways: /// * The traditional way where there are N breaks out of the block and peer type @@ -140,7 +144,7 @@ pub fn analyzeBody( sema: *Sema, block: *Scope.Block, body: []const Zir.Inst.Index, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { // No tracy calls here, to avoid interfering with the tail call mechanism. 
const map = &block.sema.inst_map; @@ -541,7 +545,7 @@ pub fn analyzeBody( } } -fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const extended = sema.code.instructions.items(.data)[inst].extended; switch (extended.opcode) { // zig fmt: off @@ -638,7 +642,7 @@ fn resolveConstValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !Value { +) CompileError!Value { return (try sema.resolveDefinedValue(block, src, air_ref)) orelse return sema.failWithNeededComptime(block, src); } @@ -648,7 +652,7 @@ fn resolveDefinedValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !?Value { +) CompileError!?Value { if (try sema.resolvePossiblyUndefinedValue(block, src, air_ref)) |val| { if (val.isUndef()) { return sema.failWithUseOfUndef(block, src); @@ -663,7 +667,7 @@ fn resolvePossiblyUndefinedValue( block: *Scope.Block, src: LazySrcLoc, air_ref: Air.Inst.Ref, -) !?Value { +) CompileError!?Value { const ty = sema.getTypeOf(air_ref); if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { return opv; @@ -687,11 +691,11 @@ fn resolvePossiblyUndefinedValue( } } -fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { +fn failWithNeededComptime(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) CompileError { return sema.mod.fail(&block.base, src, "unable to resolve comptime value", .{}); } -fn failWithUseOfUndef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) InnerError { +fn failWithUseOfUndef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc) CompileError { return sema.mod.fail(&block.base, src, "use of undefined value here causes undefined behavior", .{}); } @@ -733,7 +737,7 @@ pub fn resolveInstConst( block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref, -) InnerError!TypedValue { +) CompileError!TypedValue { const air_ref = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ @@ -742,13 +746,13 @@ pub fn resolveInstConst( }; } -fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitcastResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement zir_sema.zirBitcastResultPtr", .{}); } -fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCoerceResultPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = inst; const tracy = trace(@src()); defer tracy.end(); @@ -760,7 +764,7 @@ pub fn analyzeStructDecl( new_decl: *Decl, inst: Zir.Inst.Index, struct_obj: *Module.Struct, -) InnerError!void { +) SemaError!void { const extended = sema.code.instructions.items(.data)[inst].extended; assert(extended.opcode == .struct_decl); const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); @@ -783,7 +787,7 @@ fn zirStructDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const small = @bitCast(Zir.Inst.StructDecl.Small, extended.small); const src: LazySrcLoc = if (small.has_src_node) blk: { const node_offset = @bitCast(i32, sema.code.extra[extended.operand]); @@ -854,7 +858,7 @@ fn zirEnumDecl( sema: 
*Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1051,7 +1055,7 @@ fn zirUnionDecl( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1115,7 +1119,7 @@ fn zirOpaqueDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1135,7 +1139,7 @@ fn zirErrorSetDecl( block: *Scope.Block, inst: Zir.Inst.Index, name_strategy: Zir.Inst.NameStrategy, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1175,7 +1179,7 @@ fn zirRetPtr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1187,7 +1191,7 @@ fn zirRetPtr( return block.addNoOp(src, ptr_type, .alloc); } -fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1200,7 +1204,7 @@ fn zirRetType( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1211,7 +1215,7 @@ fn zirRetType( return sema.addType(ret_type); } -fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureResultUsed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1227,14 +1231,14 @@ fn ensureResultUsed( block: *Scope.Block, operand: Air.Inst.Ref, src: LazySrcLoc, -) InnerError!void { +) CompileError!void { switch (operand.ty.zigTypeTag()) { .Void, .NoReturn => return, else => return sema.mod.fail(&block.base, src, "expression value is ignored", .{}), } } -fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1247,7 +1251,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde } } -fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1281,7 +1285,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const arg_name = inst_data.get(sema.code); const arg_index = sema.next_arg_index; @@ -1304,13 +1308,13 @@ fn zirAllocExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = 
sema.code.extraData(Zir.Inst.AllocExtended, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocExtended", .{}); } -fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1333,13 +1337,13 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne }); } -fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO implement Sema.zirAllocInferredComptime", .{}); } -fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1352,7 +1356,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A return block.addNoOp(var_decl_src, ptr_type, .alloc); } -fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1371,7 +1375,7 @@ fn zirAllocInferred( block: *Scope.Block, inst: Zir.Inst.Index, inferred_alloc_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1395,7 +1399,7 @@ fn zirAllocInferred( return result; } -fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1421,7 +1425,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde ptr.tag = .alloc; } -fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1494,7 +1498,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind } } -fn zirValidateArrayInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirValidateArrayInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO implement Sema.zirValidateArrayInitPtr", .{}); @@ -1506,7 +1510,7 @@ fn failWithBadFieldAccess( struct_obj: *Module.Struct, field_src: LazySrcLoc, field_name: []const u8, -) InnerError { +) CompileError { const mod = sema.mod; const gpa = sema.gpa; @@ -1533,7 +1537,7 @@ fn failWithBadUnionFieldAccess( union_obj: *Module.Union, field_src: LazySrcLoc, field_name: []const u8, -) InnerError { +) CompileError { const mod = sema.mod; const gpa = sema.gpa; @@ -1554,7 +1558,7 @@ fn failWithBadUnionFieldAccess( return 
mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1575,7 +1579,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return sema.storePtr(block, src, bitcasted_ptr, value); } -fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1594,7 +1598,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.storePtr(block, src, bitcasted_ptr, value); } -fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const quota = try sema.resolveAlreadyCoercedInt(block, src, inst_data.operand, u32); @@ -1602,7 +1606,7 @@ fn zirSetEvalBranchQuota(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) sema.branch_quota = quota; } -fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1612,7 +1616,7 @@ fn zirStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!v return sema.storePtr(block, sema.src, ptr, value); } -fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -1624,7 +1628,7 @@ fn zirStoreNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.storePtr(block, src, ptr, value); } -fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1660,7 +1664,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.addType(param_type); } -fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1688,7 +1692,7 @@ fn zirStr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.analyzeDeclRef(block, .unneeded, new_decl); } -fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -1697,7 +1701,7 @@ fn zirInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.addIntUnsigned(Type.initTag(.comptime_int), int); } -fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const 
tracy = trace(@src()); defer tracy.end(); @@ -1715,7 +1719,7 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! }); } -fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].float; @@ -1728,7 +1732,7 @@ fn zirFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!A }); } -fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; @@ -1742,7 +1746,7 @@ fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro }); } -fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1757,7 +1761,7 @@ fn zirCompileLog( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { var managed = sema.mod.compile_log_text.toManaged(sema.gpa); defer sema.mod.compile_log_text = managed.moveToUnmanaged(); const writer = managed.writer(); @@ -1789,7 +1793,7 @@ fn zirCompileLog( }); } -fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -1799,7 +1803,7 @@ fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return always_noreturn; } -fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); const msg_inst = sema.resolveInst(inst_data.operand); @@ -1807,7 +1811,7 @@ fn zirPanic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Z return sema.panicWithMsg(block, src, msg_inst); } -fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1872,7 +1876,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } -fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1882,13 +1886,13 @@ fn zirCImport(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirCImport", .{}); } -fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{}); } -fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -1946,7 +1950,7 @@ fn resolveBlockBody( child_block: *Scope.Block, body: []const Zir.Inst.Index, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { _ = try sema.analyzeBody(child_block, body); return sema.analyzeBlockBody(parent_block, src, child_block, merges); } @@ -1957,7 +1961,7 @@ fn analyzeBlockBody( src: LazySrcLoc, child_block: *Scope.Block, merges: *Scope.Block.Merges, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2033,7 +2037,7 @@ fn analyzeBlockBody( return &merges.block_inst.base; } -fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2069,13 +2073,13 @@ fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
try sema.mod.analyzeExport(&block.base, src, export_name, decl); } -fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetAlignStack(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetAlignStack", .{}); } -fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const is_cold = try sema.resolveConstBool(block, operand_src, inst_data.operand); @@ -2083,19 +2087,19 @@ fn zirSetCold(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError func.is_cold = is_cold; } -fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetFloatMode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src: LazySrcLoc = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirSetFloatMode", .{}); } -fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirSetRuntimeSafety(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; block.want_safety = try sema.resolveConstBool(block, operand_src, inst_data.operand); } -fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2105,13 +2109,13 @@ fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint); } -fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirFence", .{}); } -fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -2151,7 +2155,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) InnerE } } -fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -2165,7 +2169,7 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); } -fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2173,7 +2177,7 @@ fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeDeclRef(block, src, decl); } -fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDeclVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const src = inst_data.src(); const decl_name = inst_data.get(sema.code); @@ -2199,7 +2203,7 @@ fn lookupInNamespace( sema: *Sema, namespace: *Scope.Namespace, ident_name: []const u8, -) InnerError!?*Decl { +) CompileError!?*Decl { const namespace_decl = namespace.getDecl(); if (namespace_decl.analysis == .file_failure) { try sema.mod.declareDeclDependency(sema.owner_decl, namespace_decl); @@ -2227,7 +2231,7 @@ fn zirCall( inst: Zir.Inst.Index, modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2257,7 +2261,7 @@ fn analyzeCall( modifier: std.builtin.CallOptions.Modifier, ensure_result_used: bool, args: []const Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (func.ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); @@ -2412,7 +2416,7 @@ fn analyzeCall( return result; } -fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2423,7 +2427,7 @@ fn zirIntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.addType(ty); } -fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2435,7 +2439,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.addType(opt_type); } -fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const array_type = try sema.resolveType(block, src, inst_data.operand); @@ -2443,7 +2447,7 @@ fn zirElemType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.addType(elem_type); } -fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const elem_type_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const len_src: LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node }; @@ -2457,7 +2461,7 @@ fn zirVectorType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.addType(vector_type); } -fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn 
zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2470,7 +2474,7 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.addType(array_ty); } -fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2485,7 +2489,7 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) return sema.addType(array_ty); } -fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2497,7 +2501,7 @@ fn zirAnyframeType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner return sema.addType(anyframe_type); } -fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2517,7 +2521,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn return sema.addType(err_union_ty); } -fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2536,7 +2540,7 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr }); } -fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2566,7 +2570,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addTyOp(.bitcast, result_ty, op_coerced); } -fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2599,7 +2603,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return block.addTyOp(.bitcast, Type.initTag(.anyerror), op); } -fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2689,7 +2693,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inn }); } -fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const tracy = trace(@src()); defer tracy.end(); @@ -2703,7 +2707,7 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE }); } -fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEnumToInt(sema: *Sema, block: 
*Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].un_node; @@ -2741,7 +2745,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr }); } - if (enum_tag.value()) |enum_tag_val| { + if (try sema.resolvePossiblyUndefinedValue(block, operand_src, enum_tag)) |enum_tag_val| { if (enum_tag_val.castTag(.enum_field_index)) |enum_field_payload| { const field_index = enum_field_payload.data; switch (enum_tag.ty.tag()) { @@ -2785,7 +2789,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return block.addTyOp(.bitcast, int_tag_ty, enum_tag); } -fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); const arena = sema.arena; @@ -2801,16 +2805,16 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return mod.fail(&block.base, dest_ty_src, "expected enum, found {}", .{dest_ty}); } - if (dest_ty.isNonexhaustiveEnum()) { - if (operand.value()) |int_val| { + if (try sema.resolvePossiblyUndefinedValue(block, operand_src, operand)) |int_val| { + if (dest_ty.isNonexhaustiveEnum()) { return mod.constInst(arena, src, .{ .ty = dest_ty, .val = int_val, }); } - } - - if (try sema.resolveDefinedValue(block, operand_src, operand)) |int_val| { + if (int_val.isUndef()) { + return sema.failWithUseOfUndef(block, operand_src); + } if (!dest_ty.enumHasInt(int_val, target)) { const msg = msg: { const msg = try mod.errMsg( @@ -2846,7 +2850,7 @@ fn zirOptionalPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2863,7 +2867,7 @@ fn zirOptionalPayloadPtr( const child_type = try opt_type.optionalChildAlloc(sema.arena); const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); - if (optional_ptr.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); if (val.isNull()) { return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); @@ -2889,7 +2893,7 @@ fn zirOptionalPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2903,7 +2907,7 @@ fn zirOptionalPayload( const child_type = try opt_type.optionalChildAlloc(sema.arena); - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.isNull()) { return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); } @@ -2927,7 +2931,7 @@ fn zirErrUnionPayload( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2937,7 +2941,7 @@ fn zirErrUnionPayload( if (operand.ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getError()) |name| { return sema.mod.fail(&block.base, src, "caught unexpected error '{s}'", 
.{name}); } @@ -2962,7 +2966,7 @@ fn zirErrUnionPayloadPtr( block: *Scope.Block, inst: Zir.Inst.Index, safety_check: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -2976,7 +2980,7 @@ fn zirErrUnionPayloadPtr( const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); - if (operand.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); if (val.getError()) |name| { return sema.mod.fail(&block.base, src, "caught unexpected error '{s}'", .{name}); @@ -3001,7 +3005,7 @@ fn zirErrUnionPayloadPtr( } /// Value in, value out -fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3013,7 +3017,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner const result_ty = operand.ty.castTag(.error_union).?.data.error_set; - if (operand.value()) |val| { + if (try sema.resolveDefinedValue(block, src, operand)) |val| { assert(val.getError() != null); const data = val.castTag(.error_union).?.data; return sema.mod.constInst(sema.arena, src, .{ @@ -3027,7 +3031,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inner } /// Pointer in, value out -fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3041,7 +3045,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In const result_ty = operand.ty.elemType().castTag(.error_union).?.data.error_set; - if (operand.value()) |pointer_val| { + if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); assert(val.getError() != null); const data = val.castTag(.error_union).?.data; @@ -3055,7 +3059,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In return block.addTyOp(.unwrap_errunion_err_ptr, result_ty, operand); } -fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!void { +fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { const tracy = trace(@src()); defer tracy.end(); @@ -3074,7 +3078,7 @@ fn zirFunc( block: *Scope.Block, inst: Zir.Inst.Index, inferred_error_set: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3125,7 +3129,7 @@ fn funcCommon( is_extern: bool, src_locs: Zir.Inst.Func.SrcLocs, opt_lib_name: ?[]const u8, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = src_node_offset }; const bare_return_type = try sema.resolveType(block, ret_ty_src, zir_return_type); @@ -3266,7 +3270,7 @@ fn funcCommon( return result; } -fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = 
trace(@src()); defer tracy.end(); @@ -3274,7 +3278,7 @@ fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air. return sema.analyzeAs(block, .unneeded, bin_inst.lhs, bin_inst.rhs); } -fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAsNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3290,13 +3294,13 @@ fn analyzeAs( src: LazySrcLoc, zir_dest_type: Zir.Inst.Ref, zir_operand: Zir.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const dest_type = try sema.resolveType(block, src, zir_dest_type); const operand = sema.resolveInst(zir_operand); return sema.coerce(block, dest_type, operand, src); } -fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3312,7 +3316,7 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addUnOp(.ptrtoint, ptr); } -fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3330,7 +3334,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); } -fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3343,7 +3347,7 @@ fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3358,7 +3362,7 @@ fn zirFieldValNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3371,7 +3375,7 @@ fn zirFieldPtrNamed(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); } -fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3414,7 +3418,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten int", .{}); } -fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer 
tracy.end(); @@ -3428,7 +3432,7 @@ fn zirBitcast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.bitcast(block, dest_type, operand, operand_src); } -fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3471,7 +3475,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErr return sema.mod.fail(&block.base, src, "TODO implement analyze widen or shorten float", .{}); } -fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3486,7 +3490,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.analyzeLoad(block, sema.src, result_ptr, sema.src); } -fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3504,7 +3508,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.analyzeLoad(block, src, result_ptr, src); } -fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3514,7 +3518,7 @@ fn zirElemPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.elemPtr(block, sema.src, array_ptr, elem_index, sema.src); } -fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3527,7 +3531,7 @@ fn zirElemPtrNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerE return sema.elemPtr(block, src, array_ptr, elem_index, elem_index_src); } -fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3540,7 +3544,7 @@ fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); } -fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3554,7 +3558,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); } -fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3576,7 +3580,7 @@ fn zirSwitchCapture( inst: Zir.Inst.Index, is_multi: bool, is_ref: bool, -) InnerError!Air.Inst.Ref { +) 
CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3595,7 +3599,7 @@ fn zirSwitchCaptureElse( block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3614,7 +3618,7 @@ fn zirSwitchBlock( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3647,7 +3651,7 @@ fn zirSwitchBlockMulti( inst: Zir.Inst.Index, is_ref: bool, special_prong: Zir.SpecialProng, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -3684,7 +3688,7 @@ fn analyzeSwitch( multi_cases_len: usize, switch_inst: Zir.Inst.Index, src_node_offset: i32, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const mod = sema.mod; @@ -4350,20 +4354,23 @@ fn resolveSwitchItemVal( switch_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, range_expand: Module.SwitchProngSrc.RangeExpand, -) InnerError!TypedValue { +) CompileError!TypedValue { const item = sema.resolveInst(item_ref); - // We have to avoid the other helper functions here because we cannot construct a LazySrcLoc - // because we only have the switch AST node. Only if we know for sure we need to report - // a compile error do we resolve the full source locations. - if (item.value()) |val| { - if (val.isUndef()) { - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); - return sema.failWithUseOfUndef(block, src); - } + // Constructing a LazySrcLoc is costly because we only have the switch AST node. + // Only if we know for sure we need to report a compile error do we resolve the + // full source locations. 
+ if (sema.resolveConstValue(block, .unneeded, item)) |val| { return TypedValue{ .ty = item.ty, .val = val }; + } else |err| switch (err) { + error.NeededSourceLocation => { + const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); + return TypedValue{ + .ty = item.ty, + .val = try sema.resolveConstValue(block, src, item), + }; + }, + else => |e| return e, } - const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); - return sema.failWithNeededComptime(block, src); } fn validateSwitchRange( @@ -4374,7 +4381,7 @@ fn validateSwitchRange( last_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val; const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val; const maybe_prev_src = try range_set.add(first_val, last_val, switch_prong_src); @@ -4388,7 +4395,7 @@ fn validateSwitchItem( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; const maybe_prev_src = try range_set.add(item_val, item_val, switch_prong_src); return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset); @@ -4401,7 +4408,7 @@ fn validateSwitchItemEnum( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const mod = sema.mod; const item_tv = try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none); const field_index = item_tv.ty.enumTagFieldIndex(item_tv.val) orelse { @@ -4435,7 +4442,7 @@ fn validateSwitchDupe( maybe_prev_src: ?Module.SwitchProngSrc, switch_prong_src: Module.SwitchProngSrc, src_node_offset: i32, -) InnerError!void { +) CompileError!void { const prev_prong_src = maybe_prev_src orelse return; const mod = sema.mod; const gpa = sema.gpa; @@ -4469,7 +4476,7 @@ fn validateSwitchItemBool( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; if (item_val.toBool()) { true_count.* += 1; @@ -4491,7 +4498,7 @@ fn validateSwitchItemSparse( item_ref: Zir.Inst.Ref, src_node_offset: i32, switch_prong_src: Module.SwitchProngSrc, -) InnerError!void { +) CompileError!void { const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val; const kv = (try seen_values.fetchPut(item_val, switch_prong_src)) orelse return; return sema.validateSwitchDupe(block, kv.value, switch_prong_src, src_node_offset); @@ -4503,7 +4510,7 @@ fn validateSwitchNoRange( ranges_len: u32, operand_ty: Type, src_node_offset: i32, -) InnerError!void { +) CompileError!void { if (ranges_len == 0) return; @@ -4530,7 +4537,7 @@ fn validateSwitchNoRange( return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } -fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra 
= sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; _ = extra; @@ -4539,7 +4546,7 @@ fn zirHasField(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, src, "TODO implement zirHasField", .{}); } -fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; @@ -4562,7 +4569,7 @@ fn zirHasDecl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return Air.Inst.Ref.bool_false; } -fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4587,13 +4594,13 @@ fn zirImport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! return sema.addType(file_root_decl.ty); } -fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRetErrValueCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; _ = inst; return sema.mod.fail(&block.base, sema.src, "TODO implement zirRetErrValueCode", .{}); } -fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4602,7 +4609,7 @@ fn zirShl(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air return sema.mod.fail(&block.base, sema.src, "TODO implement zirShl", .{}); } -fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4615,7 +4622,7 @@ fn zirBitwise( block: *Scope.Block, inst: Zir.Inst.Index, air_tag: Air.Inst.Tag, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4675,7 +4682,7 @@ fn zirBitwise( return block.addBinOp(air_tag, casted_lhs, casted_rhs); } -fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4683,7 +4690,7 @@ fn zirBitNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.mod.fail(&block.base, sema.src, "TODO implement zirBitNot", .{}); } -fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4691,7 +4698,7 @@ fn zirArrayCat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.mod.fail(&block.base, sema.src, "TODO implement zirArrayCat", .{}); } -fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArrayMul(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4704,7 +4711,7 @@ fn zirNegate( block: *Scope.Block, inst: Zir.Inst.Index, tag_override: Zir.Inst.Tag, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4718,7 +4725,7 @@ fn zirNegate( return sema.analyzeArithmetic(block, tag_override, lhs, rhs, src, lhs_src, rhs_src); } -fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirArithmetic(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4738,7 +4745,7 @@ fn zirOverflowArithmetic( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4757,7 +4764,7 @@ fn analyzeArithmetic( src: LazySrcLoc, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const instructions = &[_]Air.Inst.Index{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); @@ -4867,7 +4874,7 @@ fn analyzeArithmetic( return block.addBinOp(air_tag, casted_lhs, casted_rhs); } -fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4882,7 +4889,7 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -4966,7 +4973,7 @@ fn zirCmp( block: *Scope.Block, inst: Zir.Inst.Index, op: std.math.CompareOperator, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5091,7 +5098,7 @@ fn zirCmp( return block.addBinOp(tag, casted_lhs, casted_rhs); } -fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); @@ -5100,7 +5107,7 @@ fn zirSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.addIntUnsigned(Type.initTag(.comptime_int), abi_size); } -fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitSizeOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand_ty = try sema.resolveType(block, operand_src, inst_data.operand); @@ -5113,7 +5120,7 @@ fn zirThis( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirThis", .{}); } @@ -5122,7 +5129,7 @@ fn zirRetAddr( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirRetAddr", .{}); } @@ -5131,12 +5138,12 @@ fn zirBuiltinSrc( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinSrc", .{}); } -fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const ty = try sema.resolveType(block, src, inst_data.operand); @@ -5179,7 +5186,7 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro } } -fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; @@ -5187,7 +5194,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError! 
return sema.addType(operand.ty); } -fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_ptr = sema.resolveInst(inst_data.operand); @@ -5195,13 +5202,13 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerEr return sema.addType(elem_ty); } -fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeofLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirTypeofLog2IntType", .{}); } -fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirLog2IntType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirLog2IntType", .{}); @@ -5211,7 +5218,7 @@ fn zirTypeofPeer( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5230,7 +5237,7 @@ fn zirTypeofPeer( return sema.addType(result_type); } -fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5256,7 +5263,7 @@ fn zirBoolOp( block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5295,7 +5302,7 @@ fn zirBoolBr( parent_block: *Scope.Block, inst: Zir.Inst.Index, is_bool_or: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5369,7 +5376,7 @@ fn zirIsNonNull( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5383,7 +5390,7 @@ fn zirIsNonNullPtr( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5394,7 +5401,7 @@ fn zirIsNonNullPtr( return sema.analyzeIsNull(block, src, loaded, true); } -fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5403,7 +5410,7 @@ fn zirIsNonErr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return sema.analyzeIsNonErr(block, inst_data.src(), operand); } -fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIsNonErrPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5418,7 +5425,7 @@ fn zirCondbr( sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index, -) 
InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5461,7 +5468,7 @@ fn zirCondbr( return always_noreturn; } -fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5482,7 +5489,7 @@ fn zirRetErrValue( sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const inst_data = sema.code.instructions.items(.data)[inst].str_tok; const err_name = inst_data.get(sema.code); const src = inst_data.src(); @@ -5507,7 +5514,7 @@ fn zirRetCoerce( block: *Scope.Block, inst: Zir.Inst.Index, need_coercion: bool, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5518,7 +5525,7 @@ fn zirRetCoerce( return sema.analyzeRet(block, operand, src, need_coercion); } -fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Zir.Inst.Index { +fn zirRetNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { const tracy = trace(@src()); defer tracy.end(); @@ -5535,7 +5542,7 @@ fn analyzeRet( operand: Air.Inst.Ref, src: LazySrcLoc, need_coercion: bool, -) InnerError!Zir.Inst.Index { +) CompileError!Zir.Inst.Index { if (block.inlining) |inlining| { // We are inlining a function call; rewrite the `ret` as a `break`. try inlining.merges.results.append(sema.gpa, operand); @@ -5564,7 +5571,7 @@ fn floatOpAllowed(tag: Zir.Inst.Tag) bool { }; } -fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5585,7 +5592,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Inne return sema.addType(ty); } -fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5639,7 +5646,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError return sema.addType(ty); } -fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -5653,13 +5660,13 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) In }); } -fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnionInitPtr", .{}); } -fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; const zir_datas = sema.code.instructions.items(.data); @@ -5772,7 +5779,7 @@ fn 
zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); } -fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5780,7 +5787,7 @@ fn zirStructInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ return sema.mod.fail(&block.base, src, "TODO: Sema.zirStructInitAnon", .{}); } -fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5788,7 +5795,7 @@ fn zirArrayInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInit", .{}); } -fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) InnerError!Air.Inst.Ref { +fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: bool) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5796,13 +5803,13 @@ fn zirArrayInitAnon(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_r return sema.mod.fail(&block.base, src, "TODO: Sema.zirArrayInitAnon", .{}); } -fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldTypeRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldTypeRef", .{}); } -fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.FieldType, inst_data.payload_index).data; const src = inst_data.src(); @@ -5824,7 +5831,7 @@ fn zirErrorReturnTrace( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorReturnTrace", .{}); } @@ -5833,7 +5840,7 @@ fn zirFrame( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrame", .{}); } @@ -5842,84 +5849,84 @@ fn zirFrameAddress( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const src: LazySrcLoc = .{ .node_offset = @bitCast(i32, extended.operand) }; return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameAddress", .{}); } -fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { +fn zirAlignOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignOf", .{}); } -fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBoolToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBoolToInt", .{}); } -fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirEmbedFile(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirEmbedFile", .{}); } -fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrorName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrorName", .{}); } -fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirUnaryMath(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirUnaryMath", .{}); } -fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTagName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTagName", .{}); } -fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirReify(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReify", .{}); } -fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTypeName(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTypeName", .{}); } -fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFrameType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameType", .{}); } -fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFrameSize(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return 
sema.mod.fail(&block.base, src, "TODO: Sema.zirFrameSize", .{}); } -fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFloatToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFloatToInt", .{}); } -fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToFloat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirIntToFloat", .{}); } -fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); @@ -5982,199 +5989,199 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerErro return block.addTyOp(.bitcast, type_res, operand_coerced); } -fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirErrSetCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirErrSetCast", .{}); } -fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirPtrCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPtrCast", .{}); } -fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirTruncate(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirTruncate", .{}); } -fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAlignCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAlignCast", .{}); } -fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirClz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirClz", .{}); } -fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCtz(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCtz", .{}); } -fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
InnerError!Air.Inst.Ref { +fn zirPopCount(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirPopCount", .{}); } -fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirByteSwap(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirByteSwap", .{}); } -fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitReverse(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitReverse", .{}); } -fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivExact", .{}); } -fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivFloor(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivFloor", .{}); } -fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirDivTrunc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirDivTrunc", .{}); } -fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMod(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMod", .{}); } -fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirRem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirRem", .{}); } -fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShlExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShlExact", .{}); } -fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShrExact(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, 
src, "TODO: Sema.zirShrExact", .{}); } -fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBitOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBitOffsetOf", .{}); } -fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirOffsetOf(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirOffsetOf", .{}); } -fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirCmpxchg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirCmpxchg", .{}); } -fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirSplat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirSplat", .{}); } -fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirReduce(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirReduce", .{}); } -fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirShuffle(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirShuffle", .{}); } -fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicLoad(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicLoad", .{}); } -fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicRmw(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicRmw", .{}); } -fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirAtomicStore(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirAtomicStore", .{}); } -fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMulAdd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMulAdd", .{}); } -fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBuiltinCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinCall", .{}); } -fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldPtrType", .{}); } -fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirFieldParentPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirFieldParentPtr", .{}); } -fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMemcpy(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemcpy", .{}); } -fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirMemset(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirMemset", .{}); } -fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirBuiltinAsyncCall(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirBuiltinAsyncCall", .{}); } -fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air.Inst.Ref { +fn zirResume(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); return sema.mod.fail(&block.base, src, "TODO: Sema.zirResume", .{}); @@ -6185,7 +6192,7 @@ fn zirAwait( block: *Scope.Block, inst: Zir.Inst.Index, is_nosuspend: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); @@ -6197,7 +6204,7 @@ fn zirVarExtended( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.ExtendedVar, extended.operand); const src = sema.src; const ty_src: LazySrcLoc = src; // TODO add a LazySrcLoc that points at type @@ -6263,7 +6270,7 @@ fn zirFuncExtended( block: *Scope.Block, extended: Zir.Inst.Extended.InstData, inst: Zir.Inst.Index, -) 
InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); @@ -6330,7 +6337,7 @@ fn zirCUndef( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCUndef", .{}); @@ -6340,7 +6347,7 @@ fn zirCInclude( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCInclude", .{}); @@ -6350,7 +6357,7 @@ fn zirCDefine( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirCDefine", .{}); @@ -6360,7 +6367,7 @@ fn zirWasmMemorySize( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemorySize", .{}); @@ -6370,7 +6377,7 @@ fn zirWasmMemoryGrow( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirWasmMemoryGrow", .{}); @@ -6380,7 +6387,7 @@ fn zirBuiltinExtern( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const extra = sema.code.extraData(Zir.Inst.BinNode, extended.operand).data; const src: LazySrcLoc = .{ .node_offset = extra.node }; return sema.mod.fail(&block.base, src, "TODO: implement Sema.zirBuiltinExtern", .{}); @@ -6556,7 +6563,7 @@ fn namedFieldPtr( object_ptr: Air.Inst.Ref, field_name: []const u8, field_name_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; @@ -6706,7 +6713,7 @@ fn analyzeNamespaceLookup( src: LazySrcLoc, namespace: *Scope.Namespace, decl_name: []const u8, -) InnerError!?Air.Inst.Ref { +) CompileError!?Air.Inst.Ref { const mod = sema.mod; const gpa = sema.gpa; if (try sema.lookupInNamespace(namespace, decl_name)) |decl| { @@ -6734,7 +6741,7 @@ fn analyzeStructFieldPtr( field_name: []const u8, field_name_src: LazySrcLoc, unresolved_struct_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6769,7 +6776,7 @@ fn analyzeUnionFieldPtr( field_name: []const u8, field_name_src: LazySrcLoc, unresolved_union_ty: Type, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const arena = sema.arena; assert(unresolved_union_ty.zigTypeTag() == .Union); @@ -6805,7 +6812,7 @@ fn elemPtr( 
array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const array_ty = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -6832,7 +6839,7 @@ fn elemPtrArray( array_ptr: Air.Inst.Ref, elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (array_ptr.value()) |array_ptr_val| { if (elem_index.value()) |index_val| { // Both array pointer and index are compile-time known. @@ -6859,7 +6866,7 @@ fn coerce( dest_type: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (dest_type.tag() == .var_args_param) { return sema.coerceVarArgParam(block, inst, inst_src); } @@ -7041,7 +7048,7 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!?Air.Inst.Index { +fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!?Air.Inst.Index { const val = inst.value() orelse return null; const src_zig_tag = inst.ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); @@ -7153,7 +7160,7 @@ fn bitcast( dest_type: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { // Keep the comptime Value representation; take the new type. return sema.addConstant(dest_type, val); @@ -7163,7 +7170,7 @@ fn bitcast( return block.addTyOp(.bitcast, dest_type, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) InnerError!Air.Inst.Ref { +fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. 
return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); @@ -7179,12 +7186,12 @@ fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { +fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { const decl_ref = try sema.analyzeDeclRef(block, src, decl); return sema.analyzeLoad(block, src, decl_ref, src); } -fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) InnerError!Air.Inst.Ref { +fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { try sema.mod.declareDeclDependency(sema.owner_decl, decl); sema.mod.ensureDeclAnalyzed(decl) catch |err| { if (sema.func) |func| { @@ -7205,7 +7212,7 @@ fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl ); } -fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) InnerError!Air.Inst.Ref { +fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedValue) CompileError!Air.Inst.Ref { const variable = tv.val.castTag(.variable).?.data; const ty = try Module.simplePtrType(sema.arena, tv.ty, variable.is_mutable, .One); @@ -7233,7 +7240,7 @@ fn analyzeRef( block: *Scope.Block, src: LazySrcLoc, operand: Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { @@ -7253,7 +7260,7 @@ fn analyzeLoad( src: LazySrcLoc, ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_ty = sema.getTypeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag()) { .Pointer => ptr_ty.elemType(), @@ -7276,7 +7283,7 @@ fn analyzeIsNull( src: LazySrcLoc, operand: Air.Inst.Ref, invert_logic: bool, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const result_ty = Type.initTag(.bool); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |opt_val| { if (opt_val.isUndef()) { @@ -7300,7 +7307,7 @@ fn analyzeIsNonErr( block: *Scope.Block, src: LazySrcLoc, operand: Air.Inst.Ref, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ot = operand.ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; @@ -7329,7 +7336,7 @@ fn analyzeSlice( end_opt: ?Air.Inst.Index, sentinel_opt: ?Air.Inst.Index, sentinel_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const ptr_child = switch (array_ptr.ty.zigTypeTag()) { .Pointer => array_ptr.ty.elemType(), else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), @@ -7405,7 +7412,7 @@ fn cmpNumeric( op: std.math.CompareOperator, lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const lhs_ty = sema.getTypeOf(lhs); const rhs_ty = sema.getTypeOf(rhs); @@ -7746,7 +7753,7 @@ fn resolvePeerTypes( return chosen.ty; } -fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) InnerError!Type { +fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) CompileError!Type { switch 
(ty.tag()) { .@"struct" => { const struct_obj = ty.castTag(.@"struct").?.data; @@ -7798,7 +7805,7 @@ fn resolveBuiltinTypeFields( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Type { +) CompileError!Type { const resolved_ty = try sema.getBuiltinType(block, src, name); return sema.resolveTypeFields(block, src, resolved_ty); } @@ -7808,7 +7815,7 @@ fn getBuiltin( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Air.Inst.Ref { +) CompileError!Air.Inst.Ref { const mod = sema.mod; const std_pkg = mod.root_pkg.table.get("std").?; const std_file = (mod.importPkg(std_pkg) catch unreachable).file; @@ -7834,7 +7841,7 @@ fn getBuiltinType( block: *Scope.Block, src: LazySrcLoc, name: []const u8, -) InnerError!Type { +) CompileError!Type { const ty_inst = try sema.getBuiltin(block, src, name); return sema.resolveAirAsType(block, src, ty_inst); } @@ -7848,7 +7855,7 @@ fn typeHasOnePossibleValue( block: *Scope.Block, src: LazySrcLoc, starting_type: Type, -) InnerError!?Value { +) CompileError!?Value { var ty = starting_type; while (true) switch (ty.tag()) { .f16, @@ -7986,7 +7993,7 @@ fn typeHasOnePossibleValue( }; } -fn getAstTree(sema: *Sema, block: *Scope.Block) InnerError!*const std.zig.ast.Tree { +fn getAstTree(sema: *Sema, block: *Scope.Block) CompileError!*const std.zig.ast.Tree { return block.src_decl.namespace.file_scope.getTree(sema.gpa) catch |err| { log.err("unable to load AST to report compile error: {s}", .{@errorName(err)}); return error.AnalysisFail; @@ -8166,15 +8173,15 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } -fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) InnerError!Air.Inst.Ref { +fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { return sema.addConstant(ty, try Value.Tag.int_u64.create(sema.arena, int)); } -fn addConstUndef(sema: *Sema, ty: Type) InnerError!Air.Inst.Ref { +fn addConstUndef(sema: *Sema, ty: Type) CompileError!Air.Inst.Ref { return sema.addConstant(ty, Value.initTag(.undef)); } -fn addConstant(sema: *Sema, ty: Type, val: Value) InnerError!Air.Inst.Ref { +fn addConstant(sema: *Sema, ty: Type, val: Value) CompileError!Air.Inst.Ref { const gpa = sema.gpa; const ty_inst = try sema.addType(ty); try sema.air_values.append(gpa, val); From 7bb2d13a090f700b3806127a639e164726af8e03 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 12:38:32 -0700 Subject: [PATCH 15/53] stage2: remove ZIR instructions bool_and and bool_or These were unused. I believe this happened with the introduction of bool_br_and and bool_br_or instructions. 
--- src/AstGen.zig | 2 - src/Sema.zig | 125 +++++++++++++++++-------------------------------- src/Zir.zig | 12 ----- 3 files changed, 43 insertions(+), 96 deletions(-) diff --git a/src/AstGen.zig b/src/AstGen.zig index 24766aaf60..a8510365a9 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -1922,8 +1922,6 @@ fn unusedResultExpr(gz: *GenZir, scope: *Scope, statement: ast.Node.Index) Inner .bool_br_and, .bool_br_or, .bool_not, - .bool_and, - .bool_or, .call_compile_time, .call_nosuspend, .call_async, diff --git a/src/Sema.zig b/src/Sema.zig index 91f81ffeed..31d3c9551d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -185,8 +185,6 @@ pub fn analyzeBody( //.block => try sema.zirBlock(block, inst), //.suspend_block => try sema.zirSuspendBlock(block, inst), //.bool_not => try sema.zirBoolNot(block, inst), - //.bool_and => try sema.zirBoolOp(block, inst, false), - //.bool_or => try sema.zirBoolOp(block, inst, true), //.bool_br_and => try sema.zirBoolBr(block, inst, false), //.bool_br_or => try sema.zirBoolBr(block, inst, true), //.c_import => try sema.zirCImport(block, inst), @@ -195,12 +193,12 @@ pub fn analyzeBody( //.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), //.call_async => try sema.zirCall(block, inst, .async_kw, false), - //.cmp_eq => try sema.zirCmp(block, inst, .eq), - //.cmp_gt => try sema.zirCmp(block, inst, .gt), - //.cmp_gte => try sema.zirCmp(block, inst, .gte), - //.cmp_lt => try sema.zirCmp(block, inst, .lt), - //.cmp_lte => try sema.zirCmp(block, inst, .lte), - //.cmp_neq => try sema.zirCmp(block, inst, .neq), + .cmp_eq => try sema.zirCmp(block, inst, .eq), + .cmp_gt => try sema.zirCmp(block, inst, .gt), + .cmp_gte => try sema.zirCmp(block, inst, .gte), + .cmp_lt => try sema.zirCmp(block, inst, .lt), + .cmp_lte => try sema.zirCmp(block, inst, .lte), + .cmp_neq => try sema.zirCmp(block, inst, .neq), //.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), //.decl_ref => try sema.zirDeclRef(block, inst), //.decl_val => try sema.zirDeclVal(block, inst), @@ -4669,8 +4667,8 @@ fn zirBitwise( return sema.mod.fail(&block.base, src, "invalid operands to binary bitwise expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); } - if (casted_lhs.value()) |lhs_val| { - if (casted_rhs.value()) |rhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, lhs_src, casted_lhs)) |lhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, rhs_src, casted_rhs)) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { return sema.addConstUndef(resolved_type); } @@ -4799,8 +4797,8 @@ fn analyzeArithmetic( return sema.mod.fail(&block.base, src, "invalid operands to binary expression: '{s}' and '{s}'", .{ @tagName(lhs_ty.zigTypeTag()), @tagName(rhs_ty.zigTypeTag()) }); } - if (casted_lhs.value()) |lhs_val| { - if (casted_rhs.value()) |rhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, lhs_src, casted_lhs)) |lhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, rhs_src, casted_rhs)) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { return sema.addConstUndef(resolved_type); } @@ -5072,8 +5070,8 @@ fn zirCmp( const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - if (casted_lhs.value()) |lhs_val| { - if (casted_rhs.value()) |rhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, lhs_src, casted_lhs)) |lhs_val| { + if 
(try sema.resolvePossiblyUndefinedValue(block, rhs_src, casted_rhs)) |rhs_val| { if (lhs_val.isUndef() or rhs_val.isUndef()) { return sema.addConstUndef(resolved_type); } @@ -5258,45 +5256,6 @@ fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr return block.addTyOp(.not, bool_type, operand); } -fn zirBoolOp( - sema: *Sema, - block: *Scope.Block, - inst: Zir.Inst.Index, - is_bool_or: bool, -) CompileError!Air.Inst.Ref { - const tracy = trace(@src()); - defer tracy.end(); - - const src: LazySrcLoc = .unneeded; - const bool_type = Type.initTag(.bool); - const bin_inst = sema.code.instructions.items(.data)[inst].bin; - const uncasted_lhs = sema.resolveInst(bin_inst.lhs); - const lhs = try sema.coerce(block, bool_type, uncasted_lhs, uncasted_lhs.src); - const uncasted_rhs = sema.resolveInst(bin_inst.rhs); - const rhs = try sema.coerce(block, bool_type, uncasted_rhs, uncasted_rhs.src); - - if (lhs.value()) |lhs_val| { - if (rhs.value()) |rhs_val| { - if (is_bool_or) { - if (lhs_val.toBool() or rhs_val.toBool()) { - return Air.Inst.Ref.bool_true; - } else { - return Air.Inst.Ref.bool_false; - } - } else { - if (lhs_val.toBool() and rhs_val.toBool()) { - return Air.Inst.Ref.bool_true; - } else { - return Air.Inst.Ref.bool_false; - } - } - } - } - try sema.requireRuntimeBlock(block, src); - const tag: Air.Inst.Tag = if (is_bool_or) .bool_or else .bool_and; - return block.addBinOp(tag, lhs, rhs); -} - fn zirBoolBr( sema: *Sema, parent_block: *Scope.Block, @@ -6840,8 +6799,8 @@ fn elemPtrArray( elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - if (array_ptr.value()) |array_ptr_val| { - if (elem_index.value()) |index_val| { + if (try sema.resolveDefinedValue(block, src, array_ptr)) |array_ptr_val| { + if (try sema.resolveDefinedValue(block, src, elem_index)) |index_val| { // Both array pointer and index are compile-time known. const index_u64 = index_val.toUnsignedInt(); // @intCast here because it would have been impossible to construct a value that @@ -7367,8 +7326,8 @@ fn analyzeSlice( var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice; var return_elem_type = elem_type; if (end_opt) |end| { - if (end.value()) |end_val| { - if (start.value()) |start_val| { + if (try sema.resolveDefinedValue(block, src, end)) |end_val| { + if (try sema.resolveDefinedValue(block, src, start)) |start_val| { const start_u64 = start_val.toUnsignedInt(); const end_u64 = end_val.toUnsignedInt(); if (start_u64 > end_u64) { @@ -7492,11 +7451,11 @@ fn cmpNumeric( // For mixed floats and integers, extract the integer part from the float, cast that to // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, // add/subtract 1. 
- const lhs_is_signed = if (lhs.value()) |lhs_val| + const lhs_is_signed = if (try sema.resolveDefinedValue(block, lhs_src, lhs)) |lhs_val| lhs_val.compareWithZero(.lt) else (lhs_ty.isFloat() or lhs_ty.isSignedInt()); - const rhs_is_signed = if (rhs.value()) |rhs_val| + const rhs_is_signed = if (try sema.resolveDefinedValue(block, rhs_src, rhs)) |rhs_val| rhs_val.compareWithZero(.lt) else (rhs_ty.isFloat() or rhs_ty.isSignedInt()); @@ -7505,7 +7464,7 @@ fn cmpNumeric( var dest_float_type: ?Type = null; var lhs_bits: usize = undefined; - if (lhs.value()) |lhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, lhs_src, lhs)) |lhs_val| { if (lhs_val.isUndef()) return sema.addConstUndef(Type.initTag(.bool)); const is_unsigned = if (lhs_is_float) x: { @@ -7540,7 +7499,7 @@ fn cmpNumeric( } var rhs_bits: usize = undefined; - if (rhs.value()) |rhs_val| { + if (try sema.resolvePossiblyUndefinedValue(block, rhs_src, rhs)) |rhs_val| { if (rhs_val.isUndef()) return sema.addConstUndef(Type.initTag(.bool)); const is_unsigned = if (rhs_is_float) x: { @@ -7589,8 +7548,8 @@ fn cmpNumeric( } fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { - if (inst.value()) |val| { - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { + return sema.mod.constInst(sema.arena, inst_src, .{ .ty = dest_type, .val = val }); } try sema.requireRuntimeBlock(block, inst.src); @@ -7690,67 +7649,69 @@ fn resolvePeerTypes( var chosen = instructions[0]; for (instructions[1..]) |candidate| { - if (candidate.ty.eql(chosen.ty)) + const candidate_ty = sema.getTypeOf(candidate); + const chosen_ty = sema.getTypeOf(chosen); + if (candidate_ty.eql(chosen_ty)) continue; - if (candidate.ty.zigTypeTag() == .NoReturn) + if (candidate_ty.zigTypeTag() == .NoReturn) continue; - if (chosen.ty.zigTypeTag() == .NoReturn) { + if (chosen_ty.zigTypeTag() == .NoReturn) { chosen = candidate; continue; } - if (candidate.ty.zigTypeTag() == .Undefined) + if (candidate_ty.zigTypeTag() == .Undefined) continue; - if (chosen.ty.zigTypeTag() == .Undefined) { + if (chosen_ty.zigTypeTag() == .Undefined) { chosen = candidate; continue; } - if (chosen.ty.isInt() and - candidate.ty.isInt() and - chosen.ty.isSignedInt() == candidate.ty.isSignedInt()) + if (chosen_ty.isInt() and + candidate_ty.isInt() and + chosen_ty.isSignedInt() == candidate_ty.isSignedInt()) { - if (chosen.ty.intInfo(target).bits < candidate.ty.intInfo(target).bits) { + if (chosen_ty.intInfo(target).bits < candidate_ty.intInfo(target).bits) { chosen = candidate; } continue; } - if (chosen.ty.isFloat() and candidate.ty.isFloat()) { - if (chosen.ty.floatBits(target) < candidate.ty.floatBits(target)) { + if (chosen_ty.isFloat() and candidate_ty.isFloat()) { + if (chosen_ty.floatBits(target) < candidate_ty.floatBits(target)) { chosen = candidate; } continue; } - if (chosen.ty.zigTypeTag() == .ComptimeInt and candidate.ty.isInt()) { + if (chosen_ty.zigTypeTag() == .ComptimeInt and candidate_ty.isInt()) { chosen = candidate; continue; } - if (chosen.ty.isInt() and candidate.ty.zigTypeTag() == .ComptimeInt) { + if (chosen_ty.isInt() and candidate_ty.zigTypeTag() == .ComptimeInt) { continue; } - if (chosen.ty.zigTypeTag() == .ComptimeFloat and candidate.ty.isFloat()) { + if (chosen_ty.zigTypeTag() == .ComptimeFloat and candidate_ty.isFloat()) { chosen = candidate; continue; } - if (chosen.ty.isFloat() and candidate.ty.zigTypeTag() == 
.ComptimeFloat) { + if (chosen_ty.isFloat() and candidate_ty.zigTypeTag() == .ComptimeFloat) { continue; } - if (chosen.ty.zigTypeTag() == .Enum and candidate.ty.zigTypeTag() == .EnumLiteral) { + if (chosen_ty.zigTypeTag() == .Enum and candidate_ty.zigTypeTag() == .EnumLiteral) { continue; } - if (chosen.ty.zigTypeTag() == .EnumLiteral and candidate.ty.zigTypeTag() == .Enum) { + if (chosen_ty.zigTypeTag() == .EnumLiteral and candidate_ty.zigTypeTag() == .Enum) { chosen = candidate; continue; } // TODO error notes pointing out each type - return sema.mod.fail(&block.base, src, "incompatible types: '{}' and '{}'", .{ chosen.ty, candidate.ty }); + return sema.mod.fail(&block.base, src, "incompatible types: '{}' and '{}'", .{ chosen_ty, candidate_ty }); } - return chosen.ty; + return sema.getTypeOf(chosen); } fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) CompileError!Type { diff --git a/src/Zir.zig b/src/Zir.zig index b975500e2f..e14b636ab6 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -236,15 +236,9 @@ pub const Inst = struct { /// Implements `suspend {...}`. /// Uses the `pl_node` union field. Payload is `Block`. suspend_block, - /// Boolean AND. See also `bit_and`. - /// Uses the `pl_node` union field. Payload is `Bin`. - bool_and, /// Boolean NOT. See also `bit_not`. /// Uses the `un_node` field. bool_not, - /// Boolean OR. See also `bit_or`. - /// Uses the `pl_node` union field. Payload is `Bin`. - bool_or, /// Short-circuiting boolean `and`. `lhs` is a boolean `Ref` and the other operand /// is a block, which is evaluated if `lhs` is `true`. /// Uses the `bool_br` union field. @@ -998,8 +992,6 @@ pub const Inst = struct { .bool_br_and, .bool_br_or, .bool_not, - .bool_and, - .bool_or, .breakpoint, .fence, .call, @@ -1248,9 +1240,7 @@ pub const Inst = struct { .block = .pl_node, .block_inline = .pl_node, .suspend_block = .pl_node, - .bool_and = .pl_node, .bool_not = .un_node, - .bool_or = .pl_node, .bool_br_and = .bool_br, .bool_br_or = .bool_br, .@"break" = .@"break", @@ -2981,8 +2971,6 @@ const Writer = struct { .mulwrap, .sub, .subwrap, - .bool_and, - .bool_or, .cmp_lt, .cmp_lte, .cmp_eq, From 27be4f31402557972ae28d552f4ec4617357d454 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 19:04:02 -0700 Subject: [PATCH 16/53] Sema: more AIR memory layout reworking progress Additionally: ZIR encoding for floats now supports float literals up to f64, not only f32. This is because we no longer need a source location for this instruction. --- src/Air.zig | 11 +- src/AstGen.zig | 13 +- src/Module.zig | 32 ++ src/Sema.zig | 928 +++++++++++++++++++++++-------------------------- src/Zir.zig | 19 +- 5 files changed, 486 insertions(+), 517 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 1f294c43f3..e2eeae1130 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -94,6 +94,11 @@ pub const Inst = struct { bitcast, /// Uses the `ty_pl` field with payload `Block`. block, + /// A labeled block of code that loops forever. At the end of the body it is implied + /// to repeat; no explicit "repeat" instruction terminates loop bodies. + /// Result type is always noreturn; no instructions in a block follow this one. + /// Uses the `ty_pl` field. Payload is `Block`. + loop, /// Return from a block with a result. /// Result type is always noreturn; no instructions in a block follow this one. /// Uses the `br` field. @@ -181,11 +186,6 @@ pub const Inst = struct { /// Read a value from a pointer. /// Uses the `ty_op` field. 
load, - /// A labeled block of code that loops forever. At the end of the body it is implied - /// to repeat; no explicit "repeat" instruction terminates loop bodies. - /// Result type is always noreturn; no instructions in a block follow this one. - /// Uses the `ty_pl` field. Payload is `Block`. - loop, /// Converts a pointer to its address. Result type is always `usize`. /// Uses the `un_op` field. ptrtoint, @@ -279,6 +279,7 @@ pub const Inst = struct { /// this union. `Tag` determines which union field is active, as well as /// how to interpret the data within. pub const Data = union { + no_op: void, un_op: Ref, bin_op: struct { lhs: Ref, diff --git a/src/AstGen.zig b/src/AstGen.zig index a8510365a9..1b58b3f2f7 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -6589,12 +6589,12 @@ fn floatLiteral(gz: *GenZir, rl: ResultLoc, node: ast.Node.Index) InnerError!Zir } else std.fmt.parseFloat(f128, bytes) catch |err| switch (err) { error.InvalidCharacter => unreachable, // validated by tokenizer }; - // If the value fits into a f32 without losing any precision, store it that way. + // If the value fits into a f64 without losing any precision, store it that way. @setFloatMode(.Strict); - const smaller_float = @floatCast(f32, float_number); + const smaller_float = @floatCast(f64, float_number); const bigger_again: f128 = smaller_float; if (bigger_again == float_number) { - const result = try gz.addFloat(smaller_float, node); + const result = try gz.addFloat(smaller_float); return rvalue(gz, rl, result, node); } // We need to use 128 bits. Break the float into 4 u32 values so we can @@ -9145,13 +9145,10 @@ const GenZir = struct { return indexToRef(new_index); } - fn addFloat(gz: *GenZir, number: f32, src_node: ast.Node.Index) !Zir.Inst.Ref { + fn addFloat(gz: *GenZir, number: f64) !Zir.Inst.Ref { return gz.add(.{ .tag = .float, - .data = .{ .float = .{ - .src_node = gz.nodeIndexToRelative(src_node), - .number = number, - } }, + .data = .{ .float = number }, }); } diff --git a/src/Module.zig b/src/Module.zig index 0a082313b3..4bd48dad05 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1226,6 +1226,17 @@ pub const Scope = struct { return block.src_decl.namespace.file_scope; } + pub fn addTy( + block: *Block, + tag: Air.Inst.Tag, + ty: Type, + ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .{ .ty = ty }, + }); + } + pub fn addTyOp( block: *Block, tag: Air.Inst.Tag, @@ -1241,6 +1252,13 @@ pub const Scope = struct { }); } + pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = tag, + .data = .no_op, + }); + } + pub fn addUnOp( block: *Block, tag: Air.Inst.Tag, @@ -1252,6 +1270,20 @@ pub const Scope = struct { }); } + pub fn addBr( + block: *Block, + target_block: Air.Inst.Index, + operand: Air.Inst.Ref, + ) error{OutOfMemory}!Air.Inst.Ref { + return block.addInst(.{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = target_block, + .operand = operand, + } }, + }); + } + pub fn addBinOp( block: *Block, tag: Air.Inst.Tag, diff --git a/src/Sema.zig b/src/Sema.zig index 31d3c9551d..48ad8d97fc 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -372,14 +372,14 @@ pub fn analyzeBody( //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), - //.add => try sema.zirArithmetic(block, inst), - //.addwrap => try sema.zirArithmetic(block, inst), - //.div => try sema.zirArithmetic(block, inst), - //.mod_rem => try 
sema.zirArithmetic(block, inst), - //.mul => try sema.zirArithmetic(block, inst), - //.mulwrap => try sema.zirArithmetic(block, inst), - //.sub => try sema.zirArithmetic(block, inst), - //.subwrap => try sema.zirArithmetic(block, inst), + .add => try sema.zirArithmetic(block, inst), + .addwrap => try sema.zirArithmetic(block, inst), + .div => try sema.zirArithmetic(block, inst), + .mod_rem => try sema.zirArithmetic(block, inst), + .mul => try sema.zirArithmetic(block, inst), + .mulwrap => try sema.zirArithmetic(block, inst), + .sub => try sema.zirArithmetic(block, inst), + .subwrap => try sema.zirArithmetic(block, inst), //// Instructions that we know to *always* be noreturn based solely on their tag. //// These functions match the return type of analyzeBody so that we can @@ -505,35 +505,35 @@ pub fn analyzeBody( i = 0; continue; }, - //.block_inline => blk: { - // // Directly analyze the block body without introducing a new block. - // const inst_data = datas[inst].pl_node; - // const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); - // const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; - // const break_inst = try sema.analyzeBody(block, inline_body); - // const break_data = datas[break_inst].@"break"; - // if (inst == break_data.block_inst) { - // break :blk sema.resolveInst(break_data.operand); - // } else { - // return break_inst; - // } - //}, - //.condbr_inline => blk: { - // const inst_data = datas[inst].pl_node; - // const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; - // const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); - // const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; - // const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; - // const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); - // const inline_body = if (cond.val.toBool()) then_body else else_body; - // const break_inst = try sema.analyzeBody(block, inline_body); - // const break_data = datas[break_inst].@"break"; - // if (inst == break_data.block_inst) { - // break :blk sema.resolveInst(break_data.operand); - // } else { - // return break_inst; - // } - //}, + .block_inline => blk: { + // Directly analyze the block body without introducing a new block. 
+ const inst_data = datas[inst].pl_node; + const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); + const inline_body = sema.code.extra[extra.end..][0..extra.data.body_len]; + const break_inst = try sema.analyzeBody(block, inline_body); + const break_data = datas[break_inst].@"break"; + if (inst == break_data.block_inst) { + break :blk sema.resolveInst(break_data.operand); + } else { + return break_inst; + } + }, + .condbr_inline => blk: { + const inst_data = datas[inst].pl_node; + const cond_src: LazySrcLoc = .{ .node_offset_if_cond = inst_data.src_node }; + const extra = sema.code.extraData(Zir.Inst.CondBr, inst_data.payload_index); + const then_body = sema.code.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = sema.code.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + const cond = try sema.resolveInstConst(block, cond_src, extra.data.condition); + const inline_body = if (cond.val.toBool()) then_body else else_body; + const break_inst = try sema.analyzeBody(block, inline_body); + const break_data = datas[break_inst].@"break"; + if (inst == break_data.block_inst) { + break :blk sema.resolveInst(break_data.operand); + } else { + return break_inst; + } + }, else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), }; if (sema.getTypeOf(air_inst).isNoReturn()) @@ -1186,7 +1186,7 @@ fn zirRetPtr( const fn_ty = sema.func.?.owner_decl.ty; const ret_type = fn_ty.fnReturnType(); const ptr_type = try Module.simplePtrType(sema.arena, ret_type, true, .One); - return block.addNoOp(src, ptr_type, .alloc); + return block.addTy(.alloc, ptr_type); } fn zirRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1230,7 +1230,8 @@ fn ensureResultUsed( operand: Air.Inst.Ref, src: LazySrcLoc, ) CompileError!void { - switch (operand.ty.zigTypeTag()) { + const operand_ty = sema.getTypeOf(operand); + switch (operand_ty.zigTypeTag()) { .Void, .NoReturn => return, else => return sema.mod.fail(&block.base, src, "expression value is ignored", .{}), } @@ -1243,7 +1244,8 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); - switch (operand.ty.zigTypeTag()) { + const operand_ty = sema.getTypeOf(operand); + switch (operand_ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => return sema.mod.fail(&block.base, src, "error is discarded", .{}), else => return, } @@ -1257,7 +1259,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co const src = inst_data.src(); const array_ptr = sema.resolveInst(inst_data.operand); - const elem_ty = array_ptr.ty.elemType(); + const elem_ty = sema.getTypeOf(array_ptr).elemType(); if (!elem_ty.isIndexable()) { const cond_src: LazySrcLoc = .{ .node_offset_for_cond = inst_data.src_node }; const msg = msg: { @@ -1317,7 +1319,6 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].un_node; - const src = inst_data.src(); const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const var_type = try sema.resolveType(block, ty_src, inst_data.operand); const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); @@ -1329,10 +1330,7 @@ fn zirAllocComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp 
.val = undefined, // astgen guarantees there will be a store before the first load }, }; - return sema.mod.constInst(sema.arena, src, .{ - .ty = ptr_type, - .val = Value.initPayload(&val_payload.base), - }); + return sema.addConstant(ptr_type, Value.initPayload(&val_payload.base)); } fn zirAllocInferredComptime(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1351,7 +1349,7 @@ fn zirAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError const var_type = try sema.resolveType(block, ty_src, inst_data.operand); const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); - return block.addNoOp(var_decl_src, ptr_type, .alloc); + return block.addTy(.alloc, ptr_type); } fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1365,7 +1363,7 @@ fn zirAllocMut(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr try sema.validateVarType(block, ty_src, var_type); const ptr_type = try Module.simplePtrType(sema.arena, var_type, true, .One); try sema.requireRuntimeBlock(block, var_decl_src); - return block.addNoOp(var_decl_src, ptr_type, .alloc); + return block.addTy(.alloc, ptr_type); } fn zirAllocInferred( @@ -1388,12 +1386,9 @@ fn zirAllocInferred( // not needed in the case of constant values. However here, we plan to "downgrade" // to a normal instruction when we hit `resolve_inferred_alloc`. So we append // to the block even though it is currently a `.constant`. - const result = try sema.mod.constInst(sema.arena, src, .{ - .ty = inferred_alloc_ty, - .val = Value.initPayload(&val_payload.base), - }); + const result = try sema.addConstant(inferred_alloc_ty, Value.initPayload(&val_payload.base)); try sema.requireFunctionBlock(block, src); - try block.instructions.append(sema.gpa, result); + try block.instructions.append(sema.gpa, refToIndex(result).?); return result; } @@ -1630,18 +1625,21 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE const tracy = trace(@src()); defer tracy.end(); - const src: LazySrcLoc = .unneeded; + const src = sema.src; + const fn_inst_src = sema.src; + const inst_data = sema.code.instructions.items(.data)[inst].param_type; const fn_inst = sema.resolveInst(inst_data.callee); + const fn_inst_ty = sema.getTypeOf(fn_inst); const param_index = inst_data.param_index; - const fn_ty: Type = switch (fn_inst.ty.zigTypeTag()) { - .Fn => fn_inst.ty, + const fn_ty: Type = switch (fn_inst_ty.zigTypeTag()) { + .Fn => fn_inst_ty, .BoundFn => { - return sema.mod.fail(&block.base, fn_inst.src, "TODO implement zirParamType for method call syntax", .{}); + return sema.mod.fail(&block.base, fn_inst_src, "TODO implement zirParamType for method call syntax", .{}); }, else => { - return sema.mod.fail(&block.base, fn_inst.src, "expected function, found '{}'", .{fn_inst.ty}); + return sema.mod.fail(&block.base, fn_inst_src, "expected function, found '{}'", .{fn_inst_ty}); }, }; @@ -1711,23 +1709,20 @@ fn zirIntBig(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro const limbs = try arena.alloc(std.math.big.Limb, int.len); mem.copy(u8, mem.sliceAsBytes(limbs), limb_bytes); - return sema.mod.constInst(arena, .unneeded, .{ - .ty = Type.initTag(.comptime_int), - .val = try Value.Tag.int_big_positive.create(arena, limbs), - }); + return sema.addConstant( + Type.initTag(.comptime_int), + try Value.Tag.int_big_positive.create(arena, limbs), + ); } fn zirFloat(sema: *Sema, 
block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { _ = block; const arena = sema.arena; - const inst_data = sema.code.instructions.items(.data)[inst].float; - const src = inst_data.src(); - const number = inst_data.number; - - return sema.mod.constInst(arena, src, .{ - .ty = Type.initTag(.comptime_float), - .val = try Value.Tag.float_32.create(arena, number), - }); + const number = sema.code.instructions.items(.data)[inst].float; + return sema.addConstant( + Type.initTag(.comptime_float), + try Value.Tag.float_64.create(arena, number), + ); } fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1735,13 +1730,11 @@ fn zirFloat128(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Float128, inst_data.payload_index).data; - const src = inst_data.src(); const number = extra.get(); - - return sema.mod.constInst(arena, src, .{ - .ty = Type.initTag(.comptime_float), - .val = try Value.Tag.float_128.create(arena, number), - }); + return sema.addConstant( + Type.initTag(.comptime_float), + try Value.Tag.float_128.create(arena, number), + ); } fn zirCompileError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { @@ -1785,10 +1778,7 @@ fn zirCompileLog( if (!gop.found_existing) { gop.value_ptr.* = src_node; } - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), - }); + return Air.Inst.Ref.void_value; } fn zirRepeat(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Zir.Inst.Index { @@ -1817,18 +1807,26 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Compil const src = inst_data.src(); const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; + const gpa = sema.gpa; // AIR expects a block outside the loop block too. - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = undefined, - .src = src, - }, - .body = undefined, - }; - + // Reserve space for a Loop instruction so that generated Break instructions can + // point to it, even if it doesn't end up getting used because the code ends up being + // comptime evaluated. + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const loop_inst = block_inst + 1; + try sema.air_instructions.ensureUnusedCapacity(gpa, 2); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .block, + .data = undefined, + }); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .loop, + .data = .{ .ty_pl = .{ + .ty = .noreturn_type, + .payload = undefined, + } }, + }); var label: Scope.Block.Label = .{ .zir_block = inst, .merges = .{ @@ -1844,33 +1842,24 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Compil child_block.runtime_index += 1; const merges = &child_block.label.?.merges; - defer child_block.instructions.deinit(sema.gpa); - defer merges.results.deinit(sema.gpa); - defer merges.br_list.deinit(sema.gpa); - - // Reserve space for a Loop instruction so that generated Break instructions can - // point to it, even if it doesn't end up getting used because the code ends up being - // comptime evaluated. 
-    const loop_inst = try sema.arena.create(Inst.Loop);
-    loop_inst.* = .{
-        .base = .{
-            .tag = Inst.Loop.base_tag,
-            .ty = Type.initTag(.noreturn),
-            .src = src,
-        },
-        .body = undefined,
-    };
+    defer child_block.instructions.deinit(gpa);
+    defer merges.results.deinit(gpa);
+    defer merges.br_list.deinit(gpa);
 
     var loop_block = child_block.makeSubBlock();
-    defer loop_block.instructions.deinit(sema.gpa);
+    defer loop_block.instructions.deinit(gpa);
 
     _ = try sema.analyzeBody(&loop_block, body);
 
     // Loop repetition is implied so the last instruction may or may not be a noreturn instruction.
+    try child_block.instructions.append(gpa, loop_inst);
 
-    try child_block.instructions.append(sema.gpa, &loop_inst.base);
-    loop_inst.body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, loop_block.instructions.items) };
-
+    try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len +
+        loop_block.instructions.items.len);
+    sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity(
+        Air.Block{ .body_len = @intCast(u32, loop_block.instructions.items.len) },
+    );
+    sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items);
     return sema.analyzeBlockBody(parent_block, src, &child_block, merges);
 }
 
@@ -1890,27 +1879,28 @@ fn zirSuspendBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index
     return sema.mod.fail(&parent_block.base, src, "TODO: implement Sema.zirSuspendBlock", .{});
 }
 
-fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+fn zirBlock(
+    sema: *Sema,
+    parent_block: *Scope.Block,
+    inst: Zir.Inst.Index,
+) CompileError!Air.Inst.Ref {
     const tracy = trace(@src());
     defer tracy.end();
 
-    const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
-    const src = inst_data.src();
-    const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index);
+    const pl_node = sema.code.instructions.items(.data)[inst].pl_node;
+    const src = pl_node.src();
+    const extra = sema.code.extraData(Zir.Inst.Block, pl_node.payload_index);
     const body = sema.code.extra[extra.end..][0..extra.data.body_len];
+    const gpa = sema.gpa;
 
     // Reserve space for a Block instruction so that generated Break instructions can
     // point to it, even if it doesn't end up getting used because the code ends up being
     // comptime evaluated.
-    const block_inst = try sema.arena.create(Inst.Block);
-    block_inst.* = .{
-        .base = .{
-            .tag = Inst.Block.base_tag,
-            .ty = undefined, // Set after analysis.
-            .src = src,
-        },
-        .body = undefined,
-    };
+    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+    try sema.air_instructions.append(gpa, .{
+        .tag = .block,
+        .data = undefined,
+    });
 
     var label: Scope.Block.Label = .{
         .zir_block = inst,
@@ -1932,9 +1922,9 @@ fn zirBlock(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Compi
     };
     const merges = &child_block.label.?.merges;
 
-    defer child_block.instructions.deinit(sema.gpa);
-    defer merges.results.deinit(sema.gpa);
-    defer merges.br_list.deinit(sema.gpa);
+    defer child_block.instructions.deinit(gpa);
+    defer merges.results.deinit(gpa);
+    defer merges.br_list.deinit(gpa);
 
     _ = try sema.analyzeBody(&child_block, body);
 
@@ -1963,6 +1953,8 @@ fn analyzeBlockBody(
     const tracy = trace(@src());
    defer tracy.end();
 
+    const gpa = sema.gpa;
+
     // Blocks must terminate with noreturn instruction.
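     // (The body is a flat array of AIR instruction indices; the final one must
     // be a noreturn instruction — e.g. `br`, `cond_br`, `loop`, `ret`, or
     // `unreach` — so control can never implicitly fall off the end of a block.)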
assert(child_block.instructions.items.len != 0); assert(child_block.instructions.items[child_block.instructions.items.len - 1].ty.isNoReturn()); @@ -1971,7 +1963,7 @@ fn analyzeBlockBody( // No need for a block instruction. We can put the new instructions // directly into the parent block. const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); - try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); + try parent_block.instructions.appendSlice(gpa, copied_instructions); return copied_instructions[copied_instructions.len - 1]; } if (merges.results.items.len == 1) { @@ -1982,7 +1974,7 @@ fn analyzeBlockBody( // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); - try parent_block.instructions.appendSlice(sema.gpa, copied_instructions); + try parent_block.instructions.appendSlice(gpa, copied_instructions); return merges.results.items[0]; } } @@ -1992,21 +1984,26 @@ fn analyzeBlockBody( // Need to set the type and emit the Block instruction. This allows machine code generation // to emit a jump instruction to after the block when it encounters the break. - try parent_block.instructions.append(sema.gpa, &merges.block_inst.base); + try parent_block.instructions.append(gpa, merges.block_inst); const resolved_ty = try sema.resolvePeerTypes(parent_block, src, merges.results.items); - merges.block_inst.base.ty = resolved_ty; - merges.block_inst.body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), - }; + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + child_block.instructions.items.len); + sema.air_instructions.items(.data)[merges.block_inst] = .{ .ty_pl = .{ + .ty = try sema.addType(resolved_ty), + .payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(u32, child_block.instructions.items.len), + }), + } }; + sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. for (merges.br_list.items) |br| { - if (br.operand.ty.eql(resolved_ty)) { + if (sema.getTypeOf(br.operand).eql(resolved_ty)) { // No type coercion needed. continue; } var coerce_block = parent_block.makeSubBlock(); - defer coerce_block.instructions.deinit(sema.gpa); + defer coerce_block.instructions.deinit(gpa); const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br.operand, br.operand.src); // If no instructions were produced, such as in the case of a coercion of a // constant value to a new type, we can simply point the br operand to it. 
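The `addExtraAssumeCapacity` helper that the new code leans on is not defined anywhere in this diff (it presumably lives alongside the new Air.zig encoding). Its shape is implied by the call sites and by the `@typeInfo(Air.Block).Struct.fields.len` capacity math: serialize each `u32`-sized field of a payload struct into `air_extra` and return the index of the first one. A minimal sketch only, assuming every payload field is a `u32` or an `Air.Inst.Ref`, which is how `Air.Block` and `Air.CondBr` are used here:

    fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 {
        const fields = @typeInfo(@TypeOf(extra)).Struct.fields;
        const result = @intCast(u32, sema.air_extra.items.len);
        inline for (fields) |field| {
            sema.air_extra.appendAssumeCapacity(switch (field.field_type) {
                u32 => @field(extra, field.name),
                Air.Inst.Ref => @enumToInt(@field(extra, field.name)),
                else => @compileError("bad field type"),
            });
        }
        // The caller stores this index in the instruction's `ty_pl.payload`.
        return result;
    }

The caller then appends the body's instruction indices directly behind the header with `appendSliceAssumeCapacity`, which is why every capacity reservation above counts the payload struct's field count plus the body length.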
@@ -2032,7 +2029,7 @@ fn analyzeBlockBody( }, }; } - return &merges.block_inst.base; + return indexToRef(merges.block_inst); } fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -2104,7 +2101,7 @@ fn zirBreakpoint(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile const src_node = sema.code.instructions.items(.data)[inst].node; const src: LazySrcLoc = .{ .node_offset = src_node }; try sema.requireRuntimeBlock(block, src); - _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint); + _ = try block.addNoOp(.breakpoint); } fn zirFence(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -2311,6 +2308,8 @@ fn analyzeCall( }), } + const gpa = sema.gpa; + const ret_type = func.ty.fnReturnType(); const is_comptime_call = block.is_comptime or modifier == .compile_time; @@ -2331,15 +2330,11 @@ fn analyzeCall( // set to in the `Scope.Block`. // This block instruction will be used to capture the return value from the // inlined function. - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = ret_type, - .src = call_src, - }, - .body = undefined, - }; + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .block, + .data = undefined, + }); // This one is shared among sub-blocks within the same callee, but not // shared among the entire inline/comptime call stack. var inlining: Scope.Block.Inlining = .{ @@ -2358,7 +2353,7 @@ fn analyzeCall( const parent_inst_map = sema.inst_map; sema.inst_map = .{}; defer { - sema.inst_map.deinit(sema.gpa); + sema.inst_map.deinit(gpa); sema.inst_map = parent_inst_map; } @@ -2390,9 +2385,9 @@ fn analyzeCall( const merges = &child_block.inlining.?.merges; - defer child_block.instructions.deinit(sema.gpa); - defer merges.results.deinit(sema.gpa); - defer merges.br_list.deinit(sema.gpa); + defer child_block.instructions.deinit(gpa); + defer merges.results.deinit(gpa); + defer merges.br_list.deinit(gpa); try sema.emitBackwardBranch(&child_block, call_src); @@ -2525,17 +2520,16 @@ fn zirErrorValue(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const src = inst_data.src(); // Create an anonymous error set type with only this error value, and return the value. 
const kv = try sema.mod.getErrorValue(inst_data.get(sema.code)); const result_type = try Type.Tag.error_set_single.create(sema.arena, kv.key); - return sema.mod.constInst(sema.arena, src, .{ - .ty = result_type, - .val = try Value.Tag.@"error".create(sema.arena, .{ + return sema.addConstant( + result_type, + try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key, }), - }); + ); } fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -2558,10 +2552,7 @@ fn zirErrorToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile .base = .{ .tag = .int_u64 }, .data = (try sema.mod.getErrorValue(val.castTag(.@"error").?.data.name)).value, }; - return sema.mod.constInst(sema.arena, src, .{ - .ty = result_ty, - .val = Value.initPayload(&payload.base), - }); + return sema.addConstant(result_ty, Value.initPayload(&payload.base)); } try sema.requireRuntimeBlock(block, src); @@ -2587,10 +2578,7 @@ fn zirIntToError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile .base = .{ .tag = .@"error" }, .data = .{ .name = sema.mod.error_name_list.items[@intCast(usize, int)] }, }; - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.anyerror), - .val = Value.initPayload(&payload.base), - }); + return sema.addConstant(Type.initTag(.anyerror), Value.initPayload(&payload.base)); } try sema.requireRuntimeBlock(block, src); if (block.wantSafety()) { @@ -2630,10 +2618,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com // Anything merged with anyerror is anyerror. if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.type), - .val = Value.initTag(.anyerror_type), - }); + return Air.Inst.Ref.anyerror_type; } // When we support inferred error sets, we'll want to use a data structure that can // represent a merged set of errors without forcing them to be resolved here. 
Until then @@ -2685,10 +2670,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com .names_len = @intCast(u32, new_names.len), }; const error_set_ty = try Type.Tag.error_set.create(sema.arena, new_error_set); - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.type), - .val = try Value.Tag.ty.create(sema.arena, error_set_ty), - }); + return sema.addConstant(Type.initTag(.type), try Value.Tag.ty.create(sema.arena, error_set_ty)); } fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -2697,12 +2679,11 @@ fn zirEnumLiteral(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compil defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].str_tok; - const src = inst_data.src(); const duped_name = try sema.arena.dupe(u8, inst_data.get(sema.code)); - return sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.enum_literal), - .val = try Value.Tag.enum_literal.create(sema.arena, duped_name), - }); + return sema.addConstant( + Type.initTag(.enum_literal), + try Value.Tag.enum_literal.create(sema.arena, duped_name), + ); } fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -2712,11 +2693,12 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); + const operand_ty = sema.getTypeOf(operand); - const enum_tag: Air.Inst.Ref = switch (operand.ty.zigTypeTag()) { + const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { .Enum => operand, .Union => { - //if (!operand.ty.unionHasTag()) { + //if (!operand_ty.unionHasTag()) { // return mod.fail( // &block.base, // operand_src, @@ -2728,58 +2710,44 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE }, else => { return mod.fail(&block.base, operand_src, "expected enum or tagged union, found {}", .{ - operand.ty, + operand_ty, }); }, }; + const enum_tag_ty = sema.getTypeOf(enum_tag); var int_tag_type_buffer: Type.Payload.Bits = undefined; - const int_tag_ty = try enum_tag.ty.intTagType(&int_tag_type_buffer).copy(arena); + const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); - if (try sema.typeHasOnePossibleValue(block, src, enum_tag.ty)) |opv| { - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = opv, - }); + if (try sema.typeHasOnePossibleValue(block, src, enum_tag_ty)) |opv| { + return sema.addConstant(int_tag_ty, opv); } if (try sema.resolvePossiblyUndefinedValue(block, operand_src, enum_tag)) |enum_tag_val| { if (enum_tag_val.castTag(.enum_field_index)) |enum_field_payload| { const field_index = enum_field_payload.data; - switch (enum_tag.ty.tag()) { + switch (enum_tag_ty.tag()) { .enum_full => { - const enum_full = enum_tag.ty.castTag(.enum_full).?.data; + const enum_full = enum_tag_ty.castTag(.enum_full).?.data; if (enum_full.values.count() != 0) { const val = enum_full.values.keys()[field_index]; - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = val, - }); + return sema.addConstant(int_tag_ty, val); } else { // Field index and integer values are the same. 
const val = try Value.Tag.int_u64.create(arena, field_index); - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = val, - }); + return sema.addConstant(int_tag_ty, val); } }, .enum_simple => { // Field index and integer values are the same. const val = try Value.Tag.int_u64.create(arena, field_index); - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = val, - }); + return sema.addConstant(int_tag_ty, val); }, else => unreachable, } } else { // Assume it is already an integer and return it directly. - return mod.constInst(arena, src, .{ - .ty = int_tag_ty, - .val = enum_tag_val, - }); + return sema.addConstant(int_tag_ty, enum_tag_val); } } @@ -2790,7 +2758,6 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { const mod = sema.mod; const target = mod.getTarget(); - const arena = sema.arena; const inst_data = sema.code.instructions.items(.data)[inst].pl_node; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const src = inst_data.src(); @@ -2805,10 +2772,7 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE if (try sema.resolvePossiblyUndefinedValue(block, operand_src, operand)) |int_val| { if (dest_ty.isNonexhaustiveEnum()) { - return mod.constInst(arena, src, .{ - .ty = dest_ty, - .val = int_val, - }); + return sema.addConstant(dest_ty, int_val); } if (int_val.isUndef()) { return sema.failWithUseOfUndef(block, operand_src); @@ -2832,10 +2796,7 @@ fn zirIntToEnum(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE }; return mod.failWithOwnedErrorMsg(&block.base, msg); } - return mod.constInst(arena, src, .{ - .ty = dest_ty, - .val = int_val, - }); + return sema.addConstant(dest_ty, int_val); } try sema.requireRuntimeBlock(block, src); @@ -2854,16 +2815,17 @@ fn zirOptionalPayloadPtr( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const optional_ptr = sema.resolveInst(inst_data.operand); - assert(optional_ptr.ty.zigTypeTag() == .Pointer); + const optional_ptr_ty = sema.getTypeOf(optional_ptr); + assert(optional_ptr_ty.zigTypeTag() == .Pointer); const src = inst_data.src(); - const opt_type = optional_ptr.ty.elemType(); + const opt_type = optional_ptr_ty.elemType(); if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); } const child_type = try opt_type.optionalChildAlloc(sema.arena); - const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr.ty.isConstPtr(), .One); + const child_pointer = try Module.simplePtrType(sema.arena, child_type, !optional_ptr_ty.isConstPtr(), .One); if (try sema.resolveDefinedValue(block, src, optional_ptr)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2871,10 +2833,7 @@ fn zirOptionalPayloadPtr( return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); } // The same Value represents the pointer to the optional and the payload. 
- return sema.mod.constInst(sema.arena, src, .{ - .ty = child_pointer, - .val = pointer_val, - }); + return sema.addConstant(child_pointer, pointer_val); } try sema.requireRuntimeBlock(block, src); @@ -2898,7 +2857,8 @@ fn zirOptionalPayload( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - const opt_type = operand.ty; + const operand_ty = sema.getTypeOf(operand); + const opt_type = operand_ty; if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); } @@ -2909,10 +2869,7 @@ fn zirOptionalPayload( if (val.isNull()) { return sema.mod.fail(&block.base, src, "unable to unwrap null", .{}); } - return sema.mod.constInst(sema.arena, src, .{ - .ty = child_type, - .val = val, - }); + return sema.addConstant(child_type, val); } try sema.requireRuntimeBlock(block, src); @@ -2936,25 +2893,27 @@ fn zirErrUnionPayload( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - if (operand.ty.zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, operand.src, "expected error union type, found '{}'", .{operand.ty}); + const operand_src = src; + const operand_ty = sema.getTypeOf(operand); + if (operand_ty.zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, operand_src, "expected error union type, found '{}'", .{operand_ty}); if (try sema.resolveDefinedValue(block, src, operand)) |val| { if (val.getError()) |name| { return sema.mod.fail(&block.base, src, "caught unexpected error '{s}'", .{name}); } const data = val.castTag(.error_union).?.data; - return sema.mod.constInst(sema.arena, src, .{ - .ty = operand.ty.castTag(.error_union).?.data.payload, - .val = data, - }); + return sema.addConstant( + operand_ty.castTag(.error_union).?.data.payload, + data, + ); } try sema.requireRuntimeBlock(block, src); if (safety_check and block.wantSafety()) { const is_non_err = try block.addUnOp(.is_err, operand); try sema.addSafetyCheck(block, is_non_err, .unwrap_errunion); } - const result_ty = operand.ty.castTag(.error_union).?.data.payload; + const result_ty = operand_ty.castTag(.error_union).?.data.payload; return block.addTyOp(.unwrap_errunion_payload, result_ty, operand); } @@ -2971,12 +2930,13 @@ fn zirErrUnionPayloadPtr( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - assert(operand.ty.zigTypeTag() == .Pointer); + const operand_ty = sema.getTypeOf(operand); + assert(operand_ty.zigTypeTag() == .Pointer); - if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); + if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand_ty.elemType()}); - const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand.ty.elemType().castTag(.error_union).?.data.payload, !operand.ty.isConstPtr(), .One); + const operand_pointer_ty = try Module.simplePtrType(sema.arena, operand_ty.elemType().castTag(.error_union).?.data.payload, !operand_ty.isConstPtr(), .One); if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); @@ -2985,13 +2945,13 @@ fn zirErrUnionPayloadPtr( } const data 
= val.castTag(.error_union).?.data; // The same Value represents the pointer to the error union and the payload. - return sema.mod.constInst(sema.arena, src, .{ - .ty = operand_pointer_ty, - .val = try Value.Tag.ref_val.create( + return sema.addConstant( + operand_pointer_ty, + try Value.Tag.ref_val.create( sema.arena, data, ), - }); + ); } try sema.requireRuntimeBlock(block, src); @@ -3010,18 +2970,16 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compi const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - if (operand.ty.zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); + const operand_ty = sema.getTypeOf(operand); + if (operand_ty.zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand_ty}); - const result_ty = operand.ty.castTag(.error_union).?.data.error_set; + const result_ty = operand_ty.castTag(.error_union).?.data.error_set; if (try sema.resolveDefinedValue(block, src, operand)) |val| { assert(val.getError() != null); const data = val.castTag(.error_union).?.data; - return sema.mod.constInst(sema.arena, src, .{ - .ty = result_ty, - .val = data, - }); + return sema.addConstant(result_ty, data); } try sema.requireRuntimeBlock(block, src); @@ -3036,21 +2994,19 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - assert(operand.ty.zigTypeTag() == .Pointer); + const operand_ty = sema.getTypeOf(operand); + assert(operand_ty.zigTypeTag() == .Pointer); - if (operand.ty.elemType().zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand.ty.elemType()}); + if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, src, "expected error union type, found {}", .{operand_ty.elemType()}); - const result_ty = operand.ty.elemType().castTag(.error_union).?.data.error_set; + const result_ty = operand_ty.elemType().castTag(.error_union).?.data.error_set; if (try sema.resolveDefinedValue(block, src, operand)) |pointer_val| { const val = try pointer_val.pointerDeref(sema.arena); assert(val.getError() != null); const data = val.castTag(.error_union).?.data; - return sema.mod.constInst(sema.arena, src, .{ - .ty = result_ty, - .val = data, - }); + return sema.addConstant(result_ty, data); } try sema.requireRuntimeBlock(block, src); @@ -3064,9 +3020,10 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - if (operand.ty.zigTypeTag() != .ErrorUnion) - return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand.ty}); - if (operand.ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { + const operand_ty = sema.getTypeOf(operand); + if (operand_ty.zigTypeTag() != .ErrorUnion) + return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand_ty}); + if (operand_ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { return sema.mod.fail(&block.base, src, "expression value is ignored", .{}); } } @@ -3233,10 +3190,10 @@ 
fn funcCommon( } if (is_extern) { - return sema.mod.constInst(sema.arena, src, .{ - .ty = fn_ty, - .val = try Value.Tag.extern_fn.create(sema.arena, sema.owner_decl), - }); + return sema.addConstant( + fn_ty, + try Value.Tag.extern_fn.create(sema.arena, sema.owner_decl), + ); } if (body_inst == 0) { @@ -3261,11 +3218,7 @@ fn funcCommon( .base = .{ .tag = .function }, .data = new_func, }; - const result = try sema.mod.constInst(sema.arena, src, .{ - .ty = fn_ty, - .val = Value.initPayload(&fn_payload.base), - }); - return result; + return sema.addConstant(fn_ty, Value.initPayload(&fn_payload.base)); } fn zirAs(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3324,7 +3277,7 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); const object = sema.resolveInst(extra.lhs); - const object_ptr = if (object.ty.zigTypeTag() == .Pointer) + const object_ptr = if (sema.getTypeOf(object).zigTypeTag() == .Pointer) object else try sema.analyzeRef(block, src, object); @@ -3397,13 +3350,14 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr ), }; - switch (operand.ty.zigTypeTag()) { + const operand_ty = sema.getTypeOf(operand); + switch (operand_ty.zigTypeTag()) { .ComptimeInt, .Int => {}, else => return sema.mod.fail( &block.base, operand_src, "expected integer type, found '{}'", - .{operand.ty}, + .{operand_ty}, ), } @@ -3454,13 +3408,14 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE ), }; - switch (operand.ty.zigTypeTag()) { + const operand_ty = sema.getTypeOf(operand); + switch (operand_ty.zigTypeTag()) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.mod.fail( &block.base, operand_src, "expected float type, found '{}'", - .{operand.ty}, + .{operand_ty}, ), } @@ -3479,7 +3434,8 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr const bin_inst = sema.code.instructions.items(.data)[inst].bin; const array = sema.resolveInst(bin_inst.lhs); - const array_ptr = if (array.ty.zigTypeTag() == .Pointer) + const array_ty = sema.getTypeOf(array); + const array_ptr = if (array_ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, sema.src, array); @@ -3497,7 +3453,8 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compil const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array = sema.resolveInst(extra.lhs); - const array_ptr = if (array.ty.zigTypeTag() == .Pointer) + const array_ty = sema.getTypeOf(array); + const array_ptr = if (array_ty.zigTypeTag() == .Pointer) array else try sema.analyzeRef(block, src, array); @@ -3705,9 +3662,10 @@ fn analyzeSwitch( const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset }; const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; + const operand_ty = sema.getTypeOf(operand); // Validate usage of '_' prongs. 
- if (special_prong == .under and !operand.ty.isNonexhaustiveEnum()) { + if (special_prong == .under and !operand_ty.isNonexhaustiveEnum()) { const msg = msg: { const msg = try mod.errMsg( &block.base, @@ -3729,9 +3687,9 @@ fn analyzeSwitch( } // Validate for duplicate items, missing else prong, and invalid range. - switch (operand.ty.zigTypeTag()) { + switch (operand_ty.zigTypeTag()) { .Enum => { - var seen_fields = try gpa.alloc(?Module.SwitchProngSrc, operand.ty.enumFieldCount()); + var seen_fields = try gpa.alloc(?Module.SwitchProngSrc, operand_ty.enumFieldCount()); defer gpa.free(seen_fields); mem.set(?Module.SwitchProngSrc, seen_fields, null); @@ -3777,7 +3735,7 @@ fn analyzeSwitch( ); } - try sema.validateSwitchNoRange(block, ranges_len, operand.ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } const all_tags_handled = for (seen_fields) |seen_src| { @@ -3798,7 +3756,7 @@ fn analyzeSwitch( for (seen_fields) |seen_src, i| { if (seen_src != null) continue; - const field_name = operand.ty.enumFieldName(i); + const field_name = operand_ty.enumFieldName(i); // TODO have this point to the tag decl instead of here try mod.errNote( @@ -3810,10 +3768,10 @@ fn analyzeSwitch( ); } try mod.errNoteNonLazy( - operand.ty.declSrcLoc(), + operand_ty.declSrcLoc(), msg, "enum '{}' declared here", - .{operand.ty}, + .{operand_ty}, ); break :msg msg; }; @@ -3908,12 +3866,12 @@ fn analyzeSwitch( } check_range: { - if (operand.ty.zigTypeTag() == .Int) { + if (operand_ty.zigTypeTag() == .Int) { var arena = std.heap.ArenaAllocator.init(gpa); defer arena.deinit(); - const min_int = try operand.ty.minInt(&arena, mod.getTarget()); - const max_int = try operand.ty.maxInt(&arena, mod.getTarget()); + const min_int = try operand_ty.minInt(&arena, mod.getTarget()); + const max_int = try operand_ty.maxInt(&arena, mod.getTarget()); if (try range_set.spans(min_int, max_int)) { if (special_prong == .@"else") { return mod.fail( @@ -3983,7 +3941,7 @@ fn analyzeSwitch( ); } - try sema.validateSwitchNoRange(block, ranges_len, operand.ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } switch (special_prong) { @@ -4015,7 +3973,7 @@ fn analyzeSwitch( &block.base, src, "else prong required when switching on type '{}'", - .{operand.ty}, + .{operand_ty}, ); } @@ -4063,7 +4021,7 @@ fn analyzeSwitch( ); } - try sema.validateSwitchNoRange(block, ranges_len, operand.ty, src_node_offset); + try sema.validateSwitchNoRange(block, ranges_len, operand_ty, src_node_offset); } } }, @@ -4083,20 +4041,15 @@ fn analyzeSwitch( .ComptimeFloat, .Float, => return mod.fail(&block.base, operand_src, "invalid switch operand type '{}'", .{ - operand.ty, + operand_ty, }), } - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = undefined, // Set after analysis. 
- .src = src, - }, - .body = undefined, - }; - + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .block, + .data = undefined, + }); var label: Scope.Block.Label = .{ .zir_block = switch_inst, .merges = .{ @@ -4634,7 +4587,7 @@ fn zirBitwise( const lhs_ty = sema.getTypeOf(lhs); const rhs_ty = sema.getTypeOf(rhs); - const instructions = &[_]Air.Inst.Index{ lhs, rhs }; + const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); @@ -4763,18 +4716,8 @@ fn analyzeArithmetic( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const instructions = &[_]Air.Inst.Index{ lhs, rhs }; - const resolved_type = try sema.resolvePeerTypes(block, src, instructions); - const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); - const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); - - const scalar_type = if (resolved_type.zigTypeTag() == .Vector) - resolved_type.elemType() - else - resolved_type; - - const scalar_tag = scalar_type.zigTypeTag(); - + const lhs_ty = sema.getTypeOf(lhs); + const rhs_ty = sema.getTypeOf(rhs); if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ @@ -4790,6 +4733,18 @@ fn analyzeArithmetic( }); } + const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; + const resolved_type = try sema.resolvePeerTypes(block, src, instructions); + const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src); + const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src); + + const scalar_type = if (resolved_type.zigTypeTag() == .Vector) + resolved_type.elemType() + else + resolved_type; + + const scalar_tag = scalar_type.zigTypeTag(); + const is_int = scalar_tag == .Int or scalar_tag == .ComptimeInt; const is_float = scalar_tag == .Float or scalar_tag == .ComptimeFloat; @@ -4807,10 +4762,7 @@ fn analyzeArithmetic( if (rhs_val.compareWithZero(.eq)) { switch (zir_tag) { .add, .addwrap, .sub, .subwrap => { - return sema.mod.constInst(sema.arena, src, .{ - .ty = scalar_type, - .val = lhs_val, - }); + return sema.addConstant(scalar_type, lhs_val); }, else => {}, } @@ -4850,10 +4802,7 @@ fn analyzeArithmetic( log.debug("{s}({}, {}) result: {}", .{ @tagName(zir_tag), lhs_val, rhs_val, value }); - return sema.mod.constInst(sema.arena, src, .{ - .ty = scalar_type, - .val = value, - }); + return sema.addConstant(scalar_type, value); } } @@ -5167,16 +5116,16 @@ fn zirTypeInfo(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr // args: []const FnArg, field_values[5] = Value.initTag(.null_value); // TODO - return sema.mod.constInst(sema.arena, src, .{ - .ty = type_info_ty, - .val = try Value.Tag.@"union".create(sema.arena, .{ + return sema.addConstant( + type_info_ty, + try Value.Tag.@"union".create(sema.arena, .{ .tag = try Value.Tag.enum_field_index.create( sema.arena, @enumToInt(@typeInfo(std.builtin.TypeInfo).Union.tag_type.?.Fn), ), .val = try Value.Tag.@"struct".create(sema.arena, field_values.ptr), }), - }); + ); }, else => |t| return sema.mod.fail(&block.base, src, "TODO: implement zirTypeInfo for {s}", .{ @tagName(t), @@ -5189,7 +5138,8 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: 
Zir.Inst.Index) CompileErro const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; const operand = sema.resolveInst(inst_data.operand); - return sema.addType(operand.ty); + const operand_ty = sema.getTypeOf(operand); + return sema.addType(operand_ty); } fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5241,11 +5191,12 @@ fn zirBoolNot(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); + const operand_src = src; // TODO put this on the operand, not the `!` const uncasted_operand = sema.resolveInst(inst_data.operand); const bool_type = Type.initTag(.bool); - const operand = try sema.coerce(block, bool_type, uncasted_operand, uncasted_operand.src); - if (try sema.resolveDefinedValue(block, src, operand)) |val| { + const operand = try sema.coerce(block, bool_type, uncasted_operand, operand_src); + if (try sema.resolveDefinedValue(block, operand_src, operand)) |val| { if (val.toBool()) { return Air.Inst.Ref.bool_false; } else { @@ -5267,12 +5218,13 @@ fn zirBoolBr( const datas = sema.code.instructions.items(.data); const inst_data = datas[inst].bool_br; - const src: LazySrcLoc = .unneeded; const lhs = sema.resolveInst(inst_data.lhs); + const lhs_src = sema.src; const extra = sema.code.extraData(Zir.Inst.Block, inst_data.payload_index); const body = sema.code.extra[extra.end..][0..extra.data.body_len]; + const gpa = sema.gpa; - if (try sema.resolveDefinedValue(parent_block, src, lhs)) |lhs_val| { + if (try sema.resolveDefinedValue(parent_block, lhs_src, lhs)) |lhs_val| { if (lhs_val.toBool() == is_bool_or) { if (is_bool_or) { return Air.Inst.Ref.bool_true; @@ -5286,49 +5238,59 @@ fn zirBoolBr( return sema.resolveBody(parent_block, body); } - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = Type.initTag(.bool), - .src = src, - }, - .body = undefined, - }; + const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .block, + .data = .{ .ty_pl = .{ + .ty = .bool_type, + .payload = undefined, + } }, + }); var child_block = parent_block.makeSubBlock(); child_block.runtime_loop = null; - child_block.runtime_cond = lhs.src; + child_block.runtime_cond = lhs_src; child_block.runtime_index += 1; - defer child_block.instructions.deinit(sema.gpa); + defer child_block.instructions.deinit(gpa); var then_block = child_block.makeSubBlock(); - defer then_block.instructions.deinit(sema.gpa); + defer then_block.instructions.deinit(gpa); var else_block = child_block.makeSubBlock(); - defer else_block.instructions.deinit(sema.gpa); + defer else_block.instructions.deinit(gpa); const lhs_block = if (is_bool_or) &then_block else &else_block; const rhs_block = if (is_bool_or) &else_block else &then_block; - const lhs_result = try sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.bool), - .val = if (is_bool_or) Value.initTag(.bool_true) else Value.initTag(.bool_false), - }); - _ = try lhs_block.addBr(src, block_inst, lhs_result); + const lhs_result: Air.Inst.Ref = if (is_bool_or) .bool_true else .bool_false; + _ = try lhs_block.addBr(block_inst, lhs_result); const rhs_result = try sema.resolveBody(rhs_block, body); - _ = try rhs_block.addBr(src, block_inst, rhs_result); + _ = try rhs_block.addBr(block_inst, rhs_result); - const air_then_body: ir.Body = .{ 
.instructions = try sema.arena.dupe(Air.Inst.Index, then_block.instructions.items) }; - const air_else_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, else_block.instructions.items) }; - _ = try child_block.addCondBr(src, lhs, air_then_body, air_else_body); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + + then_block.instructions.items.len + else_block.instructions.items.len + + @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len); - block_inst.body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items), - }; - try parent_block.instructions.append(sema.gpa, &block_inst.base); - return &block_inst.base; + sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( + Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, + ); + sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); + + const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, then_block.instructions.items.len), + .else_body_len = @intCast(u32, else_block.instructions.items.len), + }); + sema.air_extra.appendSliceAssumeCapacity(then_block.instructions.items); + sema.air_extra.appendSliceAssumeCapacity(else_block.instructions.items); + + _ = try child_block.addInst(.{ .tag = .cond_br, .data = .{ .pl_op = .{ + .operand = lhs, + .payload = cond_br_payload, + } } }); + + try parent_block.instructions.append(gpa, block_inst); + return indexToRef(block_inst); } fn zirIsNonNull( @@ -5439,7 +5401,7 @@ fn zirUnreachable(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compil if (safety_check and block.wantSafety()) { return sema.safetyPanic(block, src, .unreach); } else { - _ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach); + _ = try block.addNoOp(.unreach); return always_noreturn; } } @@ -5461,10 +5423,10 @@ fn zirRetErrValue( } // Return the error code from the function. const kv = try sema.mod.getErrorValue(err_name); - const result_inst = try sema.mod.constInst(sema.arena, src, .{ - .ty = try Type.Tag.error_set_single.create(sema.arena, kv.key), - .val = try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), - }); + const result_inst = try sema.addConstant( + try Type.Tag.error_set_single.create(sema.arena, kv.key), + try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }), + ); return sema.analyzeRet(block, result_inst, src, true); } @@ -5505,7 +5467,7 @@ fn analyzeRet( if (block.inlining) |inlining| { // We are inlining a function call; rewrite the `ret` as a `break`. 
try inlining.merges.results.append(sema.gpa, operand); - _ = try block.addBr(src, inlining.merges.block_inst, operand); + _ = try block.addBr(inlining.merges.block_inst, operand); return always_noreturn; } @@ -5613,10 +5575,7 @@ fn zirStructInitEmpty(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co const src = inst_data.src(); const struct_type = try sema.resolveType(block, src, inst_data.operand); - return sema.mod.constInst(sema.arena, src, .{ - .ty = struct_type, - .val = Value.initTag(.empty_struct_value), - }); + return sema.addConstant(struct_type, Value.initTag(.empty_struct_value)); } fn zirUnionInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -5696,10 +5655,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: root_msg = try mod.errMsg(&block.base, src, template, args); } } else { - field_inits[i] = try mod.constInst(sema.arena, src, .{ - .ty = field.ty, - .val = field.default_val, - }); + field_inits[i] = try sema.addConstant(field.ty, field.default_val); } } if (root_msg) |msg| { @@ -5729,10 +5685,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: for (field_inits) |field_init, i| { values[i] = field_init.value().?; } - return mod.constInst(sema.arena, src, .{ - .ty = struct_ty, - .val = try Value.Tag.@"struct".create(sema.arena, values.ptr), - }); + return sema.addConstant(struct_ty, try Value.Tag.@"struct".create(sema.arena, values.ptr)); } return mod.fail(&block.base, src, "TODO: Sema.zirStructInit for runtime-known struct values", .{}); @@ -5913,20 +5866,13 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr .base = .{ .tag = .int_u64 }, .data = addr, }; - return sema.mod.constInst(sema.arena, src, .{ - .ty = type_res, - .val = Value.initPayload(&val_payload.base), - }); + return sema.addConstant(type_res, Value.initPayload(&val_payload.base)); } try sema.requireRuntimeBlock(block, src); if (block.wantSafety()) { - const zero = try sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.u64), - .val = Value.initTag(.zero), - }); if (!type_res.isAllowzeroPtr()) { - const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, zero); + const is_non_zero = try block.addBinOp(.cmp_neq, operand_coerced, .zero_usize); try sema.addSafetyCheck(block, is_non_zero, .cast_to_null); } @@ -5936,12 +5882,12 @@ fn zirIntToPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr .base = .{ .tag = .int_u64 }, .data = ptr_align - 1, }; - const align_minus_1 = try sema.mod.constInst(sema.arena, src, .{ - .ty = Type.initTag(.u64), - .val = Value.initPayload(&val_payload.base), - }); + const align_minus_1 = try sema.addConstant( + Type.initTag(.usize), + Value.initPayload(&val_payload.base), + ); const remainder = try block.addBinOp(.bit_and, operand_coerced, align_minus_1); - const is_aligned = try block.addBinOp(.cmp_eq, remainder, zero); + const is_aligned = try block.addBinOp(.cmp_eq, remainder, .zero_usize); try sema.addSafetyCheck(block, is_aligned, .incorrect_alignment); } } @@ -6217,10 +6163,10 @@ fn zirVarExtended( .is_mutable = true, // TODO get rid of this unused field .is_threadlocal = small.is_threadlocal, }; - const result = try sema.mod.constInst(sema.arena, src, .{ - .ty = var_ty, - .val = try Value.Tag.variable.create(sema.arena, new_var), - }); + const result = try sema.addConstant( + var_ty, + try Value.Tag.variable.create(sema.arena, new_var), + ); return result; } @@ -6380,32 +6326,13 @@ pub const 
PanicId = enum { invalid_error_code, }; -fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, panic_id: PanicId) !void { - const block_inst = try sema.arena.create(Inst.Block); - block_inst.* = .{ - .base = .{ - .tag = Inst.Block.base_tag, - .ty = Type.initTag(.void), - .src = ok.src, - }, - .body = .{ - .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the condbr. - }, - }; - - const ok_body: ir.Body = .{ - .instructions = try sema.arena.alloc(Air.Inst.Index, 1), // Only need space for the br_void. - }; - const br_void = try sema.arena.create(Inst.BrVoid); - br_void.* = .{ - .base = .{ - .tag = .br_void, - .ty = Type.initTag(.noreturn), - .src = ok.src, - }, - .block = block_inst, - }; - ok_body.instructions[0] = &br_void.base; +fn addSafetyCheck( + sema: *Sema, + parent_block: *Scope.Block, + ok: Air.Inst.Ref, + panic_id: PanicId, +) !void { + const gpa = sema.gpa; var fail_block: Scope.Block = .{ .parent = parent_block, @@ -6416,26 +6343,55 @@ fn addSafetyCheck(sema: *Sema, parent_block: *Scope.Block, ok: Air.Inst.Ref, pan .is_comptime = parent_block.is_comptime, }; - defer fail_block.instructions.deinit(sema.gpa); + defer fail_block.instructions.deinit(gpa); - _ = try sema.safetyPanic(&fail_block, ok.src, panic_id); + _ = try sema.safetyPanic(&fail_block, .unneeded, panic_id); - const fail_body: ir.Body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, fail_block.instructions.items) }; + try parent_block.instructions.ensureUnusedCapacity(gpa, 1); - const condbr = try sema.arena.create(Inst.CondBr); - condbr.* = .{ - .base = .{ - .tag = .condbr, - .ty = Type.initTag(.noreturn), - .src = ok.src, - }, - .condition = ok, - .then_body = ok_body, - .else_body = fail_body, - }; - block_inst.body.instructions[0] = &condbr.base; + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + 1 + // The main block only needs space for the cond_br. + @typeInfo(Air.CondBr).Struct.fields.len + + 1 + // The ok branch of the cond_br only needs space for the br. 
+        fail_block.instructions.items.len);
 
-    try parent_block.instructions.append(sema.gpa, &block_inst.base);
+    try sema.air_instructions.ensureUnusedCapacity(gpa, 3);
+    const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len);
+    const cond_br_inst = block_inst + 1;
+    const br_inst = cond_br_inst + 1;
+    sema.air_instructions.appendAssumeCapacity(.{
+        .tag = .block,
+        .data = .{ .ty_pl = .{
+            .ty = .void_type,
+            .payload = sema.addExtraAssumeCapacity(Air.Block{
+                .body_len = 1,
+            }),
+        } },
+    });
+    sema.air_extra.appendAssumeCapacity(cond_br_inst);
+
+    sema.air_instructions.appendAssumeCapacity(.{
+        .tag = .cond_br,
+        .data = .{ .pl_op = .{
+            .operand = ok,
+            .payload = sema.addExtraAssumeCapacity(Air.CondBr{
+                .then_body_len = 1,
+                .else_body_len = @intCast(u32, fail_block.instructions.items.len),
+            }),
+        } },
+    });
+    sema.air_extra.appendAssumeCapacity(br_inst);
+    sema.air_extra.appendSliceAssumeCapacity(fail_block.instructions.items);
+
+    sema.air_instructions.appendAssumeCapacity(.{
+        .tag = .br,
+        .data = .{ .br = .{
+            .block_inst = block_inst,
+            .operand = .void_value,
+        } },
+    });
+
+    parent_block.instructions.appendAssumeCapacity(block_inst);
 }
 
 fn panicWithMsg(
@@ -6451,18 +6407,18 @@ fn panicWithMsg(
     mod.comp.bin_file.options.object_format == .c;
     if (!this_feature_is_implemented_in_the_backend) {
         // TODO implement this feature in all the backends and then delete this branch
-        _ = try block.addNoOp(src, Type.initTag(.void), .breakpoint);
-        _ = try block.addNoOp(src, Type.initTag(.noreturn), .unreach);
+        _ = try block.addNoOp(.breakpoint);
+        _ = try block.addNoOp(.unreach);
         return always_noreturn;
     }
     const panic_fn = try sema.getBuiltin(block, src, "panic");
     const unresolved_stack_trace_ty = try sema.getBuiltinType(block, src, "StackTrace");
     const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
     const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One);
-    const null_stack_trace = try mod.constInst(arena, src, .{
-        .ty = try mod.optionalType(arena, ptr_stack_trace_ty),
-        .val = Value.initTag(.null_value),
-    });
+    const null_stack_trace = try sema.addConstant(
+        try mod.optionalType(arena, ptr_stack_trace_ty),
+        Value.initTag(.null_value),
+    );
     const args = try arena.create([2]Air.Inst.Index);
     args.* = .{ msg_inst, null_stack_trace };
     _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args);
@@ -6503,7 +6459,6 @@ fn safetyPanic(
     };
 
     const casted_msg_inst = try sema.coerce(block, Type.initTag(.const_slice_u8), msg_inst, src);
-
     return sema.panicWithMsg(block, src, casted_msg_inst);
 }
 
@@ -6533,13 +6488,13 @@ fn namedFieldPtr(
     switch (elem_ty.zigTypeTag()) {
         .Array => {
             if (mem.eql(u8, field_name, "len")) {
-                return mod.constInst(arena, src, .{
-                    .ty = Type.initTag(.single_const_pointer_to_comptime_int),
-                    .val = try Value.Tag.ref_val.create(
+                return sema.addConstant(
+                    Type.initTag(.single_const_pointer_to_comptime_int),
+                    try Value.Tag.ref_val.create(
                         arena,
                         try Value.Tag.int_u64.create(arena, elem_ty.arrayLen()),
                     ),
-                });
+                );
             } else {
                 return mod.fail(
                     &block.base,
@@ -6554,13 +6509,13 @@ fn namedFieldPtr(
     switch (ptr_child.zigTypeTag()) {
         .Array => {
             if (mem.eql(u8, field_name, "len")) {
-                return mod.constInst(arena, src, .{
-                    .ty = Type.initTag(.single_const_pointer_to_comptime_int),
-                    .val = try Value.Tag.ref_val.create(
+                return sema.addConstant(
+                    Type.initTag(.single_const_pointer_to_comptime_int),
+                    try Value.Tag.ref_val.create(
                        arena,
                        try Value.Tag.int_u64.create(arena,
ptr_child.arrayLen()), ), - }); + ); } else { return mod.fail( &block.base, @@ -6597,15 +6552,15 @@ fn namedFieldPtr( }); } else (try mod.getErrorValue(field_name)).key; - return mod.constInst(arena, src, .{ - .ty = try Module.simplePtrType(arena, child_type, false, .One), - .val = try Value.Tag.ref_val.create( + return sema.addConstant( + try Module.simplePtrType(arena, child_type, false, .One), + try Value.Tag.ref_val.create( arena, try Value.Tag.@"error".create(arena, .{ .name = name, }), ), - }); + ); }, .Struct, .Opaque, .Union => { if (child_type.getNamespace()) |namespace| { @@ -6651,10 +6606,10 @@ fn namedFieldPtr( }; const field_index_u32 = @intCast(u32, field_index); const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32); - return mod.constInst(arena, src, .{ - .ty = try Module.simplePtrType(arena, child_type, false, .One), - .val = try Value.Tag.ref_val.create(arena, enum_val), - }); + return sema.addConstant( + try Module.simplePtrType(arena, child_type, false, .One), + try Value.Tag.ref_val.create(arena, enum_val), + ); }, else => return mod.fail(&block.base, src, "type '{}' has no members", .{child_type}), } @@ -6701,7 +6656,6 @@ fn analyzeStructFieldPtr( field_name_src: LazySrcLoc, unresolved_struct_ty: Type, ) CompileError!Air.Inst.Ref { - const mod = sema.mod; const arena = sema.arena; assert(unresolved_struct_ty.zigTypeTag() == .Struct); @@ -6714,13 +6668,13 @@ fn analyzeStructFieldPtr( const ptr_field_ty = try Module.simplePtrType(arena, field.ty, true, .One); if (try sema.resolveDefinedValue(block, src, struct_ptr)) |struct_ptr_val| { - return mod.constInst(arena, src, .{ - .ty = ptr_field_ty, - .val = try Value.Tag.field_ptr.create(arena, .{ + return sema.addConstant( + ptr_field_ty, + try Value.Tag.field_ptr.create(arena, .{ .container_ptr = struct_ptr_val, .field_index = field_index, }), - }); + ); } try sema.requireRuntimeBlock(block, src); @@ -6751,13 +6705,13 @@ fn analyzeUnionFieldPtr( if (try sema.resolveDefinedValue(block, src, union_ptr)) |union_ptr_val| { // TODO detect inactive union field and emit compile error - return mod.constInst(arena, src, .{ - .ty = ptr_field_ty, - .val = try Value.Tag.field_ptr.create(arena, .{ + return sema.addConstant( + ptr_field_ty, + try Value.Tag.field_ptr.create(arena, .{ .container_ptr = union_ptr_val, .field_index = field_index, }), - }); + ); } try sema.requireRuntimeBlock(block, src); @@ -6808,10 +6762,10 @@ fn elemPtrArray( const elem_ptr = try array_ptr_val.elemPtr(sema.arena, @intCast(usize, index_u64)); const pointee_type = array_ptr.ty.elemType().elemType(); - return sema.mod.constInst(sema.arena, src, .{ - .ty = try Type.Tag.single_const_pointer.create(sema.arena, pointee_type), - .val = elem_ptr, - }); + return sema.addConstant( + try Type.Tag.single_const_pointer.create(sema.arena, pointee_type), + elem_ptr, + ); } } _ = elem_index; @@ -6870,7 +6824,7 @@ fn coerce( .Optional => { // null to ?T if (inst_ty.zigTypeTag() == .Null) { - return mod.constInst(arena, inst_src, .{ .ty = dest_type, .val = Value.initTag(.null_value) }); + return sema.addConstant(dest_type, Value.initTag(.null_value)); } // T to ?T @@ -6981,10 +6935,10 @@ fn coerce( }; return mod.failWithOwnedErrorMsg(&block.base, msg); }; - return mod.constInst(arena, inst_src, .{ - .ty = resolved_dest_type, - .val = try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), - }); + return sema.addConstant( + resolved_dest_type, + try Value.Tag.enum_field_index.create(arena, @intCast(u32, field_index)), + ); } }, else 
=> {}, @@ -7024,7 +6978,7 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.R if (!val.intFitsInType(dest_type, target)) { return sema.mod.fail(&block.base, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val }); } - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) { if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { @@ -7037,7 +6991,7 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.R ), error.OutOfMemory => return error.OutOfMemory, }; - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = res }); + return sema.addConstant(dest_type, res); } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { return sema.mod.fail(&block.base, inst.src, "TODO int to float", .{}); } @@ -7132,7 +7086,7 @@ fn bitcast( fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } @@ -7140,7 +7094,7 @@ fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Ref { if (inst.value()) |val| { // The comptime Value representation is compatible with both types. - return sema.mod.constInst(sema.arena, inst.src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } @@ -7200,13 +7154,11 @@ fn analyzeRef( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const ptr_type = try sema.mod.simplePtrType(sema.arena, operand.ty, false, .One); + const operand_ty = sema.getTypeOf(operand); + const ptr_type = try sema.mod.simplePtrType(sema.arena, operand_ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { - return sema.mod.constInst(sema.arena, src, .{ - .ty = ptr_type, - .val = try Value.Tag.ref_val.create(sema.arena, val), - }); + return sema.addConstant(ptr_type, try Value.Tag.ref_val.create(sema.arena, val)); } try sema.requireRuntimeBlock(block, src); @@ -7267,7 +7219,8 @@ fn analyzeIsNonErr( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const ot = operand.ty.zigTypeTag(); + const operand_ty = sema.getTypeOf(operand); + const ot = operand_ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; assert(ot == .ErrorUnion); @@ -7549,7 +7502,7 @@ fn cmpNumeric( fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { - return sema.mod.constInst(sema.arena, inst_src, .{ .ty = dest_type, .val = val }); + return sema.addConstant(dest_type, val); } try sema.requireRuntimeBlock(block, inst.src); @@ -7614,11 +7567,8 @@ fn wrapErrorUnion( else => unreachable, } - return sema.mod.constInst(sema.arena, inst.src, .{ - .ty = 
dest_type, - // creating a SubValue for the error_union payload - .val = try Value.Tag.error_union.create(sema.arena, val), - }); + // Create a SubValue for the error_union payload. + return sema.addConstant(dest_type, try Value.Tag.error_union.create(sema.arena, val)); } try sema.requireRuntimeBlock(block, inst.src); diff --git a/src/Zir.zig b/src/Zir.zig index e14b636ab6..42924817fc 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -386,7 +386,7 @@ pub const Inst = struct { int, /// Arbitrary sized integer literal. Uses the `str` union field. int_big, - /// A float literal that fits in a f32. Uses the float union value. + /// A float literal that fits in a f64. Uses the float union value. float, /// A float literal that fits in a f128. Uses the `pl_node` union value. /// Payload is `Float128`. @@ -2058,16 +2058,7 @@ pub const Inst = struct { /// Offset from Decl AST node index. node: i32, int: u64, - float: struct { - /// Offset from Decl AST node index. - /// `Tag` determines which kind of AST node this points to. - src_node: i32, - number: f32, - - pub fn src(self: @This()) LazySrcLoc { - return .{ .node_offset = self.src_node }; - } - }, + float: f64, array_type_sentinel: struct { len: Ref, /// index into extra, points to an `ArrayTypeSentinel` @@ -3256,10 +3247,8 @@ const Writer = struct { } fn writeFloat(self: *Writer, stream: anytype, inst: Inst.Index) !void { - const inst_data = self.code.instructions.items(.data)[inst].float; - const src = inst_data.src(); - try stream.print("{d}) ", .{inst_data.number}); - try self.writeSrc(stream, src); + const number = self.code.instructions.items(.data)[inst].float; + try stream.print("{d})", .{number}); } fn writeFloat128(self: *Writer, stream: anytype, inst: Inst.Index) !void { From c020a302960c499ffe811dd0601a2d386c191b91 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 21:57:40 -0700 Subject: [PATCH 17/53] Sema: remove br_block_flat AIR instruction Thanks to the new AIR memory layout, we can do this by turning a br operand into a block, rather than having this special purpose instruction. --- BRANCH_TODO | 42 ------------ src/Air.zig | 4 -- src/Liveness.zig | 2 - src/Module.zig | 2 +- src/Sema.zig | 170 +++++++++++++++++++++++++++++------------------ 5 files changed, 105 insertions(+), 115 deletions(-) diff --git a/BRANCH_TODO b/BRANCH_TODO index aaba8b70b3..9055cda307 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -16,48 +16,6 @@ return inst.val; } - pub fn breakBlock(base: *Inst) ?*Block { - return switch (base.tag) { - .br => base.castTag(.br).?.block, - .br_void => base.castTag(.br_void).?.block, - .br_block_flat => base.castTag(.br_block_flat).?.block, - else => null, - }; - } - - pub const convertable_br_size = std.math.max(@sizeOf(BrBlockFlat), @sizeOf(Br)); - pub const convertable_br_align = std.math.max(@alignOf(BrBlockFlat), @alignOf(Br)); - comptime { - assert(@offsetOf(BrBlockFlat, "base") == @offsetOf(Br, "base")); - } - - pub const BrBlockFlat = struct { - pub const base_tag = Tag.br_block_flat; - - base: Inst, - block: *Block, - body: Body, - - pub fn operandCount(self: *const BrBlockFlat) usize { - _ = self; - return 0; - } - pub fn getOperand(self: *const BrBlockFlat, index: usize) ?*Inst { - _ = self; - _ = index; - return null; - } - }; - - /// Same as `br` except the operand is a list of instructions to be treated as - /// a flat block; that is there is only 1 break instruction from the block, and - /// it is implied to be after the last instruction, and the last instruction is - /// the break operand. 
- /// This instruction exists for late-stage semantic analysis patch ups, to - /// replace one br operand with multiple instructions, without moving anything else around. - br_block_flat, - - /// For debugging purposes, prints a function representation to stderr. diff --git a/src/Air.zig b/src/Air.zig index e2eeae1130..60e6e9933d 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -308,10 +308,6 @@ pub const Inst = struct { operand: Ref, payload: u32, }, - constant: struct { - ty: Type, - val: Value, - }, dbg_stmt: struct { line: u32, column: u32, diff --git a/src/Liveness.zig b/src/Liveness.zig index 838f19d4a1..98af9eb429 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -299,8 +299,6 @@ fn analyzeInst( const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = a.air.extra[extra.end..][0..extra.data.body_len]; try analyzeWithContext(a, new_set, body); - // We let this continue so that it can possibly mark the block as - // unreferenced below. return trackOperands(a, new_set, inst, main_tomb, .{ .none, .none, .none }); }, .loop => { diff --git a/src/Module.zig b/src/Module.zig index 4bd48dad05..94d8b63744 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1185,7 +1185,7 @@ pub const Scope = struct { block_inst: Air.Inst.Index, /// Separate array list from break_inst_list so that it can be passed directly /// to resolvePeerTypes. - results: ArrayListUnmanaged(Air.Inst.Index), + results: ArrayListUnmanaged(Air.Inst.Ref), /// Keeps track of the break instructions so that the operand can be replaced /// if we need to add type coercion at the end of block analysis. /// Same indexes, capacity, length as `results`. diff --git a/src/Sema.zig b/src/Sema.zig index 48ad8d97fc..b4e8cd5af5 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -163,36 +163,36 @@ pub fn analyzeBody( const air_inst: Air.Inst.Ref = switch (tags[inst]) { // zig fmt: off .arg => try sema.zirArg(block, inst), - //.alloc => try sema.zirAlloc(block, inst), - //.alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), - //.alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), - //.alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), - //.alloc_mut => try sema.zirAllocMut(block, inst), - //.alloc_comptime => try sema.zirAllocComptime(block, inst), - //.anyframe_type => try sema.zirAnyframeType(block, inst), - //.array_cat => try sema.zirArrayCat(block, inst), - //.array_mul => try sema.zirArrayMul(block, inst), - //.array_type => try sema.zirArrayType(block, inst), - //.array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), - //.vector_type => try sema.zirVectorType(block, inst), - //.as => try sema.zirAs(block, inst), - //.as_node => try sema.zirAsNode(block, inst), - //.bit_and => try sema.zirBitwise(block, inst, .bit_and), - //.bit_not => try sema.zirBitNot(block, inst), - //.bit_or => try sema.zirBitwise(block, inst, .bit_or), - //.bitcast => try sema.zirBitcast(block, inst), - //.bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), - //.block => try sema.zirBlock(block, inst), - //.suspend_block => try sema.zirSuspendBlock(block, inst), - //.bool_not => try sema.zirBoolNot(block, inst), - //.bool_br_and => try sema.zirBoolBr(block, inst, false), - //.bool_br_or => try sema.zirBoolBr(block, inst, true), - //.c_import => try sema.zirCImport(block, inst), - //.call => try sema.zirCall(block, inst, .auto, false), - //.call_chkused => try sema.zirCall(block, inst, .auto, true), - 
//.call_compile_time => try sema.zirCall(block, inst, .compile_time, false), - //.call_nosuspend => try sema.zirCall(block, inst, .no_async, false), - //.call_async => try sema.zirCall(block, inst, .async_kw, false), + .alloc => try sema.zirAlloc(block, inst), + .alloc_inferred => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_const)), + .alloc_inferred_mut => try sema.zirAllocInferred(block, inst, Type.initTag(.inferred_alloc_mut)), + .alloc_inferred_comptime => try sema.zirAllocInferredComptime(block, inst), + .alloc_mut => try sema.zirAllocMut(block, inst), + .alloc_comptime => try sema.zirAllocComptime(block, inst), + .anyframe_type => try sema.zirAnyframeType(block, inst), + .array_cat => try sema.zirArrayCat(block, inst), + .array_mul => try sema.zirArrayMul(block, inst), + .array_type => try sema.zirArrayType(block, inst), + .array_type_sentinel => try sema.zirArrayTypeSentinel(block, inst), + .vector_type => try sema.zirVectorType(block, inst), + .as => try sema.zirAs(block, inst), + .as_node => try sema.zirAsNode(block, inst), + .bit_and => try sema.zirBitwise(block, inst, .bit_and), + .bit_not => try sema.zirBitNot(block, inst), + .bit_or => try sema.zirBitwise(block, inst, .bit_or), + .bitcast => try sema.zirBitcast(block, inst), + .bitcast_result_ptr => try sema.zirBitcastResultPtr(block, inst), + .block => try sema.zirBlock(block, inst), + .suspend_block => try sema.zirSuspendBlock(block, inst), + .bool_not => try sema.zirBoolNot(block, inst), + .bool_br_and => try sema.zirBoolBr(block, inst, false), + .bool_br_or => try sema.zirBoolBr(block, inst, true), + .c_import => try sema.zirCImport(block, inst), + .call => try sema.zirCall(block, inst, .auto, false), + .call_chkused => try sema.zirCall(block, inst, .auto, true), + .call_compile_time => try sema.zirCall(block, inst, .compile_time, false), + .call_nosuspend => try sema.zirCall(block, inst, .no_async, false), + .call_async => try sema.zirCall(block, inst, .async_kw, false), .cmp_eq => try sema.zirCmp(block, inst, .eq), .cmp_gt => try sema.zirCmp(block, inst, .gt), .cmp_gte => try sema.zirCmp(block, inst, .gte), @@ -1957,24 +1957,23 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(child_block.instructions.items[child_block.instructions.items.len - 1].ty.isNoReturn()); + assert(sema.getTypeOf(indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. - const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items); - try parent_block.instructions.appendSlice(gpa, copied_instructions); - return copied_instructions[copied_instructions.len - 1]; + try parent_block.instructions.appendSlice(gpa, child_block.instructions.items); + return indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); } if (merges.results.items.len == 1) { const last_inst_index = child_block.instructions.items.len - 1; const last_inst = child_block.instructions.items[last_inst_index]; - if (last_inst.breakBlock()) |br_block| { + if (sema.getBreakBlock(last_inst)) |br_block| { if (br_block == merges.block_inst) { // No need for a block instruction. We can put the new instructions directly // into the parent block. Here we omit the break instruction. 
- const copied_instructions = try sema.arena.dupe(Air.Inst.Index, child_block.instructions.items[0..last_inst_index]); - try parent_block.instructions.appendSlice(gpa, copied_instructions); + const without_break = child_block.instructions.items[0..last_inst_index]; + try parent_block.instructions.appendSlice(gpa, without_break); return merges.results.items[0]; } } @@ -1998,36 +1997,50 @@ fn analyzeBlockBody( // Now that the block has its type resolved, we need to go back into all the break // instructions, and insert type coercion on the operands. for (merges.br_list.items) |br| { - if (sema.getTypeOf(br.operand).eql(resolved_ty)) { + const br_operand = sema.air_instructions.items(.data)[br].br.operand; + const br_operand_src = src; + const br_operand_ty = sema.getTypeOf(br_operand); + if (br_operand_ty.eql(resolved_ty)) { // No type coercion needed. continue; } var coerce_block = parent_block.makeSubBlock(); defer coerce_block.instructions.deinit(gpa); - const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br.operand, br.operand.src); + const coerced_operand = try sema.coerce(&coerce_block, resolved_ty, br_operand, br_operand_src); // If no instructions were produced, such as in the case of a coercion of a // constant value to a new type, we can simply point the br operand to it. if (coerce_block.instructions.items.len == 0) { - br.operand = coerced_operand; + sema.air_instructions.items(.data)[br].br.operand = coerced_operand; continue; } - assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == coerced_operand); - // Here we depend on the br instruction having been over-allocated (if necessary) - // inside zirBreak so that it can be converted into a br_block_flat instruction. - const br_src = br.base.src; - const br_ty = br.base.ty; - const br_block_flat = @ptrCast(*Inst.BrBlockFlat, br); - br_block_flat.* = .{ - .base = .{ - .src = br_src, - .ty = br_ty, - .tag = .br_block_flat, - }, - .block = merges.block_inst, - .body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, coerce_block.instructions.items), - }, - }; + assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == + refToIndex(coerced_operand).?); + + // Convert the br operand to a block. 
+ try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + coerce_block.instructions.items.len); + try sema.air_instructions.ensureUnusedCapacity(gpa, 2); + const sub_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); + const sub_br_inst = sub_block_inst + 1; + sema.air_instructions.items(.data)[br].br.operand = indexToRef(sub_block_inst); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .block, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(br_operand_ty), + .payload = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(u32, coerce_block.instructions.items.len), + }), + } }, + }); + sema.air_extra.appendSliceAssumeCapacity(coerce_block.instructions.items); + sema.air_extra.appendAssumeCapacity(sub_br_inst); + sema.air_instructions.appendAssumeCapacity(.{ + .tag = .br, + .data = .{ .br = .{ + .block_inst = sub_block_inst, + .operand = coerced_operand, + } }, + }); } return indexToRef(merges.block_inst); } @@ -2257,10 +2270,11 @@ fn analyzeCall( ensure_result_used: bool, args: []const Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - if (func.ty.zigTypeTag() != .Fn) - return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func.ty}); + const func_ty = sema.getTypeOf(func); + if (func_ty.zigTypeTag() != .Fn) + return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func_ty}); - const cc = func.ty.fnCallingConvention(); + const cc = func_ty.fnCallingConvention(); if (cc == .Naked) { // TODO add error note: declared here return sema.mod.fail( @@ -2270,8 +2284,8 @@ fn analyzeCall( .{}, ); } - const fn_params_len = func.ty.fnParamLen(); - if (func.ty.fnIsVarArgs()) { + const fn_params_len = func_ty.fnParamLen(); + if (func_ty.fnIsVarArgs()) { assert(cc == .C); if (args.len < fn_params_len) { // TODO add error note: declared here @@ -2310,11 +2324,9 @@ fn analyzeCall( const gpa = sema.gpa; - const ret_type = func.ty.fnReturnType(); - const is_comptime_call = block.is_comptime or modifier == .compile_time; const is_inline_call = is_comptime_call or modifier == .always_inline or - func.ty.fnCallingConvention() == .Inline; + func_ty.fnCallingConvention() == .Inline; const result: Air.Inst.Ref = if (is_inline_call) res: { const func_val = try sema.resolveConstValue(block, func_src, func); const module_fn = switch (func_val.tag()) { @@ -2400,7 +2412,19 @@ fn analyzeCall( break :res result; } else res: { try sema.requireRuntimeBlock(block, call_src); - break :res try block.addCall(call_src, ret_type, func, args); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Call).Struct.fields.len + + args.len); + const func_inst = try block.addInst(.{ + .tag = .call, + .data = .{ .pl_op = .{ + .operand = func, + .payload = sema.addExtraAssumeCapacity(Air.Call{ + .args_len = @intCast(u32, args.len), + }), + } }, + }); + sema.appendRefsAssumeCapacity(args); + break :res func_inst; }; if (ensure_result_used) { @@ -8140,3 +8164,17 @@ pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { } return result; } + +fn appendRefsAssumeCapacity(sema: *Sema, refs: []const Air.Inst.Ref) void { + const coerced = @bitCast([]const u32, refs); + sema.air_extra.appendSliceAssumeCapacity(coerced); +} + +fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index { + const air_datas = sema.air_instructions.items(.data); + const air_tags = sema.air_instructions.items(.tag); + switch (air_tags[inst_index]) { + .br => return air_datas[inst_index].br.block_inst, + else => return null, + } +} From 
1294ebe1f5eaca1f11d68284d1b96419d53253be Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 22:44:57 -0700 Subject: [PATCH 18/53] Sema: AIR memory layout reworking for noreturn instructions --- src/Module.zig | 2 +- src/Sema.zig | 88 +++++++++++++++++++++++--------------------------- 2 files changed, 41 insertions(+), 49 deletions(-) diff --git a/src/Module.zig b/src/Module.zig index 94d8b63744..fb514ccbd2 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1255,7 +1255,7 @@ pub const Scope = struct { pub fn addNoOp(block: *Block, tag: Air.Inst.Tag) error{OutOfMemory}!Air.Inst.Ref { return block.addInst(.{ .tag = tag, - .data = .no_op, + .data = .{ .no_op = {} }, }); } diff --git a/src/Sema.zig b/src/Sema.zig index b4e8cd5af5..d33d5bd49b 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -381,20 +381,20 @@ pub fn analyzeBody( .sub => try sema.zirArithmetic(block, inst), .subwrap => try sema.zirArithmetic(block, inst), - //// Instructions that we know to *always* be noreturn based solely on their tag. - //// These functions match the return type of analyzeBody so that we can - //// tail call them here. - //.break_inline => return inst, - //.condbr => return sema.zirCondbr(block, inst), - //.@"break" => return sema.zirBreak(block, inst), - //.compile_error => return sema.zirCompileError(block, inst), - //.ret_coerce => return sema.zirRetCoerce(block, inst, true), - //.ret_node => return sema.zirRetNode(block, inst), - //.ret_err_value => return sema.zirRetErrValue(block, inst), - //.@"unreachable" => return sema.zirUnreachable(block, inst), - //.repeat => return sema.zirRepeat(block, inst), - //.panic => return sema.zirPanic(block, inst), - //// zig fmt: on + // Instructions that we know to *always* be noreturn based solely on their tag. + // These functions match the return type of analyzeBody so that we can + // tail call them here. + .break_inline => return inst, + .condbr => return sema.zirCondbr(block, inst), + .@"break" => return sema.zirBreak(block, inst), + .compile_error => return sema.zirCompileError(block, inst), + .ret_coerce => return sema.zirRetCoerce(block, inst, true), + .ret_node => return sema.zirRetNode(block, inst), + .ret_err_value => return sema.zirRetErrValue(block, inst), + .@"unreachable" => return sema.zirUnreachable(block, inst), + .repeat => return sema.zirRepeat(block, inst), + .panic => return sema.zirPanic(block, inst), + // zig fmt: on //// Instructions that we know can *never* be noreturn based solely on //// their tag. We avoid needlessly checking if they are noreturn and @@ -534,7 +534,7 @@ pub fn analyzeBody( return break_inst; } }, - else => @panic("TODO finish updating Sema for AIR memory layout changes and then remove this else prong"), + else => |t| @panic(@tagName(t)), }; if (sema.getTypeOf(air_inst).isNoReturn()) return always_noreturn; @@ -2128,7 +2128,6 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) Compil defer tracy.end(); const inst_data = sema.code.instructions.items(.data)[inst].@"break"; - const src = sema.src; const operand = sema.resolveInst(inst_data.operand); const zir_block = inst_data.block_inst; @@ -2136,26 +2135,9 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) Compil while (true) { if (block.label) |label| { if (label.zir_block == zir_block) { - // Here we add a br instruction, but we over-allocate a little bit - // (if necessary) to make it possible to convert the instruction into - // a br_block_flat instruction later. 
- const br = @ptrCast(*Inst.Br, try sema.arena.alignedAlloc( - u8, - Inst.convertable_br_align, - Inst.convertable_br_size, - )); - br.* = .{ - .base = .{ - .tag = .br, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .operand = operand, - .block = label.merges.block_inst, - }; - try start_block.instructions.append(sema.gpa, &br.base); + const br_ref = try start_block.addBr(label.merges.block_inst, operand); try label.merges.results.append(sema.gpa, operand); - try label.merges.br_list.append(sema.gpa, br); + try label.merges.br_list.append(sema.gpa, refToIndex(br_ref).?); return inst; } } @@ -5391,25 +5373,35 @@ fn zirCondbr( return always_noreturn; } + const gpa = sema.gpa; + + // We'll re-use the sub block to save on memory bandwidth, and yank out the + // instructions array in between using it for the then block and else block. var sub_block = parent_block.makeSubBlock(); sub_block.runtime_loop = null; - sub_block.runtime_cond = cond.src; + sub_block.runtime_cond = cond_src; sub_block.runtime_index += 1; - defer sub_block.instructions.deinit(sema.gpa); + defer sub_block.instructions.deinit(gpa); _ = try sema.analyzeBody(&sub_block, then_body); - const air_then_body: ir.Body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), - }; - - sub_block.instructions.shrinkRetainingCapacity(0); + const true_instructions = sub_block.instructions.toOwnedSlice(gpa); + defer gpa.free(true_instructions); _ = try sema.analyzeBody(&sub_block, else_body); - const air_else_body: ir.Body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, sub_block.instructions.items), - }; - - _ = try parent_block.addCondBr(src, cond, air_then_body, air_else_body); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.CondBr).Struct.fields.len + + true_instructions.len + sub_block.instructions.items.len); + _ = try parent_block.addInst(.{ + .tag = .cond_br, + .data = .{ .pl_op = .{ + .operand = cond, + .payload = sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, true_instructions.len), + .else_body_len = @intCast(u32, sub_block.instructions.items.len), + }), + } }, + }); + sema.air_extra.appendSliceAssumeCapacity(true_instructions); + sema.air_extra.appendSliceAssumeCapacity(sub_block.instructions.items); return always_noreturn; } @@ -6443,7 +6435,7 @@ fn panicWithMsg( try mod.optionalType(arena, ptr_stack_trace_ty), Value.initTag(.null_value), ); - const args = try arena.create([2]Air.Inst.Index); + const args = try arena.create([2]Air.Inst.Ref); args.* = .{ msg_inst, null_stack_trace }; _ = try sema.analyzeCall(block, panic_fn, src, src, .auto, false, args); return always_noreturn; From 0da66339096c21d9ca524ff7a0c11a5707b60041 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Wed, 14 Jul 2021 23:59:29 -0700 Subject: [PATCH 19/53] Sema: fix implementation of getTypeOf and rename it to typeOf --- src/Sema.zig | 469 ++++++++++++++++++++++++++++----------------------- 1 file changed, 259 insertions(+), 210 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index d33d5bd49b..268f7bc903 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -199,65 +199,65 @@ pub fn analyzeBody( .cmp_lt => try sema.zirCmp(block, inst, .lt), .cmp_lte => try sema.zirCmp(block, inst, .lte), .cmp_neq => try sema.zirCmp(block, inst, .neq), - //.coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), - //.decl_ref => try sema.zirDeclRef(block, inst), - //.decl_val => try sema.zirDeclVal(block, inst), - //.load => try sema.zirLoad(block, inst), - //.elem_ptr => try 
sema.zirElemPtr(block, inst), - //.elem_ptr_node => try sema.zirElemPtrNode(block, inst), - //.elem_val => try sema.zirElemVal(block, inst), - //.elem_val_node => try sema.zirElemValNode(block, inst), - //.elem_type => try sema.zirElemType(block, inst), - //.enum_literal => try sema.zirEnumLiteral(block, inst), - //.enum_to_int => try sema.zirEnumToInt(block, inst), - //.int_to_enum => try sema.zirIntToEnum(block, inst), - //.err_union_code => try sema.zirErrUnionCode(block, inst), - //.err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), - //.err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), - //.err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), - //.err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), - //.err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), - //.error_union_type => try sema.zirErrorUnionType(block, inst), - //.error_value => try sema.zirErrorValue(block, inst), - //.error_to_int => try sema.zirErrorToInt(block, inst), - //.int_to_error => try sema.zirIntToError(block, inst), - //.field_ptr => try sema.zirFieldPtr(block, inst), - //.field_ptr_named => try sema.zirFieldPtrNamed(block, inst), - //.field_val => try sema.zirFieldVal(block, inst), - //.field_val_named => try sema.zirFieldValNamed(block, inst), - //.func => try sema.zirFunc(block, inst, false), - //.func_inferred => try sema.zirFunc(block, inst, true), - //.import => try sema.zirImport(block, inst), - //.indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), - //.int => try sema.zirInt(block, inst), - //.int_big => try sema.zirIntBig(block, inst), - //.float => try sema.zirFloat(block, inst), - //.float128 => try sema.zirFloat128(block, inst), - //.int_type => try sema.zirIntType(block, inst), - //.is_non_err => try sema.zirIsNonErr(block, inst), - //.is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), - //.is_non_null => try sema.zirIsNonNull(block, inst), - //.is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), - //.loop => try sema.zirLoop(block, inst), - //.merge_error_sets => try sema.zirMergeErrorSets(block, inst), - //.negate => try sema.zirNegate(block, inst, .sub), - //.negate_wrap => try sema.zirNegate(block, inst, .subwrap), - //.optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), - //.optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), - //.optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), - //.optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), - //.optional_type => try sema.zirOptionalType(block, inst), - //.param_type => try sema.zirParamType(block, inst), - //.ptr_type => try sema.zirPtrType(block, inst), - //.ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), - //.ref => try sema.zirRef(block, inst), - //.ret_err_value_code => try sema.zirRetErrValueCode(block, inst), - //.shl => try sema.zirShl(block, inst), - //.shr => try sema.zirShr(block, inst), - //.slice_end => try sema.zirSliceEnd(block, inst), - //.slice_sentinel => try sema.zirSliceSentinel(block, inst), - //.slice_start => try sema.zirSliceStart(block, inst), - //.str => try sema.zirStr(block, inst), + .coerce_result_ptr => try sema.zirCoerceResultPtr(block, inst), + .decl_ref => try sema.zirDeclRef(block, inst), + .decl_val => try sema.zirDeclVal(block, inst), + .load => try sema.zirLoad(block, inst), + .elem_ptr => try sema.zirElemPtr(block, inst), + .elem_ptr_node => try 
sema.zirElemPtrNode(block, inst), + .elem_val => try sema.zirElemVal(block, inst), + .elem_val_node => try sema.zirElemValNode(block, inst), + .elem_type => try sema.zirElemType(block, inst), + .enum_literal => try sema.zirEnumLiteral(block, inst), + .enum_to_int => try sema.zirEnumToInt(block, inst), + .int_to_enum => try sema.zirIntToEnum(block, inst), + .err_union_code => try sema.zirErrUnionCode(block, inst), + .err_union_code_ptr => try sema.zirErrUnionCodePtr(block, inst), + .err_union_payload_safe => try sema.zirErrUnionPayload(block, inst, true), + .err_union_payload_safe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, true), + .err_union_payload_unsafe => try sema.zirErrUnionPayload(block, inst, false), + .err_union_payload_unsafe_ptr => try sema.zirErrUnionPayloadPtr(block, inst, false), + .error_union_type => try sema.zirErrorUnionType(block, inst), + .error_value => try sema.zirErrorValue(block, inst), + .error_to_int => try sema.zirErrorToInt(block, inst), + .int_to_error => try sema.zirIntToError(block, inst), + .field_ptr => try sema.zirFieldPtr(block, inst), + .field_ptr_named => try sema.zirFieldPtrNamed(block, inst), + .field_val => try sema.zirFieldVal(block, inst), + .field_val_named => try sema.zirFieldValNamed(block, inst), + .func => try sema.zirFunc(block, inst, false), + .func_inferred => try sema.zirFunc(block, inst, true), + .import => try sema.zirImport(block, inst), + .indexable_ptr_len => try sema.zirIndexablePtrLen(block, inst), + .int => try sema.zirInt(block, inst), + .int_big => try sema.zirIntBig(block, inst), + .float => try sema.zirFloat(block, inst), + .float128 => try sema.zirFloat128(block, inst), + .int_type => try sema.zirIntType(block, inst), + .is_non_err => try sema.zirIsNonErr(block, inst), + .is_non_err_ptr => try sema.zirIsNonErrPtr(block, inst), + .is_non_null => try sema.zirIsNonNull(block, inst), + .is_non_null_ptr => try sema.zirIsNonNullPtr(block, inst), + .loop => try sema.zirLoop(block, inst), + .merge_error_sets => try sema.zirMergeErrorSets(block, inst), + .negate => try sema.zirNegate(block, inst, .sub), + .negate_wrap => try sema.zirNegate(block, inst, .subwrap), + .optional_payload_safe => try sema.zirOptionalPayload(block, inst, true), + .optional_payload_safe_ptr => try sema.zirOptionalPayloadPtr(block, inst, true), + .optional_payload_unsafe => try sema.zirOptionalPayload(block, inst, false), + .optional_payload_unsafe_ptr => try sema.zirOptionalPayloadPtr(block, inst, false), + .optional_type => try sema.zirOptionalType(block, inst), + .param_type => try sema.zirParamType(block, inst), + .ptr_type => try sema.zirPtrType(block, inst), + .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst), + .ref => try sema.zirRef(block, inst), + .ret_err_value_code => try sema.zirRetErrValueCode(block, inst), + .shl => try sema.zirShl(block, inst), + .shr => try sema.zirShr(block, inst), + .slice_end => try sema.zirSliceEnd(block, inst), + .slice_sentinel => try sema.zirSliceSentinel(block, inst), + .slice_start => try sema.zirSliceStart(block, inst), + .str => try sema.zirStr(block, inst), //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), //.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), @@ -536,7 +536,7 @@ pub fn analyzeBody( }, else => |t| @panic(@tagName(t)), }; - if (sema.getTypeOf(air_inst).isNoReturn()) + if (sema.typeOf(air_inst).isNoReturn()) return always_noreturn; try map.put(sema.gpa, inst, 
air_inst); i += 1; @@ -620,10 +620,10 @@ fn resolveConstString( pub fn resolveType(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, zir_ref: Zir.Inst.Ref) !Type { const air_inst = sema.resolveInst(zir_ref); - return sema.resolveAirAsType(block, src, air_inst); + return sema.analyzeAsType(block, src, air_inst); } -fn resolveAirAsType( +fn analyzeAsType( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, @@ -664,26 +664,26 @@ fn resolvePossiblyUndefinedValue( sema: *Sema, block: *Scope.Block, src: LazySrcLoc, - air_ref: Air.Inst.Ref, + inst: Air.Inst.Ref, ) CompileError!?Value { - const ty = sema.getTypeOf(air_ref); - if (try sema.typeHasOnePossibleValue(block, src, ty)) |opv| { - return opv; - } // First section of indexes correspond to a set number of constant values. - var i: usize = @enumToInt(air_ref); + var i: usize = @enumToInt(inst); if (i < Air.Inst.Ref.typed_value_map.len) { return Air.Inst.Ref.typed_value_map[i].val; } i -= Air.Inst.Ref.typed_value_map.len; + if (try sema.typeHasOnePossibleValue(block, src, sema.typeOf(inst))) |opv| { + return opv; + } + switch (sema.air_instructions.items(.tag)[i]) { .constant => { const ty_pl = sema.air_instructions.items(.data)[i].ty_pl; return sema.air_values.items[ty_pl.payload]; }, .const_ty => { - return sema.air_instructions.items(.data)[i].ty.toValue(undefined) catch unreachable; + return try sema.air_instructions.items(.data)[i].ty.toValue(sema.arena); }, else => return null, } @@ -739,7 +739,7 @@ pub fn resolveInstConst( const air_ref = sema.resolveInst(zir_ref); const val = try sema.resolveConstValue(block, src, air_ref); return TypedValue{ - .ty = sema.getTypeOf(air_ref), + .ty = sema.typeOf(air_ref), .val = val, }; } @@ -1230,7 +1230,7 @@ fn ensureResultUsed( operand: Air.Inst.Ref, src: LazySrcLoc, ) CompileError!void { - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag()) { .Void, .NoReturn => return, else => return sema.mod.fail(&block.base, src, "expression value is ignored", .{}), @@ -1244,7 +1244,7 @@ fn zirEnsureResultNonError(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand = sema.resolveInst(inst_data.operand); const src = inst_data.src(); - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag()) { .ErrorSet, .ErrorUnion => return sema.mod.fail(&block.base, src, "error is discarded", .{}), else => return, @@ -1259,7 +1259,7 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co const src = inst_data.src(); const array_ptr = sema.resolveInst(inst_data.operand); - const elem_ty = sema.getTypeOf(array_ptr).elemType(); + const elem_ty = sema.typeOf(array_ptr).elemType(); if (!elem_ty.isIndexable()) { const cond_src: LazySrcLoc = .{ .node_offset_for_cond = inst_data.src_node }; const msg = msg: { @@ -1282,7 +1282,8 @@ fn zirIndexablePtrLen(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } const result_ptr = try sema.namedFieldPtr(block, src, array_ptr, "len", src); - return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); + const result_ptr_src = src; + return sema.analyzeLoad(block, src, result_ptr, result_ptr_src); } fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -1630,7 +1631,7 @@ fn zirParamType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) 
CompileE const inst_data = sema.code.instructions.items(.data)[inst].param_type; const fn_inst = sema.resolveInst(inst_data.callee); - const fn_inst_ty = sema.getTypeOf(fn_inst); + const fn_inst_ty = sema.typeOf(fn_inst); const param_index = inst_data.param_index; const fn_ty: Type = switch (fn_inst_ty.zigTypeTag()) { @@ -1859,7 +1860,7 @@ fn zirLoop(sema: *Sema, parent_block: *Scope.Block, inst: Zir.Inst.Index) Compil sema.air_instructions.items(.data)[loop_inst].ty_pl.payload = sema.addExtraAssumeCapacity( Air.Block{ .body_len = @intCast(u32, loop_block.instructions.items.len) }, ); - sema.air_extra.appendAssumeCapacity(loop_block.instructions.items); + sema.air_extra.appendSliceAssumeCapacity(loop_block.instructions.items); return sema.analyzeBlockBody(parent_block, src, &child_block, merges); } @@ -1957,7 +1958,7 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(sema.getTypeOf(indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); + assert(sema.typeOf(indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions @@ -1999,7 +2000,7 @@ fn analyzeBlockBody( for (merges.br_list.items) |br| { const br_operand = sema.air_instructions.items(.data)[br].br.operand; const br_operand_src = src; - const br_operand_ty = sema.getTypeOf(br_operand); + const br_operand_ty = sema.typeOf(br_operand); if (br_operand_ty.eql(resolved_ty)) { // No type coercion needed. continue; @@ -2252,7 +2253,7 @@ fn analyzeCall( ensure_result_used: bool, args: []const Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const func_ty = sema.getTypeOf(func); + const func_ty = sema.typeOf(func); if (func_ty.zigTypeTag() != .Fn) return sema.mod.fail(&block.base, func_src, "type '{}' not a function", .{func_ty}); @@ -2606,8 +2607,8 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - const lhs_ty = sema.getTypeOf(lhs); - const rhs_ty = sema.getTypeOf(rhs); + const lhs_ty = sema.typeOf(lhs); + const rhs_ty = sema.typeOf(rhs); if (rhs_ty.zigTypeTag() == .Bool and lhs_ty.zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 'bool'", .{}); @@ -2699,7 +2700,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE const src = inst_data.src(); const operand_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; const operand = sema.resolveInst(inst_data.operand); - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); const enum_tag: Air.Inst.Ref = switch (operand_ty.zigTypeTag()) { .Enum => operand, @@ -2720,7 +2721,7 @@ fn zirEnumToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE }); }, }; - const enum_tag_ty = sema.getTypeOf(enum_tag); + const enum_tag_ty = sema.typeOf(enum_tag); var int_tag_type_buffer: Type.Payload.Bits = undefined; const int_tag_ty = try enum_tag_ty.intTagType(&int_tag_type_buffer).copy(arena); @@ -2821,7 +2822,7 @@ fn zirOptionalPayloadPtr( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const optional_ptr = sema.resolveInst(inst_data.operand); - const 
optional_ptr_ty = sema.getTypeOf(optional_ptr); + const optional_ptr_ty = sema.typeOf(optional_ptr); assert(optional_ptr_ty.zigTypeTag() == .Pointer); const src = inst_data.src(); @@ -2863,7 +2864,7 @@ fn zirOptionalPayload( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); const opt_type = operand_ty; if (opt_type.zigTypeTag() != .Optional) { return sema.mod.fail(&block.base, src, "expected optional type, found {}", .{opt_type}); @@ -2900,7 +2901,7 @@ fn zirErrUnionPayload( const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); const operand_src = src; - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, operand_src, "expected error union type, found '{}'", .{operand_ty}); @@ -2936,7 +2937,7 @@ fn zirErrUnionPayloadPtr( const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag() == .Pointer); if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) @@ -2976,7 +2977,7 @@ fn zirErrUnionCode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compi const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand_ty}); @@ -3000,7 +3001,7 @@ fn zirErrUnionCodePtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co const inst_data = sema.code.instructions.items(.data)[inst].un_node; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); assert(operand_ty.zigTypeTag() == .Pointer); if (operand_ty.elemType().zigTypeTag() != .ErrorUnion) @@ -3026,7 +3027,7 @@ fn zirEnsureErrPayloadVoid(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_tok; const src = inst_data.src(); const operand = sema.resolveInst(inst_data.operand); - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); if (operand_ty.zigTypeTag() != .ErrorUnion) return sema.mod.fail(&block.base, src, "expected error union type, found '{}'", .{operand_ty}); if (operand_ty.castTag(.error_union).?.data.payload.zigTypeTag() != .Void) { @@ -3214,7 +3215,6 @@ fn funcCommon( .state = anal_state, .zir_body_inst = body_inst, .owner_decl = sema.owner_decl, - .body = undefined, .lbrace_line = src_locs.lbrace_line, .rbrace_line = src_locs.rbrace_line, .lbrace_column = @truncate(u16, src_locs.columns), @@ -3283,12 +3283,13 @@ fn zirFieldVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr const extra = sema.code.extraData(Zir.Inst.Field, inst_data.payload_index).data; const field_name = sema.code.nullTerminatedString(extra.field_name_start); const object = sema.resolveInst(extra.lhs); - const object_ptr = if (sema.getTypeOf(object).zigTypeTag() == .Pointer) + const object_ptr = if 
(sema.typeOf(object).zigTypeTag() == .Pointer) object else try sema.analyzeRef(block, src, object); const result_ptr = try sema.namedFieldPtr(block, src, object_ptr, field_name, field_name_src); - return sema.analyzeLoad(block, src, result_ptr, result_ptr.src); + const result_ptr_src = src; + return sema.analyzeLoad(block, src, result_ptr, result_ptr_src); } fn zirFieldPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3356,7 +3357,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr ), }; - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag()) { .ComptimeInt, .Int => {}, else => return sema.mod.fail( @@ -3414,7 +3415,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE ), }; - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); switch (operand_ty.zigTypeTag()) { .ComptimeFloat, .Float, .ComptimeInt => {}, else => return sema.mod.fail( @@ -3440,7 +3441,7 @@ fn zirElemVal(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr const bin_inst = sema.code.instructions.items(.data)[inst].bin; const array = sema.resolveInst(bin_inst.lhs); - const array_ty = sema.getTypeOf(array); + const array_ty = sema.typeOf(array); const array_ptr = if (array_ty.zigTypeTag() == .Pointer) array else @@ -3459,7 +3460,7 @@ fn zirElemValNode(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compil const elem_index_src: LazySrcLoc = .{ .node_offset_array_access_index = inst_data.src_node }; const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const array = sema.resolveInst(extra.lhs); - const array_ty = sema.getTypeOf(array); + const array_ty = sema.typeOf(array); const array_ptr = if (array_ty.zigTypeTag() == .Pointer) array else @@ -3502,7 +3503,7 @@ fn zirSliceStart(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile const array_ptr = sema.resolveInst(extra.lhs); const start = sema.resolveInst(extra.start); - return sema.analyzeSlice(block, src, array_ptr, start, null, null, .unneeded); + return sema.analyzeSlice(block, src, array_ptr, start, .none, .none, .unneeded); } fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3516,7 +3517,7 @@ fn zirSliceEnd(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr const start = sema.resolveInst(extra.start); const end = sema.resolveInst(extra.end); - return sema.analyzeSlice(block, src, array_ptr, start, end, null, .unneeded); + return sema.analyzeSlice(block, src, array_ptr, start, end, .none, .unneeded); } fn zirSliceSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3668,7 +3669,7 @@ fn analyzeSwitch( const src: LazySrcLoc = .{ .node_offset = src_node_offset }; const special_prong_src: LazySrcLoc = .{ .node_offset_switch_special_prong = src_node_offset }; const operand_src: LazySrcLoc = .{ .node_offset_switch_operand = src_node_offset }; - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); // Validate usage of '_' prongs. 
if (special_prong == .under and !operand_ty.isNonexhaustiveEnum()) { @@ -4590,8 +4591,8 @@ fn zirBitwise( const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - const lhs_ty = sema.getTypeOf(lhs); - const rhs_ty = sema.getTypeOf(rhs); + const lhs_ty = sema.typeOf(lhs); + const rhs_ty = sema.typeOf(rhs); const instructions = &[_]Air.Inst.Ref{ lhs, rhs }; const resolved_type = try sema.resolvePeerTypes(block, src, instructions); @@ -4722,8 +4723,8 @@ fn analyzeArithmetic( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const lhs_ty = sema.getTypeOf(lhs); - const rhs_ty = sema.getTypeOf(rhs); + const lhs_ty = sema.typeOf(lhs); + const rhs_ty = sema.typeOf(rhs); if (lhs_ty.zigTypeTag() == .Vector and rhs_ty.zigTypeTag() == .Vector) { if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) { return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{ @@ -4944,8 +4945,8 @@ fn zirCmp( .eq, .neq => true, else => false, }; - const lhs_ty = sema.getTypeOf(lhs); - const rhs_ty = sema.getTypeOf(rhs); + const lhs_ty = sema.typeOf(lhs); + const rhs_ty = sema.typeOf(rhs); const lhs_ty_tag = lhs_ty.zigTypeTag(); const rhs_ty_tag = rhs_ty.zigTypeTag(); if (is_equality_cmp and lhs_ty_tag == .Null and rhs_ty_tag == .Null) { @@ -5007,8 +5008,8 @@ fn zirCmp( if (!is_equality_cmp) { return mod.fail(&block.base, src, "{s} operator not allowed for types", .{@tagName(op)}); } - const lhs_as_type = try sema.resolveAirAsType(block, lhs_src, lhs); - const rhs_as_type = try sema.resolveAirAsType(block, rhs_src, rhs); + const lhs_as_type = try sema.analyzeAsType(block, lhs_src, lhs); + const rhs_as_type = try sema.analyzeAsType(block, rhs_src, rhs); if (lhs_as_type.eql(rhs_as_type) == (op == .eq)) { return Air.Inst.Ref.bool_true; } else { @@ -5144,7 +5145,7 @@ fn zirTypeof(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErro const zir_datas = sema.code.instructions.items(.data); const inst_data = zir_datas[inst].un_node; const operand = sema.resolveInst(inst_data.operand); - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); return sema.addType(operand_ty); } @@ -6375,7 +6376,7 @@ fn addSafetyCheck( const block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); const cond_br_inst = block_inst + 1; const br_inst = cond_br_inst + 1; - sema.air_instructions.appendAssumeCapacity(gpa, .{ + sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ .ty = .void_type, @@ -6386,7 +6387,7 @@ fn addSafetyCheck( }); sema.air_extra.appendAssumeCapacity(cond_br_inst); - sema.air_instructions.appendAssumeCapacity(gpa, .{ + sema.air_instructions.appendAssumeCapacity(.{ .tag = .cond_br, .data = .{ .pl_op = .{ .operand = ok, @@ -6399,7 +6400,7 @@ fn addSafetyCheck( sema.air_extra.appendAssumeCapacity(br_inst); sema.air_extra.appendSliceAssumeCapacity(fail_block.instructions.items); - sema.air_instructions.appendAssumeCapacity(gpa, .{ + sema.air_instructions.appendAssumeCapacity(.{ .tag = .br, .data = .{ .br = .{ .block_inst = block_inst, @@ -6497,9 +6498,11 @@ fn namedFieldPtr( const mod = sema.mod; const arena = sema.arena; - const elem_ty = switch (object_ptr.ty.zigTypeTag()) { - .Pointer => object_ptr.ty.elemType(), - else => return mod.fail(&block.base, object_ptr.src, "expected pointer, found '{}'", .{object_ptr.ty}), + const object_ptr_src = src; // TODO better source location + const object_ptr_ty = 
sema.typeOf(object_ptr); + const elem_ty = switch (object_ptr_ty.zigTypeTag()) { + .Pointer => object_ptr_ty.elemType(), + else => return mod.fail(&block.base, object_ptr_src, "expected pointer, found '{}'", .{object_ptr_ty}), }; switch (elem_ty.zigTypeTag()) { .Array => { @@ -6545,9 +6548,9 @@ fn namedFieldPtr( } }, .Type => { - _ = try sema.resolveConstValue(block, object_ptr.src, object_ptr); - const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr.src); - const val = result.value().?; + _ = try sema.resolveConstValue(block, object_ptr_src, object_ptr); + const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src); + const val = (sema.resolveDefinedValue(block, src, result) catch unreachable).?; const child_type = try val.toType(arena); switch (child_type.zigTypeTag()) { .ErrorSet => { @@ -6694,7 +6697,16 @@ fn analyzeStructFieldPtr( } try sema.requireRuntimeBlock(block, src); - return block.addStructFieldPtr(src, ptr_field_ty, struct_ptr, @intCast(u32, field_index)); + return block.addInst(.{ + .tag = .struct_field_ptr, + .data = .{ .ty_pl = .{ + .ty = try sema.addType(ptr_field_ty), + .payload = try sema.addExtra(Air.StructField{ + .struct_ptr = struct_ptr, + .field_index = @intCast(u32, field_index), + }), + } }, + }); } fn analyzeUnionFieldPtr( @@ -6742,16 +6754,18 @@ fn elemPtr( elem_index: Air.Inst.Ref, elem_index_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const array_ty = switch (array_ptr.ty.zigTypeTag()) { - .Pointer => array_ptr.ty.elemType(), - else => return sema.mod.fail(&block.base, array_ptr.src, "expected pointer, found '{}'", .{array_ptr.ty}), + const array_ptr_src = src; // TODO better source location + const array_ptr_ty = sema.typeOf(array_ptr); + const array_ty = switch (array_ptr_ty.zigTypeTag()) { + .Pointer => array_ptr_ty.elemType(), + else => return sema.mod.fail(&block.base, array_ptr_src, "expected pointer, found '{}'", .{array_ptr_ty}), }; if (!array_ty.isIndexable()) { return sema.mod.fail(&block.base, src, "array access of non-array type '{}'", .{array_ty}); } if (array_ty.isSinglePointer() and array_ty.elemType().zigTypeTag() == .Array) { // we have to deref the ptr operand to get the actual array pointer - const array_ptr_deref = try sema.analyzeLoad(block, src, array_ptr, array_ptr.src); + const array_ptr_deref = try sema.analyzeLoad(block, src, array_ptr, array_ptr_src); return sema.elemPtrArray(block, src, array_ptr_deref, elem_index, elem_index_src); } if (array_ty.zigTypeTag() == .Array) { @@ -6776,7 +6790,7 @@ fn elemPtrArray( // @intCast here because it would have been impossible to construct a value that // required a larger index. const elem_ptr = try array_ptr_val.elemPtr(sema.arena, @intCast(usize, index_u64)); - const pointee_type = array_ptr.ty.elemType().elemType(); + const pointee_type = sema.typeOf(array_ptr).elemType().elemType(); return sema.addConstant( try Type.Tag.single_const_pointer.create(sema.arena, pointee_type), @@ -6800,7 +6814,7 @@ fn coerce( return sema.coerceVarArgParam(block, inst, inst_src); } - const inst_ty = sema.getTypeOf(inst); + const inst_ty = sema.typeOf(inst); // If the types are the same, we can return the operand. 
if (dest_type.eql(inst_ty)) return inst; @@ -7021,7 +7035,7 @@ fn coerceVarArgParam( inst: Air.Inst.Ref, inst_src: LazySrcLoc, ) !Air.Inst.Ref { - const inst_ty = sema.getTypeOf(inst); + const inst_ty = sema.typeOf(inst); switch (inst_ty.zigTypeTag()) { .ComptimeInt, .ComptimeFloat => return sema.mod.fail(&block.base, inst_src, "integer and float literals in var args function must be casted", .{}), else => {}, @@ -7170,8 +7184,8 @@ fn analyzeRef( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const operand_ty = sema.getTypeOf(operand); - const ptr_type = try sema.mod.simplePtrType(sema.arena, operand_ty, false, .One); + const operand_ty = sema.typeOf(operand); + const ptr_type = try Module.simplePtrType(sema.arena, operand_ty, false, .One); if (try sema.resolvePossiblyUndefinedValue(block, src, operand)) |val| { return sema.addConstant(ptr_type, try Value.Tag.ref_val.create(sema.arena, val)); @@ -7188,7 +7202,7 @@ fn analyzeLoad( ptr: Air.Inst.Ref, ptr_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const ptr_ty = sema.getTypeOf(ptr); + const ptr_ty = sema.typeOf(ptr); const elem_ty = switch (ptr_ty.zigTypeTag()) { .Pointer => ptr_ty.elemType(), else => return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}), @@ -7235,7 +7249,7 @@ fn analyzeIsNonErr( src: LazySrcLoc, operand: Air.Inst.Ref, ) CompileError!Air.Inst.Ref { - const operand_ty = sema.getTypeOf(operand); + const operand_ty = sema.typeOf(operand); const ot = operand_ty.zigTypeTag(); if (ot != .ErrorSet and ot != .ErrorUnion) return Air.Inst.Ref.bool_true; if (ot == .ErrorSet) return Air.Inst.Ref.bool_false; @@ -7261,13 +7275,14 @@ fn analyzeSlice( src: LazySrcLoc, array_ptr: Air.Inst.Ref, start: Air.Inst.Ref, - end_opt: ?Air.Inst.Index, - sentinel_opt: ?Air.Inst.Index, + end_opt: Air.Inst.Ref, + sentinel_opt: Air.Inst.Ref, sentinel_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const ptr_child = switch (array_ptr.ty.zigTypeTag()) { - .Pointer => array_ptr.ty.elemType(), - else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr.ty}), + const array_ptr_ty = sema.typeOf(array_ptr); + const ptr_child = switch (array_ptr_ty.zigTypeTag()) { + .Pointer => array_ptr_ty.elemType(), + else => return sema.mod.fail(&block.base, src, "expected pointer, found '{}'", .{array_ptr_ty}), }; var array_type = ptr_child; @@ -7287,15 +7302,15 @@ fn analyzeSlice( else => return sema.mod.fail(&block.base, src, "slice of non-array type '{}'", .{ptr_child}), }; - const slice_sentinel = if (sentinel_opt) |sentinel| blk: { - const casted = try sema.coerce(block, elem_type, sentinel, sentinel.src); + const slice_sentinel = if (sentinel_opt != .none) blk: { + const casted = try sema.coerce(block, elem_type, sentinel_opt, sentinel_src); break :blk try sema.resolveConstValue(block, sentinel_src, casted); } else null; var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice; var return_elem_type = elem_type; - if (end_opt) |end| { - if (try sema.resolveDefinedValue(block, src, end)) |end_val| { + if (end_opt != .none) { + if (try sema.resolveDefinedValue(block, src, end_opt)) |end_val| { if (try sema.resolveDefinedValue(block, src, start)) |start_val| { const start_u64 = start_val.toUnsignedInt(); const end_u64 = end_val.toUnsignedInt(); @@ -7316,7 +7331,7 @@ fn analyzeSlice( const return_type = try sema.mod.ptrType( sema.arena, return_elem_type, - if (end_opt == null) slice_sentinel else null, + if (end_opt == .none) slice_sentinel else null, 0, // TODO alignment 
0, 0, @@ -7341,8 +7356,8 @@ fn cmpNumeric( lhs_src: LazySrcLoc, rhs_src: LazySrcLoc, ) CompileError!Air.Inst.Ref { - const lhs_ty = sema.getTypeOf(lhs); - const rhs_ty = sema.getTypeOf(rhs); + const lhs_ty = sema.typeOf(lhs); + const rhs_ty = sema.typeOf(rhs); assert(lhs_ty.isNumeric()); assert(rhs_ty.isNumeric()); @@ -7609,14 +7624,14 @@ fn resolvePeerTypes( return Type.initTag(.noreturn); if (instructions.len == 1) - return sema.getTypeOf(instructions[0]); + return sema.typeOf(instructions[0]); const target = sema.mod.getTarget(); var chosen = instructions[0]; for (instructions[1..]) |candidate| { - const candidate_ty = sema.getTypeOf(candidate); - const chosen_ty = sema.getTypeOf(chosen); + const candidate_ty = sema.typeOf(candidate); + const chosen_ty = sema.typeOf(chosen); if (candidate_ty.eql(chosen_ty)) continue; if (candidate_ty.zigTypeTag() == .NoReturn) @@ -7677,7 +7692,7 @@ fn resolvePeerTypes( return sema.mod.fail(&block.base, src, "incompatible types: '{}' and '{}'", .{ chosen_ty, candidate_ty }); } - return sema.getTypeOf(chosen); + return sema.typeOf(chosen); } fn resolveTypeFields(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, ty: Type) CompileError!Type { @@ -7753,7 +7768,7 @@ fn getBuiltin( "builtin", ); const builtin_inst = try sema.analyzeLoad(block, src, opt_builtin_inst.?, src); - const builtin_ty = try sema.resolveAirAsType(block, src, builtin_inst); + const builtin_ty = try sema.analyzeAsType(block, src, builtin_inst); const opt_ty_inst = try sema.analyzeNamespaceLookup( block, src, @@ -7770,7 +7785,7 @@ fn getBuiltinType( name: []const u8, ) CompileError!Type { const ty_inst = try sema.getBuiltin(block, src, name); - return sema.resolveAirAsType(block, src, ty_inst); + return sema.analyzeAsType(block, src, ty_inst); } /// There is another implementation of this in `Type.onePossibleValue`. This one @@ -7970,70 +7985,104 @@ fn enumFieldSrcLoc( } else unreachable; } -/// Returns the type of the AIR instruction. 
-fn getTypeOf(sema: *Sema, air_ref: Air.Inst.Ref) Type { - switch (air_ref) { - .none => unreachable, - .u8_type => return Type.initTag(.u8), - .i8_type => return Type.initTag(.i8), - .u16_type => return Type.initTag(.u16), - .i16_type => return Type.initTag(.i16), - .u32_type => return Type.initTag(.u32), - .i32_type => return Type.initTag(.i32), - .u64_type => return Type.initTag(.u64), - .i64_type => return Type.initTag(.i64), - .u128_type => return Type.initTag(.u128), - .i128_type => return Type.initTag(.i128), - .usize_type => return Type.initTag(.usize), - .isize_type => return Type.initTag(.isize), - .c_short_type => return Type.initTag(.c_short), - .c_ushort_type => return Type.initTag(.c_ushort), - .c_int_type => return Type.initTag(.c_int), - .c_uint_type => return Type.initTag(.c_uint), - .c_long_type => return Type.initTag(.c_long), - .c_ulong_type => return Type.initTag(.c_ulong), - .c_longlong_type => return Type.initTag(.c_longlong), - .c_ulonglong_type => return Type.initTag(.c_ulonglong), - .c_longdouble_type => return Type.initTag(.c_longdouble), - .f16_type => return Type.initTag(.f16), - .f32_type => return Type.initTag(.f32), - .f64_type => return Type.initTag(.f64), - .f128_type => return Type.initTag(.f128), - .c_void_type => return Type.initTag(.c_void), - .bool_type => return Type.initTag(.bool), - .void_type => return Type.initTag(.void), - .type_type => return Type.initTag(.type), - .anyerror_type => return Type.initTag(.anyerror), - .comptime_int_type => return Type.initTag(.comptime_int), - .comptime_float_type => return Type.initTag(.comptime_float), - .noreturn_type => return Type.initTag(.noreturn), - .anyframe_type => return Type.initTag(.@"anyframe"), - .null_type => return Type.initTag(.@"null"), - .undefined_type => return Type.initTag(.@"undefined"), - .enum_literal_type => return Type.initTag(.enum_literal), - .atomic_ordering_type => return Type.initTag(.atomic_ordering), - .atomic_rmw_op_type => return Type.initTag(.atomic_rmw_op), - .calling_convention_type => return Type.initTag(.calling_convention), - .float_mode_type => return Type.initTag(.float_mode), - .reduce_op_type => return Type.initTag(.reduce_op), - .call_options_type => return Type.initTag(.call_options), - .export_options_type => return Type.initTag(.export_options), - .extern_options_type => return Type.initTag(.extern_options), - .manyptr_u8_type => return Type.initTag(.manyptr_u8), - .manyptr_const_u8_type => return Type.initTag(.manyptr_const_u8), - .fn_noreturn_no_args_type => return Type.initTag(.fn_noreturn_no_args), - .fn_void_no_args_type => return Type.initTag(.fn_void_no_args), - .fn_naked_noreturn_no_args_type => return Type.initTag(.fn_naked_noreturn_no_args), - .fn_ccc_void_no_args_type => return Type.initTag(.fn_ccc_void_no_args), - .single_const_pointer_to_comptime_int_type => return Type.initTag(.single_const_pointer_to_comptime_int), - .const_slice_u8_type => return Type.initTag(.const_slice_u8), - else => {}, +/// This is only meant to be called by `typeOf`. 
+fn analyzeAsTypeInfallible(sema: *Sema, inst: Air.Inst.Ref) Type { + var i: usize = @enumToInt(inst); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; } - const air_index = @as(usize, @enumToInt(air_ref)) - Air.Inst.Ref.typed_value_map.len; - const air_tags = sema.air_instructions.items(.tag); + i -= Air.Inst.Ref.typed_value_map.len; + assert(sema.air_instructions.items(.tag)[i] == .const_ty); + return sema.air_instructions.items(.data)[i].ty; +} + +/// Returns the type of the AIR instruction. +fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { + var i: usize = @enumToInt(inst); + if (i < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[i].ty; + } + i -= Air.Inst.Ref.typed_value_map.len; + const air_datas = sema.air_instructions.items(.data); - assert(air_tags[air_index] == .const_ty); - return air_datas[air_index].ty; + switch (sema.air_instructions.items(.tag)[i]) { + .arg => return sema.analyzeAsTypeInfallible(air_datas[i].ty_str.ty), + + .add, + .addwrap, + .sub, + .subwrap, + .mul, + .mulwrap, + .div, + .bit_and, + .bit_or, + .xor, + => return sema.typeOf(air_datas[i].bin_op.lhs), + + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_non_err, + .is_err_ptr, + .is_non_err_ptr, + .bool_and, + .bool_or, + => return Type.initTag(.bool), + + .const_ty => return Type.initTag(.type), + + .alloc => return air_datas[i].ty, + + .assembly, + .block, + .constant, + .varptr, + .struct_field_ptr, + => return sema.analyzeAsTypeInfallible(air_datas[i].ty_pl.ty), + + .not, + .bitcast, + .load, + .ref, + .floatcast, + .intcast, + .optional_payload, + .optional_payload_ptr, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .wrap_errunion_payload, + .wrap_errunion_err, + => return sema.analyzeAsTypeInfallible(air_datas[i].ty_op.ty), + + .loop, + .br, + .cond_br, + .switch_br, + .ret, + .unreach, + => return Type.initTag(.noreturn), + + .breakpoint, + .dbg_stmt, + .store, + => return Type.initTag(.void), + + .ptrtoint => return Type.initTag(.usize), + + .call => @panic("TODO Sema.typeOf call"), + } } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { From 12c10139e3e0166e91d2dbb1801c2054ca12d413 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Jul 2021 13:36:46 -0700 Subject: [PATCH 20/53] Sema: finish reworking for AIR memory layout except switch --- src/Sema.zig | 611 +++++++++++++++++++++++++++----------------------- src/value.zig | 2 +- 2 files changed, 330 insertions(+), 283 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 268f7bc903..ac6755d24e 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -276,101 +276,101 @@ pub fn analyzeBody( //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), - //.type_info => try sema.zirTypeInfo(block, inst), - //.size_of => try sema.zirSizeOf(block, inst), - //.bit_size_of => try sema.zirBitSizeOf(block, inst), - //.typeof => try sema.zirTypeof(block, inst), - //.typeof_elem => try sema.zirTypeofElem(block, inst), - //.log2_int_type => try sema.zirLog2IntType(block, inst), - //.typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), - //.xor => try sema.zirBitwise(block, 
inst, .xor), - //.struct_init_empty => try sema.zirStructInitEmpty(block, inst), - //.struct_init => try sema.zirStructInit(block, inst, false), - //.struct_init_ref => try sema.zirStructInit(block, inst, true), - //.struct_init_anon => try sema.zirStructInitAnon(block, inst, false), - //.struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), - //.array_init => try sema.zirArrayInit(block, inst, false), - //.array_init_ref => try sema.zirArrayInit(block, inst, true), - //.array_init_anon => try sema.zirArrayInitAnon(block, inst, false), - //.array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), - //.union_init_ptr => try sema.zirUnionInitPtr(block, inst), - //.field_type => try sema.zirFieldType(block, inst), - //.field_type_ref => try sema.zirFieldTypeRef(block, inst), - //.ptr_to_int => try sema.zirPtrToInt(block, inst), - //.align_of => try sema.zirAlignOf(block, inst), - //.bool_to_int => try sema.zirBoolToInt(block, inst), - //.embed_file => try sema.zirEmbedFile(block, inst), - //.error_name => try sema.zirErrorName(block, inst), - //.tag_name => try sema.zirTagName(block, inst), - //.reify => try sema.zirReify(block, inst), - //.type_name => try sema.zirTypeName(block, inst), - //.frame_type => try sema.zirFrameType(block, inst), - //.frame_size => try sema.zirFrameSize(block, inst), - //.float_to_int => try sema.zirFloatToInt(block, inst), - //.int_to_float => try sema.zirIntToFloat(block, inst), - //.int_to_ptr => try sema.zirIntToPtr(block, inst), - //.float_cast => try sema.zirFloatCast(block, inst), - //.int_cast => try sema.zirIntCast(block, inst), - //.err_set_cast => try sema.zirErrSetCast(block, inst), - //.ptr_cast => try sema.zirPtrCast(block, inst), - //.truncate => try sema.zirTruncate(block, inst), - //.align_cast => try sema.zirAlignCast(block, inst), - //.has_decl => try sema.zirHasDecl(block, inst), - //.has_field => try sema.zirHasField(block, inst), - //.clz => try sema.zirClz(block, inst), - //.ctz => try sema.zirCtz(block, inst), - //.pop_count => try sema.zirPopCount(block, inst), - //.byte_swap => try sema.zirByteSwap(block, inst), - //.bit_reverse => try sema.zirBitReverse(block, inst), - //.div_exact => try sema.zirDivExact(block, inst), - //.div_floor => try sema.zirDivFloor(block, inst), - //.div_trunc => try sema.zirDivTrunc(block, inst), - //.mod => try sema.zirMod(block, inst), - //.rem => try sema.zirRem(block, inst), - //.shl_exact => try sema.zirShlExact(block, inst), - //.shr_exact => try sema.zirShrExact(block, inst), - //.bit_offset_of => try sema.zirBitOffsetOf(block, inst), - //.offset_of => try sema.zirOffsetOf(block, inst), - //.cmpxchg_strong => try sema.zirCmpxchg(block, inst), - //.cmpxchg_weak => try sema.zirCmpxchg(block, inst), - //.splat => try sema.zirSplat(block, inst), - //.reduce => try sema.zirReduce(block, inst), - //.shuffle => try sema.zirShuffle(block, inst), - //.atomic_load => try sema.zirAtomicLoad(block, inst), - //.atomic_rmw => try sema.zirAtomicRmw(block, inst), - //.atomic_store => try sema.zirAtomicStore(block, inst), - //.mul_add => try sema.zirMulAdd(block, inst), - //.builtin_call => try sema.zirBuiltinCall(block, inst), - //.field_ptr_type => try sema.zirFieldPtrType(block, inst), - //.field_parent_ptr => try sema.zirFieldParentPtr(block, inst), - //.memcpy => try sema.zirMemcpy(block, inst), - //.memset => try sema.zirMemset(block, inst), - //.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), - //.@"resume" => try sema.zirResume(block, inst), - //.@"await" => try 
sema.zirAwait(block, inst, false), - //.await_nosuspend => try sema.zirAwait(block, inst, true), - //.extended => try sema.zirExtended(block, inst), + .type_info => try sema.zirTypeInfo(block, inst), + .size_of => try sema.zirSizeOf(block, inst), + .bit_size_of => try sema.zirBitSizeOf(block, inst), + .typeof => try sema.zirTypeof(block, inst), + .typeof_elem => try sema.zirTypeofElem(block, inst), + .log2_int_type => try sema.zirLog2IntType(block, inst), + .typeof_log2_int_type => try sema.zirTypeofLog2IntType(block, inst), + .xor => try sema.zirBitwise(block, inst, .xor), + .struct_init_empty => try sema.zirStructInitEmpty(block, inst), + .struct_init => try sema.zirStructInit(block, inst, false), + .struct_init_ref => try sema.zirStructInit(block, inst, true), + .struct_init_anon => try sema.zirStructInitAnon(block, inst, false), + .struct_init_anon_ref => try sema.zirStructInitAnon(block, inst, true), + .array_init => try sema.zirArrayInit(block, inst, false), + .array_init_ref => try sema.zirArrayInit(block, inst, true), + .array_init_anon => try sema.zirArrayInitAnon(block, inst, false), + .array_init_anon_ref => try sema.zirArrayInitAnon(block, inst, true), + .union_init_ptr => try sema.zirUnionInitPtr(block, inst), + .field_type => try sema.zirFieldType(block, inst), + .field_type_ref => try sema.zirFieldTypeRef(block, inst), + .ptr_to_int => try sema.zirPtrToInt(block, inst), + .align_of => try sema.zirAlignOf(block, inst), + .bool_to_int => try sema.zirBoolToInt(block, inst), + .embed_file => try sema.zirEmbedFile(block, inst), + .error_name => try sema.zirErrorName(block, inst), + .tag_name => try sema.zirTagName(block, inst), + .reify => try sema.zirReify(block, inst), + .type_name => try sema.zirTypeName(block, inst), + .frame_type => try sema.zirFrameType(block, inst), + .frame_size => try sema.zirFrameSize(block, inst), + .float_to_int => try sema.zirFloatToInt(block, inst), + .int_to_float => try sema.zirIntToFloat(block, inst), + .int_to_ptr => try sema.zirIntToPtr(block, inst), + .float_cast => try sema.zirFloatCast(block, inst), + .int_cast => try sema.zirIntCast(block, inst), + .err_set_cast => try sema.zirErrSetCast(block, inst), + .ptr_cast => try sema.zirPtrCast(block, inst), + .truncate => try sema.zirTruncate(block, inst), + .align_cast => try sema.zirAlignCast(block, inst), + .has_decl => try sema.zirHasDecl(block, inst), + .has_field => try sema.zirHasField(block, inst), + .clz => try sema.zirClz(block, inst), + .ctz => try sema.zirCtz(block, inst), + .pop_count => try sema.zirPopCount(block, inst), + .byte_swap => try sema.zirByteSwap(block, inst), + .bit_reverse => try sema.zirBitReverse(block, inst), + .div_exact => try sema.zirDivExact(block, inst), + .div_floor => try sema.zirDivFloor(block, inst), + .div_trunc => try sema.zirDivTrunc(block, inst), + .mod => try sema.zirMod(block, inst), + .rem => try sema.zirRem(block, inst), + .shl_exact => try sema.zirShlExact(block, inst), + .shr_exact => try sema.zirShrExact(block, inst), + .bit_offset_of => try sema.zirBitOffsetOf(block, inst), + .offset_of => try sema.zirOffsetOf(block, inst), + .cmpxchg_strong => try sema.zirCmpxchg(block, inst), + .cmpxchg_weak => try sema.zirCmpxchg(block, inst), + .splat => try sema.zirSplat(block, inst), + .reduce => try sema.zirReduce(block, inst), + .shuffle => try sema.zirShuffle(block, inst), + .atomic_load => try sema.zirAtomicLoad(block, inst), + .atomic_rmw => try sema.zirAtomicRmw(block, inst), + .atomic_store => try sema.zirAtomicStore(block, inst), + .mul_add => try 
sema.zirMulAdd(block, inst), + .builtin_call => try sema.zirBuiltinCall(block, inst), + .field_ptr_type => try sema.zirFieldPtrType(block, inst), + .field_parent_ptr => try sema.zirFieldParentPtr(block, inst), + .memcpy => try sema.zirMemcpy(block, inst), + .memset => try sema.zirMemset(block, inst), + .builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst), + .@"resume" => try sema.zirResume(block, inst), + .@"await" => try sema.zirAwait(block, inst, false), + .await_nosuspend => try sema.zirAwait(block, inst, true), + .extended => try sema.zirExtended(block, inst), - //.sqrt => try sema.zirUnaryMath(block, inst), - //.sin => try sema.zirUnaryMath(block, inst), - //.cos => try sema.zirUnaryMath(block, inst), - //.exp => try sema.zirUnaryMath(block, inst), - //.exp2 => try sema.zirUnaryMath(block, inst), - //.log => try sema.zirUnaryMath(block, inst), - //.log2 => try sema.zirUnaryMath(block, inst), - //.log10 => try sema.zirUnaryMath(block, inst), - //.fabs => try sema.zirUnaryMath(block, inst), - //.floor => try sema.zirUnaryMath(block, inst), - //.ceil => try sema.zirUnaryMath(block, inst), - //.trunc => try sema.zirUnaryMath(block, inst), - //.round => try sema.zirUnaryMath(block, inst), + .sqrt => try sema.zirUnaryMath(block, inst), + .sin => try sema.zirUnaryMath(block, inst), + .cos => try sema.zirUnaryMath(block, inst), + .exp => try sema.zirUnaryMath(block, inst), + .exp2 => try sema.zirUnaryMath(block, inst), + .log => try sema.zirUnaryMath(block, inst), + .log2 => try sema.zirUnaryMath(block, inst), + .log10 => try sema.zirUnaryMath(block, inst), + .fabs => try sema.zirUnaryMath(block, inst), + .floor => try sema.zirUnaryMath(block, inst), + .ceil => try sema.zirUnaryMath(block, inst), + .trunc => try sema.zirUnaryMath(block, inst), + .round => try sema.zirUnaryMath(block, inst), - //.opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), - //.opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), - //.opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), - //.error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), - //.error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), - //.error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), + .opaque_decl => try sema.zirOpaqueDecl(block, inst, .parent), + .opaque_decl_anon => try sema.zirOpaqueDecl(block, inst, .anon), + .opaque_decl_func => try sema.zirOpaqueDecl(block, inst, .func), + .error_set_decl => try sema.zirErrorSetDecl(block, inst, .parent), + .error_set_decl_anon => try sema.zirErrorSetDecl(block, inst, .anon), + .error_set_decl_func => try sema.zirErrorSetDecl(block, inst, .func), .add => try sema.zirArithmetic(block, inst), .addwrap => try sema.zirArithmetic(block, inst), @@ -396,106 +396,106 @@ pub fn analyzeBody( .panic => return sema.zirPanic(block, inst), // zig fmt: on - //// Instructions that we know can *never* be noreturn based solely on - //// their tag. We avoid needlessly checking if they are noreturn and - //// continue the loop. - //// We also know that they cannot be referenced later, so we avoid - //// putting them into the map. 
- //.breakpoint => { - // try sema.zirBreakpoint(block, inst); - // i += 1; - // continue; - //}, - //.fence => { - // try sema.zirFence(block, inst); - // i += 1; - // continue; - //}, - //.dbg_stmt => { - // try sema.zirDbgStmt(block, inst); - // i += 1; - // continue; - //}, - //.ensure_err_payload_void => { - // try sema.zirEnsureErrPayloadVoid(block, inst); - // i += 1; - // continue; - //}, - //.ensure_result_non_error => { - // try sema.zirEnsureResultNonError(block, inst); - // i += 1; - // continue; - //}, - //.ensure_result_used => { - // try sema.zirEnsureResultUsed(block, inst); - // i += 1; - // continue; - //}, - //.set_eval_branch_quota => { - // try sema.zirSetEvalBranchQuota(block, inst); - // i += 1; - // continue; - //}, - //.store => { - // try sema.zirStore(block, inst); - // i += 1; - // continue; - //}, - //.store_node => { - // try sema.zirStoreNode(block, inst); - // i += 1; - // continue; - //}, - //.store_to_block_ptr => { - // try sema.zirStoreToBlockPtr(block, inst); - // i += 1; - // continue; - //}, - //.store_to_inferred_ptr => { - // try sema.zirStoreToInferredPtr(block, inst); - // i += 1; - // continue; - //}, - //.resolve_inferred_alloc => { - // try sema.zirResolveInferredAlloc(block, inst); - // i += 1; - // continue; - //}, - //.validate_struct_init_ptr => { - // try sema.zirValidateStructInitPtr(block, inst); - // i += 1; - // continue; - //}, - //.validate_array_init_ptr => { - // try sema.zirValidateArrayInitPtr(block, inst); - // i += 1; - // continue; - //}, - //.@"export" => { - // try sema.zirExport(block, inst); - // i += 1; - // continue; - //}, - //.set_align_stack => { - // try sema.zirSetAlignStack(block, inst); - // i += 1; - // continue; - //}, - //.set_cold => { - // try sema.zirSetCold(block, inst); - // i += 1; - // continue; - //}, - //.set_float_mode => { - // try sema.zirSetFloatMode(block, inst); - // i += 1; - // continue; - //}, - //.set_runtime_safety => { - // try sema.zirSetRuntimeSafety(block, inst); - // i += 1; - // continue; - //}, + // Instructions that we know can *never* be noreturn based solely on + // their tag. We avoid needlessly checking if they are noreturn and + // continue the loop. + // We also know that they cannot be referenced later, so we avoid + // putting them into the map. 
+ .breakpoint => { + try sema.zirBreakpoint(block, inst); + i += 1; + continue; + }, + .fence => { + try sema.zirFence(block, inst); + i += 1; + continue; + }, + .dbg_stmt => { + try sema.zirDbgStmt(block, inst); + i += 1; + continue; + }, + .ensure_err_payload_void => { + try sema.zirEnsureErrPayloadVoid(block, inst); + i += 1; + continue; + }, + .ensure_result_non_error => { + try sema.zirEnsureResultNonError(block, inst); + i += 1; + continue; + }, + .ensure_result_used => { + try sema.zirEnsureResultUsed(block, inst); + i += 1; + continue; + }, + .set_eval_branch_quota => { + try sema.zirSetEvalBranchQuota(block, inst); + i += 1; + continue; + }, + .store => { + try sema.zirStore(block, inst); + i += 1; + continue; + }, + .store_node => { + try sema.zirStoreNode(block, inst); + i += 1; + continue; + }, + .store_to_block_ptr => { + try sema.zirStoreToBlockPtr(block, inst); + i += 1; + continue; + }, + .store_to_inferred_ptr => { + try sema.zirStoreToInferredPtr(block, inst); + i += 1; + continue; + }, + .resolve_inferred_alloc => { + try sema.zirResolveInferredAlloc(block, inst); + i += 1; + continue; + }, + .validate_struct_init_ptr => { + try sema.zirValidateStructInitPtr(block, inst); + i += 1; + continue; + }, + .validate_array_init_ptr => { + try sema.zirValidateArrayInitPtr(block, inst); + i += 1; + continue; + }, + .@"export" => { + try sema.zirExport(block, inst); + i += 1; + continue; + }, + .set_align_stack => { + try sema.zirSetAlignStack(block, inst); + i += 1; + continue; + }, + .set_cold => { + try sema.zirSetCold(block, inst); + i += 1; + continue; + }, + .set_float_mode => { + try sema.zirSetFloatMode(block, inst); + i += 1; + continue; + }, + .set_runtime_safety => { + try sema.zirSetRuntimeSafety(block, inst); + i += 1; + continue; + }, // Special case instructions to handle comptime control flow. 
.repeat_inline => { @@ -562,7 +562,7 @@ fn zirExtended(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr .frame_address => return sema.zirFrameAddress( block, extended), .alloc => return sema.zirAllocExtended( block, extended), .builtin_extern => return sema.zirBuiltinExtern( block, extended), - .@"asm" => return sema.zirAsm( block, extended), + .@"asm" => return sema.zirAsm( block, extended, inst), .typeof_peer => return sema.zirTypeofPeer( block, extended), .compile_log => return sema.zirCompileLog( block, extended), .add_with_overflow => return sema.zirOverflowArithmetic(block, extended), @@ -1400,11 +1400,14 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = sema.resolveInst(inst_data.operand); - const ptr_val = ptr.castTag(.constant).?.val; + const ptr_inst = refToIndex(ptr).?; + assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); + const air_datas = sema.air_instructions.items(.data); + const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; const peer_inst_list = inferred_alloc.data.stored_inst_list.items; const final_elem_ty = try sema.resolvePeerTypes(block, ty_src, peer_inst_list); - const var_is_mut = switch (ptr.ty.tag()) { + const var_is_mut = switch (sema.typeOf(ptr).tag()) { .inferred_alloc_const => false, .inferred_alloc_mut => true, else => unreachable, @@ -1415,8 +1418,10 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const final_ptr_ty = try Module.simplePtrType(sema.arena, final_elem_ty, true, .One); // Change it to a normal alloc. - ptr.ty = final_ptr_ty; - ptr.tag = .alloc; + sema.air_instructions.set(ptr_inst, .{ + .tag = .alloc, + .data = .{ .ty = final_ptr_ty }, + }); } fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -1434,7 +1439,7 @@ fn zirValidateStructInitPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Ind const field_ptr_data = sema.code.instructions.items(.data)[instrs[0]].pl_node; const field_ptr_extra = sema.code.extraData(Zir.Inst.Field, field_ptr_data.payload_index).data; const object_ptr = sema.resolveInst(field_ptr_extra.lhs); - break :s object_ptr.ty.elemType().castTag(.@"struct").?.data; + break :s sema.typeOf(object_ptr).elemType().castTag(.@"struct").?.data; }; // Maps field index to field_ptr index of where it was already initialized. @@ -1564,7 +1569,7 @@ fn zirStoreToBlockPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Co } const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); - const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); + const ptr_ty = try Module.simplePtrType(sema.arena, sema.typeOf(value), true, .One); // TODO detect when this store should be done at compile-time. For example, // if expressions should force it when the condition is compile-time known. 
const src: LazySrcLoc = .unneeded; @@ -1581,12 +1586,16 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const bin_inst = sema.code.instructions.items(.data)[inst].bin; const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); - const inferred_alloc = ptr.castTag(.constant).?.val.castTag(.inferred_alloc).?; + const ptr_inst = refToIndex(ptr).?; + assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); + const air_datas = sema.air_instructions.items(.data); + const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; + const inferred_alloc = ptr_val.castTag(.inferred_alloc).?; // Add the stored instruction to the set we will use to resolve peer types // for the inferred allocation. try inferred_alloc.data.stored_inst_list.append(sema.arena, value); // Create a runtime bitcast instruction with exactly the type the pointer wants. - const ptr_ty = try Module.simplePtrType(sema.arena, value.ty, true, .One); + const ptr_ty = try Module.simplePtrType(sema.arena, sema.typeOf(value), true, .One); try sema.requireRuntimeBlock(block, src); const bitcasted_ptr = try block.addTyOp(.bitcast, ptr_ty, ptr); return sema.storePtr(block, src, bitcasted_ptr, value); @@ -1767,10 +1776,11 @@ fn zirCompileLog( if (i != 0) try writer.print(", ", .{}); const arg = sema.resolveInst(arg_ref); + const arg_ty = sema.typeOf(arg); if (try sema.resolvePossiblyUndefinedValue(block, src, arg)) |val| { - try writer.print("@as({}, {})", .{ arg.ty, val }); + try writer.print("@as({}, {})", .{ arg_ty, val }); } else { - try writer.print("@as({}, [runtime value])", .{arg.ty}); + try writer.print("@as({}, [runtime value])", .{arg_ty}); } } try writer.print("\n", .{}); @@ -2157,7 +2167,13 @@ fn zirDbgStmt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr if (block.is_comptime) return; const inst_data = sema.code.instructions.items(.data)[inst].dbg_stmt; - _ = try block.addDbgStmt(.unneeded, inst_data.line, inst_data.column); + _ = try block.addInst(.{ + .tag = .dbg_stmt, + .data = .{ .dbg_stmt = .{ + .line = inst_data.line, + .column = inst_data.column, + } }, + }); } fn zirDeclRef(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref { @@ -3263,9 +3279,10 @@ fn zirPtrToInt(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileEr const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ptr = sema.resolveInst(inst_data.operand); - if (ptr.ty.zigTypeTag() != .Pointer) { + const ptr_ty = sema.typeOf(ptr); + if (ptr_ty.zigTypeTag() != .Pointer) { const ptr_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node }; - return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr.ty}); + return sema.mod.fail(&block.base, ptr_src, "expected pointer, found '{}'", .{ptr_ty}); } // TODO handle known-pointer-address const src = inst_data.src(); @@ -3368,7 +3385,7 @@ fn zirIntCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr ), } - if (operand.value() != null) { + if (try sema.isComptimeKnown(block, operand_src, operand)) { return sema.coerce(block, dest_type, operand, operand_src); } else if (dest_is_comptime_int) { return sema.mod.fail(&block.base, src, "unable to cast runtime value to 'comptime_int'", .{}); @@ -3426,7 +3443,7 @@ fn zirFloatCast(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE ), } - if (operand.value() != null) { + if (try sema.isComptimeKnown(block, operand_src, operand)) { return 
sema.coerce(block, dest_type, operand, operand_src); } else if (dest_is_comptime_float) { return sema.mod.fail(&block.base, src, "unable to cast runtime value to 'comptime_float'", .{}); @@ -4843,19 +4860,17 @@ fn zirAsm( sema: *Sema, block: *Scope.Block, extended: Zir.Inst.Extended.InstData, + inst: Zir.Inst.Index, ) CompileError!Air.Inst.Ref { const tracy = trace(@src()); defer tracy.end(); const extra = sema.code.extraData(Zir.Inst.Asm, extended.operand); const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; - const asm_source_src: LazySrcLoc = .{ .node_offset_asm_source = extra.data.src_node }; const ret_ty_src: LazySrcLoc = .{ .node_offset_asm_ret_ty = extra.data.src_node }; - const asm_source = try sema.resolveConstString(block, asm_source_src, extra.data.asm_source); const outputs_len = @truncate(u5, extended.small); const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); - const is_volatile = @truncate(u1, extended.small >> 15) != 0; if (outputs_len > 1) { return sema.mod.fail(&block.base, src, "TODO implement Sema for asm with more than 1 output", .{}); @@ -4883,7 +4898,7 @@ fn zirAsm( }; }; - const args = try sema.arena.alloc(Air.Inst.Index, inputs_len); + const args = try sema.arena.alloc(Air.Inst.Ref, inputs_len); const inputs = try sema.arena.alloc([]const u8, inputs_len); for (args) |*arg, arg_i| { @@ -4904,22 +4919,19 @@ fn zirAsm( } try sema.requireRuntimeBlock(block, src); - const asm_air = try sema.arena.create(Inst.Assembly); - asm_air.* = .{ - .base = .{ - .tag = .assembly, - .ty = if (output) |o| o.ty else Type.initTag(.void), - .src = src, - }, - .asm_source = asm_source, - .is_volatile = is_volatile, - .output_constraint = if (output) |o| o.constraint else null, - .inputs = inputs, - .clobbers = clobbers, - .args = args, - }; - try block.instructions.append(sema.gpa, &asm_air.base); - return &asm_air.base; + const gpa = sema.gpa; + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Asm).Struct.fields.len + args.len); + const asm_air = try block.addInst(.{ + .tag = .assembly, + .data = .{ .ty_pl = .{ + .ty = if (output) |o| try sema.addType(o.ty) else Air.Inst.Ref.void_type, + .payload = sema.addExtraAssumeCapacity(Air.Asm{ + .zir_index = inst, + }), + } }, + }); + sema.appendRefsAssumeCapacity(args); + return asm_air; } fn zirCmp( @@ -5153,7 +5165,7 @@ fn zirTypeofElem(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compile _ = block; const inst_data = sema.code.instructions.items(.data)[inst].un_node; const operand_ptr = sema.resolveInst(inst_data.operand); - const elem_ty = operand_ptr.ty.elemType(); + const elem_ty = sema.typeOf(operand_ptr).elemType(); return sema.addType(elem_ty); } @@ -5181,7 +5193,7 @@ fn zirTypeofPeer( const src: LazySrcLoc = .{ .node_offset = extra.data.src_node }; const args = sema.code.refSlice(extra.end, extended.small); - const inst_list = try sema.gpa.alloc(Air.Inst.Index, args.len); + const inst_list = try sema.gpa.alloc(Air.Inst.Ref, args.len); defer sema.gpa.free(inst_list); for (args) |arg_ref, i| { @@ -5623,7 +5635,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: mem.set(Zir.Inst.Index, found_fields, 0); // The init values to use for the struct instance. 
- const field_inits = try gpa.alloc(Air.Inst.Index, struct_obj.fields.count()); + const field_inits = try gpa.alloc(Air.Inst.Ref, struct_obj.fields.count()); defer gpa.free(field_inits); var field_i: u32 = 0; @@ -5692,7 +5704,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: } const is_comptime = for (field_inits) |field_init| { - if (field_init.value() == null) { + if (!(try sema.isComptimeKnown(block, src, field_init))) { break false; } } else true; @@ -5700,7 +5712,7 @@ fn zirStructInit(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index, is_ref: if (is_comptime) { const values = try sema.arena.alloc(Value, field_inits.len); for (field_inits) |field_init, i| { - values[i] = field_init.value().?; + values[i] = (sema.resolvePossiblyUndefinedValue(block, src, field_init) catch unreachable).?; } return sema.addConstant(struct_ty, try Value.Tag.@"struct".create(sema.arena, values.ptr)); } @@ -6835,17 +6847,13 @@ fn coerce( } assert(inst_ty.zigTypeTag() != .Undefined); - if (true) { - @panic("TODO finish AIR memory layout rework"); - } - // T to E!T or E to E!T if (dest_type.tag() == .error_union) { return try sema.wrapErrorUnion(block, dest_type, inst, inst_src); } // comptime known number to other number - if (try sema.coerceNum(block, dest_type, inst)) |some| + if (try sema.coerceNum(block, dest_type, inst, inst_src)) |some| return some; const target = mod.getTarget(); @@ -6861,9 +6869,9 @@ fn coerce( var buf: Type.Payload.ElemType = undefined; const child_type = dest_type.optionalChild(&buf); if (child_type.eql(inst_ty)) { - return sema.wrapOptional(block, dest_type, inst); - } else if (try sema.coerceNum(block, child_type, inst)) |some| { - return sema.wrapOptional(block, dest_type, some); + return sema.wrapOptional(block, dest_type, inst, inst_src); + } else if (try sema.coerceNum(block, child_type, inst, inst_src)) |some| { + return sema.wrapOptional(block, dest_type, some, inst_src); } }, .Pointer => { @@ -6885,11 +6893,11 @@ fn coerce( switch (dest_type.ptrSize()) { .Slice => { // *[N]T to []T - return sema.coerceArrayPtrToSlice(block, dest_type, inst); + return sema.coerceArrayPtrToSlice(block, dest_type, inst, inst_src); }, .C => { // *[N]T to [*c]T - return sema.coerceArrayPtrToMany(block, dest_type, inst); + return sema.coerceArrayPtrToMany(block, dest_type, inst, inst_src); }, .Many => { // *[N]T to [*]T @@ -6897,12 +6905,12 @@ fn coerce( const src_sentinel = array_type.sentinel(); const dst_sentinel = dest_type.sentinel(); if (src_sentinel == null and dst_sentinel == null) - return sema.coerceArrayPtrToMany(block, dest_type, inst); + return sema.coerceArrayPtrToMany(block, dest_type, inst, inst_src); if (src_sentinel) |src_s| { if (dst_sentinel) |dst_s| { if (src_s.eql(dst_s)) { - return sema.coerceArrayPtrToMany(block, dest_type, inst); + return sema.coerceArrayPtrToMany(block, dest_type, inst, inst_src); } } } @@ -6914,7 +6922,7 @@ fn coerce( .Int => { // integer widening if (inst_ty.zigTypeTag() == .Int) { - assert(inst.value() == null); // handled above + assert(!(try sema.isComptimeKnown(block, inst_src, inst))); // handled above const dst_info = dest_type.intInfo(target); const src_info = inst_ty.intInfo(target); @@ -6930,7 +6938,7 @@ fn coerce( .Float => { // float widening if (inst_ty.zigTypeTag() == .Float) { - assert(inst.value() == null); // handled above + assert(!(try sema.isComptimeKnown(block, inst_src, inst))); // handled above const src_bits = inst_ty.floatBits(target); const dst_bits = dest_type.floatBits(target); @@ 
-6991,9 +6999,16 @@ fn coerceInMemoryAllowed(dest_type: Type, src_type: Type) InMemoryCoercionResult return .no_match; } -fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!?Air.Inst.Index { - const val = inst.value() orelse return null; - const src_zig_tag = inst.ty.zigTypeTag(); +fn coerceNum( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) CompileError!?Air.Inst.Ref { + const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse return null; + const inst_ty = sema.typeOf(inst); + const src_zig_tag = inst_ty.zigTypeTag(); const dst_zig_tag = dest_type.zigTypeTag(); const target = sema.mod.getTarget(); @@ -7001,29 +7016,29 @@ fn coerceNum(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.R if (dst_zig_tag == .ComptimeInt or dst_zig_tag == .Int) { if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { if (val.floatHasFraction()) { - return sema.mod.fail(&block.base, inst.src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst.ty }); + return sema.mod.fail(&block.base, inst_src, "fractional component prevents float value {} from being casted to type '{}'", .{ val, inst_ty }); } - return sema.mod.fail(&block.base, inst.src, "TODO float to int", .{}); + return sema.mod.fail(&block.base, inst_src, "TODO float to int", .{}); } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { if (!val.intFitsInType(dest_type, target)) { - return sema.mod.fail(&block.base, inst.src, "type {} cannot represent integer value {}", .{ inst.ty, val }); + return sema.mod.fail(&block.base, inst_src, "type {} cannot represent integer value {}", .{ inst_ty, val }); } - return sema.addConstant(dest_type, val); + return try sema.addConstant(dest_type, val); } } else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) { if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) { const res = val.floatCast(sema.arena, dest_type, target) catch |err| switch (err) { error.Overflow => return sema.mod.fail( &block.base, - inst.src, + inst_src, "cast of value {} to type '{}' loses information", .{ val, dest_type }, ), error.OutOfMemory => return error.OutOfMemory, }; - return sema.addConstant(dest_type, res); + return try sema.addConstant(dest_type, res); } else if (src_zig_tag == .Int or src_zig_tag == .ComptimeInt) { - return sema.mod.fail(&block.base, inst.src, "TODO int to float", .{}); + return sema.mod.fail(&block.base, inst_src, "TODO int to float", .{}); } } return null; @@ -7051,10 +7066,11 @@ fn storePtr( ptr: Air.Inst.Ref, uncasted_value: Air.Inst.Ref, ) !void { - if (ptr.ty.isConstPtr()) + const ptr_ty = sema.typeOf(ptr); + if (ptr_ty.isConstPtr()) return sema.mod.fail(&block.base, src, "cannot assign to constant", .{}); - const elem_ty = ptr.ty.elemType(); + const elem_ty = ptr_ty.elemType(); const value = try sema.coerce(block, elem_ty, uncasted_value, src); if ((try sema.typeHasOnePossibleValue(block, src, elem_ty)) != null) return; @@ -7113,20 +7129,32 @@ fn bitcast( return block.addTyOp(.bitcast, dest_type, inst); } -fn coerceArrayPtrToSlice(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) CompileError!Air.Inst.Ref { - if (inst.value()) |val| { +fn coerceArrayPtrToSlice( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) CompileError!Air.Inst.Ref { + if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| { // The comptime Value 
representation is compatible with both types. return sema.addConstant(dest_type, val); } - return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); + return sema.mod.fail(&block.base, inst_src, "TODO implement coerceArrayPtrToSlice runtime instruction", .{}); } -fn coerceArrayPtrToMany(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Ref { - if (inst.value()) |val| { +fn coerceArrayPtrToMany( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Ref { + if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| { // The comptime Value representation is compatible with both types. return sema.addConstant(dest_type, val); } - return sema.mod.fail(&block.base, inst.src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); + return sema.mod.fail(&block.base, inst_src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { @@ -7531,12 +7559,18 @@ fn cmpNumeric( return block.addBinOp(Air.Inst.Tag.fromCmpOp(op), casted_lhs, casted_rhs); } -fn wrapOptional(sema: *Sema, block: *Scope.Block, dest_type: Type, inst: Air.Inst.Ref) !Air.Inst.Index { +fn wrapOptional( + sema: *Sema, + block: *Scope.Block, + dest_type: Type, + inst: Air.Inst.Ref, + inst_src: LazySrcLoc, +) !Air.Inst.Ref { if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { return sema.addConstant(dest_type, val); } - try sema.requireRuntimeBlock(block, inst.src); + try sema.requireRuntimeBlock(block, inst_src); return block.addTyOp(.wrap_optional, dest_type, inst); } @@ -7546,11 +7580,12 @@ fn wrapErrorUnion( dest_type: Type, inst: Air.Inst.Ref, inst_src: LazySrcLoc, -) !Air.Inst.Index { +) !Air.Inst.Ref { + const inst_ty = sema.typeOf(inst); const err_union = dest_type.castTag(.error_union).?; if (try sema.resolvePossiblyUndefinedValue(block, inst_src, inst)) |val| { - if (inst.ty.zigTypeTag() != .ErrorSet) { - _ = try sema.coerce(block, err_union.data.payload, inst, inst.src); + if (inst_ty.zigTypeTag() != .ErrorSet) { + _ = try sema.coerce(block, err_union.data.payload, inst, inst_src); } else switch (err_union.data.error_set.tag()) { .anyerror => {}, .error_set_single => { @@ -7559,9 +7594,9 @@ fn wrapErrorUnion( if (!mem.eql(u8, expected_name, n)) { return sema.mod.fail( &block.base, - inst.src, + inst_src, "expected type '{}', found type '{}'", - .{ err_union.data.error_set, inst.ty }, + .{ err_union.data.error_set, inst_ty }, ); } }, @@ -7577,9 +7612,9 @@ fn wrapErrorUnion( if (!found) { return sema.mod.fail( &block.base, - inst.src, + inst_src, "expected type '{}', found type '{}'", - .{ err_union.data.error_set, inst.ty }, + .{ err_union.data.error_set, inst_ty }, ); } }, @@ -7589,9 +7624,9 @@ fn wrapErrorUnion( if (!map.contains(expected_name)) { return sema.mod.fail( &block.base, - inst.src, + inst_src, "expected type '{}', found type '{}'", - .{ err_union.data.error_set, inst.ty }, + .{ err_union.data.error_set, inst_ty }, ); } }, @@ -7602,14 +7637,14 @@ fn wrapErrorUnion( return sema.addConstant(dest_type, try Value.Tag.error_union.create(sema.arena, val)); } - try sema.requireRuntimeBlock(block, inst.src); + try sema.requireRuntimeBlock(block, inst_src); // we are coercing from E to E!T - if (inst.ty.zigTypeTag() == .ErrorSet) { - var coerced = try sema.coerce(block, err_union.data.error_set, inst, inst.src); + if 
(inst_ty.zigTypeTag() == .ErrorSet) { + var coerced = try sema.coerce(block, err_union.data.error_set, inst, inst_src); return block.addTyOp(.wrap_errunion_err, dest_type, coerced); } else { - var coerced = try sema.coerce(block, err_union.data.payload, inst, inst.src); + var coerced = try sema.coerce(block, err_union.data.payload, inst, inst_src); return block.addTyOp(.wrap_errunion_payload, dest_type, coerced); } } @@ -8081,7 +8116,10 @@ fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { .ptrtoint => return Type.initTag(.usize), - .call => @panic("TODO Sema.typeOf call"), + .call => { + const callee_ty = sema.typeOf(air_datas[i].pl_op.operand); + return callee_ty.fnReturnType(); + }, } } @@ -8219,3 +8257,12 @@ fn getBreakBlock(sema: *Sema, inst_index: Air.Inst.Index) ?Air.Inst.Index { else => return null, } } + +fn isComptimeKnown( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + inst: Air.Inst.Ref, +) !bool { + return (try sema.resolvePossiblyUndefinedValue(block, src, inst)) != null; +} diff --git a/src/value.zig b/src/value.zig index 0f7194d8c1..df3a97b09a 100644 --- a/src/value.zig +++ b/src/value.zig @@ -1700,7 +1700,7 @@ pub const Value = extern union { /// peer type resolution. This is stored in a separate list so that /// the items are contiguous in memory and thus can be passed to /// `Module.resolvePeerTypes`. - stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Index) = .{}, + stored_inst_list: std.ArrayListUnmanaged(Air.Inst.Ref) = .{}, }, }; From eadbee2041bba1cd03b24d8f30161025af8e3590 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 15 Jul 2021 15:52:06 -0700 Subject: [PATCH 21/53] stage2: first pass at printing AIR/Liveness to text * some instructions are not implemented yet * fix off-by-1 in Air.getMainBody * Compilation: use `@import("builtin")` rather than `std.builtin` for the values that are different for different build configurations. * Sema: avoid calling `addType` in between air_instructions.ensureUnusedCapacity and corresponding appendAssumeCapacity because it can possibly add an instruction. * Value: functions print their names --- BRANCH_TODO | 566 -------------------------------------------- src/Air.zig | 4 +- src/Compilation.zig | 9 +- src/Module.zig | 3 +- src/Sema.zig | 3 +- src/print_air.zig | 294 +++++++++++++++++++++++ src/value.zig | 2 +- 7 files changed, 307 insertions(+), 574 deletions(-) delete mode 100644 BRANCH_TODO create mode 100644 src/print_air.zig diff --git a/BRANCH_TODO b/BRANCH_TODO deleted file mode 100644 index 9055cda307..0000000000 --- a/BRANCH_TODO +++ /dev/null @@ -1,566 +0,0 @@ - * be sure to test debug info of parameters - - - pub fn specialOperandDeaths(self: Inst) bool { - return (self.deaths & (1 << deaths_bits)) != 0; - } - - /// Returns `null` if runtime-known. - /// Should be called by codegen, not by Sema. Sema functions should call - /// `resolvePossiblyUndefinedValue` or `resolveDefinedValue` instead. - /// TODO audit Sema code for violations to the above guidance. - pub fn value(base: *Inst) ?Value { - if (base.ty.onePossibleValue()) |opv| return opv; - - const inst = base.castTag(.constant) orelse return null; - return inst.val; - } - - - -/// For debugging purposes, prints a function representation to stderr. 
-pub fn dumpFn(old_module: Module, module_fn: *Module.Fn) void {
-    const allocator = old_module.gpa;
-    var ctx: DumpAir = .{
-        .allocator = allocator,
-        .arena = std.heap.ArenaAllocator.init(allocator),
-        .old_module = &old_module,
-        .module_fn = module_fn,
-        .indent = 2,
-        .inst_table = DumpAir.InstTable.init(allocator),
-        .partial_inst_table = DumpAir.InstTable.init(allocator),
-        .const_table = DumpAir.InstTable.init(allocator),
-    };
-    defer ctx.inst_table.deinit();
-    defer ctx.partial_inst_table.deinit();
-    defer ctx.const_table.deinit();
-    defer ctx.arena.deinit();
-
-    switch (module_fn.state) {
-        .queued => std.debug.print("(queued)", .{}),
-        .inline_only => std.debug.print("(inline_only)", .{}),
-        .in_progress => std.debug.print("(in_progress)", .{}),
-        .sema_failure => std.debug.print("(sema_failure)", .{}),
-        .dependency_failure => std.debug.print("(dependency_failure)", .{}),
-        .success => {
-            const writer = std.io.getStdErr().writer();
-            ctx.dump(module_fn.body, writer) catch @panic("failed to dump AIR");
-        },
-    }
-}
-
-const DumpAir = struct {
-    allocator: *std.mem.Allocator,
-    arena: std.heap.ArenaAllocator,
-    old_module: *const Module,
-    module_fn: *Module.Fn,
-    indent: usize,
-    inst_table: InstTable,
-    partial_inst_table: InstTable,
-    const_table: InstTable,
-    next_index: usize = 0,
-    next_partial_index: usize = 0,
-    next_const_index: usize = 0,
-
-    const InstTable = std.AutoArrayHashMap(*Inst, usize);
-
-    /// TODO: Improve this code to include a stack of Body and store the instructions
-    /// in there. Now we are putting all the instructions in a function-local table;
-    /// however, instructions that are in a Body can be thrown away when the Body ends.
-    fn dump(dtz: *DumpAir, body: Body, writer: std.fs.File.Writer) !void {
-        // First pass to pre-populate the table so that we can show even invalid references.
-        // Must iterate in the same order we iterate the second time.
-        // We also look for constants and put them in the const_table.
- try dtz.fetchInstsAndResolveConsts(body); - - std.debug.print("Module.Function(name={s}):\n", .{dtz.module_fn.owner_decl.name}); - - var it = dtz.const_table.iterator(); - while (it.next()) |entry| { - const constant = entry.key_ptr.*.castTag(.constant).?; - try writer.print(" @{d}: {} = {};\n", .{ - entry.value_ptr.*, constant.base.ty, constant.val, - }); - } - - return dtz.dumpBody(body, writer); - } - - fn fetchInstsAndResolveConsts(dtz: *DumpAir, body: Body) error{OutOfMemory}!void { - for (body.instructions) |inst| { - try dtz.inst_table.put(inst, dtz.next_index); - dtz.next_index += 1; - switch (inst.tag) { - .alloc, - .retvoid, - .unreach, - .breakpoint, - .dbg_stmt, - .arg, - => {}, - - .ref, - .ret, - .bitcast, - .not, - .is_non_null, - .is_non_null_ptr, - .is_null, - .is_null_ptr, - .is_err, - .is_non_err, - .is_err_ptr, - .is_non_err_ptr, - .ptrtoint, - .floatcast, - .intcast, - .load, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .wrap_errunion_payload, - .wrap_errunion_err, - .unwrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - => { - const un_op = inst.cast(Inst.UnOp).?; - try dtz.findConst(un_op.operand); - }, - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .store, - .bool_and, - .bool_or, - .bit_and, - .bit_or, - .xor, - => { - const bin_op = inst.cast(Inst.BinOp).?; - try dtz.findConst(bin_op.lhs); - try dtz.findConst(bin_op.rhs); - }, - - .br => { - const br = inst.castTag(.br).?; - try dtz.findConst(&br.block.base); - try dtz.findConst(br.operand); - }, - - .br_block_flat => { - const br_block_flat = inst.castTag(.br_block_flat).?; - try dtz.findConst(&br_block_flat.block.base); - try dtz.fetchInstsAndResolveConsts(br_block_flat.body); - }, - - .br_void => { - const br_void = inst.castTag(.br_void).?; - try dtz.findConst(&br_void.block.base); - }, - - .block => { - const block = inst.castTag(.block).?; - try dtz.fetchInstsAndResolveConsts(block.body); - }, - - .condbr => { - const condbr = inst.castTag(.condbr).?; - try dtz.findConst(condbr.condition); - try dtz.fetchInstsAndResolveConsts(condbr.then_body); - try dtz.fetchInstsAndResolveConsts(condbr.else_body); - }, - .switchbr => { - const switchbr = inst.castTag(.switchbr).?; - try dtz.findConst(switchbr.target); - try dtz.fetchInstsAndResolveConsts(switchbr.else_body); - for (switchbr.cases) |case| { - try dtz.fetchInstsAndResolveConsts(case.body); - } - }, - - .loop => { - const loop = inst.castTag(.loop).?; - try dtz.fetchInstsAndResolveConsts(loop.body); - }, - .call => { - const call = inst.castTag(.call).?; - try dtz.findConst(call.func); - for (call.args) |arg| { - try dtz.findConst(arg); - } - }, - .struct_field_ptr => { - const struct_field_ptr = inst.castTag(.struct_field_ptr).?; - try dtz.findConst(struct_field_ptr.struct_ptr); - }, - - // TODO fill out this debug printing - .assembly, - .constant, - .varptr, - => {}, - } - } - } - - fn dumpBody(dtz: *DumpAir, body: Body, writer: std.fs.File.Writer) (std.fs.File.WriteError || error{OutOfMemory})!void { - for (body.instructions) |inst| { - const my_index = dtz.next_partial_index; - try dtz.partial_inst_table.put(inst, my_index); - dtz.next_partial_index += 1; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.print("%{d}: {} = {s}(", .{ - my_index, inst.ty, @tagName(inst.tag), - }); - switch (inst.tag) { - .alloc, - .retvoid, - .unreach, - .breakpoint, - .dbg_stmt, - => try 
writer.writeAll(")\n"), - - .ref, - .ret, - .bitcast, - .not, - .is_non_null, - .is_non_null_ptr, - .is_null, - .is_null_ptr, - .is_err, - .is_err_ptr, - .is_non_err, - .is_non_err_ptr, - .ptrtoint, - .floatcast, - .intcast, - .load, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .wrap_errunion_err, - .wrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - => { - const un_op = inst.cast(Inst.UnOp).?; - const kinky = try dtz.writeInst(writer, un_op.operand); - if (kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .store, - .bool_and, - .bool_or, - .bit_and, - .bit_or, - .xor, - => { - const bin_op = inst.cast(Inst.BinOp).?; - - const lhs_kinky = try dtz.writeInst(writer, bin_op.lhs); - try writer.writeAll(", "); - const rhs_kinky = try dtz.writeInst(writer, bin_op.rhs); - - if (lhs_kinky != null or rhs_kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (lhs_kinky) |lhs| { - try writer.print(" %{d}", .{lhs}); - } - if (rhs_kinky) |rhs| { - try writer.print(" %{d}", .{rhs}); - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .arg => { - const arg = inst.castTag(.arg).?; - try writer.print("{s})\n", .{arg.name}); - }, - - .br => { - const br = inst.castTag(.br).?; - - const lhs_kinky = try dtz.writeInst(writer, &br.block.base); - try writer.writeAll(", "); - const rhs_kinky = try dtz.writeInst(writer, br.operand); - - if (lhs_kinky != null or rhs_kinky != null) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (lhs_kinky) |lhs| { - try writer.print(" %{d}", .{lhs}); - } - if (rhs_kinky) |rhs| { - try writer.print(" %{d}", .{rhs}); - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .br_block_flat => { - const br_block_flat = inst.castTag(.br_block_flat).?; - const block_kinky = try dtz.writeInst(writer, &br_block_flat.block.base); - if (block_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(br_block_flat.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .br_void => { - const br_void = inst.castTag(.br_void).?; - const kinky = try dtz.writeInst(writer, &br_void.block.base); - if (kinky) |_| { - try writer.writeAll(") // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .block => { - const block = inst.castTag(.block).?; - - try writer.writeAll("{\n"); - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(block.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .condbr => { - const condbr = inst.castTag(.condbr).?; - - const condition_kinky = try dtz.writeInst(writer, condbr.condition); - if (condition_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(condbr.then_body, writer); - - try 
writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - - try dtz.dumpBody(condbr.else_body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("})\n"); - }, - - .switchbr => { - const switchbr = inst.castTag(.switchbr).?; - - const condition_kinky = try dtz.writeInst(writer, switchbr.target); - if (condition_kinky != null) { - try writer.writeAll(", { // Instruction does not dominate all uses!\n"); - } else { - try writer.writeAll(", {\n"); - } - const old_indent = dtz.indent; - - if (switchbr.else_body.instructions.len != 0) { - dtz.indent += 2; - try dtz.dumpBody(switchbr.else_body, writer); - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - dtz.indent = old_indent; - } - for (switchbr.cases) |case| { - dtz.indent += 2; - try dtz.dumpBody(case.body, writer); - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("}, {\n"); - dtz.indent = old_indent; - } - - try writer.writeByteNTimes(' ', old_indent); - try writer.writeAll("})\n"); - }, - - .loop => { - const loop = inst.castTag(.loop).?; - - try writer.writeAll("{\n"); - - const old_indent = dtz.indent; - dtz.indent += 2; - try dtz.dumpBody(loop.body, writer); - dtz.indent = old_indent; - - try writer.writeByteNTimes(' ', dtz.indent); - try writer.writeAll("})\n"); - }, - - .call => { - const call = inst.castTag(.call).?; - - const args_kinky = try dtz.allocator.alloc(?usize, call.args.len); - defer dtz.allocator.free(args_kinky); - std.mem.set(?usize, args_kinky, null); - var any_kinky_args = false; - - const func_kinky = try dtz.writeInst(writer, call.func); - - for (call.args) |arg, i| { - try writer.writeAll(", "); - - args_kinky[i] = try dtz.writeInst(writer, arg); - any_kinky_args = any_kinky_args or args_kinky[i] != null; - } - - if (func_kinky != null or any_kinky_args) { - try writer.writeAll(") // Instruction does not dominate all uses!"); - if (func_kinky) |func_index| { - try writer.print(" %{d}", .{func_index}); - } - for (args_kinky) |arg_kinky| { - if (arg_kinky) |arg_index| { - try writer.print(" %{d}", .{arg_index}); - } - } - try writer.writeAll("\n"); - } else { - try writer.writeAll(")\n"); - } - }, - - .struct_field_ptr => { - const struct_field_ptr = inst.castTag(.struct_field_ptr).?; - const kinky = try dtz.writeInst(writer, struct_field_ptr.struct_ptr); - if (kinky != null) { - try writer.print("{d}) // Instruction does not dominate all uses!\n", .{ - struct_field_ptr.field_index, - }); - } else { - try writer.print("{d})\n", .{struct_field_ptr.field_index}); - } - }, - - // TODO fill out this debug printing - .assembly, - .constant, - .varptr, - => { - try writer.writeAll("!TODO!)\n"); - }, - } - } - } - - fn writeInst(dtz: *DumpAir, writer: std.fs.File.Writer, inst: *Inst) !?usize { - if (dtz.partial_inst_table.get(inst)) |operand_index| { - try writer.print("%{d}", .{operand_index}); - return null; - } else if (dtz.const_table.get(inst)) |operand_index| { - try writer.print("@{d}", .{operand_index}); - return null; - } else if (dtz.inst_table.get(inst)) |operand_index| { - try writer.print("%{d}", .{operand_index}); - return operand_index; - } else { - try writer.writeAll("!BADREF!"); - return null; - } - } - - fn findConst(dtz: *DumpAir, operand: *Inst) !void { - if (operand.tag == .constant) { - try dtz.const_table.put(operand, dtz.next_const_index); - dtz.next_const_index += 1; - } - } -}; - -pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { - const zir_module = 
scope.namespace(); - const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); - const loc = std.zig.findLineColumn(source, inst.src); - if (inst.tag == .constant) { - std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ - inst.ty, - inst.castTag(.constant).?.val, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else if (inst.deaths == 0) { - std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else { - std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - inst.deaths, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } -} - - /// For debugging purposes. - pub fn dump(func: *Fn, mod: Module) void { - ir.dumpFn(mod, func); - } - diff --git a/src/Air.zig b/src/Air.zig index 60e6e9933d..a8b38b7659 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -374,8 +374,8 @@ pub const Asm = struct { pub fn getMainBody(air: Air) []const Air.Inst.Index { const body_index = air.extra[@enumToInt(ExtraIndex.main_block)]; - const body_len = air.extra[body_index]; - return air.extra[body_index..][0..body_len]; + const extra = air.extraData(Block, body_index); + return air.extra[extra.end..][0..extra.data.body_len]; } pub fn getType(air: Air, inst: Air.Inst.Index) Type { diff --git a/src/Compilation.zig b/src/Compilation.zig index f241ae6b10..50d1f5760e 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -1,6 +1,7 @@ const Compilation = @This(); const std = @import("std"); +const builtin = @import("builtin"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; @@ -907,7 +908,7 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation { // comptime conditions ((build_options.have_llvm and comptime std.Target.current.isDarwin()) and // runtime conditions - (use_lld and std.builtin.os.tag == .macos and options.target.isDarwin())); + (use_lld and builtin.os.tag == .macos and options.target.isDarwin())); const sysroot = blk: { if (options.sysroot) |sysroot| { @@ -2026,8 +2027,10 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); defer liveness.deinit(gpa); - if (std.builtin.mode == .Debug and self.verbose_air) { - @panic("TODO implement dumping AIR and liveness"); + if (builtin.mode == .Debug and self.verbose_air) { + std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); + @import("print_air.zig").dump(gpa, air, liveness); + std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); } assert(decl.ty.hasCodeGenBits()); diff --git a/src/Module.zig b/src/Module.zig index fb514ccbd2..f452824d33 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -3551,7 +3551,8 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { try sema.analyzeFnBody(&inner_block, func.zir_body_inst); // Copy the block into place and mark that as the main block. 
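The Module.zig hunk just below fixes a capacity bug: the reservation must cover the `Air.Block` header fields as well as the body itself. A minimal sketch of how a `Block` payload sits in the `extra` array (the `appendBlock` helper name is hypothetical, not from the patch):

    fn appendBlock(
        gpa: *std.mem.Allocator,
        extra: *std.ArrayListUnmanaged(u32),
        body: []const u32,
    ) !u32 {
        const payload_index = @intCast(u32, extra.items.len);
        // 1 header field (Block.body_len) plus one u32 per body instruction.
        try extra.ensureUnusedCapacity(gpa, 1 + body.len);
        extra.appendAssumeCapacity(@intCast(u32, body.len)); // Block.body_len
        extra.appendSliceAssumeCapacity(body); // the instruction indexes
        return payload_index;
    }

`extraData(Block, payload_index)` then returns the decoded header in `data` and the offset of the trailing body in `end`, which is exactly how the reworked `getMainBody` above reads it back.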
- try sema.air_extra.ensureUnusedCapacity(gpa, inner_block.instructions.items.len + 1); + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + + inner_block.instructions.items.len); const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(u32, inner_block.instructions.items.len), }); diff --git a/src/Sema.zig b/src/Sema.zig index ac6755d24e..a144ce1d50 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2028,6 +2028,7 @@ fn analyzeBlockBody( refToIndex(coerced_operand).?); // Convert the br operand to a block. + const br_operand_ty_ref = try sema.addType(br_operand_ty); try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.Block).Struct.fields.len + coerce_block.instructions.items.len); try sema.air_instructions.ensureUnusedCapacity(gpa, 2); @@ -2037,7 +2038,7 @@ fn analyzeBlockBody( sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ - .ty = try sema.addType(br_operand_ty), + .ty = br_operand_ty_ref, .payload = sema.addExtraAssumeCapacity(Air.Block{ .body_len = @intCast(u32, coerce_block.instructions.items.len), }), diff --git a/src/print_air.zig b/src/print_air.zig new file mode 100644 index 0000000000..44c170a078 --- /dev/null +++ b/src/print_air.zig @@ -0,0 +1,294 @@ +const std = @import("std"); +const Allocator = std.mem.Allocator; +const fmtIntSizeBin = std.fmt.fmtIntSizeBin; + +const Module = @import("Module.zig"); +const Value = @import("value.zig").Value; +const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); + +pub fn dump(gpa: *Allocator, air: Air, liveness: Liveness) void { + const instruction_bytes = air.instructions.len * + // Here we don't use @sizeOf(Air.Inst.Data) because it would include + // the debug safety tag but we want to measure release size. 
+    (@sizeOf(Air.Inst.Tag) + 8);
+    const extra_bytes = air.extra.len * @sizeOf(u32);
+    const values_bytes = air.values.len * @sizeOf(Value);
+    const variables_bytes = air.variables.len * @sizeOf(*Module.Var);
+    const tomb_bytes = liveness.tomb_bits.len * @sizeOf(usize);
+    const liveness_extra_bytes = liveness.extra.len * @sizeOf(u32);
+    const liveness_special_bytes = liveness.special.count() * 8;
+    const total_bytes = @sizeOf(Air) + instruction_bytes + extra_bytes +
+        values_bytes + variables_bytes + @sizeOf(Liveness) + liveness_extra_bytes +
+        liveness_special_bytes + tomb_bytes;
+
+    // zig fmt: off
+    std.debug.print(
+        \\# Total AIR+Liveness bytes: {}
+        \\# AIR Instructions: {d} ({})
+        \\# AIR Extra Data: {d} ({})
+        \\# AIR Values Bytes: {d} ({})
+        \\# AIR Variables Bytes: {d} ({})
+        \\# Liveness tomb_bits: {}
+        \\# Liveness Extra Data: {d} ({})
+        \\# Liveness special table: {d} ({})
+        \\
+    , .{
+        fmtIntSizeBin(total_bytes),
+        air.instructions.len, fmtIntSizeBin(instruction_bytes),
+        air.extra.len, fmtIntSizeBin(extra_bytes),
+        air.values.len, fmtIntSizeBin(values_bytes),
+        air.variables.len, fmtIntSizeBin(variables_bytes),
+        fmtIntSizeBin(tomb_bytes),
+        liveness.extra.len, fmtIntSizeBin(liveness_extra_bytes),
+        liveness.special.count(), fmtIntSizeBin(liveness_special_bytes),
+    });
+    // zig fmt: on
+    var arena = std.heap.ArenaAllocator.init(gpa);
+    defer arena.deinit();
+
+    var writer: Writer = .{
+        .gpa = gpa,
+        .arena = &arena.allocator,
+        .air = air,
+        .liveness = liveness,
+        .indent = 0,
+    };
+    const stream = std.io.getStdErr().writer();
+    writer.writeAllConstants(stream) catch return;
+    writer.writeBody(stream, air.getMainBody()) catch return;
+}
+
+const Writer = struct {
+    gpa: *Allocator,
+    arena: *Allocator,
+    air: Air,
+    liveness: Liveness,
+    indent: usize,
+
+    fn writeAllConstants(w: *Writer, s: anytype) @TypeOf(s).Error!void {
+        for (w.air.instructions.items(.tag)) |tag, i| {
+            const inst = @intCast(u32, i);
+            switch (tag) {
+                .constant, .const_ty => {
+                    try s.writeByteNTimes(' ', w.indent);
+                    try s.print("%{d} ", .{inst});
+                    try w.writeInst(s, inst);
+                    try s.writeAll(")\n");
+                },
+                else => continue,
+            }
+        }
+    }
+
+    fn writeBody(w: *Writer, s: anytype, body: []const Air.Inst.Index) @TypeOf(s).Error!void {
+        for (body) |inst| {
+            try s.writeByteNTimes(' ', w.indent);
+            try s.print("%{d} ", .{inst});
+            try w.writeInst(s, inst);
+            if (w.liveness.isUnused(inst)) {
+                try s.writeAll(") unused\n");
+            } else {
+                try s.writeAll(")\n");
+            }
+        }
+    }
+
+    fn writeInst(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void {
+        const tags = w.air.instructions.items(.tag);
+        const tag = tags[inst];
+        try s.print("= {s}(", .{@tagName(tag)});
+        switch (tag) {
+            .arg => try w.writeTyStr(s, inst),
+
+            .add,
+            .addwrap,
+            .sub,
+            .subwrap,
+            .mul,
+            .mulwrap,
+            .div,
+            .bit_and,
+            .bit_or,
+            .xor,
+            .cmp_lt,
+            .cmp_lte,
+            .cmp_eq,
+            .cmp_gte,
+            .cmp_gt,
+            .cmp_neq,
+            .bool_and,
+            .bool_or,
+            .store,
+            => try w.writeBinOp(s, inst),
+
+            .is_null,
+            .is_non_null,
+            .is_null_ptr,
+            .is_non_null_ptr,
+            .is_err,
+            .is_non_err,
+            .is_err_ptr,
+            .is_non_err_ptr,
+            .ptrtoint,
+            .ret,
+            => try w.writeUnOp(s, inst),
+
+            .breakpoint,
+            .unreach,
+            => try w.writeNoOp(s, inst),
+
+            .const_ty,
+            .alloc,
+            => try w.writeTy(s, inst),
+
+            .not,
+            .bitcast,
+            .load,
+            .ref,
+            .floatcast,
+            .intcast,
+            .optional_payload,
+            .optional_payload_ptr,
+            .wrap_optional,
+            .unwrap_errunion_payload,
+            .unwrap_errunion_err,
+            .unwrap_errunion_payload_ptr,
+            .unwrap_errunion_err_ptr,
+            .wrap_errunion_payload,
+ .wrap_errunion_err, + => try w.writeTyOp(s, inst), + + .block, + .loop, + => try w.writeBlock(s, inst), + + .struct_field_ptr => try w.writeStructFieldPtr(s, inst), + .varptr => try w.writeVarPtr(s, inst), + .constant => try w.writeConstant(s, inst), + .assembly => try w.writeAssembly(s, inst), + .dbg_stmt => try w.writeDbgStmt(s, inst), + .call => try w.writeCall(s, inst), + .br => try w.writeBr(s, inst), + .cond_br => try w.writeCondBr(s, inst), + .switch_br => try w.writeSwitchBr(s, inst), + } + } + + fn writeTyStr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeUnOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeNoOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const ty = w.air.instructions.items(.data)[inst].ty; + try s.print("{}", .{ty}); + } + + fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeBlock(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeStructFieldPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeVarPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; + const val = w.air.values[ty_pl.payload]; + try s.print("{}, {}", .{ ty_pl.ty, val }); + } + + fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeDbgStmt(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const dbg_stmt = w.air.instructions.items(.data)[inst].dbg_stmt; + try s.print("{d}:{d}", .{ dbg_stmt.line + 1, dbg_stmt.column + 1 }); + } + + fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.Call, pl_op.payload); + const args = w.air.extra[extra.end..][0..extra.data.args_len]; + try w.writeInstRef(s, pl_op.operand); + try s.writeAll(", ["); + for (args) |arg, i| { + if (i != 0) try s.writeAll(", "); + try w.writeInstRef(s, @intToEnum(Air.Inst.Ref, arg)); + } + try s.writeAll("]"); + } + + fn writeBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeCondBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + _ = inst; + try s.writeAll("TODO"); + } + + fn writeInstRef(w: *Writer, s: anytype, inst: Air.Inst.Ref) @TypeOf(s).Error!void { + var i: usize = @enumToInt(inst); + + if (i < Air.Inst.Ref.typed_value_map.len) { + return s.print("@{}", .{inst}); + } + i -= 
Air.Inst.Ref.typed_value_map.len; + + return w.writeInstIndex(s, @intCast(Air.Inst.Index, i)); + } + + fn writeInstIndex(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + _ = w; + return s.print("%{d}", .{inst}); + } +}; diff --git a/src/value.zig b/src/value.zig index df3a97b09a..abb2ea7b1e 100644 --- a/src/value.zig +++ b/src/value.zig @@ -573,7 +573,7 @@ pub const Value = extern union { .int_i64 => return std.fmt.formatIntValue(val.castTag(.int_i64).?.data, "", options, out_stream), .int_big_positive => return out_stream.print("{}", .{val.castTag(.int_big_positive).?.asBigInt()}), .int_big_negative => return out_stream.print("{}", .{val.castTag(.int_big_negative).?.asBigInt()}), - .function => return out_stream.writeAll("(function)"), + .function => return out_stream.print("(function '{s}')", .{val.castTag(.function).?.data.owner_decl.name}), .extern_fn => return out_stream.writeAll("(extern function)"), .variable => return out_stream.writeAll("(variable)"), .ref_val => { From 8082660118bba78de00e1e103e53730a87b2b70f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jul 2021 18:22:18 -0700 Subject: [PATCH 22/53] stage2: codegen.zig updated to new AIR memory layout --- src/Air.zig | 143 ++++- src/AstGen.zig | 77 +-- src/Liveness.zig | 54 +- src/Module.zig | 4 +- src/Sema.zig | 150 +----- src/Zir.zig | 6 +- src/codegen.zig | 1349 ++++++++++++++++++++++++---------------------- 7 files changed, 960 insertions(+), 823 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index a8b38b7659..f4c4fa4155 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -13,9 +13,9 @@ const Air = @This(); instructions: std.MultiArrayList(Inst).Slice, /// The meaning of this data is determined by `Inst.Tag` value. /// The first few indexes are reserved. See `ExtraIndex` for the values. -extra: []u32, -values: []Value, -variables: []*Module.Var, +extra: []const u32, +values: []const Value, +variables: []const *Module.Var, pub const ExtraIndex = enum(u32) { /// Payload index of the main `Block` in the `extra` array. 
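For orientation before the next Air.zig hunks, which centralize the Ref to Index mapping: Ref values below `typed_value_map.len` are interned typed values (printed as `@N` by print_air's `writeInstRef` above), and everything at or above that threshold is an instruction index shifted by the map length (printed as `%N`). A minimal sketch, assuming only the functions this patch adds to Air.zig:

    test "Ref <-> Index round trip" {
        const ref = Air.indexToRef(5); // dumps as %5
        std.debug.assert(Air.refToIndex(ref).? == 5);
        // Interned Refs such as .none sit below the threshold and map to no index.
        std.debug.assert(Air.refToIndex(.none) == null);
    }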
@@ -378,22 +378,109 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index { return air.extra[extra.end..][0..extra.data.body_len]; } -pub fn getType(air: Air, inst: Air.Inst.Index) Type { - _ = air; - _ = inst; - @panic("TODO Air getType"); +pub fn typeOf(air: Air, inst: Air.Inst.Ref) Type { + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].ty; + } + return air.typeOfIndex(@intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len)); +} + +pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type { + const datas = air.instructions.items(.data); + switch (air.instructions.items(.tag)[inst]) { + .arg => return air.getRefType(datas[inst].ty_str.ty), + + .add, + .addwrap, + .sub, + .subwrap, + .mul, + .mulwrap, + .div, + .bit_and, + .bit_or, + .xor, + => return air.typeOf(datas[inst].bin_op.lhs), + + .cmp_lt, + .cmp_lte, + .cmp_eq, + .cmp_gte, + .cmp_gt, + .cmp_neq, + .is_null, + .is_non_null, + .is_null_ptr, + .is_non_null_ptr, + .is_err, + .is_non_err, + .is_err_ptr, + .is_non_err_ptr, + .bool_and, + .bool_or, + => return Type.initTag(.bool), + + .const_ty => return Type.initTag(.type), + + .alloc => return datas[inst].ty, + + .assembly, + .block, + .constant, + .varptr, + .struct_field_ptr, + => return air.getRefType(datas[inst].ty_pl.ty), + + .not, + .bitcast, + .load, + .ref, + .floatcast, + .intcast, + .optional_payload, + .optional_payload_ptr, + .wrap_optional, + .unwrap_errunion_payload, + .unwrap_errunion_err, + .unwrap_errunion_payload_ptr, + .unwrap_errunion_err_ptr, + .wrap_errunion_payload, + .wrap_errunion_err, + => return air.getRefType(datas[inst].ty_op.ty), + + .loop, + .br, + .cond_br, + .switch_br, + .ret, + .unreach, + => return Type.initTag(.noreturn), + + .breakpoint, + .dbg_stmt, + .store, + => return Type.initTag(.void), + + .ptrtoint => return Type.initTag(.usize), + + .call => { + const callee_ty = air.typeOf(datas[inst].pl_op.operand); + return callee_ty.fnReturnType(); + }, + } } pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type { - var i: usize = @enumToInt(ref); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; + const ref_int = @enumToInt(ref); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].val.toType(undefined) catch unreachable; } - i -= Air.Inst.Ref.typed_value_map.len; + const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len; const air_tags = air.instructions.items(.tag); const air_datas = air.instructions.items(.data); - assert(air_tags[i] == .const_ty); - return air_datas[i].ty; + assert(air_tags[inst_index] == .const_ty); + return air_datas[inst_index].ty; } /// Returns the requested data, as well as the new index which is at the start of the @@ -424,3 +511,33 @@ pub fn deinit(air: *Air, gpa: *std.mem.Allocator) void { gpa.free(air.variables); air.* = undefined; } + +const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; + +pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { + return @intToEnum(Air.Inst.Ref, ref_start_index + inst); +} + +pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { + const ref_int = @enumToInt(inst); + if (ref_int >= ref_start_index) { + return ref_int - ref_start_index; + } else { + return null; + } +} + +/// Returns `null` if runtime-known. 
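A note on the `value` helper that this doc comment introduces: it resolves a Ref to a compile-time `Value` through three paths. Interned Refs come from `typed_value_map`, `.constant` instructions index `air.values`, and any other instruction is runtime-known unless its type has exactly one possible value. A minimal consuming sketch (the helper name is illustrative, not from the patch):

    fn operandIsComptimeKnown(air: Air, operand: Air.Inst.Ref) bool {
        // null means codegen must materialize the operand at runtime.
        return air.value(operand) != null;
    }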
+pub fn value(air: Air, inst: Air.Inst.Ref) ?Value { + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return Air.Inst.Ref.typed_value_map[ref_int].val; + } + const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + const air_datas = air.instructions.items(.data); + switch (air.instructions.items(.tag)[inst_index]) { + .constant => return air.values[air_datas[inst_index].ty_pl.payload], + .const_ty => unreachable, + else => return air.typeOfIndex(inst_index).onePossibleValue(), + } +} diff --git a/src/AstGen.zig b/src/AstGen.zig index 1b58b3f2f7..cbd918ecc7 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -6412,37 +6412,12 @@ fn multilineStringLiteral( node: ast.Node.Index, ) InnerError!Zir.Inst.Ref { const astgen = gz.astgen; - const tree = astgen.tree; - const node_datas = tree.nodes.items(.data); - - const start = node_datas[node].lhs; - const end = node_datas[node].rhs; - - const gpa = gz.astgen.gpa; - const string_bytes = &gz.astgen.string_bytes; - const str_index = string_bytes.items.len; - - // First line: do not append a newline. - var tok_i = start; - { - const slice = tree.tokenSlice(tok_i); - const line_bytes = slice[2 .. slice.len - 1]; - try string_bytes.appendSlice(gpa, line_bytes); - tok_i += 1; - } - // Following lines: each line prepends a newline. - while (tok_i <= end) : (tok_i += 1) { - const slice = tree.tokenSlice(tok_i); - const line_bytes = slice[2 .. slice.len - 1]; - try string_bytes.ensureCapacity(gpa, string_bytes.items.len + line_bytes.len + 1); - string_bytes.appendAssumeCapacity('\n'); - string_bytes.appendSliceAssumeCapacity(line_bytes); - } + const str = try astgen.strLitNodeAsString(node); const result = try gz.add(.{ .tag = .str, .data = .{ .str = .{ - .start = @intCast(u32, str_index), - .len = @intCast(u32, string_bytes.items.len - str_index), + .start = str.index, + .len = str.len, } }, }); return rvalue(gz, rl, result, node); @@ -6620,9 +6595,14 @@ fn asmExpr( const tree = astgen.tree; const main_tokens = tree.nodes.items(.main_token); const node_datas = tree.nodes.items(.data); + const node_tags = tree.nodes.items(.tag); const token_tags = tree.tokens.items(.tag); - const asm_source = try comptimeExpr(gz, scope, .{ .ty = .const_slice_u8_type }, full.ast.template); + const asm_source = switch (node_tags[full.ast.template]) { + .string_literal => try astgen.strLitAsString(main_tokens[full.ast.template]), + .multiline_string_literal => try astgen.strLitNodeAsString(full.ast.template), + else => return astgen.failNode(node, "assembly code must use string literal syntax", .{}), + }; // See https://github.com/ziglang/zig/issues/215 and related issues discussing // possible inline assembly improvements. Until then here is status quo AstGen @@ -6752,7 +6732,7 @@ fn asmExpr( const result = try gz.addAsm(.{ .node = node, - .asm_source = asm_source, + .asm_source = asm_source.index, .is_volatile = full.volatile_token != null, .output_type_bits = output_type_bits, .outputs = outputs, @@ -8579,6 +8559,41 @@ fn strLitAsString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !IndexSlice { } } +fn strLitNodeAsString(astgen: *AstGen, node: ast.Node.Index) !IndexSlice { + const tree = astgen.tree; + const node_datas = tree.nodes.items(.data); + + const start = node_datas[node].lhs; + const end = node_datas[node].rhs; + + const gpa = astgen.gpa; + const string_bytes = &astgen.string_bytes; + const str_index = string_bytes.items.len; + + // First line: do not append a newline. 
+ var tok_i = start; + { + const slice = tree.tokenSlice(tok_i); + const line_bytes = slice[2 .. slice.len - 1]; + try string_bytes.appendSlice(gpa, line_bytes); + tok_i += 1; + } + // Following lines: each line prepends a newline. + while (tok_i <= end) : (tok_i += 1) { + const slice = tree.tokenSlice(tok_i); + const line_bytes = slice[2 .. slice.len - 1]; + try string_bytes.ensureCapacity(gpa, string_bytes.items.len + line_bytes.len + 1); + string_bytes.appendAssumeCapacity('\n'); + string_bytes.appendSliceAssumeCapacity(line_bytes); + } + const len = string_bytes.items.len - str_index; + try string_bytes.append(gpa, 0); + return IndexSlice{ + .index = @intCast(u32, str_index), + .len = @intCast(u32, len), + }; +} + fn testNameString(astgen: *AstGen, str_lit_token: ast.TokenIndex) !u32 { const gpa = astgen.gpa; const string_bytes = &astgen.string_bytes; @@ -9440,7 +9455,7 @@ const GenZir = struct { args: struct { /// Absolute node index. This function does the conversion to offset from Decl. node: ast.Node.Index, - asm_source: Zir.Inst.Ref, + asm_source: u32, output_type_bits: u32, is_volatile: bool, outputs: []const Zir.Inst.Asm.Output, diff --git a/src/Liveness.zig b/src/Liveness.zig index 98af9eb429..79fc0d7325 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -21,7 +21,7 @@ const Log2Int = std.math.Log2Int; /// operand dies after this instruction. /// Instructions which need more data to track liveness have special handling via the /// `special` table. -tomb_bits: []const usize, +tomb_bits: []usize, /// Sparse table of specially handled instructions. The value is an index into the `extra` /// array. The meaning of the data depends on the AIR tag. special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), @@ -98,7 +98,7 @@ pub fn operandDies(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) bool return (l.tomb_bits[usize_index] & mask) != 0; } -pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt) void { +pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) void { assert(operand < bpi - 1); const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << @@ -106,16 +106,40 @@ pub fn clearOperandDeath(l: *Liveness, inst: Air.Inst.Index, operand: OperandInt l.tomb_bits[usize_index] |= mask; } +/// Higher level API. +pub const CondBrSlices = struct { + then_deaths: []const Air.Inst.Index, + else_deaths: []const Air.Inst.Index, +}; + +pub fn getCondBr(l: Liveness, inst: Air.Inst.Index) CondBrSlices { + var index: usize = l.special.get(inst) orelse return .{ + .then_deaths = &.{}, + .else_deaths = &.{}, + }; + const then_death_count = l.extra[index]; + index += 1; + const else_death_count = l.extra[index]; + index += 1; + const then_deaths = l.extra[index..][0..then_death_count]; + index += then_death_count; + return .{ + .then_deaths = then_deaths, + .else_deaths = l.extra[index..][0..else_death_count], + }; +} + pub fn deinit(l: *Liveness, gpa: *Allocator) void { gpa.free(l.tomb_bits); gpa.free(l.extra); l.special.deinit(gpa); + l.* = undefined; } /// How many tomb bits per AIR instruction. -const bpi = 4; -const Bpi = std.meta.Int(.unsigned, bpi); -const OperandInt = std.math.Log2Int(Bpi); +pub const bpi = 4; +pub const Bpi = std.meta.Int(.unsigned, bpi); +pub const OperandInt = std.math.Log2Int(Bpi); /// In-progress data; on successful analysis converted into `Liveness`. 
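Before the `Analysis` struct: the Liveness accessors above (`operandDies`, `clearOperandDeath`) all index one packed layout. A self-contained demo, assuming the natural packing implied by `usize_index = (inst * bpi) / @bitSizeOf(usize)`: each instruction owns `bpi == 4` consecutive bits, three operand-death bits followed by one instruction-unused bit.

    const std = @import("std");

    const bpi = 4; // tomb bits per AIR instruction, as in Liveness.zig

    fn operandDiesDemo(tomb_bits: []const usize, inst: usize, operand: u2) bool {
        const per_word = @bitSizeOf(usize) / bpi;
        const shift = @intCast(std.math.Log2Int(usize), (inst % per_word) * bpi + operand);
        return (tomb_bits[inst / per_word] >> shift) & 1 != 0;
    }

    pub fn main() void {
        // Instruction 1: operand 0 dies (bit 4) and the result is unused (bit 7).
        const bits = [_]usize{0b1001_0000};
        std.debug.assert(operandDiesDemo(&bits, 1, 0));
        std.debug.assert(!operandDiesDemo(&bits, 1, 1));
    }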
const Analysis = struct { @@ -267,14 +291,14 @@ fn analyzeInst( const inst_data = inst_datas[inst].pl_op; const callee = inst_data.operand; const extra = a.air.extraData(Air.Call, inst_data.payload); - const args = a.air.extra[extra.end..][0..extra.data.args_len]; + const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..extra.data.args_len]); if (args.len <= bpi - 2) { - var buf: [bpi - 1]Air.Inst.Ref = undefined; + var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); buf[0] = callee; - std.mem.copy(Air.Inst.Ref, buf[1..], @bitCast([]const Air.Inst.Ref, args)); + std.mem.copy(Air.Inst.Ref, buf[1..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function with greater than 2 args"); + @panic("TODO: liveness analysis for function call with greater than 2 args"); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; @@ -285,12 +309,12 @@ fn analyzeInst( const extended = a.zir.instructions.items(.data)[extra.data.zir_index].extended; const outputs_len = @truncate(u5, extended.small); const inputs_len = @truncate(u5, extended.small >> 5); - const outputs = a.air.extra[extra.end..][0..outputs_len]; - const inputs = a.air.extra[extra.end + outputs.len ..][0..inputs_len]; - if (outputs.len + inputs.len <= bpi - 1) { - var buf: [bpi - 1]Air.Inst.Ref = undefined; - std.mem.copy(Air.Inst.Ref, &buf, @bitCast([]const Air.Inst.Ref, outputs)); - std.mem.copy(Air.Inst.Ref, buf[outputs.len..], @bitCast([]const Air.Inst.Ref, inputs)); + const outputs = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end..][0..outputs_len]); + const args = @bitCast([]const Air.Inst.Ref, a.air.extra[extra.end + outputs.len ..][0..inputs_len]); + if (outputs.len + args.len <= bpi - 1) { + var buf = [1]Air.Inst.Ref{.none} ** (bpi - 1); + std.mem.copy(Air.Inst.Ref, &buf, outputs); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } @panic("TODO: liveness analysis for asm with greater than 3 args"); diff --git a/src/Module.zig b/src/Module.zig index f452824d33..c101221f2e 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1309,7 +1309,7 @@ pub const Scope = struct { const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); - return Sema.indexToRef(result_index); + return Air.indexToRef(result_index); } }; }; @@ -3533,7 +3533,7 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) SemaError!Air { const ty_ref = try sema.addType(param_type); const arg_index = @intCast(u32, sema.air_instructions.len); inner_block.instructions.appendAssumeCapacity(arg_index); - param_inst.* = Sema.indexToRef(arg_index); + param_inst.* = Air.indexToRef(arg_index); try sema.air_instructions.append(gpa, .{ .tag = .arg, .data = .{ diff --git a/src/Sema.zig b/src/Sema.zig index a144ce1d50..777619dc48 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -1301,7 +1301,7 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!A // Set the name of the Air.Arg instruction for use by codegen debug info. 
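Back in the Liveness hunk above, the `args.len <= bpi - 2` cutoff for calls follows directly from the buffer size: `trackOperands` receives `bpi - 1 == 3` operand slots and the callee always occupies slot 0, leaving two for arguments (larger calls remain a TODO panic in this patch). A sketch of that packing (helper name hypothetical):

    fn packCallOperands(callee: Air.Inst.Ref, args: []const Air.Inst.Ref) [Liveness.bpi - 1]Air.Inst.Ref {
        std.debug.assert(args.len <= Liveness.bpi - 2);
        var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1);
        buf[0] = callee;
        std.mem.copy(Air.Inst.Ref, buf[1..], args);
        return buf;
    }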
const air_arg = sema.param_inst_list[arg_index]; - sema.air_instructions.items(.data)[refToIndex(air_arg).?].ty_str.str = inst_data.start; + sema.air_instructions.items(.data)[Air.refToIndex(air_arg).?].ty_str.str = inst_data.start; return air_arg; } @@ -1389,7 +1389,7 @@ fn zirAllocInferred( // to the block even though it is currently a `.constant`. const result = try sema.addConstant(inferred_alloc_ty, Value.initPayload(&val_payload.base)); try sema.requireFunctionBlock(block, src); - try block.instructions.append(sema.gpa, refToIndex(result).?); + try block.instructions.append(sema.gpa, Air.refToIndex(result).?); return result; } @@ -1400,7 +1400,7 @@ fn zirResolveInferredAlloc(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Inde const inst_data = sema.code.instructions.items(.data)[inst].un_node; const ty_src: LazySrcLoc = .{ .node_offset_var_decl_ty = inst_data.src_node }; const ptr = sema.resolveInst(inst_data.operand); - const ptr_inst = refToIndex(ptr).?; + const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; @@ -1586,7 +1586,7 @@ fn zirStoreToInferredPtr(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) const bin_inst = sema.code.instructions.items(.data)[inst].bin; const ptr = sema.resolveInst(bin_inst.lhs); const value = sema.resolveInst(bin_inst.rhs); - const ptr_inst = refToIndex(ptr).?; + const ptr_inst = Air.refToIndex(ptr).?; assert(sema.air_instructions.items(.tag)[ptr_inst] == .constant); const air_datas = sema.air_instructions.items(.data); const ptr_val = sema.air_values.items[air_datas[ptr_inst].ty_pl.payload]; @@ -1968,13 +1968,13 @@ fn analyzeBlockBody( // Blocks must terminate with noreturn instruction. assert(child_block.instructions.items.len != 0); - assert(sema.typeOf(indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); + assert(sema.typeOf(Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1])).isNoReturn()); if (merges.results.items.len == 0) { // No need for a block instruction. We can put the new instructions // directly into the parent block. try parent_block.instructions.appendSlice(gpa, child_block.instructions.items); - return indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); + return Air.indexToRef(child_block.instructions.items[child_block.instructions.items.len - 1]); } if (merges.results.items.len == 1) { const last_inst_index = child_block.instructions.items.len - 1; @@ -2025,7 +2025,7 @@ fn analyzeBlockBody( continue; } assert(coerce_block.instructions.items[coerce_block.instructions.items.len - 1] == - refToIndex(coerced_operand).?); + Air.refToIndex(coerced_operand).?); // Convert the br operand to a block. 
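The conversion that follows rewires a mismatched `br` without moving existing instructions: two fresh instruction slots are reserved, the old `br`'s operand is redirected to a new sub-block, and the sub-block carries the coercion code plus a `br` yielding the coerced value. Roughly, in the dump notation used by print_air (instruction numbers, `T`, and exact body placement are illustrative only):

    before:  %9 = br(%1, %4)    // %4 has the wrong type for block %1
    after:   %9 = br(%1, %12)
             %12 = block(T, {
               ... coercion instructions from coerce_block ...
               %13 = br(%12, <coerced operand>)
             })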
const br_operand_ty_ref = try sema.addType(br_operand_ty); @@ -2034,7 +2034,7 @@ fn analyzeBlockBody( try sema.air_instructions.ensureUnusedCapacity(gpa, 2); const sub_block_inst = @intCast(Air.Inst.Index, sema.air_instructions.len); const sub_br_inst = sub_block_inst + 1; - sema.air_instructions.items(.data)[br].br.operand = indexToRef(sub_block_inst); + sema.air_instructions.items(.data)[br].br.operand = Air.indexToRef(sub_block_inst); sema.air_instructions.appendAssumeCapacity(.{ .tag = .block, .data = .{ .ty_pl = .{ @@ -2054,7 +2054,7 @@ fn analyzeBlockBody( } }, }); } - return indexToRef(merges.block_inst); + return Air.indexToRef(merges.block_inst); } fn zirExport(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileError!void { @@ -2149,7 +2149,7 @@ fn zirBreak(sema: *Sema, start_block: *Scope.Block, inst: Zir.Inst.Index) Compil if (label.zir_block == zir_block) { const br_ref = try start_block.addBr(label.merges.block_inst, operand); try label.merges.results.append(sema.gpa, operand); - try label.merges.br_list.append(sema.gpa, refToIndex(br_ref).?); + try label.merges.br_list.append(sema.gpa, Air.refToIndex(br_ref).?); return inst; } } @@ -5310,7 +5310,7 @@ fn zirBoolBr( } } }); try parent_block.instructions.append(gpa, block_inst); - return indexToRef(block_inst); + return Air.indexToRef(block_inst); } fn zirIsNonNull( @@ -7204,7 +7204,7 @@ fn analyzeVarRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, tv: TypedVal } }, }); try block.instructions.append(gpa, result_inst); - return indexToRef(result_inst); + return Air.indexToRef(result_inst); } fn analyzeRef( @@ -8021,107 +8021,18 @@ fn enumFieldSrcLoc( } else unreachable; } -/// This is only meant to be called by `typeOf`. -fn analyzeAsTypeInfallible(sema: *Sema, inst: Air.Inst.Ref) Type { - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].val.toType(undefined) catch unreachable; - } - i -= Air.Inst.Ref.typed_value_map.len; - assert(sema.air_instructions.items(.tag)[i] == .const_ty); - return sema.air_instructions.items(.data)[i].ty; -} - /// Returns the type of the AIR instruction. 
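The `typeOf` below now just delegates: `getTmpAir` wraps Sema's in-progress arrays in a temporary `Air` value, so the same `typeOf`/`typeOfIndex` logic serves both mid-analysis queries and finished AIR. A hypothetical usage sketch (helper name not from the patch):

    fn operandTypeAndValue(sema: Sema, operand: Air.Inst.Ref) struct { ty: Type, val: ?Value } {
        const tmp_air = sema.getTmpAir(); // zero-copy view over the in-progress arrays
        return .{ .ty = tmp_air.typeOf(operand), .val = tmp_air.value(operand) };
    }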
fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type { - var i: usize = @enumToInt(inst); - if (i < Air.Inst.Ref.typed_value_map.len) { - return Air.Inst.Ref.typed_value_map[i].ty; - } - i -= Air.Inst.Ref.typed_value_map.len; + return sema.getTmpAir().typeOf(inst); +} - const air_datas = sema.air_instructions.items(.data); - switch (sema.air_instructions.items(.tag)[i]) { - .arg => return sema.analyzeAsTypeInfallible(air_datas[i].ty_str.ty), - - .add, - .addwrap, - .sub, - .subwrap, - .mul, - .mulwrap, - .div, - .bit_and, - .bit_or, - .xor, - => return sema.typeOf(air_datas[i].bin_op.lhs), - - .cmp_lt, - .cmp_lte, - .cmp_eq, - .cmp_gte, - .cmp_gt, - .cmp_neq, - .is_null, - .is_non_null, - .is_null_ptr, - .is_non_null_ptr, - .is_err, - .is_non_err, - .is_err_ptr, - .is_non_err_ptr, - .bool_and, - .bool_or, - => return Type.initTag(.bool), - - .const_ty => return Type.initTag(.type), - - .alloc => return air_datas[i].ty, - - .assembly, - .block, - .constant, - .varptr, - .struct_field_ptr, - => return sema.analyzeAsTypeInfallible(air_datas[i].ty_pl.ty), - - .not, - .bitcast, - .load, - .ref, - .floatcast, - .intcast, - .optional_payload, - .optional_payload_ptr, - .wrap_optional, - .unwrap_errunion_payload, - .unwrap_errunion_err, - .unwrap_errunion_payload_ptr, - .unwrap_errunion_err_ptr, - .wrap_errunion_payload, - .wrap_errunion_err, - => return sema.analyzeAsTypeInfallible(air_datas[i].ty_op.ty), - - .loop, - .br, - .cond_br, - .switch_br, - .ret, - .unreach, - => return Type.initTag(.noreturn), - - .breakpoint, - .dbg_stmt, - .store, - => return Type.initTag(.void), - - .ptrtoint => return Type.initTag(.usize), - - .call => { - const callee_ty = sema.typeOf(air_datas[i].pl_op.operand); - return callee_ty.fnReturnType(); - }, - } +fn getTmpAir(sema: Sema) Air { + return .{ + .instructions = sema.air_instructions.slice(), + .extra = sema.air_extra.items, + .values = sema.air_values.items, + .variables = sema.air_variables.items, + }; } pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { @@ -8185,7 +8096,7 @@ pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { .tag = .const_ty, .data = .{ .ty = ty }, }); - return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } fn addIntUnsigned(sema: *Sema, ty: Type, int: u64) CompileError!Air.Inst.Ref { @@ -8207,22 +8118,7 @@ fn addConstant(sema: *Sema, ty: Type, val: Value) CompileError!Air.Inst.Ref { .payload = @intCast(u32, sema.air_values.items.len - 1), } }, }); - return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); -} - -const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; - -pub fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { - return @intToEnum(Air.Inst.Ref, ref_start_index + inst); -} - -pub fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { - const ref_int = @enumToInt(inst); - if (ref_int >= ref_start_index) { - return ref_int - ref_start_index; - } else { - return null; - } + return Air.indexToRef(@intCast(u32, sema.air_instructions.len - 1)); } pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { diff --git a/src/Zir.zig b/src/Zir.zig index 42924817fc..cf349a6a8d 100644 --- a/src/Zir.zig +++ b/src/Zir.zig @@ -2176,7 +2176,8 @@ pub const Inst = struct { /// 2. clobber: u32 // index into string_bytes (null terminated) for every clobbers_len. pub const Asm = struct { src_node: i32, - asm_source: Ref, + // null-terminated string index + asm_source: u32, /// 1 bit for each outputs_len: whether it uses `-> T` or not. 
/// 0b0 - operand is a pointer to where to store the output. /// 0b1 - operand is a type; asm expression has the output as the result. @@ -3383,9 +3384,10 @@ const Writer = struct { const inputs_len = @truncate(u5, extended.small >> 5); const clobbers_len = @truncate(u5, extended.small >> 10); const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const asm_source = self.code.nullTerminatedString(extra.data.asm_source); try self.writeFlag(stream, "volatile, ", is_volatile); - try self.writeInstRef(stream, extra.data.asm_source); + try stream.print("\"{}\", ", .{std.zig.fmtEscapes(asm_source)}); try stream.writeAll(", "); var extra_i: usize = extra.end; diff --git a/src/codegen.zig b/src/codegen.zig index 1495b19673..bc22d7ec19 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3,6 +3,7 @@ const mem = std.mem; const math = std.math; const assert = std.debug.assert; const Air = @import("Air.zig"); +const Zir = @import("Zir.zig"); const Liveness = @import("Liveness.zig"); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; @@ -337,6 +338,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// to place a new stack allocation, it goes here, and then bumps `max_end_stack`. next_stack_offset: u32 = 0, + /// Debug field, used to find bugs in the compiler. + air_bookkeeping: @TypeOf(air_bookkeeping_init) = air_bookkeeping_init, + + const air_bookkeeping_init = if (std.debug.runtime_safety) @as(usize, 0) else {}; + const MCValue = union(enum) { /// No runtime bits. `void` types, empty structs, u0, enums with 1 tag, etc. /// TODO Look into deleting this tag and using `dead` instead, since every use @@ -751,24 +757,91 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void { + const air_tags = self.air.instructions.items(.tag); + for (body) |inst| { - const tomb_bits = self.liveness.getTombBits(inst); - try self.ensureProcessDeathCapacity(@popCount(@TypeOf(tomb_bits), tomb_bits)); + const old_air_bookkeeping = self.air_bookkeeping; + try self.ensureProcessDeathCapacity(Liveness.bpi); - const mcv = try self.genFuncInst(inst); - if (!self.liveness.isUnused(inst)) { - log.debug("{} => {}", .{ inst, mcv }); - const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - try branch.inst_table.putNoClobber(self.gpa, inst, mcv); + switch (air_tags[inst]) { + // zig fmt: off + .add => try self.airAdd(inst), + .addwrap => try self.airAddWrap(inst), + .sub => try self.airSub(inst), + .subwrap => try self.airSubWrap(inst), + .mul => try self.airMul(inst), + .mulwrap => try self.airMulWrap(inst), + .div => try self.airDiv(inst), + + .cmp_lt => try self.airCmp(inst, .lt), + .cmp_lte => try self.airCmp(inst, .lte), + .cmp_eq => try self.airCmp(inst, .eq), + .cmp_gte => try self.airCmp(inst, .gte), + .cmp_gt => try self.airCmp(inst, .gt), + .cmp_neq => try self.airCmp(inst, .neq), + + .bool_and => try self.airBoolOp(inst), + .bool_or => try self.airBoolOp(inst), + .bit_and => try self.airBitAnd(inst), + .bit_or => try self.airBitOr(inst), + .xor => try self.airXor(inst), + + .alloc => try self.airAlloc(inst), + .arg => try self.airArg(inst), + .assembly => try self.airAsm(inst), + .bitcast => try self.airBitCast(inst), + .block => try self.airBlock(inst), + .br => try self.airBr(inst), + .breakpoint => try self.airBreakpoint(), + .call => try self.airCall(inst), + .cond_br => try self.airCondBr(inst), + .dbg_stmt => try self.airDbgStmt(inst), + .floatcast => try 
self.airFloatCast(inst), + .intcast => try self.airIntCast(inst), + .is_non_null => try self.airIsNonNull(inst), + .is_non_null_ptr => try self.airIsNonNullPtr(inst), + .is_null => try self.airIsNull(inst), + .is_null_ptr => try self.airIsNullPtr(inst), + .is_non_err => try self.airIsNonErr(inst), + .is_non_err_ptr => try self.airIsNonErrPtr(inst), + .is_err => try self.airIsErr(inst), + .is_err_ptr => try self.airIsErrPtr(inst), + .load => try self.airLoad(inst), + .loop => try self.airLoop(inst), + .not => try self.airNot(inst), + .ptrtoint => try self.airPtrToInt(inst), + .ref => try self.airRef(inst), + .ret => try self.airRet(inst), + .store => try self.airStore(inst), + .struct_field_ptr=> try self.airStructFieldPtr(inst), + .switch_br => try self.airSwitch(inst), + .varptr => try self.airVarPtr(inst), + + .constant => unreachable, // excluded from function bodies + .const_ty => unreachable, // excluded from function bodies + .unreach => self.finishAirBookkeeping(), + + .optional_payload => try self.airOptionalPayload(inst), + .optional_payload_ptr => try self.airOptionalPayloadPtr(inst), + .unwrap_errunion_err => try self.airUnwrapErrErr(inst), + .unwrap_errunion_payload => try self.airUnwrapErrPayload(inst), + .unwrap_errunion_err_ptr => try self.airUnwrapErrErrPtr(inst), + .unwrap_errunion_payload_ptr=> try self.airUnwrapErrPayloadPtr(inst), + + .wrap_optional => try self.airWrapOptional(inst), + .wrap_errunion_payload => try self.airWrapErrUnionPayload(inst), + .wrap_errunion_err => try self.airWrapErrUnionErr(inst), + // zig fmt: on + } + if (std.debug.runtime_safety) { + if (self.air_bookkeeping != old_air_bookkeeping + 1) { + std.debug.panic( + \\in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. + \\Look for a missing call to finishAir or an extra call to it. + \\ + , .{ inst, air_tags[inst] }); + } } - - // TODO inline this logic into every instruction - @panic("TODO rework AIR memory layout codegen for processing deaths"); - //var i: ir.Inst.DeathsBitIndex = 0; - //while (inst.getOperand(i)) |operand| : (i += 1) { - // if (inst.operandDies(i)) - // self.processDeath(operand); - //} } } @@ -833,9 +906,36 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } + /// Called when there are no operands, and the instruction is always unreferenced. 
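The two finish helpers that follow define the bookkeeping contract for every `airXyz` handler in the new switch above: each handler must end in exactly one `finishAir` (or `finishAirBookkeeping` for no-operand cases), which consumes up to `bpi - 1` operand tomb bits, records the result for used instructions, and increments `air_bookkeeping` so the safety check in `genBody` passes. The canonical handler shape, with a hypothetical `genExampleBody`:

    fn airExample(self: *Self, inst: Air.Inst.Index) !void {
        const bin_op = self.air.instructions.items(.data)[inst].bin_op;
        const result: MCValue = if (self.liveness.isUnused(inst))
            .dead // no work, but operand deaths are still processed below
        else
            try self.genExampleBody(inst, bin_op.lhs, bin_op.rhs);
        return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none });
    }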
+ fn finishAirBookkeeping(self: *Self) void { + if (std.debug.runtime_safety) { + self.air_bookkeeping += 1; + } + } + + fn finishAir(self: *Self, inst: Air.Inst.Index, result: MCValue, operands: [Liveness.bpi - 1]Air.Inst.Ref) void { + var tomb_bits = self.liveness.getTombBits(inst); + for (operands) |op| { + const dies = @truncate(u1, tomb_bits) != 0; + tomb_bits >>= 1; + if (!dies) continue; + const op_int = @enumToInt(op); + if (op_int < Air.Inst.Ref.typed_value_map.len) continue; + const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); + self.processDeath(operand); + } + const is_used = @truncate(u1, tomb_bits) == 0; + if (is_used) { + log.debug("{} => {}", .{ inst, result }); + const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacityNoClobber(inst, result); + } + self.finishAirBookkeeping(); + } + fn ensureProcessDeathCapacity(self: *Self, additional_count: usize) !void { const table = &self.branch_stack.items[self.branch_stack.items.len - 1].inst_table; - try table.ensureCapacity(self.gpa, table.count() + additional_count); + try table.ensureUnusedCapacity(self.gpa, additional_count); } /// Adds a Type to the .debug_info at the current position. The bytes will be populated later, @@ -860,83 +960,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genFuncInst(self: *Self, inst: Air.Inst.Index) !MCValue { - const air_tags = self.air.instructions.items(.tag); - switch (air_tags[inst]) { - // zig fmt: off - //.add => return self.genAdd(inst.castTag(.add).?), - //.addwrap => return self.genAddWrap(inst.castTag(.addwrap).?), - //.sub => return self.genSub(inst.castTag(.sub).?), - //.subwrap => return self.genSubWrap(inst.castTag(.subwrap).?), - //.mul => return self.genMul(inst.castTag(.mul).?), - //.mulwrap => return self.genMulWrap(inst.castTag(.mulwrap).?), - //.div => return self.genDiv(inst.castTag(.div).?), - - //.cmp_lt => return self.genCmp(inst.castTag(.cmp_lt).?, .lt), - //.cmp_lte => return self.genCmp(inst.castTag(.cmp_lte).?, .lte), - //.cmp_eq => return self.genCmp(inst.castTag(.cmp_eq).?, .eq), - //.cmp_gte => return self.genCmp(inst.castTag(.cmp_gte).?, .gte), - //.cmp_gt => return self.genCmp(inst.castTag(.cmp_gt).?, .gt), - //.cmp_neq => return self.genCmp(inst.castTag(.cmp_neq).?, .neq), - - //.bool_and => return self.genBoolOp(inst.castTag(.bool_and).?), - //.bool_or => return self.genBoolOp(inst.castTag(.bool_or).?), - //.bit_and => return self.genBitAnd(inst.castTag(.bit_and).?), - //.bit_or => return self.genBitOr(inst.castTag(.bit_or).?), - //.xor => return self.genXor(inst.castTag(.xor).?), - - //.alloc => return self.genAlloc(inst.castTag(.alloc).?), - //.arg => return self.genArg(inst.castTag(.arg).?), - //.assembly => return self.genAsm(inst.castTag(.assembly).?), - //.bitcast => return self.genBitCast(inst.castTag(.bitcast).?), - //.block => return self.genBlock(inst.castTag(.block).?), - //.br => return self.genBr(inst.castTag(.br).?), - //.br_block_flat => return self.genBrBlockFlat(inst.castTag(.br_block_flat).?), - //.breakpoint => return self.genBreakpoint(inst.src), - //.call => return self.genCall(inst.castTag(.call).?), - //.cond_br => return self.genCondBr(inst.castTag(.condbr).?), - //.dbg_stmt => return self.genDbgStmt(inst.castTag(.dbg_stmt).?), - //.floatcast => return self.genFloatCast(inst.castTag(.floatcast).?), - //.intcast => return self.genIntCast(inst.castTag(.intcast).?), - //.is_non_null => return 
self.genIsNonNull(inst.castTag(.is_non_null).?), - //.is_non_null_ptr => return self.genIsNonNullPtr(inst.castTag(.is_non_null_ptr).?), - //.is_null => return self.genIsNull(inst.castTag(.is_null).?), - //.is_null_ptr => return self.genIsNullPtr(inst.castTag(.is_null_ptr).?), - //.is_non_err => return self.genIsNonErr(inst.castTag(.is_non_err).?), - //.is_non_err_ptr => return self.genIsNonErrPtr(inst.castTag(.is_non_err_ptr).?), - //.is_err => return self.genIsErr(inst.castTag(.is_err).?), - //.is_err_ptr => return self.genIsErrPtr(inst.castTag(.is_err_ptr).?), - //.load => return self.genLoad(inst.castTag(.load).?), - //.loop => return self.genLoop(inst.castTag(.loop).?), - //.not => return self.genNot(inst.castTag(.not).?), - //.ptrtoint => return self.genPtrToInt(inst.castTag(.ptrtoint).?), - //.ref => return self.genRef(inst.castTag(.ref).?), - //.ret => return self.genRet(inst.castTag(.ret).?), - //.store => return self.genStore(inst.castTag(.store).?), - //.struct_field_ptr=> return self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - //.switch_br => return self.genSwitch(inst.castTag(.switchbr).?), - //.varptr => return self.genVarPtr(inst.castTag(.varptr).?), - - //.constant => unreachable, // excluded from function bodies - //.unreach => return MCValue{ .unreach = {} }, - - //.optional_payload => return self.genOptionalPayload(inst.castTag(.optional_payload).?), - //.optional_payload_ptr => return self.genOptionalPayloadPtr(inst.castTag(.optional_payload_ptr).?), - //.unwrap_errunion_err => return self.genUnwrapErrErr(inst.castTag(.unwrap_errunion_err).?), - //.unwrap_errunion_payload => return self.genUnwrapErrPayload(inst.castTag(.unwrap_errunion_payload).?), - //.unwrap_errunion_err_ptr => return self.genUnwrapErrErrPtr(inst.castTag(.unwrap_errunion_err_ptr).?), - //.unwrap_errunion_payload_ptr=> return self.genUnwrapErrPayloadPtr(inst.castTag(.unwrap_errunion_payload_ptr).?), - - //.wrap_optional => return self.genWrapOptional(inst.castTag(.wrap_optional).?), - //.wrap_errunion_payload => return self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - //.wrap_errunion_err => return self.genWrapErrUnionErr(inst.castTag(.wrap_errunion_err).?), - - // zig fmt: on - - else => @panic("TODO finish air memory layout branch, more codegen.zig instructions"), - } - } - fn allocMem(self: *Self, inst: Air.Inst.Index, abi_size: u32, abi_align: u32) !u32 { if (abi_align > self.stack_align) self.stack_align = abi_align; @@ -954,7 +977,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Use a pointer instruction as the basis for allocating stack memory. 
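With the heap-allocated `Inst` structs gone, `inst` is a bare index and types must be queried from the AIR arrays, as `allocMemPtr` just below and `airIntCast` further down both do. A small sketch of the two queries (helper name illustrative):

    fn resultAndOperandTypes(self: *Self, inst: Air.Inst.Index) [2]Type {
        const ty_op = self.air.instructions.items(.data)[inst].ty_op;
        // typeOfIndex answers "what does this instruction produce";
        // typeOf answers "what is the type behind this operand Ref".
        return .{ self.air.typeOfIndex(inst), self.air.typeOf(ty_op.operand) };
    }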
fn allocMemPtr(self: *Self, inst: Air.Inst.Index) !u32 { - const elem_ty = self.air.getType(inst).elemType(); + const elem_ty = self.air.typeOfIndex(inst).elemType(); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; @@ -964,7 +987,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn allocRegOrMem(self: *Self, inst: Air.Inst.Index, reg_ok: bool) !MCValue { - const elem_ty = inst.ty; + const elem_ty = self.air.typeOfIndex(inst); const abi_size = math.cast(u32, elem_ty.abiSize(self.target.*)) catch { return self.fail("type '{}' too big to fit into stack frame", .{elem_ty}); }; @@ -993,7 +1016,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { assert(reg == toCanonicalReg(reg_mcv.register)); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; try branch.inst_table.put(self.gpa, inst, stack_mcv); - try self.genSetStack(inst.ty, stack_mcv.stack_offset, reg_mcv); + try self.genSetStack(self.air.typeOfIndex(inst), stack_mcv.stack_offset, reg_mcv); } /// Copies a value to a register without tracking the register. The register is not considered @@ -1010,281 +1033,274 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// This can have a side effect of spilling instructions to the stack to free up a register. fn copyToNewRegister(self: *Self, reg_owner: Air.Inst.Index, mcv: MCValue) !MCValue { const reg = try self.register_manager.allocReg(reg_owner, &.{}); - try self.genSetReg(reg_owner.ty, reg, mcv); + try self.genSetReg(self.air.typeOfIndex(reg_owner), reg, mcv); return MCValue{ .register = reg }; } - fn genAlloc(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airAlloc(self: *Self, inst: Air.Inst.Index) !void { const stack_offset = try self.allocMemPtr(inst); - return MCValue{ .ptr_stack_offset = stack_offset }; + return self.finishAir(inst, .{ .ptr_stack_offset = stack_offset }, .{ .none, .none, .none }); } - fn genFloatCast(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { - else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}), - } - } - - fn genIntCast(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - + fn airFloatCast(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.getType(ty_op.operand); + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement floatCast for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); + } + + fn airIntCast(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + if (self.liveness.isUnused(inst)) + return self.finishAir(inst, .dead, .{ ty_op.operand, .none, .none }); + + const operand_ty = self.air.typeOf(ty_op.operand); const operand = try self.resolveInst(ty_op.operand); const info_a = operand_ty.intInfo(self.target.*); - const info_b = self.air.getType(inst).intInfo(self.target.*); + const info_b = self.air.typeOfIndex(inst).intInfo(self.target.*); if (info_a.signedness != info_b.signedness) return self.fail("TODO gen intcast sign safety in semantic analysis", .{}); if (info_a.bits == info_b.bits) - return operand; + return self.finishAir(inst, operand, .{ ty_op.operand, .none, .none }); - switch (arch) { + const result: MCValue = switch (arch) { else => return self.fail("TODO implement intCast for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genNot(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airNot(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand = try self.resolveInst(ty_op.operand); - switch (operand) { - .dead => unreachable, - .unreach => unreachable, - .compare_flags_unsigned => |op| return MCValue{ - .compare_flags_unsigned = switch (op) { - .gte => .lt, - .gt => .lte, - .neq => .eq, - .lt => .gte, - .lte => .gt, - .eq => .neq, + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(ty_op.operand); + switch (operand) { + .dead => unreachable, + .unreach => unreachable, + .compare_flags_unsigned => |op| { + const r = MCValue{ + .compare_flags_unsigned = switch (op) { + .gte => .lt, + .gt => .lte, + .neq => .eq, + .lt => .gte, + .lte => .gt, + .eq => .neq, + }, + }; + break :result r; }, - }, - .compare_flags_signed => |op| return MCValue{ - .compare_flags_signed = switch (op) { - .gte => .lt, - .gt => .lte, - .neq => .eq, - .lt => .gte, - .lte => .gt, - .eq => .neq, + .compare_flags_signed => |op| { + const r = MCValue{ + .compare_flags_signed = switch (op) { + .gte => .lt, + .gt => .lte, + .neq => .eq, + .lt => .gte, + .lte => .gt, + .eq => .neq, + }, + }; + break :result r; }, - }, - else => {}, - } + else => {}, + } - switch (arch) { - .x86_64 => { - return try self.genX8664BinMath(inst, ty_op.operand, .bool_true); - }, - .arm, .armeb => { - return try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not); - }, - else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}), - } + switch (arch) { + .x86_64 => { + break :result try self.genX8664BinMath(inst, ty_op.operand, .bool_true); + }, + .arm, .armeb => { + break :result try self.genArmBinOp(inst, ty_op.operand, .bool_true, .not); + }, + else => return self.fail("TODO implement NOT for {}", .{self.target.cpu.arch}), + } + }; 
+ return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genAdd(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airAdd(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => { - return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs); - }, - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .add), else => return self.fail("TODO implement add for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genAddWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airAddWrap(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement addwrap for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genMul(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airSub(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), - .arm, .armeb => return try self.genArmMul(inst, bin_op.lhs, bin_op.rhs), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub), + else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + fn airSubWrap(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}), + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); + } + + fn airMul(self: *Self, inst: Air.Inst.Index) !void { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .x86_64 => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .arm, .armeb => try self.genArmMul(inst, bin_op.lhs, bin_op.rhs), else => return self.fail("TODO implement mul for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genMulWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airMulWrap(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement mulwrap for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genDiv(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airDiv(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement div for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genBitAnd(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBitAnd(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_and), else => return self.fail("TODO implement bitwise and for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genBitOr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBitOr(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bit_or), else => return self.fail("TODO implement bitwise or for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genXor(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airXor(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .arm, .armeb => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor), + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { + .arm, .armeb => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .xor), else => return self.fail("TODO implement xor for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genOptionalPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airOptionalPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement .optional_payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airOptionalPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement .optional_payload_ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrErr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union error for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } + // *(E!T) -> E - fn genUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union error ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } + // *(E!T) -> *T - fn genUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airUnwrapErrPayloadPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement unwrap error union payload ptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genWrapOptional(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const optional_ty = self.air.getType(inst); + fn airWrapOptional(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const optional_ty = self.air.typeOfIndex(inst); - // Optional type is just a boolean true - if (optional_ty.abiSize(self.target.*) == 1) - return MCValue{ .immediate = 1 }; + // Optional with a zero-bit payload type is just a boolean true + if (optional_ty.abiSize(self.target.*) == 1) + break :result MCValue{ .immediate = 1 }; - switch (arch) { - else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}), - } + switch (arch) { + else => return self.fail("TODO implement wrap optional for {}", .{self.target.cpu.arch}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// T to E!T - fn genWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - - switch (arch) { + fn airWrapErrUnionPayload(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement wrap errunion payload for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } /// E to E!T - fn genWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - - switch (arch) { + fn airWrapErrUnionErr(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement wrap errunion error for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genVarPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { + fn airVarPtr(self: *Self, inst: Air.Inst.Index) !void { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { else => return self.fail("TODO implement varptr for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ .none, .none, .none }); } - fn reuseOperand(self: *Self, inst: Air.Inst.Index, op_index: u2, mcv: MCValue) bool { + fn reuseOperand(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, op_index: Liveness.OperandInt, mcv: MCValue) bool { if (!self.liveness.operandDies(inst, op_index)) return false; @@ -1310,12 +1326,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // That makes us responsible for doing the rest of the stuff that processDeath would have done. const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; - branch.inst_table.putAssumeCapacity(inst.getOperand(op_index).?, .dead); + branch.inst_table.putAssumeCapacity(Air.refToIndex(operand).?, .dead); return true; } - fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue) !void { + fn load(self: *Self, dst_mcv: MCValue, ptr: MCValue, ptr_ty: Type) !void { + const elem_ty = ptr_ty.elemType(); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1343,31 +1360,37 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genLoad(self: *Self, inst: Air.Inst.Index) !MCValue { - const elem_ty = self.air.getType(inst); - if (!elem_ty.hasCodeGenBits()) - return MCValue.none; - const ptr = try self.resolveInst(inst.operand); - const is_volatile = inst.operand.ty.isVolatilePtr(); - if (self.liveness.isUnused(inst) and !is_volatile) - return MCValue.dead; - const dst_mcv: MCValue = blk: { - if (self.reuseOperand(inst, 0, ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + fn airLoad(self: *Self, inst: Air.Inst.Index) !void { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const elem_ty = self.air.typeOfIndex(inst); + const result: MCValue = result: { + if (!elem_ty.hasCodeGenBits()) + break :result MCValue.none; + + const ptr = try self.resolveInst(ty_op.operand); + const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + if (self.liveness.isUnused(inst) and !is_volatile) + break :result MCValue.dead; + + const dst_mcv: MCValue = blk: { + if (self.reuseOperand(inst, ty_op.operand, 0, ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
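+                    // reuseOperand records the dying operand in the branch's
+                    // inst_table (via Air.refToIndex), which is why the
+                    // operand Ref is passed alongside its operand index.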
+ break :blk ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(dst_mcv, ptr, self.air.typeOf(ty_op.operand)); + break :result dst_mcv; }; - self.load(dst_mcv, ptr); - return dst_mcv; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn genStore(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airStore(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const ptr = try self.resolveInst(bin_op.lhs); const value = try self.resolveInst(bin_op.rhs); - const elem_ty = self.getType(bin_op.rhs); + const elem_ty = self.air.typeOf(bin_op.rhs); switch (ptr) { .none => unreachable, .undef => unreachable, @@ -1397,36 +1420,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("TODO implement storing to MCValue.stack_offset", .{}); }, } - return .none; + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genStructFieldPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - const struct_field_ptr = self.air.instructions.items(.data)[inst].struct_field_ptr; - _ = struct_field_ptr; + fn airStructFieldPtr(self: *Self, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.StructField, ty_pl.payload).data; + _ = extra; return self.fail("TODO implement codegen struct_field_ptr", .{}); - } - - fn genSub(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - switch (arch) { - .x86_64 => return self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), - .arm, .armeb => return self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .sub), - else => return self.fail("TODO implement sub for {}", .{self.target.cpu.arch}), - } - } - - fn genSubWrap(self: *Self, inst: Air.Inst.Index) !MCValue { - // No side effects, so if it's unreferenced, do nothing. 
- if (self.liveness.isUnused(inst)) - return MCValue.dead; - const bin_op = self.air.instructions.items(.data)[inst].bin_op; - _ = bin_op; - switch (arch) { - else => return self.fail("TODO implement subwrap for {}", .{self.target.cpu.arch}), - } + //return self.finishAir(inst, result, .{ extra.struct_ptr, .none, .none }); } fn armOperandShouldBeRegister(self: *Self, mcv: MCValue) !bool { @@ -1461,8 +1463,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const rhs_is_register = rhs == .register; const lhs_should_be_register = try self.armOperandShouldBeRegister(lhs); const rhs_should_be_register = try self.armOperandShouldBeRegister(rhs); - const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); - const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); + const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs); + const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs); // Destination must be a register var dst_mcv: MCValue = undefined; @@ -1476,14 +1478,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate 0 or 1 registers if (!rhs_is_register and rhs_should_be_register) { rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register and lhs_should_be_register) { lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; - branch.inst_table.putAssumeCapacity(op_lhs, lhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1508,7 +1510,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } } else if (lhs_should_be_register) { // RHS is immediate @@ -1605,14 +1607,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Index, op_rhs: Air.Inst.Index) !MCValue { + fn genArmMul(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref) !MCValue { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; - const reuse_lhs = lhs_is_register and self.reuseOperand(inst, 0, lhs); - const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, 1, rhs); + const reuse_lhs = lhs_is_register and self.reuseOperand(inst, op_lhs, 0, lhs); + const reuse_rhs = !reuse_lhs and rhs_is_register and self.reuseOperand(inst, op_rhs, 1, rhs); // Destination must be a register // LHS must be a register @@ -1627,14 +1629,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate 0 or 1 registers if (!rhs_is_register) { rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register) { lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; - 
branch.inst_table.putAssumeCapacity(op_lhs, lhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; } else { @@ -1656,7 +1658,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; - branch.inst_table.putAssumeCapacity(op_rhs, rhs_mcv); + branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } } @@ -1698,8 +1700,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // as the result MCValue. var dst_mcv: MCValue = undefined; var src_mcv: MCValue = undefined; - var src_inst: Air.Inst.Index = undefined; - if (self.reuseOperand(inst, 0, lhs)) { + var src_inst: Air.Inst.Ref = undefined; + if (self.reuseOperand(inst, op_lhs, 0, lhs)) { // LHS dies; use it as the destination. // Both operands cannot be memory. src_inst = op_rhs; @@ -1710,7 +1712,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { dst_mcv = lhs; src_mcv = rhs; } - } else if (self.reuseOperand(inst, 1, rhs)) { + } else if (self.reuseOperand(inst, op_rhs, 1, rhs)) { // RHS dies; use it as the destination. // Both operands cannot be memory. src_inst = op_lhs; @@ -1747,16 +1749,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } // Now for step 2, we perform the actual op + const inst_ty = self.air.typeOfIndex(inst); const air_tags = self.air.instructions.items(.tag); switch (air_tags[inst]) { // TODO: Generate wrapping and non-wrapping versions separately - .add, .addwrap => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 0, 0x00), - .bool_or, .bit_or => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 1, 0x08), - .bool_and, .bit_and => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 4, 0x20), - .sub, .subwrap => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 5, 0x28), - .xor, .not => try self.genX8664BinMathCode(inst.ty, dst_mcv, src_mcv, 6, 0x30), + .add, .addwrap => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 0, 0x00), + .bool_or, .bit_or => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 1, 0x08), + .bool_and, .bit_and => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 4, 0x20), + .sub, .subwrap => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 5, 0x28), + .xor, .not => try self.genX8664BinMathCode(inst_ty, dst_mcv, src_mcv, 6, 0x30), - .mul, .mulwrap => try self.genX8664Imul(inst.src, inst.ty, dst_mcv, src_mcv), + .mul, .mulwrap => try self.genX8664Imul(inst_ty, dst_mcv, src_mcv), else => unreachable, } @@ -1958,7 +1961,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .ptr_stack_offset => unreachable, .ptr_embedded_in_code => unreachable, .register => |src_reg| { - try self.genX8664ModRMRegToStack(src, dst_ty, off, src_reg, mr + 0x1); + try self.genX8664ModRMRegToStack(dst_ty, off, src_reg, mr + 0x1); }, .immediate => |imm| { _ = imm; @@ -1984,7 +1987,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// Performs integer multiplication between dst_mcv and src_mcv, storing the result in dst_mcv. 
fn genX8664Imul( self: *Self, - src: LazySrcLoc, dst_ty: Type, dst_mcv: MCValue, src_mcv: MCValue, @@ -2067,7 +2069,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { encoder.imm32(@intCast(i32, imm)); } else { const src_reg = try self.copyToTmpRegister(dst_ty, src_mcv); - return self.genX8664Imul(src, dst_ty, dst_mcv, MCValue{ .register = src_reg }); + return self.genX8664Imul(dst_ty, dst_mcv, MCValue{ .register = src_reg }); } }, .embedded_in_code, .memory, .stack_offset => { @@ -2163,7 +2165,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const ty_str = self.air.instruction.items(.data)[inst].ty_str; + const ty_str = self.air.instructions.items(.data)[inst].ty_str; const zir = &self.mod_fn.owner_decl.namespace.file_scope.zir; const name = zir.nullTerminatedString(ty_str.str); const name_with_null = name.ptr[0 .. name.len + 1]; @@ -2224,11 +2226,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genArg(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airArg(self: *Self, inst: Air.Inst.Index) !void { const arg_index = self.arg_index; self.arg_index += 1; - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const result = self.args[arg_index]; const mcv = switch (arch) { @@ -2252,7 +2254,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.genArgDbgInfo(inst, mcv); if (self.liveness.isUnused(inst)) - return MCValue.dead; + return self.finishAirBookkeeping(); switch (mcv) { .register => |reg| { @@ -2261,10 +2263,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { else => {}, } - return mcv; + return self.finishAir(inst, mcv, .{ .none, .none, .none }); } - fn genBreakpoint(self: *Self) !MCValue { + fn airBreakpoint(self: *Self) !void { switch (arch) { .i386, .x86_64 => { try self.code.append(0xcc); // int3 @@ -2280,15 +2282,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail("TODO implement @breakpoint() for {}", .{self.target.cpu.arch}), } - return .none; + return self.finishAirBookkeeping(); } - fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue { - const pl_op = self.air.instruction.items(.data)[inst].pl_op; - const fn_ty = self.air.getType(pl_op.operand); + fn airCall(self: *Self, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const fn_ty = self.air.typeOf(pl_op.operand); const callee = pl_op.operand; - const extra = self.air.extraData(Air.Call, inst_data.payload); - const args = self.air.extra[extra.end..][0..extra.data.args_len]; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); var info = try self.resolveCallingConventionValues(fn_ty); defer info.deinit(self); @@ -2300,6 +2302,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. 
@@ -2307,12 +2310,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => |off| { // Here we need to emit instructions like this: // mov qword ptr [rsp + stack_offset], x - try self.genSetStack(arg.ty, off, arg_mcv); + try self.genSetStack(arg_ty, off, arg_mcv); }, .ptr_stack_offset => { return self.fail("TODO implement calling with MCValue.ptr_stack_offset arg", .{}); @@ -2389,6 +2392,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .arm, .armeb => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -2403,7 +2407,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2452,6 +2456,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { @@ -2466,7 +2471,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2510,6 +2515,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } else if (self.bin_file.cast(link.File.MachO)) |macho_file| { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. @@ -2521,7 +2527,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64, .aarch64 => try self.register_manager.getReg(reg, null), else => unreachable, } - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { // Here we need to emit instructions like this: @@ -2612,6 +2618,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .x86_64 => { for (info.args) |mc_arg, arg_i| { const arg = args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(args[arg_i]); // Here we do not use setRegOrMem even though the logic is similar, because // the function call will move the stack pointer, so the offsets are different. 
@@ -2619,7 +2626,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .none => continue, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { // Here we need to emit instructions like this: @@ -2661,6 +2668,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .aarch64 => { for (info.args) |mc_arg, arg_i| { const arg = inst.args[arg_i]; + const arg_ty = self.air.typeOf(arg); const arg_mcv = try self.resolveInst(inst.args[arg_i]); switch (mc_arg) { @@ -2675,7 +2683,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .compare_flags_unsigned => unreachable, .register => |reg| { try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.src, arg.ty, reg, arg_mcv); + try self.genSetReg(arg_ty, reg, arg_mcv); }, .stack_offset => { return self.fail("TODO implement calling with parameters in memory", .{}); @@ -2696,7 +2704,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const got_index = func_payload.data.owner_decl.link.plan9.got_index.?; const fn_got_addr = got_addr + got_index * ptr_bytes; - try self.genSetReg(inst.base.src, Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); + try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = fn_got_addr }); writeInt(u32, try self.code.addManyAsArray(4), Instruction.blr(.x30).toU32()); } else if (func_value.castTag(.extern_fn)) |_| { @@ -2712,51 +2720,61 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } else unreachable; - switch (info.return_value) { - .register => |reg| { - if (Register.allocIndex(reg) == null) { - // Save function return value in a callee saved register - return try self.copyToNewRegister(inst, info.return_value); - } - }, - else => {}, - } + const result: MCValue = result: { + switch (info.return_value) { + .register => |reg| { + if (Register.allocIndex(reg) == null) { + // Save function return value in a callee saved register + break :result try self.copyToNewRegister(inst, info.return_value); + } + }, + else => {}, + } + break :result info.return_value; + }; - return info.return_value; + if (args.len <= Liveness.bpi - 2) { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + buf[0] = callee; + std.mem.copy(Air.Inst.Ref, buf[1..], args); + return self.finishAir(inst, result, buf); + } + @panic("TODO: codegen for function call with greater than 2 args"); } - fn genRef(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airRef(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - const operand_ty = self.air.getType(ty_op.operand); - const operand = try self.resolveInst(ty_op.operand); - switch (operand) { - .unreach => unreachable, - .dead => unreachable, - .none => return .none, + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ty = self.air.typeOf(ty_op.operand); + const operand = try self.resolveInst(ty_op.operand); + switch (operand) { + .unreach => unreachable, + .dead => unreachable, + .none => break :result MCValue{ .none = {} }, - .immediate, - .register, - .ptr_stack_offset, - .ptr_embedded_in_code, - .compare_flags_unsigned, - .compare_flags_signed, - => { - const stack_offset = try self.allocMemPtr(inst); - try self.genSetStack(operand_ty, stack_offset, operand); - return MCValue{ .ptr_stack_offset = stack_offset }; - }, + .immediate, + .register, + .ptr_stack_offset, + 
.ptr_embedded_in_code, + .compare_flags_unsigned, + .compare_flags_signed, + => { + const stack_offset = try self.allocMemPtr(inst); + try self.genSetStack(operand_ty, stack_offset, operand); + break :result MCValue{ .ptr_stack_offset = stack_offset }; + }, - .stack_offset => |offset| return MCValue{ .ptr_stack_offset = offset }, - .embedded_in_code => |offset| return MCValue{ .ptr_embedded_in_code = offset }, - .memory => |vaddr| return MCValue{ .immediate = vaddr }, + .stack_offset => |offset| break :result MCValue{ .ptr_stack_offset = offset }, + .embedded_in_code => |offset| break :result MCValue{ .ptr_embedded_in_code = offset }, + .memory => |vaddr| break :result MCValue{ .immediate = vaddr }, - .undef => return self.fail("TODO implement ref on an undefined value", .{}), - } + .undef => return self.fail("TODO implement ref on an undefined value", .{}), + } + }; + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn ret(self: *Self, mcv: MCValue) !MCValue { + fn ret(self: *Self, mcv: MCValue) !void { const ret_ty = self.fn_type.fnReturnType(); try self.setRegOrMem(ret_ty, self.ret_mcv, mcv); switch (arch) { @@ -2786,28 +2804,28 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail("TODO implement return for {}", .{self.target.cpu.arch}), } - return .unreach; } - fn genRet(self: *Self, inst: Air.Inst.Index) !MCValue { - const operand = try self.resolveInst(self.air.instructions.items(.data)[inst].un_op); - return self.ret(inst.base.src, operand); + fn airRet(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); + try self.ret(operand); + return self.finishAirBookkeeping(); } - fn genCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !MCValue { - // No side effects, so if it's unreferenced, do nothing. - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; - const ty = self.air.getType(bin_op.lhs); - assert(ty.eql(self.air.getType(bin_op.rhs))); + if (self.liveness.isUnused(inst)) + return self.finishAir(inst, .dead, .{ bin_op.lhs, bin_op.rhs, .none }); + const ty = self.air.typeOf(bin_op.lhs); + assert(ty.eql(self.air.typeOf(bin_op.rhs))); if (ty.zigTypeTag() == .ErrorSet) return self.fail("TODO implement cmp for errors", .{}); const lhs = try self.resolveInst(bin_op.lhs); const rhs = try self.resolveInst(bin_op.rhs); - switch (arch) { - .x86_64 => { + const result: MCValue = switch (arch) { + .x86_64 => result: { try self.code.ensureCapacity(self.code.items.len + 8); // There are 2 operands, destination and source. 
@@ -2822,12 +2840,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.genX8664BinMathCode(Type.initTag(.bool), dst_mcv, src_mcv, 7, 0x38); const info = ty.intInfo(self.target.*); - return switch (info.signedness) { + break :result switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, - .arm, .armeb => { + .arm, .armeb => result: { const lhs_is_register = lhs == .register; const rhs_is_register = rhs == .register; // lhs should always be a register @@ -2854,39 +2872,40 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; if (lhs_mcv == .register and !lhs_is_register) { try self.genSetReg(ty, lhs_mcv.register, lhs); - branch.inst_table.putAssumeCapacity(bin_op.lhs, lhs); + branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.lhs).?, lhs); } if (rhs_mcv == .register and !rhs_is_register) { try self.genSetReg(ty, rhs_mcv.register, rhs); - branch.inst_table.putAssumeCapacity(bin_op.rhs, rhs); + branch.inst_table.putAssumeCapacity(Air.refToIndex(bin_op.rhs).?, rhs); } // The destination register is not present in the cmp instruction try self.genArmBinOpCode(undefined, lhs_mcv, rhs_mcv, false, .cmp_eq); const info = ty.intInfo(self.target.*); - return switch (info.signedness) { + break :result switch (info.signedness) { .signed => MCValue{ .compare_flags_signed = op }, .unsigned => MCValue{ .compare_flags_unsigned = op }, }; }, else => return self.fail("TODO implement cmp for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn genDbgStmt(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airDbgStmt(self: *Self, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; try self.dbgAdvancePCAndLine(dbg_stmt.line, dbg_stmt.column); - assert(self.liveness.isUnused(inst)); - return MCValue.dead; + return self.finishAirBookkeeping(); } - fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airCondBr(self: *Self, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); - const extra = self.air.extraData(Air.CondBr, inst_data.payload); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + const liveness_condbr = self.liveness.getCondBr(inst); const reloc: Reloc = switch (arch) { .i386, .x86_64 => reloc: { @@ -2985,9 +3004,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.branch_stack.append(.{}); - const then_deaths = self.liveness.thenDeaths(inst); - try self.ensureProcessDeathCapacity(then_deaths.len); - for (then_deaths) |operand| { + try self.ensureProcessDeathCapacity(liveness_condbr.then_deaths.len); + for (liveness_condbr.then_deaths) |operand| { self.processDeath(operand); } try self.genBody(then_body); @@ -3010,9 +3028,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const else_branch = self.branch_stack.addOneAssumeCapacity(); else_branch.* = .{}; - const else_deaths = self.liveness.elseDeaths(inst); - try self.ensureProcessDeathCapacity(else_deaths.len); - for (else_deaths) |operand| { + try self.ensureProcessDeathCapacity(liveness_condbr.else_deaths.len); + for (liveness_condbr.else_deaths) |operand| { 
self.processDeath(operand); } try self.genBody(else_body); @@ -3026,8 +3043,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // assert that parent_branch.free_registers equals the saved_then_branch.free_registers // rather than assigning it. const parent_branch = &self.branch_stack.items[self.branch_stack.items.len - 2]; - try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() + - else_branch.inst_table.count()); + try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, else_branch.inst_table.count()); const else_slice = else_branch.inst_table.entries.slice(); const else_keys = else_slice.items(.key); @@ -3058,11 +3074,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating else_entry {*} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(else_key.ty, canon_mcv, else_value); + try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); // TODO track the new register / stack allocation } - try parent_branch.inst_table.ensureCapacity(self.gpa, parent_branch.inst_table.count() + - saved_then_branch.inst_table.count()); + try parent_branch.inst_table.ensureUnusedCapacity(self.gpa, saved_then_branch.inst_table.count()); const then_slice = saved_then_branch.inst_table.entries.slice(); const then_keys = then_slice.items(.key); const then_values = then_slice.items(.value); @@ -3086,13 +3101,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { log.debug("consolidating then_entry {*} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. - try self.setRegOrMem(then_key.ty, parent_mcv, then_value); + try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); // TODO track the new register / stack allocation } self.branch_stack.pop().deinit(self.gpa); - return MCValue.unreach; + return self.finishAir(inst, .unreach, .{ pl_op.operand, .none, .none }); } fn isNull(self: *Self, operand: MCValue) !MCValue { @@ -3131,107 +3146,115 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNull(operand); - } - - fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNull(operand); }; - try self.load(operand, ptr); - return self.isNull(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNonNull(operand); - } - - fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNull(operand); }; - try self.load(operand, ptr); - return self.isNonNull(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonNull(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isErr(operand); - } - - fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. 
- break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNonNull(operand); }; - try self.load(operand, ptr); - return self.isErr(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); - return self.isNonErr(operand); - } - - fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; - const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand_ptr = try self.resolveInst(un_op); - const operand: MCValue = blk: { - if (self.reuseOperand(inst, 0, operand_ptr)) { - // The MCValue that holds the pointer can be re-used as the value. - break :blk operand_ptr; - } else { - break :blk try self.allocRegOrMem(inst, true); - } + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNonNull(operand); }; - try self.load(operand, ptr); - return self.isNonErr(operand); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airIsErr(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isErr(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); + } + + fn airIsErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. 
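+                    // Reuse is only possible when the pointer dies at this
+                    // instruction, so its slot is free to receive the value
+                    // loaded through it below.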
+ break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isErr(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); + } + + fn airIsNonErr(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand = try self.resolveInst(un_op); + break :result try self.isNonErr(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); + } + + fn airIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !void { + const un_op = self.air.instructions.items(.data)[inst].un_op; + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else result: { + const operand_ptr = try self.resolveInst(un_op); + const operand: MCValue = blk: { + if (self.reuseOperand(inst, un_op, 0, operand_ptr)) { + // The MCValue that holds the pointer can be re-used as the value. + break :blk operand_ptr; + } else { + break :blk try self.allocRegOrMem(inst, true); + } + }; + try self.load(operand, operand_ptr, self.air.typeOf(un_op)); + break :result try self.isNonErr(operand); + }; + return self.finishAir(inst, result, .{ un_op, .none, .none }); + } + + fn airLoop(self: *Self, inst: Air.Inst.Index) !void { // A loop is a setup to be able to jump back to the beginning. const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); @@ -3239,7 +3262,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const start_index = self.code.items.len; try self.genBody(body); try self.jump(start_index); - return MCValue.unreach; + return self.finishAirBookkeeping(); } /// Send control flow to the `index` of `self.code`. @@ -3274,7 +3297,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBlock(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airBlock(self: *Self, inst: Air.Inst.Index) !void { try self.blocks.putNoClobber(self.gpa, inst, .{ // A block is a setup to be able to jump to the end. 
.relocs = .{}, @@ -3288,21 +3311,24 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const block_data = self.blocks.getPtr(inst).?; defer block_data.relocs.deinit(self.gpa); - const ty_pl = self.air.instructions.items(.data).ty_pl; + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; try self.genBody(body); for (block_data.relocs.items) |reloc| try self.performReloc(reloc); - return @bitCast(MCValue, block_data.mcv); + const result = @bitCast(MCValue, block_data.mcv); + return self.finishAir(inst, result, .{ .none, .none, .none }); } - fn genSwitch(self: *Self, inst: Air.Inst.Index) !MCValue { - _ = inst; + fn airSwitch(self: *Self, inst: Air.Inst.Index) !void { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const condition = pl_op.operand; switch (arch) { - else => return self.fail("TODO genSwitch for {}", .{self.target.cpu.arch}), + else => return self.fail("TODO airSwitch for {}", .{self.target.cpu.arch}), } + return self.finishAir(inst, .dead, .{ condition, .none, .none }); } fn performReloc(self: *Self, reloc: Reloc) !void { @@ -3335,54 +3361,49 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genBrBlockFlat(self: *Self, inst: Air.Inst.Index) !MCValue { - try self.genBody(inst.body); - const last = inst.body.instructions[inst.body.instructions.len - 1]; - return self.br(inst.block, last); + fn airBr(self: *Self, inst: Air.Inst.Index) !void { + const branch = self.air.instructions.items(.data)[inst].br; + try self.br(branch.block_inst, branch.operand); + return self.finishAirBookkeeping(); } - fn genBr(self: *Self, inst: Air.Inst.Index) !MCValue { - return self.br(inst.block, inst.operand); - } - - fn genBoolOp(self: *Self, inst: Air.Inst.Index) !MCValue { - if (self.liveness.isUnused(inst)) - return MCValue.dead; + fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const air_tags = self.air.instructions.items(.tag); - switch (arch) { + const result: MCValue = if (self.liveness.isUnused(inst)) .dead else switch (arch) { .x86_64 => switch (air_tags[inst]) { // lhs AND rhs - .bool_and => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .bool_and => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), // lhs OR rhs - .bool_or => return try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), + .bool_or => try self.genX8664BinMath(inst, bin_op.lhs, bin_op.rhs), else => unreachable, // Not a boolean operation }, .arm, .armeb => switch (air_tags[inst]) { - .bool_and => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and), - .bool_or => return try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or), + .bool_and => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_and), + .bool_or => try self.genArmBinOp(inst, bin_op.lhs, bin_op.rhs, .bool_or), else => unreachable, // Not a boolean operation }, else => return self.fail("TODO implement boolean operations for {}", .{self.target.cpu.arch}), - } + }; + return self.finishAir(inst, result, .{ bin_op.lhs, bin_op.rhs, .none }); } - fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Index) !MCValue { + fn br(self: *Self, block: Air.Inst.Index, operand: Air.Inst.Ref) !void { const block_data = self.blocks.getPtr(block).?; - if (operand.ty.hasCodeGenBits()) { + if (self.air.typeOf(operand).hasCodeGenBits()) { const operand_mcv = try 
self.resolveInst(operand); const block_mcv = block_data.mcv; if (block_mcv == .none) { block_data.mcv = operand_mcv; } else { - try self.setRegOrMem(block.base.ty, block_mcv, operand_mcv); + try self.setRegOrMem(self.air.typeOfIndex(block), block_mcv, operand_mcv); } } return self.brVoid(block); } - fn brVoid(self: *Self, block: Air.Inst.Index) !MCValue { + fn brVoid(self: *Self, block: Air.Inst.Index) !void { const block_data = self.blocks.getPtr(block).?; // Emit a jump with a relocation. It will be patched up after the block ends. @@ -3408,131 +3429,170 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, else => return self.fail("TODO implement brvoid for {}", .{self.target.cpu.arch}), } - return .none; } - fn genAsm(self: *Self, inst: Air.Inst.Index) !MCValue { - if (!inst.is_volatile and self.liveness.isUnused(inst)) - return MCValue.dead; - switch (arch) { - .arm, .armeb => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + fn airAsm(self: *Self, inst: Air.Inst.Index) !void { + const air_datas = self.air.instructions.items(.data); + const air_extra = self.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload); + const zir = self.mod_fn.owner_decl.namespace.file_scope.zir; + const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended; + const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand); + const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source); + const outputs_len = @truncate(u5, extended.small); + const args_len = @truncate(u5, extended.small >> 5); + const clobbers_len = @truncate(u5, extended.small >> 10); + _ = clobbers_len; // TODO honor these + const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const outputs = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end..][0..outputs_len]); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[air_extra.end + outputs.len ..][0..args_len]); + + if (outputs_len > 1) { + return self.fail("TODO implement codegen for asm with more than 1 output", .{}); + } + var extra_i: usize = zir_extra.end; + const output_constraint: ?[]const u8 = out: { + var i: usize = 0; + while (i < outputs_len) : (i += 1) { + const output = zir.extraData(Zir.Inst.Asm.Output, extra_i); + extra_i = output.end; + break :out zir.nullTerminatedString(output.data.constraint); + } + break :out null; + }; + + const dead = !is_volatile and self.liveness.isUnused(inst); + const result: MCValue = if (dead) .dead else switch (arch) { + .arm, .armeb => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. 
constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "svc #0")) { + if (mem.eql(u8, asm_source, "svc #0")) { writeInt(u32, try self.code.addManyAsArray(4), Instruction.svc(.al, 0).toU32()); } else { return self.fail("TODO implement support for more arm assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .aarch64 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .aarch64 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "svc #0")) { + if (mem.eql(u8, asm_source, "svc #0")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x0).toU32()); - } else if (mem.eql(u8, inst.asm_source, "svc #0x80")) { + } else if (mem.eql(u8, asm_source, "svc #0x80")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.svc(0x80).toU32()); } else { return self.fail("TODO implement support for more aarch64 assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. 
output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .riscv64 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .riscv64 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } - if (mem.eql(u8, inst.asm_source, "ecall")) { + if (mem.eql(u8, asm_source, "ecall")) { mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ecall.toU32()); } else { return self.fail("TODO implement support for more riscv64 assembly instructions", .{}); } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue.none; } }, - .x86_64, .i386 => { - for (inst.inputs) |input, i| { - if (input.len < 3 or input[0] != '{' or input[input.len - 1] != '}') { - return self.fail("unrecognized asm input constraint: '{s}'", .{input}); + .x86_64, .i386 => result: { + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + + if (constraint.len < 3 or constraint[0] != '{' or constraint[constraint.len - 1] != '}') { + return self.fail("unrecognized asm input constraint: '{s}'", .{constraint}); } - const reg_name = input[1 .. input.len - 1]; + const reg_name = constraint[1 .. 
constraint.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - const arg = inst.args[i]; const arg_mcv = try self.resolveInst(arg); try self.register_manager.getReg(reg, null); - try self.genSetReg(arg.ty, reg, arg_mcv); + try self.genSetReg(self.air.typeOf(arg), reg, arg_mcv); } { - var iter = std.mem.tokenize(inst.asm_source, "\n\r"); + var iter = std.mem.tokenize(asm_source, "\n\r"); while (iter.next()) |ins| { if (mem.eql(u8, ins, "syscall")) { try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 }); @@ -3571,20 +3631,27 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - if (inst.output_constraint) |output| { + if (output_constraint) |output| { if (output.len < 4 or output[0] != '=' or output[1] != '{' or output[output.len - 1] != '}') { return self.fail("unrecognized asm output constraint: '{s}'", .{output}); } const reg_name = output[2 .. output.len - 1]; const reg = parseRegName(reg_name) orelse return self.fail("unrecognized register: '{s}'", .{reg_name}); - return MCValue{ .register = reg }; + break :result MCValue{ .register = reg }; } else { - return MCValue.none; + break :result MCValue{ .none = {} }; } }, else => return self.fail("TODO implement inline asm support for more architectures", .{}), + }; + if (outputs.len + args.len <= Liveness.bpi - 1) { + var buf = [1]Air.Inst.Ref{.none} ** (Liveness.bpi - 1); + std.mem.copy(Air.Inst.Ref, &buf, outputs); + std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); + return self.finishAir(inst, result, buf); } + @panic("TODO: codegen for asm with greater than 3 args"); } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. @@ -3761,7 +3828,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.genSetStack(ty, stack_offset, MCValue{ .register = reg }); }, .register => |reg| { - try self.genX8664ModRMRegToStack(src, ty, stack_offset, reg, 0x89); + try self.genX8664ModRMRegToStack(ty, stack_offset, reg, 0x89); }, .memory => |vaddr| { _ = vaddr; @@ -4409,32 +4476,48 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } - fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airPtrToInt(self: *Self, inst: Air.Inst.Index) !void { const un_op = self.air.instructions.items(.data)[inst].un_op; - return self.resolveInst(un_op); + const result = try self.resolveInst(un_op); + return self.finishAir(inst, result, .{ un_op, .none, .none }); } - fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue { + fn airBitCast(self: *Self, inst: Air.Inst.Index) !void { const ty_op = self.air.instructions.items(.data)[inst].ty_op; - return self.resolveInst(ty_op.operand); + const result = try self.resolveInst(ty_op.operand); + return self.finishAir(inst, result, .{ ty_op.operand, .none, .none }); } - fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue { - // If the type has no codegen bits, no need to store it. - if (!inst.ty.hasCodeGenBits()) - return MCValue.none; - - // Constants have static lifetimes, so they are always memoized in the outer most table. 
- if (inst.castTag(.constant)) |const_inst| { - const branch = &self.branch_stack.items[0]; - const gop = try branch.inst_table.getOrPut(self.gpa, inst); - if (!gop.found_existing) { - gop.value_ptr.* = try self.genTypedValue(inst.src, .{ .ty = inst.ty, .val = const_inst.val }); - } - return gop.value_ptr.*; + fn resolveInst(self: *Self, inst: Air.Inst.Ref) InnerError!MCValue { + // First section of indexes correspond to a set number of constant values. + const ref_int = @enumToInt(inst); + if (ref_int < Air.Inst.Ref.typed_value_map.len) { + return self.genTypedValue(Air.Inst.Ref.typed_value_map[ref_int]); } - return self.getResolvedInstValue(inst); + // If the type has no codegen bits, no need to store it. + const inst_ty = self.air.typeOf(inst); + if (!inst_ty.hasCodeGenBits()) + return MCValue{ .none = {} }; + + const inst_index = @intCast(Air.Inst.Index, ref_int - Air.Inst.Ref.typed_value_map.len); + switch (self.air.instructions.items(.tag)[inst_index]) { + .constant => { + // Constants have static lifetimes, so they are always memoized in the outer most table. + const branch = &self.branch_stack.items[0]; + const gop = try branch.inst_table.getOrPut(self.gpa, inst_index); + if (!gop.found_existing) { + const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + gop.value_ptr.* = try self.genTypedValue(.{ + .ty = inst_ty, + .val = self.air.values[ty_pl.payload], + }); + } + return gop.value_ptr.*; + }, + .const_ty => unreachable, + else => return self.getResolvedInstValue(inst_index), + } } fn getResolvedInstValue(self: *Self, inst: Air.Inst.Index) MCValue { @@ -4454,8 +4537,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { /// A potential opportunity for future optimization here would be keeping track /// of the fact that the instruction is available both as an immediate /// and as a register. - fn limitImmediateType(self: *Self, inst: Air.Inst.Index, comptime T: type) !MCValue { - const mcv = try self.resolveInst(inst); + fn limitImmediateType(self: *Self, operand: Air.Inst.Ref, comptime T: type) !MCValue { + const mcv = try self.resolveInst(operand); const ti = @typeInfo(T).Int; switch (mcv) { .immediate => |imm| { @@ -4470,7 +4553,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return mcv; } - fn genTypedValue(self: *Self, src: LazySrcLoc, typed_value: TypedValue) InnerError!MCValue { + fn genTypedValue(self: *Self, typed_value: TypedValue) InnerError!MCValue { if (typed_value.val.isUndef()) return MCValue{ .undef = {} }; const ptr_bits = self.target.cpu.arch.ptrBitWidth(); @@ -4480,7 +4563,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { .Slice => { var buf: Type.Payload.ElemType = undefined; const ptr_type = typed_value.ty.slicePtrFieldType(&buf); - const ptr_mcv = try self.genTypedValue(src, .{ .ty = ptr_type, .val = typed_value.val }); + const ptr_mcv = try self.genTypedValue(.{ .ty = ptr_type, .val = typed_value.val }); const slice_len = typed_value.val.sliceLen(); // Codegen can't handle some kinds of indirection. If the wrong union field is accessed here it may mean // the Sema code needs to use anonymous Decls or alloca instructions to store data. 
@@ -4541,7 +4624,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return MCValue{ .immediate = 0 }; var buf: Type.Payload.ElemType = undefined; - return self.genTypedValue(src, .{ + return self.genTypedValue(.{ .ty = typed_value.ty.optionalChild(&buf), .val = typed_value.val, }); From 424f260f850cb22637888bbfdf5bfaf9c08a4dae Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Fri, 16 Jul 2021 14:48:51 +0200 Subject: [PATCH 23/53] Fix wasm-related compile errors: - Update `fail()` to not require a `srcLoc`. This brings it in line with other backends, and we were always passing 'node_offset = 0', anyway. - Fix unused local due to change of architecture wrt function/decl generation. - Replace all old instructions to indexes within the function signatures. --- src/codegen/wasm.zig | 221 +++++++++++++++++++++---------------------- src/link/Wasm.zig | 17 ++-- 2 files changed, 118 insertions(+), 120 deletions(-) diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 912577a358..33ab07faf3 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -25,7 +25,7 @@ const WValue = union(enum) { /// Index of the local variable local: u32, /// Instruction holding a constant `Value` - constant: *Inst, + constant: Air.Inst.Index, /// Offset position in the list of bytecode instructions code_offset: usize, /// Used for variables that create multiple locals on the stack when allocated @@ -484,7 +484,7 @@ pub const Result = union(enum) { }; /// Hashmap to store generated `WValue` for each `Inst` -pub const ValueTable = std.AutoHashMapUnmanaged(*Inst, WValue); +pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue); /// Code represents the `Code` section of wasm that /// belongs to a function @@ -497,8 +497,8 @@ pub const Context = struct { gpa: *mem.Allocator, /// Table to save `WValue`'s generated by an `Inst` values: ValueTable, - /// Mapping from *Inst.Block to block ids - blocks: std.AutoArrayHashMapUnmanaged(*Inst.Block, u32) = .{}, + /// Mapping from Air.Inst.Index to block ids + blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, u32) = .{}, /// `bytes` contains the wasm bytecode belonging to the 'code' section. 
code: ArrayList(u8), /// Contains the generated function type bytecode for the current function @@ -538,7 +538,8 @@ pub const Context = struct { } /// Sets `err_msg` on `Context` and returns `error.CodegemFail` which is caught in link/Wasm.zig - fn fail(self: *Context, src: LazySrcLoc, comptime fmt: []const u8, args: anytype) InnerError { + fn fail(self: *Context, comptime fmt: []const u8, args: anytype) InnerError { + const src: LazySrcLoc = .{ .node_offset = 0 }; const src_loc = src.toSrcLocWithDecl(self.decl); self.err_msg = try Module.ErrorMsg.create(self.gpa, src_loc, fmt, args); return error.CodegenFail; @@ -546,7 +547,7 @@ pub const Context = struct { /// Resolves the `WValue` for the given instruction `inst` /// When the given instruction has a `Value`, it returns a constant instead - fn resolveInst(self: Context, inst: *Inst) WValue { + fn resolveInst(self: Context, inst: Air.Inst) Index { if (!inst.ty.hasCodeGenBits()) return .none; if (inst.value()) |_| { @@ -557,48 +558,45 @@ pub const Context = struct { } /// Using a given `Type`, returns the corresponding wasm Valtype - fn typeToValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!wasm.Valtype { + fn typeToValtype(self: *Context, ty: Type) InnerError!wasm.Valtype { return switch (ty.zigTypeTag()) { .Float => blk: { const bits = ty.floatBits(self.target); if (bits == 16 or bits == 32) break :blk wasm.Valtype.f32; if (bits == 64) break :blk wasm.Valtype.f64; - return self.fail(src, "Float bit size not supported by wasm: '{d}'", .{bits}); + return self.fail("Float bit size not supported by wasm: '{d}'", .{bits}); }, .Int => blk: { const info = ty.intInfo(self.target); if (info.bits <= 32) break :blk wasm.Valtype.i32; if (info.bits > 32 and info.bits <= 64) break :blk wasm.Valtype.i64; - return self.fail(src, "Integer bit size not supported by wasm: '{d}'", .{info.bits}); + return self.fail("Integer bit size not supported by wasm: '{d}'", .{info.bits}); }, .Enum => switch (ty.tag()) { .enum_simple => wasm.Valtype.i32, - else => self.typeToValtype( - src, - ty.cast(Type.Payload.EnumFull).?.data.tag_ty, - ), + else => self.typeToValtype(ty.cast(Type.Payload.EnumFull).?.data.tag_ty), }, .Bool, .Pointer, .ErrorSet, => wasm.Valtype.i32, .Struct, .ErrorUnion => unreachable, // Multi typed, must be handled individually. 
- else => self.fail(src, "TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}), + else => self.fail("TODO - Wasm valtype for type '{s}'", .{ty.zigTypeTag()}), }; } /// Using a given `Type`, returns the byte representation of its wasm value type - fn genValtype(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 { - return wasm.valtype(try self.typeToValtype(src, ty)); + fn genValtype(self: *Context, ty: Type) InnerError!u8 { + return wasm.valtype(try self.typeToValtype(ty)); } /// Using a given `Type`, returns the corresponding wasm value type /// Differently from `genValtype` this also allows `void` to create a block /// with no return type - fn genBlockType(self: *Context, src: LazySrcLoc, ty: Type) InnerError!u8 { + fn genBlockType(self: *Context, ty: Type) InnerError!u8 { return switch (ty.tag()) { .void, .noreturn => wasm.block_empty, - else => self.genValtype(src, ty), + else => self.genValtype(ty), }; } @@ -612,7 +610,7 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.local_get)); try leb.writeULEB128(writer, idx); }, - .constant => |inst| try self.emitConstant(inst.src, inst.value().?, inst.ty), // creates a new constant onto the stack + .constant => |inst| try self.emitConstant(inst.value().?, inst.ty), // creates a new constant onto the stack } } @@ -682,7 +680,7 @@ pub const Context = struct { ty.fnParamTypes(params); for (params) |param_type| { // Can we maybe get the source index of each param? - const val_type = try self.genValtype(.{ .node_offset = 0 }, param_type); + const val_type = try self.genValtype(param_type); try writer.writeByte(val_type); } } @@ -691,13 +689,10 @@ pub const Context = struct { const return_type = ty.fnReturnType(); switch (return_type.zigTypeTag()) { .Void, .NoReturn => try leb.writeULEB128(writer, @as(u32, 0)), - .Struct => return self.fail(.{ .node_offset = 0 }, "TODO: Implement struct as return type for wasm", .{}), - .Optional => return self.fail(.{ .node_offset = 0 }, "TODO: Implement optionals as return type for wasm", .{}), + .Struct => return self.fail("TODO: Implement struct as return type for wasm", .{}), + .Optional => return self.fail("TODO: Implement optionals as return type for wasm", .{}), .ErrorUnion => { - const val_type = try self.genValtype( - .{ .node_offset = 0 }, - return_type.errorUnionChild(), - ); + const val_type = try self.genValtype(return_type.errorUnionChild()); // write down the amount of return values try leb.writeULEB128(writer, @as(u32, 2)); @@ -707,22 +702,21 @@ pub const Context = struct { else => { try leb.writeULEB128(writer, @as(u32, 1)); // Can we maybe get the source index of the return type? 
- const val_type = try self.genValtype(.{ .node_offset = 0 }, return_type); + const val_type = try self.genValtype(return_type); try writer.writeByte(val_type); }, } } pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { + _ = func; try self.genFunctype(); - - // Write instructions // TODO: check for and handle death of instructions // Reserve space to write the size after generating the code as well as space for locals count try self.code.resize(10); - try self.genBody(func.body); + try self.genBody(self.air.getMainBody()); // finally, write our local types at the 'offset' position { @@ -753,7 +747,7 @@ pub const Context = struct { return Result.appended; } - /// Generates the wasm bytecode for the function declaration belonging to `Context` + /// Generates the wasm bytecode for the declaration belonging to `Context` pub fn gen(self: *Context, typed_value: TypedValue) InnerError!Result { switch (typed_value.ty.zigTypeTag()) { .Fn => { @@ -793,58 +787,59 @@ pub const Context = struct { } } - fn genInst(self: *Context, inst: *Inst) InnerError!WValue { - return switch (inst.tag) { - .add => self.genBinOp(inst.castTag(.add).?, .add), - .alloc => self.genAlloc(inst.castTag(.alloc).?), - .arg => self.genArg(inst.castTag(.arg).?), - .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), - .bitcast => self.genBitcast(inst.castTag(.bitcast).?), - .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), - .block => self.genBlock(inst.castTag(.block).?), - .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), - .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), - .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), - .br => self.genBr(inst.castTag(.br).?), - .call => self.genCall(inst.castTag(.call).?), - .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .condbr => self.genCondBr(inst.castTag(.condbr).?), - .constant => unreachable, - .dbg_stmt => WValue.none, - .div => self.genBinOp(inst.castTag(.div).?, .div), - .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), - .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), - .load => self.genLoad(inst.castTag(.load).?), - .loop => self.genLoop(inst.castTag(.loop).?), - .mul => self.genBinOp(inst.castTag(.mul).?, .mul), - .not => self.genNot(inst.castTag(.not).?), - .ret => self.genRet(inst.castTag(.ret).?), - .retvoid => WValue.none, - .store => self.genStore(inst.castTag(.store).?), - .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - .sub => self.genBinOp(inst.castTag(.sub).?, .sub), - .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), - .unreach => self.genUnreachable(inst.castTag(.unreach).?), - .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), - .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - .xor => self.genBinOp(inst.castTag(.xor).?, .xor), - else => self.fail(.{ .node_offset = 0 }, "TODO: Implement wasm inst: {s}", .{inst.tag}), + fn genInst(self: *Context, inst: Air.Inst.Index) !WValue { + const air_tags = self.air.instructions.items(.tag); + return switch (air_tags[inst]) { + // .add => self.genBinOp(inst.castTag(.add).?, .add), + // .alloc 
=> self.genAlloc(inst.castTag(.alloc).?), + // .arg => self.genArg(inst.castTag(.arg).?), + // .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), + // .bitcast => self.genBitcast(inst.castTag(.bitcast).?), + // .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), + // .block => self.genBlock(inst.castTag(.block).?), + // .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), + // .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), + // .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), + // .br => self.genBr(inst.castTag(.br).?), + // .call => self.genCall(inst.castTag(.call).?), + // .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), + // .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), + // .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), + // .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), + // .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), + // .cmp_neq => self.genCmp(inst.castTag(.cmp_neq).?, .neq), + // .condbr => self.genCondBr(inst.castTag(.condbr).?), + // .constant => unreachable, + // .dbg_stmt => WValue.none, + // .div => self.genBinOp(inst.castTag(.div).?, .div), + // .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), + // .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), + // .load => self.genLoad(inst.castTag(.load).?), + // .loop => self.genLoop(inst.castTag(.loop).?), + // .mul => self.genBinOp(inst.castTag(.mul).?, .mul), + // .not => self.genNot(inst.castTag(.not).?), + // .ret => self.genRet(inst.castTag(.ret).?), + // .retvoid => WValue.none, + // .store => self.genStore(inst.castTag(.store).?), + // .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), + // .sub => self.genBinOp(inst.castTag(.sub).?, .sub), + // .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), + // .unreach => self.genUnreachable(inst.castTag(.unreach).?), + // .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), + // .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), + // .xor => self.genBinOp(inst.castTag(.xor).?, .xor), + else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), }; } - fn genBody(self: *Context, body: ir.Body) InnerError!void { - for (body.instructions) |inst| { + fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void { + for (body) |inst| { const result = try self.genInst(inst); try self.values.putNoClobber(self.gpa, inst, result); } } - fn genRet(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { // TODO: Implement tail calls const operand = self.resolveInst(inst.operand); try self.emitWValue(operand); @@ -852,7 +847,7 @@ pub const Context = struct { return .none; } - fn genCall(self: *Context, inst: *Inst.Call) InnerError!WValue { + fn genCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const func_val = inst.func.value().?; const target: *Decl = blk: { @@ -861,7 +856,7 @@ pub const Context = struct { } else if (func_val.castTag(.extern_fn)) |ext_fn| { break :blk ext_fn.data; } - return self.fail(inst.base.src, "Expected a function, but instead found type '{s}'", .{func_val.tag()}); + return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()}); }; for (inst.args) |arg| { @@ -881,12 +876,12 @@ pub const Context = struct { return .none; } - fn genAlloc(self: *Context, inst: *Inst.NoOp) 
InnerError!WValue { + fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const elem_type = inst.base.ty.elemType(); return self.allocLocal(elem_type); } - fn genStore(self: *Context, inst: *Inst.BinOp) InnerError!WValue { + fn genStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const writer = self.code.writer(); const lhs = self.resolveInst(inst.lhs); @@ -924,18 +919,18 @@ pub const Context = struct { return .none; } - fn genLoad(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { return self.resolveInst(inst.operand); } - fn genArg(self: *Context, inst: *Inst.Arg) InnerError!WValue { + fn genArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = inst; // arguments share the index with locals defer self.local_index += 1; return WValue{ .local = self.local_index }; } - fn genBinOp(self: *Context, inst: *Inst.BinOp, op: Op) InnerError!WValue { + fn genBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { const lhs = self.resolveInst(inst.lhs); const rhs = self.resolveInst(inst.rhs); @@ -952,21 +947,21 @@ pub const Context = struct { const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = try self.typeToValtype(inst.base.src, inst.base.ty), + .valtype1 = try self.typeToValtype(inst.base.ty), .signedness = if (inst.base.ty.isSignedInt()) .signed else .unsigned, }); try self.code.append(wasm.opcode(opcode)); return WValue{ .code_offset = offset }; } - fn emitConstant(self: *Context, src: LazySrcLoc, value: Value, ty: Type) InnerError!void { + fn emitConstant(self: *Context, value: Value, ty: Type) InnerError!void { const writer = self.code.writer(); switch (ty.zigTypeTag()) { .Int => { // write opcode const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, ty), + .valtype1 = try self.typeToValtype(ty), }); try writer.writeByte(wasm.opcode(opcode)); // write constant @@ -985,14 +980,14 @@ pub const Context = struct { // write opcode const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, ty), + .valtype1 = try self.typeToValtype(ty), }); try writer.writeByte(wasm.opcode(opcode)); // write constant switch (ty.floatBits(self.target)) { 0...32 => try writer.writeIntLittle(u32, @bitCast(u32, value.toFloat(f32))), 64 => try writer.writeIntLittle(u64, @bitCast(u64, value.toFloat(f64))), - else => |bits| return self.fail(src, "Wasm TODO: emitConstant for float with {d} bits", .{bits}), + else => |bits| return self.fail("Wasm TODO: emitConstant for float with {d} bits", .{bits}), } }, .Pointer => { @@ -1009,7 +1004,7 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.i32_load)); try leb.writeULEB128(writer, @as(u32, 0)); try leb.writeULEB128(writer, @as(u32, 0)); - } else return self.fail(src, "Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()}); + } else return self.fail("Wasm TODO: emitConstant for other const pointer tag {s}", .{value.tag()}); }, .Void => {}, .Enum => { @@ -1023,7 +1018,7 @@ pub const Context = struct { const enum_full = ty.cast(Type.Payload.EnumFull).?.data; if (enum_full.values.count() != 0) { const tag_val = enum_full.values.keys()[field_index.data]; - try self.emitConstant(src, tag_val, enum_full.tag_ty); + try self.emitConstant(tag_val, enum_full.tag_ty); } else { try writer.writeByte(wasm.opcode(.i32_const)); try leb.writeULEB128(writer, field_index.data); @@ -1034,7 +1029,7 @@ pub const Context = struct { 
} else { var int_tag_buffer: Type.Payload.Bits = undefined; const int_tag_ty = ty.intTagType(&int_tag_buffer); - try self.emitConstant(src, value, int_tag_ty); + try self.emitConstant(value, int_tag_ty); } }, .ErrorSet => { @@ -1048,12 +1043,12 @@ pub const Context = struct { const payload_type = ty.errorUnionChild(); if (value.getError()) |_| { // write the error value - try self.emitConstant(src, data, error_type); + try self.emitConstant(data, error_type); // no payload, so write a '0' const const opcode: wasm.Opcode = buildOpcode(.{ .op = .@"const", - .valtype1 = try self.typeToValtype(src, payload_type), + .valtype1 = try self.typeToValtype(payload_type), }); try writer.writeByte(wasm.opcode(opcode)); try leb.writeULEB128(writer, @as(u32, 0)); @@ -1062,15 +1057,15 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.i32_const)); try leb.writeULEB128(writer, @as(u32, 0)); // after the error code, we emit the payload - try self.emitConstant(src, data, payload_type); + try self.emitConstant(data, payload_type); } }, - else => |zig_type| return self.fail(src, "Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}), + else => |zig_type| return self.fail("Wasm TODO: emitConstant for zigTypeTag {s}", .{zig_type}), } } - fn genBlock(self: *Context, block: *Inst.Block) InnerError!WValue { - const block_ty = try self.genBlockType(block.base.src, block.base.ty); + fn genBlock(self: *Context, block: Air.Inst.Index) InnerError!WValue { + const block_ty = try self.genBlockType(block.base.ty); try self.startBlock(.block, block_ty, null); // Here we set the current block idx, so breaks know the depth to jump @@ -1100,8 +1095,8 @@ pub const Context = struct { self.block_depth -= 1; } - fn genLoop(self: *Context, loop: *Inst.Loop) InnerError!WValue { - const loop_ty = try self.genBlockType(loop.base.src, loop.base.ty); + fn genLoop(self: *Context, loop: Air.Inst.Index) InnerError!WValue { + const loop_ty = try self.genBlockType(loop.base.ty); try self.startBlock(.loop, loop_ty, null); try self.genBody(loop.body); @@ -1115,7 +1110,7 @@ pub const Context = struct { return .none; } - fn genCondBr(self: *Context, condbr: *Inst.CondBr) InnerError!WValue { + fn genCondBr(self: *Context, condbr: Air.Inst.Index) InnerError!WValue { const condition = self.resolveInst(condbr.condition); const writer = self.code.writer(); @@ -1131,7 +1126,7 @@ pub const Context = struct { break :blk offset; }, }; - const block_ty = try self.genBlockType(condbr.base.src, condbr.base.ty); + const block_ty = try self.genBlockType(condbr.base.ty); try self.startBlock(.block, block_ty, offset); // we inserted the block in front of the condition @@ -1149,7 +1144,7 @@ pub const Context = struct { return .none; } - fn genCmp(self: *Context, inst: *Inst.BinOp, op: std.math.CompareOperator) InnerError!WValue { + fn genCmp(self: *Context, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue { // save offset, so potential conditions can insert blocks in front of // the comparison that we can later jump back to const offset = self.code.items.len; @@ -1168,7 +1163,7 @@ pub const Context = struct { break :blk inst.lhs.ty.intInfo(self.target).signedness; }; const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = try self.typeToValtype(inst.base.src, inst.lhs.ty), + .valtype1 = try self.typeToValtype(inst.lhs.ty), .op = switch (op) { .lt => .lt, .lte => .le, @@ -1183,7 +1178,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBr(self: *Context, br: *Inst.Br) InnerError!WValue { + 
fn genBr(self: *Context, br: Air.Inst.Index) InnerError!WValue { // if operand has codegen bits we should break with a value if (br.operand.ty.hasCodeGenBits()) { const operand = self.resolveInst(br.operand); @@ -1200,7 +1195,7 @@ pub const Context = struct { return .none; } - fn genNot(self: *Context, not: *Inst.UnOp) InnerError!WValue { + fn genNot(self: *Context, not: Air.Inst.Index) InnerError!WValue { const offset = self.code.items.len; const operand = self.resolveInst(not.operand); @@ -1217,7 +1212,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBreakpoint(self: *Context, breakpoint: *Inst.NoOp) InnerError!WValue { + fn genBreakpoint(self: *Context, breakpoint: Air.Inst.Index) InnerError!WValue { _ = self; _ = breakpoint; // unsupported by wasm itself. Can be implemented once we support DWARF @@ -1225,27 +1220,27 @@ pub const Context = struct { return .none; } - fn genUnreachable(self: *Context, unreach: *Inst.NoOp) InnerError!WValue { + fn genUnreachable(self: *Context, unreach: Air.Inst.Index) InnerError!WValue { _ = unreach; try self.code.append(wasm.opcode(.@"unreachable")); return .none; } - fn genBitcast(self: *Context, bitcast: *Inst.UnOp) InnerError!WValue { + fn genBitcast(self: *Context, bitcast: Air.Inst.Index) InnerError!WValue { return self.resolveInst(bitcast.operand); } - fn genStructFieldPtr(self: *Context, inst: *Inst.StructFieldPtr) InnerError!WValue { + fn genStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const struct_ptr = self.resolveInst(inst.struct_ptr); return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, inst.field_index) }; } - fn genSwitchBr(self: *Context, inst: *Inst.SwitchBr) InnerError!WValue { + fn genSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const target = self.resolveInst(inst.target); const target_ty = inst.target.ty; const valtype = try self.typeToValtype(.{ .node_offset = 0 }, target_ty); - const blocktype = try self.genBlockType(inst.base.src, inst.base.ty); + const blocktype = try self.genBlockType(inst.base.ty); const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e. bools and enum values) @@ -1282,7 +1277,7 @@ pub const Context = struct { return .none; } - fn genIsErr(self: *Context, inst: *Inst.UnOp, opcode: wasm.Opcode) InnerError!WValue { + fn genIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { const operand = self.resolveInst(inst.operand); const offset = self.code.items.len; const writer = self.code.writer(); @@ -1298,7 +1293,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genUnwrapErrUnionPayload(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const operand = self.resolveInst(inst.operand); // The index of multi_value contains the error code. To get the initial index of the payload we get // the following index. 
Next, convert it to a `WValue.local` @@ -1307,7 +1302,7 @@ pub const Context = struct { return WValue{ .local = operand.multi_value.index + 1 }; } - fn genWrapErrUnionPayload(self: *Context, inst: *Inst.UnOp) InnerError!WValue { + fn genWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { return self.resolveInst(inst.operand); } }; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index be6ad78701..1387615d15 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -228,7 +228,7 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live }, else => |e| return e, }; - return self.finishUpdateDecl(decl, result); + return self.finishUpdateDecl(decl, result, &context); } // Generate code for the Decl, storing it in memory to be later written to @@ -270,18 +270,21 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { }, else => |e| return e, }; - return self.finishUpdateDecl(decl, result); + + return self.finishUpdateDecl(decl, result, &context); } -fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result) !void { - const code: []const u8 = switch (result) { - .appended => @as([]const u8, context.code.items), - .externally_managed => |payload| payload, - }; +fn finishUpdateDecl(self: *Wasm, decl: *Module.Decl, result: codegen.Result, context: *codegen.Context) !void { + const fn_data: *FnData = &decl.fn_link.wasm; fn_data.code = context.code.toUnmanaged(); fn_data.functype = context.func_type_data.toUnmanaged(); + const code: []const u8 = switch (result) { + .appended => @as([]const u8, fn_data.code.items), + .externally_managed => |payload| payload, + }; + const block = &decl.link.wasm; if (decl.ty.zigTypeTag() == .Fn) { // as locals are patched afterwards, the offsets of funcidx's are off, From 2438f61f1c37aefa16852130370df44b3fabf785 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Fri, 16 Jul 2021 22:43:06 +0200 Subject: [PATCH 24/53] Refactor entire wasm-backend to use new AIR memory layout --- src/codegen/wasm.zig | 275 +++++++++++++++++++++++++------------------ src/link/Wasm.zig | 2 +- 2 files changed, 161 insertions(+), 116 deletions(-) diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 33ab07faf3..5cf3fb15fd 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -483,8 +483,8 @@ pub const Result = union(enum) { externally_managed: []const u8, }; -/// Hashmap to store generated `WValue` for each `Inst` -pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue); +/// Hashmap to store generated `WValue` for each `Air.Inst.Ref` +pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Ref, WValue); /// Code represents the `Code` section of wasm that /// belongs to a function @@ -495,7 +495,7 @@ pub const Context = struct { air: Air, liveness: Liveness, gpa: *mem.Allocator, - /// Table to save `WValue`'s generated by an `Inst` + /// Table to save `WValue`'s generated by an `Air.Inst` values: ValueTable, /// Mapping from Air.Inst.Index to block ids blocks: std.AutoArrayHashMapUnmanaged(Air.Inst.Index, u32) = .{}, @@ -547,14 +547,15 @@ pub const Context = struct { /// Resolves the `WValue` for the given instruction `inst` /// When the given instruction has a `Value`, it returns a constant instead - fn resolveInst(self: Context, inst: Air.Inst) Index { - if (!inst.ty.hasCodeGenBits()) return .none; + fn resolveInst(self: Context, ref: Air.Inst.Ref) WValue { + const ref_type = self.air.getRefType(ref); + if (ref_type.hasCodeGenBits()) return .none; - if 
(inst.value()) |_| { - return WValue{ .constant = inst }; + if (self.air.instructions.items(.tag)[@enumToInt(ref)] == .constant) { + return WValue{ .constant = @enumToInt(ref) }; } - return self.values.get(inst).?; // Instruction does not dominate all uses! + return self.values.get(ref).?; // Instruction does not dominate all uses! } /// Using a given `Type`, returns the corresponding wasm Valtype @@ -610,7 +611,12 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.local_get)); try leb.writeULEB128(writer, idx); }, - .constant => |inst| try self.emitConstant(inst.value().?, inst.ty), // creates a new constant onto the stack + .constant => |index| { + const ty_pl = self.air.instructions.items(.data)[index].ty_pl; + const value = self.air.values[ty_pl.payload]; + // create a new constant onto the stack + try self.emitConstant(value, self.air.getRefType(ty_pl.ty)); + }, } } @@ -626,10 +632,7 @@ pub const Context = struct { const fields_len = @intCast(u32, struct_data.fields.count()); try self.locals.ensureCapacity(self.gpa, self.locals.items.len + fields_len); for (struct_data.fields.values()) |*value| { - const val_type = try self.genValtype( - .{ .node_offset = struct_data.node_offset }, - value.ty, - ); + const val_type = try self.genValtype(value.ty); self.locals.appendAssumeCapacity(val_type); self.local_index += 1; } @@ -640,7 +643,7 @@ pub const Context = struct { }, .ErrorUnion => { const payload_type = ty.errorUnionChild(); - const val_type = try self.genValtype(.{ .node_offset = 0 }, payload_type); + const val_type = try self.genValtype(payload_type); // we emit the error value as the first local, and the payload as the following. // The first local is also used to find the index of the error and payload. @@ -657,7 +660,7 @@ pub const Context = struct { } }; }, else => { - const valtype = try self.genValtype(.{ .node_offset = 0 }, ty); + const valtype = try self.genValtype(ty); try self.locals.append(self.gpa, valtype); self.local_index += 1; return WValue{ .local = initial_index }; @@ -708,8 +711,7 @@ pub const Context = struct { } } - pub fn genFunc(self: *Context, func: *Module.Fn) InnerError!Result { - _ = func; + pub fn genFunc(self: *Context) InnerError!Result { try self.genFunctype(); // TODO: check for and handle death of instructions @@ -790,44 +792,43 @@ pub const Context = struct { fn genInst(self: *Context, inst: Air.Inst.Index) !WValue { const air_tags = self.air.instructions.items(.tag); return switch (air_tags[inst]) { - // .add => self.genBinOp(inst.castTag(.add).?, .add), - // .alloc => self.genAlloc(inst.castTag(.alloc).?), - // .arg => self.genArg(inst.castTag(.arg).?), - // .bit_and => self.genBinOp(inst.castTag(.bit_and).?, .@"and"), - // .bitcast => self.genBitcast(inst.castTag(.bitcast).?), - // .bit_or => self.genBinOp(inst.castTag(.bit_or).?, .@"or"), - // .block => self.genBlock(inst.castTag(.block).?), - // .bool_and => self.genBinOp(inst.castTag(.bool_and).?, .@"and"), - // .bool_or => self.genBinOp(inst.castTag(.bool_or).?, .@"or"), - // .breakpoint => self.genBreakpoint(inst.castTag(.breakpoint).?), - // .br => self.genBr(inst.castTag(.br).?), - // .call => self.genCall(inst.castTag(.call).?), - // .cmp_eq => self.genCmp(inst.castTag(.cmp_eq).?, .eq), - // .cmp_gte => self.genCmp(inst.castTag(.cmp_gte).?, .gte), - // .cmp_gt => self.genCmp(inst.castTag(.cmp_gt).?, .gt), - // .cmp_lte => self.genCmp(inst.castTag(.cmp_lte).?, .lte), - // .cmp_lt => self.genCmp(inst.castTag(.cmp_lt).?, .lt), - // .cmp_neq => 
self.genCmp(inst.castTag(.cmp_neq).?, .neq), - // .condbr => self.genCondBr(inst.castTag(.condbr).?), - // .constant => unreachable, - // .dbg_stmt => WValue.none, - // .div => self.genBinOp(inst.castTag(.div).?, .div), - // .is_err => self.genIsErr(inst.castTag(.is_err).?, .i32_ne), - // .is_non_err => self.genIsErr(inst.castTag(.is_non_err).?, .i32_eq), - // .load => self.genLoad(inst.castTag(.load).?), - // .loop => self.genLoop(inst.castTag(.loop).?), - // .mul => self.genBinOp(inst.castTag(.mul).?, .mul), - // .not => self.genNot(inst.castTag(.not).?), - // .ret => self.genRet(inst.castTag(.ret).?), - // .retvoid => WValue.none, - // .store => self.genStore(inst.castTag(.store).?), - // .struct_field_ptr => self.genStructFieldPtr(inst.castTag(.struct_field_ptr).?), - // .sub => self.genBinOp(inst.castTag(.sub).?, .sub), - // .switchbr => self.genSwitchBr(inst.castTag(.switchbr).?), - // .unreach => self.genUnreachable(inst.castTag(.unreach).?), - // .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst.castTag(.unwrap_errunion_payload).?), - // .wrap_errunion_payload => self.genWrapErrUnionPayload(inst.castTag(.wrap_errunion_payload).?), - // .xor => self.genBinOp(inst.castTag(.xor).?, .xor), + .add => self.genBinOp(inst, .add), + .alloc => self.genAlloc(inst), + .arg => self.genArg(inst), + .bit_and => self.genBinOp(inst, .@"and"), + .bitcast => self.genBitcast(inst), + .bit_or => self.genBinOp(inst, .@"or"), + .block => self.genBlock(inst), + .bool_and => self.genBinOp(inst, .@"and"), + .bool_or => self.genBinOp(inst, .@"or"), + .breakpoint => self.genBreakpoint(inst), + .br => self.genBr(inst), + .call => self.genCall(inst), + .cmp_eq => self.genCmp(inst, .eq), + .cmp_gte => self.genCmp(inst, .gte), + .cmp_gt => self.genCmp(inst, .gt), + .cmp_lte => self.genCmp(inst, .lte), + .cmp_lt => self.genCmp(inst, .lt), + .cmp_neq => self.genCmp(inst, .neq), + .cond_br => self.genCondBr(inst), + .constant => unreachable, + .dbg_stmt => WValue.none, + .div => self.genBinOp(inst, .div), + .is_err => self.genIsErr(inst, .i32_ne), + .is_non_err => self.genIsErr(inst, .i32_eq), + .load => self.genLoad(inst), + .loop => self.genLoop(inst), + .mul => self.genBinOp(inst, .mul), + .not => self.genNot(inst), + .ret => self.genRet(inst), + .store => self.genStore(inst), + .struct_field_ptr => self.genStructFieldPtr(inst), + .sub => self.genBinOp(inst, .sub), + .switch_br => self.genSwitchBr(inst), + .unreach => self.genUnreachable(inst), + .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst), + .wrap_errunion_payload => self.genWrapErrUnionPayload(inst), + .xor => self.genBinOp(inst, .xor), else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), }; } @@ -835,22 +836,27 @@ pub const Context = struct { fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { const result = try self.genInst(inst); - try self.values.putNoClobber(self.gpa, inst, result); + try self.values.putNoClobber(self.gpa, @intToEnum(Air.Inst.Ref, inst), result); } } fn genRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - // TODO: Implement tail calls - const operand = self.resolveInst(inst.operand); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = self.resolveInst(un_op); try self.emitWValue(operand); try self.code.append(wasm.opcode(.@"return")); return .none; } fn genCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const func_val = inst.func.value().?; + const pl_op = 
self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = self.air.extra[extra.end..][0..extra.data.args_len]; const target: *Decl = blk: { + const ty_pl = self.air.instructions.items(.data)[@enumToInt(pl_op.operand)].ty_pl; + const func_val = self.air.values[ty_pl.payload]; + if (func_val.castTag(.function)) |func| { break :blk func.data.owner_decl; } else if (func_val.castTag(.extern_fn)) |ext_fn| { @@ -859,8 +865,8 @@ pub const Context = struct { return self.fail("Expected a function, but instead found type '{s}'", .{func_val.tag()}); }; - for (inst.args) |arg| { - const arg_val = self.resolveInst(arg); + for (args) |arg| { + const arg_val = self.resolveInst(@intToEnum(Air.Inst.Ref, arg)); try self.emitWValue(arg_val); } @@ -877,15 +883,16 @@ pub const Context = struct { } fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const elem_type = inst.base.ty.elemType(); + const elem_type = self.air.getType(inst).elemType(); return self.allocLocal(elem_type); } fn genStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; const writer = self.code.writer(); - const lhs = self.resolveInst(inst.lhs); - const rhs = self.resolveInst(inst.rhs); + const lhs = self.resolveInst(bin_op.lhs); + const rhs = self.resolveInst(bin_op.rhs); switch (lhs) { .multi_value => |multi_value| switch (rhs) { @@ -893,7 +900,7 @@ pub const Context = struct { // we simply assign the local_index to the rhs one. // This allows us to update struct fields without having to individually // set each local as each field's index will be calculated off the struct's base index - .multi_value => self.values.put(self.gpa, inst.lhs, rhs) catch unreachable, // Instruction does not dominate all uses! + .multi_value => self.values.put(self.gpa, bin_op.lhs, rhs) catch unreachable, // Instruction does not dominate all uses! .constant, .none => { // emit all values onto the stack if constant try self.emitWValue(rhs); @@ -920,7 +927,8 @@ pub const Context = struct { } fn genLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - return self.resolveInst(inst.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } fn genArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { @@ -931,8 +939,9 @@ pub const Context = struct { } fn genBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { - const lhs = self.resolveInst(inst.lhs); - const rhs = self.resolveInst(inst.rhs); + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = self.resolveInst(bin_op.lhs); + const rhs = self.resolveInst(bin_op.rhs); // it's possible for both lhs and/or rhs to return an offset as well, // in which case we return the first offset occurance we find. 
@@ -945,10 +954,11 @@ pub const Context = struct { try self.emitWValue(lhs); try self.emitWValue(rhs); + const bin_ty = self.air.getRefType(bin_op.lhs); const opcode: wasm.Opcode = buildOpcode(.{ .op = op, - .valtype1 = try self.typeToValtype(inst.base.ty), - .signedness = if (inst.base.ty.isSignedInt()) .signed else .unsigned, + .valtype1 = try self.typeToValtype(bin_ty), + .signedness = if (bin_ty.isSignedInt()) .signed else .unsigned, }); try self.code.append(wasm.opcode(opcode)); return WValue{ .code_offset = offset }; @@ -1064,14 +1074,17 @@ pub const Context = struct { } } - fn genBlock(self: *Context, block: Air.Inst.Index) InnerError!WValue { - const block_ty = try self.genBlockType(block.base.ty); + fn genBlock(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const block_ty = try self.genBlockType(self.air.getRefType(ty_pl.ty)); + const extra = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; try self.startBlock(.block, block_ty, null); // Here we set the current block idx, so breaks know the depth to jump // to when breaking out. - try self.blocks.putNoClobber(self.gpa, block, self.block_depth); - try self.genBody(block.body); + try self.blocks.putNoClobber(self.gpa, inst, self.block_depth); + try self.genBody(body); try self.endBlock(); return .none; @@ -1095,11 +1108,15 @@ pub const Context = struct { self.block_depth -= 1; } - fn genLoop(self: *Context, loop: Air.Inst.Index) InnerError!WValue { - const loop_ty = try self.genBlockType(loop.base.ty); + fn genLoop(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; - try self.startBlock(.loop, loop_ty, null); - try self.genBody(loop.body); + // result type of loop is always 'noreturn', meaning we can always + // emit the wasm type 'block_empty'. + try self.startBlock(.loop, wasm.block_empty, null); + try self.genBody(body); // breaking to the index of a loop block will continue the loop instead try self.code.append(wasm.opcode(.br)); @@ -1110,8 +1127,12 @@ pub const Context = struct { return .none; } - fn genCondBr(self: *Context, condbr: Air.Inst.Index) InnerError!WValue { - const condition = self.resolveInst(condbr.condition); + fn genCondBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const condition = self.resolveInst(pl_op.operand); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const writer = self.code.writer(); // TODO: Handle death instructions for then and else body @@ -1126,8 +1147,9 @@ pub const Context = struct { break :blk offset; }, }; - const block_ty = try self.genBlockType(condbr.base.ty); - try self.startBlock(.block, block_ty, offset); + + // result type is always noreturn, so use `block_empty` as type. + try self.startBlock(.block, wasm.block_empty, offset); // we inserted the block in front of the condition // so now check if condition matches. 
If not, break outside this block @@ -1135,11 +1157,11 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.br_if)); try leb.writeULEB128(writer, @as(u32, 0)); - try self.genBody(condbr.else_body); + try self.genBody(else_body); try self.endBlock(); // Outer block that matches the condition - try self.genBody(condbr.then_body); + try self.genBody(then_body); return .none; } @@ -1149,21 +1171,23 @@ pub const Context = struct { // the comparison that we can later jump back to const offset = self.code.items.len; - const lhs = self.resolveInst(inst.lhs); - const rhs = self.resolveInst(inst.rhs); + const data: Air.Inst.Data = self.air.instructions.items(.data)[inst]; + const lhs = self.resolveInst(data.bin_op.lhs); + const rhs = self.resolveInst(data.bin_op.rhs); + const lhs_ty = self.air.getRefType(data.bin_op.lhs); try self.emitWValue(lhs); try self.emitWValue(rhs); const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e. bools and enum values) - if (inst.lhs.ty.zigTypeTag() != .Int) break :blk .unsigned; + if (lhs_ty.zigTypeTag() != .Int) break :blk .unsigned; // incase of an actual integer, we emit the correct signedness - break :blk inst.lhs.ty.intInfo(self.target).signedness; + break :blk lhs_ty.intInfo(self.target).signedness; }; const opcode: wasm.Opcode = buildOpcode(.{ - .valtype1 = try self.typeToValtype(inst.lhs.ty), + .valtype1 = try self.typeToValtype(lhs_ty), .op = switch (op) { .lt => .lt, .lte => .le, @@ -1178,16 +1202,17 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBr(self: *Context, br: Air.Inst.Index) InnerError!WValue { + fn genBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const br = self.air.instructions.items(.data)[inst].br; + // if operand has codegen bits we should break with a value - if (br.operand.ty.hasCodeGenBits()) { - const operand = self.resolveInst(br.operand); - try self.emitWValue(operand); + if (self.air.getRefType(br.operand).hasCodeGenBits()) { + try self.emitWValue(self.resolveInst(br.operand)); } // We map every block to its block index. // We then determine how far we have to jump to it by substracting it from current block depth - const idx: u32 = self.block_depth - self.blocks.get(br.block).?; + const idx: u32 = self.block_depth - self.blocks.get(br.block_inst).?; const writer = self.code.writer(); try writer.writeByte(wasm.opcode(.br)); try leb.writeULEB128(writer, idx); @@ -1195,10 +1220,11 @@ pub const Context = struct { return .none; } - fn genNot(self: *Context, not: Air.Inst.Index) InnerError!WValue { + fn genNot(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; const offset = self.code.items.len; - const operand = self.resolveInst(not.operand); + const operand = self.resolveInst(ty_op.operand); try self.emitWValue(operand); // wasm does not have booleans nor the `not` instruction, therefore compare with 0 @@ -1212,35 +1238,44 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBreakpoint(self: *Context, breakpoint: Air.Inst.Index) InnerError!WValue { + fn genBreakpoint(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = self; - _ = breakpoint; + _ = inst; // unsupported by wasm itself. 
Can be implemented once we support DWARF // for wasm return .none; } - fn genUnreachable(self: *Context, unreach: Air.Inst.Index) InnerError!WValue { - _ = unreach; + fn genUnreachable(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + _ = inst; try self.code.append(wasm.opcode(.@"unreachable")); return .none; } - fn genBitcast(self: *Context, bitcast: Air.Inst.Index) InnerError!WValue { - return self.resolveInst(bitcast.operand); + fn genBitcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } fn genStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const struct_ptr = self.resolveInst(inst.struct_ptr); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.StructField, ty_pl.payload); + const struct_ptr = self.resolveInst(extra.data.struct_ptr); - return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, inst.field_index) }; + return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, extra.data.field_index) }; } fn genSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const target = self.resolveInst(inst.target); - const target_ty = inst.target.ty; - const valtype = try self.typeToValtype(.{ .node_offset = 0 }, target_ty); - const blocktype = try self.genBlockType(inst.base.ty); + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.SwitchBr, pl_op.payload); + const cases = self.air.extra[extra.end..][0..extra.data.cases_len]; + const else_body = self.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len]; + + const target = self.resolveInst(pl_op.operand); + const target_ty = self.air.getRefType(pl_op.operand); + const valtype = try self.typeToValtype(target_ty); + // result type is always 'noreturn' + const blocktype = wasm.block_empty; const signedness: std.builtin.Signedness = blk: { // by default we tell the operand type is unsigned (i.e. bools and enum values) @@ -1249,11 +1284,18 @@ pub const Context = struct { // incase of an actual integer, we emit the correct signedness break :blk target_ty.intInfo(self.target).signedness; }; - for (inst.cases) |case| { + for (cases) |case_idx| { + const case = self.air.extraData(Air.SwitchBr.Case, case_idx); + const case_body = self.air.extra[case.end..][0..case.data.body_len]; + // create a block for each case, when the condition does not match we break out of it try self.startBlock(.block, blocktype, null); try self.emitWValue(target); - try self.emitConstant(.{ .node_offset = 0 }, case.item, target_ty); + + // cases must represent a constant of which its type is in the `typed_value_map` + // Therefore we can simply retrieve it. + const ty_val = Air.Inst.Ref.typed_value_map[@enumToInt(case.data.item)]; + try self.emitConstant(ty_val.val, target_ty); const opcode = buildOpcode(.{ .valtype1 = valtype, .op = .ne, // not equal because we jump out the block if it does not match the condition @@ -1264,7 +1306,7 @@ pub const Context = struct { try leb.writeULEB128(self.code.writer(), @as(u32, 0)); // emit our block code - try self.genBody(case.body); + try self.genBody(case_body); // end the block we created earlier try self.endBlock(); @@ -1272,13 +1314,14 @@ pub const Context = struct { // finally, emit the else case if it exists. Here we will not have to // check for a condition, so also no need to emit a block. 
- try self.genBody(inst.else_body); + try self.genBody(else_body); return .none; } fn genIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { - const operand = self.resolveInst(inst.operand); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = self.resolveInst(un_op); const offset = self.code.items.len; const writer = self.code.writer(); @@ -1294,7 +1337,8 @@ pub const Context = struct { } fn genUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const operand = self.resolveInst(inst.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = self.resolveInst(ty_op.operand); // The index of multi_value contains the error code. To get the initial index of the payload we get // the following index. Next, convert it to a `WValue.local` // @@ -1303,6 +1347,7 @@ pub const Context = struct { } fn genWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - return self.resolveInst(inst.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } }; diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 1387615d15..81e50c46b6 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -220,7 +220,7 @@ pub fn updateFunc(self: *Wasm, module: *Module, func: *Module.Fn, air: Air, live defer context.deinit(); // generate the 'code' section for the function declaration - const result = context.genFunc(func) catch |err| switch (err) { + const result = context.genFunc() catch |err| switch (err) { error.CodegenFail => { decl.analysis = .codegen_failure; try module.failed_decls.put(module.gpa, decl, context.err_msg); From d17f492017c77d5d52d2fbd65eaa5c1e08b24161 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Fri, 16 Jul 2021 23:06:59 -0700 Subject: [PATCH 25/53] stage2: miscellaneous fixes for the branch * Breaking language change: inline assembly must use string literal syntax. This is in preparation for inline assembly improvements that involve more integration with the Zig language. This means we cannot rely on text substitution. * Liveness: properly handle inline assembly and function calls with more than 3 operands. - More than 35 operands is not yet supported. This is a low priority to implement. - This required implementation in codegen.zig as well. * Liveness: fix bug causing incorrect tomb bits. * Sema: enable switch expressions that are evaluated at compile-time. - Runtime switch instructions still need to be reworked in this branch. There was a TODO left here (by me) with a suggestion to do some bigger changes as part of the AIR memory reworking. Now that time has come and I plan to honor the suggestion in a future commit before merging this branch. * AIR printing: fix missing ')' on alive instructions. We're back to "hello world" working for the x86_64 backend. 
--- lib/std/Thread.zig | 64 +++++--- lib/std/atomic.zig | 38 ++--- lib/std/atomic/Atomic.zig | 88 ++++++++--- src/AstGen.zig | 2 +- src/Liveness.zig | 72 ++++++++- src/Sema.zig | 307 +++++++++++++++++++------------------- src/codegen.zig | 74 ++++++++- src/print_air.zig | 2 +- 8 files changed, 428 insertions(+), 219 deletions(-) diff --git a/lib/std/Thread.zig b/lib/std/Thread.zig index 91f7ff58c3..58a409c64e 100644 --- a/lib/std/Thread.zig +++ b/lib/std/Thread.zig @@ -505,8 +505,8 @@ const LinuxThreadImpl = struct { /// Ported over from musl libc's pthread detached implementation: /// https://github.com/ifduyue/musl/search?q=__unmapself fn freeAndExit(self: *ThreadCompletion) noreturn { - const unmap_and_exit: []const u8 = switch (target.cpu.arch) { - .i386 => ( + switch (target.cpu.arch) { + .i386 => asm volatile ( \\ movl $91, %%eax \\ movl %[ptr], %%ebx \\ movl %[len], %%ecx @@ -514,8 +514,12 @@ const LinuxThreadImpl = struct { \\ movl $1, %%eax \\ movl $0, %%ebx \\ int $128 + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .x86_64 => ( + .x86_64 => asm volatile ( \\ movq $11, %%rax \\ movq %[ptr], %%rbx \\ movq %[len], %%rcx @@ -523,8 +527,12 @@ const LinuxThreadImpl = struct { \\ movq $60, %%rax \\ movq $1, %%rdi \\ syscall + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .arm, .armeb, .thumb, .thumbeb => ( + .arm, .armeb, .thumb, .thumbeb => asm volatile ( \\ mov r7, #91 \\ mov r0, %[ptr] \\ mov r1, %[len] @@ -532,8 +540,12 @@ const LinuxThreadImpl = struct { \\ mov r7, #1 \\ mov r0, #0 \\ svc 0 + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .aarch64, .aarch64_be, .aarch64_32 => ( + .aarch64, .aarch64_be, .aarch64_32 => asm volatile ( \\ mov x8, #215 \\ mov x0, %[ptr] \\ mov x1, %[len] @@ -541,8 +553,12 @@ const LinuxThreadImpl = struct { \\ mov x8, #93 \\ mov x0, #0 \\ svc 0 + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .mips, .mipsel => ( + .mips, .mipsel => asm volatile ( \\ move $sp, $25 \\ li $2, 4091 \\ move $4, %[ptr] @@ -551,8 +567,12 @@ const LinuxThreadImpl = struct { \\ li $2, 4001 \\ li $4, 0 \\ syscall + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .mips64, .mips64el => ( + .mips64, .mips64el => asm volatile ( \\ li $2, 4091 \\ move $4, %[ptr] \\ move $5, %[len] @@ -560,8 +580,12 @@ const LinuxThreadImpl = struct { \\ li $2, 4001 \\ li $4, 0 \\ syscall + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .powerpc, .powerpcle, .powerpc64, .powerpc64le => ( + .powerpc, .powerpcle, .powerpc64, .powerpc64le => asm volatile ( \\ li 0, 91 \\ mr %[ptr], 3 \\ mr %[len], 4 @@ -570,8 +594,12 @@ const LinuxThreadImpl = struct { \\ li 3, 0 \\ sc \\ blr + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - .riscv64 => ( + .riscv64 => asm volatile ( \\ li a7, 215 \\ mv a0, %[ptr] \\ mv a1, %[len] @@ -579,19 +607,13 @@ const LinuxThreadImpl = struct { \\ li a7, 93 \\ mv a0, zero \\ ecall + : + : [ptr] "r" (@ptrToInt(self.mapped.ptr)), + [len] "r" (self.mapped.len) + : "memory" ), - else => |cpu_arch| { - @compileLog("Unsupported linux arch ", cpu_arch); - }, - }; - - asm volatile (unmap_and_exit - : - : [ptr] "r" (@ptrToInt(self.mapped.ptr)), - [len] "r" (self.mapped.len) - : "memory" - ); - + else => |cpu_arch| @compileError("Unsupported linux arch: " ++ 
@tagName(cpu_arch)), + } unreachable; } }; diff --git a/lib/std/atomic.zig b/lib/std/atomic.zig index 1944e5346b..42d57eb8fa 100644 --- a/lib/std/atomic.zig +++ b/lib/std/atomic.zig @@ -46,34 +46,38 @@ test "fence/compilerFence" { /// Signals to the processor that the caller is inside a busy-wait spin-loop. pub inline fn spinLoopHint() void { - const hint_instruction = switch (target.cpu.arch) { - // No-op instruction that can hint to save (or share with a hardware-thread) pipelining/power resources + switch (target.cpu.arch) { + // No-op instruction that can hint to save (or share with a hardware-thread) + // pipelining/power resources // https://software.intel.com/content/www/us/en/develop/articles/benefitting-power-and-performance-sleep-loops.html - .i386, .x86_64 => "pause", + .i386, .x86_64 => asm volatile ("pause" ::: "memory"), // No-op instruction that serves as a hardware-thread resource yield hint. // https://stackoverflow.com/a/7588941 - .powerpc64, .powerpc64le => "or 27, 27, 27", + .powerpc64, .powerpc64le => asm volatile ("or 27, 27, 27" ::: "memory"), - // `isb` appears more reliable for releasing execution resources than `yield` on common aarch64 CPUs. + // `isb` appears more reliable for releasing execution resources than `yield` + // on common aarch64 CPUs. // https://bugs.java.com/bugdatabase/view_bug.do?bug_id=8258604 // https://bugs.mysql.com/bug.php?id=100664 - .aarch64, .aarch64_be, .aarch64_32 => "isb", + .aarch64, .aarch64_be, .aarch64_32 => asm volatile ("isb" ::: "memory"), // `yield` was introduced in v6k but is also available on v6m. // https://www.keil.com/support/man/docs/armasm/armasm_dom1361289926796.htm - .arm, .armeb, .thumb, .thumbeb => blk: { - const can_yield = comptime std.Target.arm.featureSetHasAny(target.cpu.features, .{ .has_v6k, .has_v6m }); - const instruction = if (can_yield) "yield" else ""; - break :blk instruction; + .arm, .armeb, .thumb, .thumbeb => { + const can_yield = comptime std.Target.arm.featureSetHasAny(target.cpu.features, .{ + .has_v6k, .has_v6m, + }); + if (can_yield) { + asm volatile ("yield" ::: "memory"); + } else { + asm volatile ("" ::: "memory"); + } }, - - else => "", - }; - - // Memory barrier to prevent the compiler from optimizing away the spin-loop - // even if no hint_instruction was provided. - asm volatile (hint_instruction ::: "memory"); + // Memory barrier to prevent the compiler from optimizing away the spin-loop + // even if no specific hint instruction was provided.
+ else => asm volatile ("" ::: "memory"), + } } test "spinLoopHint" { diff --git a/lib/std/atomic/Atomic.zig b/lib/std/atomic/Atomic.zig index 80fb1ae297..f4e3ebda9d 100644 --- a/lib/std/atomic/Atomic.zig +++ b/lib/std/atomic/Atomic.zig @@ -178,26 +178,78 @@ pub fn Atomic(comptime T: type) type { ) u1 { // x86 supports dedicated bitwise instructions if (comptime target.cpu.arch.isX86() and @sizeOf(T) >= 2 and @sizeOf(T) <= 8) { - const instruction = switch (op) { - .Set => "lock bts", - .Reset => "lock btr", - .Toggle => "lock btc", - }; - - const suffix = switch (@sizeOf(T)) { - 2 => "w", - 4 => "l", - 8 => "q", + const old_bit: u8 = switch (@sizeOf(T)) { + 2 => switch (op) { + .Set => asm volatile ("lock btsw %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Reset => asm volatile ("lock btrw %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Toggle => asm volatile ("lock btcw %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + }, + 4 => switch (op) { + .Set => asm volatile ("lock btsl %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Reset => asm volatile ("lock btrl %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Toggle => asm volatile ("lock btcl %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + }, + 8 => switch (op) { + .Set => asm volatile ("lock btsq %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Reset => asm volatile ("lock btrq %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + .Toggle => asm volatile ("lock btcq %[bit], %[ptr]" + // LLVM doesn't support u1 flag register return values + : [result] "={@ccc}" (-> u8) + : [ptr] "*p" (&self.value), + [bit] "X" (@as(T, bit)) + : "cc", "memory" + ), + }, else => @compileError("Invalid atomic type " ++ @typeName(T)), }; - - const old_bit = asm volatile (instruction ++ suffix ++ " %[bit], %[ptr]" - : [result] "={@ccc}" (-> u8) // LLVM doesn't support u1 flag register return values - : [ptr] "*p" (&self.value), - [bit] "X" (@as(T, bit)) - : "cc", "memory" - ); - return @intCast(u1, old_bit); } diff --git a/src/AstGen.zig b/src/AstGen.zig index cbd918ecc7..31e7f040a2 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -6601,7 +6601,7 @@ fn asmExpr( const asm_source = switch (node_tags[full.ast.template]) { .string_literal => try astgen.strLitAsString(main_tokens[full.ast.template]), .multiline_string_literal => try astgen.strLitNodeAsString(full.ast.template), - else => return astgen.failNode(node, "assembly code must use string literal syntax", .{}), + else => return 
astgen.failNode(full.ast.template, "assembly code must use string literal syntax", .{}), }; // See https://github.com/ziglang/zig/issues/215 and related issues discussing diff --git a/src/Liveness.zig b/src/Liveness.zig index 79fc0d7325..2c226122bf 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -24,6 +24,11 @@ const Log2Int = std.math.Log2Int; tomb_bits: []usize, /// Sparse table of specially handled instructions. The value is an index into the `extra` /// array. The meaning of the data depends on the AIR tag. +/// * `cond_br` - points to a `CondBr` in `extra` at this index. +/// * `switch_br` - points to a `SwitchBr` in `extra` at this index. +/// * `asm`, `call` - the value is a set of bits which are the extra tomb bits of operands. +/// The main tomb bits are still used; the extra ones start at the lsb of the +/// value here. special: std.AutoHashMapUnmanaged(Air.Inst.Index, u32), /// Auxiliary data. The way this data is interpreted is determined contextually. extra: []const u32, @@ -67,6 +72,8 @@ pub fn analyze(gpa: *Allocator, air: Air, zir: Zir) Allocator.Error!Liveness { defer a.extra.deinit(gpa); defer a.table.deinit(gpa); + std.mem.set(usize, a.tomb_bits, 0); + const main_body = air.getMainBody(); try a.table.ensureTotalCapacity(gpa, @intCast(u32, main_body.len)); try analyzeWithContext(&a, null, main_body); @@ -103,7 +110,7 @@ pub fn clearOperandDeath(l: Liveness, inst: Air.Inst.Index, operand: OperandInt) const usize_index = (inst * bpi) / @bitSizeOf(usize); const mask = @as(usize, 1) << @intCast(Log2Int(usize), (inst % (@bitSizeOf(usize) / bpi)) * bpi + operand); - l.tomb_bits[usize_index] |= mask; + l.tomb_bits[usize_index] &= ~mask; } /// Higher level API. @@ -298,7 +305,17 @@ fn analyzeInst( std.mem.copy(Air.Inst.Ref, buf[1..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for function call with greater than 2 args"); + var extra_tombs: ExtraTombs = .{ + .analysis = a, + .new_set = new_set, + .inst = inst, + .main_tomb = main_tomb, + }; + try extra_tombs.feed(callee); + for (args) |arg| { + try extra_tombs.feed(arg); + } + return extra_tombs.finish(); }, .struct_field_ptr => { const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; @@ -317,7 +334,19 @@ fn analyzeInst( std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); return trackOperands(a, new_set, inst, main_tomb, buf); } - @panic("TODO: liveness analysis for asm with greater than 3 args"); + var extra_tombs: ExtraTombs = .{ + .analysis = a, + .new_set = new_set, + .inst = inst, + .main_tomb = main_tomb, + }; + for (outputs) |output| { + try extra_tombs.feed(output); + } + for (args) |arg| { + try extra_tombs.feed(arg); + } + return extra_tombs.finish(); }, .block => { const extra = a.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); @@ -531,3 +560,40 @@ fn trackOperands( } a.storeTombBits(inst, tomb_bits); } + +const ExtraTombs = struct { + analysis: *Analysis, + new_set: ?*std.AutoHashMapUnmanaged(Air.Inst.Index, void), + inst: Air.Inst.Index, + main_tomb: bool, + bit_index: usize = 0, + tomb_bits: Bpi = 0, + big_tomb_bits: u32 = 0, + + fn feed(et: *ExtraTombs, op_ref: Air.Inst.Ref) !void { + const this_bit_index = et.bit_index; + assert(this_bit_index < 32); // TODO mechanism for when there are greater than 32 operands + et.bit_index += 1; + const gpa = et.analysis.gpa; + const op_int = @enumToInt(op_ref); + if (op_int < Air.Inst.Ref.typed_value_map.len) return; + const op_index: Air.Inst.Index = op_int -
@intCast(u32, Air.Inst.Ref.typed_value_map.len); + const prev = try et.analysis.table.fetchPut(gpa, op_index, {}); + if (prev == null) { + // Death. + if (et.new_set) |ns| try ns.putNoClobber(gpa, op_index, {}); + if (this_bit_index < bpi - 1) { + et.tomb_bits |= @as(Bpi, 1) << @intCast(OperandInt, this_bit_index); + } else { + const big_bit_index = this_bit_index - (bpi - 1); + et.big_tomb_bits |= @as(u32, 1) << @intCast(u5, big_bit_index); + } + } + } + + fn finish(et: *ExtraTombs) !void { + et.tomb_bits |= @as(Bpi, @boolToInt(et.main_tomb)) << (bpi - 1); + et.analysis.storeTombBits(et.inst, et.tomb_bits); + try et.analysis.special.put(et.analysis.gpa, et.inst, et.big_tomb_bits); + } +}; diff --git a/src/Sema.zig b/src/Sema.zig index 777619dc48..79f1ed0614 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -258,24 +258,24 @@ pub fn analyzeBody( .slice_sentinel => try sema.zirSliceSentinel(block, inst), .slice_start => try sema.zirSliceStart(block, inst), .str => try sema.zirStr(block, inst), - //.switch_block => try sema.zirSwitchBlock(block, inst, false, .none), - //.switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), - //.switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), - //.switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), - //.switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), - //.switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), - //.switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), - //.switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), - //.switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), - //.switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), - //.switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), - //.switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), - //.switch_capture => try sema.zirSwitchCapture(block, inst, false, false), - //.switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), - //.switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), - //.switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), - //.switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), - //.switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), + .switch_block => try sema.zirSwitchBlock(block, inst, false, .none), + .switch_block_multi => try sema.zirSwitchBlockMulti(block, inst, false, .none), + .switch_block_else => try sema.zirSwitchBlock(block, inst, false, .@"else"), + .switch_block_else_multi => try sema.zirSwitchBlockMulti(block, inst, false, .@"else"), + .switch_block_under => try sema.zirSwitchBlock(block, inst, false, .under), + .switch_block_under_multi => try sema.zirSwitchBlockMulti(block, inst, false, .under), + .switch_block_ref => try sema.zirSwitchBlock(block, inst, true, .none), + .switch_block_ref_multi => try sema.zirSwitchBlockMulti(block, inst, true, .none), + .switch_block_ref_else => try sema.zirSwitchBlock(block, inst, true, .@"else"), + .switch_block_ref_else_multi => try sema.zirSwitchBlockMulti(block, inst, true, .@"else"), + .switch_block_ref_under => try sema.zirSwitchBlock(block, inst, true, .under), + .switch_block_ref_under_multi => try sema.zirSwitchBlockMulti(block, inst, true, .under), + .switch_capture => try 
sema.zirSwitchCapture(block, inst, false, false), + .switch_capture_ref => try sema.zirSwitchCapture(block, inst, false, true), + .switch_capture_multi => try sema.zirSwitchCapture(block, inst, true, false), + .switch_capture_multi_ref => try sema.zirSwitchCapture(block, inst, true, true), + .switch_capture_else => try sema.zirSwitchCaptureElse(block, inst, false), + .switch_capture_else_ref => try sema.zirSwitchCaptureElse(block, inst, true), .type_info => try sema.zirTypeInfo(block, inst), .size_of => try sema.zirSizeOf(block, inst), .bit_size_of => try sema.zirBitSizeOf(block, inst), @@ -534,7 +534,6 @@ pub fn analyzeBody( return break_inst; } }, - else => |t| @panic(@tagName(t)), }; if (sema.typeOf(air_inst).isNoReturn()) return always_noreturn; @@ -4110,8 +4109,8 @@ fn analyzeSwitch( const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; + const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. - const item = sema.resolveInst(item_ref) catch unreachable; const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; if (operand_val.eql(item_val)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); @@ -4132,9 +4131,9 @@ fn analyzeSwitch( const body = sema.code.extra[extra_index + 2 * ranges_len ..][0..body_len]; for (items) |item_ref| { + const item = sema.resolveInst(item_ref); // Validation above ensured these will succeed. - const item = sema.resolveInst(item_ref) catch unreachable; - const item_val = sema.resolveConstValue(&child_block, item.src, item) catch unreachable; + const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable; if (operand_val.eql(item_val)) { return sema.resolveBlockBody(block, src, &child_block, body, merges); } @@ -4171,156 +4170,157 @@ fn analyzeSwitch( // TODO when reworking AIR memory layout make multi cases get generated as cases, // not as part of the "else" block. - const cases = try sema.arena.alloc(Inst.SwitchBr.Case, scalar_cases_len); + return mod.fail(&block.base, src, "TODO rework runtime switch Sema", .{}); + //const cases = try sema.arena.alloc(Inst.SwitchBr.Case, scalar_cases_len); - var case_block = child_block.makeSubBlock(); - case_block.runtime_loop = null; - case_block.runtime_cond = operand.src; - case_block.runtime_index += 1; - defer case_block.instructions.deinit(gpa); + //var case_block = child_block.makeSubBlock(); + //case_block.runtime_loop = null; + //case_block.runtime_cond = operand.src; + //case_block.runtime_index += 1; + //defer case_block.instructions.deinit(gpa); - var extra_index: usize = special.end; + //var extra_index: usize = special.end; - var scalar_i: usize = 0; - while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - extra_index += 1; - const body_len = sema.code.extra[extra_index]; - extra_index += 1; - const body = sema.code.extra[extra_index..][0..body_len]; - extra_index += body_len; + //var scalar_i: usize = 0; + //while (scalar_i < scalar_cases_len) : (scalar_i += 1) { + // const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + // extra_index += 1; + // const body_len = sema.code.extra[extra_index]; + // extra_index += 1; + // const body = sema.code.extra[extra_index..][0..body_len]; + // extra_index += body_len; - case_block.instructions.shrinkRetainingCapacity(0); - // We validate these above; these two calls are guaranteed to succeed. 
- const item = sema.resolveInst(item_ref) catch unreachable; - const item_val = sema.resolveConstValue(&case_block, .unneeded, item) catch unreachable; + // case_block.instructions.shrinkRetainingCapacity(0); + // const item = sema.resolveInst(item_ref); + // // We validate these above; these two calls are guaranteed to succeed. + // const item_val = sema.resolveConstValue(&case_block, .unneeded, item) catch unreachable; - _ = try sema.analyzeBody(&case_block, body); + // _ = try sema.analyzeBody(&case_block, body); - cases[scalar_i] = .{ - .item = item_val, - .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, - }; - } + // cases[scalar_i] = .{ + // .item = item_val, + // .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, + // }; + //} - var first_else_body: Body = undefined; - var prev_condbr: ?*Inst.CondBr = null; + //var first_else_body: Body = undefined; + //var prev_condbr: ?*Inst.CondBr = null; - var multi_i: usize = 0; - while (multi_i < multi_cases_len) : (multi_i += 1) { - const items_len = sema.code.extra[extra_index]; - extra_index += 1; - const ranges_len = sema.code.extra[extra_index]; - extra_index += 1; - const body_len = sema.code.extra[extra_index]; - extra_index += 1; - const items = sema.code.refSlice(extra_index, items_len); - extra_index += items_len; + //var multi_i: usize = 0; + //while (multi_i < multi_cases_len) : (multi_i += 1) { + // const items_len = sema.code.extra[extra_index]; + // extra_index += 1; + // const ranges_len = sema.code.extra[extra_index]; + // extra_index += 1; + // const body_len = sema.code.extra[extra_index]; + // extra_index += 1; + // const items = sema.code.refSlice(extra_index, items_len); + // extra_index += items_len; - case_block.instructions.shrinkRetainingCapacity(0); + // case_block.instructions.shrinkRetainingCapacity(0); - var any_ok: ?Air.Inst.Index = null; + // var any_ok: ?Air.Inst.Index = null; - for (items) |item_ref| { - const item = sema.resolveInst(item_ref); - _ = try sema.resolveConstValue(&child_block, item.src, item); + // for (items) |item_ref| { + // const item = sema.resolveInst(item_ref); + // _ = try sema.resolveConstValue(&child_block, item.src, item); - const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); - if (any_ok) |some| { - any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok); - } else { - any_ok = cmp_ok; - } - } + // const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); + // if (any_ok) |some| { + // any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok); + // } else { + // any_ok = cmp_ok; + // } + // } - var range_i: usize = 0; - while (range_i < ranges_len) : (range_i += 1) { - const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - extra_index += 1; - const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - extra_index += 1; + // var range_i: usize = 0; + // while (range_i < ranges_len) : (range_i += 1) { + // const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + // extra_index += 1; + // const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + // extra_index += 1; - const item_first = sema.resolveInst(first_ref); - const item_last = sema.resolveInst(last_ref); + // const item_first = sema.resolveInst(first_ref); + // const item_last = sema.resolveInst(last_ref); - _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); - _ = try sema.resolveConstValue(&child_block, item_last.src, 
item_last); + // _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); + // _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); - // operand >= first and operand <= last - const range_first_ok = try case_block.addBinOp( - .cmp_gte, - operand, - item_first, - ); - const range_last_ok = try case_block.addBinOp( - .cmp_lte, - operand, - item_last, - ); - const range_ok = try case_block.addBinOp( - .bool_and, - range_first_ok, - range_last_ok, - ); - if (any_ok) |some| { - any_ok = try case_block.addBinOp(.bool_or, some, range_ok); - } else { - any_ok = range_ok; - } - } + // // operand >= first and operand <= last + // const range_first_ok = try case_block.addBinOp( + // .cmp_gte, + // operand, + // item_first, + // ); + // const range_last_ok = try case_block.addBinOp( + // .cmp_lte, + // operand, + // item_last, + // ); + // const range_ok = try case_block.addBinOp( + // .bool_and, + // range_first_ok, + // range_last_ok, + // ); + // if (any_ok) |some| { + // any_ok = try case_block.addBinOp(.bool_or, some, range_ok); + // } else { + // any_ok = range_ok; + // } + // } - const new_condbr = try sema.arena.create(Inst.CondBr); - new_condbr.* = .{ - .base = .{ - .tag = .condbr, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .condition = any_ok.?, - .then_body = undefined, - .else_body = undefined, - }; - try case_block.instructions.append(gpa, &new_condbr.base); + // const new_condbr = try sema.arena.create(Inst.CondBr); + // new_condbr.* = .{ + // .base = .{ + // .tag = .condbr, + // .ty = Type.initTag(.noreturn), + // .src = src, + // }, + // .condition = any_ok.?, + // .then_body = undefined, + // .else_body = undefined, + // }; + // try case_block.instructions.append(gpa, &new_condbr.base); - const cond_body: Body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - }; + // const cond_body: Body = .{ + // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), + // }; - case_block.instructions.shrinkRetainingCapacity(0); - const body = sema.code.extra[extra_index..][0..body_len]; - extra_index += body_len; - _ = try sema.analyzeBody(&case_block, body); - new_condbr.then_body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - }; - if (prev_condbr) |condbr| { - condbr.else_body = cond_body; - } else { - first_else_body = cond_body; - } - prev_condbr = new_condbr; - } + // case_block.instructions.shrinkRetainingCapacity(0); + // const body = sema.code.extra[extra_index..][0..body_len]; + // extra_index += body_len; + // _ = try sema.analyzeBody(&case_block, body); + // new_condbr.then_body = .{ + // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), + // }; + // if (prev_condbr) |condbr| { + // condbr.else_body = cond_body; + // } else { + // first_else_body = cond_body; + // } + // prev_condbr = new_condbr; + //} - const final_else_body: Body = blk: { - if (special.body.len != 0) { - case_block.instructions.shrinkRetainingCapacity(0); - _ = try sema.analyzeBody(&case_block, special.body); - const else_body: Body = .{ - .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - }; - if (prev_condbr) |condbr| { - condbr.else_body = else_body; - break :blk first_else_body; - } else { - break :blk else_body; - } - } else { - break :blk .{ .instructions = &.{} }; - } - }; + //const final_else_body: Body = blk: { + // if (special.body.len != 0) { + // 
case_block.instructions.shrinkRetainingCapacity(0); + // _ = try sema.analyzeBody(&case_block, special.body); + // const else_body: Body = .{ + // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), + // }; + // if (prev_condbr) |condbr| { + // condbr.else_body = else_body; + // break :blk first_else_body; + // } else { + // break :blk else_body; + // } + // } else { + // break :blk .{ .instructions = &.{} }; + // } + //}; - _ = try child_block.addSwitchBr(src, operand, cases, final_else_body); - return sema.analyzeBlockBody(block, src, &child_block, merges); + //_ = try child_block.addSwitchBr(src, operand, cases, final_else_body); + //return sema.analyzeBlockBody(block, src, &child_block, merges); } fn resolveSwitchItemVal( @@ -4332,16 +4332,17 @@ fn resolveSwitchItemVal( range_expand: Module.SwitchProngSrc.RangeExpand, ) CompileError!TypedValue { const item = sema.resolveInst(item_ref); + const item_ty = sema.typeOf(item); // Constructing a LazySrcLoc is costly because we only have the switch AST node. // Only if we know for sure we need to report a compile error do we resolve the // full source locations. if (sema.resolveConstValue(block, .unneeded, item)) |val| { - return TypedValue{ .ty = item.ty, .val = val }; + return TypedValue{ .ty = item_ty, .val = val }; } else |err| switch (err) { error.NeededSourceLocation => { const src = switch_prong_src.resolve(sema.gpa, block.src_decl, switch_node_offset, range_expand); return TypedValue{ - .ty = item.ty, + .ty = item_ty, .val = try sema.resolveConstValue(block, src, item), }; }, diff --git a/src/codegen.zig b/src/codegen.zig index bc22d7ec19..11a2603aac 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -452,6 +452,43 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, }; + const BigTomb = struct { + function: *Self, + inst: Air.Inst.Index, + tomb_bits: Liveness.Bpi, + big_tomb_bits: u32, + bit_index: usize, + + fn feed(bt: *BigTomb, op_ref: Air.Inst.Ref) void { + const this_bit_index = bt.bit_index; + bt.bit_index += 1; + + const op_int = @enumToInt(op_ref); + if (op_int < Air.Inst.Ref.typed_value_map.len) return; + const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + + if (this_bit_index < Liveness.bpi - 1) { + const dies = @truncate(u1, bt.tomb_bits >> @intCast(Liveness.OperandInt, this_bit_index)) != 0; + if (!dies) return; + } else { + const big_bit_index = @intCast(u5, this_bit_index - (Liveness.bpi - 1)); + const dies = @truncate(u1, bt.big_tomb_bits >> big_bit_index) != 0; + if (!dies) return; + } + bt.function.processDeath(op_index); + } + + fn finishAir(bt: *BigTomb, result: MCValue) void { + const is_used = !bt.function.liveness.isUnused(bt.inst); + if (is_used) { + log.debug("{} => {}", .{ bt.inst, result }); + const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1]; + branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result); + } + bt.function.finishAirBookkeeping(); + } + }; + const Self = @This(); fn generate( @@ -921,8 +958,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (!dies) continue; const op_int = @enumToInt(op); if (op_int < Air.Inst.Ref.typed_value_map.len) continue; - const operand: Air.Inst.Index = op_int - @intCast(u32, Air.Inst.Ref.typed_value_map.len); - self.processDeath(operand); + const op_index = @intCast(Air.Inst.Index, op_int - Air.Inst.Ref.typed_value_map.len); + self.processDeath(op_index); } const is_used = @truncate(u1, tomb_bits) == 0; if (is_used) { @@ -2739,7 +2776,12 
@@ fn Function(comptime arch: std.Target.Cpu.Arch) type { std.mem.copy(Air.Inst.Ref, buf[1..], args); return self.finishAir(inst, result, buf); } - @panic("TODO: codegen for function call with greater than 2 args"); + var bt = try self.iterateBigTomb(inst, 1 + args.len); + bt.feed(callee); + for (args) |arg| { + bt.feed(arg); + } + return bt.finishAir(result); } fn airRef(self: *Self, inst: Air.Inst.Index) !void { @@ -3651,7 +3693,25 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { std.mem.copy(Air.Inst.Ref, buf[outputs.len..], args); return self.finishAir(inst, result, buf); } - @panic("TODO: codegen for asm with greater than 3 args"); + var bt = try self.iterateBigTomb(inst, outputs.len + args.len); + for (outputs) |output| { + bt.feed(output); + } + for (args) |arg| { + bt.feed(arg); + } + return bt.finishAir(result); + } + + fn iterateBigTomb(self: *Self, inst: Air.Inst.Index, operand_count: usize) !BigTomb { + try self.ensureProcessDeathCapacity(operand_count + 1); + return BigTomb{ + .function = self, + .inst = inst, + .tomb_bits = self.liveness.getTombBits(inst), + .big_tomb_bits = self.liveness.special.get(inst) orelse 0, + .bit_index = 0, + }; } /// Sets the value without any modifications to register allocation metadata or stack allocation metadata. @@ -4492,7 +4552,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // The first section of indexes corresponds to a set number of constant values. const ref_int = @enumToInt(inst); if (ref_int < Air.Inst.Ref.typed_value_map.len) { - return self.genTypedValue(Air.Inst.Ref.typed_value_map[ref_int]); + const tv = Air.Inst.Ref.typed_value_map[ref_int]; + if (!tv.ty.hasCodeGenBits()) { + return MCValue{ .none = {} }; + } + return self.genTypedValue(tv); } // If the type has no codegen bits, no need to store it.
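For reference, a minimal sketch (not part of the patch) of how BigTomb/ExtraTombs map
an operand's position onto tomb bits, assuming `Liveness.bpi = 4`, which matches the
3 + 32 = 35 operand limit mentioned in the commit message:

    // Operands 0..2 get their death bits in the instruction's main tomb bits;
    // the top bit (bpi - 1) is the instruction's own "unused" flag. Operands
    // 3..34 get death bits in the extra `big_tomb_bits` word stored in the
    // sparse `special` table, starting at its lsb.
    fn operandDies(tomb_bits: u4, big_tomb_bits: u32, operand: usize) bool {
        if (operand < 3) {
            return (tomb_bits >> @intCast(u2, operand)) & 1 != 0;
        }
        return (big_tomb_bits >> @intCast(u5, operand - 3)) & 1 != 0;
    }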
diff --git a/src/print_air.zig b/src/print_air.zig index 44c170a078..76159d0796 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -89,7 +89,7 @@ const Writer = struct { if (w.liveness.isUnused(inst)) { try s.writeAll(") unused\n"); } else { - try s.writeAll("\n"); + try s.writeAll(")\n"); } } } From a804de13c8e2d7a6a99c55355f964f658a5a76bc Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Sat, 17 Jul 2021 10:39:20 -0400 Subject: [PATCH 26/53] plan9 linker: fix after testing * exports get rendered properly in symbol table * global offset table is at the start of data section instead of after symtab * various null use fixes --- src/link/Plan9.zig | 65 +++++++++++++++++++++++++++++++++++++++------- 1 file changed, 55 insertions(+), 10 deletions(-) diff --git a/src/link/Plan9.zig b/src/link/Plan9.zig index 9b123f56aa..135b59f82b 100644 --- a/src/link/Plan9.zig +++ b/src/link/Plan9.zig @@ -34,7 +34,7 @@ data_decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, []const u8) = .{}, hdr: aout.ExecHdr = undefined, -entry_decl: ?*Module.Decl = null, +entry_val: ?u64 = null, got_len: u64 = 0, @@ -213,6 +213,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { if (build_options.skip_non_native and builtin.object_format != .plan9) { @panic("Attempted to compile for object format that was disabled by build configuration"); } + _ = comp; const tracy = trace(@src()); defer tracy.end(); @@ -221,7 +222,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { defer assert(self.hdr.entry != 0x0); - _ = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; + const mod = self.base.options.module orelse return error.LinkingWithoutZigSourceUnimplemented; assert(self.got_len == self.fn_decl_table.count() + self.data_decl_table.count()); const got_size = self.got_len * if (!self.sixtyfour_bit) @as(u32, 4) else 8; @@ -230,6 +231,7 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { // + 3 for header, got, symbols var iovecs = try self.base.allocator.alloc(std.os.iovec_const, self.fn_decl_table.count() + self.data_decl_table.count() + 3); + defer self.base.allocator.free(iovecs); const file = self.base.file.?; @@ -247,11 +249,12 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { while (it.next()) |entry| { const decl = entry.key_ptr.*; const code = entry.value_ptr.*; + log.debug("write text decl {*} ({s})", .{ decl, decl.name }); foff += code.len; - text_i += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; iovecs_i += 1; const off = self.getAddr(text_i, .t); + text_i += code.len; decl.link.plan9.offset = off; if (!self.sixtyfour_bit) { mem.writeIntNative(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off)); @@ -260,10 +263,16 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { mem.writeInt(u64, got_table[decl.link.plan9.got_index.?
* 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } self.syms.items[decl.link.plan9.sym_index.?].value = off; + if (mod.decl_exports.get(decl)) |exports| { + try self.addDeclExports(mod, decl, exports); + } } // etext symbol self.syms.items[2].value = self.getAddr(text_i, .t); } + // global offset table is in data + iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len }; + iovecs_i += 1; // data var data_i: u64 = got_size; { @@ -271,11 +280,13 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { while (it.next()) |entry| { const decl = entry.key_ptr.*; const code = entry.value_ptr.*; + log.debug("write data decl {*} ({s})", .{ decl, decl.name }); + foff += code.len; - data_i += code.len; iovecs[iovecs_i] = .{ .iov_base = code.ptr, .iov_len = code.len }; iovecs_i += 1; const off = self.getAddr(data_i, .d); + data_i += code.len; decl.link.plan9.offset = off; if (!self.sixtyfour_bit) { mem.writeInt(u32, got_table[decl.link.plan9.got_index.? * 4 ..][0..4], @intCast(u32, off), self.base.options.target.cpu.arch.endian()); @@ -283,6 +294,9 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { mem.writeInt(u64, got_table[decl.link.plan9.got_index.? * 8 ..][0..8], off, self.base.options.target.cpu.arch.endian()); } self.syms.items[decl.link.plan9.sym_index.?].value = off; + if (mod.decl_exports.get(decl)) |exports| { + try self.addDeclExports(mod, decl, exports); + } } // edata symbol self.syms.items[0].value = self.getAddr(data_i, .b); @@ -292,8 +306,6 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { var sym_buf = std.ArrayList(u8).init(self.base.allocator); defer sym_buf.deinit(); try self.writeSyms(&sym_buf); - iovecs[iovecs_i] = .{ .iov_base = got_table.ptr, .iov_len = got_table.len }; - iovecs_i += 1; assert(2 + self.fn_decl_table.count() + self.data_decl_table.count() == iovecs_i); // we didn't write all the decls iovecs[iovecs_i] = .{ .iov_base = sym_buf.items.ptr, .iov_len = sym_buf.items.len }; iovecs_i += 1; @@ -306,16 +318,45 @@ pub fn flushModule(self: *Plan9, comp: *Compilation) !void { .bss = 0, .pcsz = 0, .spsz = 0, - .entry = @intCast(u32, self.entry_decl.?.link.plan9.offset.?), + .entry = @intCast(u32, self.entry_val.?), }; std.mem.copy(u8, hdr_slice, self.hdr.toU8s()[0..hdr_size]); // write the fat header for 64 bit entry points if (self.sixtyfour_bit) { - mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_decl.?.link.plan9.offset.?); + mem.writeIntSliceBig(u64, hdr_buf[32..40], self.entry_val.?); } // write it all! 
try file.pwritevAll(iovecs, 0); } +fn addDeclExports( + self: *Plan9, + module: *Module, + decl: *Module.Decl, + exports: []const *Module.Export, +) !void { + for (exports) |exp| { + // plan9 does not support custom sections + if (exp.options.section) |section_name| { + if (!mem.eql(u8, section_name, ".text") and !mem.eql(u8, section_name, ".data")) { + try module.failed_exports.put(module.gpa, exp, try Module.ErrorMsg.create(self.base.allocator, decl.srcLoc(), "plan9 does not support extra sections", .{})); + break; + } + } + const sym = .{ + .value = decl.link.plan9.offset.?, + .type = decl.link.plan9.type.toGlobal(), + .name = exp.options.name, + }; + + if (exp.link.plan9) |i| { + self.syms.items[i] = sym; + } else { + try self.syms.append(self.base.allocator, sym); + exp.link.plan9 = self.syms.items.len - 1; + } + } +} + pub fn freeDecl(self: *Plan9, decl: *Module.Decl) void { const is_fn = (decl.ty.zigTypeTag() == .Fn); if (is_fn) @@ -394,19 +435,23 @@ pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Optio pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void { const writer = buf.writer(); for (self.syms.items) |sym| { + log.debug("sym.name: {s}", .{sym.name}); + log.debug("sym.value: {x}", .{sym.value}); + if (mem.eql(u8, sym.name, "_start")) + self.entry_val = sym.value; if (!self.sixtyfour_bit) { try writer.writeIntBig(u32, @intCast(u32, sym.value)); } else { try writer.writeIntBig(u64, sym.value); } try writer.writeByte(@enumToInt(sym.type)); - try writer.writeAll(std.mem.span(sym.name)); + try writer.writeAll(sym.name); try writer.writeByte(0); } } pub fn allocateDeclIndexes(self: *Plan9, decl: *Module.Decl) !void { - if (decl.link.plan9.got_index != null) { + if (decl.link.plan9.got_index == null) { self.got_len += 1; decl.link.plan9.got_index = self.got_len - 1; } From 761f36ff93b5c551101d7f731a136c2d66093e76 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 17 Jul 2021 12:15:27 -0700 Subject: [PATCH 27/53] stage2: rework C backend for new AIR memory layout --- src/Air.zig | 2 + src/codegen/c.zig | 662 +++++++++++++++++++++++++++------------------- 2 files changed, 398 insertions(+), 266 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index f4c4fa4155..0e19202244 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -200,6 +200,8 @@ pub const Inst = struct { ret, /// Returns a pointer to a global variable. /// Uses the `ty_pl` field. Index is into the `variables` array. + /// TODO this can be modeled simply as a constant with a decl ref and then + /// the variables array can be removed from Air. varptr, /// Write a value to a pointer. LHS is pointer, RHS is value. /// Result type is always void. diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 0ee6972654..1fe330a894 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -14,6 +14,7 @@ const Decl = Module.Decl; const trace = @import("../tracy.zig").trace; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); +const Zir = @import("../Zir.zig"); const Liveness = @import("../Liveness.zig"); const Mutability = enum { Const, Mut }; @@ -25,7 +26,7 @@ pub const CValue = union(enum) { /// Index into local_names, but take the address. local_ref: usize, /// A constant instruction, to be rendered inline.
- constant: Air.Inst.Index, + constant: Air.Inst.Ref, /// Index into the parameters arg: usize, /// By-value @@ -105,11 +106,12 @@ pub const Object = struct { next_block_index: usize = 0, indent_writer: IndentWriter(std.ArrayList(u8).Writer), - fn resolveInst(o: *Object, inst: Air.Inst.Index) !CValue { - if (inst.value()) |_| { + fn resolveInst(o: *Object, inst: Air.Inst.Ref) !CValue { + if (o.air.value(inst)) |_| { return CValue{ .constant = inst }; } - return o.value_map.get(inst).?; // Instruction does not dominate all uses! + const index = Air.refToIndex(inst).?; + return o.value_map.get(index).?; // Assertion means instruction does not dominate usage. } fn allocLocalValue(o: *Object) CValue { @@ -134,9 +136,8 @@ pub const Object = struct { .local => |i| return w.print("t{d}", .{i}), .local_ref => |i| return w.print("&t{d}", .{i}), .constant => |inst| { - const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; - const ty = o.air.getRefType(ty_pl.ty); - const val = o.air.values[ty_pl.payload]; + const ty = o.air.typeOf(inst); + const val = o.air.value(inst).?; return o.dg.renderValue(w, ty, val); }, .arg => |i| return w.print("a{d}", .{i}), @@ -854,81 +855,87 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM for (body) |inst| { const result_value = switch (air_tags[inst]) { - //// TODO use a different strategy for add that communicates to the optimizer - //// that wrapping is UB. - //.add => try genBinOp(o, inst.castTag(.add).?, " + "), - //.addwrap => try genWrapOp(o, inst.castTag(.addwrap).?, " + ", "addw_"), - //// TODO use a different strategy for sub that communicates to the optimizer - //// that wrapping is UB. - //.sub => try genBinOp(o, inst.castTag(.sub).?, " - "), - //.subwrap => try genWrapOp(o, inst.castTag(.subwrap).?, " - ", "subw_"), - //// TODO use a different strategy for mul that communicates to the optimizer - //// that wrapping is UB. - //.mul => try genBinOp(o, inst.castTag(.sub).?, " * "), - //.mulwrap => try genWrapOp(o, inst.castTag(.mulwrap).?, " * ", "mulw_"), - //// TODO use a different strategy for div that communicates to the optimizer - //// that wrapping is UB. 
- //.div => try genBinOp(o, inst.castTag(.div).?, " / "), + // zig fmt: off + .constant => unreachable, // excluded from function bodies + .const_ty => unreachable, // excluded from function bodies + .arg => airArg(o), - //.constant => unreachable, // excluded from function bodies - //.alloc => try genAlloc(o, inst.castTag(.alloc).?), - //.arg => genArg(o), - //.assembly => try genAsm(o, inst.castTag(.assembly).?), - //.block => try genBlock(o, inst.castTag(.block).?), - //.bitcast => try genBitcast(o, inst.castTag(.bitcast).?), - //.breakpoint => try genBreakpoint(o, inst.castTag(.breakpoint).?), - //.call => try genCall(o, inst.castTag(.call).?), - //.cmp_eq => try genBinOp(o, inst.castTag(.cmp_eq).?, " == "), - //.cmp_gt => try genBinOp(o, inst.castTag(.cmp_gt).?, " > "), - //.cmp_gte => try genBinOp(o, inst.castTag(.cmp_gte).?, " >= "), - //.cmp_lt => try genBinOp(o, inst.castTag(.cmp_lt).?, " < "), - //.cmp_lte => try genBinOp(o, inst.castTag(.cmp_lte).?, " <= "), - //.cmp_neq => try genBinOp(o, inst.castTag(.cmp_neq).?, " != "), - //.dbg_stmt => try genDbgStmt(o, inst.castTag(.dbg_stmt).?), - //.intcast => try genIntCast(o, inst.castTag(.intcast).?), - //.load => try genLoad(o, inst.castTag(.load).?), - //.ret => try genRet(o, inst.castTag(.ret).?), - //.retvoid => try genRetVoid(o), - //.store => try genStore(o, inst.castTag(.store).?), - //.unreach => try genUnreach(o, inst.castTag(.unreach).?), - //.loop => try genLoop(o, inst.castTag(.loop).?), - //.condbr => try genCondBr(o, inst.castTag(.condbr).?), - //.br => try genBr(o, inst.castTag(.br).?), - //.br_void => try genBrVoid(o, inst.castTag(.br_void).?.block), - //.switchbr => try genSwitchBr(o, inst.castTag(.switchbr).?), - //// bool_and and bool_or are non-short-circuit operations - //.bool_and => try genBinOp(o, inst.castTag(.bool_and).?, " & "), - //.bool_or => try genBinOp(o, inst.castTag(.bool_or).?, " | "), - //.bit_and => try genBinOp(o, inst.castTag(.bit_and).?, " & "), - //.bit_or => try genBinOp(o, inst.castTag(.bit_or).?, " | "), - //.xor => try genBinOp(o, inst.castTag(.xor).?, " ^ "), - //.not => try genUnOp(o, inst.castTag(.not).?, "!"), - //.is_null => try genIsNull(o, inst.castTag(.is_null).?), - //.is_non_null => try genIsNull(o, inst.castTag(.is_non_null).?), - //.is_null_ptr => try genIsNull(o, inst.castTag(.is_null_ptr).?), - //.is_non_null_ptr => try genIsNull(o, inst.castTag(.is_non_null_ptr).?), - //.wrap_optional => try genWrapOptional(o, inst.castTag(.wrap_optional).?), - //.optional_payload => try genOptionalPayload(o, inst.castTag(.optional_payload).?), - //.optional_payload_ptr => try genOptionalPayload(o, inst.castTag(.optional_payload_ptr).?), - //.ref => try genRef(o, inst.castTag(.ref).?), - //.struct_field_ptr => try genStructFieldPtr(o, inst.castTag(.struct_field_ptr).?), + .breakpoint => try airBreakpoint(o), + .unreach => try airUnreach(o), - //.is_err => try genIsErr(o, inst.castTag(.is_err).?, "", ".", "!="), - //.is_non_err => try genIsErr(o, inst.castTag(.is_non_err).?, "", ".", "=="), - //.is_err_ptr => try genIsErr(o, inst.castTag(.is_err_ptr).?, "*", "->", "!="), - //.is_non_err_ptr => try genIsErr(o, inst.castTag(.is_non_err_ptr).?, "*", "->", "=="), + // TODO use a different strategy for add that communicates to the optimizer + // that wrapping is UB. + .add => try airBinOp( o, inst, " + "), + .addwrap => try airWrapOp(o, inst, " + ", "addw_"), + // TODO use a different strategy for sub that communicates to the optimizer + // that wrapping is UB. 
+ .sub => try airBinOp( o, inst, " - "), + .subwrap => try airWrapOp(o, inst, " - ", "subw_"), + // TODO use a different strategy for mul that communicates to the optimizer + // that wrapping is UB. + .mul => try airBinOp( o, inst, " * "), + .mulwrap => try airWrapOp(o, inst, " * ", "mulw_"), + // TODO use a different strategy for div that communicates to the optimizer + // that wrapping is UB. + .div => try airBinOp( o, inst, " / "), - //.unwrap_errunion_payload => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload).?), - //.unwrap_errunion_err => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err).?), - //.unwrap_errunion_payload_ptr => try genUnwrapErrUnionPay(o, inst.castTag(.unwrap_errunion_payload_ptr).?), - //.unwrap_errunion_err_ptr => try genUnwrapErrUnionErr(o, inst.castTag(.unwrap_errunion_err_ptr).?), - //.wrap_errunion_payload => try genWrapErrUnionPay(o, inst.castTag(.wrap_errunion_payload).?), - //.wrap_errunion_err => try genWrapErrUnionErr(o, inst.castTag(.wrap_errunion_err).?), - //.br_block_flat => return o.dg.fail("TODO: C backend: implement codegen for br_block_flat", .{}), - //.ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}), - //.varptr => try genVarPtr(o, inst.castTag(.varptr).?), - //.floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}), - else => return o.dg.fail("TODO: C backend: rework AIR memory layout", .{}), + .cmp_eq => try airBinOp(o, inst, " == "), + .cmp_gt => try airBinOp(o, inst, " > "), + .cmp_gte => try airBinOp(o, inst, " >= "), + .cmp_lt => try airBinOp(o, inst, " < "), + .cmp_lte => try airBinOp(o, inst, " <= "), + .cmp_neq => try airBinOp(o, inst, " != "), + + // bool_and and bool_or are non-short-circuit operations + .bool_and => try airBinOp(o, inst, " & "), + .bool_or => try airBinOp(o, inst, " | "), + .bit_and => try airBinOp(o, inst, " & "), + .bit_or => try airBinOp(o, inst, " | "), + .xor => try airBinOp(o, inst, " ^ "), + .not => try airUnOp( o, inst, "!"), + + .optional_payload => try airOptionalPayload(o, inst), + .optional_payload_ptr => try airOptionalPayload(o, inst), + + .is_err => try airIsErr(o, inst, "", ".", "!="), + .is_non_err => try airIsErr(o, inst, "", ".", "=="), + .is_err_ptr => try airIsErr(o, inst, "*", "->", "!="), + .is_non_err_ptr => try airIsErr(o, inst, "*", "->", "=="), + + .is_null => try airIsNull(o, inst, "==", ""), + .is_non_null => try airIsNull(o, inst, "!=", ""), + .is_null_ptr => try airIsNull(o, inst, "==", "[0]"), + .is_non_null_ptr => try airIsNull(o, inst, "!=", "[0]"), + + .alloc => try airAlloc(o, inst), + .assembly => try airAsm(o, inst), + .block => try airBlock(o, inst), + .bitcast => try airBitcast(o, inst), + .call => try airCall(o, inst), + .dbg_stmt => try airDbgStmt(o, inst), + .intcast => try airIntCast(o, inst), + .load => try airLoad(o, inst), + .ret => try airRet(o, inst), + .store => try airStore(o, inst), + .loop => try airLoop(o, inst), + .cond_br => try airCondBr(o, inst), + .br => try airBr(o, inst), + .switch_br => try airSwitchBr(o, inst), + .wrap_optional => try airWrapOptional(o, inst), + .ref => try airRef(o, inst), + .struct_field_ptr => try airStructFieldPtr(o, inst), + .varptr => try airVarPtr(o, inst), + + .unwrap_errunion_payload => try airUnwrapErrUnionPay(o, inst), + .unwrap_errunion_err => try airUnwrapErrUnionErr(o, inst), + .unwrap_errunion_payload_ptr => try airUnwrapErrUnionPay(o, inst), + .unwrap_errunion_err_ptr => try airUnwrapErrUnionErr(o, inst), + 
.wrap_errunion_payload => try airWrapErrUnionPay(o, inst), + .wrap_errunion_err => try airWrapErrUnionErr(o, inst), + + .ptrtoint => return o.dg.fail("TODO: C backend: implement codegen for ptrtoint", .{}), + .floatcast => return o.dg.fail("TODO: C backend: implement codegen for floatcast", .{}), + // zig fmt: on }; switch (result_value) { .none => {}, @@ -940,38 +947,37 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM try writer.writeAll("}"); } -fn genVarPtr(o: *Object, inst: *Inst.VarPtr) !CValue { - _ = o; - return CValue{ .decl_ref = inst.variable.owner_decl }; +fn airVarPtr(o: *Object, inst: Air.Inst.Index) !CValue { + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const variable = o.air.variables[ty_pl.payload]; + return CValue{ .decl_ref = variable.owner_decl }; } -fn genAlloc(o: *Object, alloc: *Inst.NoOp) !CValue { +fn airAlloc(o: *Object, inst: Air.Inst.Index) !CValue { const writer = o.writer(); + const inst_ty = o.air.typeOfIndex(inst); // First line: the variable used as data storage. - const elem_type = alloc.base.ty.elemType(); - const mutability: Mutability = if (alloc.base.ty.isConstPtr()) .Const else .Mut; + const elem_type = inst_ty.elemType(); + const mutability: Mutability = if (inst_ty.isConstPtr()) .Const else .Mut; const local = try o.allocLocal(elem_type, mutability); try writer.writeAll(";\n"); return CValue{ .local_ref = local.local }; } -fn genArg(o: *Object) CValue { +fn airArg(o: *Object) CValue { const i = o.next_arg_index; o.next_arg_index += 1; return .{ .arg = i }; } -fn genRetVoid(o: *Object) !CValue { - try o.writer().print("return;\n", .{}); - return CValue.none; -} - -fn genLoad(o: *Object, inst: *Inst.UnOp) !CValue { - const operand = try o.resolveInst(inst.operand); +fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue { + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const inst_ty = o.air.typeOfIndex(inst); + const operand = try o.resolveInst(ty_op.operand); const writer = o.writer(); - const local = try o.allocLocal(inst.base.ty, .Const); + const local = try o.allocLocal(inst_ty, .Const); switch (operand) { .local_ref => |i| { const wrapped: CValue = .{ .local = i }; @@ -994,8 +1000,9 @@ fn genLoad(o: *Object, inst: *Inst.UnOp) !CValue { return local; } -fn genRet(o: *Object, inst: *Inst.UnOp) !CValue { - const operand = try o.resolveInst(inst.operand); +fn airRet(o: *Object, inst: Air.Inst.Index) !CValue { + const un_op = o.air.instructions.items(.data)[inst].un_op; + const operand = try o.resolveInst(un_op); const writer = o.writer(); try writer.writeAll("return "); try o.writeCValue(writer, operand); @@ -1003,26 +1010,29 @@ fn genRet(o: *Object, inst: *Inst.UnOp) !CValue { return CValue.none; } -fn genIntCast(o: *Object, inst: *Inst.UnOp) !CValue { - if (inst.base.isUnused()) +fn airIntCast(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) return CValue.none; - const from = try o.resolveInst(inst.operand); + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const from = try o.resolveInst(ty_op.operand); const writer = o.writer(); - const local = try o.allocLocal(inst.base.ty, .Const); + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = ("); - try o.dg.renderType(writer, inst.base.ty); + try o.dg.renderType(writer, inst_ty); try writer.writeAll(")"); try o.writeCValue(writer, from); try writer.writeAll(";\n"); return local; } -fn genStore(o: *Object, inst: *Inst.BinOp) !CValue { 
+fn airStore(o: *Object, inst: Air.Inst.Index) !CValue { // *a = b; - const dest_ptr = try o.resolveInst(inst.lhs); - const src_val = try o.resolveInst(inst.rhs); + const bin_op = o.air.instructions.items(.data)[inst].bin_op; + const dest_ptr = try o.resolveInst(bin_op.lhs); + const src_val = try o.resolveInst(bin_op.rhs); const writer = o.writer(); switch (dest_ptr) { @@ -1051,11 +1061,18 @@ fn genStore(o: *Object, inst: *Inst.BinOp) !CValue { return CValue.none; } -fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]const u8) !CValue { - if (inst.base.isUnused()) +fn airWrapOp( + o: *Object, + inst: Air.Inst.Index, + str_op: [*:0]const u8, + fn_op: [*:0]const u8, +) !CValue { + if (o.liveness.isUnused(inst)) return CValue.none; - const int_info = inst.base.ty.intInfo(o.dg.module.getTarget()); + const bin_op = o.air.instructions.items(.data)[inst].bin_op; + const inst_ty = o.air.typeOfIndex(inst); + const int_info = inst_ty.intInfo(o.dg.module.getTarget()); const bits = int_info.bits; // if it's an unsigned int with non-arbitrary bit size then we can just add @@ -1064,19 +1081,19 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c 8, 16, 32, 64, 128 => true, else => false, }; - if (ok_bits or inst.base.ty.tag() != .int_unsigned) { - return try genBinOp(o, inst, str_op); + if (ok_bits or inst_ty.tag() != .int_unsigned) { + return try airBinOp(o, inst, str_op); } } if (bits > 64) { - return o.dg.fail("TODO: C backend: genWrapOp for large integers", .{}); + return o.dg.fail("TODO: C backend: airWrapOp for large integers", .{}); } var min_buf: [80]u8 = undefined; const min = switch (int_info.signedness) { .unsigned => "0", - else => switch (inst.base.ty.tag()) { + else => switch (inst_ty.tag()) { .c_short => "SHRT_MIN", .c_int => "INT_MIN", .c_long => "LONG_MIN", @@ -1093,7 +1110,7 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c }; var max_buf: [80]u8 = undefined; - const max = switch (inst.base.ty.tag()) { + const max = switch (inst_ty.tag()) { .c_short => "SHRT_MAX", .c_ushort => "USHRT_MAX", .c_int => "INT_MAX", @@ -1117,14 +1134,14 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c }, }; - const lhs = try o.resolveInst(inst.lhs); - const rhs = try o.resolveInst(inst.rhs); + const lhs = try o.resolveInst(bin_op.lhs); + const rhs = try o.resolveInst(bin_op.rhs); const w = o.writer(); - const ret = try o.allocLocal(inst.base.ty, .Mut); + const ret = try o.allocLocal(inst_ty, .Mut); try w.print(" = zig_{s}", .{fn_op}); - switch (inst.base.ty.tag()) { + switch (inst_ty.tag()) { .isize => try w.writeAll("isize"), .c_short => try w.writeAll("short"), .c_int => try w.writeAll("int"), @@ -1161,15 +1178,17 @@ fn genWrapOp(o: *Object, inst: *Inst.BinOp, str_op: [*:0]const u8, fn_op: [*:0]c return ret; } -fn genBinOp(o: *Object, inst: *Inst.BinOp, operator: [*:0]const u8) !CValue { - if (inst.base.isUnused()) +fn airBinOp(o: *Object, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue { + if (o.liveness.isUnused(inst)) return CValue.none; - const lhs = try o.resolveInst(inst.lhs); - const rhs = try o.resolveInst(inst.rhs); + const bin_op = o.air.instructions.items(.data)[inst].bin_op; + const lhs = try o.resolveInst(bin_op.lhs); + const rhs = try o.resolveInst(bin_op.rhs); const writer = o.writer(); - const local = try o.allocLocal(inst.base.ty, .Const); + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = 
"); try o.writeCValue(writer, lhs); @@ -1180,14 +1199,16 @@ fn genBinOp(o: *Object, inst: *Inst.BinOp, operator: [*:0]const u8) !CValue { return local; } -fn genUnOp(o: *Object, inst: *Inst.UnOp, operator: []const u8) !CValue { - if (inst.base.isUnused()) +fn airUnOp(o: *Object, inst: Air.Inst.Index, operator: []const u8) !CValue { + if (o.liveness.isUnused(inst)) return CValue.none; - const operand = try o.resolveInst(inst.operand); + const un_op = o.air.instructions.items(.data)[inst].un_op; + const operand = try o.resolveInst(un_op); const writer = o.writer(); - const local = try o.allocLocal(inst.base.ty, .Const); + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); try writer.print(" = {s}", .{operator}); try o.writeCValue(writer, operand); @@ -1196,18 +1217,22 @@ fn genUnOp(o: *Object, inst: *Inst.UnOp, operator: []const u8) !CValue { return local; } -fn genCall(o: *Object, inst: *Inst.Call) !CValue { - if (inst.func.castTag(.constant)) |func_inst| { - const fn_decl = if (func_inst.val.castTag(.extern_fn)) |extern_fn| +fn airCall(o: *Object, inst: Air.Inst.Index) !CValue { + const pl_op = o.air.instructions.items(.data)[inst].pl_op; + const extra = o.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, o.air.extra[extra.end..][0..extra.data.args_len]); + + if (o.air.value(pl_op.operand)) |func_val| { + const fn_decl = if (func_val.castTag(.extern_fn)) |extern_fn| extern_fn.data - else if (func_inst.val.castTag(.function)) |func_payload| + else if (func_val.castTag(.function)) |func_payload| func_payload.data.owner_decl else unreachable; const fn_ty = fn_decl.ty; const ret_ty = fn_ty.fnReturnType(); - const unused_result = inst.base.isUnused(); + const unused_result = o.liveness.isUnused(inst); var result_local: CValue = .none; const writer = o.writer(); @@ -1221,17 +1246,15 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue { } const fn_name = mem.spanZ(fn_decl.name); try writer.print("{s}(", .{fn_name}); - if (inst.args.len != 0) { - for (inst.args) |arg, i| { - if (i > 0) { - try writer.writeAll(", "); - } - if (arg.value()) |val| { - try o.dg.renderValue(writer, arg.ty, val); - } else { - const val = try o.resolveInst(arg); - try o.writeCValue(writer, val); - } + for (args) |arg, i| { + if (i != 0) { + try writer.writeAll(", "); + } + if (o.air.value(arg)) |val| { + try o.dg.renderValue(writer, o.air.typeOf(arg), val); + } else { + const val = try o.resolveInst(arg); + try o.writeCValue(writer, val); } } try writer.writeAll(");\n"); @@ -1241,21 +1264,26 @@ fn genCall(o: *Object, inst: *Inst.Call) !CValue { } } -fn genDbgStmt(o: *Object, inst: *Inst.DbgStmt) !CValue { - _ = o; - _ = inst; - // TODO emit #line directive here with line number and filename +fn airDbgStmt(o: *Object, inst: Air.Inst.Index) !CValue { + const dbg_stmt = o.air.instructions.items(.data)[inst].dbg_stmt; + const writer = o.writer(); + try writer.print("#line {d}\n", .{dbg_stmt.line}); return CValue.none; } -fn genBlock(o: *Object, inst: *Inst.Block) !CValue { +fn airBlock(o: *Object, inst: Air.Inst.Index) !CValue { + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const extra = o.air.extraData(Air.Block, ty_pl.payload); + const body = o.air.extra[extra.end..][0..extra.data.body_len]; + const block_id: usize = o.next_block_index; o.next_block_index += 1; const writer = o.writer(); - const result = if (inst.base.ty.tag() != .void and !inst.base.isUnused()) blk: { + const inst_ty = o.air.typeOfIndex(inst); + const result = if 
(inst_ty.tag() != .void and !o.liveness.isUnused(inst)) blk: { // allocate a location for the result - const local = try o.allocLocal(inst.base.ty, .Mut); + const local = try o.allocLocal(inst_ty, .Mut); try writer.writeAll(";\n"); break :blk local; } else CValue{ .none = {} }; @@ -1265,42 +1293,44 @@ fn genBlock(o: *Object, inst: *Inst.Block) !CValue { .result = result, }); - try genBody(o, inst.body); + try genBody(o, body); try o.indent_writer.insertNewline(); // label must be followed by an expression, add an empty one. try writer.print("zig_block_{d}:;\n", .{block_id}); return result; } -fn genBr(o: *Object, inst: *Inst.Br) !CValue { - const result = o.blocks.get(inst.block).?.result; +fn airBr(o: *Object, inst: Air.Inst.Index) !CValue { + const branch = o.air.instructions.items(.data)[inst].br; + const block = o.blocks.get(branch.block_inst).?; + const result = block.result; const writer = o.writer(); // If result is .none then the value of the block is unused. - if (inst.operand.ty.tag() != .void and result != .none) { - const operand = try o.resolveInst(inst.operand); + if (result != .none) { + const operand = try o.resolveInst(branch.operand); try o.writeCValue(writer, result); try writer.writeAll(" = "); try o.writeCValue(writer, operand); try writer.writeAll(";\n"); } - return genBrVoid(o, inst.block); -} - -fn genBrVoid(o: *Object, block: *Inst.Block) !CValue { - try o.writer().print("goto zig_block_{d};\n", .{o.blocks.get(block).?.block_id}); + try o.writer().print("goto zig_block_{d};\n", .{block.block_id}); return CValue.none; } -fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue { - const operand = try o.resolveInst(inst.operand); +fn airBitcast(o: *Object, inst: Air.Inst.Index) !CValue { + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const operand = try o.resolveInst(ty_op.operand); const writer = o.writer(); - if (inst.base.ty.zigTypeTag() == .Pointer and inst.operand.ty.zigTypeTag() == .Pointer) { - const local = try o.allocLocal(inst.base.ty, .Const); + const inst_ty = o.air.typeOfIndex(inst); + if (inst_ty.zigTypeTag() == .Pointer and + o.air.typeOf(ty_op.operand).zigTypeTag() == .Pointer) + { + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = ("); - try o.dg.renderType(writer, inst.base.ty); + try o.dg.renderType(writer, inst_ty); try writer.writeAll(")"); try o.writeCValue(writer, operand); @@ -1308,7 +1338,7 @@ fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue { return local; } - const local = try o.allocLocal(inst.base.ty, .Mut); + const local = try o.allocLocal(inst_ty, .Mut); try writer.writeAll(";\n"); try writer.writeAll("memcpy(&"); @@ -1322,79 +1352,124 @@ fn genBitcast(o: *Object, inst: *Inst.UnOp) !CValue { return local; } -fn genBreakpoint(o: *Object, inst: *Inst.NoOp) !CValue { - _ = inst; +fn airBreakpoint(o: *Object) !CValue { try o.writer().writeAll("zig_breakpoint();\n"); return CValue.none; } -fn genUnreach(o: *Object, inst: *Inst.NoOp) !CValue { - _ = inst; +fn airUnreach(o: *Object) !CValue { try o.writer().writeAll("zig_unreachable();\n"); return CValue.none; } -fn genLoop(o: *Object, inst: *Inst.Loop) !CValue { +fn airLoop(o: *Object, inst: Air.Inst.Index) !CValue { + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const loop = o.air.extraData(Air.Block, ty_pl.payload); + const body = o.air.extra[loop.end..][0..loop.data.body_len]; try o.writer().writeAll("while (true) "); - try genBody(o, inst.body); + try genBody(o, body); try o.indent_writer.insertNewline(); return CValue.none; } -fn 
genCondBr(o: *Object, inst: *Inst.CondBr) !CValue { - const cond = try o.resolveInst(inst.condition); +fn airCondBr(o: *Object, inst: Air.Inst.Index) !CValue { + const pl_op = o.air.instructions.items(.data)[inst].pl_op; + const cond = try o.resolveInst(pl_op.operand); + const extra = o.air.extraData(Air.CondBr, pl_op.payload); + const then_body = o.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = o.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const writer = o.writer(); try writer.writeAll("if ("); try o.writeCValue(writer, cond); try writer.writeAll(") "); - try genBody(o, inst.then_body); + try genBody(o, then_body); try writer.writeAll(" else "); - try genBody(o, inst.else_body); + try genBody(o, else_body); try o.indent_writer.insertNewline(); return CValue.none; } -fn genSwitchBr(o: *Object, inst: *Inst.SwitchBr) !CValue { - const target = try o.resolveInst(inst.target); +fn airSwitchBr(o: *Object, inst: Air.Inst.Index) !CValue { + const pl_op = o.air.instructions.items(.data)[inst].pl_op; + const condition = try o.resolveInst(pl_op.operand); + const condition_ty = o.air.typeOf(pl_op.operand); const writer = o.writer(); try writer.writeAll("switch ("); - try o.writeCValue(writer, target); + try o.writeCValue(writer, condition); try writer.writeAll(") {\n"); o.indent_writer.pushIndent(); - for (inst.cases) |case| { - try writer.writeAll("case "); - try o.dg.renderValue(writer, inst.target.ty, case.item); - try writer.writeAll(": "); - // the case body must be noreturn so we don't need to insert a break - try genBody(o, case.body); - try o.indent_writer.insertNewline(); - } + // Need to rework Sema so that multiple cases are represented rather than + // getting branching logic inside the else, this way we get multiple case + // labels here rather than logic in the default case. 
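Once Sema carries a list of items per case, the loop here can emit several consecutive `case` labels in front of a single shared body. A hedged sketch of that future loop; `case.items` and `case.body` are assumed names, not the current representation:

    for (case.items) |item| {
        try writer.writeAll("case ");
        try o.dg.renderValue(writer, condition_ty, item);
        try writer.writeAll(":\n");
    }
    // Each case body is noreturn, so no trailing `break;` is required.
    try genBody(o, case.body);

Until then, the function bails out: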
+ _ = condition_ty; + return o.dg.fail("TODO implement switch in C backend", .{}); - try writer.writeAll("default: "); - try genBody(o, inst.else_body); - try o.indent_writer.insertNewline(); + //for (inst.cases) |case| { + // try writer.writeAll("case "); + // try o.dg.renderValue(writer, condition_ty, case.item); + // try writer.writeAll(": "); + // // the case body must be noreturn so we don't need to insert a break + // try genBody(o, case.body); + // try o.indent_writer.insertNewline(); + //} - o.indent_writer.popIndent(); - try writer.writeAll("}\n"); - return CValue.none; + //try writer.writeAll("default: "); + //try genBody(o, inst.else_body); + //try o.indent_writer.insertNewline(); + + //o.indent_writer.popIndent(); + //try writer.writeAll("}\n"); + //return CValue.none; } -fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { - if (as.base.isUnused() and !as.is_volatile) +fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue { + const air_datas = o.air.instructions.items(.data); + const air_extra = o.air.extraData(Air.Asm, air_datas[inst].ty_pl.payload); + const zir = o.dg.decl.namespace.file_scope.zir; + const extended = zir.instructions.items(.data)[air_extra.data.zir_index].extended; + const zir_extra = zir.extraData(Zir.Inst.Asm, extended.operand); + const asm_source = zir.nullTerminatedString(zir_extra.data.asm_source); + const outputs_len = @truncate(u5, extended.small); + const args_len = @truncate(u5, extended.small >> 5); + const clobbers_len = @truncate(u5, extended.small >> 10); + _ = clobbers_len; // TODO honor these + const is_volatile = @truncate(u1, extended.small >> 15) != 0; + const outputs = @bitCast([]const Air.Inst.Ref, o.air.extra[air_extra.end..][0..outputs_len]); + const args = @bitCast([]const Air.Inst.Ref, o.air.extra[air_extra.end + outputs.len ..][0..args_len]); + + if (outputs_len > 1) { + return o.dg.fail("TODO implement codegen for asm with more than 1 output", .{}); + } + + if (o.liveness.isUnused(inst) and !is_volatile) return CValue.none; + var extra_i: usize = zir_extra.end; + const output_constraint: ?[]const u8 = out: { + var i: usize = 0; + while (i < outputs_len) : (i += 1) { + const output = zir.extraData(Zir.Inst.Asm.Output, extra_i); + extra_i = output.end; + break :out zir.nullTerminatedString(output.data.constraint); + } + break :out null; + }; + const args_extra_begin = extra_i; + const writer = o.writer(); - for (as.inputs) |i, index| { - if (i[0] == '{' and i[i.len - 1] == '}') { - const reg = i[1 .. i.len - 1]; - const arg = as.args[index]; + for (args) |arg| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + if (constraint[0] == '{' and constraint[constraint.len - 1] == '}') { + const reg = constraint[1 .. 
constraint.len - 1]; const arg_c_value = try o.resolveInst(arg); try writer.writeAll("register "); - try o.dg.renderType(writer, arg.ty); + try o.dg.renderType(writer, o.air.typeOf(arg)); try writer.print(" {s}_constant __asm__(\"{s}\") = ", .{ reg, reg }); try o.writeCValue(writer, arg_c_value); @@ -1403,19 +1478,23 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { return o.dg.fail("TODO non-explicit inline asm regs", .{}); } } - const volatile_string: []const u8 = if (as.is_volatile) "volatile " else ""; - try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, as.asm_source }); - if (as.output_constraint) |_| { + const volatile_string: []const u8 = if (is_volatile) "volatile " else ""; + try writer.print("__asm {s}(\"{s}\"", .{ volatile_string, asm_source }); + if (output_constraint) |_| { return o.dg.fail("TODO: CBE inline asm output", .{}); } - if (as.inputs.len > 0) { - if (as.output_constraint == null) { + if (args.len > 0) { + if (output_constraint == null) { try writer.writeAll(" :"); } try writer.writeAll(": "); - for (as.inputs) |i, index| { - if (i[0] == '{' and i[i.len - 1] == '}') { - const reg = i[1 .. i.len - 1]; + extra_i = args_extra_begin; + for (args) |_, index| { + const input = zir.extraData(Zir.Inst.Asm.Input, extra_i); + extra_i = input.end; + const constraint = zir.nullTerminatedString(input.data.constraint); + if (constraint[0] == '{' and constraint[constraint.len - 1] == '}') { + const reg = constraint[1 .. constraint.len - 1]; if (index > 0) { try writer.writeAll(", "); } @@ -1428,40 +1507,51 @@ fn genAsm(o: *Object, as: *Inst.Assembly) !CValue { } try writer.writeAll(");\n"); - if (as.base.isUnused()) + if (o.liveness.isUnused(inst)) return CValue.none; return o.dg.fail("TODO: C backend: inline asm expression result used", .{}); } -fn genIsNull(o: *Object, inst: *Inst.UnOp) !CValue { +fn airIsNull( + o: *Object, + inst: Air.Inst.Index, + operator: [*:0]const u8, + deref_suffix: [*:0]const u8, +) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; + + const un_op = o.air.instructions.items(.data)[inst].un_op; const writer = o.writer(); - const invert_logic = inst.base.tag == .is_non_null or inst.base.tag == .is_non_null_ptr; - const operator = if (invert_logic) "!=" else "=="; - const maybe_deref = if (inst.base.tag == .is_null_ptr or inst.base.tag == .is_non_null_ptr) "[0]" else ""; - const operand = try o.resolveInst(inst.operand); + const operand = try o.resolveInst(un_op); const local = try o.allocLocal(Type.initTag(.bool), .Const); try writer.writeAll(" = ("); try o.writeCValue(writer, operand); - if (inst.operand.ty.isPtrLikeOptional()) { + if (o.air.typeOf(un_op).isPtrLikeOptional()) { // operand is a regular pointer, test `operand !=/== NULL` - try writer.print("){s} {s} NULL;\n", .{ maybe_deref, operator }); + try writer.print("){s} {s} NULL;\n", .{ deref_suffix, operator }); } else { - try writer.print("){s}.is_null {s} true;\n", .{ maybe_deref, operator }); + try writer.print("){s}.is_null {s} true;\n", .{ deref_suffix, operator }); } return local; } -fn genOptionalPayload(o: *Object, inst: *Inst.UnOp) !CValue { - const writer = o.writer(); - const operand = try o.resolveInst(inst.operand); +fn airOptionalPayload(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; - const opt_ty = if (inst.operand.ty.zigTypeTag() == .Pointer) - inst.operand.ty.elemType() + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const writer = o.writer(); + const operand = try 
o.resolveInst(ty_op.operand); + const operand_ty = o.air.typeOf(ty_op.operand); + + const opt_ty = if (operand_ty.zigTypeTag() == .Pointer) + operand_ty.elemType() else - inst.operand.ty; + operand_ty; if (opt_ty.isPtrLikeOptional()) { // the operand is just a regular pointer, no need to do anything special. @@ -1469,10 +1559,11 @@ fn genOptionalPayload(o: *Object, inst: *Inst.UnOp) !CValue { return operand; } - const maybe_deref = if (inst.operand.ty.zigTypeTag() == .Pointer) "->" else "."; - const maybe_addrof = if (inst.base.ty.zigTypeTag() == .Pointer) "&" else ""; + const inst_ty = o.air.typeOfIndex(inst); + const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; + const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else ""; - const local = try o.allocLocal(inst.base.ty, .Const); + const local = try o.allocLocal(inst_ty, .Const); try writer.print(" = {s}(", .{maybe_addrof}); try o.writeCValue(writer, operand); @@ -1480,24 +1571,36 @@ fn genOptionalPayload(o: *Object, inst: *Inst.UnOp) !CValue { return local; } -fn genRef(o: *Object, inst: *Inst.UnOp) !CValue { - const writer = o.writer(); - const operand = try o.resolveInst(inst.operand); +fn airRef(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; - const local = try o.allocLocal(inst.base.ty, .Const); + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const writer = o.writer(); + const operand = try o.resolveInst(ty_op.operand); + + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = "); try o.writeCValue(writer, operand); try writer.writeAll(";\n"); return local; } -fn genStructFieldPtr(o: *Object, inst: *Inst.StructFieldPtr) !CValue { - const writer = o.writer(); - const struct_ptr = try o.resolveInst(inst.struct_ptr); - const struct_obj = inst.struct_ptr.ty.elemType().castTag(.@"struct").?.data; - const field_name = struct_obj.fields.keys()[inst.field_index]; +fn airStructFieldPtr(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; - const local = try o.allocLocal(inst.base.ty, .Const); + const ty_pl = o.air.instructions.items(.data)[inst].ty_pl; + const extra = o.air.extraData(Air.StructField, ty_pl.payload).data; + const writer = o.writer(); + const struct_ptr = try o.resolveInst(extra.struct_ptr); + const struct_ptr_ty = o.air.typeOf(extra.struct_ptr); + const struct_obj = struct_ptr_ty.elemType().castTag(.@"struct").?.data; + const field_name = struct_obj.fields.keys()[extra.field_index]; + + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); switch (struct_ptr) { .local_ref => |i| { try writer.print(" = &t{d}.{};\n", .{ i, fmtIdent(field_name) }); @@ -1512,17 +1615,20 @@ fn genStructFieldPtr(o: *Object, inst: *Inst.StructFieldPtr) !CValue { } // *(E!T) -> E NOT *E -fn genUnwrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue { - if (inst.base.isUnused()) +fn airUnwrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) return CValue.none; + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const inst_ty = o.air.typeOfIndex(inst); const writer = o.writer(); - const operand = try o.resolveInst(inst.operand); + const operand = try o.resolveInst(ty_op.operand); + const operand_ty = o.air.typeOf(ty_op.operand); - const payload_ty = inst.operand.ty.errorUnionChild(); + const payload_ty = operand_ty.errorUnionChild(); if 
(!payload_ty.hasCodeGenBits()) { - if (inst.operand.ty.zigTypeTag() == .Pointer) { - const local = try o.allocLocal(inst.base.ty, .Const); + if (operand_ty.zigTypeTag() == .Pointer) { + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = *"); try o.writeCValue(writer, operand); try writer.writeAll(";\n"); @@ -1532,9 +1638,9 @@ fn genUnwrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue { } } - const maybe_deref = if (inst.operand.ty.zigTypeTag() == .Pointer) "->" else "."; + const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; - const local = try o.allocLocal(inst.base.ty, .Const); + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = ("); try o.writeCValue(writer, operand); @@ -1542,22 +1648,25 @@ fn genUnwrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue { return local; } -fn genUnwrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue { - if (inst.base.isUnused()) +fn airUnwrapErrUnionPay(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) return CValue.none; + const ty_op = o.air.instructions.items(.data)[inst].ty_op; const writer = o.writer(); - const operand = try o.resolveInst(inst.operand); + const operand = try o.resolveInst(ty_op.operand); + const operand_ty = o.air.typeOf(ty_op.operand); - const payload_ty = inst.operand.ty.errorUnionChild(); + const payload_ty = operand_ty.errorUnionChild(); if (!payload_ty.hasCodeGenBits()) { return CValue.none; } - const maybe_deref = if (inst.operand.ty.zigTypeTag() == .Pointer) "->" else "."; - const maybe_addrof = if (inst.base.ty.zigTypeTag() == .Pointer) "&" else ""; + const inst_ty = o.air.typeOfIndex(inst); + const maybe_deref = if (operand_ty.zigTypeTag() == .Pointer) "->" else "."; + const maybe_addrof = if (inst_ty.zigTypeTag() == .Pointer) "&" else ""; - const local = try o.allocLocal(inst.base.ty, .Const); + const local = try o.allocLocal(inst_ty, .Const); try writer.print(" = {s}(", .{maybe_addrof}); try o.writeCValue(writer, operand); @@ -1565,54 +1674,75 @@ fn genUnwrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue { return local; } -fn genWrapOptional(o: *Object, inst: *Inst.UnOp) !CValue { - const writer = o.writer(); - const operand = try o.resolveInst(inst.operand); +fn airWrapOptional(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; - if (inst.base.ty.isPtrLikeOptional()) { + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const writer = o.writer(); + const operand = try o.resolveInst(ty_op.operand); + + const inst_ty = o.air.typeOfIndex(inst); + if (inst_ty.isPtrLikeOptional()) { // the operand is just a regular pointer, no need to do anything special. return operand; } // .wrap_optional is used to convert non-optionals into optionals so it can never be null. 
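// A non-pointer-like optional lowers to a C struct carrying `.is_null` and
// `.payload` fields, so wrapping only has to store `.is_null = false`
// alongside the payload, as the initializer below shows.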
- const local = try o.allocLocal(inst.base.ty, .Const); + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = { .is_null = false, .payload ="); try o.writeCValue(writer, operand); try writer.writeAll("};\n"); return local; } -fn genWrapErrUnionErr(o: *Object, inst: *Inst.UnOp) !CValue { - const writer = o.writer(); - const operand = try o.resolveInst(inst.operand); +fn airWrapErrUnionErr(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; - const local = try o.allocLocal(inst.base.ty, .Const); + const writer = o.writer(); + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const operand = try o.resolveInst(ty_op.operand); + + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = { .error = "); try o.writeCValue(writer, operand); try writer.writeAll(" };\n"); return local; } -fn genWrapErrUnionPay(o: *Object, inst: *Inst.UnOp) !CValue { - const writer = o.writer(); - const operand = try o.resolveInst(inst.operand); - const local = try o.allocLocal(inst.base.ty, .Const); +fn airWrapErrUnionPay(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; + + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const writer = o.writer(); + const operand = try o.resolveInst(ty_op.operand); + + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); try writer.writeAll(" = { .error = 0, .payload = "); try o.writeCValue(writer, operand); try writer.writeAll(" };\n"); return local; } -fn genIsErr( +fn airIsErr( o: *Object, - inst: *Inst.UnOp, + inst: Air.Inst.Index, deref_prefix: [*:0]const u8, deref_suffix: [*:0]const u8, op_str: [*:0]const u8, ) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; + + const un_op = o.air.instructions.items(.data)[inst].un_op; const writer = o.writer(); - const operand = try o.resolveInst(inst.operand); + const operand = try o.resolveInst(un_op); + const operand_ty = o.air.typeOf(un_op); const local = try o.allocLocal(Type.initTag(.bool), .Const); - const payload_ty = inst.operand.ty.errorUnionChild(); + const payload_ty = operand_ty.errorUnionChild(); if (!payload_ty.hasCodeGenBits()) { try writer.print(" = {s}", .{deref_prefix}); try o.writeCValue(writer, operand); From b2733a36f8fa2379fd4e07f936a4ad22a4541c7c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 17 Jul 2021 12:41:39 -0700 Subject: [PATCH 28/53] Sema: memoize decl_val instructions when result is constant --- src/Sema.zig | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 79f1ed0614..d796ae2a5a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -45,6 +45,7 @@ branch_count: u32 = 0, /// contain a mapped source location. 
src: LazySrcLoc = .{ .token_offset = 0 }, next_arg_index: usize = 0, +decl_val_table: std.AutoHashMapUnmanaged(*Decl, Air.Inst.Ref) = .{}, const std = @import("std"); const mem = std.mem; @@ -77,6 +78,7 @@ pub fn deinit(sema: *Sema) void { sema.air_values.deinit(gpa); sema.air_variables.deinit(gpa); sema.inst_map.deinit(gpa); + sema.decl_val_table.deinit(gpa); sema.* = undefined; } @@ -7159,9 +7161,23 @@ fn coerceArrayPtrToMany( return sema.mod.fail(&block.base, inst_src, "TODO implement coerceArrayPtrToMany runtime instruction", .{}); } -fn analyzeDeclVal(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { +fn analyzeDeclVal( + sema: *Sema, + block: *Scope.Block, + src: LazySrcLoc, + decl: *Decl, +) CompileError!Air.Inst.Ref { + if (sema.decl_val_table.get(decl)) |result| { + return result; + } const decl_ref = try sema.analyzeDeclRef(block, src, decl); - return sema.analyzeLoad(block, src, decl_ref, src); + const result = try sema.analyzeLoad(block, src, decl_ref, src); + if (Air.refToIndex(result)) |index| { + if (sema.air_instructions.items(.tag)[index] == .constant) { + sema.decl_val_table.put(sema.gpa, decl, result) catch {}; + } + } + return result; } fn analyzeDeclRef(sema: *Sema, block: *Scope.Block, src: LazySrcLoc, decl: *Decl) CompileError!Air.Inst.Ref { From 33aab2c1bbe55cdd3d2d08dc429260d06898d36d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sat, 17 Jul 2021 12:42:05 -0700 Subject: [PATCH 29/53] stage2: ELF linking: avoid crashing for stupidly large functions --- src/link/Elf.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/link/Elf.zig b/src/link/Elf.zig index 815c0c9f23..315dfb563b 100644 --- a/src/link/Elf.zig +++ b/src/link/Elf.zig @@ -3080,7 +3080,7 @@ fn pwriteDbgLineNops( const page_of_nops = [1]u8{DW.LNS_negate_stmt} ** 4096; const three_byte_nop = [3]u8{ DW.LNS_advance_pc, 0b1000_0000, 0 }; - var vecs: [256]std.os.iovec_const = undefined; + var vecs: [512]std.os.iovec_const = undefined; var vec_index: usize = 0; { var padding_left = prev_padding_size; From 4a0f38bb7671750be1590e815def72e3b4a34ccf Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 18 Jul 2021 22:26:36 -0700 Subject: [PATCH 30/53] stage2: update LLVM backend to new AIR memory layout Also fix compile errors when not using -Dskip-non-native --- src/codegen.zig | 54 ++++--- src/codegen/c.zig | 3 + src/codegen/llvm.zig | 359 ++++++++++++++++++++++++------------------ src/codegen/spirv.zig | 142 +++++++++-------- src/codegen/wasm.zig | 8 +- src/link/Coff.zig | 15 +- src/link/MachO.zig | 245 +++++++++++++++------------- src/link/SpirV.zig | 16 +- src/link/Wasm.zig | 2 + 9 files changed, 477 insertions(+), 367 deletions(-) diff --git a/src/codegen.zig b/src/codegen.zig index 11a2603aac..20d7035822 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -642,7 +642,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); // Backpatch push callee saved regs var saved_regs = Instruction.RegisterList{ @@ -703,7 +703,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.ldm(.al, .sp, true, saved_regs).toU32()); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, @@ -727,7 +727,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { try 
self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); // Backpatch stack offset const stack_end = self.max_end_stack; @@ -779,13 +779,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { writeInt(u32, try self.code.addManyAsArray(4), Instruction.ret(null).toU32()); } else { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); } }, else => { try self.dbgSetPrologueEnd(); - try self.genBody(self.mod_fn.body); + try self.genBody(self.air.getMainBody()); try self.dbgSetEpilogueBegin(); }, } @@ -1492,7 +1492,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }; } - fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: ir.Inst.Tag) !MCValue { + fn genArmBinOp(self: *Self, inst: Air.Inst.Index, op_lhs: Air.Inst.Ref, op_rhs: Air.Inst.Ref, op: Air.Inst.Tag) !MCValue { const lhs = try self.resolveInst(op_lhs); const rhs = try self.resolveInst(op_rhs); @@ -1514,14 +1514,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (reuse_lhs) { // Allocate 0 or 1 registers if (!rhs_is_register and rhs_should_be_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register and lhs_should_be_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1542,7 +1542,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? 
}, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -1572,10 +1572,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Move the operands to the newly allocated registers if (lhs_mcv == .register and !lhs_is_register) { - try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs); + try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs); } if (rhs_mcv == .register and !rhs_is_register) { - try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs); + try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs); } try self.genArmBinOpCode( @@ -1594,7 +1594,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv: MCValue, rhs_mcv: MCValue, swap_lhs_and_rhs: bool, - op: ir.Inst.Tag, + op: Air.Inst.Tag, ) !void { assert(lhs_mcv == .register or rhs_mcv == .register); @@ -1665,14 +1665,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { if (reuse_lhs) { // Allocate 0 or 1 registers if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_rhs, &.{lhs.register}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_rhs).?, &.{lhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_rhs).?, rhs_mcv); } dst_mcv = lhs; } else if (reuse_rhs) { // Allocate 0 or 1 registers if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(op_lhs, &.{rhs.register}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(op_lhs).?, &.{rhs.register}) }; branch.inst_table.putAssumeCapacity(Air.refToIndex(op_lhs).?, lhs_mcv); } dst_mcv = rhs; @@ -1690,7 +1690,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { lhs_mcv = dst_mcv; } else { // Move LHS and RHS to register - const regs = try self.register_manager.allocRegs(2, .{ inst, op_rhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ inst, Air.refToIndex(op_rhs).? 
}, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; dst_mcv = lhs_mcv; @@ -1701,10 +1701,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Move the operands to the newly allocated registers if (!lhs_is_register) { - try self.genSetReg(op_lhs.ty, lhs_mcv.register, lhs); + try self.genSetReg(self.air.typeOf(op_lhs), lhs_mcv.register, lhs); } if (!rhs_is_register) { - try self.genSetReg(op_rhs.ty, rhs_mcv.register, rhs); + try self.genSetReg(self.air.typeOf(op_rhs), rhs_mcv.register, rhs); } writeInt(u32, try self.code.addManyAsArray(4), Instruction.mul(.al, dst_mcv.register, lhs_mcv.register, rhs_mcv.register).toU32()); @@ -2704,9 +2704,9 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, .aarch64 => { for (info.args) |mc_arg, arg_i| { - const arg = inst.args[arg_i]; + const arg = args[arg_i]; const arg_ty = self.air.typeOf(arg); - const arg_mcv = try self.resolveInst(inst.args[arg_i]); + const arg_mcv = try self.resolveInst(args[arg_i]); switch (mc_arg) { .none => continue, @@ -2733,7 +2733,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { }, } } - if (inst.func.value()) |func_value| { + if (self.air.value(callee)) |func_value| { if (func_value.castTag(.function)) |func_payload| { const ptr_bits = self.target.cpu.arch.ptrBitWidth(); const ptr_bytes: u64 = @divExact(ptr_bits, 8); @@ -2899,15 +2899,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // Allocate registers if (rhs_should_be_register) { if (!lhs_is_register and !rhs_is_register) { - const regs = try self.register_manager.allocRegs(2, .{ bin_op.rhs, bin_op.lhs }, &.{}); + const regs = try self.register_manager.allocRegs(2, .{ + Air.refToIndex(bin_op.rhs).?, Air.refToIndex(bin_op.lhs).?, + }, &.{}); lhs_mcv = MCValue{ .register = regs[0] }; rhs_mcv = MCValue{ .register = regs[1] }; } else if (!rhs_is_register) { - rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.rhs, &.{}) }; + rhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.rhs).?, &.{}) }; } } if (!lhs_is_register) { - lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(bin_op.lhs, &.{}) }; + lhs_mcv = MCValue{ .register = try self.register_manager.allocReg(Air.refToIndex(bin_op.lhs).?, &.{}) }; } // Move the operands to the newly allocated registers @@ -3538,7 +3540,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .aarch64 => result: { @@ -3576,7 +3578,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("unrecognized register: '{s}'", .{reg_name}); break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .riscv64 => result: { @@ -3612,7 +3614,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return self.fail("unrecognized register: '{s}'", .{reg_name}); break :result MCValue{ .register = reg }; } else { - break :result MCValue.none; + break :result MCValue{ .none = {} }; } }, .x86_64, .i386 => result: { diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 1fe330a894..7137116037 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -974,6 +974,9 @@ fn airArg(o: *Object) CValue { fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue { const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const is_volatile = o.air.typeOf(ty_op.operand).isVolatilePtr(); + if (!is_volatile 
and o.liveness.isUnused(inst)) + return CValue.none; const inst_ty = o.air.typeOfIndex(inst); const operand = try o.resolveInst(ty_op.operand); const writer = o.writer(); diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index ddf2883259..d9090c9f2c 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -10,7 +10,7 @@ const math = std.math; const Module = @import("../Module.zig"); const TypedValue = @import("../TypedValue.zig"); const Air = @import("../Air.zig"); -const Inst = ir.Inst; +const Liveness = @import("../Liveness.zig"); const Value = @import("../value.zig").Value; const Type = @import("../type.zig").Type; @@ -355,6 +355,7 @@ pub const DeclGen = struct { builder.positionBuilderAtEnd(entry_block); var fg: FuncGen = .{ + .gpa = self.gpa, .dg = self, .builder = builder, .args = args, @@ -593,29 +594,29 @@ pub const DeclGen = struct { }; pub const FuncGen = struct { + gpa: *Allocator, dg: *DeclGen, builder: *const llvm.Builder, - /// This stores the LLVM values used in a function, such that they can be - /// referred to in other instructions. This table is cleared before every function is generated. - /// TODO: Change this to a stack of Branch. Currently we store all the values from all the blocks - /// in here, however if a block ends, the instructions can be thrown away. - func_inst_table: std.AutoHashMapUnmanaged(*Inst, *const llvm.Value), + /// This stores the LLVM values used in a function, such that they can be referred to + /// in other instructions. This table is cleared before every function is generated. + func_inst_table: std.AutoHashMapUnmanaged(Air.Inst.Index, *const llvm.Value), - /// These fields are used to refer to the LLVM value of the function paramaters in an Arg instruction. + /// These fields are used to refer to the LLVM value of the function paramaters + /// in an Arg instruction. args: []*const llvm.Value, arg_index: usize, entry_block: *const llvm.BasicBlock, - /// This fields stores the last alloca instruction, such that we can append more alloca instructions - /// to the top of the function. + /// This fields stores the last alloca instruction, such that we can append + /// more alloca instructions to the top of the function. latest_alloca_inst: ?*const llvm.Value, llvm_func: *const llvm.Value, /// This data structure is used to implement breaking to blocks. 
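/// Each entry records the basic block that `br` jumps to, plus the
/// predecessor blocks and break values that feed the block's result phi node.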
- blocks: std.AutoHashMapUnmanaged(*Inst.Block, struct { + blocks: std.AutoHashMapUnmanaged(Air.Inst.Index, struct { parent_bb: *const llvm.BasicBlock, break_bbs: *BreakBasicBlocks, break_vals: *BreakValues, @@ -626,9 +627,9 @@ pub const FuncGen = struct { fn deinit(self: *FuncGen) void { self.builder.dispose(); - self.func_inst_table.deinit(self.gpa()); - self.gpa().free(self.args); - self.blocks.deinit(self.gpa()); + self.func_inst_table.deinit(self.gpa); + self.gpa.free(self.args); + self.blocks.deinit(self.gpa); } fn todo(self: *FuncGen, comptime format: []const u8, args: anytype) error{ OutOfMemory, CodegenFail } { @@ -644,13 +645,9 @@ pub const FuncGen = struct { return self.dg.object.context; } - fn gpa(self: *FuncGen) *Allocator { - return self.dg.gpa; - } - - fn resolveInst(self: *FuncGen, inst: *ir.Inst) !*const llvm.Value { - if (inst.value()) |val| { - return self.dg.genTypedValue(.{ .ty = inst.ty, .val = val }, self); + fn resolveInst(self: *FuncGen, inst: Air.Inst.Ref) !*const llvm.Value { + if (self.air.value(inst)) |val| { + return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val }, self); } if (self.func_inst_table.get(inst)) |value| return value; @@ -658,51 +655,57 @@ pub const FuncGen = struct { } fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void { + const air_tags = self.air.instructions.items(.tag); for (body.instructions) |inst| { - const opt_value = switch (inst.tag) { - .add => try self.genAdd(inst.castTag(.add).?), - .alloc => try self.genAlloc(inst.castTag(.alloc).?), - .arg => try self.genArg(inst.castTag(.arg).?), - .bitcast => try self.genBitCast(inst.castTag(.bitcast).?), - .block => try self.genBlock(inst.castTag(.block).?), - .br => try self.genBr(inst.castTag(.br).?), - .breakpoint => try self.genBreakpoint(inst.castTag(.breakpoint).?), - .br_void => try self.genBrVoid(inst.castTag(.br_void).?), - .call => try self.genCall(inst.castTag(.call).?), - .cmp_eq => try self.genCmp(inst.castTag(.cmp_eq).?, .eq), - .cmp_gt => try self.genCmp(inst.castTag(.cmp_gt).?, .gt), - .cmp_gte => try self.genCmp(inst.castTag(.cmp_gte).?, .gte), - .cmp_lt => try self.genCmp(inst.castTag(.cmp_lt).?, .lt), - .cmp_lte => try self.genCmp(inst.castTag(.cmp_lte).?, .lte), - .cmp_neq => try self.genCmp(inst.castTag(.cmp_neq).?, .neq), - .condbr => try self.genCondBr(inst.castTag(.condbr).?), - .intcast => try self.genIntCast(inst.castTag(.intcast).?), - .is_non_null => try self.genIsNonNull(inst.castTag(.is_non_null).?, false), - .is_non_null_ptr => try self.genIsNonNull(inst.castTag(.is_non_null_ptr).?, true), - .is_null => try self.genIsNull(inst.castTag(.is_null).?, false), - .is_null_ptr => try self.genIsNull(inst.castTag(.is_null_ptr).?, true), - .load => try self.genLoad(inst.castTag(.load).?), - .loop => try self.genLoop(inst.castTag(.loop).?), - .not => try self.genNot(inst.castTag(.not).?), - .ret => try self.genRet(inst.castTag(.ret).?), - .retvoid => self.genRetVoid(inst.castTag(.retvoid).?), - .store => try self.genStore(inst.castTag(.store).?), - .sub => try self.genSub(inst.castTag(.sub).?), - .unreach => self.genUnreach(inst.castTag(.unreach).?), - .optional_payload => try self.genOptionalPayload(inst.castTag(.optional_payload).?, false), - .optional_payload_ptr => try self.genOptionalPayload(inst.castTag(.optional_payload_ptr).?, true), + const opt_value = switch (air_tags[inst]) { + .add => try self.airAdd(inst), + .sub => try self.airSub(inst), + + .cmp_eq => try self.airCmp(inst, .eq), + .cmp_gt => try 
self.airCmp(inst, .gt), + .cmp_gte => try self.airCmp(inst, .gte), + .cmp_lt => try self.airCmp(inst, .lt), + .cmp_lte => try self.airCmp(inst, .lte), + .cmp_neq => try self.airCmp(inst, .neq), + + .is_non_null => try self.airIsNonNull(inst, false), + .is_non_null_ptr => try self.airIsNonNull(inst, true), + .is_null => try self.airIsNull(inst, false), + .is_null_ptr => try self.airIsNull(inst, true), + + .alloc => try self.airAlloc(inst), + .arg => try self.airArg(inst), + .bitcast => try self.airBitCast(inst), + .block => try self.airBlock(inst), + .br => try self.airBr(inst), + .breakpoint => try self.airBreakpoint(inst), + .call => try self.airCall(inst), + .cond_br => try self.airCondBr(inst), + .intcast => try self.airIntCast(inst), + .load => try self.airLoad(inst), + .loop => try self.airLoop(inst), + .not => try self.airNot(inst), + .ret => try self.airRet(inst), + .store => try self.airStore(inst), + .unreach => self.airUnreach(inst), + .optional_payload => try self.airOptionalPayload(inst, false), + .optional_payload_ptr => try self.airOptionalPayload(inst, true), .dbg_stmt => blk: { // TODO: implement debug info break :blk null; }, - else => |tag| return self.todo("implement TZIR instruction: {}", .{tag}), + else => |tag| return self.todo("implement AIR instruction: {}", .{tag}), }; - if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa(), inst, val); + if (opt_value) |val| try self.func_inst_table.putNoClobber(self.gpa, inst, val); } } - fn genCall(self: *FuncGen, inst: *Inst.Call) !?*const llvm.Value { - if (inst.func.value()) |func_value| { + fn airCall(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const extra = self.air.extraData(Air.Call, pl_op.payload); + const args = @bitCast([]const Air.Inst.Ref, self.air.extra[extra.end..][0..extra.data.args_len]); + + if (self.air.value(pl_op.operand)) |func_value| { const fn_decl = if (func_value.castTag(.extern_fn)) |extern_fn| extern_fn.data else if (func_value.castTag(.function)) |func_payload| @@ -714,12 +717,10 @@ pub const FuncGen = struct { const zig_fn_type = fn_decl.ty; const llvm_fn = try self.dg.resolveLLVMFunction(fn_decl); - const num_args = inst.args.len; + const llvm_param_vals = try self.gpa.alloc(*const llvm.Value, args.len); + defer self.gpa.free(llvm_param_vals); - const llvm_param_vals = try self.gpa().alloc(*const llvm.Value, num_args); - defer self.gpa().free(llvm_param_vals); - - for (inst.args) |arg, i| { + for (args) |arg, i| { llvm_param_vals[i] = try self.resolveInst(arg); } @@ -727,8 +728,8 @@ pub const FuncGen = struct { // Do we need that? const call = self.builder.buildCall( llvm_fn, - if (num_args == 0) null else llvm_param_vals.ptr, - @intCast(c_uint, num_args), + if (args.len == 0) null else llvm_param_vals.ptr, + @intCast(c_uint, args.len), "", ); @@ -746,31 +747,31 @@ pub const FuncGen = struct { } } - fn genRetVoid(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value { - _ = inst; - _ = self.builder.buildRetVoid(); - return null; - } - - fn genRet(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - if (!inst.operand.ty.hasCodeGenBits()) { - // TODO: in astgen these instructions should turn into `retvoid` instructions. 
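The separate `retvoid` and `br_void` tags are gone in the new layout; `airRet` below emits `ret void` itself whenever the operand type has no runtime bits, making the removed TODO moot: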
+ fn airRet(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const un_op = self.air.instructions.items(.data)[inst].un_op; + if (!self.air.typeOf(un_op).hasCodeGenBits()) { _ = self.builder.buildRetVoid(); return null; } - _ = self.builder.buildRet(try self.resolveInst(inst.operand)); + const operand = try self.resolveInst(un_op); + _ = self.builder.buildRet(operand); return null; } - fn genCmp(self: *FuncGen, inst: *Inst.BinOp, op: math.CompareOperator) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airCmp(self: *FuncGen, inst: Air.Inst.Index, op: math.CompareOperator) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; - if (!inst.base.ty.isInt()) - if (inst.base.ty.tag() != .bool) - return self.todo("implement 'genCmp' for type {}", .{inst.base.ty}); + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - const is_signed = inst.base.ty.isSignedInt(); + if (!inst_ty.isInt()) + if (inst_ty.tag() != .bool) + return self.todo("implement 'airCmp' for type {}", .{inst_ty}); + + const is_signed = inst_ty.isSignedInt(); const operation = switch (op) { .eq => .EQ, .neq => .NE, @@ -783,32 +784,36 @@ pub const FuncGen = struct { return self.builder.buildICmp(operation, lhs, rhs, ""); } - fn genBlock(self: *FuncGen, inst: *Inst.Block) !?*const llvm.Value { + fn airBlock(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const extra = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[extra.end..][0..extra.data.body_len]; const parent_bb = self.context().createBasicBlock("Block"); // 5 breaks to a block seems like a reasonable default. - var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa(), 5); - var break_vals = try BreakValues.initCapacity(self.gpa(), 5); - try self.blocks.putNoClobber(self.gpa(), inst, .{ + var break_bbs = try BreakBasicBlocks.initCapacity(self.gpa, 5); + var break_vals = try BreakValues.initCapacity(self.gpa, 5); + try self.blocks.putNoClobber(self.gpa, inst, .{ .parent_bb = parent_bb, .break_bbs = &break_bbs, .break_vals = &break_vals, }); defer { assert(self.blocks.remove(inst)); - break_bbs.deinit(self.gpa()); - break_vals.deinit(self.gpa()); + break_bbs.deinit(self.gpa); + break_vals.deinit(self.gpa); } - try self.genBody(inst.body); + try self.genBody(body); self.llvm_func.appendExistingBasicBlock(parent_bb); self.builder.positionBuilderAtEnd(parent_bb); // If the block does not return a value, we dont have to create a phi node. 
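// Otherwise every `br` to this block has recorded its source basic block
// and break value; feeding those pairs into one phi node below collapses
// the block's many breaks into a single SSA result.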
- if (!inst.base.ty.hasCodeGenBits()) return null; + const inst_ty = self.air.typeOfIndex(inst); + if (!inst_ty.hasCodeGenBits()) return null; - const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst.base.ty), ""); + const phi_node = self.builder.buildPhi(try self.dg.getLLVMType(inst_ty), ""); phi_node.addIncoming( break_vals.items.ptr, break_bbs.items.ptr, @@ -817,35 +822,30 @@ pub const FuncGen = struct { return phi_node; } - fn genBr(self: *FuncGen, inst: *Inst.Br) !?*const llvm.Value { - var block = self.blocks.get(inst.block).?; + fn airBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const branch = self.air.instructions.items(.data)[inst].br; + const block = self.blocks.get(branch.block_inst).?; // If the break doesn't break a value, then we don't have to add // the values to the lists. - if (!inst.operand.ty.hasCodeGenBits()) { - // TODO: in astgen these instructions should turn into `br_void` instructions. - _ = self.builder.buildBr(block.parent_bb); - } else { - const val = try self.resolveInst(inst.operand); + if (self.air.typeOf(branch.result).hasCodeGenBits()) { + const val = try self.resolveInst(branch.result); // For the phi node, we need the basic blocks and the values of the // break instructions. - try block.break_bbs.append(self.gpa(), self.builder.getInsertBlock()); - try block.break_vals.append(self.gpa(), val); - - _ = self.builder.buildBr(block.parent_bb); + try block.break_bbs.append(self.gpa, self.builder.getInsertBlock()); + try block.break_vals.append(self.gpa, val); } - return null; - } - - fn genBrVoid(self: *FuncGen, inst: *Inst.BrVoid) !?*const llvm.Value { - var block = self.blocks.get(inst.block).?; _ = self.builder.buildBr(block.parent_bb); return null; } - fn genCondBr(self: *FuncGen, inst: *Inst.CondBr) !?*const llvm.Value { - const condition_value = try self.resolveInst(inst.condition); + fn airCondBr(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const pl_op = self.air.instructions.items(.data)[inst].pl_op; + const cond = try self.resolveInst(pl_op.operand); + const extra = self.air.extraData(Air.CondBr, pl_op.payload); + const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = self.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; const then_block = self.context().appendBasicBlock(self.llvm_func, "Then"); const else_block = self.context().appendBasicBlock(self.llvm_func, "Else"); @@ -854,38 +854,51 @@ pub const FuncGen = struct { defer self.builder.positionBuilderAtEnd(prev_block); self.builder.positionBuilderAtEnd(then_block); - try self.genBody(inst.then_body); + try self.genBody(then_body); self.builder.positionBuilderAtEnd(else_block); - try self.genBody(inst.else_body); + try self.genBody(else_body); } - _ = self.builder.buildCondBr(condition_value, then_block, else_block); + _ = self.builder.buildCondBr(cond, then_block, else_block); return null; } - fn genLoop(self: *FuncGen, inst: *Inst.Loop) !?*const llvm.Value { + fn airLoop(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); + const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_block = self.context().appendBasicBlock(self.llvm_func, "Loop"); _ = self.builder.buildBr(loop_block); self.builder.positionBuilderAtEnd(loop_block); - try self.genBody(inst.body); + try self.genBody(body); _ = self.builder.buildBr(loop_block); return null; } - fn 
genNot(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - return self.builder.buildNot(try self.resolveInst(inst.operand), ""); + fn airNot(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); + + return self.builder.buildNot(operand, ""); } - fn genUnreach(self: *FuncGen, inst: *Inst.NoOp) ?*const llvm.Value { + fn airUnreach(self: *FuncGen, inst: Air.Inst.Index) ?*const llvm.Value { _ = inst; _ = self.builder.buildUnreachable(); return null; } - fn genIsNonNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - const operand = try self.resolveInst(inst.operand); + fn airIsNonNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); if (operand_is_ptr) { const index_type = self.context().intType(32); @@ -901,12 +914,23 @@ pub const FuncGen = struct { } } - fn genIsNull(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - return self.builder.buildNot((try self.genIsNonNull(inst, operand_is_ptr)).?, ""); + fn airIsNull(self: *FuncGen, inst: Air.Inst.Index, operand_is_ptr: bool) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + return self.builder.buildNot((try self.airIsNonNull(inst, operand_is_ptr)).?, ""); } - fn genOptionalPayload(self: *FuncGen, inst: *Inst.UnOp, operand_is_ptr: bool) !?*const llvm.Value { - const operand = try self.resolveInst(inst.operand); + fn airOptionalPayload( + self: *FuncGen, + inst: Air.Inst.Index, + operand_is_ptr: bool, + ) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); if (operand_is_ptr) { const index_type = self.context().intType(32); @@ -922,61 +946,83 @@ pub const FuncGen = struct { } } - fn genAdd(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airAdd(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - if (!inst.base.ty.isInt()) - return self.todo("implement 'genAdd' for type {}", .{inst.base.ty}); + if (!inst_ty.isInt()) + return self.todo("implement 'airAdd' for type {}", .{inst_ty}); - return if (inst.base.ty.isSignedInt()) + return if (inst_ty.isSignedInt()) self.builder.buildNSWAdd(lhs, rhs, "") else self.builder.buildNUWAdd(lhs, rhs, ""); } - fn genSub(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const lhs = try self.resolveInst(inst.lhs); - const rhs = try self.resolveInst(inst.rhs); + fn airSub(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const lhs = try self.resolveInst(bin_op.lhs); + const rhs = try self.resolveInst(bin_op.rhs); + const inst_ty = self.air.typeOfIndex(inst); - if (!inst.base.ty.isInt()) - return self.todo("implement 'genSub' 
for type {}", .{inst.base.ty}); + if (!inst_ty.isInt()) + return self.todo("implement 'airSub' for type {}", .{inst_ty}); - return if (inst.base.ty.isSignedInt()) + return if (inst_ty.isSignedInt()) self.builder.buildNSWSub(lhs, rhs, "") else self.builder.buildNUWSub(lhs, rhs, ""); } - fn genIntCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.operand); + fn airIntCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; - const signed = inst.base.ty.isSignedInt(); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const inst_ty = self.air.typeOfIndex(inst); + + const signed = inst_ty.isSignedInt(); // TODO: Should we use intcast here or just a simple bitcast? // LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes - return self.builder.buildIntCast2(val, try self.dg.getLLVMType(inst.base.ty), llvm.Bool.fromBool(signed), ""); + return self.builder.buildIntCast2(operand, try self.dg.getLLVMType(inst_ty), llvm.Bool.fromBool(signed), ""); } - fn genBitCast(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.operand); - const dest_type = try self.dg.getLLVMType(inst.base.ty); + fn airBitCast(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; - return self.builder.buildBitCast(val, dest_type, ""); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); + const inst_ty = self.air.typeOfIndex(inst); + const dest_type = try self.dg.getLLVMType(inst_ty); + + return self.builder.buildBitCast(operand, dest_type, ""); } - fn genArg(self: *FuncGen, inst: *Inst.Arg) !?*const llvm.Value { + fn airArg(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { const arg_val = self.args[self.arg_index]; self.arg_index += 1; - const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst.base.ty)); + const inst_ty = self.air.typeOfIndex(inst); + const ptr_val = self.buildAlloca(try self.dg.getLLVMType(inst_ty)); _ = self.builder.buildStore(arg_val, ptr_val); return self.builder.buildLoad(ptr_val, ""); } - fn genAlloc(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value { + fn airAlloc(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + if (self.liveness.isUnused(inst)) + return null; // buildAlloca expects the pointee type, not the pointer type, so assert that // a Payload.PointerSimple is passed to the alloc instruction. - const pointee_type = inst.base.ty.castPointer().?.data; + const inst_ty = self.air.typeOfIndex(inst); + const pointee_type = inst_ty.castPointer().?.data; // TODO: figure out a way to get the name of the var decl. 
// TODO: set alignment and volatile @@ -1007,19 +1053,26 @@ pub const FuncGen = struct { return val; } - fn genStore(self: *FuncGen, inst: *Inst.BinOp) !?*const llvm.Value { - const val = try self.resolveInst(inst.rhs); - const ptr = try self.resolveInst(inst.lhs); - _ = self.builder.buildStore(val, ptr); + fn airStore(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const bin_op = self.air.instructions.items(.data)[inst].bin_op; + const dest_ptr = try self.resolveInst(bin_op.lhs); + const src_operand = try self.resolveInst(bin_op.rhs); + // TODO set volatile on this store properly + _ = self.builder.buildStore(src_operand, dest_ptr); return null; } - fn genLoad(self: *FuncGen, inst: *Inst.UnOp) !?*const llvm.Value { - const ptr_val = try self.resolveInst(inst.operand); - return self.builder.buildLoad(ptr_val, ""); + fn airLoad(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const is_volatile = self.air.typeOf(ty_op.operand).isVolatilePtr(); + if (!is_volatile and self.liveness.isUnused(inst)) + return null; + const ptr = try self.resolveInst(ty_op.operand); + // TODO set volatile on this load properly + return self.builder.buildLoad(ptr, ""); } - fn genBreakpoint(self: *FuncGen, inst: *Inst.NoOp) !?*const llvm.Value { + fn airBreakpoint(self: *FuncGen, inst: Air.Inst.Index) !?*const llvm.Value { _ = inst; const llvn_fn = self.getIntrinsic("llvm.debugtrap"); _ = self.builder.buildCall(llvn_fn, null, 0, ""); diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4da320b087..7429e3c3b0 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -13,6 +13,7 @@ const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const LazySrcLoc = Module.LazySrcLoc; const Air = @import("../Air.zig"); +const Liveness = @import("../Liveness.zig"); pub const Word = u32; pub const ResultId = u32; @@ -247,6 +248,7 @@ pub const DeclGen = struct { return .{ .spv = spv, .air = undefined, + .liveness = undefined, .args = std.ArrayList(ResultId).init(spv.gpa), .next_arg_index = undefined, .inst_results = InstMap.init(spv.gpa), @@ -259,11 +261,12 @@ pub const DeclGen = struct { } /// Generate the code for `decl`. If a reportable error occured during code generation, - /// a message is returned by this function. Callee owns the memory. If this function returns such - /// a reportable error, it is valid to be called again for a different decl. - pub fn gen(self: *DeclGen, decl: *Decl, air: Air) !?*Module.ErrorMsg { + /// a message is returned by this function. Callee owns the memory. If this function + /// returns such a reportable error, it is valid to be called again for a different decl. + pub fn gen(self: *DeclGen, decl: *Decl, air: Air, liveness: Liveness) !?*Module.ErrorMsg { // Reset internal resources, we don't want to re-allocate these. - self.air = &air; + self.air = air; + self.liveness = liveness; self.args.items.len = 0; self.next_arg_index = 0; self.inst_results.clearRetainingCapacity(); @@ -297,12 +300,12 @@ pub const DeclGen = struct { return error.AnalysisFail; } - fn resolve(self: *DeclGen, inst: Air.Inst.Index) !ResultId { - if (inst.value()) |val| { - return self.genConstant(inst.ty, val); + fn resolve(self: *DeclGen, inst: Air.Inst.Ref) !ResultId { + if (self.air.value(inst)) |val| { + return self.genConstant(self.air.typeOf(inst), val); } - - return self.inst_results.get(inst).?; // Instruction does not dominate all uses! 
+ const index = Air.refToIndex(inst).?; + return self.inst_results.get(index).?; // Assertion means instruction does not dominate usage. } fn beginSPIRVBlock(self: *DeclGen, label_id: ResultId) !void { @@ -663,40 +666,40 @@ pub const DeclGen = struct { const air_tags = self.air.instructions.items(.tag); const result_id = switch (air_tags[inst]) { // zig fmt: off - .add, .addwrap => try self.genArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), - .sub, .subwrap => try self.genArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), - .mul, .mulwrap => try self.genArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), - .div => try self.genArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), + .add, .addwrap => try self.airArithOp(inst, .{.OpFAdd, .OpIAdd, .OpIAdd}), + .sub, .subwrap => try self.airArithOp(inst, .{.OpFSub, .OpISub, .OpISub}), + .mul, .mulwrap => try self.airArithOp(inst, .{.OpFMul, .OpIMul, .OpIMul}), + .div => try self.airArithOp(inst, .{.OpFDiv, .OpSDiv, .OpUDiv}), - .bit_and => try self.genBinOpSimple(inst, .OpBitwiseAnd), - .bit_or => try self.genBinOpSimple(inst, .OpBitwiseOr), - .xor => try self.genBinOpSimple(inst, .OpBitwiseXor), - .bool_and => try self.genBinOpSimple(inst, .OpLogicalAnd), - .bool_or => try self.genBinOpSimple(inst, .OpLogicalOr), + .bit_and => try self.airBinOpSimple(inst, .OpBitwiseAnd), + .bit_or => try self.airBinOpSimple(inst, .OpBitwiseOr), + .xor => try self.airBinOpSimple(inst, .OpBitwiseXor), + .bool_and => try self.airBinOpSimple(inst, .OpLogicalAnd), + .bool_or => try self.airBinOpSimple(inst, .OpLogicalOr), - .not => try self.genNot(inst), + .not => try self.airNot(inst), - .cmp_eq => try self.genCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), - .cmp_neq => try self.genCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), - .cmp_gt => try self.genCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), - .cmp_gte => try self.genCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), - .cmp_lt => try self.genCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), - .cmp_lte => try self.genCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), + .cmp_eq => try self.airCmp(inst, .{.OpFOrdEqual, .OpLogicalEqual, .OpIEqual}), + .cmp_neq => try self.airCmp(inst, .{.OpFOrdNotEqual, .OpLogicalNotEqual, .OpINotEqual}), + .cmp_gt => try self.airCmp(inst, .{.OpFOrdGreaterThan, .OpSGreaterThan, .OpUGreaterThan}), + .cmp_gte => try self.airCmp(inst, .{.OpFOrdGreaterThanEqual, .OpSGreaterThanEqual, .OpUGreaterThanEqual}), + .cmp_lt => try self.airCmp(inst, .{.OpFOrdLessThan, .OpSLessThan, .OpULessThan}), + .cmp_lte => try self.airCmp(inst, .{.OpFOrdLessThanEqual, .OpSLessThanEqual, .OpULessThanEqual}), - .arg => self.genArg(), - .alloc => try self.genAlloc(inst), - .block => (try self.genBlock(inst)) orelse return, - .load => try self.genLoad(inst), + .arg => self.airArg(), + .alloc => try self.airAlloc(inst), + .block => (try self.airBlock(inst)) orelse return, + .load => try self.airLoad(inst), - .br => return self.genBr(inst), + .br => return self.airBr(inst), .breakpoint => return, - .cond_br => return self.genCondBr(inst), + .cond_br => return self.airCondBr(inst), .constant => unreachable, - .dbg_stmt => return self.genDbgStmt(inst), - .loop => return self.genLoop(inst), - .ret => return self.genRet(inst), - .store => return self.genStore(inst), - .unreach => return self.genUnreach(), + .dbg_stmt => return self.airDbgStmt(inst), + .loop => return self.airLoop(inst), + .ret => return self.airRet(inst), + 
.store => return self.airStore(inst), + .unreach => return self.airUnreach(), // zig fmt: on else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{ @@ -707,21 +710,22 @@ pub const DeclGen = struct { try self.inst_results.putNoClobber(inst, result_id); } - fn genBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { + fn airBinOpSimple(self: *DeclGen, inst: Air.Inst.Index, opcode: Opcode) !ResultId { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); + const result_type_id = try self.genType(self.air.typeOfIndex(inst)); try writeInstruction(&self.code, opcode, &[_]Word{ result_type_id, result_id, lhs_id, rhs_id, }); return result_id; } - fn genArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + fn airArithOp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { // LHS and RHS are guaranteed to have the same type, and AIR guarantees // the result to be the same as the LHS and RHS, which matches SPIR-V. - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); @@ -729,8 +733,8 @@ pub const DeclGen = struct { const result_id = self.spv.allocResultId(); const result_type_id = try self.genType(ty); - assert(self.air.getType(bin_op.lhs).eql(ty)); - assert(self.air.getType(bin_op.rhs).eql(ty)); + assert(self.air.typeOf(bin_op.lhs).eql(ty)); + assert(self.air.typeOf(bin_op.rhs).eql(ty)); // Binary operations are generally applicable to both scalar and vector operations // in SPIR-V, but int and float versions of operations require different opcodes. @@ -744,8 +748,8 @@ pub const DeclGen = struct { return self.fail("TODO: SPIR-V backend: binary operations for strange integers", .{}); }, .integer => switch (info.signedness) { - .signed => 1, - .unsigned => 2, + .signed => @as(usize, 1), + .unsigned => @as(usize, 2), }, .float => 0, else => unreachable, @@ -759,14 +763,14 @@ pub const DeclGen = struct { return result_id; } - fn genCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { + fn airCmp(self: *DeclGen, inst: Air.Inst.Index, ops: [3]Opcode) !ResultId { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs_id = try self.resolve(bin_op.lhs); const rhs_id = try self.resolve(bin_op.rhs); const result_id = self.spv.allocResultId(); const result_type_id = try self.genType(Type.initTag(.bool)); - const op_ty = self.air.getType(bin_op.lhs); - assert(op_ty.eql(self.air.getType(bin_op.rhs))); + const op_ty = self.air.typeOf(bin_op.lhs); + assert(op_ty.eql(self.air.typeOf(bin_op.rhs))); // Comparisons are generally applicable to both scalar and vector operations in SPIR-V, // but int and float versions of operations require different opcodes. 
@@ -782,10 +786,9 @@ pub const DeclGen = struct { .float => 0, .bool => 1, .integer => switch (info.signedness) { - .signed => 1, - .unsigned => 2, + .signed => @as(usize, 1), + .unsigned => @as(usize, 2), }, - else => unreachable, }; const opcode = ops[opcode_index]; @@ -793,7 +796,7 @@ pub const DeclGen = struct { return result_id; } - fn genNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + fn airNot(self: *DeclGen, inst: Air.Inst.Index) !ResultId { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); const result_id = self.spv.allocResultId(); @@ -803,8 +806,8 @@ pub const DeclGen = struct { return result_id; } - fn genAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { - const ty = self.air.getType(inst); + fn airAlloc(self: *DeclGen, inst: Air.Inst.Index) !ResultId { + const ty = self.air.typeOfIndex(inst); const storage_class = spec.StorageClass.Function; const result_type_id = try self.genPointerType(ty, storage_class); const result_id = self.spv.allocResultId(); @@ -816,12 +819,12 @@ pub const DeclGen = struct { return result_id; } - fn genArg(self: *DeclGen) ResultId { + fn airArg(self: *DeclGen) ResultId { defer self.next_arg_index += 1; return self.args.items[self.next_arg_index]; } - fn genBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { + fn airBlock(self: *DeclGen, inst: Air.Inst.Index) !?ResultId { // In IR, a block doesn't really define an entry point like a block, but more like a scope that breaks can jump out of and // "return" a value from. This cannot be directly modelled in SPIR-V, so in a block instruction, we're going to split up // the current block by first generating the code of the block, then a label, and then generate the rest of the current @@ -841,7 +844,7 @@ pub const DeclGen = struct { incoming_blocks.deinit(self.spv.gpa); } - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const inst_datas = self.air.instructions.items(.data); const extra = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); const body = self.air.extra[extra.end..][0..extra.data.body_len]; @@ -872,10 +875,10 @@ pub const DeclGen = struct { return result_id; } - fn genBr(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airBr(self: *DeclGen, inst: Air.Inst.Index) !void { const br = self.air.instructions.items(.data)[inst].br; const block = self.blocks.get(br.block_inst).?; - const operand_ty = self.air.getType(br.operand); + const operand_ty = self.air.typeOf(br.operand); if (operand_ty.hasCodeGenBits()) { const operand_id = try self.resolve(br.operand); @@ -886,7 +889,7 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpBranch, &[_]Word{block.label_id}); } - fn genCondBr(self: *DeclGen, inst: *Inst.CondBr) !void { + fn airCondBr(self: *DeclGen, inst: Air.Inst.Index) !void { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond_br = self.air.extraData(Air.CondBr, pl_op.payload); const then_body = self.air.extra[cond_br.end..][0..cond_br.data.then_body_len]; @@ -912,16 +915,16 @@ pub const DeclGen = struct { try self.genBody(else_body); } - fn genDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airDbgStmt(self: *DeclGen, inst: Air.Inst.Index) !void { const dbg_stmt = self.air.instructions.items(.data)[inst].dbg_stmt; const src_fname_id = try self.spv.resolveSourceFileName(self.decl); try writeInstruction(&self.code, .OpLine, &[_]Word{ src_fname_id, dbg_stmt.line, dbg_stmt.column }); } - fn genLoad(self: *DeclGen, inst: 
Air.Inst.Index) !ResultId { + fn airLoad(self: *DeclGen, inst: Air.Inst.Index) !ResultId { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand_id = try self.resolve(ty_op.operand); - const ty = self.air.getType(inst); + const ty = self.air.typeOfIndex(inst); const result_type_id = try self.genType(ty); const result_id = self.spv.allocResultId(); @@ -936,8 +939,9 @@ pub const DeclGen = struct { return result_id; } - fn genLoop(self: *DeclGen, inst: Air.Inst.Index) !void { - const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + fn airLoop(self: *DeclGen, inst: Air.Inst.Index) !void { + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; const loop_label_id = self.spv.allocResultId(); @@ -952,9 +956,9 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpBranch, &[_]Word{loop_label_id}); } - fn genRet(self: *DeclGen, inst: Air.Inst.Index) !void { - const operand = inst_datas[inst].un_op; - const operand_ty = self.air.getType(operand); + fn airRet(self: *DeclGen, inst: Air.Inst.Index) !void { + const operand = self.air.instructions.items(.data)[inst].un_op; + const operand_ty = self.air.typeOf(operand); if (operand_ty.hasCodeGenBits()) { const operand_id = try self.resolve(operand); try writeInstruction(&self.code, .OpReturnValue, &[_]Word{operand_id}); @@ -963,11 +967,11 @@ pub const DeclGen = struct { } } - fn genStore(self: *DeclGen, inst: Air.Inst.Index) !void { + fn airStore(self: *DeclGen, inst: Air.Inst.Index) !void { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const dst_ptr_id = try self.resolve(bin_op.lhs); const src_val_id = try self.resolve(bin_op.rhs); - const lhs_ty = self.air.getType(bin_op.lhs); + const lhs_ty = self.air.typeOf(bin_op.lhs); const operands = if (lhs_ty.isVolatilePtr()) &[_]Word{ dst_ptr_id, src_val_id, @bitCast(u32, spec.MemoryAccess{ .Volatile = true }) } @@ -977,7 +981,7 @@ pub const DeclGen = struct { try writeInstruction(&self.code, .OpStore, operands); } - fn genUnreach(self: *DeclGen) !void { + fn airUnreach(self: *DeclGen) !void { try writeInstruction(&self.code, .OpUnreachable, &[_]Word{}); } }; diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index 5cf3fb15fd..dbca818297 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -774,7 +774,7 @@ pub const Context = struct { } } return Result{ .externally_managed = payload.data }; - } else return self.fail(.{ .node_offset = 0 }, "TODO implement gen for more kinds of arrays", .{}); + } else return self.fail("TODO implement gen for more kinds of arrays", .{}); }, .Int => { const info = typed_value.ty.intInfo(self.target); @@ -783,9 +783,9 @@ pub const Context = struct { try self.code.append(@intCast(u8, int_byte)); return Result.appended; } - return self.fail(.{ .node_offset = 0 }, "TODO: Implement codegen for int type: '{}'", .{typed_value.ty}); + return self.fail("TODO: Implement codegen for int type: '{}'", .{typed_value.ty}); }, - else => |tag| return self.fail(.{ .node_offset = 0 }, "TODO: Implement zig type codegen for type: '{s}'", .{tag}), + else => |tag| return self.fail("TODO: Implement zig type codegen for type: '{s}'", .{tag}), } } @@ -883,7 +883,7 @@ pub const Context = struct { } fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { - const elem_type = self.air.getType(inst).elemType(); + const elem_type = self.air.typeOfIndex(inst).elemType(); return 
self.allocLocal(elem_type); } diff --git a/src/link/Coff.zig b/src/link/Coff.zig index 44442b73a3..50ad6bc1a0 100644 --- a/src/link/Coff.zig +++ b/src/link/Coff.zig @@ -657,11 +657,16 @@ fn writeOffsetTableEntry(self: *Coff, index: usize) !void { } pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void { - if (build_options.skip_non_native and builtin.object_format != .coff and builtin.object_format != .pe) { + if (build_options.skip_non_native and + builtin.object_format != .coff and + builtin.object_format != .pe) + { @panic("Attempted to compile for object format that was disabled by build configuration"); } if (build_options.have_llvm) { - if (self.llvm_object) |llvm_object| return llvm_object.updateFunc(module, func, air, liveness); + if (self.llvm_object) |llvm_object| { + return llvm_object.updateFunc(module, func, air, liveness); + } } const tracy = trace(@src()); defer tracy.end(); @@ -669,6 +674,7 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); + const decl = func.owner_decl; const res = try codegen.generateFunction( &self.base, decl.srcLoc(), @@ -679,7 +685,6 @@ pub fn updateFunc(self: *Coff, module: *Module, func: *Module.Fn, air: Air, live .none, ); const code = switch (res) { - .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { decl.analysis = .codegen_failure; @@ -725,10 +730,10 @@ pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void { }, }; - return self.finishUpdateDecl(module, func.owner_decl, code); + return self.finishUpdateDecl(module, decl, code); } -fn finishUpdateDecl(self: *Coff, decl: *Module.Decl, code: []const u8) !void { +fn finishUpdateDecl(self: *Coff, module: *Module, decl: *Module.Decl, code: []const u8) !void { const required_alignment = decl.ty.abiAlignment(self.base.options.target); const curr_size = decl.link.coff.size; if (curr_size != 0) { diff --git a/src/link/MachO.zig b/src/link/MachO.zig index cd020c1b27..4107607924 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -1150,9 +1150,13 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined; + const debug_buffers = if (self.d_sym) |*ds| blk: { + debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl); + break :blk &debug_buffers_buf; + } else null; defer { - if (debug_buffers) |*dbg| { + if (debug_buffers) |dbg| { dbg.dbg_line_buffer.deinit(); dbg.dbg_info_buffer.deinit(); var it = dbg.dbg_info_type_relocs.valueIterator(); @@ -1163,7 +1167,7 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv } } - const res = if (debug_buffers) |*dbg| + const res = if (debug_buffers) |dbg| try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .{ .dwarf = .{ .dbg_line = &dbg.dbg_line_buffer, @@ -1172,9 +1176,109 @@ pub fn updateFunc(self: *MachO, module: *Module, func: *Module.Fn, air: Air, liv }, }) else - try codegen.generateSymbol(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, .none); + try codegen.generateFunction(&self.base, decl.srcLoc(), func, air, liveness, &code_buffer, 
.none); + switch (res) { + .appended => {}, + .fail => |em| { + // Clear any PIE fixups for this decl. + self.pie_fixups.shrinkRetainingCapacity(0); + // Clear any stub fixups for this decl. + self.stub_fixups.shrinkRetainingCapacity(0); + decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, decl, em); + return; + }, + } + const symbol = try self.placeDecl(decl, code_buffer.items.len); - return self.finishUpdateDecl(module, decl, res); + // Calculate displacements to target addr (if any). + while (self.pie_fixups.popOrNull()) |fixup| { + assert(fixup.size == 4); + const this_addr = symbol.n_value + fixup.offset; + const target_addr = fixup.target_addr; + + switch (self.base.options.target.cpu.arch) { + .x86_64 => { + const displacement = try math.cast(u32, target_addr - this_addr - 4); + mem.writeIntLittle(u32, code_buffer.items[fixup.offset..][0..4], displacement); + }, + .aarch64 => { + // TODO optimize instruction based on jump length (use ldr(literal) + nop if possible). + { + const inst = code_buffer.items[fixup.offset..][0..4]; + const parsed = mem.bytesAsValue(meta.TagPayload( + aarch64.Instruction, + aarch64.Instruction.pc_relative_address, + ), inst); + const this_page = @intCast(i32, this_addr >> 12); + const target_page = @intCast(i32, target_addr >> 12); + const pages = @bitCast(u21, @intCast(i21, target_page - this_page)); + parsed.immhi = @truncate(u19, pages >> 2); + parsed.immlo = @truncate(u2, pages); + } + { + const inst = code_buffer.items[fixup.offset + 4 ..][0..4]; + const parsed = mem.bytesAsValue(meta.TagPayload( + aarch64.Instruction, + aarch64.Instruction.load_store_register, + ), inst); + const narrowed = @truncate(u12, target_addr); + const offset = try math.divExact(u12, narrowed, 8); + parsed.offset = offset; + } + }, + else => unreachable, // unsupported target architecture + } + } + + // Resolve stubs (if any) + const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; + const stubs = text_segment.sections.items[self.stubs_section_index.?]; + for (self.stub_fixups.items) |fixup| { + const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; + const text_addr = symbol.n_value + fixup.start; + switch (self.base.options.target.cpu.arch) { + .x86_64 => { + assert(stub_addr >= text_addr + fixup.len); + const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len); + const placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)]; + mem.writeIntSliceLittle(u32, placeholder, displacement); + }, + .aarch64 => { + assert(stub_addr >= text_addr); + const displacement = try math.cast(i28, stub_addr - text_addr); + const placeholder = code_buffer.items[fixup.start..][0..fixup.len]; + mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32()); + }, + else => unreachable, // unsupported target architecture + } + if (!fixup.already_defined) { + try self.writeStub(fixup.symbol); + try self.writeStubInStubHelper(fixup.symbol); + try self.writeLazySymbolPointer(fixup.symbol); + + self.rebase_info_dirty = true; + self.lazy_binding_info_dirty = true; + } + } + self.stub_fixups.shrinkRetainingCapacity(0); + + try self.writeCode(symbol, code_buffer.items); + + if (debug_buffers) |db| { + try self.d_sym.?.commitDeclDebugInfo( + self.base.allocator, + module, + decl, + db, + self.base.options.target, + ); + } + + // Since we updated the vaddr and the size, each corresponding export symbol also + // needs to be updated. 
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl, decl_exports); } pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { @@ -1194,9 +1298,13 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { var code_buffer = std.ArrayList(u8).init(self.base.allocator); defer code_buffer.deinit(); - var debug_buffers = if (self.d_sym) |*ds| try ds.initDeclDebugBuffers(self.base.allocator, module, decl) else null; + var debug_buffers_buf: DebugSymbols.DeclDebugBuffers = undefined; + const debug_buffers = if (self.d_sym) |*ds| blk: { + debug_buffers_buf = try ds.initDeclDebugBuffers(self.base.allocator, module, decl); + break :blk &debug_buffers_buf; + } else null; defer { - if (debug_buffers) |*dbg| { + if (debug_buffers) |dbg| { dbg.dbg_line_buffer.deinit(); dbg.dbg_info_buffer.deinit(); var it = dbg.dbg_info_type_relocs.valueIterator(); @@ -1207,7 +1315,7 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { } } - const res = if (debug_buffers) |*dbg| + const res = if (debug_buffers) |dbg| try codegen.generateSymbol(&self.base, decl.srcLoc(), .{ .ty = decl.ty, .val = decl.val, @@ -1224,33 +1332,37 @@ pub fn updateDecl(self: *MachO, module: *Module, decl: *Module.Decl) !void { .val = decl.val, }, &code_buffer, .none); - return self.finishUpdateDecl(module, decl, res); -} - -fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: codegen.Result) !void { const code = switch (res) { .externally_managed => |x| x, .appended => code_buffer.items, .fail => |em| { - // Clear any PIE fixups for this decl. - self.pie_fixups.shrinkRetainingCapacity(0); - // Clear any stub fixups for this decl. - self.stub_fixups.shrinkRetainingCapacity(0); decl.analysis = .codegen_failure; try module.failed_decls.put(module.gpa, decl, em); return; }, }; + const symbol = try self.placeDecl(decl, code.len); + assert(self.pie_fixups.items.len == 0); + assert(self.stub_fixups.items.len == 0); + try self.writeCode(symbol, code); + + // Since we updated the vaddr and the size, each corresponding export symbol also + // needs to be updated. 
+ const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; + try self.updateDeclExports(module, decl, decl_exports); +} + +fn placeDecl(self: *MachO, decl: *Module.Decl, code_len: usize) !*macho.nlist_64 { const required_alignment = decl.ty.abiAlignment(self.base.options.target); assert(decl.link.macho.local_sym_index != 0); // Caller forgot to call allocateDeclIndexes() const symbol = &self.locals.items[decl.link.macho.local_sym_index]; if (decl.link.macho.size != 0) { const capacity = decl.link.macho.capacity(self.*); - const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment); + const need_realloc = code_len > capacity or !mem.isAlignedGeneric(u64, symbol.n_value, required_alignment); if (need_realloc) { - const vaddr = try self.growTextBlock(&decl.link.macho, code.len, required_alignment); + const vaddr = try self.growTextBlock(&decl.link.macho, code_len, required_alignment); log.debug("growing {s} and moving from 0x{x} to 0x{x}", .{ decl.name, symbol.n_value, vaddr }); @@ -1265,10 +1377,10 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code } symbol.n_value = vaddr; - } else if (code.len < decl.link.macho.size) { - self.shrinkTextBlock(&decl.link.macho, code.len); + } else if (code_len < decl.link.macho.size) { + self.shrinkTextBlock(&decl.link.macho, code_len); } - decl.link.macho.size = code.len; + decl.link.macho.size = code_len; const new_name = try std.fmt.allocPrint(self.base.allocator, "_{s}", .{mem.spanZ(decl.name)}); defer self.base.allocator.free(new_name); @@ -1286,7 +1398,7 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code defer self.base.allocator.free(decl_name); const name_str_index = try self.makeString(decl_name); - const addr = try self.allocateTextBlock(&decl.link.macho, code.len, required_alignment); + const addr = try self.allocateTextBlock(&decl.link.macho, code_len, required_alignment); log.debug("allocated text block for {s} at 0x{x}", .{ decl_name, addr }); @@ -1311,96 +1423,15 @@ fn finishUpdateDecl(self: *MachO, module: *Module, decl: *Module.Decl, res: code try self.writeOffsetTableEntry(decl.link.macho.offset_table_index); } - // Calculate displacements to target addr (if any). - while (self.pie_fixups.popOrNull()) |fixup| { - assert(fixup.size == 4); - const this_addr = symbol.n_value + fixup.offset; - const target_addr = fixup.target_addr; - - switch (self.base.options.target.cpu.arch) { - .x86_64 => { - const displacement = try math.cast(u32, target_addr - this_addr - 4); - mem.writeIntLittle(u32, code_buffer.items[fixup.offset..][0..4], displacement); - }, - .aarch64 => { - // TODO optimize instruction based on jump length (use ldr(literal) + nop if possible). 
- { - const inst = code_buffer.items[fixup.offset..][0..4]; - var parsed = mem.bytesAsValue(meta.TagPayload( - aarch64.Instruction, - aarch64.Instruction.pc_relative_address, - ), inst); - const this_page = @intCast(i32, this_addr >> 12); - const target_page = @intCast(i32, target_addr >> 12); - const pages = @bitCast(u21, @intCast(i21, target_page - this_page)); - parsed.immhi = @truncate(u19, pages >> 2); - parsed.immlo = @truncate(u2, pages); - } - { - const inst = code_buffer.items[fixup.offset + 4 ..][0..4]; - var parsed = mem.bytesAsValue(meta.TagPayload( - aarch64.Instruction, - aarch64.Instruction.load_store_register, - ), inst); - const narrowed = @truncate(u12, target_addr); - const offset = try math.divExact(u12, narrowed, 8); - parsed.offset = offset; - } - }, - else => unreachable, // unsupported target architecture - } - } - - // Resolve stubs (if any) - const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment; - const stubs = text_segment.sections.items[self.stubs_section_index.?]; - for (self.stub_fixups.items) |fixup| { - const stub_addr = stubs.addr + fixup.symbol * stubs.reserved2; - const text_addr = symbol.n_value + fixup.start; - switch (self.base.options.target.cpu.arch) { - .x86_64 => { - assert(stub_addr >= text_addr + fixup.len); - const displacement = try math.cast(u32, stub_addr - text_addr - fixup.len); - var placeholder = code_buffer.items[fixup.start + fixup.len - @sizeOf(u32) ..][0..@sizeOf(u32)]; - mem.writeIntSliceLittle(u32, placeholder, displacement); - }, - .aarch64 => { - assert(stub_addr >= text_addr); - const displacement = try math.cast(i28, stub_addr - text_addr); - var placeholder = code_buffer.items[fixup.start..][0..fixup.len]; - mem.writeIntSliceLittle(u32, placeholder, aarch64.Instruction.bl(displacement).toU32()); - }, - else => unreachable, // unsupported target architecture - } - if (!fixup.already_defined) { - try self.writeStub(fixup.symbol); - try self.writeStubInStubHelper(fixup.symbol); - try self.writeLazySymbolPointer(fixup.symbol); - - self.rebase_info_dirty = true; - self.lazy_binding_info_dirty = true; - } - } - self.stub_fixups.shrinkRetainingCapacity(0); + return symbol; +} +fn writeCode(self: *MachO, symbol: *macho.nlist_64, code: []const u8) !void { + const text_segment = &self.load_commands.items[self.text_segment_cmd_index.?].Segment; const text_section = text_segment.sections.items[self.text_section_index.?]; const section_offset = symbol.n_value - text_section.addr; const file_offset = text_section.offset + section_offset; try self.base.file.?.pwriteAll(code, file_offset); - - if (debug_buffers) |*db| { - try self.d_sym.?.commitDeclDebugInfo( - self.base.allocator, - module, - decl, - db, - self.base.options.target, - ); - } - - // Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated. - const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{}; - try self.updateDeclExports(module, decl, decl_exports); } pub fn updateDeclLineNumber(self: *MachO, module: *Module, decl: *const Module.Decl) !void { diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index bc9e560582..17b656a06c 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -51,7 +51,12 @@ base: link.File, /// This linker backend does not try to incrementally link output SPIR-V code. /// Instead, it tracks all declarations in this table, and iterates over it /// in the flush function. 
-decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, void) = .{}, +decl_table: std.AutoArrayHashMapUnmanaged(*Module.Decl, DeclGenContext) = .{}, + +const DeclGenContext = struct { + air: Air, + liveness: Liveness, +}; pub fn createEmpty(gpa: *Allocator, options: link.Options) !*SpirV { const spirv = try gpa.create(SpirV); @@ -181,10 +186,15 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { var decl_gen = codegen.DeclGen.init(&spv); defer decl_gen.deinit(); - for (self.decl_table.keys()) |decl| { + var it = self.decl_table.iterator(); + while (it.next()) |entry| { + const decl = entry.key_ptr.*; if (!decl.has_tv) continue; - if (try decl_gen.gen(decl)) |msg| { + const air = entry.value_ptr.air; + const liveness = entry.value_ptr.liveness; + + if (try decl_gen.gen(decl, air, liveness)) |msg| { try module.failed_decls.put(module.gpa, decl, msg); return; // TODO: Attempt to generate more decls? } diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index 81e50c46b6..d9139a178c 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -250,6 +250,8 @@ pub fn updateDecl(self: *Wasm, module: *Module, decl: *Module.Decl) !void { var context = codegen.Context{ .gpa = self.base.allocator, + .air = undefined, + .liveness = undefined, .values = .{}, .code = fn_data.code.toManaged(self.base.allocator), .func_type_data = fn_data.functype.toManaged(self.base.allocator), From 414b144257e440be00928290220adcdfcdfb1e77 Mon Sep 17 00:00:00 2001 From: Jacob G-W Date: Sat, 17 Jul 2021 21:16:41 -0400 Subject: [PATCH 31/53] cbe: fix not (it is a ty_op, not un_op) --- src/codegen/c.zig | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 7137116037..f938b28ec2 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -892,7 +892,8 @@ fn genBody(o: *Object, body: []const Air.Inst.Index) error{ AnalysisFail, OutOfM .bit_and => try airBinOp(o, inst, " & "), .bit_or => try airBinOp(o, inst, " | "), .xor => try airBinOp(o, inst, " ^ "), - .not => try airUnOp( o, inst, "!"), + + .not => try airNot( o, inst), .optional_payload => try airOptionalPayload(o, inst), .optional_payload_ptr => try airOptionalPayload(o, inst), @@ -1181,6 +1182,28 @@ fn airWrapOp( return ret; } +fn airNot(o: *Object, inst: Air.Inst.Index) !CValue { + if (o.liveness.isUnused(inst)) + return CValue.none; + + const ty_op = o.air.instructions.items(.data)[inst].ty_op; + const op = try o.resolveInst(ty_op.operand); + + const writer = o.writer(); + const inst_ty = o.air.typeOfIndex(inst); + const local = try o.allocLocal(inst_ty, .Const); + + try writer.writeAll(" = "); + if (inst_ty.zigTypeTag() == .Bool) + try writer.writeAll("!") + else + try writer.writeAll("~"); + try o.writeCValue(writer, op); + try writer.writeAll(";\n"); + + return local; +} + fn airBinOp(o: *Object, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue { if (o.liveness.isUnused(inst)) return CValue.none; @@ -1202,24 +1225,6 @@ fn airBinOp(o: *Object, inst: Air.Inst.Index, operator: [*:0]const u8) !CValue { return local; } -fn airUnOp(o: *Object, inst: Air.Inst.Index, operator: []const u8) !CValue { - if (o.liveness.isUnused(inst)) - return CValue.none; - - const un_op = o.air.instructions.items(.data)[inst].un_op; - const operand = try o.resolveInst(un_op); - - const writer = o.writer(); - const inst_ty = o.air.typeOfIndex(inst); - const local = try o.allocLocal(inst_ty, .Const); - - try writer.print(" = {s}", .{operator}); - try o.writeCValue(writer, 
operand); - try writer.writeAll(";\n"); - - return local; -} - fn airCall(o: *Object, inst: Air.Inst.Index) !CValue { const pl_op = o.air.instructions.items(.data)[inst].pl_op; const extra = o.air.extraData(Air.Call, pl_op.payload); From 934ebbe900dcc770d7761a87ae35beaf1c5a6797 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 18 Jul 2021 22:29:50 -0700 Subject: [PATCH 32/53] stage2: fix AIR not instruction (see prev commit) --- src/codegen/llvm.zig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index d9090c9f2c..511c3fabf2 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -881,8 +881,8 @@ pub const FuncGen = struct { if (self.liveness.isUnused(inst)) return null; - const un_op = self.air.instructions.items(.data)[inst].un_op; - const operand = try self.resolveInst(un_op); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + const operand = try self.resolveInst(ty_op.operand); return self.builder.buildNot(operand, ""); } From 44fe9c52e15deb1a1ea82760c5f4204d50fb0e9f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 18 Jul 2021 22:32:07 -0700 Subject: [PATCH 33/53] stage2: wasm backend: update to latest naming convention --- src/codegen/wasm.zig | 114 ++++++++++++++++++++++--------------------- 1 file changed, 58 insertions(+), 56 deletions(-) diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index dbca818297..e7e498ef40 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -792,43 +792,45 @@ pub const Context = struct { fn genInst(self: *Context, inst: Air.Inst.Index) !WValue { const air_tags = self.air.instructions.items(.tag); return switch (air_tags[inst]) { - .add => self.genBinOp(inst, .add), - .alloc => self.genAlloc(inst), - .arg => self.genArg(inst), - .bit_and => self.genBinOp(inst, .@"and"), - .bitcast => self.genBitcast(inst), - .bit_or => self.genBinOp(inst, .@"or"), - .block => self.genBlock(inst), - .bool_and => self.genBinOp(inst, .@"and"), - .bool_or => self.genBinOp(inst, .@"or"), - .breakpoint => self.genBreakpoint(inst), - .br => self.genBr(inst), - .call => self.genCall(inst), - .cmp_eq => self.genCmp(inst, .eq), - .cmp_gte => self.genCmp(inst, .gte), - .cmp_gt => self.genCmp(inst, .gt), - .cmp_lte => self.genCmp(inst, .lte), - .cmp_lt => self.genCmp(inst, .lt), - .cmp_neq => self.genCmp(inst, .neq), - .cond_br => self.genCondBr(inst), + .add => self.airBinOp(inst, .add), + .sub => self.airBinOp(inst, .sub), + .mul => self.airBinOp(inst, .mul), + .div => self.airBinOp(inst, .div), + .bit_and => self.airBinOp(inst, .@"and"), + .bit_or => self.airBinOp(inst, .@"or"), + .bool_and => self.airBinOp(inst, .@"and"), + .bool_or => self.airBinOp(inst, .@"or"), + .xor => self.airBinOp(inst, .xor), + + .cmp_eq => self.airCmp(inst, .eq), + .cmp_gte => self.airCmp(inst, .gte), + .cmp_gt => self.airCmp(inst, .gt), + .cmp_lte => self.airCmp(inst, .lte), + .cmp_lt => self.airCmp(inst, .lt), + .cmp_neq => self.airCmp(inst, .neq), + + .alloc => self.airAlloc(inst), + .arg => self.airArg(inst), + .bitcast => self.airBitcast(inst), + .block => self.airBlock(inst), + .breakpoint => self.airBreakpoint(inst), + .br => self.airBr(inst), + .call => self.airCall(inst), + .cond_br => self.airCondBr(inst), .constant => unreachable, .dbg_stmt => WValue.none, - .div => self.genBinOp(inst, .div), - .is_err => self.genIsErr(inst, .i32_ne), - .is_non_err => self.genIsErr(inst, .i32_eq), - .load => self.genLoad(inst), - .loop => self.genLoop(inst), - .mul => self.genBinOp(inst, 
.mul), - .not => self.genNot(inst), - .ret => self.genRet(inst), - .store => self.genStore(inst), - .struct_field_ptr => self.genStructFieldPtr(inst), - .sub => self.genBinOp(inst, .sub), - .switch_br => self.genSwitchBr(inst), - .unreach => self.genUnreachable(inst), - .unwrap_errunion_payload => self.genUnwrapErrUnionPayload(inst), - .wrap_errunion_payload => self.genWrapErrUnionPayload(inst), - .xor => self.genBinOp(inst, .xor), + .is_err => self.airIsErr(inst, .i32_ne), + .is_non_err => self.airIsErr(inst, .i32_eq), + .load => self.airLoad(inst), + .loop => self.airLoop(inst), + .not => self.airNot(inst), + .ret => self.airRet(inst), + .store => self.airStore(inst), + .struct_field_ptr => self.airStructFieldPtr(inst), + .switch_br => self.airSwitchBr(inst), + .unreach => self.airUnreachable(inst), + .unwrap_errunion_payload => self.airUnwrapErrUnionPayload(inst), + .wrap_errunion_payload => self.airWrapErrUnionPayload(inst), else => |tag| self.fail("TODO: Implement wasm inst: {s}", .{@tagName(tag)}), }; } @@ -840,7 +842,7 @@ pub const Context = struct { } } - fn genRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airRet(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = self.resolveInst(un_op); try self.emitWValue(operand); @@ -848,7 +850,7 @@ pub const Context = struct { return .none; } - fn genCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airCall(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.Call, pl_op.payload); const args = self.air.extra[extra.end..][0..extra.data.args_len]; @@ -882,12 +884,12 @@ pub const Context = struct { return .none; } - fn genAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airAlloc(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const elem_type = self.air.typeOfIndex(inst).elemType(); return self.allocLocal(elem_type); } - fn genStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airStore(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const writer = self.code.writer(); @@ -926,19 +928,19 @@ pub const Context = struct { return .none; } - fn genLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airLoad(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; return self.resolveInst(ty_op.operand); } - fn genArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airArg(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = inst; // arguments share the index with locals defer self.local_index += 1; return WValue{ .local = self.local_index }; } - fn genBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { + fn airBinOp(self: *Context, inst: Air.Inst.Index, op: Op) InnerError!WValue { const bin_op = self.air.instructions.items(.data)[inst].bin_op; const lhs = self.resolveInst(bin_op.lhs); const rhs = self.resolveInst(bin_op.rhs); @@ -1074,7 +1076,7 @@ pub const Context = struct { } } - fn genBlock(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airBlock(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const block_ty = try self.genBlockType(self.air.getRefType(ty_pl.ty)); const extra = 
self.air.extraData(Air.Block, ty_pl.payload); @@ -1108,7 +1110,7 @@ pub const Context = struct { self.block_depth -= 1; } - fn genLoop(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airLoop(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; @@ -1127,7 +1129,7 @@ pub const Context = struct { return .none; } - fn genCondBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airCondBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const condition = self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.CondBr, pl_op.payload); @@ -1166,7 +1168,7 @@ pub const Context = struct { return .none; } - fn genCmp(self: *Context, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue { + fn airCmp(self: *Context, inst: Air.Inst.Index, op: std.math.CompareOperator) InnerError!WValue { // save offset, so potential conditions can insert blocks in front of // the comparison that we can later jump back to const offset = self.code.items.len; @@ -1202,7 +1204,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const br = self.air.instructions.items(.data)[inst].br; // if operand has codegen bits we should break with a value @@ -1220,7 +1222,7 @@ pub const Context = struct { return .none; } - fn genNot(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airNot(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const offset = self.code.items.len; @@ -1238,7 +1240,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genBreakpoint(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airBreakpoint(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = self; _ = inst; // unsupported by wasm itself. 
Can be implemented once we support DWARF @@ -1246,18 +1248,18 @@ pub const Context = struct { return .none; } - fn genUnreachable(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airUnreachable(self: *Context, inst: Air.Inst.Index) InnerError!WValue { _ = inst; try self.code.append(wasm.opcode(.@"unreachable")); return .none; } - fn genBitcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airBitcast(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; return self.resolveInst(ty_op.operand); } - fn genStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airStructFieldPtr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; const extra = self.air.extraData(Air.StructField, ty_pl.payload); const struct_ptr = self.resolveInst(extra.data.struct_ptr); @@ -1265,7 +1267,7 @@ pub const Context = struct { return WValue{ .local = struct_ptr.multi_value.index + @intCast(u32, extra.data.field_index) }; } - fn genSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airSwitchBr(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const pl_op = self.air.instructions.items(.data)[inst].pl_op; const extra = self.air.extraData(Air.SwitchBr, pl_op.payload); const cases = self.air.extra[extra.end..][0..extra.data.cases_len]; @@ -1319,7 +1321,7 @@ pub const Context = struct { return .none; } - fn genIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { + fn airIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = self.resolveInst(un_op); const offset = self.code.items.len; @@ -1336,7 +1338,7 @@ pub const Context = struct { return WValue{ .code_offset = offset }; } - fn genUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airUnwrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; const operand = self.resolveInst(ty_op.operand); // The index of multi_value contains the error code. To get the initial index of the payload we get @@ -1346,7 +1348,7 @@ pub const Context = struct { return WValue{ .local = operand.multi_value.index + 1 }; } - fn genWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { + fn airWrapErrUnionPayload(self: *Context, inst: Air.Inst.Index) InnerError!WValue { const ty_op = self.air.instructions.items(.data)[inst].ty_op; return self.resolveInst(ty_op.operand); } From 480242b78a5018bbe28824eb3549bfa86126a6e8 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sun, 18 Jul 2021 16:53:21 +0200 Subject: [PATCH 34/53] Debug info - Implement more instructions: - bin_op - un_op - block - struct_field_ptr - br - condbr Also updates constant to write the actual Type, rather than the enum tag of the `Ref`. 
--- src/print_air.zig | 74 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 51 insertions(+), 23 deletions(-) diff --git a/src/print_air.zig b/src/print_air.zig index 76159d0796..6fa04fbfab 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -182,21 +182,22 @@ const Writer = struct { } fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const bin_op = w.air.instructions.items(.data)[inst].bin_op; + try w.writeInstRef(s, bin_op.lhs); + try s.writeAll(", "); + try w.writeInstRef(s, bin_op.rhs); } fn writeUnOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const un_op = w.air.instructions.items(.data)[inst].un_op; + try w.writeInstRef(s, un_op); } fn writeNoOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { _ = w; _ = inst; - try s.writeAll("TODO"); + _ = s; + // no-op, no argument to write } fn writeTy(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -205,21 +206,31 @@ const Writer = struct { } fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const ty_op = w.air.instructions.items(.data)[inst].ty_op; + try s.print("{}, ", .{w.air.getRefType(ty_op.ty)}); + try w.writeInstRef(s, ty_op.operand); } fn writeBlock(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; + const extra = w.air.extraData(Air.Block, ty_pl.payload); + const body = w.air.extra[extra.end..][0..extra.data.body_len]; + + try s.writeAll("{\n"); + const old_indent = w.indent; + w.indent += 2; + try w.writeBody(s, body); + w.indent = old_indent; + try s.writeByteNTimes(' ', w.indent); + try s.writeAll("}"); } fn writeStructFieldPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; + const extra = w.air.extraData(Air.StructField, ty_pl.payload); + + try w.writeInstRef(s, extra.data.struct_ptr); + try s.print(", {d}", .{extra.data.field_index}); } fn writeVarPtr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -231,7 +242,7 @@ const Writer = struct { fn writeConstant(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const val = w.air.values[ty_pl.payload]; - try s.print("{}, {}", .{ ty_pl.ty, val }); + try s.print("{}, {}", .{ w.air.getRefType(ty_pl.ty), val }); } fn writeAssembly(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -259,15 +270,32 @@ const Writer = struct { } fn writeBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const br = w.air.instructions.items(.data)[inst].br; + try w.writeInstIndex(s, br.block_inst); + try s.writeAll(", "); + try w.writeInstRef(s, br.operand); } fn writeCondBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.CondBr, pl_op.payload); + const then_body = w.air.extra[extra.end..][0..extra.data.then_body_len]; + const else_body = w.air.extra[extra.end + then_body.len 
..][0..extra.data.else_body_len]; + + try w.writeInstRef(s, pl_op.operand); + try s.writeAll(", {\n"); + const old_indent = w.indent; + w.indent += 2; + + try w.writeBody(s, then_body); + try s.writeByteNTimes(' ', old_indent); + try s.writeAll("}, {\n"); + + try w.writeBody(s, else_body); + w.indent = old_indent; + + try s.writeByteNTimes(' ', old_indent); + try s.writeAll("}"); } fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { From 1bc3bfc04b51af8a00999fc4bf766a202dcc7384 Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Sun, 18 Jul 2021 17:10:33 +0200 Subject: [PATCH 35/53] Implement switch_br dump --- src/print_air.zig | 33 ++++++++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/src/print_air.zig b/src/print_air.zig index 6fa04fbfab..f31b307b57 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -299,9 +299,36 @@ const Writer = struct { } fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const pl_op = w.air.instructions.items(.data)[inst].pl_op; + const extra = w.air.extraData(Air.SwitchBr, pl_op.payload); + const cases = w.air.extra[extra.end..][0..extra.data.cases_len]; + const else_body = w.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len]; + + try w.writeInstRef(s, pl_op.operand); + try s.writeAll(", {\n"); + + const old_indent = w.indent; + if (else_body.len != 0) { + w.indent += 2; + try w.writeBody(s, else_body); + try s.writeByteNTimes(' ', old_indent); + try s.writeAll("}, {\n"); + w.indent = old_indent; + } + + for (cases) |case_index| { + const case = w.air.extraData(Air.SwitchBr.Case, case_index); + const case_body = w.air.extra[case.end..][0..case.data.body_len]; + + w.indent += 2; + try w.writeBody(s, case_body); + try s.writeByteNTimes(' ', old_indent); + try s.writeAll("}, {\n"); + w.indent = old_indent; + } + + try s.writeByteNTimes(' ', old_indent); + try s.writeAll("}"); } fn writeInstRef(w: *Writer, s: anytype, inst: Air.Inst.Ref) @TypeOf(s).Error!void { From 7381431e68eba09f76ddc41cff8931d3e4f1a798 Mon Sep 17 00:00:00 2001 From: Lewis Gaul Date: Sun, 18 Jul 2021 23:17:00 +0100 Subject: [PATCH 36/53] Get register_manager.zig tests to compile - use value '1' as mock Air.Inst.Index --- src/register_manager.zig | 50 ++++++++++++++-------------------------- 1 file changed, 17 insertions(+), 33 deletions(-) diff --git a/src/register_manager.zig b/src/register_manager.zig index f0d128e7f9..47528e53d6 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -296,15 +296,11 @@ test "tryAllocReg: no spilling" { }; defer function.deinit(); - var mock_instruction = ir.Inst{ - .tag = .breakpoint, - .ty = Type.initTag(.void), - .src = .unneeded, - }; + const mock_instruction: Air.Inst.Index = 1; - try expectEqual(@as(?MockRegister1, .r2), function.register_manager.tryAllocReg(&mock_instruction, &.{})); - try expectEqual(@as(?MockRegister1, .r3), function.register_manager.tryAllocReg(&mock_instruction, &.{})); - try expectEqual(@as(?MockRegister1, null), function.register_manager.tryAllocReg(&mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r2), function.register_manager.tryAllocReg(mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r3), function.register_manager.tryAllocReg(mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, null), function.register_manager.tryAllocReg(mock_instruction, &.{})); try 
expect(function.register_manager.isRegAllocated(.r2)); try expect(function.register_manager.isRegAllocated(.r3)); @@ -328,28 +324,24 @@ test "allocReg: spilling" { }; defer function.deinit(); - var mock_instruction = ir.Inst{ - .tag = .breakpoint, - .ty = Type.initTag(.void), - .src = .unneeded, - }; + const mock_instruction: Air.Inst.Index = 1; - try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(&mock_instruction, &.{})); - try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(&mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, &.{})); // Spill a register - try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(&mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r2), try function.register_manager.allocReg(mock_instruction, &.{})); try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items); // No spilling necessary function.register_manager.freeReg(.r3); - try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(&mock_instruction, &.{})); + try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, &.{})); try expectEqualSlices(MockRegister1, &[_]MockRegister1{.r2}, function.spilled.items); // Exceptions function.register_manager.freeReg(.r2); function.register_manager.freeReg(.r3); - try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(&mock_instruction, &.{.r2})); + try expectEqual(@as(?MockRegister1, .r3), try function.register_manager.allocReg(mock_instruction, &.{.r2})); } test "tryAllocRegs" { @@ -377,16 +369,12 @@ test "allocRegs" { }; defer function.deinit(); - var mock_instruction = ir.Inst{ - .tag = .breakpoint, - .ty = Type.initTag(.void), - .src = .unneeded, - }; + const mock_instruction: Air.Inst.Index = 1; try expectEqual([_]MockRegister2{ .r0, .r1, .r2 }, try function.register_manager.allocRegs(3, .{ - &mock_instruction, - &mock_instruction, - &mock_instruction, + mock_instruction, + mock_instruction, + mock_instruction, }, &.{})); // Exceptions @@ -402,13 +390,9 @@ test "getReg" { }; defer function.deinit(); - var mock_instruction = ir.Inst{ - .tag = .breakpoint, - .ty = Type.initTag(.void), - .src = .unneeded, - }; + const mock_instruction: Air.Inst.Index = 1; - try function.register_manager.getReg(.r3, &mock_instruction); + try function.register_manager.getReg(.r3, mock_instruction); try expect(!function.register_manager.isRegAllocated(.r2)); try expect(function.register_manager.isRegAllocated(.r3)); @@ -416,7 +400,7 @@ test "getReg" { try expect(!function.register_manager.isRegFree(.r3)); // Spill r3 - try function.register_manager.getReg(.r3, &mock_instruction); + try function.register_manager.getReg(.r3, mock_instruction); try expect(!function.register_manager.isRegAllocated(.r2)); try expect(function.register_manager.isRegAllocated(.r3)); From bf8e347b1b99ab711036c76df44a4eed2f6677d5 Mon Sep 17 00:00:00 2001 From: Lewis Gaul Date: Sun, 18 Jul 2021 23:26:02 +0100 Subject: [PATCH 37/53] Get codegen.zig to compile - use '{d}' format for Air.Inst.Index values --- src/codegen.zig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/codegen.zig b/src/codegen.zig index 20d7035822..fa096bc13f 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -1048,7 
+1048,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { const stack_mcv = try self.allocRegOrMem(inst, false); - log.debug("spilling {*} to stack mcv {any}", .{ inst, stack_mcv }); + log.debug("spilling {d} to stack mcv {any}", .{ inst, stack_mcv }); const reg_mcv = self.getResolvedInstValue(inst); assert(reg == toCanonicalReg(reg_mcv.register)); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; @@ -3115,7 +3115,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } }; - log.debug("consolidating else_entry {*} {}=>{}", .{ else_key, else_value, canon_mcv }); + log.debug("consolidating else_entry {d} {}=>{}", .{ else_key, else_value, canon_mcv }); // TODO make sure the destination stack offset / register does not already have something // going on there. try self.setRegOrMem(self.air.typeOfIndex(else_key), canon_mcv, else_value); @@ -3142,7 +3142,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } } }; - log.debug("consolidating then_entry {*} {}=>{}", .{ then_key, parent_mcv, then_value }); + log.debug("consolidating then_entry {d} {}=>{}", .{ then_key, parent_mcv, then_value }); // TODO make sure the destination stack offset / register does not already have something // going on there. try self.setRegOrMem(self.air.typeOfIndex(then_key), parent_mcv, then_value); From 95756299af77a83564c2dbae09884be20ffe0c5c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Sun, 18 Jul 2021 22:49:46 -0700 Subject: [PATCH 38/53] stage2: fix compile errors in LLVM backend --- src/codegen/llvm.zig | 121 ++++++++++++++++++++++++++----------------- src/link/MachO.zig | 13 ++++- src/link/Wasm.zig | 12 ++++- 3 files changed, 95 insertions(+), 51 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index 511c3fabf2..81484e93db 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -276,10 +276,71 @@ pub const Object = struct { } } - pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { - const tracy = trace(@src()); - defer tracy.end(); + pub fn updateFunc( + self: *Object, + module: *Module, + func: *Module.Fn, + air: Air, + liveness: Liveness, + ) !void { + var dg: DeclGen = .{ + .object = self, + .module = module, + .decl = func.owner_decl, + .err_msg = null, + .gpa = module.gpa, + }; + const llvm_func = try dg.resolveLLVMFunction(func.owner_decl); + + // This gets the LLVM values from the function and stores them in `dg.args`. + const fn_param_len = func.owner_decl.ty.fnParamLen(); + var args = try dg.gpa.alloc(*const llvm.Value, fn_param_len); + + for (args) |*arg, i| { + arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i)); + } + + // We remove all the basic blocks of a function to support incremental + // compilation! 
+ // TODO: remove all basic blocks if functions can have more than one + if (llvm_func.getFirstBasicBlock()) |bb| { + bb.deleteBasicBlock(); + } + + const builder = dg.context().createBuilder(); + + const entry_block = dg.context().appendBasicBlock(llvm_func, "Entry"); + builder.positionBuilderAtEnd(entry_block); + + var fg: FuncGen = .{ + .gpa = dg.gpa, + .air = air, + .liveness = liveness, + .dg = &dg, + .builder = builder, + .args = args, + .arg_index = 0, + .func_inst_table = .{}, + .entry_block = entry_block, + .latest_alloca_inst = null, + .llvm_func = llvm_func, + .blocks = .{}, + }; + defer fg.deinit(); + + fg.genBody(air.getMainBody()) catch |err| switch (err) { + error.CodegenFail => { + func.owner_decl.analysis = .codegen_failure; + try module.failed_decls.put(module.gpa, func.owner_decl, dg.err_msg.?); + dg.err_msg = null; + return; + }, + else => |e| return e, + }; + } + + pub fn updateDecl(self: *Object, module: *Module, decl: *Module.Decl) !void { var dg: DeclGen = .{ .object = self, .module = module, @@ -330,45 +391,8 @@ pub const DeclGen = struct { log.debug("gen: {s} type: {}, value: {}", .{ decl.name, decl.ty, decl.val }); if (decl.val.castTag(.function)) |func_payload| { - const func = func_payload.data; - - const llvm_func = try self.resolveLLVMFunction(func.owner_decl); - - // This gets the LLVM values from the function and stores them in `self.args`. - const fn_param_len = func.owner_decl.ty.fnParamLen(); - var args = try self.gpa.alloc(*const llvm.Value, fn_param_len); - - for (args) |*arg, i| { - arg.* = llvm.getParam(llvm_func, @intCast(c_uint, i)); - } - - // We remove all the basic blocks of a function to support incremental - // compilation! - // TODO: remove all basic blocks if functions can have more than one - if (llvm_func.getFirstBasicBlock()) |bb| { - bb.deleteBasicBlock(); - } - - const builder = self.context().createBuilder(); - - const entry_block = self.context().appendBasicBlock(llvm_func, "Entry"); - builder.positionBuilderAtEnd(entry_block); - - var fg: FuncGen = .{ - .gpa = self.gpa, - .dg = self, - .builder = builder, - .args = args, - .arg_index = 0, - .func_inst_table = .{}, - .entry_block = entry_block, - .latest_alloca_inst = null, - .llvm_func = llvm_func, - .blocks = .{}, - }; - defer fg.deinit(); - - try fg.genBody(func.body); + _ = func_payload; + @panic("TODO llvm backend genDecl function pointer"); } else if (decl.val.castTag(.extern_fn)) |extern_fn| { _ = try self.resolveLLVMFunction(extern_fn.data); } else { @@ -596,6 +620,8 @@ pub const DeclGen = struct { pub const FuncGen = struct { gpa: *Allocator, dg: *DeclGen, + air: Air, + liveness: Liveness, builder: *const llvm.Builder, @@ -649,14 +675,15 @@ pub const FuncGen = struct { if (self.air.value(inst)) |val| { return self.dg.genTypedValue(.{ .ty = self.air.typeOf(inst), .val = val }, self); } - if (self.func_inst_table.get(inst)) |value| return value; + const inst_index = Air.refToIndex(inst).?; + if (self.func_inst_table.get(inst_index)) |value| return value; return self.todo("implement global llvm values (or the value is not in the func_inst_table table)", .{}); } - fn genBody(self: *FuncGen, body: ir.Body) error{ OutOfMemory, CodegenFail }!void { + fn genBody(self: *FuncGen, body: []const Air.Inst.Index) error{ OutOfMemory, CodegenFail }!void { const air_tags = self.air.instructions.items(.tag); - for (body.instructions) |inst| { + for (body) |inst| { const opt_value = switch (air_tags[inst]) { .add => try self.airAdd(inst), .sub => try self.airSub(inst), @@ -828,8 +855,8 @@ pub 
const FuncGen = struct { // If the break doesn't break a value, then we don't have to add // the values to the lists. - if (self.air.typeOf(branch.result).hasCodeGenBits()) { - const val = try self.resolveInst(branch.result); + if (self.air.typeOf(branch.operand).hasCodeGenBits()) { + const val = try self.resolveInst(branch.operand); // For the phi node, we need the basic blocks and the values of the // break instructions. diff --git a/src/link/MachO.zig b/src/link/MachO.zig index 4107607924..02ea5856f4 100644 --- a/src/link/MachO.zig +++ b/src/link/MachO.zig @@ -30,6 +30,7 @@ const DebugSymbols = @import("MachO/DebugSymbols.zig"); const Trie = @import("MachO/Trie.zig"); const CodeSignature = @import("MachO/CodeSignature.zig"); const Zld = @import("MachO/Zld.zig"); +const llvm_backend = @import("../codegen/llvm.zig"); usingnamespace @import("MachO/commands.zig"); @@ -37,6 +38,9 @@ pub const base_tag: File.Tag = File.Tag.macho; base: File, +/// If this is not null, an object file is created by LLVM and linked with LLD afterwards. +llvm_object: ?*llvm_backend.Object = null, + /// Debug symbols bundle (or dSym). d_sym: ?DebugSymbols = null, @@ -347,8 +351,13 @@ pub const SrcFn = struct { pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*MachO { assert(options.object_format == .macho); - if (options.use_llvm) return error.LLVM_BackendIsTODO_ForMachO; // TODO - if (options.use_lld) return error.LLD_LinkingIsTODO_ForMachO; // TODO + if (build_options.have_llvm and options.use_llvm) { + const self = try createEmpty(allocator, options); + errdefer self.base.destroy(); + + self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options); + return self; + } const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = false, diff --git a/src/link/Wasm.zig b/src/link/Wasm.zig index d9139a178c..f478d2ee47 100644 --- a/src/link/Wasm.zig +++ b/src/link/Wasm.zig @@ -19,12 +19,15 @@ const build_options = @import("build_options"); const wasi_libc = @import("../wasi_libc.zig"); const Cache = @import("../Cache.zig"); const TypedValue = @import("../TypedValue.zig"); +const llvm_backend = @import("../codegen/llvm.zig"); const Air = @import("../Air.zig"); const Liveness = @import("../Liveness.zig"); pub const base_tag = link.File.Tag.wasm; base: link.File, +/// If this is not null, an object file is created by LLVM and linked with LLD afterwards. +llvm_object: ?*llvm_backend.Object = null, /// List of all function Decls to be written to the output file. The index of /// each Decl in this list at the time of writing the binary is used as the /// function index. 
In the event where ext_funcs' size is not 0, the index of @@ -114,8 +117,13 @@ pub const DeclBlock = struct { pub fn openPath(allocator: *Allocator, sub_path: []const u8, options: link.Options) !*Wasm { assert(options.object_format == .wasm); - if (options.use_llvm) return error.LLVM_BackendIsTODO_ForWasm; // TODO - if (options.use_lld) return error.LLD_LinkingIsTODO_ForWasm; // TODO + if (build_options.have_llvm and options.use_llvm) { + const self = try createEmpty(allocator, options); + errdefer self.base.destroy(); + + self.llvm_object = try llvm_backend.Object.create(allocator, sub_path, options); + return self; + } // TODO: read the file and keep valid parts instead of truncating const file = try options.emit.?.directory.handle.createFile(sub_path, .{ .truncate = true, .read = true }); From 1150fc13dc779c91d54538304466cc068ccbf8ed Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 19 Jul 2021 15:56:02 +0200 Subject: [PATCH 39/53] wasm: Resolve regressions, add intcast support --- src/codegen/wasm.zig | 47 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index e7e498ef40..b6edfb7b20 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -24,8 +24,8 @@ const WValue = union(enum) { none: void, /// Index of the local variable local: u32, - /// Instruction holding a constant `Value` - constant: Air.Inst.Index, + /// Holds a memoized typed value + constant: TypedValue, /// Offset position in the list of bytecode instructions code_offset: usize, /// Used for variables that create multiple locals on the stack when allocated @@ -484,7 +484,7 @@ pub const Result = union(enum) { }; /// Hashmap to store generated `WValue` for each `Air.Inst.Ref` -pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Ref, WValue); +pub const ValueTable = std.AutoHashMapUnmanaged(Air.Inst.Index, WValue); /// Code represents the `Code` section of wasm that /// belongs to a function @@ -548,14 +548,23 @@ pub const Context = struct { /// Resolves the `WValue` for the given instruction `inst` /// When the given instruction has a `Value`, it returns a constant instead fn resolveInst(self: Context, ref: Air.Inst.Ref) WValue { - const ref_type = self.air.getRefType(ref); - if (ref_type.hasCodeGenBits()) return .none; + const inst_index = Air.refToIndex(ref) orelse { + const tv = Air.Inst.Ref.typed_value_map[@enumToInt(ref)]; + if (!tv.ty.hasCodeGenBits()) { + return WValue.none; + } + return WValue{ .constant = tv }; + }; - if (self.air.instructions.items(.tag)[@enumToInt(ref)] == .constant) { - return WValue{ .constant = @enumToInt(ref) }; + const inst_type = self.air.typeOfIndex(inst_index); + if (!inst_type.hasCodeGenBits()) return .none; + + if (self.air.instructions.items(.tag)[inst_index] == .constant) { + const ty_pl = self.air.instructions.items(.data)[inst_index].ty_pl; + return WValue{ .constant = .{ .ty = inst_type, .val = self.air.values[ty_pl.payload] } }; } - return self.values.get(ref).?; // Instruction does not dominate all uses! + return self.values.get(inst_index).?; // Instruction does not dominate all uses! 
} /// Using a given `Type`, returns the corresponding wasm Valtype @@ -611,12 +620,7 @@ pub const Context = struct { try writer.writeByte(wasm.opcode(.local_get)); try leb.writeULEB128(writer, idx); }, - .constant => |index| { - const ty_pl = self.air.instructions.items(.data)[index].ty_pl; - const value = self.air.values[ty_pl.payload]; - // create a new constant onto the stack - try self.emitConstant(value, self.air.getRefType(ty_pl.ty)); - }, + .constant => |tv| try self.emitConstant(tv.val, tv.ty), // Creates a new constant on the stack } } @@ -838,7 +842,7 @@ pub const Context = struct { fn genBody(self: *Context, body: []const Air.Inst.Index) InnerError!void { for (body) |inst| { const result = try self.genInst(inst); - try self.values.putNoClobber(self.gpa, @intToEnum(Air.Inst.Ref, inst), result); + try self.values.putNoClobber(self.gpa, inst, result); } } @@ -856,8 +860,7 @@ pub const Context = struct { const args = self.air.extra[extra.end..][0..extra.data.args_len]; const target: *Decl = blk: { - const ty_pl = self.air.instructions.items(.data)[@enumToInt(pl_op.operand)].ty_pl; - const func_val = self.air.values[ty_pl.payload]; + const func_val = self.air.value(pl_op.operand).?; if (func_val.castTag(.function)) |func| { break :blk func.data.owner_decl; @@ -868,7 +871,7 @@ pub const Context = struct { }; for (args) |arg| { - const arg_val = self.resolveInst(@intToEnum(Air.Inst.Ref, arg)); + const arg_val = self.resolveInst(Air.indexToRef(arg)); try self.emitWValue(arg_val); } @@ -902,7 +905,7 @@ pub const Context = struct { // we simply assign the local_index to the rhs one. // This allows us to update struct fields without having to individually // set each local as each field's index will be calculated off the struct's base index - .multi_value => self.values.put(self.gpa, bin_op.lhs, rhs) catch unreachable, // Instruction does not dominate all uses! + .multi_value => self.values.put(self.gpa, Air.refToIndex(bin_op.lhs).?, rhs) catch unreachable, // Instruction does not dominate all uses! .constant, .none => { // emit all values onto the stack if constant try self.emitWValue(rhs); @@ -1294,10 +1297,8 @@ pub const Context = struct { try self.startBlock(.block, blocktype, null); try self.emitWValue(target); - // cases must represent a constant of which its type is in the `typed_value_map` - // Therefore we can simply retrieve it. - const ty_val = Air.Inst.Ref.typed_value_map[@enumToInt(case.data.item)]; - try self.emitConstant(ty_val.val, target_ty); + const val = self.air.value(case.data.item).?; + try self.emitConstant(val, target_ty); const opcode = buildOpcode(.{ .valtype1 = valtype, .op = .ne, // not equal because we jump out the block if it does not match the condition From caa0de545e2f45a96ac3136178f478dab1c89ebd Mon Sep 17 00:00:00 2001 From: Luuk de Gram Date: Mon, 19 Jul 2021 21:50:15 +0200 Subject: [PATCH 40/53] Resolve regressions - Get correct types in wasm backend. - `arg` is already a `Ref`, therefore simply use `@intToEnum`. - Fix regression in `zirBoolBr`, where the order of insertion was incorrect.
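For context on these fixes, here is a minimal sketch of the Ref <-> Index mapping they pivot on, assuming (as the uses of `typed_value_map` in the surrounding patches suggest) that the first `typed_value_map.len` tags of `Air.Inst.Ref` name interned typed values and every tag past them encodes an instruction index:

    const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len;

    // An instruction index becomes a Ref by biasing it past the reserved
    // interned values.
    fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref {
        return @intToEnum(Air.Inst.Ref, ref_start_index + inst);
    }

    // A Ref maps back to an instruction index only if it lies past the
    // reserved region; otherwise it names an interned value and has no index.
    fn refToIndex(ref: Air.Inst.Ref) ?Air.Inst.Index {
        const ref_int = @enumToInt(ref);
        if (ref_int >= ref_start_index) {
            return ref_int - ref_start_index;
        } else {
            return null;
        }
    }

Under that convention, `@intToEnum(Air.Inst.Ref, x)` is only correct when `x` already stores a Ref-encoded integer, which is exactly the distinction these regressions turned on.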
--- src/Sema.zig | 10 +++++----- src/codegen/wasm.zig | 10 +++++----- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index d796ae2a5a..b3feeb8b1c 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -5295,11 +5295,6 @@ fn zirBoolBr( then_block.instructions.items.len + else_block.instructions.items.len + @typeInfo(Air.Block).Struct.fields.len + child_block.instructions.items.len); - sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( - Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, - ); - sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); - const cond_br_payload = sema.addExtraAssumeCapacity(Air.CondBr{ .then_body_len = @intCast(u32, then_block.instructions.items.len), .else_body_len = @intCast(u32, else_block.instructions.items.len), @@ -5312,6 +5307,11 @@ fn zirBoolBr( .payload = cond_br_payload, } } }); + sema.air_instructions.items(.data)[block_inst].ty_pl.payload = sema.addExtraAssumeCapacity( + Air.Block{ .body_len = @intCast(u32, child_block.instructions.items.len) }, + ); + sema.air_extra.appendSliceAssumeCapacity(child_block.instructions.items); + try parent_block.instructions.append(gpa, block_inst); return Air.indexToRef(block_inst); } diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index b6edfb7b20..e72140d826 100644 --- a/src/codegen/wasm.zig +++ b/src/codegen/wasm.zig @@ -871,7 +871,7 @@ pub const Context = struct { }; for (args) |arg| { - const arg_val = self.resolveInst(Air.indexToRef(arg)); + const arg_val = self.resolveInst(@intToEnum(Air.Inst.Ref, arg)); try self.emitWValue(arg_val); } @@ -959,7 +959,7 @@ pub const Context = struct { try self.emitWValue(lhs); try self.emitWValue(rhs); - const bin_ty = self.air.getRefType(bin_op.lhs); + const bin_ty = self.air.typeOf(bin_op.lhs); const opcode: wasm.Opcode = buildOpcode(.{ .op = op, .valtype1 = try self.typeToValtype(bin_ty), @@ -1179,7 +1179,7 @@ pub const Context = struct { const data: Air.Inst.Data = self.air.instructions.items(.data)[inst]; const lhs = self.resolveInst(data.bin_op.lhs); const rhs = self.resolveInst(data.bin_op.rhs); - const lhs_ty = self.air.getRefType(data.bin_op.lhs); + const lhs_ty = self.air.typeOf(data.bin_op.lhs); try self.emitWValue(lhs); try self.emitWValue(rhs); @@ -1211,7 +1211,7 @@ pub const Context = struct { const br = self.air.instructions.items(.data)[inst].br; // if operand has codegen bits we should break with a value - if (self.air.getRefType(br.operand).hasCodeGenBits()) { + if (self.air.typeOf(br.operand).hasCodeGenBits()) { try self.emitWValue(self.resolveInst(br.operand)); } @@ -1277,7 +1277,7 @@ pub const Context = struct { const else_body = self.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len]; const target = self.resolveInst(pl_op.operand); - const target_ty = self.air.getRefType(pl_op.operand); + const target_ty = self.air.typeOf(pl_op.operand); const valtype = try self.typeToValtype(target_ty); // result type is always 'noreturn' const blocktype = wasm.block_empty; From ea902ffe8f5f337b04f25b4efc69599db74d99ce Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Jul 2021 17:35:14 -0700 Subject: [PATCH 41/53] Sema: reimplement runtime switch Now supports multiple items pointing to the same body. This is a common pattern even when using a jump table, with multiple cases pointing to the same block of code. When a range is specified, the items are moved to branches in the else body.
A future improvement may make it possible to have jump table items as well as ranges pointing to the same block of code. --- src/Air.zig | 3 +- src/Module.zig | 6 +- src/Sema.zig | 306 ++++++++++++++++++++++++------------------- src/codegen/wasm.zig | 67 +++++----- 4 files changed, 217 insertions(+), 165 deletions(-) diff --git a/src/Air.zig b/src/Air.zig index 0e19202244..718123818b 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -352,9 +352,10 @@ pub const SwitchBr = struct { else_body_len: u32, /// Trailing: + /// * item: Inst.Ref // for each `items_len`. /// * instruction index for each `body_len`. pub const Case = struct { - item: Inst.Ref, + items_len: u32, body_len: u32, }; }; diff --git a/src/Module.zig b/src/Module.zig index c101221f2e..9fadf67c6f 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1300,6 +1300,10 @@ pub const Scope = struct { } pub fn addInst(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Ref { + return Air.indexToRef(try block.addInstAsIndex(inst)); + } + + pub fn addInstAsIndex(block: *Block, inst: Air.Inst) error{OutOfMemory}!Air.Inst.Index { const sema = block.sema; const gpa = sema.gpa; @@ -1309,7 +1313,7 @@ pub const Scope = struct { const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len); sema.air_instructions.appendAssumeCapacity(inst); block.instructions.appendAssumeCapacity(result_index); - return Air.indexToRef(result_index); + return result_index; } }; }; diff --git a/src/Sema.zig b/src/Sema.zig index b3feeb8b1c..b9449157e2 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4170,159 +4170,201 @@ fn analyzeSwitch( try sema.requireRuntimeBlock(block, src); - // TODO when reworking AIR memory layout make multi cases get generated as cases, - // not as part of the "else" block. - return mod.fail(&block.base, src, "TODO rework runtime switch Sema", .{}); - //const cases = try sema.arena.alloc(Inst.SwitchBr.Case, scalar_cases_len); + var cases_extra: std.ArrayListUnmanaged(u32) = .{}; + defer cases_extra.deinit(gpa); - //var case_block = child_block.makeSubBlock(); - //case_block.runtime_loop = null; - //case_block.runtime_cond = operand.src; - //case_block.runtime_index += 1; - //defer case_block.instructions.deinit(gpa); + try cases_extra.ensureTotalCapacity(gpa, (scalar_cases_len + multi_cases_len) * + @typeInfo(Air.SwitchBr.Case).Struct.fields.len + 2); - //var extra_index: usize = special.end; + var case_block = child_block.makeSubBlock(); + case_block.runtime_loop = null; + case_block.runtime_cond = operand_src; + case_block.runtime_index += 1; + defer case_block.instructions.deinit(gpa); - //var scalar_i: usize = 0; - //while (scalar_i < scalar_cases_len) : (scalar_i += 1) { - // const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - // extra_index += 1; - // const body_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const body = sema.code.extra[extra_index..][0..body_len]; - // extra_index += body_len; + var extra_index: usize = special.end; - // case_block.instructions.shrinkRetainingCapacity(0); - // const item = sema.resolveInst(item_ref); - // // We validate these above; these two calls are guaranteed to succeed. 
- // const item_val = sema.resolveConstValue(&case_block, .unneeded, item) catch unreachable; + var scalar_i: usize = 0; + while (scalar_i < scalar_cases_len) : (scalar_i += 1) { + const item_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + extra_index += 1; + const body_len = sema.code.extra[extra_index]; + extra_index += 1; + const body = sema.code.extra[extra_index..][0..body_len]; + extra_index += body_len; - // _ = try sema.analyzeBody(&case_block, body); + case_block.instructions.shrinkRetainingCapacity(0); + const item = sema.resolveInst(item_ref); + // `item` is already guaranteed to be constant known. - // cases[scalar_i] = .{ - // .item = item_val, - // .body = .{ .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items) }, - // }; - //} + _ = try sema.analyzeBody(&case_block, body); - //var first_else_body: Body = undefined; - //var prev_condbr: ?*Inst.CondBr = null; + try cases_extra.ensureUnusedCapacity(gpa, 3 + case_block.instructions.items.len); + cases_extra.appendAssumeCapacity(1); // items_len + cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); + cases_extra.appendAssumeCapacity(@enumToInt(item)); + cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); + } - //var multi_i: usize = 0; - //while (multi_i < multi_cases_len) : (multi_i += 1) { - // const items_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const ranges_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const body_len = sema.code.extra[extra_index]; - // extra_index += 1; - // const items = sema.code.refSlice(extra_index, items_len); - // extra_index += items_len; + var is_first = true; + var prev_cond_br: Air.Inst.Index = undefined; + var first_else_body: []const Air.Inst.Index = &.{}; + defer gpa.free(first_else_body); + var prev_then_body: []const Air.Inst.Index = &.{}; + defer gpa.free(prev_then_body); - // case_block.instructions.shrinkRetainingCapacity(0); + var multi_i: usize = 0; + while (multi_i < multi_cases_len) : (multi_i += 1) { + const items_len = sema.code.extra[extra_index]; + extra_index += 1; + const ranges_len = sema.code.extra[extra_index]; + extra_index += 1; + const body_len = sema.code.extra[extra_index]; + extra_index += 1; + const items = sema.code.refSlice(extra_index, items_len); + extra_index += items_len; - // var any_ok: ?Air.Inst.Index = null; + case_block.instructions.shrinkRetainingCapacity(0); - // for (items) |item_ref| { - // const item = sema.resolveInst(item_ref); - // _ = try sema.resolveConstValue(&child_block, item.src, item); + var any_ok: Air.Inst.Ref = .none; - // const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); - // if (any_ok) |some| { - // any_ok = try case_block.addBinOp(.bool_or, some, cmp_ok); - // } else { - // any_ok = cmp_ok; - // } - // } + // If there are any ranges, we have to put all the items into the + // else prong. Otherwise, we can take advantage of multiple items + // mapping to the same body. 
+ if (ranges_len == 0) { + const body = sema.code.extra[extra_index..][0..body_len]; + extra_index += body_len; + _ = try sema.analyzeBody(&case_block, body); - // var range_i: usize = 0; - // while (range_i < ranges_len) : (range_i += 1) { - // const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - // extra_index += 1; - // const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); - // extra_index += 1; + try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + + case_block.instructions.items.len); - // const item_first = sema.resolveInst(first_ref); - // const item_last = sema.resolveInst(last_ref); + cases_extra.appendAssumeCapacity(1); // items_len + cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); - // _ = try sema.resolveConstValue(&child_block, item_first.src, item_first); - // _ = try sema.resolveConstValue(&child_block, item_last.src, item_last); + for (items) |item_ref| { + const item = sema.resolveInst(item_ref); + cases_extra.appendAssumeCapacity(@enumToInt(item)); + } - // // operand >= first and operand <= last - // const range_first_ok = try case_block.addBinOp( - // .cmp_gte, - // operand, - // item_first, - // ); - // const range_last_ok = try case_block.addBinOp( - // .cmp_lte, - // operand, - // item_last, - // ); - // const range_ok = try case_block.addBinOp( - // .bool_and, - // range_first_ok, - // range_last_ok, - // ); - // if (any_ok) |some| { - // any_ok = try case_block.addBinOp(.bool_or, some, range_ok); - // } else { - // any_ok = range_ok; - // } - // } + cases_extra.appendSliceAssumeCapacity(case_block.instructions.items); + } else { + for (items) |item_ref| { + const item = sema.resolveInst(item_ref); + const cmp_ok = try case_block.addBinOp(.cmp_eq, operand, item); + if (any_ok != .none) { + any_ok = try case_block.addBinOp(.bool_or, any_ok, cmp_ok); + } else { + any_ok = cmp_ok; + } + } - // const new_condbr = try sema.arena.create(Inst.CondBr); - // new_condbr.* = .{ - // .base = .{ - // .tag = .condbr, - // .ty = Type.initTag(.noreturn), - // .src = src, - // }, - // .condition = any_ok.?, - // .then_body = undefined, - // .else_body = undefined, - // }; - // try case_block.instructions.append(gpa, &new_condbr.base); + var range_i: usize = 0; + while (range_i < ranges_len) : (range_i += 1) { + const first_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + extra_index += 1; + const last_ref = @intToEnum(Zir.Inst.Ref, sema.code.extra[extra_index]); + extra_index += 1; - // const cond_body: Body = .{ - // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - // }; + const item_first = sema.resolveInst(first_ref); + const item_last = sema.resolveInst(last_ref); - // case_block.instructions.shrinkRetainingCapacity(0); - // const body = sema.code.extra[extra_index..][0..body_len]; - // extra_index += body_len; - // _ = try sema.analyzeBody(&case_block, body); - // new_condbr.then_body = .{ - // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - // }; - // if (prev_condbr) |condbr| { - // condbr.else_body = cond_body; - // } else { - // first_else_body = cond_body; - // } - // prev_condbr = new_condbr; - //} + // operand >= first and operand <= last + const range_first_ok = try case_block.addBinOp( + .cmp_gte, + operand, + item_first, + ); + const range_last_ok = try case_block.addBinOp( + .cmp_lte, + operand, + item_last, + ); + const range_ok = try case_block.addBinOp( + .bool_and, + range_first_ok, + 
range_last_ok, + ); + if (any_ok != .none) { + any_ok = try case_block.addBinOp(.bool_or, any_ok, range_ok); + } else { + any_ok = range_ok; + } + } - //const final_else_body: Body = blk: { - // if (special.body.len != 0) { - // case_block.instructions.shrinkRetainingCapacity(0); - // _ = try sema.analyzeBody(&case_block, special.body); - // const else_body: Body = .{ - // .instructions = try sema.arena.dupe(Air.Inst.Index, case_block.instructions.items), - // }; - // if (prev_condbr) |condbr| { - // condbr.else_body = else_body; - // break :blk first_else_body; - // } else { - // break :blk else_body; - // } - // } else { - // break :blk .{ .instructions = &.{} }; - // } - //}; + const new_cond_br = try case_block.addInstAsIndex(.{ .tag = .cond_br, .data = .{ + .pl_op = .{ + .operand = any_ok, + .payload = undefined, + }, + } }); + var cond_body = case_block.instructions.toOwnedSlice(gpa); + defer gpa.free(cond_body); - //_ = try child_block.addSwitchBr(src, operand, cases, final_else_body); - //return sema.analyzeBlockBody(block, src, &child_block, merges); + case_block.instructions.shrinkRetainingCapacity(0); + const body = sema.code.extra[extra_index..][0..body_len]; + extra_index += body_len; + _ = try sema.analyzeBody(&case_block, body); + + if (is_first) { + is_first = false; + first_else_body = cond_body; + cond_body = &.{}; + } else { + try sema.air_extra.ensureUnusedCapacity( + gpa, + @typeInfo(Air.CondBr).Struct.fields.len + prev_then_body.len + cond_body.len, + ); + + sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = + sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, prev_then_body.len), + .else_body_len = @intCast(u32, cond_body.len), + }); + sema.air_extra.appendSliceAssumeCapacity(prev_then_body); + sema.air_extra.appendSliceAssumeCapacity(cond_body); + } + prev_then_body = case_block.instructions.toOwnedSlice(gpa); + prev_cond_br = new_cond_br; + } + } + + var final_else_body: []const Air.Inst.Index = &.{}; + if (special.body.len != 0) { + case_block.instructions.shrinkRetainingCapacity(0); + _ = try sema.analyzeBody(&case_block, special.body); + + if (is_first) { + final_else_body = case_block.instructions.items; + } else { + try sema.air_extra.ensureUnusedCapacity(gpa, prev_then_body.len + + @typeInfo(Air.CondBr).Struct.fields.len + case_block.instructions.items.len); + + sema.air_instructions.items(.data)[prev_cond_br].pl_op.payload = + sema.addExtraAssumeCapacity(Air.CondBr{ + .then_body_len = @intCast(u32, prev_then_body.len), + .else_body_len = @intCast(u32, case_block.instructions.items.len), + }); + sema.air_extra.appendSliceAssumeCapacity(prev_then_body); + sema.air_extra.appendSliceAssumeCapacity(case_block.instructions.items); + final_else_body = first_else_body; + } + } + + try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len + + cases_extra.items.len); + + _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ + .operand = operand, + .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ + .cases_len = @intCast(u32, scalar_cases_len + multi_cases_len), + .else_body_len = @intCast(u32, final_else_body.len), + }), + } } }); + sema.air_extra.appendSliceAssumeCapacity(cases_extra.items); + sema.air_extra.appendSliceAssumeCapacity(final_else_body); + + return sema.analyzeBlockBody(block, src, &child_block, merges); } fn resolveSwitchItemVal( diff --git a/src/codegen/wasm.zig b/src/codegen/wasm.zig index e72140d826..41397f55f4 100644 --- a/src/codegen/wasm.zig +++ 
b/src/codegen/wasm.zig @@ -1282,44 +1282,49 @@ pub const Context = struct { // result type is always 'noreturn' const blocktype = wasm.block_empty; - const signedness: std.builtin.Signedness = blk: { - // by default we tell the operand type is unsigned (i.e. bools and enum values) - if (target_ty.zigTypeTag() != .Int) break :blk .unsigned; + _ = valtype; + _ = blocktype; + _ = target; + _ = else_body; + return self.fail("TODO implement wasm codegen for switch", .{}); + //const signedness: std.builtin.Signedness = blk: { + // // by default we tell the operand type is unsigned (i.e. bools and enum values) + // if (target_ty.zigTypeTag() != .Int) break :blk .unsigned; - // incase of an actual integer, we emit the correct signedness - break :blk target_ty.intInfo(self.target).signedness; - }; - for (cases) |case_idx| { - const case = self.air.extraData(Air.SwitchBr.Case, case_idx); - const case_body = self.air.extra[case.end..][0..case.data.body_len]; + // // incase of an actual integer, we emit the correct signedness + // break :blk target_ty.intInfo(self.target).signedness; + //}; + //for (cases) |case_idx| { + // const case = self.air.extraData(Air.SwitchBr.Case, case_idx); + // const case_body = self.air.extra[case.end..][0..case.data.body_len]; - // create a block for each case, when the condition does not match we break out of it - try self.startBlock(.block, blocktype, null); - try self.emitWValue(target); + // // create a block for each case, when the condition does not match we break out of it + // try self.startBlock(.block, blocktype, null); + // try self.emitWValue(target); - const val = self.air.value(case.data.item).?; - try self.emitConstant(val, target_ty); - const opcode = buildOpcode(.{ - .valtype1 = valtype, - .op = .ne, // not equal because we jump out the block if it does not match the condition - .signedness = signedness, - }); - try self.code.append(wasm.opcode(opcode)); - try self.code.append(wasm.opcode(.br_if)); - try leb.writeULEB128(self.code.writer(), @as(u32, 0)); + // const val = self.air.value(case.data.item).?; + // try self.emitConstant(val, target_ty); + // const opcode = buildOpcode(.{ + // .valtype1 = valtype, + // .op = .ne, // not equal because we jump out the block if it does not match the condition + // .signedness = signedness, + // }); + // try self.code.append(wasm.opcode(opcode)); + // try self.code.append(wasm.opcode(.br_if)); + // try leb.writeULEB128(self.code.writer(), @as(u32, 0)); - // emit our block code - try self.genBody(case_body); + // // emit our block code + // try self.genBody(case_body); - // end the block we created earlier - try self.endBlock(); - } + // // end the block we created earlier + // try self.endBlock(); + //} - // finally, emit the else case if it exists. Here we will not have to - // check for a condition, so also no need to emit a block. - try self.genBody(else_body); + //// finally, emit the else case if it exists. Here we will not have to + //// check for a condition, so also no need to emit a block. 
+ //try self.genBody(else_body); - return .none; + //return .none; } fn airIsErr(self: *Context, inst: Air.Inst.Index, opcode: wasm.Opcode) InnerError!WValue { From 495e60d6415d6de63c9ef0f604a959d969118116 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Jul 2021 18:57:57 -0700 Subject: [PATCH 42/53] std.ArrayList: add missing assertion in appendSliceAssumeCapacity --- lib/std/array_list.zig | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/lib/std/array_list.zig b/lib/std/array_list.zig index f55ef81a6b..8b46ec2145 100644 --- a/lib/std/array_list.zig +++ b/lib/std/array_list.zig @@ -227,10 +227,11 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type { /// Append the slice of items to the list, asserting the capacity is already /// enough to store the new items. **Does not** invalidate pointers. pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void { - const oldlen = self.items.len; - const newlen = self.items.len + items.len; - self.items.len = newlen; - mem.copy(T, self.items[oldlen..], items); + const old_len = self.items.len; + const new_len = old_len + items.len; + assert(new_len <= self.capacity); + self.items.len = new_len; + mem.copy(T, self.items[old_len..], items); } pub usingnamespace if (T != u8) struct {} else struct { @@ -570,11 +571,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ /// Append the slice of items to the list, asserting the capacity is enough /// to store the new items. pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void { - const oldlen = self.items.len; - const newlen = self.items.len + items.len; - - self.items.len = newlen; - mem.copy(T, self.items[oldlen..], items); + const old_len = self.items.len; + const new_len = old_len + items.len; + assert(new_len <= self.capacity); + self.items.len = new_len; + mem.copy(T, self.items[old_len..], items); } /// Append a value to the list `n` times. 
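As a usage note, a tiny self-contained program showing what the new assert catches (toy code, not part of the patch; with the assertion in place, the commented-out call below would now trap in a safe build instead of silently pushing `items.len` past `capacity`):

    const std = @import("std");

    pub fn main() !void {
        var gpa = std.heap.GeneralPurposeAllocator(.{}){};
        defer _ = gpa.deinit();
        var list = std.ArrayList(u8).init(&gpa.allocator);
        defer list.deinit();

        try list.ensureTotalCapacity(4);
        list.appendSliceAssumeCapacity("ab"); // ok: new_len 2 <= capacity 4
        // list.appendSliceAssumeCapacity("cdefg"); // would fail the new assert: new_len 7 > capacity 4
    }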
From a97e5e119afb80e0d6d047682b8301bab9423078 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Mon, 19 Jul 2021 18:58:51 -0700 Subject: [PATCH 43/53] stage2: switch: fix Sema bugs and implement AIR printing --- src/Liveness.zig | 10 +++++----- src/Sema.zig | 9 ++++++--- src/print_air.zig | 47 +++++++++++++++++++++++++++++------------------ 3 files changed, 40 insertions(+), 26 deletions(-) diff --git a/src/Liveness.zig b/src/Liveness.zig index 2c226122bf..02d0ea7bc5 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -442,9 +442,9 @@ fn analyzeInst( return trackOperands(a, new_set, inst, main_tomb, .{ condition, .none, .none }); }, .switch_br => { - const inst_data = inst_datas[inst].pl_op; - const condition = inst_data.operand; - const switch_br = a.air.extraData(Air.SwitchBr, inst_data.payload); + const pl_op = inst_datas[inst].pl_op; + const condition = pl_op.operand; + const switch_br = a.air.extraData(Air.SwitchBr, pl_op.payload); const Table = std.AutoHashMapUnmanaged(Air.Inst.Index, void); const case_tables = try gpa.alloc(Table, switch_br.data.cases_len + 1); // +1 for else @@ -456,8 +456,8 @@ fn analyzeInst( var air_extra_index: usize = switch_br.end; for (case_tables[0..switch_br.data.cases_len]) |*case_table| { const case = a.air.extraData(Air.SwitchBr.Case, air_extra_index); - const case_body = a.air.extra[case.end..][0..case.data.body_len]; - air_extra_index = case.end + case_body.len; + const case_body = a.air.extra[case.end + case.data.items_len ..][0..case.data.body_len]; + air_extra_index = case.end + case.data.items_len + case_body.len; try analyzeWithContext(a, case_table, case_body); // Reset the table back to its state from before the case. diff --git a/src/Sema.zig b/src/Sema.zig index b9449157e2..826097c3d1 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -4213,6 +4213,7 @@ fn analyzeSwitch( var prev_then_body: []const Air.Inst.Index = &.{}; defer gpa.free(prev_then_body); + var cases_len = scalar_cases_len; var multi_i: usize = 0; while (multi_i < multi_cases_len) : (multi_i += 1) { const items_len = sema.code.extra[extra_index]; @@ -4232,6 +4233,8 @@ fn analyzeSwitch( // else prong. Otherwise, we can take advantage of multiple items // mapping to the same body. 
if (ranges_len == 0) { + cases_len += 1; + const body = sema.code.extra[extra_index..][0..body_len]; extra_index += body_len; _ = try sema.analyzeBody(&case_block, body); @@ -4239,7 +4242,7 @@ fn analyzeSwitch( try cases_extra.ensureUnusedCapacity(gpa, 2 + items.len + case_block.instructions.items.len); - cases_extra.appendAssumeCapacity(1); // items_len + cases_extra.appendAssumeCapacity(@intCast(u32, items.len)); cases_extra.appendAssumeCapacity(@intCast(u32, case_block.instructions.items.len)); for (items) |item_ref| { @@ -4352,12 +4355,12 @@ fn analyzeSwitch( } try sema.air_extra.ensureUnusedCapacity(gpa, @typeInfo(Air.SwitchBr).Struct.fields.len + - cases_extra.items.len); + cases_extra.items.len + final_else_body.len); _ = try child_block.addInst(.{ .tag = .switch_br, .data = .{ .pl_op = .{ .operand = operand, .payload = sema.addExtraAssumeCapacity(Air.SwitchBr{ - .cases_len = @intCast(u32, scalar_cases_len + multi_cases_len), + .cases_len = @intCast(u32, cases_len), .else_body_len = @intCast(u32, final_else_body.len), }), } } }); diff --git a/src/print_air.zig b/src/print_air.zig index f31b307b57..51f0ce4f49 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -300,33 +300,44 @@ const Writer = struct { fn writeSwitchBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[inst].pl_op; - const extra = w.air.extraData(Air.SwitchBr, pl_op.payload); - const cases = w.air.extra[extra.end..][0..extra.data.cases_len]; - const else_body = w.air.extra[extra.end + cases.len ..][0..extra.data.else_body_len]; + const switch_br = w.air.extraData(Air.SwitchBr, pl_op.payload); + var extra_index: usize = switch_br.end; + var case_i: u32 = 0; try w.writeInstRef(s, pl_op.operand); - try s.writeAll(", {\n"); - const old_indent = w.indent; - if (else_body.len != 0) { - w.indent += 2; - try w.writeBody(s, else_body); - try s.writeByteNTimes(' ', old_indent); - try s.writeAll("}, {\n"); - w.indent = old_indent; - } + w.indent += 2; - for (cases) |case_index| { - const case = w.air.extraData(Air.SwitchBr.Case, case_index); - const case_body = w.air.extra[case.end..][0..case.data.body_len]; + while (case_i < switch_br.data.cases_len) : (case_i += 1) { + const case = w.air.extraData(Air.SwitchBr.Case, extra_index); + const items = @bitCast([]const Air.Inst.Ref, w.air.extra[case.end..][0..case.data.items_len]); + const case_body = w.air.extra[case.end + items.len ..][0..case.data.body_len]; + extra_index = case.end + case.data.items_len + case_body.len; + try s.writeAll(", ["); + for (items) |item, item_i| { + if (item_i != 0) try s.writeAll(", "); + try w.writeInstRef(s, item); + } + try s.writeAll("] => {\n"); w.indent += 2; try w.writeBody(s, case_body); - try s.writeByteNTimes(' ', old_indent); - try s.writeAll("}, {\n"); - w.indent = old_indent; + w.indent -= 2; + try s.writeByteNTimes(' ', w.indent); + try s.writeAll("}"); } + const else_body = w.air.extra[extra_index..][0..switch_br.data.else_body_len]; + if (else_body.len != 0) { + try s.writeAll(", else => {\n"); + w.indent += 2; + try w.writeBody(s, else_body); + w.indent -= 2; + try s.writeByteNTimes(' ', w.indent); + try s.writeAll("}"); + } + + try s.writeAll("\n"); try s.writeByteNTimes(' ', old_indent); try s.writeAll("}"); } From fe14e339458a578657f3890f00d654a15c84422c Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 15:22:37 -0700 Subject: [PATCH 44/53] stage2: separate work queue item for functions from decls Previously we had codegen_decl for both constant values as well as function bodies. A recent commit updated the linker backends to add updateFunc as a separate function from updateDecl, and now this commit does the same with work queue tasks. The frontend now distinguishes between function pointers and function bodies. --- src/Compilation.zig | 158 ++++++++++++++++++++++++++----------- src/Module.zig | 186 ++++++++++++++++++++++---------------------- 2 files changed, 209 insertions(+), 135 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index 50d1f5760e..ea484c2d15 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -169,8 +169,10 @@ pub const CSourceFile = struct { }; const Job = union(enum) { - /// Write the machine code for a Decl to the output file. + /// Write the constant value for a Decl to the output file. codegen_decl: *Module.Decl, + /// Write the machine code for a function to the output file. + codegen_func: *Module.Fn, /// Render the .h file snippet for the Decl. emit_h_decl: *Module.Decl, /// The Decl needs to be analyzed and possibly export itself. @@ -2006,54 +2008,56 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor const module = self.bin_file.options.module.?; assert(decl.has_tv); if (decl.val.castTag(.function)) |payload| { - const func = payload.data; + if (decl.owns_tv) { + const func = payload.data; - var air = switch (func.state) { - .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { + var air = switch (func.state) { + .sema_failure, .dependency_failure => continue, + .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { error.AnalysisFail => { - assert(func.state != .in_progress); + assert(func.state != .in_progress); continue; }, error.OutOfMemory => return error.OutOfMemory, }, .in_progress => unreachable, .inline_only => unreachable, // don't queue work for this - .sema_failure, .dependency_failure => continue, .success => unreachable, // don't queue it twice }; defer air.deinit(gpa); - - log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); - defer liveness.deinit(gpa); - - if (builtin.mode == .Debug and self.verbose_air) { - std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); - @import("print_air.zig").dump(gpa, air, liveness); - std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); + } - assert(decl.ty.hasCodeGenBits()); - - self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - continue; - }, - else => { - try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - gpa, - decl.srcLoc(), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - decl.analysis =
.codegen_failure_retryable; + continue; + }, + }; + continue; } - - assert(decl.ty.hasCodeGenBits()); - - self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - continue; - }, - else => { - try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - gpa, - decl.srcLoc(), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - decl.analysis = .codegen_failure_retryable; - continue; - }, - }; - continue; } assert(decl.ty.hasCodeGenBits()); @@ -2078,6 +2082,72 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor }; }, }, + .codegen_func => |func| switch (func.owner_decl.analysis) { + .unreferenced => unreachable, + .in_progress => unreachable, + .outdated => unreachable, + + .file_failure, + .sema_failure, + .codegen_failure, + .dependency_failure, + .sema_failure_retryable, + => continue, + + .complete, .codegen_failure_retryable => { + if (build_options.omit_stage2) + @panic("sadly stage2 is omitted from this build to save memory on the CI server"); + switch (func.state) { + .sema_failure, .dependency_failure => continue, + .queued => {}, + .in_progress => unreachable, + .inline_only => unreachable, // don't queue work for this + .success => unreachable, // don't queue it twice + } + + const module = self.bin_file.options.module.?; + const decl = func.owner_decl; + + var air = module.analyzeFnBody(decl, func) catch |err| switch (err) { + error.AnalysisFail => { + assert(func.state != .in_progress); + continue; + }, + error.OutOfMemory => return error.OutOfMemory, + }; + defer air.deinit(gpa); + + log.debug("analyze liveness of {s}", .{decl.name}); + var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); + defer liveness.deinit(gpa); + + if (builtin.mode == .Debug and self.verbose_air) { + std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); + @import("print_air.zig").dump(gpa, air, liveness); + std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); + } + + self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { + error.OutOfMemory => return error.OutOfMemory, + error.AnalysisFail => { + decl.analysis = .codegen_failure; + continue; + }, + else => { + try module.failed_decls.ensureUnusedCapacity(gpa, 1); + module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( + gpa, + decl.srcLoc(), + "unable to codegen: {s}", + .{@errorName(err)}, + )); + decl.analysis = .codegen_failure_retryable; + continue; + }, + }; + continue; + }, + }, .emit_h_decl => |decl| switch (decl.analysis) { .unreferenced => unreachable, .in_progress => unreachable, diff --git a/src/Module.zig b/src/Module.zig index 9fadf67c6f..4930e7846c 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -2902,6 +2902,7 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { decl.generation = mod.generation; return false; } + log.debug("semaDecl {*} ({s})", .{ decl, decl.name }); var block_scope: Scope.Block = .{ .parent = null, @@ -2938,106 +2939,109 @@ fn semaDecl(mod: *Module, decl: *Decl) !bool { const decl_arena_state = try decl_arena.allocator.create(std.heap.ArenaAllocator.State); if (decl_tv.val.castTag(.function)) |fn_payload| { - var prev_type_has_bits = false; - var prev_is_inline = false; - var type_changed = true; + const func = fn_payload.data; + const owns_tv = func.owner_decl == decl; + if 
(owns_tv) { + var prev_type_has_bits = false; + var prev_is_inline = false; + var type_changed = true; - if (decl.has_tv) { - prev_type_has_bits = decl.ty.hasCodeGenBits(); - type_changed = !decl.ty.eql(decl_tv.ty); - if (decl.getFunction()) |prev_func| { - prev_is_inline = prev_func.state == .inline_only; + if (decl.has_tv) { + prev_type_has_bits = decl.ty.hasCodeGenBits(); + type_changed = !decl.ty.eql(decl_tv.ty); + if (decl.getFunction()) |prev_func| { + prev_is_inline = prev_func.state == .inline_only; + } + decl.clearValues(gpa); } - decl.clearValues(gpa); - } - decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); - decl.val = try decl_tv.val.copy(&decl_arena.allocator); - decl.align_val = try align_val.copy(&decl_arena.allocator); - decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); - decl.has_tv = true; - decl.owns_tv = fn_payload.data.owner_decl == decl; - decl_arena_state.* = decl_arena.state; - decl.value_arena = decl_arena_state; - decl.analysis = .complete; - decl.generation = mod.generation; + decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); + decl.val = try decl_tv.val.copy(&decl_arena.allocator); + decl.align_val = try align_val.copy(&decl_arena.allocator); + decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.has_tv = true; + decl.owns_tv = owns_tv; + decl_arena_state.* = decl_arena.state; + decl.value_arena = decl_arena_state; + decl.analysis = .complete; + decl.generation = mod.generation; - const is_inline = decl_tv.ty.fnCallingConvention() == .Inline; - if (!is_inline and decl_tv.ty.hasCodeGenBits()) { - // We don't fully codegen the decl until later, but we do need to reserve a global - // offset table index for it. This allows us to codegen decls out of dependency order, - // increasing how many computations can be done in parallel. - try mod.comp.bin_file.allocateDeclIndexes(decl); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + const is_inline = decl_tv.ty.fnCallingConvention() == .Inline; + if (!is_inline and decl_tv.ty.hasCodeGenBits()) { + // We don't fully codegen the decl until later, but we do need to reserve a global + // offset table index for it. This allows us to codegen decls out of dependency order, + // increasing how many computations can be done in parallel. + try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.work_queue.writeItem(.{ .codegen_func = func }); + if (type_changed and mod.emit_h != null) { + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + } + } else if (!prev_is_inline and prev_type_has_bits) { + mod.comp.bin_file.freeDecl(decl); } - } else if (!prev_is_inline and prev_type_has_bits) { - mod.comp.bin_file.freeDecl(decl); - } - if (decl.is_exported) { - const export_src = src; // TODO make this point at `export` token - if (is_inline) { - return mod.fail(&block_scope.base, export_src, "export of inline function", .{}); + if (decl.is_exported) { + const export_src = src; // TODO make this point at `export` token + if (is_inline) { + return mod.fail(&block_scope.base, export_src, "export of inline function", .{}); + } + // The scope needs to have the decl in it. + try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); } - // The scope needs to have the decl in it. 
- try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); + return type_changed or is_inline != prev_is_inline; } - return type_changed or is_inline != prev_is_inline; - } else { - var type_changed = true; - if (decl.has_tv) { - type_changed = !decl.ty.eql(decl_tv.ty); - decl.clearValues(gpa); - } - - decl.owns_tv = false; - var queue_linker_work = false; - if (decl_tv.val.castTag(.variable)) |payload| { - const variable = payload.data; - if (variable.owner_decl == decl) { - decl.owns_tv = true; - queue_linker_work = true; - - const copied_init = try variable.init.copy(&decl_arena.allocator); - variable.init = copied_init; - } - } else if (decl_tv.val.castTag(.extern_fn)) |payload| { - const owner_decl = payload.data; - if (decl == owner_decl) { - decl.owns_tv = true; - queue_linker_work = true; - } - } - - decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); - decl.val = try decl_tv.val.copy(&decl_arena.allocator); - decl.align_val = try align_val.copy(&decl_arena.allocator); - decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); - decl.has_tv = true; - decl_arena_state.* = decl_arena.state; - decl.value_arena = decl_arena_state; - decl.analysis = .complete; - decl.generation = mod.generation; - - if (queue_linker_work and decl.ty.hasCodeGenBits()) { - try mod.comp.bin_file.allocateDeclIndexes(decl); - try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); - - if (type_changed and mod.emit_h != null) { - try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); - } - } - - if (decl.is_exported) { - const export_src = src; // TODO point to the export token - // The scope needs to have the decl in it. - try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); - } - - return type_changed; } + var type_changed = true; + if (decl.has_tv) { + type_changed = !decl.ty.eql(decl_tv.ty); + decl.clearValues(gpa); + } + + decl.owns_tv = false; + var queue_linker_work = false; + if (decl_tv.val.castTag(.variable)) |payload| { + const variable = payload.data; + if (variable.owner_decl == decl) { + decl.owns_tv = true; + queue_linker_work = true; + + const copied_init = try variable.init.copy(&decl_arena.allocator); + variable.init = copied_init; + } + } else if (decl_tv.val.castTag(.extern_fn)) |payload| { + const owner_decl = payload.data; + if (decl == owner_decl) { + decl.owns_tv = true; + queue_linker_work = true; + } + } + + decl.ty = try decl_tv.ty.copy(&decl_arena.allocator); + decl.val = try decl_tv.val.copy(&decl_arena.allocator); + decl.align_val = try align_val.copy(&decl_arena.allocator); + decl.linksection_val = try linksection_val.copy(&decl_arena.allocator); + decl.has_tv = true; + decl_arena_state.* = decl_arena.state; + decl.value_arena = decl_arena_state; + decl.analysis = .complete; + decl.generation = mod.generation; + + if (queue_linker_work and decl.ty.hasCodeGenBits()) { + try mod.comp.bin_file.allocateDeclIndexes(decl); + try mod.comp.work_queue.writeItem(.{ .codegen_decl = decl }); + + if (type_changed and mod.emit_h != null) { + try mod.comp.work_queue.writeItem(.{ .emit_h_decl = decl }); + } + } + + if (decl.is_exported) { + const export_src = src; // TODO point to the export token + // The scope needs to have the decl in it. + try mod.analyzeExport(&block_scope.base, export_src, mem.spanZ(decl.name), decl); + } + + return type_changed; } /// Returns the depender's index of the dependee. 
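Before the next patch, a condensed, self-contained illustration of the job split introduced above (stand-in types for brevity; the real payloads are *Module.Decl and *Module.Fn as in the Job union in the diff):

    const std = @import("std");

    const Decl = struct { name: []const u8 };
    const Fn = struct { owner_decl: *Decl };

    // Constant values and function bodies are now distinct queue items.
    const Job = union(enum) {
        codegen_decl: *Decl,
        codegen_func: *Fn,
    };

    fn process(job: Job) void {
        switch (job) {
            // A non-function value goes straight to the linker backend.
            .codegen_decl => |decl| std.debug.print("updateDecl: {s}\n", .{decl.name}),
            // A function body gets AIR and liveness analysis before lowering.
            .codegen_func => |func| std.debug.print("analyzeFnBody + updateFunc: {s}\n", .{func.owner_decl.name}),
        }
    }

    pub fn main() void {
        var answer = Decl{ .name = "answer" };
        var square = Decl{ .name = "square" };
        var square_fn = Fn{ .owner_decl = &square };
        process(Job{ .codegen_decl = &answer });
        process(Job{ .codegen_func = &square_fn });
    }

The design point is that only .codegen_func pays for AIR and liveness analysis; plain values skip straight to updateDecl.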
From 9c652cc6507e6cba9bba5403bd819676f23c93a1 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 15:52:58 -0700 Subject: [PATCH 45/53] stage2: C backend: implement support for switch_br AIR --- src/codegen/c.zig | 48 ++++++++++++++++++++++++++--------------------- 1 file changed, 27 insertions(+), 21 deletions(-) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index f938b28ec2..71a0869046 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1275,7 +1275,7 @@ fn airCall(o: *Object, inst: Air.Inst.Index) !CValue { fn airDbgStmt(o: *Object, inst: Air.Inst.Index) !CValue { const dbg_stmt = o.air.instructions.items(.data)[inst].dbg_stmt; const writer = o.writer(); - try writer.print("#line {d}\n", .{dbg_stmt.line}); + try writer.print("#line {d}\n", .{dbg_stmt.line + 1}); return CValue.none; } @@ -1403,35 +1403,41 @@ fn airSwitchBr(o: *Object, inst: Air.Inst.Index) !CValue { const pl_op = o.air.instructions.items(.data)[inst].pl_op; const condition = try o.resolveInst(pl_op.operand); const condition_ty = o.air.typeOf(pl_op.operand); + const switch_br = o.air.extraData(Air.SwitchBr, pl_op.payload); const writer = o.writer(); try writer.writeAll("switch ("); try o.writeCValue(writer, condition); - try writer.writeAll(") {\n"); + try writer.writeAll(") {"); o.indent_writer.pushIndent(); - // Need to rework Sema so that multiple cases are represented rather than - // getting branching logic inside the else, this way we get multiple case - // labels here rather than logic in the default case. - _ = condition_ty; - return o.dg.fail("TODO implement switch in C backend", .{}); + var extra_index: usize = switch_br.end; + var case_i: u32 = 0; + while (case_i < switch_br.data.cases_len) : (case_i += 1) { + const case = o.air.extraData(Air.SwitchBr.Case, extra_index); + const items = @bitCast([]const Air.Inst.Ref, o.air.extra[case.end..][0..case.data.items_len]); + const case_body = o.air.extra[case.end + items.len ..][0..case.data.body_len]; + extra_index = case.end + case.data.items_len + case_body.len; - //for (inst.cases) |case| { - // try writer.writeAll("case "); - // try o.dg.renderValue(writer, condition_ty, case.item); - // try writer.writeAll(": "); - // // the case body must be noreturn so we don't need to insert a break - // try genBody(o, case.body); - // try o.indent_writer.insertNewline(); - //} + for (items) |item| { + try o.indent_writer.insertNewline(); + try writer.writeAll("case "); + try o.dg.renderValue(writer, condition_ty, o.air.value(item).?); + try writer.writeAll(": "); + } + // The case body must be noreturn so we don't need to insert a break. 
+ try genBody(o, case_body); + } - //try writer.writeAll("default: "); - //try genBody(o, inst.else_body); - //try o.indent_writer.insertNewline(); + const else_body = o.air.extra[extra_index..][0..switch_br.data.else_body_len]; + try o.indent_writer.insertNewline(); + try writer.writeAll("default: "); + try genBody(o, else_body); + try o.indent_writer.insertNewline(); - //o.indent_writer.popIndent(); - //try writer.writeAll("}\n"); - //return CValue.none; + o.indent_writer.popIndent(); + try writer.writeAll("}\n"); + return CValue.none; } fn airAsm(o: *Object, inst: Air.Inst.Index) !CValue { From f47cf93b477c47cb4be00019203b2cd11a94d53d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 15:56:42 -0700 Subject: [PATCH 46/53] stage2: C backend: fix ret AIR instruction when operand has 0 runtime bits --- src/codegen/c.zig | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/codegen/c.zig b/src/codegen/c.zig index 71a0869046..71714cc1b8 100644 --- a/src/codegen/c.zig +++ b/src/codegen/c.zig @@ -1006,11 +1006,15 @@ fn airLoad(o: *Object, inst: Air.Inst.Index) !CValue { fn airRet(o: *Object, inst: Air.Inst.Index) !CValue { const un_op = o.air.instructions.items(.data)[inst].un_op; - const operand = try o.resolveInst(un_op); const writer = o.writer(); - try writer.writeAll("return "); - try o.writeCValue(writer, operand); - try writer.writeAll(";\n"); + if (o.air.typeOf(un_op).hasCodeGenBits()) { + const operand = try o.resolveInst(un_op); + try writer.writeAll("return "); + try o.writeCValue(writer, operand); + try writer.writeAll(";\n"); + } else { + try writer.writeAll("return;\n"); + } return CValue.none; } From 91c4e28c5102223917ccf270fd466b796e0e0587 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 16:04:46 -0700 Subject: [PATCH 47/53] Liveness: fix br instruction not tracking its operand --- src/Liveness.zig | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/Liveness.zig b/src/Liveness.zig index 02d0ea7bc5..2039dd7146 100644 --- a/src/Liveness.zig +++ b/src/Liveness.zig @@ -250,7 +250,6 @@ fn analyzeInst( .arg, .alloc, - .br, .constant, .const_ty, .breakpoint, @@ -321,6 +320,10 @@ fn analyzeInst( const extra = a.air.extraData(Air.StructField, inst_datas[inst].ty_pl.payload).data; return trackOperands(a, new_set, inst, main_tomb, .{ extra.struct_ptr, .none, .none }); }, + .br => { + const br = inst_datas[inst].br; + return trackOperands(a, new_set, inst, main_tomb, .{ br.operand, .none, .none }); + }, .assembly => { const extra = a.air.extraData(Air.Asm, inst_datas[inst].ty_pl.payload); const extended = a.zir.instructions.items(.data)[extra.data.zir_index].extended; From 1097b0ec77d421225250d981704aca6a617bd6b3 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 18:51:40 -0700 Subject: [PATCH 48/53] codegen: fix lowering of AIR return instruction It incorrectly did not process the death of its operand. Additionally: * delete dead code accidentally introduced in fe14e339458a578657f3890f00d654a15c84422c * improve AIR printing code to include liveness data for operands. Now an exclamation point ("!") indicates the tombstone of an AIR instruction. 
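As a rough illustration (hypothetical instructions, not real compiler output), a printed AIR body now looks something like:

    %2 = add(%0!, %1)
    %3 = ret(%2!)

Here %0 and %2 die at these uses while %1 lives on; an instruction whose own result is unused prints with the exclamation point directly after its index, e.g. `%4!`.
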
--- src/Compilation.zig | 59 ++---------------------- src/codegen.zig | 18 +++----- src/print_air.zig | 109 ++++++++++++++++++++++++++++++++------------ 3 files changed, 91 insertions(+), 95 deletions(-) diff --git a/src/Compilation.zig b/src/Compilation.zig index ea484c2d15..78d03d4534 100644 --- a/src/Compilation.zig +++ b/src/Compilation.zig @@ -2007,59 +2007,6 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; assert(decl.has_tv); - if (decl.val.castTag(.function)) |payload| { - if (decl.owns_tv) { - const func = payload.data; - - var air = switch (func.state) { - .sema_failure, .dependency_failure => continue, - .queued => module.analyzeFnBody(decl, func) catch |err| switch (err) { - error.AnalysisFail => { - assert(func.state != .in_progress); - continue; - }, - error.OutOfMemory => return error.OutOfMemory, - }, - .in_progress => unreachable, - .inline_only => unreachable, // don't queue work for this - .success => unreachable, // don't queue it twice - }; - defer air.deinit(gpa); - - log.debug("analyze liveness of {s}", .{decl.name}); - var liveness = try Liveness.analyze(gpa, air, decl.namespace.file_scope.zir); - defer liveness.deinit(gpa); - - if (builtin.mode == .Debug and self.verbose_air) { - std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); - @import("print_air.zig").dump(gpa, air, liveness); - std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); - } - - assert(decl.ty.hasCodeGenBits()); - - self.bin_file.updateFunc(module, func, air, liveness) catch |err| switch (err) { - error.OutOfMemory => return error.OutOfMemory, - error.AnalysisFail => { - decl.analysis = .codegen_failure; - continue; - }, - else => { - try module.failed_decls.ensureUnusedCapacity(gpa, 1); - module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( - gpa, - decl.srcLoc(), - "unable to codegen: {s}", - .{@errorName(err)}, - )); - decl.analysis = .codegen_failure_retryable; - continue; - }, - }; - continue; - } - } - assert(decl.ty.hasCodeGenBits()); self.bin_file.updateDecl(module, decl) catch |err| switch (err) { @@ -2069,7 +2016,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor continue; }, else => { - try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureUnusedCapacity(gpa, 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( gpa, decl.srcLoc(), @@ -2123,7 +2070,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor if (builtin.mode == .Debug and self.verbose_air) { std.debug.print("# Begin Function AIR: {s}:\n", .{decl.name}); - @import("print_air.zig").dump(gpa, air, liveness); + @import("print_air.zig").dump(gpa, air, decl.namespace.file_scope.zir, liveness); std.debug.print("# End Function AIR: {s}:\n", .{decl.name}); } @@ -2207,7 +2154,7 @@ pub fn performAllTheWork(self: *Compilation) error{ TimerUnsupported, OutOfMemor @panic("sadly stage2 is omitted from this build to save memory on the CI server"); const module = self.bin_file.options.module.?; self.bin_file.updateDeclLineNumber(module, decl) catch |err| { - try module.failed_decls.ensureCapacity(gpa, module.failed_decls.count() + 1); + try module.failed_decls.ensureUnusedCapacity(gpa, 1); module.failed_decls.putAssumeCapacityNoClobber(decl, try Module.ErrorMsg.create( gpa, 
decl.srcLoc(), diff --git a/src/codegen.zig b/src/codegen.zig index fa096bc13f..84a47a70ac 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -481,7 +481,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn finishAir(bt: *BigTomb, result: MCValue) void { const is_used = !bt.function.liveness.isUnused(bt.inst); if (is_used) { - log.debug("{} => {}", .{ bt.inst, result }); + log.debug("%{d} => {}", .{ bt.inst, result }); const branch = &bt.function.branch_stack.items[bt.function.branch_stack.items.len - 1]; branch.inst_table.putAssumeCapacityNoClobber(bt.inst, result); } @@ -871,12 +871,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { // zig fmt: on } if (std.debug.runtime_safety) { - if (self.air_bookkeeping != old_air_bookkeeping + 1) { - std.debug.panic( - \\in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. - \\Look for a missing call to finishAir or an extra call to it. - \\ - , .{ inst, air_tags[inst] }); + if (self.air_bookkeeping < old_air_bookkeeping + 1) { + std.debug.panic("in codegen.zig, handling of AIR instruction %{d} ('{}') did not do proper bookkeeping. Look for a missing call to finishAir.", .{ inst, air_tags[inst] }); } } } @@ -963,7 +959,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } const is_used = @truncate(u1, tomb_bits) == 0; if (is_used) { - log.debug("{} => {}", .{ inst, result }); + log.debug("%{d} => {}", .{ inst, result }); const branch = &self.branch_stack.items[self.branch_stack.items.len - 1]; branch.inst_table.putAssumeCapacityNoClobber(inst, result); } @@ -1350,10 +1346,10 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { self.register_manager.registers[index] = inst; } } - log.debug("reusing {} => {}", .{ reg, inst }); + log.debug("%{d} => {} (reused)", .{ inst, reg }); }, .stack_offset => |off| { - log.debug("reusing stack offset {} => {}", .{ off, inst }); + log.debug("%{d} => stack offset {d} (reused)", .{ inst, off }); }, else => return false, } @@ -2852,7 +2848,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const un_op = self.air.instructions.items(.data)[inst].un_op; const operand = try self.resolveInst(un_op); try self.ret(operand); - return self.finishAirBookkeeping(); + return self.finishAir(inst, .dead, .{ un_op, .none, .none }); } fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void { diff --git a/src/print_air.zig b/src/print_air.zig index 51f0ce4f49..21288ebff9 100644 --- a/src/print_air.zig +++ b/src/print_air.zig @@ -4,10 +4,11 @@ const fmtIntSizeBin = std.fmt.fmtIntSizeBin; const Module = @import("Module.zig"); const Value = @import("value.zig").Value; +const Zir = @import("Zir.zig"); const Air = @import("Air.zig"); const Liveness = @import("Liveness.zig"); -pub fn dump(gpa: *Allocator, air: Air, liveness: Liveness) void { +pub fn dump(gpa: *Allocator, air: Air, zir: Zir, liveness: Liveness) void { const instruction_bytes = air.instructions.len * // Here we don't use @sizeOf(Air.Inst.Data) because it would include // the debug safety tag but we want to measure release size. 
@@ -51,11 +52,13 @@ pub fn dump(gpa: *Allocator, air: Air, liveness: Liveness) void { .gpa = gpa, .arena = &arena.allocator, .air = air, + .zir = zir, .liveness = liveness, - .indent = 0, + .indent = 2, }; const stream = std.io.getStdErr().writer(); writer.writeAllConstants(stream) catch return; + stream.writeByte('\n') catch return; writer.writeBody(stream, air.getMainBody()) catch return; } @@ -63,6 +66,7 @@ const Writer = struct { gpa: *Allocator, arena: *Allocator, air: Air, + zir: Zir, liveness: Liveness, indent: usize, @@ -84,13 +88,13 @@ const Writer = struct { fn writeBody(w: *Writer, s: anytype, body: []const Air.Inst.Index) @TypeOf(s).Error!void { for (body) |inst| { try s.writeByteNTimes(' ', w.indent); - try s.print("%{d} ", .{inst}); - try w.writeInst(s, inst); if (w.liveness.isUnused(inst)) { - try s.writeAll(") unused\n"); + try s.print("%{d}!", .{inst}); } else { - try s.writeAll(")\n"); + try s.print("%{d} ", .{inst}); } + try w.writeInst(s, inst); + try s.writeAll(")\n"); } } @@ -176,21 +180,21 @@ const Writer = struct { } fn writeTyStr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { - _ = w; - _ = inst; - try s.writeAll("TODO"); + const ty_str = w.air.instructions.items(.data)[inst].ty_str; + const name = w.zir.nullTerminatedString(ty_str.str); + try s.print("\"{}\", {}", .{ std.zig.fmtEscapes(name), ty_str.ty }); } fn writeBinOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const bin_op = w.air.instructions.items(.data)[inst].bin_op; - try w.writeInstRef(s, bin_op.lhs); + try w.writeOperand(s, inst, 0, bin_op.lhs); try s.writeAll(", "); - try w.writeInstRef(s, bin_op.rhs); + try w.writeOperand(s, inst, 1, bin_op.rhs); } fn writeUnOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const un_op = w.air.instructions.items(.data)[inst].un_op; - try w.writeInstRef(s, un_op); + try w.writeOperand(s, inst, 0, un_op); } fn writeNoOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -208,7 +212,7 @@ const Writer = struct { fn writeTyOp(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const ty_op = w.air.instructions.items(.data)[inst].ty_op; try s.print("{}, ", .{w.air.getRefType(ty_op.ty)}); - try w.writeInstRef(s, ty_op.operand); + try w.writeOperand(s, inst, 0, ty_op.operand); } fn writeBlock(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -229,7 +233,7 @@ const Writer = struct { const ty_pl = w.air.instructions.items(.data)[inst].ty_pl; const extra = w.air.extraData(Air.StructField, ty_pl.payload); - try w.writeInstRef(s, extra.data.struct_ptr); + try w.writeOperand(s, inst, 0, extra.data.struct_ptr); try s.print(", {d}", .{extra.data.field_index}); } @@ -259,21 +263,21 @@ const Writer = struct { fn writeCall(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const pl_op = w.air.instructions.items(.data)[inst].pl_op; const extra = w.air.extraData(Air.Call, pl_op.payload); - const args = w.air.extra[extra.end..][0..extra.data.args_len]; - try w.writeInstRef(s, pl_op.operand); + const args = @bitCast([]const Air.Inst.Ref, w.air.extra[extra.end..][0..extra.data.args_len]); + try w.writeOperand(s, inst, 0, pl_op.operand); try s.writeAll(", ["); for (args) |arg, i| { if (i != 0) try s.writeAll(", "); - try w.writeInstRef(s, @intToEnum(Air.Inst.Ref, arg)); + try w.writeOperand(s, inst, 1 + i, arg); } try s.writeAll("]"); } fn writeBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { const br = 
w.air.instructions.items(.data)[inst].br; - try w.writeInstIndex(s, br.block_inst); + try w.writeInstIndex(s, br.block_inst, false); try s.writeAll(", "); - try w.writeInstRef(s, br.operand); + try w.writeOperand(s, inst, 0, br.operand); } fn writeCondBr(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { @@ -281,16 +285,35 @@ const Writer = struct { const extra = w.air.extraData(Air.CondBr, pl_op.payload); const then_body = w.air.extra[extra.end..][0..extra.data.then_body_len]; const else_body = w.air.extra[extra.end + then_body.len ..][0..extra.data.else_body_len]; + const liveness_condbr = w.liveness.getCondBr(inst); - try w.writeInstRef(s, pl_op.operand); + try w.writeOperand(s, inst, 0, pl_op.operand); try s.writeAll(", {\n"); const old_indent = w.indent; w.indent += 2; + if (liveness_condbr.then_deaths.len != 0) { + try s.writeByteNTimes(' ', w.indent); + for (liveness_condbr.then_deaths) |operand, i| { + if (i != 0) try s.writeAll(" "); + try s.print("%{d}!", .{operand}); + } + try s.writeAll("\n"); + } + try w.writeBody(s, then_body); try s.writeByteNTimes(' ', old_indent); try s.writeAll("}, {\n"); + if (liveness_condbr.else_deaths.len != 0) { + try s.writeByteNTimes(' ', w.indent); + for (liveness_condbr.else_deaths) |operand, i| { + if (i != 0) try s.writeAll(" "); + try s.print("%{d}!", .{operand}); + } + try s.writeAll("\n"); + } + try w.writeBody(s, else_body); w.indent = old_indent; @@ -304,7 +327,7 @@ const Writer = struct { var extra_index: usize = switch_br.end; var case_i: u32 = 0; - try w.writeInstRef(s, pl_op.operand); + try w.writeOperand(s, inst, 0, pl_op.operand); const old_indent = w.indent; w.indent += 2; @@ -317,7 +340,7 @@ const Writer = struct { try s.writeAll(", ["); for (items) |item, item_i| { if (item_i != 0) try s.writeAll(", "); - try w.writeInstRef(s, item); + try w.writeInstRef(s, item, false); } try s.writeAll("] => {\n"); w.indent += 2; @@ -342,19 +365,49 @@ const Writer = struct { try s.writeAll("}"); } - fn writeInstRef(w: *Writer, s: anytype, inst: Air.Inst.Ref) @TypeOf(s).Error!void { - var i: usize = @enumToInt(inst); + fn writeOperand( + w: *Writer, + s: anytype, + inst: Air.Inst.Index, + op_index: usize, + operand: Air.Inst.Ref, + ) @TypeOf(s).Error!void { + const dies = if (op_index < Liveness.bpi - 1) + w.liveness.operandDies(inst, @intCast(Liveness.OperandInt, op_index)) + else blk: { + // TODO + break :blk false; + }; + return w.writeInstRef(s, operand, dies); + } + + fn writeInstRef( + w: *Writer, + s: anytype, + operand: Air.Inst.Ref, + dies: bool, + ) @TypeOf(s).Error!void { + var i: usize = @enumToInt(operand); if (i < Air.Inst.Ref.typed_value_map.len) { - return s.print("@{}", .{inst}); + return s.print("@{}", .{operand}); } i -= Air.Inst.Ref.typed_value_map.len; - return w.writeInstIndex(s, @intCast(Air.Inst.Index, i)); + return w.writeInstIndex(s, @intCast(Air.Inst.Index, i), dies); } - fn writeInstIndex(w: *Writer, s: anytype, inst: Air.Inst.Index) @TypeOf(s).Error!void { + fn writeInstIndex( + w: *Writer, + s: anytype, + inst: Air.Inst.Index, + dies: bool, + ) @TypeOf(s).Error!void { _ = w; - return s.print("%{d}", .{inst}); + if (dies) { + try s.print("%{d}!", .{inst}); + } else { + try s.print("%{d}", .{inst}); + } } }; From 885477e2dfe1176a31dc0a2e8140a37ffb1237cd Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 19:01:19 -0700 Subject: [PATCH 49/53] stage2: disable wasm switch test case for now The wasm codegen for switch was using br_if opcodes, but it needs to be reworked to use a br_table 
opcode instead. --- test/stage2/wasm.zig | 114 ++++++++++++++++++++++--------------------- 1 file changed, 58 insertions(+), 56 deletions(-) diff --git a/test/stage2/wasm.zig b/test/stage2/wasm.zig index f746be99d2..a43c4e5de3 100644 --- a/test/stage2/wasm.zig +++ b/test/stage2/wasm.zig @@ -479,66 +479,68 @@ pub fn addCases(ctx: *TestContext) !void { , "30\n"); } - { - var case = ctx.exe("wasm switch", wasi); + // This test case is disabled until the codegen for switch is reworked + // to take advantage of br_table rather than a series of br_if opcodes. + //{ + // var case = ctx.exe("wasm switch", wasi); - case.addCompareOutput( - \\pub export fn _start() u32 { - \\ var val: u32 = 1; - \\ var a: u32 = switch (val) { - \\ 0, 1 => 2, - \\ 2 => 3, - \\ 3 => 4, - \\ else => 5, - \\ }; - \\ - \\ return a; - \\} - , "2\n"); + // case.addCompareOutput( + // \\pub export fn _start() u32 { + // \\ var val: u32 = 1; + // \\ var a: u32 = switch (val) { + // \\ 0, 1 => 2, + // \\ 2 => 3, + // \\ 3 => 4, + // \\ else => 5, + // \\ }; + // \\ + // \\ return a; + // \\} + // , "2\n"); - case.addCompareOutput( - \\pub export fn _start() u32 { - \\ var val: u32 = 2; - \\ var a: u32 = switch (val) { - \\ 0, 1 => 2, - \\ 2 => 3, - \\ 3 => 4, - \\ else => 5, - \\ }; - \\ - \\ return a; - \\} - , "3\n"); + // case.addCompareOutput( + // \\pub export fn _start() u32 { + // \\ var val: u32 = 2; + // \\ var a: u32 = switch (val) { + // \\ 0, 1 => 2, + // \\ 2 => 3, + // \\ 3 => 4, + // \\ else => 5, + // \\ }; + // \\ + // \\ return a; + // \\} + // , "3\n"); - case.addCompareOutput( - \\pub export fn _start() u32 { - \\ var val: u32 = 10; - \\ var a: u32 = switch (val) { - \\ 0, 1 => 2, - \\ 2 => 3, - \\ 3 => 4, - \\ else => 5, - \\ }; - \\ - \\ return a; - \\} - , "5\n"); + // case.addCompareOutput( + // \\pub export fn _start() u32 { + // \\ var val: u32 = 10; + // \\ var a: u32 = switch (val) { + // \\ 0, 1 => 2, + // \\ 2 => 3, + // \\ 3 => 4, + // \\ else => 5, + // \\ }; + // \\ + // \\ return a; + // \\} + // , "5\n"); - case.addCompareOutput( - \\const MyEnum = enum { One, Two, Three }; - \\ - \\pub export fn _start() u32 { - \\ var val: MyEnum = .Two; - \\ var a: u32 = switch (val) { - \\ .One => 1, - \\ .Two => 2, - \\ .Three => 3, - \\ }; - \\ - \\ return a; - \\} - , "2\n"); - } + // case.addCompareOutput( + // \\const MyEnum = enum { One, Two, Three }; + // \\ + // \\pub export fn _start() u32 { + // \\ var val: MyEnum = .Two; + // \\ var a: u32 = switch (val) { + // \\ .One => 1, + // \\ .Two => 2, + // \\ .Three => 3, + // \\ }; + // \\ + // \\ return a; + // \\} + // , "2\n"); + //} { var case = ctx.exe("wasm error unions", wasi); From f7ee3b4ca5339791b08d20568270908d5e71e7ea Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 19:03:21 -0700 Subject: [PATCH 50/53] std.Progress: revert to the older strategy This reverts the most recent big changes to `std.Progress` changing the strategy for printing. Before the changes, it would leave the cursor after the progress line, having better behavior when a stray print happened, and supporting sub-process progress without any coordination. After the changes, the cursor was left at the beginning of the line, making any prints print garbage and often interfering with stack traces or other debug information. This commit reverts to before the changes. Revert "std: Use more common escape sequences in Progress" This reverts commit 8ebb18d9da0bfbe6a974636fd36e3391d1de253b. 
Revert "Handle some weird edge cases of Win32 API" This reverts commit b0724a350f07c5e2e8fab572951ffaaa92860b2c. Revert "Fix many thinkos" This reverts commit b5a50a26ebac6a08dacf79f5d1db9bdd94ba33a5. Revert "Fix Progress printing on Windows systems" This reverts commit 3010bfb08af0b47d801d492e4f2e21a988e8399a. Revert "std: Better handling of line-wrapping in Progress" This reverts commit 4fc2e92876d8aafd087a5f0bdb6ea7a54f195704. --- lib/std/Progress.zig | 127 +++++++++++++++++++------------------------ 1 file changed, 57 insertions(+), 70 deletions(-) diff --git a/lib/std/Progress.zig b/lib/std/Progress.zig index 9afb93348a..ba60f91233 100644 --- a/lib/std/Progress.zig +++ b/lib/std/Progress.zig @@ -63,6 +63,10 @@ done: bool = true, /// while it was still being accessed by the `refresh` function. update_lock: std.Thread.Mutex = .{}, +/// Keeps track of how many columns in the terminal have been output, so that +/// we can move the cursor back later. +columns_written: usize = undefined, + /// Represents one unit of progress. Each node can have children nodes, or /// one can use integers with `update`. pub const Node = struct { @@ -160,6 +164,7 @@ pub fn start(self: *Progress, name: []const u8, estimated_total_items: usize) !* .unprotected_estimated_total_items = estimated_total_items, .unprotected_completed_items = 0, }; + self.columns_written = 0; self.prev_refresh_timestamp = 0; self.timer = try std.time.Timer.start(); self.done = false; @@ -187,15 +192,6 @@ pub fn refresh(self: *Progress) void { return self.refreshWithHeldLock(); } -// ED -- Clear screen -const ED = "\x1b[J"; -// DECSC -- Save cursor position -const DECSC = "\x1b7"; -// DECRC -- Restore cursor position -const DECRC = "\x1b8"; -// Note that ESC7/ESC8 are used instead of CSI s/CSI u as the latter are not -// supported by some terminals (eg. Terminal.app). - fn refreshWithHeldLock(self: *Progress) void { const is_dumb = !self.supports_ansi_escape_codes and !self.is_windows_terminal; if (is_dumb and self.dont_print_on_dumb) return; @@ -203,54 +199,59 @@ fn refreshWithHeldLock(self: *Progress) void { const file = self.terminal orelse return; var end: usize = 0; - // Save the cursor position and clear the part of the screen below. - // Clearing only the line is not enough as the terminal may wrap the line - // when it becomes too long. 
- var saved_cursor_pos: windows.COORD = undefined; - if (self.supports_ansi_escape_codes) { - const seq_before = DECSC ++ ED; - std.mem.copy(u8, self.output_buffer[end..], seq_before); - end += seq_before.len; - } else if (std.builtin.os.tag == .windows) winapi: { - std.debug.assert(self.is_windows_terminal); + if (self.columns_written > 0) { + // restore the cursor position by moving the cursor + // `columns_written` cells to the left, then clear the rest of the + // line + if (self.supports_ansi_escape_codes) { + end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[{d}D", .{self.columns_written}) catch unreachable).len; + end += (std.fmt.bufPrint(self.output_buffer[end..], "\x1b[0K", .{}) catch unreachable).len; + } else if (std.builtin.os.tag == .windows) winapi: { + std.debug.assert(self.is_windows_terminal); - var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; - if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) - unreachable; + var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined; + if (windows.kernel32.GetConsoleScreenBufferInfo(file.handle, &info) != windows.TRUE) + unreachable; - saved_cursor_pos = info.dwCursorPosition; + var cursor_pos = windows.COORD{ + .X = info.dwCursorPosition.X - @intCast(windows.SHORT, self.columns_written), + .Y = info.dwCursorPosition.Y, + }; - const window_height = @intCast(windows.DWORD, info.srWindow.Bottom - info.srWindow.Top + 1); - const window_width = @intCast(windows.DWORD, info.srWindow.Right - info.srWindow.Left + 1); - // Number of terminal cells to clear, starting from the cursor position - // and ending at the window bottom right corner. - const fill_chars = if (window_width == 0 or window_height == 0) 0 else chars: { - break :chars window_width * (window_height - - @intCast(windows.DWORD, info.dwCursorPosition.Y - info.srWindow.Top)) - - @intCast(windows.DWORD, info.dwCursorPosition.X - info.srWindow.Left); - }; + if (cursor_pos.X < 0) + cursor_pos.X = 0; - var written: windows.DWORD = undefined; - if (windows.kernel32.FillConsoleOutputAttribute( - file.handle, - info.wAttributes, - fill_chars, - saved_cursor_pos, - &written, - ) != windows.TRUE) { - // Stop trying to write to this file. - self.terminal = null; - break :winapi; - } - if (windows.kernel32.FillConsoleOutputCharacterW( - file.handle, - ' ', - fill_chars, - saved_cursor_pos, - &written, - ) != windows.TRUE) { - unreachable; + const fill_chars = @intCast(windows.DWORD, info.dwSize.X - cursor_pos.X); + + var written: windows.DWORD = undefined; + if (windows.kernel32.FillConsoleOutputAttribute( + file.handle, + info.wAttributes, + fill_chars, + cursor_pos, + &written, + ) != windows.TRUE) { + // Stop trying to write to this file. + self.terminal = null; + break :winapi; + } + if (windows.kernel32.FillConsoleOutputCharacterW( + file.handle, + ' ', + fill_chars, + cursor_pos, + &written, + ) != windows.TRUE) unreachable; + + if (windows.kernel32.SetConsoleCursorPosition(file.handle, cursor_pos) != windows.TRUE) + unreachable; + } else { + // we are in a "dumb" terminal like in acme or writing to a file + self.output_buffer[end] = '\n'; + end += 1; } + + self.columns_written = 0; } if (!self.done) { @@ -285,28 +286,10 @@ fn refreshWithHeldLock(self: *Progress) void { } } - // We're done printing the updated message, restore the cursor position. 
- if (self.supports_ansi_escape_codes) { - const seq_after = DECRC; - std.mem.copy(u8, self.output_buffer[end..], seq_after); - end += seq_after.len; - } else if (!self.is_windows_terminal) { - self.output_buffer[end] = '\n'; - end += 1; - } - _ = file.write(self.output_buffer[0..end]) catch { // Stop trying to write to this file once it errors. self.terminal = null; }; - - if (std.builtin.os.tag == .windows) { - if (self.is_windows_terminal) { - const res = windows.kernel32.SetConsoleCursorPosition(file.handle, saved_cursor_pos); - std.debug.assert(res == windows.TRUE); - } - } - self.prev_refresh_timestamp = self.timer.read(); } @@ -317,14 +300,17 @@ pub fn log(self: *Progress, comptime format: []const u8, args: anytype) void { self.terminal = null; return; }; + self.columns_written = 0; } fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: anytype) void { if (std.fmt.bufPrint(self.output_buffer[end.*..], format, args)) |written| { const amt = written.len; end.* += amt; + self.columns_written += amt; } else |err| switch (err) { error.NoSpaceLeft => { + self.columns_written += self.output_buffer.len - end.*; end.* = self.output_buffer.len; }, } @@ -332,6 +318,7 @@ fn bufWrite(self: *Progress, end: *usize, comptime format: []const u8, args: any const max_end = self.output_buffer.len - bytes_needed_for_esc_codes_at_end; if (end.* > max_end) { const suffix = "... "; + self.columns_written = self.columns_written - (end.* - max_end) + suffix.len; std.mem.copy(u8, self.output_buffer[max_end..], suffix); end.* = max_end + suffix.len; } From bf09dd87b6bde0c7af6b9415661be07b250afa27 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 19:30:30 -0700 Subject: [PATCH 51/53] codegen: fix lowering of AIR br instruction It incorrectly did not process the death of its operand. --- src/codegen.zig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/codegen.zig b/src/codegen.zig index 84a47a70ac..bf7f167849 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3404,7 +3404,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn airBr(self: *Self, inst: Air.Inst.Index) !void { const branch = self.air.instructions.items(.data)[inst].br; try self.br(branch.block_inst, branch.operand); - return self.finishAirBookkeeping(); + return self.finishAir(inst, .dead, .{ branch.operand, .none, .none }); } fn airBoolOp(self: *Self, inst: Air.Inst.Index) !void { From d15dd78abd058b13c82156e5e19ec716e860889e Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 19:42:59 -0700 Subject: [PATCH 52/53] Sema: fix regression in merging error sets When updating the code, I accidentally made it look at the fact that the error set operands were a `type` rather than looking at exactly which error set types they were. 
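For reference, `||` merges two error set types at compile time; a minimal example of the semantics this fix restores (names are illustrative):

    const A = error{NotFound};
    const B = error{ NotFound, OutOfMemory };
    const C = A || B; // error{ NotFound, OutOfMemory }

Merging anything with `anyerror` yields `anyerror`; handling cases like this requires resolving each operand to its concrete error set type, not merely observing that the operand's type is `type`.
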
--- src/Sema.zig | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 826097c3d1..59534cb74a 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -2625,9 +2625,7 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com const rhs_src: LazySrcLoc = .{ .node_offset_bin_rhs = inst_data.src_node }; const lhs = sema.resolveInst(extra.lhs); const rhs = sema.resolveInst(extra.rhs); - const lhs_ty = sema.typeOf(lhs); - const rhs_ty = sema.typeOf(rhs); - if (rhs_ty.zigTypeTag() == .Bool and lhs_ty.zigTypeTag() == .Bool) { + if (sema.typeOf(lhs).zigTypeTag() == .Bool and sema.typeOf(rhs).zigTypeTag() == .Bool) { const msg = msg: { const msg = try sema.mod.errMsg(&block.base, lhs_src, "expected error set type, found 'bool'", .{}); errdefer msg.destroy(sema.gpa); @@ -2636,10 +2634,12 @@ fn zirMergeErrorSets(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com }; return sema.mod.failWithOwnedErrorMsg(&block.base, msg); } - if (rhs_ty.zigTypeTag() != .ErrorSet) - return sema.mod.fail(&block.base, rhs_src, "expected error set type, found {}", .{rhs_ty}); + const lhs_ty = try sema.analyzeAsType(block, lhs_src, lhs); + const rhs_ty = try sema.analyzeAsType(block, rhs_src, rhs); if (lhs_ty.zigTypeTag() != .ErrorSet) return sema.mod.fail(&block.base, lhs_src, "expected error set type, found {}", .{lhs_ty}); + if (rhs_ty.zigTypeTag() != .ErrorSet) + return sema.mod.fail(&block.base, rhs_src, "expected error set type, found {}", .{rhs_ty}); // Anything merged with anyerror is anyerror. if (lhs_ty.tag() == .anyerror or rhs_ty.tag() == .anyerror) { From c39c46c0d12b15874b1586ff47cf473b31867918 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 20 Jul 2021 20:05:54 -0700 Subject: [PATCH 53/53] stage2 tests: respect -Dskip-non-native for object formats --- src/test.zig | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/test.zig b/src/test.zig index ef37fd0065..75a504b817 100644 --- a/src/test.zig +++ b/src/test.zig @@ -1,4 +1,5 @@ const std = @import("std"); +const builtin = @import("builtin"); const link = @import("link.zig"); const Compilation = @import("Compilation.zig"); const Allocator = std.mem.Allocator; @@ -610,7 +611,7 @@ pub const TestContext = struct { fn run(self: *TestContext) !void { var progress = std.Progress{}; - const root_node = try progress.start("tests", self.cases.items.len); + const root_node = try progress.start("compiler", self.cases.items.len); defer root_node.end(); var zig_lib_directory = try introspect.findZigLibDir(std.testing.allocator); @@ -640,8 +641,12 @@ pub const TestContext = struct { var fail_count: usize = 0; for (self.cases.items) |case| { - if (build_options.skip_non_native and case.target.getCpuArch() != std.Target.current.cpu.arch) - continue; + if (build_options.skip_non_native) { + if (case.target.getCpuArch() != builtin.cpu.arch) + continue; + if (case.target.getObjectFormat() != builtin.object_format) + continue; + } // Skip tests that require LLVM backend when it is not available if (!build_options.have_llvm and case.backend == .llvm)