diff --git a/BRANCH_TODO b/BRANCH_TODO index be3959e035..585c8adf44 100644 --- a/BRANCH_TODO +++ b/BRANCH_TODO @@ -568,3 +568,125 @@ const DumpAir = struct { } } }; + +pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst { + _ = mod; + const const_inst = try arena.create(ir.Inst.Constant); + const_inst.* = .{ + .base = .{ + .tag = ir.Inst.Constant.base_tag, + .ty = typed_value.ty, + .src = src, + }, + .val = typed_value.val, + }; + return &const_inst.base; +} + +pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.type), + .val = try ty.toValue(arena), + }); +} + +pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.void), + .val = Value.initTag(.void_value), + }); +} + +pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.noreturn), + .val = Value.initTag(.unreachable_value), + }); +} + +pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = Value.initTag(.undef), + }); +} + +pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = Type.initTag(.bool), + .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], + }); +} + +pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_u64.create(arena, int), + }); +} + +pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst { + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_i64.create(arena, int), + }); +} + +pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst { + if (big_int.positive) { + if (big_int.to(u64)) |x| { + return mod.constIntUnsigned(arena, src, ty, x); + } else |err| switch (err) { + error.NegativeIntoUnsigned => unreachable, + error.TargetTooSmall => {}, // handled below + } + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs), + }); + } else { + if (big_int.to(i64)) |x| { + return mod.constIntSigned(arena, src, ty, x); + } else |err| switch (err) { + error.NegativeIntoUnsigned => unreachable, + error.TargetTooSmall => {}, // handled below + } + return mod.constInst(arena, src, .{ + .ty = ty, + .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs), + }); + } +} + +pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { + const zir_module = scope.namespace(); + const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); + const loc = std.zig.findLineColumn(source, inst.src); + if (inst.tag == .constant) { + std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ + inst.ty, + inst.castTag(.constant).?.val, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } else if (inst.deaths == 0) { + std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ + @tagName(inst.tag), + inst.ty, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } else { + std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ + 
@tagName(inst.tag), + inst.ty, + inst.deaths, + zir_module.subFilePath(), + loc.line + 1, + loc.column + 1, + }); + } +} + diff --git a/src/Air.zig b/src/Air.zig index 112845559d..e85f2e5c43 100644 --- a/src/Air.zig +++ b/src/Air.zig @@ -29,8 +29,11 @@ pub const Inst = struct { data: Data, pub const Tag = enum(u8) { - /// The first N instructions in Air must be one arg instruction per function parameter. - /// Uses the `ty` field. + /// The first N instructions in the main block must be one arg instruction per + /// function parameter. This makes function parameters participate in + /// liveness analysis without any special handling. + /// Uses the `ty_str` field. + /// The string is the parameter name. arg, /// Float or integer addition. For integers, wrapping is undefined behavior. /// Both operands are guaranteed to be the same type, and the result type @@ -131,6 +134,8 @@ pub const Inst = struct { /// A comptime-known value. Uses the `ty_pl` field, payload is index of /// `values` array. constant, + /// A comptime-known type. Uses the `ty` field. + const_ty, /// Notes the beginning of a source code statement and marks the line and column. /// Result type is always void. /// Uses the `dbg_stmt` field. @@ -289,6 +294,11 @@ pub const Inst = struct { // Index into a different array. payload: u32, }, + ty_str: struct { + ty: Ref, + // ZIR string table index. + str: u32, + }, br: struct { block_inst: Index, operand: Ref, diff --git a/src/AstGen.zig b/src/AstGen.zig index 19906c94d3..24766aaf60 100644 --- a/src/AstGen.zig +++ b/src/AstGen.zig @@ -9821,7 +9821,7 @@ fn advanceSourceCursor(astgen: *AstGen, source: []const u8, end: usize) void { astgen.source_column = column; } -const ref_start_index = Zir.Inst.Ref.typed_value_map.len; +const ref_start_index: u32 = Zir.Inst.Ref.typed_value_map.len; fn indexToRef(inst: Zir.Inst.Index) Zir.Inst.Ref { return @intToEnum(Zir.Inst.Ref, ref_start_index + inst); diff --git a/src/Module.zig b/src/Module.zig index 6273243ee2..8971a57487 100644 --- a/src/Module.zig +++ b/src/Module.zig @@ -1155,7 +1155,7 @@ pub const Scope = struct { /// This can vary during inline or comptime function calls. See `Sema.owner_decl` /// for the one that will be the same for all Block instances. src_decl: *Decl, - instructions: ArrayListUnmanaged(*ir.Inst), + instructions: ArrayListUnmanaged(Air.Inst.Index), label: ?*Label = null, inlining: ?*Inlining, /// If runtime_index is not 0 then one of these is guaranteed to be non null. @@ -1187,14 +1187,14 @@ pub const Scope = struct { }; pub const Merges = struct { - block_inst: *ir.Inst.Block, + block_inst: Air.Inst.Index, /// Separate array list from break_inst_list so that it can be passed directly /// to resolvePeerTypes. - results: ArrayListUnmanaged(*ir.Inst), + results: ArrayListUnmanaged(Air.Inst.Index), /// Keeps track of the break instructions so that the operand can be replaced /// if we need to add type coercion at the end of block analysis. /// Same indexes, capacity, length as `results`. - br_list: ArrayListUnmanaged(*ir.Inst.Br), + br_list: ArrayListUnmanaged(Air.Inst.Index), }; /// For debugging purposes. 
@@ -1230,187 +1230,6 @@ pub const Scope = struct { pub fn getFileScope(block: *Block) *Scope.File { return block.src_decl.namespace.file_scope; } - - pub fn addNoOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - comptime tag: ir.Inst.Tag, - ) !*ir.Inst { - const inst = try block.sema.arena.create(tag.Type()); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addUnOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - tag: ir.Inst.Tag, - operand: *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.UnOp); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - .operand = operand, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addBinOp( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - tag: ir.Inst.Tag, - lhs: *ir.Inst, - rhs: *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.BinOp); - inst.* = .{ - .base = .{ - .tag = tag, - .ty = ty, - .src = src, - }, - .lhs = lhs, - .rhs = rhs, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addBr( - scope_block: *Scope.Block, - src: LazySrcLoc, - target_block: *ir.Inst.Block, - operand: *ir.Inst, - ) !*ir.Inst.Br { - const inst = try scope_block.sema.arena.create(ir.Inst.Br); - inst.* = .{ - .base = .{ - .tag = .br, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .operand = operand, - .block = target_block, - }; - try scope_block.instructions.append(scope_block.sema.gpa, &inst.base); - return inst; - } - - pub fn addCondBr( - block: *Scope.Block, - src: LazySrcLoc, - condition: *ir.Inst, - then_body: ir.Body, - else_body: ir.Body, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.CondBr); - inst.* = .{ - .base = .{ - .tag = .condbr, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .condition = condition, - .then_body = then_body, - .else_body = else_body, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addCall( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - func: *ir.Inst, - args: []const *ir.Inst, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.Call); - inst.* = .{ - .base = .{ - .tag = .call, - .ty = ty, - .src = src, - }, - .func = func, - .args = args, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addSwitchBr( - block: *Scope.Block, - src: LazySrcLoc, - operand: *ir.Inst, - cases: []ir.Inst.SwitchBr.Case, - else_body: ir.Body, - ) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.SwitchBr); - inst.* = .{ - .base = .{ - .tag = .switchbr, - .ty = Type.initTag(.noreturn), - .src = src, - }, - .target = operand, - .cases = cases, - .else_body = else_body, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addDbgStmt(block: *Scope.Block, src: LazySrcLoc, line: u32, column: u32) !*ir.Inst { - const inst = try block.sema.arena.create(ir.Inst.DbgStmt); - inst.* = .{ - .base = .{ - .tag = .dbg_stmt, - .ty = Type.initTag(.void), - .src = src, - }, - .line = line, - .column = column, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } - - pub fn addStructFieldPtr( - block: *Scope.Block, - src: LazySrcLoc, - ty: Type, - struct_ptr: *ir.Inst, - field_index: u32, - ) !*ir.Inst { - 
const inst = try block.sema.arena.create(ir.Inst.StructFieldPtr); - inst.* = .{ - .base = .{ - .tag = .struct_field_ptr, - .ty = ty, - .src = src, - }, - .struct_ptr = struct_ptr, - .field_index = field_index, - }; - try block.instructions.append(block.sema.gpa, &inst.base); - return &inst.base; - } }; }; @@ -3594,30 +3413,14 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { defer decl.value_arena.?.* = arena.state; const fn_ty = decl.ty; - const param_inst_list = try gpa.alloc(*ir.Inst, fn_ty.fnParamLen()); + const param_inst_list = try gpa.alloc(Air.Inst.Index, fn_ty.fnParamLen()); defer gpa.free(param_inst_list); - for (param_inst_list) |*param_inst, param_index| { - const param_type = fn_ty.fnParamType(param_index); - const arg_inst = try arena.allocator.create(ir.Inst.Arg); - arg_inst.* = .{ - .base = .{ - .tag = .arg, - .ty = param_type, - .src = .unneeded, - }, - .name = undefined, // Set in the semantic analysis of the arg instruction. - }; - param_inst.* = &arg_inst.base; - } - - const zir = decl.namespace.file_scope.zir; - var sema: Sema = .{ .mod = mod, .gpa = gpa, .arena = &arena.allocator, - .code = zir, + .code = decl.namespace.file_scope.zir, .owner_decl = decl, .namespace = decl.namespace, .func = func, @@ -3641,7 +3444,21 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { }; defer inner_block.instructions.deinit(gpa); - // AIR currently requires the arg parameters to be the first N instructions + // AIR requires the arg parameters to be the first N instructions. + for (param_inst_list) |*param_inst, param_index| { + const param_type = fn_ty.fnParamType(param_index); + const ty_ref = try sema.addType(param_type); + param_inst.* = @intCast(u32, sema.air_instructions.len); + try sema.air_instructions.append(gpa, .{ + .tag = .arg, + .data = .{ + .ty_str = .{ + .ty = ty_ref, + .str = undefined, // Set in the semantic analysis of the arg instruction. + }, + }, + }); + } try inner_block.instructions.appendSlice(gpa, param_inst_list); func.state = .in_progress; @@ -3650,17 +3467,21 @@ pub fn analyzeFnBody(mod: *Module, decl: *Decl, func: *Fn) !Air { try sema.analyzeFnBody(&inner_block, func.zir_body_inst); // Copy the block into place and mark that as the main block. 
- sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = sema.air_extra.items.len; - try sema.air_extra.appendSlice(inner_block.instructions.items); + try sema.air_extra.ensureUnusedCapacity(gpa, inner_block.instructions.items.len + 1); + const main_block_index = sema.addExtraAssumeCapacity(Air.Block{ + .body_len = @intCast(u32, inner_block.instructions.items.len), + }); + sema.air_extra.appendSliceAssumeCapacity(inner_block.instructions.items); + sema.air_extra.items[@enumToInt(Air.ExtraIndex.main_block)] = main_block_index; func.state = .success; log.debug("set {s} to success", .{decl.name}); return Air{ .instructions = sema.air_instructions.toOwnedSlice(), - .extra = sema.air_extra.toOwnedSlice(), - .values = sema.air_values.toOwnedSlice(), - .variables = sema.air_variables.toOwnedSlice(), + .extra = sema.air_extra.toOwnedSlice(gpa), + .values = sema.air_values.toOwnedSlice(gpa), + .variables = sema.air_variables.toOwnedSlice(gpa), }; } @@ -3815,94 +3636,6 @@ pub fn analyzeExport( de_gop.value_ptr.*[de_gop.value_ptr.len - 1] = new_export; errdefer de_gop.value_ptr.* = mod.gpa.shrink(de_gop.value_ptr.*, de_gop.value_ptr.len - 1); } -pub fn constInst(mod: *Module, arena: *Allocator, src: LazySrcLoc, typed_value: TypedValue) !*ir.Inst { - _ = mod; - const const_inst = try arena.create(ir.Inst.Constant); - const_inst.* = .{ - .base = .{ - .tag = ir.Inst.Constant.base_tag, - .ty = typed_value.ty, - .src = src, - }, - .val = typed_value.val, - }; - return &const_inst.base; -} - -pub fn constType(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.type), - .val = try ty.toValue(arena), - }); -} - -pub fn constVoid(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), - }); -} - -pub fn constNoReturn(mod: *Module, arena: *Allocator, src: LazySrcLoc) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.noreturn), - .val = Value.initTag(.unreachable_value), - }); -} - -pub fn constUndef(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = Value.initTag(.undef), - }); -} - -pub fn constBool(mod: *Module, arena: *Allocator, src: LazySrcLoc, v: bool) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = Type.initTag(.bool), - .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], - }); -} - -pub fn constIntUnsigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: u64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_u64.create(arena, int), - }); -} - -pub fn constIntSigned(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, int: i64) !*ir.Inst { - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_i64.create(arena, int), - }); -} - -pub fn constIntBig(mod: *Module, arena: *Allocator, src: LazySrcLoc, ty: Type, big_int: BigIntConst) !*ir.Inst { - if (big_int.positive) { - if (big_int.to(u64)) |x| { - return mod.constIntUnsigned(arena, src, ty, x); - } else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_positive.create(arena, big_int.limbs), - }); - } else { - if (big_int.to(i64)) |x| { - return mod.constIntSigned(arena, src, ty, x); - } 
else |err| switch (err) { - error.NegativeIntoUnsigned => unreachable, - error.TargetTooSmall => {}, // handled below - } - return mod.constInst(arena, src, .{ - .ty = ty, - .val = try Value.Tag.int_big_negative.create(arena, big_int.limbs), - }); - } -} pub fn deleteAnonDecl(mod: *Module, scope: *Scope, decl: *Decl) void { const scope_decl = scope.ownerDecl().?; @@ -4438,38 +4171,6 @@ pub fn errorUnionType( }); } -pub fn dumpInst(mod: *Module, scope: *Scope, inst: *ir.Inst) void { - const zir_module = scope.namespace(); - const source = zir_module.getSource(mod) catch @panic("dumpInst failed to get source"); - const loc = std.zig.findLineColumn(source, inst.src); - if (inst.tag == .constant) { - std.debug.print("constant ty={} val={} src={s}:{d}:{d}\n", .{ - inst.ty, - inst.castTag(.constant).?.val, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else if (inst.deaths == 0) { - std.debug.print("{s} ty={} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } else { - std.debug.print("{s} ty={} deaths={b} src={s}:{d}:{d}\n", .{ - @tagName(inst.tag), - inst.ty, - inst.deaths, - zir_module.subFilePath(), - loc.line + 1, - loc.column + 1, - }); - } -} - pub fn getTarget(mod: Module) Target { return mod.comp.bin_file.options.target; } diff --git a/src/Sema.zig b/src/Sema.zig index b4e10837af..d7ec01696f 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -12,9 +12,9 @@ gpa: *Allocator, arena: *Allocator, code: Zir, air_instructions: std.MultiArrayList(Air.Inst) = .{}, -air_extra: ArrayListUnmanaged(u32) = .{}, -air_values: ArrayListUnmanaged(Value) = .{}, -air_variables: ArrayListUnmanaged(Module.Var) = .{}, +air_extra: std.ArrayListUnmanaged(u32) = .{}, +air_values: std.ArrayListUnmanaged(Value) = .{}, +air_variables: std.ArrayListUnmanaged(*Module.Var) = .{}, /// Maps ZIR to AIR. inst_map: InstMap = .{}, /// When analyzing an inline function call, owner_decl is the Decl of the caller @@ -1263,15 +1263,16 @@ fn zirArg(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) InnerError!Air sema.next_arg_index += 1; // TODO check if arg_name shadows a Decl + _ = arg_name; if (block.inlining) |_| { return sema.param_inst_list[arg_index]; } - // Need to set the name of the Air.Arg instruction. - const air_arg = sema.param_inst_list[arg_index].castTag(.arg).?; - air_arg.name = arg_name; - return &air_arg.base; + // Set the name of the Air.Arg instruction for use by codegen debug info. 
+ const air_arg = sema.param_inst_list[arg_index]; + sema.air_instructions.items(.data)[air_arg].ty_str.str = inst_data.start; + return air_arg; } fn zirAllocExtended( @@ -7940,3 +7941,102 @@ fn enumFieldSrcLoc( } } else unreachable; } + +pub fn addType(sema: *Sema, ty: Type) !Air.Inst.Ref { + switch (ty.tag()) { + .u8 => return .u8_type, + .i8 => return .i8_type, + .u16 => return .u16_type, + .i16 => return .i16_type, + .u32 => return .u32_type, + .i32 => return .i32_type, + .u64 => return .u64_type, + .i64 => return .i64_type, + .u128 => return .u128_type, + .i128 => return .i128_type, + .usize => return .usize_type, + .isize => return .isize_type, + .c_short => return .c_short_type, + .c_ushort => return .c_ushort_type, + .c_int => return .c_int_type, + .c_uint => return .c_uint_type, + .c_long => return .c_long_type, + .c_ulong => return .c_ulong_type, + .c_longlong => return .c_longlong_type, + .c_ulonglong => return .c_ulonglong_type, + .c_longdouble => return .c_longdouble_type, + .f16 => return .f16_type, + .f32 => return .f32_type, + .f64 => return .f64_type, + .f128 => return .f128_type, + .c_void => return .c_void_type, + .bool => return .bool_type, + .void => return .void_type, + .type => return .type_type, + .anyerror => return .anyerror_type, + .comptime_int => return .comptime_int_type, + .comptime_float => return .comptime_float_type, + .noreturn => return .noreturn_type, + .@"anyframe" => return .anyframe_type, + .@"null" => return .null_type, + .@"undefined" => return .undefined_type, + .enum_literal => return .enum_literal_type, + .atomic_ordering => return .atomic_ordering_type, + .atomic_rmw_op => return .atomic_rmw_op_type, + .calling_convention => return .calling_convention_type, + .float_mode => return .float_mode_type, + .reduce_op => return .reduce_op_type, + .call_options => return .call_options_type, + .export_options => return .export_options_type, + .extern_options => return .extern_options_type, + .manyptr_u8 => return .manyptr_u8_type, + .manyptr_const_u8 => return .manyptr_const_u8_type, + .fn_noreturn_no_args => return .fn_noreturn_no_args_type, + .fn_void_no_args => return .fn_void_no_args_type, + .fn_naked_noreturn_no_args => return .fn_naked_noreturn_no_args_type, + .fn_ccc_void_no_args => return .fn_ccc_void_no_args_type, + .single_const_pointer_to_comptime_int => return .single_const_pointer_to_comptime_int_type, + .const_slice_u8 => return .const_slice_u8_type, + else => {}, + } + try sema.air_instructions.append(sema.gpa, .{ + .tag = .const_ty, + .data = .{ .ty = ty }, + }); + return indexToRef(@intCast(u32, sema.air_instructions.len - 1)); +} + +const ref_start_index: u32 = Air.Inst.Ref.typed_value_map.len; + +fn indexToRef(inst: Air.Inst.Index) Air.Inst.Ref { + return @intToEnum(Air.Inst.Ref, ref_start_index + inst); +} + +fn refToIndex(inst: Air.Inst.Ref) ?Air.Inst.Index { + const ref_int = @enumToInt(inst); + if (ref_int >= ref_start_index) { + return ref_int - ref_start_index; + } else { + return null; + } +} + +pub fn addExtra(sema: *Sema, extra: anytype) Allocator.Error!u32 { + const fields = std.meta.fields(@TypeOf(extra)); + try sema.air_extra.ensureUnusedCapacity(sema.gpa, fields.len); + return addExtraAssumeCapacity(sema, extra); +} + +pub fn addExtraAssumeCapacity(sema: *Sema, extra: anytype) u32 { + const fields = std.meta.fields(@TypeOf(extra)); + const result = @intCast(u32, sema.air_extra.items.len); + inline for (fields) |field| { + sema.air_extra.appendAssumeCapacity(switch (field.field_type) { + u32 => @field(extra, field.name),
Air.Inst.Ref => @enumToInt(@field(extra, field.name)), + i32 => @bitCast(u32, @field(extra, field.name)), + else => @compileError("bad field type"), + }); + } + return result; +} diff --git a/src/codegen.zig b/src/codegen.zig index 65e85702e5..eaf910977e 100644 --- a/src/codegen.zig +++ b/src/codegen.zig @@ -3,6 +3,7 @@ const mem = std.mem; const math = std.math; const assert = std.debug.assert; const Air = @import("Air.zig"); +const Liveness = @import("Liveness.zig"); const Type = @import("type.zig").Type; const Value = @import("value.zig").Value; const TypedValue = @import("TypedValue.zig"); @@ -45,6 +46,71 @@ pub const DebugInfoOutput = union(enum) { none, }; +pub fn generateFunction( + bin_file: *link.File, + src_loc: Module.SrcLoc, + func: *Module.Fn, + air: Air, + liveness: Liveness, + code: *std.ArrayList(u8), + debug_output: DebugInfoOutput, +) GenerateSymbolError!Result { + switch (bin_file.options.target.cpu.arch) { + .wasm32 => unreachable, // has its own code path + .wasm64 => unreachable, // has its own code path + .arm => return Function(.arm).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .armeb => return Function(.armeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64 => return Function(.aarch64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64_be => return Function(.aarch64_be).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .aarch64_32 => return Function(.aarch64_32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.arc => return Function(.arc).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.avr => return Function(.avr).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.bpfel => return Function(.bpfel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.bpfeb => return Function(.bpfeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hexagon => return Function(.hexagon).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips => return Function(.mips).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mipsel => return Function(.mipsel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips64 => return Function(.mips64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.mips64el => return Function(.mips64el).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.msp430 => return Function(.msp430).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc => return Function(.powerpc).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc64 => return Function(.powerpc64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.powerpc64le => return Function(.powerpc64le).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.r600 => return Function(.r600).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdgcn => return Function(.amdgcn).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.riscv32 => return Function(.riscv32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .riscv64 => return Function(.riscv64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.sparc => return Function(.sparc).generate(bin_file, src_loc, func, air, liveness, 
code, debug_output), + //.sparcv9 => return Function(.sparcv9).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.sparcel => return Function(.sparcel).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.s390x => return Function(.s390x).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.tce => return Function(.tce).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.tcele => return Function(.tcele).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.thumb => return Function(.thumb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.thumbeb => return Function(.thumbeb).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.i386 => return Function(.i386).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + .x86_64 => return Function(.x86_64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.xcore => return Function(.xcore).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.nvptx => return Function(.nvptx).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.nvptx64 => return Function(.nvptx64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.le32 => return Function(.le32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.le64 => return Function(.le64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdil => return Function(.amdil).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.amdil64 => return Function(.amdil64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hsail => return Function(.hsail).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.hsail64 => return Function(.hsail64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.spir => return Function(.spir).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.spir64 => return Function(.spir64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.kalimba => return Function(.kalimba).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.shave => return Function(.shave).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.lanai => return Function(.lanai).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.renderscript32 => return Function(.renderscript32).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.renderscript64 => return Function(.renderscript64).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + //.ve => return Function(.ve).generate(bin_file, src_loc, func, air, liveness, code, debug_output), + else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. 
Eventually these will be completed, but stage1 is slow and a memory hog."), + } +} + pub fn generateSymbol( bin_file: *link.File, src_loc: Module.SrcLoc, @@ -57,60 +123,14 @@ pub fn generateSymbol( switch (typed_value.ty.zigTypeTag()) { .Fn => { - switch (bin_file.options.target.cpu.arch) { - .wasm32 => unreachable, // has its own code path - .wasm64 => unreachable, // has its own code path - .arm => return Function(.arm).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .armeb => return Function(.armeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64 => return Function(.aarch64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.arc => return Function(.arc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.avr => return Function(.avr).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.bpfel => return Function(.bpfel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hexagon => return Function(.hexagon).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips => return Function(.mips).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mipsel => return Function(.mipsel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips64 => return Function(.mips64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.mips64el => return Function(.mips64el).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.msp430 => return Function(.msp430).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc => return Function(.powerpc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.r600 => return Function(.r600).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .riscv64 => return Function(.riscv64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparc => return Function(.sparc).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.sparcel => return Function(.sparcel).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.s390x => return Function(.s390x).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.tce => return Function(.tce).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.tcele => return Function(.tcele).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.thumb => return Function(.thumb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.thumbeb => return 
Function(.thumbeb).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.i386 => return Function(.i386).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - .x86_64 => return Function(.x86_64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.xcore => return Function(.xcore).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.nvptx => return Function(.nvptx).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.le32 => return Function(.le32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.le64 => return Function(.le64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdil => return Function(.amdil).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hsail => return Function(.hsail).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.spir => return Function(.spir).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.spir64 => return Function(.spir64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.kalimba => return Function(.kalimba).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.shave => return Function(.shave).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.lanai => return Function(.lanai).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - //.ve => return Function(.ve).generateSymbol(bin_file, src_loc, typed_value, code, debug_output), - else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. 
Eventually these will be completed, but stage1 is slow and a memory hog."), - } + return Result{ + .fail = try ErrorMsg.create( + bin_file.allocator, + src_loc, + "TODO implement generateSymbol function pointers", + .{}, + ), + }; }, .Array => { // TODO populate .debug_info for the array @@ -262,6 +282,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { return struct { gpa: *Allocator, + air: *const Air, bin_file: *link.File, target: *const std.Target, mod_fn: *const Module.Fn, @@ -421,10 +442,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { const Self = @This(); - fn generateSymbol( + fn generate( bin_file: *link.File, src_loc: Module.SrcLoc, - typed_value: TypedValue, + module_fn: *Module.Fn, + air: Air, + liveness: Liveness, code: *std.ArrayList(u8), debug_output: DebugInfoOutput, ) GenerateSymbolError!Result { @@ -432,8 +455,6 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { @panic("Attempted to compile for architecture that was disabled by build configuration"); } - const module_fn = typed_value.val.castTag(.function).?.data; - assert(module_fn.owner_decl.has_tv); const fn_type = module_fn.owner_decl.ty; @@ -447,6 +468,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { var function = Self{ .gpa = bin_file.allocator, + .air = &air, + .liveness = &liveness, .target = &bin_file.options.target, .bin_file = bin_file, .mod_fn = module_fn, @@ -2131,8 +2154,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genArgDbgInfo(self: *Self, inst: Air.Inst.Index, mcv: MCValue) !void { - const name_with_null = inst.name[0 .. mem.lenZ(inst.name) + 1]; - const ty = self.air.getType(inst); + const ty_str = self.air.instructions.items(.data)[inst].ty_str; + const zir = &self.mod_fn.owner_decl.namespace.file_scope.zir; + const name = zir.nullTerminatedString(ty_str.str); + const name_with_null = name.ptr[0 ..
name.len + 1]; + const ty = self.air.getRefType(ty_str.ty); switch (mcv) { .register => |reg| { @@ -2249,8 +2275,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genCall(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - const pl_op = inst_datas[inst].pl_op; + const pl_op = self.air.instructions.items(.data)[inst].pl_op; const fn_ty = self.air.getType(pl_op.operand); const callee = pl_op.operand; const extra = self.air.extraData(Air.Call, inst_data.payload); @@ -2848,8 +2873,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genCondBr(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - const pl_op = inst_datas[inst].pl_op; + const pl_op = self.air.instructions.items(.data)[inst].pl_op; const cond = try self.resolveInst(pl_op.operand); const extra = self.air.extraData(Air.CondBr, inst_data.payload); const then_body = self.air.extra[extra.end..][0..extra.data.then_body_len]; @@ -3101,16 +3125,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsNull(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isNull(operand); } fn genIsNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -3126,16 +3150,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsNonNull(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isNonNull(operand); } fn genIsNonNullPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value.
@@ -3151,16 +3175,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsErr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isErr(operand); } fn genIsErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -3176,16 +3200,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genIsNonErr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand = try self.resolveInst(un_op); return self.isNonErr(operand); } fn genIsNonErrPtr(self: *Self, inst: Air.Inst.Index) !MCValue { if (self.liveness.isUnused(inst)) return MCValue.dead; - const inst_datas = self.air.instructions.items(.data); - const operand_ptr = try self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + const operand_ptr = try self.resolveInst(un_op); const operand: MCValue = blk: { if (self.reuseOperand(inst, 0, operand_ptr)) { // The MCValue that holds the pointer can be re-used as the value. @@ -3200,8 +3224,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { fn genLoop(self: *Self, inst: Air.Inst.Index) !MCValue { // A loop is a setup to be able to jump back to the beginning. - const inst_datas = self.air.instructions.items(.data); - const loop = self.air.extraData(Air.Block, inst_datas[inst].ty_pl.payload); + const ty_pl = self.air.instructions.items(.data)[inst].ty_pl; + const loop = self.air.extraData(Air.Block, ty_pl.payload); const body = self.air.extra[loop.end..][0..loop.data.body_len]; const start_index = self.code.items.len; try self.genBody(body); @@ -4377,13 +4401,13 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type { } fn genPtrToInt(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - return self.resolveInst(inst_datas[inst].un_op); + const un_op = self.air.instructions.items(.data)[inst].un_op; + return self.resolveInst(un_op); } fn genBitCast(self: *Self, inst: Air.Inst.Index) !MCValue { - const inst_datas = self.air.instructions.items(.data); - return self.resolveInst(inst_datas[inst].ty_op.operand); + const ty_op = self.air.instructions.items(.data)[inst].ty_op; + return self.resolveInst(ty_op.operand); } fn resolveInst(self: *Self, inst: Air.Inst.Index) !MCValue { diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig index 4a9087d7f5..3d704a8dc5 100644 --- a/src/codegen/spirv.zig +++ b/src/codegen/spirv.zig @@ -159,7 +159,10 @@ pub const DeclGen = struct { /// The SPIR-V module code should be put in. spv: *SPIRVModule, - /// An array of function argument result-ids. 
Each index corresponds with the function argument of the same index. + air: *const Air, + + /// An array of function argument result-ids. Each index corresponds with the + /// function argument of the same index. args: std.ArrayList(ResultId), /// A counter to keep track of how many `arg` instructions we've seen yet. @@ -168,33 +171,35 @@ pub const DeclGen = struct { /// A map keeping track of which instruction generated which result-id. inst_results: InstMap, - /// We need to keep track of result ids for block labels, as well as the 'incoming' blocks for a block. + /// We need to keep track of result ids for block labels, as well as the 'incoming' + /// blocks for a block. blocks: BlockMap, /// The label of the SPIR-V block we are currently generating. current_block_label_id: ResultId, - /// The actual instructions for this function. We need to declare all locals in the first block, and because we don't - /// know which locals there are going to be, we're just going to generate everything after the locals-section in this array. - /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the initial OpLabel. These will be generated - /// into spv.binary.fn_decls directly. + /// The actual instructions for this function. We need to declare all locals in + /// the first block, and because we don't know which locals there are going to be, + /// we're just going to generate everything after the locals-section in this array. + /// Note: It will not contain OpFunction, OpFunctionParameter, OpVariable and the + /// initial OpLabel. These will be generated into spv.binary.fn_decls directly. code: std.ArrayList(Word), /// The decl we are currently generating code for. decl: *Decl, - /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. Memory is owned by - /// `module.gpa`. + /// If `gen` returned `Error.AnalysisFail`, this contains an explanatory message. + /// Memory is owned by `module.gpa`. error_msg: ?*Module.ErrorMsg, /// Possible errors the `gen` function may return. const Error = error{ AnalysisFail, OutOfMemory }; - /// This structure is used to return information about a type typically used for arithmetic operations. - /// These types may either be integers, floats, or a vector of these. Most scalar operations also work on vectors, - /// so we can easily represent those as arithmetic types. - /// If the type is a scalar, 'inner type' refers to the scalar type. Otherwise, if its a vector, it refers - /// to the vector's element type. + /// This structure is used to return information about a type typically used for + /// arithmetic operations. These types may either be integers, floats, or a vector + /// of these. Most scalar operations also work on vectors, so we can easily represent + /// those as arithmetic types. If the type is a scalar, 'inner type' refers to the + /// scalar type. Otherwise, if its a vector, it refers to the vector's element type. const ArithmeticTypeInfo = struct { /// A classification of the inner type. const Class = enum { @@ -206,13 +211,14 @@ pub const DeclGen = struct { /// the relevant capability is enabled). integer, - /// A regular float. These are all required to be natively supported. Floating points for - /// which the relevant capability is not enabled are not emulated. + /// A regular float. These are all required to be natively supported. Floating points + /// for which the relevant capability is not enabled are not emulated. 
float, - /// An integer of a 'strange' size (which' bit size is not the same as its backing type. **Note**: this - /// may **also** include power-of-2 integers for which the relevant capability is not enabled), but still - /// within the limits of the largest natively supported integer type. + /// An integer of a 'strange' size (which' bit size is not the same as its backing + /// type. **Note**: this may **also** include power-of-2 integers for which the + /// relevant capability is not enabled), but still within the limits of the largest + /// natively supported integer type. strange_integer, /// An integer with more bits than the largest natively supported integer type. @@ -220,7 +226,7 @@ pub const DeclGen = struct { }; /// The number of bits in the inner type. - /// Note: this is the actual number of bits of the type, not the size of the backing integer. + /// This is the actual number of bits of the type, not the size of the backing integer. bits: u16, /// Whether the type is a vector. @@ -234,10 +240,12 @@ pub const DeclGen = struct { class: Class, }; - /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, only set when `gen` is called. + /// Initialize the common resources of a DeclGen. Some fields are left uninitialized, + /// only set when `gen` is called. pub fn init(spv: *SPIRVModule) DeclGen { return .{ .spv = spv, + .air = undefined, .args = std.ArrayList(ResultId).init(spv.gpa), .next_arg_index = undefined, .inst_results = InstMap.init(spv.gpa), @@ -252,8 +260,9 @@ pub const DeclGen = struct { /// Generate the code for `decl`. If a reportable error occured during code generation, /// a message is returned by this function. Callee owns the memory. If this function returns such /// a reportable error, it is valid to be called again for a different decl. - pub fn gen(self: *DeclGen, decl: *Decl) !?*Module.ErrorMsg { + pub fn gen(self: *DeclGen, decl: *Decl, air: Air) !?*Module.ErrorMsg { // Reset internal resources, we don't want to re-allocate these. + self.air = &air; self.args.items.len = 0; self.next_arg_index = 0; self.inst_results.clearRetainingCapacity(); @@ -680,7 +689,7 @@ pub const DeclGen = struct { .br => return self.genBr(inst), .breakpoint => return, - .condbr => return self.genCondBr(inst), + .cond_br => return self.genCondBr(inst), .constant => unreachable, .dbg_stmt => return self.genDbgStmt(inst), .loop => return self.genLoop(inst), @@ -688,6 +697,10 @@ pub const DeclGen = struct { .store => return self.genStore(inst), .unreach => return self.genUnreach(), // zig fmt: on + + else => |tag| return self.fail("TODO: SPIR-V backend: implement AIR tag {s}", .{ + @tagName(tag), + }), }; try self.inst_results.putNoClobber(inst, result_id); diff --git a/src/link/SpirV.zig b/src/link/SpirV.zig index bfae799462..8a2e877d42 100644 --- a/src/link/SpirV.zig +++ b/src/link/SpirV.zig @@ -135,6 +135,10 @@ pub fn flushModule(self: *SpirV, comp: *Compilation) !void { const tracy = trace(@src()); defer tracy.end(); + if (build_options.skip_non_native) { + @panic("Attempted to compile for architecture that was disabled by build configuration"); + } + const module = self.base.options.module.?; const target = comp.getTarget(); diff --git a/src/register_manager.zig b/src/register_manager.zig index 8aca7fcc3d..f0d128e7f9 100644 --- a/src/register_manager.zig +++ b/src/register_manager.zig @@ -20,7 +20,7 @@ pub fn RegisterManager( ) type { return struct { /// The key must be canonical register. 
- registers: [callee_preserved_regs.len]?*ir.Inst = [_]?*ir.Inst{null} ** callee_preserved_regs.len, + registers: [callee_preserved_regs.len]?Air.Inst.Index = [_]?Air.Inst.Index{null} ** callee_preserved_regs.len, free_registers: FreeRegInt = math.maxInt(FreeRegInt), /// Tracks all registers allocated in the course of this function allocated_registers: FreeRegInt = 0, @@ -75,7 +75,7 @@ pub fn RegisterManager( pub fn tryAllocRegs( self: *Self, comptime count: comptime_int, - insts: [count]?*ir.Inst, + insts: [count]?Air.Inst.Index, exceptions: []const Register, ) ?[count]Register { comptime if (callee_preserved_regs.len == 0) return null; @@ -113,7 +113,7 @@ pub fn RegisterManager( /// Allocates a register and optionally tracks it with a /// corresponding instruction. Returns `null` if all registers /// are allocated. - pub fn tryAllocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) ?Register { + pub fn tryAllocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) ?Register { return if (tryAllocRegs(self, 1, .{inst}, exceptions)) |regs| regs[0] else null; } @@ -123,7 +123,7 @@ pub fn RegisterManager( pub fn allocRegs( self: *Self, comptime count: comptime_int, - insts: [count]?*ir.Inst, + insts: [count]?Air.Inst.Index, exceptions: []const Register, ) ![count]Register { comptime assert(count > 0 and count <= callee_preserved_regs.len); @@ -168,14 +168,14 @@ pub fn RegisterManager( /// Allocates a register and optionally tracks it with a /// corresponding instruction. - pub fn allocReg(self: *Self, inst: ?*ir.Inst, exceptions: []const Register) !Register { + pub fn allocReg(self: *Self, inst: ?Air.Inst.Index, exceptions: []const Register) !Register { return (try self.allocRegs(1, .{inst}, exceptions))[0]; } /// Spills the register if it is currently allocated. If a /// corresponding instruction is passed, will also track this /// register. - pub fn getReg(self: *Self, reg: Register, inst: ?*ir.Inst) !void { + pub fn getReg(self: *Self, reg: Register, inst: ?Air.Inst.Index) !void { const index = reg.allocIndex() orelse return; if (inst) |tracked_inst| @@ -202,7 +202,7 @@ pub fn RegisterManager( /// Allocates the specified register with the specified /// instruction. Asserts that the register is free and no /// spilling is necessary. - pub fn getRegAssumeFree(self: *Self, reg: Register, inst: *ir.Inst) void { + pub fn getRegAssumeFree(self: *Self, reg: Register, inst: Air.Inst.Index) void { const index = reg.allocIndex() orelse return; assert(self.registers[index] == null); @@ -264,7 +264,7 @@ fn MockFunction(comptime Register: type) type { self.spilled.deinit(self.allocator); } - pub fn spillInstruction(self: *Self, reg: Register, inst: *ir.Inst) !void { + pub fn spillInstruction(self: *Self, reg: Register, inst: Air.Inst.Index) !void { _ = inst; try self.spilled.append(self.allocator, reg); }
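Note: the patch pivots on the new Air.Inst.Ref / Air.Inst.Index encoding used by Sema.addType, indexToRef, and refToIndex above. A minimal sketch of the intended round-trip, assuming it sits inside Sema.zig where std, Air, ref_start_index, indexToRef, and refToIndex are in scope; the function name and the index value 5 are illustrative only, not part of the patch:

fn illustrateRefMapping() void {
    // Hypothetical index; any instruction appended to sema.air_instructions would do.
    const inst: Air.Inst.Index = 5;
    const ref = indexToRef(inst); // == @intToEnum(Air.Inst.Ref, ref_start_index + 5)
    std.debug.assert(refToIndex(ref).? == inst); // round-trips back to the same index
    std.debug.assert(refToIndex(.u8_type) == null); // typed-value Refs name no instruction
}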