From 93d7fd95477b4a4803b52d5f52d3da9bda1926c8 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 8 Jun 2022 15:17:53 -0700
Subject: [PATCH 1/5] test harness: fix sort comparator

lessThan() was returning "true" when the objects were in fact equal.
---
 src/test.zig | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/src/test.zig b/src/test.zig
index 3d1f536f2c..2a9b82027f 100644
--- a/src/test.zig
+++ b/src/test.zig
@@ -490,9 +490,7 @@ fn getTestFileNameParts(name: []const u8) struct {

 /// Sort test filenames in-place, so that incremental test cases ("foo.0.zig",
 /// "foo.1.zig", etc.) are contiguous and appear in numerical order.
-fn sortTestFilenames(
-    filenames: [][]const u8,
-) void {
+fn sortTestFilenames(filenames: [][]const u8) void {
     const Context = struct {
         pub fn lessThan(_: @This(), a: []const u8, b: []const u8) bool {
             const a_parts = getTestFileNameParts(a);
@@ -505,14 +503,20 @@ fn sortTestFilenames(
                 .eq => switch (std.mem.order(u8, a_parts.file_ext, b_parts.file_ext)) {
                     .lt => true,
                     .gt => false,
-                    .eq => b: { // a and b differ only in their ".X" part
-
+                    .eq => {
+                        // a and b differ only in their ".X" part
+
                         // Sort "." before any ".X."
-                        if (a_parts.test_index == null) break :b true;
-                        if (b_parts.test_index == null) break :b false;
-
-                        // Make sure that incremental tests appear in linear order
-                        return a_parts.test_index.? < b_parts.test_index.?;
+                        if (a_parts.test_index) |a_index| {
+                            if (b_parts.test_index) |b_index| {
+                                // Make sure that incremental tests appear in linear order
+                                return a_index < b_index;
+                            } else {
+                                return false;
+                            }
+                        } else {
+                            return b_parts.test_index != null;
+                        }
                     },
                 },
             };
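
Note: std.sort requires lessThan to be a strict weak ordering, so
lessThan(a, a) must be false; the old comparator answered "true" for equal
names. A minimal, self-contained sketch of the contract (an illustration,
not part of the patch itself):

    const std = @import("std");

    fn lessThan(_: void, a: u32, b: u32) bool {
        // Strictly less-than: equal items compare false. Answering true
        // for equal items violates the ordering std.sort.sort relies on.
        return a < b;
    }

    test "lessThan is false for equal items" {
        var items = [_]u32{ 3, 1, 2, 2 };
        std.sort.sort(u32, &items, {}, lessThan);
        try std.testing.expectEqual([_]u32{ 1, 2, 2, 3 }, items);
    }
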
From 434226c89d45570842e9b9f34d1163b4d40156af Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 8 Jun 2022 15:18:43 -0700
Subject: [PATCH 2/5] stage2: fix type printing of sub-byte pointers

---
 src/type.zig                              |  4 ++--
 test/behavior.zig                         |  1 -
 .../incompatible sub-byte fields.zig}     | 16 +++++++++-------
 3 files changed, 11 insertions(+), 10 deletions(-)
 rename test/{behavior/bugs/1120.zig => cases/compile_errors/incompatible sub-byte fields.zig} (58%)

diff --git a/src/type.zig b/src/type.zig
index b6f8db4ca1..9b91572427 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -1843,7 +1843,7 @@ pub const Type = extern union {

                 if (payload.@"align" != 0 or payload.host_size != 0) {
                     try writer.print("align({d}", .{payload.@"align"});
-                    if (payload.bit_offset != 0) {
+                    if (payload.bit_offset != 0 or payload.host_size != 0) {
                         try writer.print(":{d}:{d}", .{ payload.bit_offset, payload.host_size });
                     }
                     try writer.writeAll(") ");
@@ -2167,7 +2167,7 @@ pub const Type = extern union {

             if (info.@"align" != 0 or info.host_size != 0) {
                 try writer.print("align({d}", .{info.@"align"});
-                if (info.bit_offset != 0) {
+                if (info.bit_offset != 0 or info.host_size != 0) {
                     try writer.print(":{d}:{d}", .{ info.bit_offset, info.host_size });
                 }
                 try writer.writeAll(") ");
diff --git a/test/behavior.zig b/test/behavior.zig
index 6618ddb20d..18e65564ad 100644
--- a/test/behavior.zig
+++ b/test/behavior.zig
@@ -27,7 +27,6 @@ test {
     _ = @import("behavior/bugs/1025.zig");
     _ = @import("behavior/bugs/1076.zig");
     _ = @import("behavior/bugs/1111.zig");
-    _ = @import("behavior/bugs/1120.zig");
     _ = @import("behavior/bugs/1277.zig");
     _ = @import("behavior/bugs/1310.zig");
     _ = @import("behavior/bugs/1381.zig");
diff --git a/test/behavior/bugs/1120.zig b/test/cases/compile_errors/incompatible sub-byte fields.zig
similarity index 58%
rename from test/behavior/bugs/1120.zig
rename to test/cases/compile_errors/incompatible sub-byte fields.zig
index 84c51feeac..20fbf5e30c 100644
--- a/test/behavior/bugs/1120.zig
+++ b/test/cases/compile_errors/incompatible sub-byte fields.zig
@@ -1,6 +1,3 @@
-const std = @import("std");
-const expect = std.testing.expect;
-
 const A = packed struct {
     a: u2,
     b: u6,
@@ -10,9 +7,7 @@ const B = packed struct {
     a: u2,
     b: u6,
 };
-test "bug 1120" {
-    if (@import("builtin").zig_backend != .stage1) return error.SkipZigTest; // TODO
-
+export fn entry() void {
     var a = A{ .a = 2, .b = 2 };
     var b = B{ .q = 22, .a = 3, .b = 2 };
     var t: usize = 0;
@@ -21,5 +16,12 @@
         1 => &b.a,
         else => unreachable,
     };
-    try expect(ptr.* == 2);
+    if (ptr.* == 2) {
+        @compileError("wrong compile error");
+    }
 }
+// error
+// backend=stage2
+// target=native
+//
+// :14:17: error: incompatible types: '*align(0:0:1) u2' and '*align(2:8:2) u2'
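
Note: pointers into packed structs print with the full form
align(alignment:bit_offset:host_size). Before this change the
":bit_offset:host_size" suffix was dropped whenever bit_offset was 0, even
with a nonzero host_size, so two incompatible sub-byte pointers could print
identically. A small illustration using the types from the renamed test:

    const A = packed struct { a: u2, b: u6 };
    var value = A{ .a = 2, .b = 2 };
    // @TypeOf(&value.a) now prints as '*align(0:0:1) u2': bit offset 0
    // within a 1-byte host integer. The conflicting pointer in the test
    // prints as '*align(2:8:2) u2': bit offset 8 within a 2-byte host
    // integer, which is why the two pointer types are incompatible.
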
From d557dedf6cb836e11038faada436299547044ffc Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 8 Jun 2022 15:38:15 -0700
Subject: [PATCH 3/5] add a missing align() to a behavior test

---
 test/behavior/bugs/6781.zig | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/test/behavior/bugs/6781.zig b/test/behavior/bugs/6781.zig
index 8315e81c67..d35612b695 100644
--- a/test/behavior/bugs/6781.zig
+++ b/test/behavior/bugs/6781.zig
@@ -1,3 +1,4 @@
+const builtin = @import("builtin");
 const std = @import("std");
 const assert = std.debug.assert;

@@ -61,9 +62,13 @@ pub const JournalHeader = packed struct {
 };

 test "fixed" {
-    if (@import("builtin").zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

-    var buffer = [_]u8{0} ** 65536;
+    var buffer align(@alignOf(JournalHeader)) = [_]u8{0} ** 65536;
     var entry = std.mem.bytesAsValue(JournalHeader, buffer[0..@sizeOf(JournalHeader)]);
     entry.* = .{
         .prev_hash_chain_root = 0,
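
Note: std.mem.bytesAsValue returns a pointer into the supplied bytes, so the
backing buffer needs at least the target type's alignment; a bare
[_]u8{0} ** 65536 only guarantees align(1). A minimal sketch of the pattern,
using a stand-in header type rather than the test's JournalHeader:

    const std = @import("std");

    const FooHeader = extern struct { checksum: u64, size: u64 };

    test "aligned backing buffer" {
        var buffer align(@alignOf(FooHeader)) = [_]u8{0} ** 4096;
        const header = std.mem.bytesAsValue(FooHeader, buffer[0..@sizeOf(FooHeader)]);
        header.* = .{ .checksum = 0, .size = 0 };
        try std.testing.expectEqual(@as(u64, 0), header.size);
    }
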
From 7c0614ea659b3f404f9d702c990afcac5f0b1479 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 8 Jun 2022 15:51:48 -0700
Subject: [PATCH 4/5] Sema: implement zirRetErrValueCode

---
 src/Sema.zig            | 17 ++++++++++++-----
 test/behavior/defer.zig |  5 ++++-
 2 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/src/Sema.zig b/src/Sema.zig
index 8ce9226bd4..d64b067771 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -771,7 +771,7 @@ fn analyzeBodyInner(
             .ptr_type => try sema.zirPtrType(block, inst),
             .ptr_type_simple => try sema.zirPtrTypeSimple(block, inst),
             .ref => try sema.zirRef(block, inst),
-            .ret_err_value_code => try sema.zirRetErrValueCode(block, inst),
+            .ret_err_value_code => try sema.zirRetErrValueCode(inst),
             .shr => try sema.zirShr(block, inst, .shr),
             .shr_exact => try sema.zirShr(block, inst, .shr_exact),
             .slice_end => try sema.zirSliceEnd(block, inst),
@@ -9250,10 +9250,17 @@ fn zirEmbedFile(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     return sema.analyzeDeclRef(embed_file.owner_decl);
 }

-fn zirRetErrValueCode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
-    _ = block;
-    _ = inst;
-    return sema.fail(block, sema.src, "TODO implement zirRetErrValueCode", .{});
+fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
+    const inst_data = sema.code.instructions.items(.data)[inst].str_tok;
+    const err_name = inst_data.get(sema.code);
+
+    // Return the error code from the function.
+    const kv = try sema.mod.getErrorValue(err_name);
+    const result_inst = try sema.addConstant(
+        try Type.Tag.error_set_single.create(sema.arena, kv.key),
+        try Value.Tag.@"error".create(sema.arena, .{ .name = kv.key }),
+    );
+    return result_inst;
 }

 fn zirShl(
diff --git a/test/behavior/defer.zig b/test/behavior/defer.zig
index c8239395eb..70053ba01f 100644
--- a/test/behavior/defer.zig
+++ b/test/behavior/defer.zig
@@ -108,7 +108,10 @@ test "mixing normal and error defers" {
 }

 test "errdefer with payload" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO

     const S = struct {
         fn foo() !i32 {
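
Note: ret_err_value_code is the ZIR instruction that materializes the error
value of a `return error.Foo;` so that an `errdefer |err|` capture can
observe it; implementing it in Sema is what lets the "errdefer with payload"
test run on more stage2 backends. A minimal sketch of the feature (not the
test's exact body):

    const std = @import("std");

    fn foo() !i32 {
        // The |err| capture is populated from the error being returned,
        // which Sema now obtains via zirRetErrValueCode.
        errdefer |err| std.debug.assert(err == error.One);
        return error.One;
    }

    test "errdefer payload observes the returned error" {
        try std.testing.expectError(error.One, foo());
    }
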
From f4d5fcde727d93da9ebe17a27cc62270545ccda6 Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Wed, 8 Jun 2022 20:40:16 -0700
Subject: [PATCH 5/5] AstGen: avoid redundant "ref" instructions

Whenever a `ref` instruction is needed, it is created and saved in
`AstGen.ref_table` instead of being immediately appended to the current
block body. Then, when the referenced instruction is being added to the
parent block (e.g. from setBlockBody), if it has a ref_table entry, then
the ref instruction is added directly after the instruction being
referenced. This makes sure two properties are upheld:

1. All pointers to the same locals return the same address. This is
   required to be compliant with the language specification.
2. `ref` instructions will dominate their uses. This is a required
   property of ZIR.

A complication arises when a ref instruction refs another ref
instruction. The logic in appendBodyWithFixups must take this into
account, recursively handling ref refs.
---
 src/AstGen.zig         | 228 ++++++++++++++++++++++++++++++-----------
 src/Zir.zig            |  12 +++
 test/behavior/eval.zig |   2 -
 3 files changed, 182 insertions(+), 60 deletions(-)
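
Note: property 1 is observable from user code. Both `&a` expressions below
must lower to the same `ref` instruction, which is exactly what the
re-enabled eval.zig test at the end of this patch checks:

    const std = @import("std");

    test "equality of pointers to comptime const" {
        const a: i32 = undefined;
        comptime std.debug.assert(&a == &a);
    }
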
diff --git a/src/AstGen.zig b/src/AstGen.zig
index 831071c479..3f22146f0e 100644
--- a/src/AstGen.zig
+++ b/src/AstGen.zig
@@ -45,6 +45,17 @@ fn_block: ?*GenZir = null,
 imports: std.AutoArrayHashMapUnmanaged(u32, Ast.TokenIndex) = .{},
 /// Used for temporary storage when building payloads.
 scratch: std.ArrayListUnmanaged(u32) = .{},
+/// Whenever a `ref` instruction is needed, it is created and saved in this
+/// table instead of being immediately appended to the current block body.
+/// Then, when the instruction is being added to the parent block (typically from
+/// setBlockBody), if it has a ref_table entry, then the ref instruction is added
+/// there. This makes sure two properties are upheld:
+/// 1. All pointers to the same locals return the same address. This is required
+///    to be compliant with the language specification.
+/// 2. `ref` instructions will dominate their uses. This is a required property
+///    of ZIR.
+/// The key is the ref operand; the value is the ref instruction.
+ref_table: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .{},

 const InnerError = error{ OutOfMemory, AnalysisFail };

@@ -199,6 +210,7 @@ pub fn deinit(astgen: *AstGen, gpa: Allocator) void {
     astgen.compile_errors.deinit(gpa);
     astgen.imports.deinit(gpa);
     astgen.scratch.deinit(gpa);
+    astgen.ref_table.deinit(gpa);
 }

 pub const ResultLoc = union(enum) {
@@ -4216,11 +4228,12 @@ fn structDeclInner(
     }

     const body = block_scope.instructionsSlice();
+    const body_len = astgen.countBodyLenAfterFixups(body);

     try gz.setStruct(decl_inst, .{
         .src_node = node,
         .layout = layout,
-        .body_len = @intCast(u32, body.len),
+        .body_len = body_len,
         .fields_len = field_count,
         .decls_len = decl_count,
         .known_non_opv = known_non_opv,
@@ -4230,9 +4243,9 @@ fn structDeclInner(
     wip_members.finishBits(bits_per_field);
     const decls_slice = wip_members.declsSlice();
     const fields_slice = wip_members.fieldsSlice();
-    try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body.len + fields_slice.len);
+    try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len);
     astgen.extra.appendSliceAssumeCapacity(decls_slice);
-    astgen.extra.appendSliceAssumeCapacity(body);
+    astgen.appendBodyWithFixups(body);
     astgen.extra.appendSliceAssumeCapacity(fields_slice);

     block_scope.unstack();
@@ -4364,12 +4377,13 @@ fn unionDeclInner(
     }

     const body = block_scope.instructionsSlice();
+    const body_len = astgen.countBodyLenAfterFixups(body);

     try gz.setUnion(decl_inst, .{
         .src_node = node,
         .layout = layout,
         .tag_type = arg_inst,
-        .body_len = @intCast(u32, body.len),
+        .body_len = body_len,
         .fields_len = field_count,
         .decls_len = decl_count,
         .auto_enum_tag = have_auto_enum,
@@ -4378,9 +4392,9 @@ fn unionDeclInner(
     wip_members.finishBits(bits_per_field);
     const decls_slice = wip_members.declsSlice();
     const fields_slice = wip_members.fieldsSlice();
-    try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body.len + fields_slice.len);
+    try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len);
     astgen.extra.appendSliceAssumeCapacity(decls_slice);
-    astgen.extra.appendSliceAssumeCapacity(body);
+    astgen.appendBodyWithFixups(body);
     astgen.extra.appendSliceAssumeCapacity(fields_slice);

     block_scope.unstack();
@@ -4616,12 +4630,13 @@ fn containerDecl(
             }

             const body = block_scope.instructionsSlice();
+            const body_len = astgen.countBodyLenAfterFixups(body);

             try gz.setEnum(decl_inst, .{
                 .src_node = node,
                 .nonexhaustive = nonexhaustive,
                 .tag_type = arg_inst,
-                .body_len = @intCast(u32, body.len),
+                .body_len = body_len,
                 .fields_len = @intCast(u32, counts.total_fields),
                 .decls_len = @intCast(u32, counts.decls),
             });
@@ -4629,9 +4644,9 @@ fn containerDecl(
             wip_members.finishBits(bits_per_field);
             const decls_slice = wip_members.declsSlice();
             const fields_slice = wip_members.fieldsSlice();
-            try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body.len + fields_slice.len);
+            try astgen.extra.ensureUnusedCapacity(gpa, decls_slice.len + body_len + fields_slice.len);
             astgen.extra.appendSliceAssumeCapacity(decls_slice);
-            astgen.extra.appendSliceAssumeCapacity(body);
+            astgen.appendBodyWithFixups(body);
             astgen.extra.appendSliceAssumeCapacity(fields_slice);

             block_scope.unstack();
@@ -5403,10 +5418,12 @@ fn setCondBrPayload(
     const astgen = then_scope.astgen;
     const then_body = then_scope.instructionsSliceUpto(else_scope);
     const else_body = else_scope.instructionsSlice();
-    const then_body_len = @intCast(u32, then_body.len + @boolToInt(then_break != 0));
-    const else_body_len = @intCast(u32, else_body.len + @boolToInt(else_break != 0));
-    try astgen.extra.ensureUnusedCapacity(astgen.gpa, @typeInfo(Zir.Inst.CondBr).Struct.fields.len +
-        then_body_len + else_body_len);
+    const then_body_len = astgen.countBodyLenAfterFixups(then_body) + @boolToInt(then_break != 0);
+    const else_body_len = astgen.countBodyLenAfterFixups(else_body) + @boolToInt(else_break != 0);
+    try astgen.extra.ensureUnusedCapacity(
+        astgen.gpa,
+        @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_body_len + else_body_len,
+    );

     const zir_datas = astgen.instructions.items(.data);
     zir_datas[condbr].pl_node.payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.CondBr{
@@ -5414,9 +5431,9 @@ fn setCondBrPayload(
         .then_body_len = then_body_len,
         .else_body_len = else_body_len,
     });
-    astgen.extra.appendSliceAssumeCapacity(then_body);
+    astgen.appendBodyWithFixups(then_body);
     if (then_break != 0) astgen.extra.appendAssumeCapacity(then_break);
-    astgen.extra.appendSliceAssumeCapacity(else_body);
+    astgen.appendBodyWithFixups(else_body);
     if (else_break != 0) astgen.extra.appendAssumeCapacity(else_break);
 }

@@ -5437,10 +5454,12 @@ fn setCondBrPayloadElideBlockStorePtr(
     const else_body = else_scope.instructionsSlice();
     const has_then_break = then_break != 0;
     const has_else_break = else_break != 0;
-    const then_body_len = @intCast(u32, then_body.len + @boolToInt(has_then_break));
-    const else_body_len = @intCast(u32, else_body.len + @boolToInt(has_else_break));
-    try astgen.extra.ensureUnusedCapacity(astgen.gpa, @typeInfo(Zir.Inst.CondBr).Struct.fields.len +
-        then_body_len + else_body_len);
+    const then_body_len = astgen.countBodyLenAfterFixups(then_body) + @boolToInt(has_then_break);
+    const else_body_len = astgen.countBodyLenAfterFixups(else_body) + @boolToInt(has_else_break);
+    try astgen.extra.ensureUnusedCapacity(
+        astgen.gpa,
+        @typeInfo(Zir.Inst.CondBr).Struct.fields.len + then_body_len + else_body_len,
+    );

     const zir_tags = astgen.instructions.items(.tag);
     const zir_datas = astgen.instructions.items(.data);
@@ -5475,7 +5494,7 @@ fn setCondBrPayloadElideBlockStorePtr(
                 continue;
             }
         }
-        astgen.extra.appendAssumeCapacity(src_inst);
+        appendPossiblyRefdBodyInst(astgen, &astgen.extra, src_inst);
     }
     if (has_then_break) astgen.extra.appendAssumeCapacity(then_break);

@@ -5495,7 +5514,7 @@ fn setCondBrPayloadElideBlockStorePtr(
                 continue;
             }
         }
-        astgen.extra.appendAssumeCapacity(src_inst);
+        appendPossiblyRefdBodyInst(astgen, &astgen.extra, src_inst);
     }
     if (has_else_break) astgen.extra.appendAssumeCapacity(else_break);
 }
@@ -6249,8 +6268,10 @@ fn switchExpr(
             }

             const case_slice = case_scope.instructionsSlice();
-            payloads.items[body_len_index] = @intCast(u32, case_slice.len);
-            try payloads.appendSlice(gpa, case_slice);
+            const body_len = astgen.countBodyLenAfterFixups(case_slice);
+            try payloads.ensureUnusedCapacity(gpa, body_len);
+            payloads.items[body_len_index] = body_len;
+            appendBodyWithFixupsArrayList(astgen, payloads, case_slice);
         }
     }
     // Now that the item expressions are generated we can add this.
@@ -7043,10 +7064,11 @@ fn typeOf(
     node: Ast.Node.Index,
     args: []const Ast.Node.Index,
 ) InnerError!Zir.Inst.Ref {
+    const astgen = gz.astgen;
     if (args.len < 1) {
-        return gz.astgen.failNode(node, "expected at least 1 argument, found 0", .{});
+        return astgen.failNode(node, "expected at least 1 argument, found 0", .{});
     }
-    const gpa = gz.astgen.gpa;
+    const gpa = astgen.gpa;

     if (args.len == 1) {
         const typeof_inst = try gz.makeBlockInst(.typeof_builtin, node);
@@ -7065,7 +7087,7 @@ fn typeOf(
         return rvalue(gz, rl, indexToRef(typeof_inst), node);
     }
     const payload_size: u32 = std.meta.fields(Zir.Inst.TypeOfPeer).len;
-    const payload_index = try reserveExtra(gz.astgen, payload_size + args.len);
+    const payload_index = try reserveExtra(astgen, payload_size + args.len);
     var args_index = payload_index + payload_size;

     const typeof_inst = try gz.addExtendedMultiOpPayloadIndex(.typeof_peer, payload_index, args.len);
@@ -7075,17 +7097,19 @@ fn typeOf(

     for (args) |arg, i| {
         const param_ref = try reachableExpr(&typeof_scope, &typeof_scope.base, .none, arg, node);
-        gz.astgen.extra.items[args_index + i] = @enumToInt(param_ref);
+        astgen.extra.items[args_index + i] = @enumToInt(param_ref);
     }
     _ = try typeof_scope.addBreak(.break_inline, refToIndex(typeof_inst).?, .void_value);

     const body = typeof_scope.instructionsSlice();
-    gz.astgen.setExtra(payload_index, Zir.Inst.TypeOfPeer{
-        .body_len = @intCast(u32, body.len),
-        .body_index = @intCast(u32, gz.astgen.extra.items.len),
+    const body_len = astgen.countBodyLenAfterFixups(body);
+    astgen.setExtra(payload_index, Zir.Inst.TypeOfPeer{
+        .body_len = @intCast(u32, body_len),
+        .body_index = @intCast(u32, astgen.extra.items.len),
         .src_node = gz.nodeIndexToRelative(node),
     });
-    try gz.astgen.extra.appendSlice(gpa, body);
+    try astgen.extra.ensureUnusedCapacity(gpa, body_len);
+    astgen.appendBodyWithFixups(body);
     typeof_scope.unstack();

     return rvalue(gz, rl, typeof_inst, node);
@@ -9032,9 +9056,22 @@ fn rvalue(
         },
         .ref => {
             // We need a pointer but we have a value.
-            const tree = gz.astgen.tree;
+            // Unfortunately it's not quite as simple as directly emitting a ref
+            // instruction here because we need subsequent address-of operator on
+            // const locals to return the same address.
+            const astgen = gz.astgen;
+            const tree = astgen.tree;
             const src_token = tree.firstToken(src_node);
-            return gz.addUnTok(.ref, result, src_token);
+            const result_index = refToIndex(result) orelse
+                return gz.addUnTok(.ref, result, src_token);
+            const zir_tags = gz.astgen.instructions.items(.tag);
+            if (zir_tags[result_index].isParam())
+                return gz.addUnTok(.ref, result, src_token);
+            const gop = try astgen.ref_table.getOrPut(astgen.gpa, result_index);
+            if (!gop.found_existing) {
+                gop.value_ptr.* = try gz.makeUnTok(.ref, result, src_token);
+            }
+            return indexToRef(gop.value_ptr.*);
         },
         .ty => |ty_inst| {
             // Quickly eliminate some common, unnecessary type coercion.
@@ -9976,43 +10013,58 @@ const GenZir = struct {

     /// Assumes nothing stacked on `gz`. Unstacks `gz`.
     fn setBoolBrBody(gz: *GenZir, inst: Zir.Inst.Index) !void {
-        const gpa = gz.astgen.gpa;
+        const astgen = gz.astgen;
+        const gpa = astgen.gpa;
         const body = gz.instructionsSlice();
-        try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Block).Struct.fields.len + body.len);
-        const zir_datas = gz.astgen.instructions.items(.data);
-        zir_datas[inst].bool_br.payload_index = gz.astgen.addExtraAssumeCapacity(
-            Zir.Inst.Block{ .body_len = @intCast(u32, body.len) },
+        const body_len = astgen.countBodyLenAfterFixups(body);
+        try astgen.extra.ensureUnusedCapacity(
+            gpa,
+            @typeInfo(Zir.Inst.Block).Struct.fields.len + body_len,
         );
-        gz.astgen.extra.appendSliceAssumeCapacity(body);
+        const zir_datas = astgen.instructions.items(.data);
+        zir_datas[inst].bool_br.payload_index = astgen.addExtraAssumeCapacity(
+            Zir.Inst.Block{ .body_len = body_len },
+        );
+        astgen.appendBodyWithFixups(body);
         gz.unstack();
     }

     /// Assumes nothing stacked on `gz`. Unstacks `gz`.
     fn setBlockBody(gz: *GenZir, inst: Zir.Inst.Index) !void {
-        const gpa = gz.astgen.gpa;
+        const astgen = gz.astgen;
+        const gpa = astgen.gpa;
         const body = gz.instructionsSlice();
-        try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Block).Struct.fields.len + body.len);
-        const zir_datas = gz.astgen.instructions.items(.data);
-        zir_datas[inst].pl_node.payload_index = gz.astgen.addExtraAssumeCapacity(
-            Zir.Inst.Block{ .body_len = @intCast(u32, body.len) },
+        const body_len = astgen.countBodyLenAfterFixups(body);
+        try astgen.extra.ensureUnusedCapacity(
+            gpa,
+            @typeInfo(Zir.Inst.Block).Struct.fields.len + body_len,
        );
-        gz.astgen.extra.appendSliceAssumeCapacity(body);
+        const zir_datas = astgen.instructions.items(.data);
+        zir_datas[inst].pl_node.payload_index = astgen.addExtraAssumeCapacity(
+            Zir.Inst.Block{ .body_len = body_len },
+        );
+        astgen.appendBodyWithFixups(body);
         gz.unstack();
     }

     /// Assumes nothing stacked on `gz`. Unstacks `gz`.
     fn setTryBody(gz: *GenZir, inst: Zir.Inst.Index, operand: Zir.Inst.Ref) !void {
-        const gpa = gz.astgen.gpa;
+        const astgen = gz.astgen;
+        const gpa = astgen.gpa;
         const body = gz.instructionsSlice();
-        try gz.astgen.extra.ensureUnusedCapacity(gpa, @typeInfo(Zir.Inst.Try).Struct.fields.len + body.len);
-        const zir_datas = gz.astgen.instructions.items(.data);
-        zir_datas[inst].pl_node.payload_index = gz.astgen.addExtraAssumeCapacity(
+        const body_len = astgen.countBodyLenAfterFixups(body);
+        try astgen.extra.ensureUnusedCapacity(
+            gpa,
+            @typeInfo(Zir.Inst.Try).Struct.fields.len + body_len,
+        );
+        const zir_datas = astgen.instructions.items(.data);
+        zir_datas[inst].pl_node.payload_index = astgen.addExtraAssumeCapacity(
             Zir.Inst.Try{
                 .operand = operand,
-                .body_len = @intCast(u32, body.len),
+                .body_len = body_len,
             },
         );
-        gz.astgen.extra.appendSliceAssumeCapacity(body);
+        astgen.appendBodyWithFixups(body);
         gz.unstack();
     }

@@ -10089,6 +10141,7 @@ const GenZir = struct {

             if (args.ret_gz) |ret_gz| ret_body = ret_gz.instructionsSlice();
         }
+        const body_len = astgen.countBodyLenAfterFixups(body);

         if (args.cc_ref != .none or args.lib_name != 0 or
             args.is_var_args or args.is_test or args.is_extern or
@@ -10114,13 +10167,13 @@ const GenZir = struct {
                 fancyFnExprExtraLen(section_body, args.section_ref) +
                 fancyFnExprExtraLen(cc_body, args.cc_ref) +
                 fancyFnExprExtraLen(ret_body, ret_ref) +
-                body.len + src_locs.len +
+                body_len + src_locs.len +
                 @boolToInt(args.lib_name != 0) +
                 @boolToInt(args.noalias_bits != 0),
             );
             const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.FuncFancy{
                 .param_block = args.param_block,
-                .body_len = @intCast(u32, body.len),
+                .body_len = body_len,
                 .bits = .{
                     .is_var_args = args.is_var_args,
                     .is_inferred_error = args.is_inferred_error,
@@ -10187,7 +10240,7 @@ const GenZir = struct {
                 astgen.extra.appendAssumeCapacity(args.noalias_bits);
             }

-            astgen.extra.appendSliceAssumeCapacity(body);
+            astgen.appendBodyWithFixups(body);
             astgen.extra.appendSliceAssumeCapacity(src_locs);

             // Order is important when unstacking.
@@ -10214,9 +10267,9 @@ const GenZir = struct {
         } else {
             try astgen.extra.ensureUnusedCapacity(
                 gpa,
                 @typeInfo(Zir.Inst.Func).Struct.fields.len + 1 +
                     @maximum(ret_body.len, @boolToInt(ret_ref != .none)) +
-                    body.len + src_locs.len,
+                    body_len + src_locs.len,
             );
             const ret_body_len = if (ret_body.len != 0)
                 @intCast(u32, ret_body.len)
@@ -10226,7 +10279,7 @@ const GenZir = struct {
             const payload_index = astgen.addExtraAssumeCapacity(Zir.Inst.Func{
                 .param_block = args.param_block,
                 .ret_body_len = ret_body_len,
-                .body_len = @intCast(u32, body.len),
+                .body_len = body_len,
             });
             const zir_datas = astgen.instructions.items(.data);
             if (ret_body.len != 0) {
@@ -10235,7 +10288,7 @@ const GenZir = struct {
             } else if (ret_ref != .none) {
                 astgen.extra.appendAssumeCapacity(@enumToInt(ret_ref));
             }
-            astgen.extra.appendSliceAssumeCapacity(body);
+            astgen.appendBodyWithFixups(body);
             astgen.extra.appendSliceAssumeCapacity(src_locs);

             // Order is important when unstacking.
@@ -10593,6 +10646,26 @@ const GenZir = struct {
         });
     }

+    fn makeUnTok(
+        gz: *GenZir,
+        tag: Zir.Inst.Tag,
+        operand: Zir.Inst.Ref,
+        /// Absolute token index. This function does the conversion to Decl offset.
+        abs_tok_index: Ast.TokenIndex,
+    ) !Zir.Inst.Index {
+        const astgen = gz.astgen;
+        const new_index = @intCast(Zir.Inst.Index, astgen.instructions.len);
+        assert(operand != .none);
+        try astgen.instructions.append(astgen.gpa, .{
+            .tag = tag,
+            .data = .{ .un_tok = .{
+                .operand = operand,
+                .src_tok = gz.tokenIndexToRelative(abs_tok_index),
+            } },
+        });
+        return new_index;
+    }
+
     fn addStrTok(
         gz: *GenZir,
         tag: Zir.Inst.Tag,
@@ -11337,3 +11410,42 @@ fn isInferred(astgen: *AstGen, ref: Zir.Inst.Ref) bool {
         else => false,
     };
 }
+
+/// Assumes capacity for body has already been added. Needed capacity taking into
+/// account fixups can be found with `countBodyLenAfterFixups`.
+fn appendBodyWithFixups(astgen: *AstGen, body: []const Zir.Inst.Index) void {
+    return appendBodyWithFixupsArrayList(astgen, &astgen.extra, body);
+}
+
+fn appendBodyWithFixupsArrayList(
+    astgen: *AstGen,
+    list: *std.ArrayListUnmanaged(u32),
+    body: []const Zir.Inst.Index,
+) void {
+    for (body) |body_inst| {
+        appendPossiblyRefdBodyInst(astgen, list, body_inst);
+    }
+}
+
+fn appendPossiblyRefdBodyInst(
+    astgen: *AstGen,
+    list: *std.ArrayListUnmanaged(u32),
+    body_inst: Zir.Inst.Index,
+) void {
+    list.appendAssumeCapacity(body_inst);
+    const kv = astgen.ref_table.fetchRemove(body_inst) orelse return;
+    const ref_inst = kv.value;
+    return appendPossiblyRefdBodyInst(astgen, list, ref_inst);
+}
+
+fn countBodyLenAfterFixups(astgen: *AstGen, body: []const Zir.Inst.Index) u32 {
+    var count = body.len;
+    for (body) |body_inst| {
+        var check_inst = body_inst;
+        while (astgen.ref_table.get(check_inst)) |ref_inst| {
+            count += 1;
+            check_inst = ref_inst;
+        }
+    }
+    return @intCast(u32, count);
+}
diff --git a/src/Zir.zig b/src/Zir.zig
index 98e4167355..370f996fd5 100644
--- a/src/Zir.zig
+++ b/src/Zir.zig
@@ -1271,6 +1271,18 @@ pub const Inst = struct {
             };
         }

+        pub fn isParam(tag: Tag) bool {
+            return switch (tag) {
+                .param,
+                .param_comptime,
+                .param_anytype,
+                .param_anytype_comptime,
+                => true,
+
+                else => false,
+            };
+        }
+
         /// AstGen uses this to find out if `Ref.void_value` should be used in place
         /// of the result of a given instruction. This allows Sema to forego adding
         /// the instruction to the map after analysis.
diff --git a/test/behavior/eval.zig b/test/behavior/eval.zig
index f14a25451d..220768c820 100644
--- a/test/behavior/eval.zig
+++ b/test/behavior/eval.zig
@@ -1191,8 +1191,6 @@ test "no dependency loop for alignment of self tagged union" {
 }

 test "equality of pointers to comptime const" {
-    if (builtin.zig_backend != .stage1) return error.SkipZigTest; // TODO
-
     const a: i32 = undefined;
     comptime assert(&a == &a);
 }
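
Note: a worked example of the fixup bookkeeping added in patch 5. Given a
block body {%1, %2} and ref_table entries {%1 -> %3, %3 -> %4}, that is, a
ref (%3) which is itself ref'd (%4), appendBodyWithFixups emits:

    // %1  (the referenced instruction)
    // %3  (ref of %1, inserted directly after it)
    // %4  (ref of %3, handled by the recursive call in
    //      appendPossiblyRefdBodyInst)
    // %2
    //
    // countBodyLenAfterFixups reports 4 for this body, which is why callers
    // reserve extra capacity with it before appending.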