From 75c33ba85e47eec9f7257cfb972a54b22a5283eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?V=C3=96R=C3=96SK=C5=90I=20Andr=C3=A1s?= Date: Thu, 7 Jul 2022 19:05:56 +0200 Subject: [PATCH 1/5] Sema: add a note about @setEvalBranchQuota() when branch quota is exceeded closes #11996 --- src/Sema.zig | 15 ++++++++++++++- src/stage1/ir.cpp | 4 +++- test/cases/recursive_inline_function.1.zig | 1 + 3 files changed, 18 insertions(+), 2 deletions(-) diff --git a/src/Sema.zig b/src/Sema.zig index 054f645230..0f504c6c1d 100644 --- a/src/Sema.zig +++ b/src/Sema.zig @@ -18427,7 +18427,20 @@ fn safetyPanic( fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void { sema.branch_count += 1; if (sema.branch_count > sema.branch_quota) { - return sema.fail(block, src, "evaluation exceeded {d} backwards branches", .{sema.branch_quota}); + const msg = try sema.errMsg( + block, + src, + "evaluation exceeded {d} backwards branches", + .{sema.branch_quota}, + ); + try sema.errNote( + block, + src, + msg, + "use @setEvalBranchQuota() to raise the branch limit from {d}", + .{sema.branch_quota}, + ); + return sema.failWithOwnedErrorMsg(block, msg); } } diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp index 52044e9dce..c26a65aac2 100644 --- a/src/stage1/ir.cpp +++ b/src/stage1/ir.cpp @@ -5769,8 +5769,10 @@ static bool ir_emit_backward_branch(IrAnalyze *ira, AstNode* source_node) { *bbc += 1; if (*bbc > *quota) { - ir_add_error_node(ira, source_node, + ErrorMsg *msg = ir_add_error_node(ira, source_node, buf_sprintf("evaluation exceeded %" ZIG_PRI_usize " backwards branches", *quota)); + add_error_note(ira->codegen, msg, source_node, + buf_sprintf("use @setEvalBranchQuota to raise branch limit from %" ZIG_PRI_usize, *quota)); return false; } return true; diff --git a/test/cases/recursive_inline_function.1.zig b/test/cases/recursive_inline_function.1.zig index 8ed9bde8e8..0b7dd56d38 100644 --- a/test/cases/recursive_inline_function.1.zig +++ b/test/cases/recursive_inline_function.1.zig @@ -14,6 +14,7 @@ inline fn fibonacci(n: usize) usize { // error // // :11:21: error: evaluation exceeded 1000 backwards branches +// :11:21: note: use @setEvalBranchQuota() to raise the branch limit from 1000 // :11:40: note: called from here (6 times) // :11:21: note: called from here (495 times) // :5:24: note: called from here From cbc85f4516a5bd545ce365dedec19f6fcad47b58 Mon Sep 17 00:00:00 2001 From: Cody Tapscott Date: Thu, 7 Jul 2022 11:21:39 -0700 Subject: [PATCH 2/5] stage1: Fix seg-fault when slicing string literal with sentinel --- src/stage1/ir.cpp | 1 + test/behavior.zig | 1 + test/behavior/bugs/12033.zig | 12 ++++++++++++ 3 files changed, 14 insertions(+) create mode 100644 test/behavior/bugs/12033.zig diff --git a/src/stage1/ir.cpp b/src/stage1/ir.cpp index c26a65aac2..5a3952dc67 100644 --- a/src/stage1/ir.cpp +++ b/src/stage1/ir.cpp @@ -21575,6 +21575,7 @@ done_with_return_type: // handle `[N]T` target_len = target->type->data.array.len; target_sentinel = target->type->data.array.sentinel; + expand_undef_array(ira->codegen, target); target_elements = target->data.x_array.data.s_none.elements; break; } else if (target->type->id == ZigTypeIdPointer && target->type->data.pointer.child_type->id == ZigTypeIdArray) { diff --git a/test/behavior.zig b/test/behavior.zig index 087b821c7d..7d87f01dbc 100644 --- a/test/behavior.zig +++ b/test/behavior.zig @@ -83,6 +83,7 @@ test { _ = @import("behavior/bugs/11181.zig"); _ = @import("behavior/bugs/11213.zig"); _ = @import("behavior/bugs/12003.zig"); + _ = 
@import("behavior/bugs/12033.zig"); _ = @import("behavior/byteswap.zig"); _ = @import("behavior/byval_arg_var.zig"); _ = @import("behavior/call.zig"); diff --git a/test/behavior/bugs/12033.zig b/test/behavior/bugs/12033.zig new file mode 100644 index 0000000000..563ed8e79b --- /dev/null +++ b/test/behavior/bugs/12033.zig @@ -0,0 +1,12 @@ +const std = @import("std"); + +test { + const string = "Hello!\x00World!"; + try std.testing.expect(@TypeOf(string) == *const [13:0]u8); + + const slice_without_sentinel: []const u8 = string[0..6]; + try std.testing.expect(@TypeOf(slice_without_sentinel) == []const u8); + + const slice_with_sentinel: [:0]const u8 = string[0..6 :0]; + try std.testing.expect(@TypeOf(slice_with_sentinel) == [:0]const u8); +} From e5e9e5a7aaf4605fcdc4272fb79be42612da694f Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 7 Jul 2022 17:45:28 -0700 Subject: [PATCH 3/5] std.builtin.returnError: disable runtime safety --- lib/std/builtin.zig | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/std/builtin.zig b/lib/std/builtin.zig index 894103707c..68de3e9ddd 100644 --- a/lib/std/builtin.zig +++ b/lib/std/builtin.zig @@ -858,6 +858,7 @@ pub fn panicOutOfBounds(index: usize, len: usize) noreturn { pub noinline fn returnError(maybe_st: ?*StackTrace) void { @setCold(true); + @setRuntimeSafety(false); const st = maybe_st orelse return; addErrRetTraceAddr(st, @returnAddress()); } From 8d6011361fb088e3e4d1ad649ac196bec101ee78 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 7 Jul 2022 17:46:07 -0700 Subject: [PATCH 4/5] LLVM: handle byref combined with multiple_llvm_ints --- src/codegen/llvm.zig | 96 ++++++++++++++++++++++---------------------- 1 file changed, 49 insertions(+), 47 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index c3194ccda1..e8115f8795 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -860,61 +860,63 @@ pub const Object = struct { .multiple_llvm_ints => { const param_ty = fn_info.param_types[it.zig_index - 1]; const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len]; - const is_by_ref = isByRef(param_ty); switch (param_ty.zigTypeTag()) { .Struct => { const fields = param_ty.structFields().values(); - if (is_by_ref) { - const param_llvm_ty = try dg.lowerType(param_ty); - const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty); - arg_ptr.setAlignment(param_ty.abiAlignment(target)); + const param_llvm_ty = try dg.lowerType(param_ty); + const param_alignment = param_ty.abiAlignment(target); + const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty); + arg_ptr.setAlignment(param_alignment); - var field_i: u32 = 0; - var field_offset: u32 = 0; - for (llvm_ints) |int_bits| { - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; + var field_i: u32 = 0; + var field_offset: u32 = 0; + for (llvm_ints) |int_bits| { + const param = llvm_func.getParam(llvm_arg_i); + llvm_arg_i += 1; - const big_int_ty = dg.context.intType(int_bits); - var bits_used: u32 = 0; - while (bits_used < int_bits) { - const field = fields[field_i]; - const field_alignment = field.normalAlignment(target); - const prev_offset = field_offset; - field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment); - if (field_offset > prev_offset) { - // Padding counts as bits used. 
- bits_used += (field_offset - prev_offset) * 8; - if (bits_used >= int_bits) break; - } - const field_size = @intCast(u16, field.ty.abiSize(target)); - const field_abi_bits = field_size * 8; - const field_int_ty = dg.context.intType(field_abi_bits); - const shifted = if (bits_used == 0) param else s: { - const shift_amt = big_int_ty.constInt(bits_used, .False); - break :s builder.buildLShr(param, shift_amt, ""); - }; - const field_as_int = builder.buildTrunc(shifted, field_int_ty, ""); - var ty_buf: Type.Payload.Pointer = undefined; - const llvm_i = llvmFieldIndex(param_ty, field_i, target, &ty_buf).?; - const field_ptr = builder.buildStructGEP(arg_ptr, llvm_i, ""); - const casted_ptr = builder.buildBitCast(field_ptr, field_int_ty.pointerType(0), ""); - const store_inst = builder.buildStore(field_as_int, casted_ptr); - store_inst.setAlignment(field_alignment); - - field_i += 1; - if (field_i >= fields.len) break; - - bits_used += field_abi_bits; - field_offset += field_size; + const big_int_ty = dg.context.intType(int_bits); + var bits_used: u32 = 0; + while (bits_used < int_bits) { + const field = fields[field_i]; + const field_alignment = field.normalAlignment(target); + const prev_offset = field_offset; + field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment); + if (field_offset > prev_offset) { + // Padding counts as bits used. + bits_used += (field_offset - prev_offset) * 8; + if (bits_used >= int_bits) break; } - if (field_i >= fields.len) break; - } + const field_size = @intCast(u16, field.ty.abiSize(target)); + const field_abi_bits = field_size * 8; + const field_int_ty = dg.context.intType(field_abi_bits); + const shifted = if (bits_used == 0) param else s: { + const shift_amt = big_int_ty.constInt(bits_used, .False); + break :s builder.buildLShr(param, shift_amt, ""); + }; + const field_as_int = builder.buildTrunc(shifted, field_int_ty, ""); + var ty_buf: Type.Payload.Pointer = undefined; + const llvm_i = llvmFieldIndex(param_ty, field_i, target, &ty_buf).?; + const field_ptr = builder.buildStructGEP(arg_ptr, llvm_i, ""); + const casted_ptr = builder.buildBitCast(field_ptr, field_int_ty.pointerType(0), ""); + const store_inst = builder.buildStore(field_as_int, casted_ptr); + store_inst.setAlignment(field_alignment); - try args.append(arg_ptr); - } else { - @panic("TODO: LLVM backend: implement C calling convention on x86_64 with byval struct parameter"); + field_i += 1; + if (field_i >= fields.len) break; + + bits_used += field_abi_bits; + field_offset += field_size; + } + if (field_i >= fields.len) break; } + + const is_by_ref = isByRef(param_ty); + const loaded = if (is_by_ref) arg_ptr else l: { + const load_inst = builder.buildLoad(arg_ptr, ""); + load_inst.setAlignment(param_alignment); + break :l load_inst; + }; + try args.append(loaded); }, .Union => { @panic("TODO: LLVM backend: implement C calling convention on x86_64 with union parameter"); From 3a03872af76652515e467c1f33d918ead2c0a6b0 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Thu, 7 Jul 2022 18:23:07 -0700 Subject: [PATCH 5/5] LLVM: more robust implementation of C ABI for multiple_llvm_ints The previous code here was potentially more optimal for some cases, however, I never tested the perf, so it might not actually matter. This code handles more cases. We can go back and re-evaluate that other implementation if it seems worthwhile in the future. 
--- src/codegen/llvm.zig | 242 +++++++++---------------------------------- 1 file changed, 49 insertions(+), 193 deletions(-) diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig index e8115f8795..184b097270 100644 --- a/src/codegen/llvm.zig +++ b/src/codegen/llvm.zig @@ -858,71 +858,34 @@ pub const Object = struct { try args.append(aggregate); }, .multiple_llvm_ints => { - const param_ty = fn_info.param_types[it.zig_index - 1]; const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len]; - switch (param_ty.zigTypeTag()) { - .Struct => { - const fields = param_ty.structFields().values(); - const param_llvm_ty = try dg.lowerType(param_ty); - const param_alignment = param_ty.abiAlignment(target); - const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty); - arg_ptr.setAlignment(param_alignment); - - var field_i: u32 = 0; - var field_offset: u32 = 0; - for (llvm_ints) |int_bits| { - const param = llvm_func.getParam(llvm_arg_i); - llvm_arg_i += 1; - - const big_int_ty = dg.context.intType(int_bits); - var bits_used: u32 = 0; - while (bits_used < int_bits) { - const field = fields[field_i]; - const field_alignment = field.normalAlignment(target); - const prev_offset = field_offset; - field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment); - if (field_offset > prev_offset) { - // Padding counts as bits used. - bits_used += (field_offset - prev_offset) * 8; - if (bits_used >= int_bits) break; - } - const field_size = @intCast(u16, field.ty.abiSize(target)); - const field_abi_bits = field_size * 8; - const field_int_ty = dg.context.intType(field_abi_bits); - const shifted = if (bits_used == 0) param else s: { - const shift_amt = big_int_ty.constInt(bits_used, .False); - break :s builder.buildLShr(param, shift_amt, ""); - }; - const field_as_int = builder.buildTrunc(shifted, field_int_ty, ""); - var ty_buf: Type.Payload.Pointer = undefined; - const llvm_i = llvmFieldIndex(param_ty, field_i, target, &ty_buf).?; - const field_ptr = builder.buildStructGEP(arg_ptr, llvm_i, ""); - const casted_ptr = builder.buildBitCast(field_ptr, field_int_ty.pointerType(0), ""); - const store_inst = builder.buildStore(field_as_int, casted_ptr); - store_inst.setAlignment(field_alignment); - - field_i += 1; - if (field_i >= fields.len) break; - - bits_used += field_abi_bits; - field_offset += field_size; - } - if (field_i >= fields.len) break; - } - - const is_by_ref = isByRef(param_ty); - const loaded = if (is_by_ref) arg_ptr else l: { - const load_inst = builder.buildLoad(arg_ptr, ""); - load_inst.setAlignment(param_alignment); - break :l load_inst; - }; - try args.append(loaded); - }, - .Union => { - @panic("TODO: LLVM backend: implement C calling convention on x86_64 with union parameter"); - }, - else => unreachable, + const param_ty = fn_info.param_types[it.zig_index - 1]; + const param_llvm_ty = try dg.lowerType(param_ty); + const param_alignment = param_ty.abiAlignment(target); + const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty); + arg_ptr.setAlignment(param_alignment); + var field_types_buf: [8]*const llvm.Type = undefined; + const field_types = field_types_buf[0..llvm_ints.len]; + for (llvm_ints) |int_bits, i| { + field_types[i] = dg.context.intType(int_bits); } + const ints_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False); + const casted_ptr = builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), ""); + for (llvm_ints) |_, i_usize| { + const i = @intCast(c_uint, i_usize); + const param = 
llvm_func.getParam(i); + const field_ptr = builder.buildStructGEP(casted_ptr, i, ""); + const store_inst = builder.buildStore(param, field_ptr); + store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8); + } + + const is_by_ref = isByRef(param_ty); + const loaded = if (is_by_ref) arg_ptr else l: { + const load_inst = builder.buildLoad(arg_ptr, ""); + load_inst.setAlignment(param_alignment); + break :l load_inst; + }; + try args.append(loaded); }, }; } @@ -2821,65 +2784,11 @@ pub const DeclGen = struct { llvm_params.appendAssumeCapacity(len_llvm_ty); }, .multiple_llvm_ints => { - const param_ty = fn_info.param_types[it.zig_index - 1]; const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len]; try llvm_params.ensureUnusedCapacity(it.llvm_types_len); - - // The reason we have all this logic instead of simply appending - // big_int_ty is for the special case of a pointer type; - // we want to use a pointer type instead of inttoptr at the callsites, - // which may prevent optimization. - switch (param_ty.zigTypeTag()) { - .Struct => { - const fields = param_ty.structFields().values(); - var field_i: u32 = 0; - var field_offset: u32 = 0; - llvm_arg: for (llvm_ints) |int_bits| { - const big_int_ty = dg.context.intType(int_bits); - var bits_used: u32 = 0; - while (bits_used < int_bits) { - const field = fields[field_i]; - const field_alignment = field.normalAlignment(target); - const prev_offset = field_offset; - field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment); - if (field_offset > prev_offset) { - // Padding counts as bits used. - bits_used += (field_offset - prev_offset) * 8; - if (bits_used >= int_bits) break; - } - const field_size = @intCast(u16, field.ty.abiSize(target)); - const field_abi_bits = field_size * 8; - - // Special case for when the entire LLVM integer represents - // one field; in this case keep the type information - // to avoid the potentially costly ptrtoint/bitcast. 
- if (bits_used == 0 and field_abi_bits == int_bits) { - const llvm_field_ty = try dg.lowerType(field.ty); - llvm_params.appendAssumeCapacity(llvm_field_ty); - field_i += 1; - if (field_i >= fields.len) { - break :llvm_arg; - } else { - continue :llvm_arg; - } - } - - field_i += 1; - if (field_i >= fields.len) break; - - bits_used += field_abi_bits; - field_offset += field_size; - } - llvm_params.appendAssumeCapacity(big_int_ty); - if (field_i >= fields.len) break; - } - }, - else => { - for (llvm_ints) |int_bits| { - const big_int_ty = dg.context.intType(int_bits); - llvm_params.appendAssumeCapacity(big_int_ty); - } - }, + for (llvm_ints) |int_bits| { + const big_int_ty = dg.context.intType(int_bits); + llvm_params.appendAssumeCapacity(big_int_ty); } }, }; @@ -4299,80 +4208,27 @@ pub const FuncGen = struct { const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len]; const llvm_arg = try self.resolveInst(arg); const is_by_ref = isByRef(param_ty); + const arg_ptr = if (is_by_ref) llvm_arg else p: { + const p = self.buildAlloca(llvm_arg.typeOf()); + const store_inst = self.builder.buildStore(llvm_arg, p); + store_inst.setAlignment(param_ty.abiAlignment(target)); + break :p p; + }; + + var field_types_buf: [8]*const llvm.Type = undefined; + const field_types = field_types_buf[0..llvm_ints.len]; + for (llvm_ints) |int_bits, i| { + field_types[i] = self.dg.context.intType(int_bits); + } + const ints_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False); + const casted_ptr = self.builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), ""); try llvm_args.ensureUnusedCapacity(it.llvm_types_len); - switch (param_ty.zigTypeTag()) { - .Struct => { - const fields = param_ty.structFields().values(); - var field_i: u32 = 0; - var field_offset: u32 = 0; - for (llvm_ints) |int_bits| { - const big_int_ty = self.dg.context.intType(int_bits); - var int_arg: *const llvm.Value = undefined; - var bits_used: u32 = 0; - while (bits_used < int_bits) { - const field = fields[field_i]; - const field_alignment = field.normalAlignment(target); - const prev_offset = field_offset; - field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment); - if (field_offset > prev_offset) { - // Padding counts as bits used. - bits_used += (field_offset - prev_offset) * 8; - if (bits_used >= int_bits) break; - } - var ty_buf: Type.Payload.Pointer = undefined; - const llvm_i = llvmFieldIndex(param_ty, field_i, target, &ty_buf).?; - const field_size = @intCast(u16, field.ty.abiSize(target)); - const field_abi_bits = field_size * 8; - - // Special case for when the entire LLVM integer represents - // one field; in this case keep the type information - // to avoid the potentially costly ptrtoint/bitcast. 
- if (bits_used == 0 and field_abi_bits == int_bits) { - int_arg = if (is_by_ref) f: { - const field_ptr = self.builder.buildStructGEP(llvm_arg, llvm_i, ""); - const load_inst = self.builder.buildLoad(field_ptr, ""); - load_inst.setAlignment(field_alignment); - break :f load_inst; - } else self.builder.buildExtractValue(llvm_arg, llvm_i, ""); - field_i += 1; - break; - } - - const field_int_ty = self.dg.context.intType(field_abi_bits); - const llvm_field = if (is_by_ref) f: { - const field_ptr = self.builder.buildStructGEP(llvm_arg, llvm_i, ""); - const casted_ptr = self.builder.buildBitCast(field_ptr, field_int_ty.pointerType(0), ""); - const load_inst = self.builder.buildLoad(casted_ptr, ""); - load_inst.setAlignment(field_alignment); - break :f load_inst; - } else f: { - const llvm_field = self.builder.buildExtractValue(llvm_arg, llvm_i, ""); - break :f self.builder.buildBitCast(llvm_field, field_int_ty, ""); - }; - - const extended = self.builder.buildZExt(llvm_field, big_int_ty, ""); - if (bits_used == 0) { - int_arg = extended; - } else { - const shift_amt = big_int_ty.constInt(bits_used, .False); - const shifted = self.builder.buildShl(extended, shift_amt, ""); - int_arg = self.builder.buildOr(int_arg, shifted, ""); - } - - field_i += 1; - if (field_i >= fields.len) break; - - bits_used += field_abi_bits; - field_offset += field_size; - } - llvm_args.appendAssumeCapacity(int_arg); - if (field_i >= fields.len) break; - } - }, - .Union => { - return self.todo("airCall C calling convention on x86_64 with union argument ", .{}); - }, - else => unreachable, + for (llvm_ints) |_, i_usize| { + const i = @intCast(c_uint, i_usize); + const field_ptr = self.builder.buildStructGEP(casted_ptr, i, ""); + const load_inst = self.builder.buildLoad(field_ptr, ""); + load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8); + llvm_args.appendAssumeCapacity(load_inst); } }, };
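
A note on the diagnostic added in the first patch: the default comptime
branch quota is 1000, so any comptime loop that runs past it now gets
the new hint. A minimal example (hypothetical user code, not part of
these patches):

    comptime {
        // Without the next line this fails with:
        //   error: evaluation exceeded 1000 backwards branches
        //   note: use @setEvalBranchQuota() to raise the branch limit from 1000
        @setEvalBranchQuota(2000);
        var i: usize = 0;
        while (i < 1500) : (i += 1) {}
    }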