mirror of https://github.com/ziglang/zig.git (synced 2026-01-20 22:35:24 +00:00)

commit 6a3a0fe7ae
Merge remote-tracking branch 'origin/master' into llvm14
lib/std/builtin.zig

@@ -858,6 +858,7 @@ pub fn panicOutOfBounds(index: usize, len: usize) noreturn {

 pub noinline fn returnError(maybe_st: ?*StackTrace) void {
     @setCold(true);
+    @setRuntimeSafety(false);
     const st = maybe_st orelse return;
     addErrRetTraceAddr(st, @returnAddress());
 }
15 src/Sema.zig
@@ -18427,7 +18427,20 @@ fn safetyPanic(

 fn emitBackwardBranch(sema: *Sema, block: *Block, src: LazySrcLoc) !void {
     sema.branch_count += 1;
     if (sema.branch_count > sema.branch_quota) {
-        return sema.fail(block, src, "evaluation exceeded {d} backwards branches", .{sema.branch_quota});
+        const msg = try sema.errMsg(
+            block,
+            src,
+            "evaluation exceeded {d} backwards branches",
+            .{sema.branch_quota},
+        );
+        try sema.errNote(
+            block,
+            src,
+            msg,
+            "use @setEvalBranchQuota() to raise the branch limit from {d}",
+            .{sema.branch_quota},
+        );
+        return sema.failWithOwnedErrorMsg(block, msg);
     }
 }
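The practical effect of this Sema change: exceeding the quota now reports a note pointing at @setEvalBranchQuota alongside the error. A minimal user-side sketch (the test name and numbers are illustrative, not from this commit):

const std = @import("std");

test "comptime loop that needs a raised branch quota" {
    comptime {
        // Without this call, 2000 loop iterations exceed the default
        // quota of 1000 backwards branches, and the compiler now emits
        // both the error and the note added above.
        @setEvalBranchQuota(5000);
        var i: usize = 0;
        var total: usize = 0;
        while (i < 2000) : (i += 1) {
            total += i;
        }
        std.debug.assert(total == 1999 * 2000 / 2);
    }
}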
src/codegen/llvm.zig

@@ -858,69 +858,34 @@ pub const Object = struct {
     try args.append(aggregate);
 },
 .multiple_llvm_ints => {
     const param_ty = fn_info.param_types[it.zig_index - 1];
     const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
-    const is_by_ref = isByRef(param_ty);
-    switch (param_ty.zigTypeTag()) {
-        .Struct => {
-            const fields = param_ty.structFields().values();
-            if (is_by_ref) {
-                const param_llvm_ty = try dg.lowerType(param_ty);
-                const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
-                arg_ptr.setAlignment(param_ty.abiAlignment(target));
-
-                var field_i: u32 = 0;
-                var field_offset: u32 = 0;
-                for (llvm_ints) |int_bits| {
-                    const param = llvm_func.getParam(llvm_arg_i);
-                    llvm_arg_i += 1;
-
-                    const big_int_ty = dg.context.intType(int_bits);
-                    var bits_used: u32 = 0;
-                    while (bits_used < int_bits) {
-                        const field = fields[field_i];
-                        const field_alignment = field.normalAlignment(target);
-                        const prev_offset = field_offset;
-                        field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment);
-                        if (field_offset > prev_offset) {
-                            // Padding counts as bits used.
-                            bits_used += (field_offset - prev_offset) * 8;
-                            if (bits_used >= int_bits) break;
-                        }
-                        const field_size = @intCast(u16, field.ty.abiSize(target));
-                        const field_abi_bits = field_size * 8;
-                        const field_int_ty = dg.context.intType(field_abi_bits);
-                        const shifted = if (bits_used == 0) param else s: {
-                            const shift_amt = big_int_ty.constInt(bits_used, .False);
-                            break :s builder.buildLShr(param, shift_amt, "");
-                        };
-                        const field_as_int = builder.buildTrunc(shifted, field_int_ty, "");
-                        var ty_buf: Type.Payload.Pointer = undefined;
-                        const llvm_i = llvmFieldIndex(param_ty, field_i, target, &ty_buf).?;
-                        const field_ptr = builder.buildStructGEP(arg_ptr, llvm_i, "");
-                        const casted_ptr = builder.buildBitCast(field_ptr, field_int_ty.pointerType(0), "");
-                        const store_inst = builder.buildStore(field_as_int, casted_ptr);
-                        store_inst.setAlignment(field_alignment);
-
-                        field_i += 1;
-                        if (field_i >= fields.len) break;
-
-                        bits_used += field_abi_bits;
-                        field_offset += field_size;
-                    }
-                    if (field_i >= fields.len) break;
-                }
-
-                try args.append(arg_ptr);
-            } else {
-                @panic("TODO: LLVM backend: implement C calling convention on x86_64 with byval struct parameter");
-            }
-        },
-        .Union => {
-            @panic("TODO: LLVM backend: implement C calling convention on x86_64 with union parameter");
-        },
-        else => unreachable,
-    }
+    const param_llvm_ty = try dg.lowerType(param_ty);
+    const param_alignment = param_ty.abiAlignment(target);
+    const arg_ptr = buildAllocaInner(builder, llvm_func, false, param_llvm_ty);
+    arg_ptr.setAlignment(param_alignment);
+    var field_types_buf: [8]*const llvm.Type = undefined;
+    const field_types = field_types_buf[0..llvm_ints.len];
+    for (llvm_ints) |int_bits, i| {
+        field_types[i] = dg.context.intType(int_bits);
+    }
+    const ints_llvm_ty = dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+    const casted_ptr = builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
+    for (llvm_ints) |_, i_usize| {
+        const i = @intCast(c_uint, i_usize);
+        const param = llvm_func.getParam(i);
+        const field_ptr = builder.buildStructGEP(casted_ptr, i, "");
+        const store_inst = builder.buildStore(param, field_ptr);
+        store_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+    }
+
+    const is_by_ref = isByRef(param_ty);
+    const loaded = if (is_by_ref) arg_ptr else l: {
+        const load_inst = builder.buildLoad(arg_ptr, "");
+        load_inst.setAlignment(param_alignment);
+        break :l load_inst;
+    };
+    try args.append(loaded);
 },
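For context, multiple_llvm_ints covers by-value aggregates that the SysV x86_64 C ABI splits across integer registers: the new prologue code allocates a stack slot for the parameter, reinterprets the slot as a struct of the raw ABI integers, and stores each incoming integer into it. A hedged sketch of a Zig declaration that hits this path (the struct and function are illustrative assumptions, not from the diff):

// Illustrative: a 16-byte extern struct is classified as two INTEGER
// eightbytes under the SysV x86_64 C ABI, so the function below
// receives it as two i64 parameters, i.e. the multiple_llvm_ints case.
const Pair = extern struct {
    a: u32,
    b: u32,
    c: u64,
};

export fn pairSum(p: Pair) u64 {
    return @as(u64, p.a) + p.b + p.c;
}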
@@ -2822,65 +2787,11 @@ pub const DeclGen = struct {
     llvm_params.appendAssumeCapacity(len_llvm_ty);
 },
 .multiple_llvm_ints => {
-    const param_ty = fn_info.param_types[it.zig_index - 1];
     const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
     try llvm_params.ensureUnusedCapacity(it.llvm_types_len);
-
-    // The reason we have all this logic instead of simply appending
-    // big_int_ty is for the special case of a pointer type;
-    // we want to use a pointer type instead of inttoptr at the callsites,
-    // which may prevent optimization.
-    switch (param_ty.zigTypeTag()) {
-        .Struct => {
-            const fields = param_ty.structFields().values();
-            var field_i: u32 = 0;
-            var field_offset: u32 = 0;
-            llvm_arg: for (llvm_ints) |int_bits| {
-                const big_int_ty = dg.context.intType(int_bits);
-                var bits_used: u32 = 0;
-                while (bits_used < int_bits) {
-                    const field = fields[field_i];
-                    const field_alignment = field.normalAlignment(target);
-                    const prev_offset = field_offset;
-                    field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment);
-                    if (field_offset > prev_offset) {
-                        // Padding counts as bits used.
-                        bits_used += (field_offset - prev_offset) * 8;
-                        if (bits_used >= int_bits) break;
-                    }
-                    const field_size = @intCast(u16, field.ty.abiSize(target));
-                    const field_abi_bits = field_size * 8;
-
-                    // Special case for when the entire LLVM integer represents
-                    // one field; in this case keep the type information
-                    // to avoid the potentially costly ptrtoint/bitcast.
-                    if (bits_used == 0 and field_abi_bits == int_bits) {
-                        const llvm_field_ty = try dg.lowerType(field.ty);
-                        llvm_params.appendAssumeCapacity(llvm_field_ty);
-                        field_i += 1;
-                        if (field_i >= fields.len) {
-                            break :llvm_arg;
-                        } else {
-                            continue :llvm_arg;
-                        }
-                    }
-
-                    field_i += 1;
-                    if (field_i >= fields.len) break;
-
-                    bits_used += field_abi_bits;
-                    field_offset += field_size;
-                }
-                llvm_params.appendAssumeCapacity(big_int_ty);
-                if (field_i >= fields.len) break;
-            }
-        },
-        else => {
-            for (llvm_ints) |int_bits| {
-                const big_int_ty = dg.context.intType(int_bits);
-                llvm_params.appendAssumeCapacity(big_int_ty);
-            }
-        },
-    }
+    for (llvm_ints) |int_bits| {
+        const big_int_ty = dg.context.intType(int_bits);
+        llvm_params.appendAssumeCapacity(big_int_ty);
+    }
 },
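The deleted comment explains what is being given up here: the old code kept a pointer-typed parameter when one field exactly filled an ABI integer, to avoid inttoptr at call sites. A hypothetical struct that used to trigger that special case (names are assumptions for illustration):

// With this change, a struct like this is lowered as two plain i64
// parameters even though the first field is a pointer; previously the
// pointer type could be preserved in the LLVM signature.
const Entry = extern struct {
    name: [*:0]const u8,
    len: usize,
};

export fn entryLen(e: Entry) usize {
    return e.len;
}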
@@ -4300,80 +4211,27 @@ pub const FuncGen = struct {
     const llvm_ints = it.llvm_types_buffer[0..it.llvm_types_len];
     const llvm_arg = try self.resolveInst(arg);
     const is_by_ref = isByRef(param_ty);
+    const arg_ptr = if (is_by_ref) llvm_arg else p: {
+        const p = self.buildAlloca(llvm_arg.typeOf());
+        const store_inst = self.builder.buildStore(llvm_arg, p);
+        store_inst.setAlignment(param_ty.abiAlignment(target));
+        break :p p;
+    };
+
+    var field_types_buf: [8]*const llvm.Type = undefined;
+    const field_types = field_types_buf[0..llvm_ints.len];
+    for (llvm_ints) |int_bits, i| {
+        field_types[i] = self.dg.context.intType(int_bits);
+    }
+    const ints_llvm_ty = self.dg.context.structType(field_types.ptr, @intCast(c_uint, field_types.len), .False);
+    const casted_ptr = self.builder.buildBitCast(arg_ptr, ints_llvm_ty.pointerType(0), "");
     try llvm_args.ensureUnusedCapacity(it.llvm_types_len);
-    switch (param_ty.zigTypeTag()) {
-        .Struct => {
-            const fields = param_ty.structFields().values();
-            var field_i: u32 = 0;
-            var field_offset: u32 = 0;
-            for (llvm_ints) |int_bits| {
-                const big_int_ty = self.dg.context.intType(int_bits);
-                var int_arg: *const llvm.Value = undefined;
-                var bits_used: u32 = 0;
-                while (bits_used < int_bits) {
-                    const field = fields[field_i];
-                    const field_alignment = field.normalAlignment(target);
-                    const prev_offset = field_offset;
-                    field_offset = std.mem.alignForwardGeneric(u32, field_offset, field_alignment);
-                    if (field_offset > prev_offset) {
-                        // Padding counts as bits used.
-                        bits_used += (field_offset - prev_offset) * 8;
-                        if (bits_used >= int_bits) break;
-                    }
-                    var ty_buf: Type.Payload.Pointer = undefined;
-                    const llvm_i = llvmFieldIndex(param_ty, field_i, target, &ty_buf).?;
-                    const field_size = @intCast(u16, field.ty.abiSize(target));
-                    const field_abi_bits = field_size * 8;
-
-                    // Special case for when the entire LLVM integer represents
-                    // one field; in this case keep the type information
-                    // to avoid the potentially costly ptrtoint/bitcast.
-                    if (bits_used == 0 and field_abi_bits == int_bits) {
-                        int_arg = if (is_by_ref) f: {
-                            const field_ptr = self.builder.buildStructGEP(llvm_arg, llvm_i, "");
-                            const load_inst = self.builder.buildLoad(field_ptr, "");
-                            load_inst.setAlignment(field_alignment);
-                            break :f load_inst;
-                        } else self.builder.buildExtractValue(llvm_arg, llvm_i, "");
-                        field_i += 1;
-                        break;
-                    }
-
-                    const field_int_ty = self.dg.context.intType(field_abi_bits);
-                    const llvm_field = if (is_by_ref) f: {
-                        const field_ptr = self.builder.buildStructGEP(llvm_arg, llvm_i, "");
-                        const casted_ptr = self.builder.buildBitCast(field_ptr, field_int_ty.pointerType(0), "");
-                        const load_inst = self.builder.buildLoad(casted_ptr, "");
-                        load_inst.setAlignment(field_alignment);
-                        break :f load_inst;
-                    } else f: {
-                        const llvm_field = self.builder.buildExtractValue(llvm_arg, llvm_i, "");
-                        break :f self.builder.buildBitCast(llvm_field, field_int_ty, "");
-                    };
-
-                    const extended = self.builder.buildZExt(llvm_field, big_int_ty, "");
-                    if (bits_used == 0) {
-                        int_arg = extended;
-                    } else {
-                        const shift_amt = big_int_ty.constInt(bits_used, .False);
-                        const shifted = self.builder.buildShl(extended, shift_amt, "");
-                        int_arg = self.builder.buildOr(int_arg, shifted, "");
-                    }
-
-                    field_i += 1;
-                    if (field_i >= fields.len) break;
-
-                    bits_used += field_abi_bits;
-                    field_offset += field_size;
-                }
-                llvm_args.appendAssumeCapacity(int_arg);
-                if (field_i >= fields.len) break;
-            }
-        },
-        .Union => {
-            return self.todo("airCall C calling convention on x86_64 with union argument", .{});
-        },
-        else => unreachable,
-    }
+    for (llvm_ints) |_, i_usize| {
+        const i = @intCast(c_uint, i_usize);
+        const field_ptr = self.builder.buildStructGEP(casted_ptr, i, "");
+        const load_inst = self.builder.buildLoad(field_ptr, "");
+        load_inst.setAlignment(target.cpu.arch.ptrBitWidth() / 8);
+        llvm_args.appendAssumeCapacity(load_inst);
+    }
 },
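The call site mirrors the prologue: the argument is spilled to (or already lives in) a stack slot, the slot is bitcast to a struct of the ABI integers, and each integer is loaded as a separate LLVM argument. A small usage sketch under the same assumptions as the Pair example above (pair_sum is a hypothetical extern function):

const Pair = extern struct { a: u32, b: u32, c: u64 };

// Hypothetical C-ABI callee taking a 16-byte struct by value.
extern fn pair_sum(p: Pair) u64;

export fn callIt() u64 {
    const p = Pair{ .a = 1, .b = 2, .c = 3 };
    // FuncGen spills p to a stack slot, bitcasts the slot to
    // { i64, i64 }, and passes two loaded i64s to pair_sum.
    return pair_sum(p);
}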
src/stage1/ir.cpp

@@ -5769,8 +5769,10 @@ static bool ir_emit_backward_branch(IrAnalyze *ira, AstNode* source_node) {

     *bbc += 1;
     if (*bbc > *quota) {
-        ir_add_error_node(ira, source_node,
+        ErrorMsg *msg = ir_add_error_node(ira, source_node,
             buf_sprintf("evaluation exceeded %" ZIG_PRI_usize " backwards branches", *quota));
+        add_error_note(ira->codegen, msg, source_node,
+            buf_sprintf("use @setEvalBranchQuota to raise branch limit from %" ZIG_PRI_usize, *quota));
         return false;
     }
     return true;
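The stage1 compiler gains the same note as Sema above, with slightly different wording (no parentheses on the builtin name). A sketch of code that trips it, with the expected two-line diagnostic as comments (line and column numbers omitted):

comptime {
    var i: usize = 0;
    while (i < 2000) : (i += 1) {}
}
// error: evaluation exceeded 1000 backwards branches
// note: use @setEvalBranchQuota to raise branch limit from 1000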
@@ -21573,6 +21575,7 @@ done_with_return_type:
     // handle `[N]T`
     target_len = target->type->data.array.len;
     target_sentinel = target->type->data.array.sentinel;
+    expand_undef_array(ira->codegen, target);
     target_elements = target->data.x_array.data.s_none.elements;
     break;
 } else if (target->type->id == ZigTypeIdPointer && target->type->data.pointer.child_type->id == ZigTypeIdArray) {
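expand_undef_array forces a comptime array value that is still a single undefined into per-element representation before target_elements is read. A hedged guess at the kind of comptime code this protects (illustrative only, not from the commit):

comptime {
    // An array that starts wholly undefined has no per-element storage
    // until something, such as taking a slice of it, forces expansion.
    var arr: [4]u8 = undefined;
    arr[0] = 1;
    _ = arr[0..2];
}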
test/behavior.zig

@@ -83,6 +83,7 @@ test {
     _ = @import("behavior/bugs/11181.zig");
     _ = @import("behavior/bugs/11213.zig");
     _ = @import("behavior/bugs/12003.zig");
+    _ = @import("behavior/bugs/12033.zig");
     _ = @import("behavior/byteswap.zig");
     _ = @import("behavior/byval_arg_var.zig");
     _ = @import("behavior/call.zig");
12 test/behavior/bugs/12033.zig (new file)
@@ -0,0 +1,12 @@
+const std = @import("std");
+
+test {
+    const string = "Hello!\x00World!";
+    try std.testing.expect(@TypeOf(string) == *const [13:0]u8);
+
+    const slice_without_sentinel: []const u8 = string[0..6];
+    try std.testing.expect(@TypeOf(slice_without_sentinel) == []const u8);
+
+    const slice_with_sentinel: [:0]const u8 = string[0..6 :0];
+    try std.testing.expect(@TypeOf(slice_with_sentinel) == [:0]const u8);
+}
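The key assertion is the last one: string[0..6 :0] is legal only because the element just past the slice, string[6], is the embedded \x00. A short companion sketch of that rule (the test name is illustrative):

const std = @import("std");

test "sentinel slice requires the terminator to match" {
    const string = "Hello!\x00World!";
    // Index 6 holds the embedded NUL, so a :0 sentinel slice of the
    // first six bytes is valid.
    try std.testing.expectEqual(@as(u8, 0), string[6]);
    const s: [:0]const u8 = string[0..6 :0];
    try std.testing.expectEqualStrings("Hello!", s);
}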
@@ -14,6 +14,7 @@ inline fn fibonacci(n: usize) usize {
 // error
 //
 // :11:21: error: evaluation exceeded 1000 backwards branches
+// :11:21: note: use @setEvalBranchQuota() to raise the branch limit from 1000
 // :11:40: note: called from here (6 times)
 // :11:21: note: called from here (495 times)
 // :5:24: note: called from here