mirror of https://github.com/ziglang/zig.git
parent 138a35df8f
commit 59375b3c22
@@ -5137,7 +5137,7 @@ pub const FuncGen = struct {
 try attributes.addParamAttr(0, .{ .sret = llvm_ret_ty }, &o.builder);

 const alignment = return_type.abiAlignment(mod).toLlvm();
-const ret_ptr = try self.buildAlloca(llvm_ret_ty, alignment);
+const ret_ptr = try self.buildAllocaWorkaround(return_type, alignment);
 try llvm_args.append(ret_ptr);
 break :blk ret_ptr;
 };
@@ -5186,7 +5186,7 @@ pub const FuncGen = struct {

 const alignment = param_ty.abiAlignment(mod).toLlvm();
 const param_llvm_ty = try o.lowerType(param_ty);
-const arg_ptr = try self.buildAlloca(param_llvm_ty, alignment);
+const arg_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
 if (isByRef(param_ty, mod)) {
 const loaded = try self.wip.load(.normal, param_llvm_ty, llvm_arg, alignment, "");
 _ = try self.wip.store(.normal, loaded, arg_ptr, alignment);
@@ -5209,7 +5209,7 @@ pub const FuncGen = struct {
 // LLVM does not allow bitcasting structs so we must allocate
 // a local, store as one type, and then load as another type.
 const alignment = param_ty.abiAlignment(mod).toLlvm();
-const int_ptr = try self.buildAlloca(int_llvm_ty, alignment);
+const int_ptr = try self.buildAllocaWorkaround(param_ty, alignment);
 _ = try self.wip.store(.normal, llvm_arg, int_ptr, alignment);
 const loaded = try self.wip.load(.normal, int_llvm_ty, int_ptr, alignment, "");
 try llvm_args.append(loaded);
@@ -5552,7 +5552,7 @@ pub const FuncGen = struct {
 const mod = o.module;

 const result_alignment = va_list_ty.abiAlignment(mod).toLlvm();
-const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
+const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);

 _ = try self.wip.callIntrinsic(.normal, .none, .va_copy, &.{}, &.{ dest_list, src_list }, "");
 return if (isByRef(va_list_ty, mod))
@@ -5576,7 +5576,7 @@ pub const FuncGen = struct {
 const llvm_va_list_ty = try o.lowerType(va_list_ty);

 const result_alignment = va_list_ty.abiAlignment(mod).toLlvm();
-const dest_list = try self.buildAlloca(llvm_va_list_ty, result_alignment);
+const dest_list = try self.buildAllocaWorkaround(va_list_ty, result_alignment);

 _ = try self.wip.callIntrinsic(.normal, .none, .va_start, &.{}, &.{dest_list}, "");
 return if (isByRef(va_list_ty, mod))
@@ -7407,7 +7407,7 @@ pub const FuncGen = struct {
 self.ret_ptr
 else brk: {
 const alignment = optional_ty.abiAlignment(mod).toLlvm();
-const optional_ptr = try self.buildAlloca(llvm_optional_ty, alignment);
+const optional_ptr = try self.buildAllocaWorkaround(optional_ty, alignment);
 break :brk optional_ptr;
 };

@@ -7443,7 +7443,7 @@ pub const FuncGen = struct {
 self.ret_ptr
 else brk: {
 const alignment = err_un_ty.abiAlignment(mod).toLlvm();
-const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
+const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
 break :brk result_ptr;
 };

@@ -7481,7 +7481,7 @@ pub const FuncGen = struct {
 self.ret_ptr
 else brk: {
 const alignment = err_un_ty.abiAlignment(mod).toLlvm();
-const result_ptr = try self.buildAlloca(err_un_llvm_ty, alignment);
+const result_ptr = try self.buildAllocaWorkaround(err_un_ty, alignment);
 break :brk result_ptr;
 };

@@ -7965,7 +7965,7 @@ pub const FuncGen = struct {

 if (isByRef(inst_ty, mod)) {
 const result_alignment = inst_ty.abiAlignment(mod).toLlvm();
-const alloca_inst = try self.buildAlloca(llvm_inst_ty, result_alignment);
+const alloca_inst = try self.buildAllocaWorkaround(inst_ty, result_alignment);
 {
 const field_ptr = try self.wip.gepStruct(llvm_inst_ty, alloca_inst, result_index, "");
 _ = try self.wip.store(.normal, result_val, field_ptr, result_alignment);
@@ -8323,7 +8323,7 @@ pub const FuncGen = struct {

 if (isByRef(dest_ty, mod)) {
 const result_alignment = dest_ty.abiAlignment(mod).toLlvm();
-const alloca_inst = try self.buildAlloca(llvm_dest_ty, result_alignment);
+const alloca_inst = try self.buildAllocaWorkaround(dest_ty, result_alignment);
 {
 const field_ptr = try self.wip.gepStruct(llvm_dest_ty, alloca_inst, result_index, "");
 _ = try self.wip.store(.normal, result, field_ptr, result_alignment);
@@ -8614,7 +8614,7 @@ pub const FuncGen = struct {
 if (!result_is_ref) {
 return self.dg.todo("implement bitcast vector to non-ref array", .{});
 }
-const array_ptr = try self.buildAlloca(llvm_dest_ty, .default);
+const array_ptr = try self.buildAllocaWorkaround(inst_ty, .default);
 const bitcast_ok = elem_ty.bitSize(mod) == elem_ty.abiSize(mod) * 8;
 if (bitcast_ok) {
 const alignment = inst_ty.abiAlignment(mod).toLlvm();
@@ -8676,7 +8676,7 @@ pub const FuncGen = struct {

 if (result_is_ref) {
 const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm();
-const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
+const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
 _ = try self.wip.store(.normal, operand, result_ptr, alignment);
 return result_ptr;
 }
@@ -8688,7 +8688,7 @@ pub const FuncGen = struct {
 // but LLVM won't let us bitcast struct values or vectors with padding bits.
 // Therefore, we store operand to alloca, then load for result.
 const alignment = operand_ty.abiAlignment(mod).max(inst_ty.abiAlignment(mod)).toLlvm();
-const result_ptr = try self.buildAlloca(llvm_dest_ty, alignment);
+const result_ptr = try self.buildAllocaWorkaround(inst_ty, alignment);
 _ = try self.wip.store(.normal, operand, result_ptr, alignment);
 return self.wip.load(.normal, llvm_dest_ty, result_ptr, alignment, "");
 }
@@ -8753,9 +8753,9 @@ pub const FuncGen = struct {
 if (!pointee_type.isFnOrHasRuntimeBitsIgnoreComptime(mod))
 return (try o.lowerPtrToVoid(ptr_ty)).toValue();

-const pointee_llvm_ty = try o.lowerType(pointee_type);
+//const pointee_llvm_ty = try o.lowerType(pointee_type);
 const alignment = ptr_ty.ptrAlignment(mod).toLlvm();
-return self.buildAlloca(pointee_llvm_ty, alignment);
+return self.buildAllocaWorkaround(pointee_type, alignment);
 }

 fn airRetPtr(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
@@ -8766,9 +8766,9 @@ pub const FuncGen = struct {
 if (!ret_ty.isFnOrHasRuntimeBitsIgnoreComptime(mod))
 return (try o.lowerPtrToVoid(ptr_ty)).toValue();
 if (self.ret_ptr != .none) return self.ret_ptr;
-const ret_llvm_ty = try o.lowerType(ret_ty);
+//const ret_llvm_ty = try o.lowerType(ret_ty);
 const alignment = ptr_ty.ptrAlignment(mod).toLlvm();
-return self.buildAlloca(ret_llvm_ty, alignment);
+return self.buildAllocaWorkaround(ret_ty, alignment);
 }

 /// Use this instead of builder.buildAlloca, because this function makes sure to
@@ -8782,6 +8782,16 @@ pub const FuncGen = struct {
 return buildAllocaInner(&self.wip, self.di_scope != null, llvm_ty, alignment, target);
 }

+// Workaround for https://github.com/ziglang/zig/issues/16392
+fn buildAllocaWorkaround(
+self: *FuncGen,
+ty: Type,
+alignment: Builder.Alignment,
+) Allocator.Error!Builder.Value {
+const o = self.dg.object;
+return self.buildAlloca(try o.builder.arrayType(ty.abiSize(o.module), .i8), alignment);
+}
+
 fn airStore(self: *FuncGen, inst: Air.Inst.Index, safety: bool) !Builder.Value {
 const o = self.dg.object;
 const mod = o.module;
@@ -9736,7 +9746,7 @@ pub const FuncGen = struct {
 const llvm_result_ty = accum_init.typeOfWip(&self.wip);

 // Allocate and initialize our mutable variables
-const i_ptr = try self.buildAlloca(usize_ty, .default);
+const i_ptr = try self.buildAllocaWorkaround(Type.usize, .default);
 _ = try self.wip.store(.normal, try o.builder.intValue(usize_ty, 0), i_ptr, .default);
 const accum_ptr = try self.buildAlloca(llvm_result_ty, .default);
 _ = try self.wip.store(.normal, accum_init, accum_ptr, .default);
@@ -9948,7 +9958,7 @@ pub const FuncGen = struct {
 // TODO in debug builds init to undef so that the padding will be 0xaa
 // even if we fully populate the fields.
 const alignment = result_ty.abiAlignment(mod).toLlvm();
-const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
+const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);

 for (elements, 0..) |elem, i| {
 if ((try result_ty.structFieldValueComptime(mod, i)) != null) continue;
@@ -9985,7 +9995,7 @@ pub const FuncGen = struct {
 const llvm_usize = try o.lowerType(Type.usize);
 const usize_zero = try o.builder.intValue(llvm_usize, 0);
 const alignment = result_ty.abiAlignment(mod).toLlvm();
-const alloca_inst = try self.buildAlloca(llvm_result_ty, alignment);
+const alloca_inst = try self.buildAllocaWorkaround(result_ty, alignment);

 const array_info = result_ty.arrayInfo(mod);
 const elem_ptr_ty = try mod.ptrType(.{
@@ -10061,7 +10071,7 @@ pub const FuncGen = struct {
 // We must construct the correct unnamed struct type here, in order to then set
 // the fields appropriately.
 const alignment = layout.abi_align.toLlvm();
-const result_ptr = try self.buildAlloca(union_llvm_ty, alignment);
+const result_ptr = try self.buildAllocaWorkaround(union_ty, alignment);
 const llvm_payload = try self.resolveInst(extra.init);
 const field_ty = union_obj.field_types.get(ip)[extra.field_index].toType();
 const field_llvm_ty = try o.lowerType(field_ty);
@@ -10340,7 +10350,7 @@ pub const FuncGen = struct {

 if (isByRef(optional_ty, mod)) {
 const payload_alignment = optional_ty.abiAlignment(mod).toLlvm();
-const alloca_inst = try self.buildAlloca(optional_llvm_ty, payload_alignment);
+const alloca_inst = try self.buildAllocaWorkaround(optional_ty, payload_alignment);

 {
 const field_ptr = try self.wip.gepStruct(optional_llvm_ty, alloca_inst, 0, "");
@@ -10481,9 +10491,9 @@ pub const FuncGen = struct {
 ) !Builder.Value {
 const o = fg.dg.object;
 const mod = o.module;
-const pointee_llvm_ty = try o.lowerType(pointee_type);
+//const pointee_llvm_ty = try o.lowerType(pointee_type);
 const result_align = InternPool.Alignment.fromLlvm(ptr_alignment).max(pointee_type.abiAlignment(mod)).toLlvm();
-const result_ptr = try fg.buildAlloca(pointee_llvm_ty, result_align);
+const result_ptr = try fg.buildAllocaWorkaround(pointee_type, result_align);
 const size_bytes = pointee_type.abiSize(mod);
 _ = try fg.wip.callMemCpy(
 result_ptr,
@@ -10542,7 +10552,7 @@ pub const FuncGen = struct {

 if (isByRef(elem_ty, mod)) {
 const result_align = elem_ty.abiAlignment(mod).toLlvm();
-const result_ptr = try self.buildAlloca(elem_llvm_ty, result_align);
+const result_ptr = try self.buildAllocaWorkaround(elem_ty, result_align);

 const same_size_int = try o.builder.intType(@intCast(elem_bits));
 const truncated_int = try self.wip.cast(.trunc, shifted_value, same_size_int, "");
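Taken together, these hunks route the aggregate stack allocations above through the new buildAllocaWorkaround helper: instead of an alloca of the lowered value type, it emits an alloca of an i8 array whose length is the type's ABI size, at the same alignment (see https://github.com/ziglang/zig/issues/16392 for the underlying LLVM problem). A minimal sketch of that size/alignment mapping, using a hypothetical SomeStruct that is not from the commit:

    const std = @import("std");

    // Hypothetical aggregate standing in for a type the compiler lowers to LLVM.
    const SomeStruct = struct { a: u64, b: u32, c: u16 };

    pub fn main() void {
        // buildAlloca would request `alloca %SomeStruct, align A` from LLVM;
        // buildAllocaWorkaround instead requests `alloca [N x i8], align A`,
        // where N is the type's ABI size (@sizeOf) and A is unchanged.
        const n: usize = @sizeOf(SomeStruct);
        const a: usize = @alignOf(SomeStruct);
        std.debug.print("alloca [{d} x i8], align {d}\n", .{ n, a });
    }

Callers need no changes because, with LLVM's opaque pointers, the alloca's allocated type is invisible to the loads, stores, and GEPs that later use the pointer.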