Merge pull request #11583 from ziglang/stage2-test-behavior
stage2 behavior tests for all targets passing with the LLVM backend
Commit: 59905a62f9
@@ -1773,6 +1773,88 @@ pub const Target = struct {
             else => false,
         };
     }
+
+    pub inline fn maxIntAlignment(target: Target) u16 {
+        return switch (target.cpu.arch) {
+            .avr => 1,
+            .msp430 => 2,
+            .xcore => 4,
+
+            .arm,
+            .armeb,
+            .thumb,
+            .thumbeb,
+            .hexagon,
+            .mips,
+            .mipsel,
+            .powerpc,
+            .powerpcle,
+            .r600,
+            .amdgcn,
+            .riscv32,
+            .sparc,
+            .sparcel,
+            .s390x,
+            .lanai,
+            .wasm32,
+            .wasm64,
+            => 8,
+
+            .i386 => return switch (target.os.tag) {
+                .windows => 8,
+                else => 4,
+            },
+
+            // For x86_64, LLVMABIAlignmentOfType(i128) reports 8. However I think 16
+            // is a better number for three reasons:
+            // 1. Better machine code when loading into SIMD register.
+            // 2. The C ABI wants 16 for extern structs.
+            // 3. 16-byte cmpxchg needs 16-byte alignment.
+            // Same logic for riscv64, powerpc64, mips64, sparcv9.
+            .x86_64,
+            .riscv64,
+            .powerpc64,
+            .powerpc64le,
+            .mips64,
+            .mips64el,
+            .sparcv9,
+
+            // Even LLVMABIAlignmentOfType(i128) agrees on these targets.
+            .aarch64,
+            .aarch64_be,
+            .aarch64_32,
+            .bpfel,
+            .bpfeb,
+            .nvptx,
+            .nvptx64,
+            => 16,
+
+            // Below this comment are unverified but based on the fact that C requires
+            // int128_t to be 16 bytes aligned, it's a safe default.
+            .spu_2,
+            .csky,
+            .arc,
+            .m68k,
+            .tce,
+            .tcele,
+            .le32,
+            .amdil,
+            .hsail,
+            .spir,
+            .kalimba,
+            .renderscript32,
+            .spirv32,
+            .shave,
+            .le64,
+            .amdil64,
+            .hsail64,
+            .spir64,
+            .renderscript64,
+            .ve,
+            .spirv64,
+            => 16,
+        };
+    }
 };

 test {
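A quick way to observe the new value from user code is @alignOf. A minimal sketch of a check, assuming an x86_64 build where maxIntAlignment() is 16:

    const std = @import("std");
    const builtin = @import("builtin");

    test "u128 ABI alignment follows Target.maxIntAlignment" {
        // Only meaningful on x86_64, where maxIntAlignment() is 16.
        if (builtin.cpu.arch != .x86_64) return error.SkipZigTest;
        try std.testing.expect(@alignOf(u128) == 16);
    }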
@@ -7423,42 +7423,22 @@ fn builtinCall(
         },

         .atomic_load => {
-            const int_type = try typeExpr(gz, scope, params[0]);
-            // TODO allow this pointer type to be volatile
-            const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
-                .ptr_type_simple = .{
-                    .is_allowzero = false,
-                    .is_mutable = false,
-                    .is_volatile = false,
-                    .size = .One,
-                    .elem_type = int_type,
-                },
-            } });
-            const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.Bin{
+            const result = try gz.addPlNode(.atomic_load, node, Zir.Inst.AtomicLoad{
                 // zig fmt: off
-                .lhs = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
-                .rhs = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[2]),
+                .elem_type = try typeExpr(gz, scope, params[0]),
+                .ptr       = try expr    (gz, scope, .none, params[1]),
+                .ordering  = try expr    (gz, scope, .{ .coerced_ty = .atomic_order_type }, params[2]),
                 // zig fmt: on
             });
             return rvalue(gz, rl, result, node);
         },
         .atomic_rmw => {
             const int_type = try typeExpr(gz, scope, params[0]);
-            // TODO allow this pointer type to be volatile
-            const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
-                .ptr_type_simple = .{
-                    .is_allowzero = false,
-                    .is_mutable = true,
-                    .is_volatile = false,
-                    .size = .One,
-                    .elem_type = int_type,
-                },
-            } });
             const result = try gz.addPlNode(.atomic_rmw, node, Zir.Inst.AtomicRmw{
                 // zig fmt: off
-                .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
+                .ptr = try expr(gz, scope, .none, params[1]),
                 .operation = try expr(gz, scope, .{ .coerced_ty = .atomic_rmw_op_type }, params[2]),
-                .operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]),
+                .operand = try expr(gz, scope, .{ .ty = int_type }, params[3]),
                 .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]),
                 // zig fmt: on
             });
@@ -7466,20 +7446,10 @@ fn builtinCall(
         },
         .atomic_store => {
             const int_type = try typeExpr(gz, scope, params[0]);
-            // TODO allow this pointer type to be volatile
-            const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
-                .ptr_type_simple = .{
-                    .is_allowzero = false,
-                    .is_mutable = true,
-                    .is_volatile = false,
-                    .size = .One,
-                    .elem_type = int_type,
-                },
-            } });
             const result = try gz.addPlNode(.atomic_store, node, Zir.Inst.AtomicStore{
                 // zig fmt: off
-                .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
-                .operand = try expr(gz, scope, .{ .coerced_ty = int_type }, params[2]),
+                .ptr = try expr(gz, scope, .none, params[1]),
+                .operand = try expr(gz, scope, .{ .ty = int_type }, params[2]),
                 .ordering = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[3]),
                 // zig fmt: on
             });
@@ -7684,20 +7654,10 @@ fn cmpxchg(
     tag: Zir.Inst.Tag,
 ) InnerError!Zir.Inst.Ref {
     const int_type = try typeExpr(gz, scope, params[0]);
-    // TODO: allow this to be volatile
-    const ptr_type = try gz.add(.{ .tag = .ptr_type_simple, .data = .{
-        .ptr_type_simple = .{
-            .is_allowzero = false,
-            .is_mutable = true,
-            .is_volatile = false,
-            .size = .One,
-            .elem_type = int_type,
-        },
-    } });
     const result = try gz.addPlNode(tag, node, Zir.Inst.Cmpxchg{
         // zig fmt: off
-        .ptr = try expr(gz, scope, .{ .coerced_ty = ptr_type }, params[1]),
-        .expected_value = try expr(gz, scope, .{ .coerced_ty = int_type }, params[2]),
+        .ptr = try expr(gz, scope, .none, params[1]),
+        .expected_value = try expr(gz, scope, .{ .ty = int_type }, params[2]),
        .new_value = try expr(gz, scope, .{ .coerced_ty = int_type }, params[3]),
        .success_order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[4]),
        .failure_order = try expr(gz, scope, .{ .coerced_ty = .atomic_order_type }, params[5]),
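These AstGen changes only affect how the builtins are encoded in ZIR (the pointer argument is no longer coerced here; Sema casts it instead). The surface syntax is unchanged; for orientation, a small sketch of the four builtins these instructions lower:

    const std = @import("std");

    test "atomic builtins that lower to these ZIR instructions" {
        var x: u32 = 1;
        const old = @atomicRmw(u32, &x, .Add, 1, .SeqCst);
        try std.testing.expect(old == 1);
        @atomicStore(u32, &x, 10, .SeqCst);
        try std.testing.expect(@atomicLoad(u32, &x, .SeqCst) == 10);
        // cmpxchg returns null on success, the observed value on failure.
        try std.testing.expect(@cmpxchgStrong(u32, &x, 10, 20, .SeqCst, .SeqCst) == null);
    }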
@@ -151,6 +151,8 @@ allocated_decls: std.SegmentedList(Decl, 0) = .{},
 /// When a Decl object is freed from `allocated_decls`, it is pushed into this stack.
 decls_free_list: std.ArrayListUnmanaged(Decl.Index) = .{},

+global_assembly: std.AutoHashMapUnmanaged(Decl.Index, []u8) = .{},
+
 const MonomorphedFuncsSet = std.HashMapUnmanaged(
     *Fn,
     void,
@@ -2831,6 +2833,7 @@ pub fn deinit(mod: *Module) void {

     mod.decls_free_list.deinit(gpa);
     mod.allocated_decls.deinit(gpa);
+    mod.global_assembly.deinit(gpa);
 }

 pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
@@ -2842,6 +2845,9 @@ pub fn destroyDecl(mod: *Module, decl_index: Decl.Index) void {
     if (decl.deletion_flag) {
         assert(mod.deletion_set.swapRemove(decl_index));
     }
+    if (mod.global_assembly.fetchRemove(decl_index)) |kv| {
+        gpa.free(kv.value);
+    }
     if (decl.has_tv) {
         if (decl.getInnerNamespace()) |namespace| {
             namespace.destroyDecls(mod);
@@ -5714,3 +5720,12 @@ pub fn markDeclAlive(mod: *Module, decl: *Decl) void {
 fn markDeclIndexAlive(mod: *Module, decl_index: Decl.Index) void {
     return mod.markDeclAlive(mod.declPtr(decl_index));
 }
+
+pub fn addGlobalAssembly(mod: *Module, decl_index: Decl.Index, source: []const u8) !void {
+    try mod.global_assembly.ensureUnusedCapacity(mod.gpa, 1);
+
+    const duped_source = try mod.gpa.dupe(u8, source);
+    errdefer mod.gpa.free(duped_source);
+
+    mod.global_assembly.putAssumeCapacityNoClobber(decl_index, duped_source);
+}
src/Sema.zig (180 changed lines)
@@ -10517,16 +10517,35 @@ fn zirAsm(
     const is_volatile = @truncate(u1, extended.small >> 15) != 0;
     const is_global_assembly = sema.func == null;

-    if (block.is_comptime and !is_global_assembly) {
-        try sema.requireRuntimeBlock(block, src);
-    }
-
     if (extra.data.asm_source == 0) {
         // This can move to become an AstGen error after inline assembly improvements land
         // and stage1 code matches stage2 code.
         return sema.fail(block, src, "assembly code must use string literal syntax", .{});
     }

+    const asm_source = sema.code.nullTerminatedString(extra.data.asm_source);
+
+    if (is_global_assembly) {
+        if (outputs_len != 0) {
+            return sema.fail(block, src, "module-level assembly does not support outputs", .{});
+        }
+        if (inputs_len != 0) {
+            return sema.fail(block, src, "module-level assembly does not support inputs", .{});
+        }
+        if (clobbers_len != 0) {
+            return sema.fail(block, src, "module-level assembly does not support clobbers", .{});
+        }
+        if (is_volatile) {
+            return sema.fail(block, src, "volatile keyword is redundant on module-level assembly", .{});
+        }
+        try sema.mod.addGlobalAssembly(sema.owner_decl_index, asm_source);
+        return Air.Inst.Ref.void_value;
+    }
+
+    if (block.is_comptime) {
+        try sema.requireRuntimeBlock(block, src);
+    }
+
     if (outputs_len > 1) {
         return sema.fail(block, src, "TODO implement Sema for asm with more than 1 output", .{});
     }
@@ -10591,7 +10610,6 @@ fn zirAsm(
         needed_capacity += name.*.len / 4 + 1;
     }

-    const asm_source = sema.code.nullTerminatedString(extra.data.asm_source);
     needed_capacity += (asm_source.len + 3) / 4;

     const gpa = sema.gpa;
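The new is_global_assembly path accepts container-level asm with no outputs, inputs, clobbers, or volatile, and registers the source through addGlobalAssembly. A minimal sketch of what this enables; it only builds for x86-64, and the symbol my_func and its body are invented for illustration:

    const std = @import("std");

    // Module-level assembly: collected per Decl and emitted by the LLVM
    // backend via setModuleInlineAsm2 (see the codegen hunks further down).
    comptime {
        asm (
            \\.globl my_func;
            \\.type my_func, @function;
            \\my_func:
            \\  movl $42, %eax
            \\  retq
        );
    }

    extern fn my_func() c_int;

    test "call a function defined in module-level assembly" {
        try std.testing.expect(my_func() == 42);
    }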
@@ -14715,51 +14733,64 @@ fn checkNumericType(
     }
 }

-fn checkAtomicOperandType(
-    sema: *Sema,
-    block: *Block,
-    ty_src: LazySrcLoc,
-    ty: Type,
-) CompileError!void {
-    var buffer: Type.Payload.Bits = undefined;
-    const target = sema.mod.getTarget();
-    const max_atomic_bits = target_util.largestAtomicBits(target);
-    const int_ty = switch (ty.zigTypeTag()) {
-        .Int => ty,
-        .Enum => ty.intTagType(&buffer),
-        .Float => {
-            const bit_count = ty.floatBits(target);
-            if (bit_count > max_atomic_bits) {
-                return sema.fail(
-                    block,
-                    ty_src,
-                    "expected {d}-bit float type or smaller; found {d}-bit float type",
-                    .{ max_atomic_bits, bit_count },
-                );
-            }
-            return;
-        },
-        .Bool => return, // Will be treated as `u8`.
-        else => {
-            if (ty.isPtrAtRuntime()) return;
-
-            return sema.fail(
-                block,
-                ty_src,
-                "expected bool, integer, float, enum, or pointer type; found {}",
-                .{ty.fmt(sema.mod)},
-            );
-        },
-    };
-    const bit_count = int_ty.intInfo(target).bits;
-    if (bit_count > max_atomic_bits) {
-        return sema.fail(
-            block,
-            ty_src,
-            "expected {d}-bit integer type or smaller; found {d}-bit integer type",
-            .{ max_atomic_bits, bit_count },
-        );
-    }
+/// Returns the casted pointer.
+fn checkAtomicPtrOperand(
+    sema: *Sema,
+    block: *Block,
+    elem_ty: Type,
+    elem_ty_src: LazySrcLoc,
+    ptr: Air.Inst.Ref,
+    ptr_src: LazySrcLoc,
+    ptr_const: bool,
+) CompileError!Air.Inst.Ref {
+    const target = sema.mod.getTarget();
+    var diag: target_util.AtomicPtrAlignmentDiagnostics = .{};
+    const alignment = target_util.atomicPtrAlignment(target, elem_ty, &diag) catch |err| switch (err) {
+        error.FloatTooBig => return sema.fail(
+            block,
+            elem_ty_src,
+            "expected {d}-bit float type or smaller; found {d}-bit float type",
+            .{ diag.max_bits, diag.bits },
+        ),
+        error.IntTooBig => return sema.fail(
+            block,
+            elem_ty_src,
+            "expected {d}-bit integer type or smaller; found {d}-bit integer type",
+            .{ diag.max_bits, diag.bits },
+        ),
+        error.BadType => return sema.fail(
+            block,
+            elem_ty_src,
+            "expected bool, integer, float, enum, or pointer type; found {}",
+            .{elem_ty.fmt(sema.mod)},
+        ),
+    };
+
+    var wanted_ptr_data: Type.Payload.Pointer.Data = .{
+        .pointee_type = elem_ty,
+        .@"align" = alignment,
+        .@"addrspace" = .generic,
+        .mutable = !ptr_const,
+    };
+
+    const ptr_ty = sema.typeOf(ptr);
+    const ptr_data = switch (try ptr_ty.zigTypeTagOrPoison()) {
+        .Pointer => ptr_ty.ptrInfo().data,
+        else => {
+            const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
+            _ = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
+            unreachable;
+        },
+    };
+
+    wanted_ptr_data.@"addrspace" = ptr_data.@"addrspace";
+    wanted_ptr_data.@"allowzero" = ptr_data.@"allowzero";
+    wanted_ptr_data.@"volatile" = ptr_data.@"volatile";
+
+    const wanted_ptr_ty = try Type.ptr(sema.arena, sema.mod, wanted_ptr_data);
+    const casted_ptr = try sema.coerce(block, wanted_ptr_ty, ptr, ptr_src);
+
+    return casted_ptr;
 }

 fn checkPtrIsNotComptimeMutable(
@@ -15036,10 +15067,8 @@ fn zirCmpxchg(
     const success_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
     const failure_order_src: LazySrcLoc = .{ .node_offset_builtin_call_arg5 = inst_data.src_node };
     // zig fmt: on
-    const ptr = sema.resolveInst(extra.ptr);
-    const ptr_ty = sema.typeOf(ptr);
-    const elem_ty = ptr_ty.elemType();
-    try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
+    const expected_value = sema.resolveInst(extra.expected_value);
+    const elem_ty = sema.typeOf(expected_value);
     if (elem_ty.zigTypeTag() == .Float) {
         return sema.fail(
             block,
@@ -15048,7 +15077,8 @@ fn zirCmpxchg(
             .{elem_ty.fmt(sema.mod)},
         );
     }
-    const expected_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.expected_value), expected_src);
+    const uncasted_ptr = sema.resolveInst(extra.ptr);
+    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
     const new_value = try sema.coerce(block, elem_ty, sema.resolveInst(extra.new_value), new_value_src);
     const success_order = try sema.resolveAtomicOrder(block, success_order_src, extra.success_order);
     const failure_order = try sema.resolveAtomicOrder(block, failure_order_src, extra.failure_order);
@@ -15081,6 +15111,7 @@ fn zirCmpxchg(
             // to become undef as well
             return sema.addConstUndef(result_ty);
         }
+        const ptr_ty = sema.typeOf(ptr);
         const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
         const result_val = if (stored_val.eql(expected_val, elem_ty, sema.mod)) blk: {
             try sema.storePtr(block, src, ptr, new_value);
@@ -15487,17 +15518,16 @@ fn zirSelect(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.

 fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
     const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
-    const extra = sema.code.extraData(Zir.Inst.Bin, inst_data.payload_index).data;
+    const extra = sema.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data;
     // zig fmt: off
     const elem_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ptr_src    : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const order_src  : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
     // zig fmt: on
-    const ptr = sema.resolveInst(extra.lhs);
-    const ptr_ty = sema.typeOf(ptr);
-    const elem_ty = ptr_ty.elemType();
-    try sema.checkAtomicOperandType(block, elem_ty_src, elem_ty);
-    const order = try sema.resolveAtomicOrder(block, order_src, extra.rhs);
+    const elem_ty = try sema.resolveType(block, elem_ty_src, extra.elem_type);
+    const uncasted_ptr = sema.resolveInst(extra.ptr);
+    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, true);
+    const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering);

     switch (order) {
         .Release, .AcqRel => {
@@ -15516,7 +15546,7 @@ fn zirAtomicLoad(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
     }

     if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| {
-        if (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) |elem_val| {
+        if (try sema.pointerDeref(block, ptr_src, ptr_val, sema.typeOf(ptr))) |elem_val| {
             return sema.addConstant(elem_ty, elem_val);
         }
     }
@@ -15536,19 +15566,19 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     const extra = sema.code.extraData(Zir.Inst.AtomicRmw, inst_data.payload_index).data;
     const src = inst_data.src();
     // zig fmt: off
-    const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+    const elem_ty_src : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ptr_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const op_src      : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
     const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
     const order_src   : LazySrcLoc = .{ .node_offset_builtin_call_arg4 = inst_data.src_node };
     // zig fmt: on
-    const ptr = sema.resolveInst(extra.ptr);
-    const ptr_ty = sema.typeOf(ptr);
-    const operand_ty = ptr_ty.elemType();
-    try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
+    const operand = sema.resolveInst(extra.operand);
+    const elem_ty = sema.typeOf(operand);
+    const uncasted_ptr = sema.resolveInst(extra.ptr);
+    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
     const op = try sema.resolveAtomicRmwOp(block, op_src, extra.operation);

-    switch (operand_ty.zigTypeTag()) {
+    switch (elem_ty.zigTypeTag()) {
         .Enum => if (op != .Xchg) {
             return sema.fail(block, op_src, "@atomicRmw with enum only allowed with .Xchg", .{});
         },
@@ -15561,7 +15591,6 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         },
         else => {},
     }
-    const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src);
     const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering);

     if (order == .Unordered) {
@@ -15569,8 +15598,8 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
     }

     // special case zero bit types
-    if (try sema.typeHasOnePossibleValue(block, operand_ty_src, operand_ty)) |val| {
-        return sema.addConstant(operand_ty, val);
+    if (try sema.typeHasOnePossibleValue(block, elem_ty_src, elem_ty)) |val| {
+        return sema.addConstant(elem_ty, val);
     }

     const runtime_src = if (try sema.resolveDefinedValue(block, ptr_src, ptr)) |ptr_val| rs: {
@@ -15581,22 +15610,23 @@ fn zirAtomicRmw(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!A
         };
         if (ptr_val.isComptimeMutablePtr()) {
             const target = sema.mod.getTarget();
+            const ptr_ty = sema.typeOf(ptr);
             const stored_val = (try sema.pointerDeref(block, ptr_src, ptr_val, ptr_ty)) orelse break :rs ptr_src;
             const new_val = switch (op) {
                 // zig fmt: off
                 .Xchg => operand_val,
-                .Add  => try stored_val.numberAddWrap(operand_val, operand_ty, sema.arena, target),
-                .Sub  => try stored_val.numberSubWrap(operand_val, operand_ty, sema.arena, target),
-                .And  => try stored_val.bitwiseAnd   (operand_val, operand_ty, sema.arena, target),
-                .Nand => try stored_val.bitwiseNand  (operand_val, operand_ty, sema.arena, target),
-                .Or   => try stored_val.bitwiseOr    (operand_val, operand_ty, sema.arena, target),
-                .Xor  => try stored_val.bitwiseXor   (operand_val, operand_ty, sema.arena, target),
+                .Add  => try stored_val.numberAddWrap(operand_val, elem_ty, sema.arena, target),
+                .Sub  => try stored_val.numberSubWrap(operand_val, elem_ty, sema.arena, target),
+                .And  => try stored_val.bitwiseAnd   (operand_val, elem_ty, sema.arena, target),
+                .Nand => try stored_val.bitwiseNand  (operand_val, elem_ty, sema.arena, target),
+                .Or   => try stored_val.bitwiseOr    (operand_val, elem_ty, sema.arena, target),
+                .Xor  => try stored_val.bitwiseXor   (operand_val, elem_ty, sema.arena, target),
                 .Max  => stored_val.numberMax        (operand_val, target),
                 .Min  => stored_val.numberMin        (operand_val, target),
                 // zig fmt: on
             };
-            try sema.storePtrVal(block, src, ptr_val, new_val, operand_ty);
-            return sema.addConstant(operand_ty, stored_val);
+            try sema.storePtrVal(block, src, ptr_val, new_val, elem_ty);
+            return sema.addConstant(elem_ty, stored_val);
         } else break :rs ptr_src;
     } else ptr_src;

@@ -15620,15 +15650,15 @@ fn zirAtomicStore(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError
     const extra = sema.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data;
     const src = inst_data.src();
     // zig fmt: off
-    const operand_ty_src: LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
+    const elem_ty_src : LazySrcLoc = .{ .node_offset_builtin_call_arg0 = inst_data.src_node };
     const ptr_src     : LazySrcLoc = .{ .node_offset_builtin_call_arg1 = inst_data.src_node };
     const operand_src : LazySrcLoc = .{ .node_offset_builtin_call_arg2 = inst_data.src_node };
     const order_src   : LazySrcLoc = .{ .node_offset_builtin_call_arg3 = inst_data.src_node };
     // zig fmt: on
-    const ptr = sema.resolveInst(extra.ptr);
-    const operand_ty = sema.typeOf(ptr).elemType();
-    try sema.checkAtomicOperandType(block, operand_ty_src, operand_ty);
-    const operand = try sema.coerce(block, operand_ty, sema.resolveInst(extra.operand), operand_src);
+    const operand = sema.resolveInst(extra.operand);
+    const elem_ty = sema.typeOf(operand);
+    const uncasted_ptr = sema.resolveInst(extra.ptr);
+    const ptr = try sema.checkAtomicPtrOperand(block, elem_ty, elem_ty_src, uncasted_ptr, ptr_src, false);
     const order = try sema.resolveAtomicOrder(block, order_src, extra.ordering);

     const air_tag: Air.Inst.Tag = switch (order) {
@@ -903,7 +903,7 @@ pub const Inst = struct {
         /// Uses the `pl_node` union field with payload `Select`.
         select,
         /// Implements the `@atomicLoad` builtin.
-        /// Uses the `pl_node` union field with payload `Bin`.
+        /// Uses the `pl_node` union field with payload `AtomicLoad`.
         atomic_load,
         /// Implements the `@atomicRmw` builtin.
         /// Uses the `pl_node` union field with payload `AtomicRmw`.
@@ -3293,6 +3293,12 @@ pub const Inst = struct {
         ordering: Ref,
     };

+    pub const AtomicLoad = struct {
+        elem_type: Ref,
+        ptr: Ref,
+        ordering: Ref,
+    };
+
     pub const MulAdd = struct {
         mulend1: Ref,
         mulend2: Ref,
@@ -12,13 +12,10 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
     // and the registers used for those arguments. Any argument that doesn't fit in 8
     // bytes, or isn't 1, 2, 4, or 8 bytes, must be passed by reference. A single argument
     // is never spread across multiple registers."
+    // "All floating point operations are done using the 16 XMM registers."
     // "Structs and unions of size 8, 16, 32, or 64 bits, and __m64 types, are passed
     // as if they were integers of the same size."
-    switch (ty.abiSize(target)) {
-        1, 2, 4, 8 => {},
-        else => return .memory,
-    }
-    return switch (ty.zigTypeTag()) {
+    switch (ty.zigTypeTag()) {
         .Pointer,
         .Int,
         .Bool,
@@ -33,9 +30,13 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
         .ErrorUnion,
         .AnyFrame,
         .Frame,
-        => .integer,
+        => switch (ty.abiSize(target)) {
+            0 => unreachable,
+            1, 2, 4, 8 => return .integer,
+            else => return .memory,
+        },

-        .Float, .Vector => .sse,
+        .Float, .Vector => return .sse,

         .Type,
         .ComptimeFloat,
@@ -47,7 +48,7 @@ pub fn classifyWindows(ty: Type, target: Target) Class {
         .Opaque,
         .EnumLiteral,
         => unreachable,
-    };
+    }
 }

 /// There are a maximum of 8 possible return slots. Returned values are in
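In effect, aggregates under the Windows x64 C calling convention are now classified by ABI size instead of rejecting non-power-of-two sizes up front, and floats and vectors always classify as SSE. A rough illustration of the resulting classes; the types are invented for the example and the comments restate the rule rather than compiler output:

    // Assumed classification results on x86_64-windows with the C calling convention:
    const Small = extern struct { a: u32, b: u32 }; // abiSize 8  -> .integer (passed in a GP register)
    const Big = extern struct { a: u64, b: u64 };   // abiSize 16 -> .memory  (passed by reference)
    const Fp = f64;                                 // Float      -> .sse     (passed in an XMM register)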
@@ -476,6 +476,19 @@ pub const Object = struct {
         _ = builder.buildRet(is_lt);
     }

+    fn genModuleLevelAssembly(object: *Object, comp: *Compilation) !void {
+        const mod = comp.bin_file.options.module.?;
+        if (mod.global_assembly.count() == 0) return;
+        var buffer = std.ArrayList(u8).init(comp.gpa);
+        defer buffer.deinit();
+        var it = mod.global_assembly.iterator();
+        while (it.next()) |kv| {
+            try buffer.appendSlice(kv.value_ptr.*);
+            try buffer.append('\n');
+        }
+        object.llvm_module.setModuleInlineAsm2(buffer.items.ptr, buffer.items.len - 1);
+    }
+
     pub fn flushModule(self: *Object, comp: *Compilation, prog_node: *std.Progress.Node) !void {
         var sub_prog_node = prog_node.start("LLVM Emit Object", 0);
         sub_prog_node.activate();
@@ -484,6 +497,7 @@ pub const Object = struct {

         try self.genErrorNameTable(comp);
         try self.genCmpLtErrorsLenFunction(comp);
+        try self.genModuleLevelAssembly(comp);

         if (self.di_builder) |dib| {
             // When lowering debug info for pointers, we emitted the element types as
@@ -630,7 +644,17 @@ pub const Object = struct {
                 if (!param_ty.hasRuntimeBitsIgnoreComptime()) continue;

                 const llvm_arg_i = @intCast(c_uint, args.items.len) + param_offset;
-                try args.append(llvm_func.getParam(llvm_arg_i));
+                const param = llvm_func.getParam(llvm_arg_i);
+                // It is possible for the calling convention to make the argument's by-reference nature
+                // disagree with our canonical value for it, in which case we must dereference here.
+                const need_deref = !param_ty.isPtrAtRuntime() and !isByRef(param_ty) and
+                    (param.typeOf().getTypeKind() == .Pointer);
+                const loaded_param = if (!need_deref) param else l: {
+                    const load_inst = builder.buildLoad(param, "");
+                    load_inst.setAlignment(param_ty.abiAlignment(target));
+                    break :l load_inst;
+                };
+                try args.append(loaded_param);
             }

             var di_file: ?*llvm.DIFile = null;
@@ -3729,6 +3753,19 @@ pub const FuncGen = struct {
                     arg_ptr.setAlignment(alignment);
                     const store_inst = self.builder.buildStore(llvm_arg, arg_ptr);
                     store_inst.setAlignment(alignment);
+
+                    if (abi_llvm_ty.getTypeKind() == .Pointer) {
+                        // In this case, the calling convention wants a pointer, but
+                        // we have a value.
+                        if (arg_ptr.typeOf() == abi_llvm_ty) {
+                            try llvm_args.append(arg_ptr);
+                            continue;
+                        }
+                        const casted_ptr = self.builder.buildBitCast(arg_ptr, abi_llvm_ty, "");
+                        try llvm_args.append(casted_ptr);
+                        continue;
+                    }
+
                     break :p self.builder.buildBitCast(arg_ptr, ptr_abi_ty, "");
                 };

@@ -7583,7 +7620,7 @@ pub const FuncGen = struct {
             const size_bytes = elem_ty.abiSize(target);
             _ = self.builder.buildMemCpy(
                 self.builder.buildBitCast(ptr, llvm_ptr_u8, ""),
-                ptr_ty.ptrAlignment(target),
+                ptr_alignment,
                 self.builder.buildBitCast(elem, llvm_ptr_u8, ""),
                 elem_ty.abiAlignment(target),
                 self.context.intType(Type.usize.intInfo(target).bits).constInt(size_bytes, .False),
@@ -7917,6 +7954,8 @@ fn llvmFieldIndex(
 }

 fn firstParamSRet(fn_info: Type.Payload.Function.Data, target: std.Target) bool {
+    if (!fn_info.return_type.hasRuntimeBitsIgnoreComptime()) return false;
+
     switch (fn_info.cc) {
         .Unspecified, .Inline => return isByRef(fn_info.return_type),
         .C => switch (target.cpu.arch) {
@@ -8016,7 +8055,8 @@ fn lowerFnRetTy(dg: *DeclGen, fn_info: Type.Payload.Function.Data) !*const llvm.
                 }
             }
             if (classes[0] == .integer and classes[1] == .none) {
-                return llvm_types_buffer[0];
+                const abi_size = fn_info.return_type.abiSize(target);
+                return dg.context.intType(@intCast(c_uint, abi_size * 8));
             }
             return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
         },
@@ -8110,7 +8150,8 @@ fn lowerFnParamTy(dg: *DeclGen, cc: std.builtin.CallingConvention, ty: Type) !*c
                 }
             }
             if (classes[0] == .integer and classes[1] == .none) {
-                return llvm_types_buffer[0];
+                const abi_size = ty.abiSize(target);
+                return dg.context.intType(@intCast(c_uint, abi_size * 8));
             }
             return dg.context.structType(&llvm_types_buffer, llvm_types_index, .False);
         },
@@ -381,6 +381,9 @@ pub const Module = opaque {

     pub const createDIBuilder = ZigLLVMCreateDIBuilder;
     extern fn ZigLLVMCreateDIBuilder(module: *const Module, allow_unresolved: bool) *DIBuilder;
+
+    pub const setModuleInlineAsm2 = LLVMSetModuleInlineAsm2;
+    extern fn LLVMSetModuleInlineAsm2(M: *const Module, Asm: [*]const u8, Len: usize) void;
 };

 pub const lookupIntrinsicID = LLVMLookupIntrinsicID;
@@ -283,6 +283,7 @@ const Writer = struct {
         => try self.writeStructInit(stream, inst),

         .cmpxchg_strong, .cmpxchg_weak => try self.writeCmpxchg(stream, inst),
+        .atomic_load => try self.writeAtomicLoad(stream, inst),
         .atomic_store => try self.writeAtomicStore(stream, inst),
         .atomic_rmw => try self.writeAtomicRmw(stream, inst),
         .memcpy => try self.writeMemcpy(stream, inst),
@@ -351,7 +352,6 @@ const Writer = struct {
         .offset_of,
         .splat,
         .reduce,
-        .atomic_load,
         .bitcast,
         .vector_type,
         .maximum,
@@ -929,6 +929,19 @@ const Writer = struct {
         try self.writeSrc(stream, inst_data.src());
     }

+    fn writeAtomicLoad(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
+        const inst_data = self.code.instructions.items(.data)[inst].pl_node;
+        const extra = self.code.extraData(Zir.Inst.AtomicLoad, inst_data.payload_index).data;
+
+        try self.writeInstRef(stream, extra.elem_type);
+        try stream.writeAll(", ");
+        try self.writeInstRef(stream, extra.ptr);
+        try stream.writeAll(", ");
+        try self.writeInstRef(stream, extra.ordering);
+        try stream.writeAll(") ");
+        try self.writeSrc(stream, inst_data.src());
+    }
+
     fn writeAtomicStore(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
         const inst_data = self.code.instructions.items(.data)[inst].pl_node;
         const extra = self.code.extraData(Zir.Inst.AtomicStore, inst_data.payload_index).data;
@@ -7686,6 +7686,7 @@ ZigType *make_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
         // However for some targets, LLVM incorrectly reports this as 8.
         // See: https://github.com/ziglang/zig/issues/2987
         entry->abi_align = 16;
+        entry->abi_size = align_forward(entry->abi_size, entry->abi_align);
     }
 }

@@ -1,5 +1,6 @@
 const std = @import("std");
 const llvm = @import("codegen/llvm/bindings.zig");
+const Type = @import("type.zig").Type;

 pub const ArchOsAbi = struct {
     arch: std.Target.Cpu.Arch,
@@ -543,10 +544,28 @@ pub fn needUnwindTables(target: std.Target) bool {
     return target.os.tag == .windows;
 }

-/// TODO this was ported from stage1 but it does not take into account CPU features,
-/// which can affect this value. Audit this!
-pub fn largestAtomicBits(target: std.Target) u32 {
-    return switch (target.cpu.arch) {
+pub const AtomicPtrAlignmentError = error{
+    FloatTooBig,
+    IntTooBig,
+    BadType,
+};
+
+pub const AtomicPtrAlignmentDiagnostics = struct {
+    bits: u16 = undefined,
+    max_bits: u16 = undefined,
+};
+
+/// If ABI alignment of `ty` is OK for atomic operations, returns 0.
+/// Otherwise returns the alignment required on a pointer for the target
+/// to perform atomic operations.
+pub fn atomicPtrAlignment(
+    target: std.Target,
+    ty: Type,
+    diags: *AtomicPtrAlignmentDiagnostics,
+) AtomicPtrAlignmentError!u32 {
+    // TODO this was ported from stage1 but it does not take into account CPU features,
+    // which can affect this value. Audit this!
+    const max_atomic_bits: u16 = switch (target.cpu.arch) {
         .avr,
         .msp430,
         .spu_2,
@@ -611,6 +630,40 @@ pub fn largestAtomicBits(target: std.Target) u32 {

         .x86_64 => 128,
     };
+
+    var buffer: Type.Payload.Bits = undefined;
+
+    const int_ty = switch (ty.zigTypeTag()) {
+        .Int => ty,
+        .Enum => ty.intTagType(&buffer),
+        .Float => {
+            const bit_count = ty.floatBits(target);
+            if (bit_count > max_atomic_bits) {
+                diags.* = .{
+                    .bits = bit_count,
+                    .max_bits = max_atomic_bits,
+                };
+                return error.FloatTooBig;
+            }
+            return 0;
+        },
+        .Bool => return 0,
+        else => {
+            if (ty.isPtrAtRuntime()) return 0;
+            return error.BadType;
+        },
+    };
+
+    const bit_count = int_ty.intInfo(target).bits;
+    if (bit_count > max_atomic_bits) {
+        diags.* = .{
+            .bits = bit_count,
+            .max_bits = max_atomic_bits,
+        };
+        return error.IntTooBig;
+    }
+
+    return 0;
 }

 pub fn defaultAddressSpace(
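So the helper only reports an error when the operand's bit width exceeds the target's largest atomic width (128 on x86_64, for example, so u128 atomics pass there but would not on a 64-bit-max target). A standalone sketch of the same width check, outside the compiler and with the maximum passed in as a plain number:

    const std = @import("std");

    // Hypothetical restatement of the width check; the real function also
    // inspects the Type and fills AtomicPtrAlignmentDiagnostics on failure.
    fn atomicWidthOk(bit_count: u16, max_atomic_bits: u16) bool {
        return bit_count <= max_atomic_bits;
    }

    test "atomic bit-width check" {
        // x86_64 supports up to 128-bit atomics (cmpxchg16b), so u128 passes.
        try std.testing.expect(atomicWidthOk(128, 128));
        // A 256-bit operand would exceed it and surface as error.IntTooBig in Sema.
        try std.testing.expect(!atomicWidthOk(256, 128));
    }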
src/type.zig (40 changed lines)
@@ -2788,11 +2788,6 @@ pub const Type = extern union {
                 return AbiAlignmentAdvanced{ .scalar = target_util.defaultFunctionAlignment(target) };
             },

-            .i16, .u16 => return AbiAlignmentAdvanced{ .scalar = 2 },
-            .i32, .u32 => return AbiAlignmentAdvanced{ .scalar = 4 },
-            .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = 8 },
-            .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = 16 },
-
             .isize,
             .usize,
             .single_const_pointer_to_comptime_int,
@@ -2865,14 +2860,15 @@ pub const Type = extern union {
             // ABI alignment of vectors?
             .vector => return AbiAlignmentAdvanced{ .scalar = 16 },

+            .i16, .u16 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(16, target) },
+            .i32, .u32 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(32, target) },
+            .i64, .u64 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(64, target) },
+            .u128, .i128 => return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(128, target) },
+
             .int_signed, .int_unsigned => {
                 const bits: u16 = ty.cast(Payload.Bits).?.data;
                 if (bits == 0) return AbiAlignmentAdvanced{ .scalar = 0 };
-                if (bits <= 8) return AbiAlignmentAdvanced{ .scalar = 1 };
-                if (bits <= 16) return AbiAlignmentAdvanced{ .scalar = 2 };
-                if (bits <= 32) return AbiAlignmentAdvanced{ .scalar = 4 };
-                if (bits <= 64) return AbiAlignmentAdvanced{ .scalar = 8 };
-                return AbiAlignmentAdvanced{ .scalar = 16 };
+                return AbiAlignmentAdvanced{ .scalar = intAbiAlignment(bits, target) };
             },

             .optional => {
@@ -3113,10 +3109,6 @@ pub const Type = extern union {
                 assert(elem_size >= payload.elem_type.abiAlignment(target));
                 return (payload.len + 1) * elem_size;
             },
-            .i16, .u16 => return 2,
-            .i32, .u32 => return 4,
-            .i64, .u64 => return 8,
-            .u128, .i128 => return 16,

             .isize,
             .usize,
@@ -3189,10 +3181,14 @@ pub const Type = extern union {
             .error_set_merged,
             => return 2, // TODO revisit this when we have the concept of the error tag type

+            .i16, .u16 => return intAbiSize(16, target),
+            .i32, .u32 => return intAbiSize(32, target),
+            .i64, .u64 => return intAbiSize(64, target),
+            .u128, .i128 => return intAbiSize(128, target),
             .int_signed, .int_unsigned => {
                 const bits: u16 = self.cast(Payload.Bits).?.data;
                 if (bits == 0) return 0;
-                return std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8);
+                return intAbiSize(bits, target);
             },

             .optional => {
@@ -3234,6 +3230,18 @@ pub const Type = extern union {
         };
     }

+    fn intAbiSize(bits: u16, target: Target) u64 {
+        const alignment = intAbiAlignment(bits, target);
+        return std.mem.alignForwardGeneric(u64, (bits + 7) / 8, alignment);
+    }
+
+    fn intAbiAlignment(bits: u16, target: Target) u32 {
+        return @minimum(
+            std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8),
+            target.maxIntAlignment(),
+        );
+    }
+
     /// Asserts the type has the bit size already resolved.
     pub fn bitSize(ty: Type, target: Target) u64 {
         return switch (ty.tag()) {
@@ -5169,7 +5177,7 @@ pub const Type = extern union {

             const field = it.struct_obj.fields.values()[it.field];
             defer it.field += 1;
-            if (!field.ty.hasRuntimeBits())
+            if (!field.ty.hasRuntimeBits() or field.is_comptime)
                 return FieldOffset{ .field = it.field, .offset = it.offset };

             const field_align = field.normalAlignment(it.target);
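As a worked example of the two helpers: a u24 occupies (24 + 7) / 8 = 3 bytes, the next power of two is 4, so on a target whose maxIntAlignment() is 16 the alignment is @minimum(4, 16) = 4 and the ABI size rounds 3 up to 4. A small sketch of the same arithmetic outside the compiler, with maxIntAlignment() passed in as a plain integer:

    const std = @import("std");

    // Standalone restatement of intAbiAlignment/intAbiSize for illustration.
    fn intAbiAlignment(bits: u16, max_int_alignment: u16) u32 {
        return @minimum(
            std.math.ceilPowerOfTwoPromote(u16, (bits + 7) / 8),
            max_int_alignment,
        );
    }

    fn intAbiSize(bits: u16, max_int_alignment: u16) u64 {
        const alignment = intAbiAlignment(bits, max_int_alignment);
        return std.mem.alignForwardGeneric(u64, (bits + 7) / 8, alignment);
    }

    test "integer ABI size and alignment arithmetic" {
        // u24 on a 16-byte-max target: 3 bytes, rounded up to 4-byte alignment.
        try std.testing.expect(intAbiAlignment(24, 16) == 4);
        try std.testing.expect(intAbiSize(24, 16) == 4);
        // u128 on a target whose maxIntAlignment() is 8 (e.g. 32-bit arm above).
        try std.testing.expect(intAbiAlignment(128, 8) == 8);
        try std.testing.expect(intAbiSize(128, 8) == 16);
    }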
@@ -47,41 +47,128 @@ fn expects4(x: *align(4) u32) void {
     x.* += 1;
 }

-test "alignment of structs" {
+test "alignment of struct with pointer has same alignment as usize" {
     try expect(@alignOf(struct {
         a: i32,
         b: *i32,
     }) == @alignOf(usize));
 }

-test "alignment of >= 128-bit integer type" {
-    try expect(@alignOf(u128) == 16);
-    try expect(@alignOf(u129) == 16);
-}
-
-test "alignment of struct with 128-bit field" {
-    try expect(@alignOf(struct {
-        x: u128,
-    }) == 16);
-
-    comptime {
-        try expect(@alignOf(struct {
-            x: u128,
-        }) == 16);
-    }
-}
-
-test "size of extern struct with 128-bit field" {
-    try expect(@sizeOf(extern struct {
-        x: u128,
-        y: u8,
-    }) == 32);
-
-    comptime {
-        try expect(@sizeOf(extern struct {
-            x: u128,
-            y: u8,
-        }) == 32);
-    }
-}
+test "alignment and size of structs with 128-bit fields" {
+    if (builtin.zig_backend == .stage1) {
+        // stage1 gets the wrong answer for a lot of targets
+        return error.SkipZigTest;
+    }
+    if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+    const A = struct {
+        x: u128,
+    };
+    const B = extern struct {
+        x: u128,
+        y: u8,
+    };
+    const expected = switch (builtin.cpu.arch) {
+        .arm,
+        .armeb,
+        .thumb,
+        .thumbeb,
+        .hexagon,
+        .mips,
+        .mipsel,
+        .powerpc,
+        .powerpcle,
+        .r600,
+        .amdgcn,
+        .riscv32,
+        .sparc,
+        .sparcel,
+        .s390x,
+        .lanai,
+        .wasm32,
+        .wasm64,
+        => .{
+            .a_align = 8,
+            .a_size = 16,
+
+            .b_align = 8,
+            .b_size = 24,
+
+            .u128_align = 8,
+            .u128_size = 16,
+            .u129_align = 8,
+            .u129_size = 24,
+        },
+
+        .i386 => switch (builtin.os.tag) {
+            .windows => .{
+                .a_align = 8,
+                .a_size = 16,
+
+                .b_align = 8,
+                .b_size = 24,
+
+                .u128_align = 8,
+                .u128_size = 16,
+                .u129_align = 8,
+                .u129_size = 24,
+            },
+            else => .{
+                .a_align = 4,
+                .a_size = 16,
+
+                .b_align = 4,
+                .b_size = 20,
+
+                .u128_align = 4,
+                .u128_size = 16,
+                .u129_align = 4,
+                .u129_size = 20,
+            },
+        },
+
+        .mips64,
+        .mips64el,
+        .powerpc64,
+        .powerpc64le,
+        .riscv64,
+        .sparcv9,
+        .x86_64,
+        .aarch64,
+        .aarch64_be,
+        .aarch64_32,
+        .bpfel,
+        .bpfeb,
+        .nvptx,
+        .nvptx64,
+        => .{
+            .a_align = 16,
+            .a_size = 16,
+
+            .b_align = 16,
+            .b_size = 32,
+
+            .u128_align = 16,
+            .u128_size = 16,
+            .u129_align = 16,
+            .u129_size = 32,
+        },
+
+        else => return error.SkipZigTest,
+    };
+    comptime {
+        std.debug.assert(@alignOf(A) == expected.a_align);
+        std.debug.assert(@sizeOf(A) == expected.a_size);
+
+        std.debug.assert(@alignOf(B) == expected.b_align);
+        std.debug.assert(@sizeOf(B) == expected.b_size);
+
+        std.debug.assert(@alignOf(u128) == expected.u128_align);
+        std.debug.assert(@sizeOf(u128) == expected.u128_size);
+
+        std.debug.assert(@alignOf(u129) == expected.u129_align);
+        std.debug.assert(@sizeOf(u129) == expected.u129_size);
+    }
+}

@@ -328,7 +415,6 @@ test "read 128-bit field from default aligned struct in stack memory" {
         .nevermind = 1,
         .badguy = 12,
     };
-    try expect((@ptrToInt(&default_aligned.badguy) % 16) == 0);
     try expect(12 == default_aligned.badguy);
 }

@@ -345,7 +431,6 @@ test "read 128-bit field from default aligned struct in global memory" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;

-    try expect((@ptrToInt(&default_aligned_global.badguy) % 16) == 0);
     try expect(12 == default_aligned_global.badguy);
 }
@@ -23,7 +23,6 @@ test "module level assembly" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_llvm) return error.SkipZigTest; // TODO

     if (is_x86_64_linux) {
         try expect(this_is_my_alias() == 1234);
@@ -127,7 +127,7 @@ test "128-bit cmpxchg" {
 }

 fn test_u128_cmpxchg() !void {
-    var x: u128 = 1234;
+    var x: u128 align(16) = 1234;
     if (@cmpxchgWeak(u128, &x, 99, 5678, .SeqCst, .SeqCst)) |x1| {
         try expect(x1 == 1234);
     } else {
@@ -120,6 +120,10 @@ test "bitcast generates a temporary value" {
 }

 test "@bitCast packed structs at runtime and comptime" {
+    if (builtin.zig_backend == .stage1) {
+        // stage1 gets the wrong answer for a lot of targets
+        return error.SkipZigTest;
+    }
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
@@ -138,18 +142,9 @@ test "@bitCast packed structs at runtime and comptime" {
         fn doTheTest() !void {
             var full = Full{ .number = 0x1234 };
             var two_halves = @bitCast(Divided, full);
-            switch (native_endian) {
-                .Big => {
-                    try expect(two_halves.half1 == 0x12);
-                    try expect(two_halves.quarter3 == 0x3);
-                    try expect(two_halves.quarter4 == 0x4);
-                },
-                .Little => {
-                    try expect(two_halves.half1 == 0x34);
-                    try expect(two_halves.quarter3 == 0x2);
-                    try expect(two_halves.quarter4 == 0x1);
-                },
-            }
+            try expect(two_halves.half1 == 0x34);
+            try expect(two_halves.quarter3 == 0x2);
+            try expect(two_halves.quarter4 == 0x1);
         }
     };
     try S.doTheTest();
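The expectations above become endianness-independent because packed struct fields are defined to start at the least significant bit of the backing integer. A minimal self-contained sketch of that rule, with a hypothetical packed struct and stage2 semantics assumed:

    const std = @import("std");

    test "packed struct fields start at the least significant bit" {
        const P = packed struct { lo: u4, hi: u4 };
        const p = @bitCast(P, @as(u8, 0x12));
        // 0x12 == 0b0001_0010: the first field gets the low nibble, the second the high one.
        try std.testing.expect(p.lo == 0x2);
        try std.testing.expect(p.hi == 0x1);
    }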
@@ -499,17 +499,18 @@ const Bitfields = packed struct {
     f7: u8,
 };

-test "native bit field understands endianness" {
-    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
+test "packed struct fields are ordered from LSB to MSB" {
+    if (builtin.zig_backend == .stage1) {
+        // stage1 gets the wrong answer for a lot of targets
+        return error.SkipZigTest;
+    }
+    if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO

-    var all: u64 = if (native_endian != .Little)
-        0x1111222233445677
-    else
-        0x7765443322221111;
+    var all: u64 = 0x7765443322221111;
     var bytes: [8]u8 = undefined;
     @memcpy(&bytes, @ptrCast([*]u8, &all), 8);
     var bitfields = @ptrCast(*Bitfields, &bytes).*;
@@ -974,6 +975,8 @@ test "comptime struct field" {
         comptime b: i32 = 1234,
     };

+    comptime std.debug.assert(@sizeOf(T) == 4);
+
     var foo: T = undefined;
     comptime try expect(foo.b == 1234);
 }