Merge pull request #14713 from jacobly0/cbe-behavior

CBE: fix more behavior tests
This commit is contained in:
Andrew Kelley 2023-02-24 21:23:54 -05:00 committed by GitHub
commit 6398aabb87
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
9 changed files with 1062 additions and 932 deletions

View File

@ -5,6 +5,7 @@
#endif
#include <float.h>
#include <limits.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdint.h>
@ -77,6 +78,32 @@ typedef char bool;
#define zig_cold
#endif
#if zig_has_attribute(flatten)
#define zig_maybe_flatten __attribute__((flatten))
#else
#define zig_maybe_flatten
#endif
#if zig_has_attribute(noinline)
#define zig_never_inline __attribute__((noinline)) zig_maybe_flatten
#elif defined(_MSC_VER)
#define zig_never_inline __declspec(noinline) zig_maybe_flatten
#else
#define zig_never_inline zig_never_inline_unavailable
#endif
#if zig_has_attribute(not_tail_called)
#define zig_never_tail __attribute__((not_tail_called)) zig_never_inline
#else
#define zig_never_tail zig_never_tail_unavailable
#endif
#if zig_has_attribute(always_inline)
#define zig_always_tail __attribute__((musttail))
#else
#define zig_always_tail zig_always_tail_unavailable
#endif
#if __STDC_VERSION__ >= 199901L
#define zig_restrict restrict
#elif defined(__GNUC__)
@ -1049,7 +1076,7 @@ static inline void zig_vmulo_i16(uint8_t *ov, int16_t *res, int n,
\
static inline int##w##_t zig_shls_i##w(int##w##_t lhs, int##w##_t rhs, uint8_t bits) { \
int##w##_t res; \
if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, rhs, bits)) return res; \
if ((uint##w##_t)rhs < (uint##w##_t)bits && !zig_shlo_i##w(&res, lhs, (uint8_t)rhs, bits)) return res; \
return lhs < INT##w##_C(0) ? zig_minInt_i(w, bits) : zig_maxInt_i(w, bits); \
} \
\
@ -2383,39 +2410,47 @@ zig_msvc_atomics(i64, int64_t, 64)
#define zig_msvc_flt_atomics(Type, ReprType, suffix) \
static inline bool zig_msvc_cmpxchg_##Type(zig_##Type volatile* obj, zig_##Type* expected, zig_##Type desired) { \
ReprType comparand = *((ReprType*)expected); \
ReprType initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, *((ReprType*)&desired), comparand); \
bool exchanged = initial == comparand; \
if (!exchanged) { \
*expected = *((zig_##Type*)&initial); \
} \
return exchanged; \
ReprType exchange; \
ReprType comparand; \
ReprType initial; \
bool success; \
memcpy(&comparand, expected, sizeof(comparand)); \
memcpy(&exchange, &desired, sizeof(exchange)); \
initial = _InterlockedCompareExchange##suffix((ReprType volatile*)obj, exchange, comparand); \
success = initial == comparand; \
if (!success) memcpy(expected, &initial, sizeof(*expected)); \
return success; \
} \
static inline zig_##Type zig_msvc_atomicrmw_xchg_##Type(zig_##Type volatile* obj, zig_##Type value) { \
ReprType initial = _InterlockedExchange##suffix((ReprType volatile*)obj, *((ReprType*)&value)); \
return *((zig_##Type*)&initial); \
ReprType repr; \
ReprType initial; \
zig_##Type result; \
memcpy(&repr, &value, sizeof(repr)); \
initial = _InterlockedExchange##suffix((ReprType volatile*)obj, repr); \
memcpy(&result, &initial, sizeof(result)); \
return result; \
} \
static inline zig_##Type zig_msvc_atomicrmw_add_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
ReprType new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = prev + value; \
success = zig_msvc_cmpxchg_##Type(obj, &prev, *((ReprType*)&new)); \
} \
return prev; \
ReprType repr; \
zig_##Type expected; \
zig_##Type desired; \
repr = *(ReprType volatile*)obj; \
memcpy(&expected, &repr, sizeof(expected)); \
do { \
desired = expected + value; \
} while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
return expected; \
} \
static inline zig_##Type zig_msvc_atomicrmw_sub_##Type(zig_##Type volatile* obj, zig_##Type value) { \
bool success = false; \
ReprType new; \
zig_##Type prev; \
while (!success) { \
prev = *obj; \
new = prev - value; \
success = zig_msvc_cmpxchg_##Type(obj, &prev, *((ReprType*)&new)); \
} \
return prev; \
ReprType repr; \
zig_##Type expected; \
zig_##Type desired; \
repr = *(ReprType volatile*)obj; \
memcpy(&expected, &repr, sizeof(expected)); \
do { \
desired = expected - value; \
} while (!zig_msvc_cmpxchg_##Type(obj, &expected, desired)); \
return expected; \
}
zig_msvc_flt_atomics(f32, uint32_t, )

File diff suppressed because it is too large Load Diff

View File

@ -1056,7 +1056,7 @@ pub const CType = extern union {
}
},
.Struct, .Union => |zig_tag| if (ty.containerLayout() == .Packed) {
.Struct, .Union => |zig_ty_tag| if (ty.containerLayout() == .Packed) {
if (ty.castTag(.@"struct")) |struct_obj| {
try self.initType(struct_obj.data.backing_int_ty, kind, lookup);
} else {
@ -1068,9 +1068,13 @@ pub const CType = extern union {
}
} else if (ty.isTupleOrAnonStruct()) {
if (lookup.isMutable()) {
for (0..ty.structFieldCount()) |field_i| {
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if (ty.structFieldIsComptime(field_i) or
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
_ = try lookup.typeToIndex(field_ty, switch (kind) {
.forward, .forward_parameter => .forward,
@ -1086,14 +1090,22 @@ pub const CType = extern union {
}
}
self.init(switch (kind) {
.forward, .forward_parameter => .fwd_anon_struct,
.complete, .parameter, .global => .anon_struct,
.forward, .forward_parameter => switch (zig_ty_tag) {
.Struct => .fwd_anon_struct,
.Union => .fwd_anon_union,
else => unreachable,
},
.complete, .parameter, .global => switch (zig_ty_tag) {
.Struct => .anon_struct,
.Union => .anon_union,
else => unreachable,
},
.payload => unreachable,
});
} else {
const tag_ty = ty.unionTagTypeSafety();
const is_tagged_union_wrapper = kind != .payload and tag_ty != null;
const is_struct = zig_tag == .Struct or is_tagged_union_wrapper;
const is_struct = zig_ty_tag == .Struct or is_tagged_union_wrapper;
switch (kind) {
.forward, .forward_parameter => {
self.storage = .{ .fwd = .{
@ -1138,7 +1150,7 @@ pub const CType = extern union {
self.init(.void);
} else {
var is_packed = false;
for (0..switch (zig_tag) {
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
else => unreachable,
@ -1181,10 +1193,10 @@ pub const CType = extern union {
}
},
.Array, .Vector => |zig_tag| {
.Array, .Vector => |zig_ty_tag| {
switch (kind) {
.forward, .complete, .global => {
const t: Tag = switch (zig_tag) {
const t: Tag = switch (zig_ty_tag) {
.Array => .array,
.Vector => .vector,
else => unreachable,
@ -1296,19 +1308,21 @@ pub const CType = extern union {
.Fn => {
const info = ty.fnInfo();
if (lookup.isMutable()) {
const param_kind: Kind = switch (kind) {
.forward, .forward_parameter => .forward_parameter,
.complete, .parameter, .global => .parameter,
.payload => unreachable,
};
_ = try lookup.typeToIndex(info.return_type, param_kind);
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
_ = try lookup.typeToIndex(param_type, param_kind);
if (!info.is_generic) {
if (lookup.isMutable()) {
const param_kind: Kind = switch (kind) {
.forward, .forward_parameter => .forward_parameter,
.complete, .parameter, .global => .parameter,
.payload => unreachable,
};
_ = try lookup.typeToIndex(info.return_type, param_kind);
for (info.param_types) |param_type| {
if (!param_type.hasRuntimeBitsIgnoreComptime()) continue;
_ = try lookup.typeToIndex(param_type, param_kind);
}
}
}
self.init(if (info.is_var_args) .varargs_function else .function);
self.init(if (info.is_var_args) .varargs_function else .function);
} else self.init(.void);
},
}
}
@ -1499,126 +1513,95 @@ pub const CType = extern union {
.@"union",
.packed_struct,
.packed_union,
=> switch (ty.zigTypeTag()) {
.Struct => {
const fields_len = ty.structFieldCount();
=> {
const zig_ty_tag = ty.zigTypeTag();
const fields_len = switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
else => unreachable,
};
var c_fields_len: usize = 0;
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if (ty.structFieldIsComptime(field_i) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
c_fields_len += 1;
}
var c_fields_len: usize = 0;
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
c_fields_len += 1;
}
const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
var c_field_i: usize = 0;
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if (ty.structFieldIsComptime(field_i) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
var c_field_i: usize = 0;
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
fields_pl[c_field_i] = .{
.name = try if (ty.isSimpleTuple())
std.fmt.allocPrintZ(arena, "f{}", .{field_i})
else
arena.dupeZ(u8, ty.structFieldName(field_i)),
.type = store.set.typeToIndex(field_ty, target, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
.global => .global,
.payload => unreachable,
}).?,
.alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target),
};
c_field_i += 1;
}
defer c_field_i += 1;
fields_pl[c_field_i] = .{
.name = try if (ty.isSimpleTuple())
std.fmt.allocPrintZ(arena, "f{}", .{field_i})
else
arena.dupeZ(u8, switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i),
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
}),
.type = store.set.typeToIndex(field_ty, target, switch (kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter, .payload => .complete,
.global => .global,
}).?,
.alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target),
};
}
switch (t) {
.fwd_anon_struct => {
const anon_pl = try arena.create(Payload.Fields);
anon_pl.* = .{ .base = .{ .tag = t }, .data = fields_pl };
return initPayload(anon_pl);
},
switch (t) {
.fwd_anon_struct,
.fwd_anon_union,
=> {
const anon_pl = try arena.create(Payload.Fields);
anon_pl.* = .{ .base = .{ .tag = t }, .data = fields_pl };
return initPayload(anon_pl);
},
.anon_struct,
.@"struct",
.@"union",
.packed_struct,
.packed_union,
=> {
const struct_pl = try arena.create(Payload.Aggregate);
struct_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
.fwd_decl = store.set.typeToIndex(ty, target, .forward).?,
} };
return initPayload(struct_pl);
},
.unnamed_struct,
.unnamed_union,
.packed_unnamed_struct,
.packed_unnamed_union,
=> {
const unnamed_pl = try arena.create(Payload.Unnamed);
unnamed_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
.owner_decl = ty.getOwnerDecl(),
.id = if (ty.unionTagTypeSafety()) |_| 0 else unreachable,
} };
return initPayload(unnamed_pl);
},
else => unreachable,
}
},
.anon_struct,
.anon_union,
.@"struct",
.@"union",
.packed_struct,
.packed_union,
=> {
const struct_pl = try arena.create(Payload.Aggregate);
struct_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
.fwd_decl = store.set.typeToIndex(ty, target, .forward).?,
} };
return initPayload(struct_pl);
},
.Union => {
const union_fields = ty.unionFields();
const fields_len = union_fields.count();
var c_fields_len: usize = 0;
for (0..fields_len) |field_i| {
const field_ty = ty.structFieldType(field_i);
if (!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
c_fields_len += 1;
}
const fields_pl = try arena.alloc(Payload.Fields.Field, c_fields_len);
var field_i: usize = 0;
var c_field_i: usize = 0;
var field_it = union_fields.iterator();
while (field_it.next()) |field| {
defer field_i += 1;
if (!field.value_ptr.ty.hasRuntimeBitsIgnoreComptime()) continue;
fields_pl[c_field_i] = .{
.name = try arena.dupeZ(u8, field.key_ptr.*),
.type = store.set.typeToIndex(field.value_ptr.ty, target, switch (kind) {
.forward, .forward_parameter => unreachable,
.complete, .parameter, .payload => .complete,
.global => .global,
}).?,
.alignas = Payload.Fields.AlignAs.fieldAlign(ty, field_i, target),
};
c_field_i += 1;
}
switch (kind) {
.forward, .forward_parameter => unreachable,
.complete, .parameter, .global => {
const union_pl = try arena.create(Payload.Aggregate);
union_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
.fwd_decl = store.set.typeToIndex(ty, target, .forward).?,
} };
return initPayload(union_pl);
},
.payload => if (ty.unionTagTypeSafety()) |_| {
const union_pl = try arena.create(Payload.Unnamed);
union_pl.* = .{ .base = .{ .tag = t }, .data = .{
.fields = fields_pl,
.owner_decl = ty.getOwnerDecl(),
.id = 0,
} };
return initPayload(union_pl);
} else unreachable,
}
},
else => unreachable,
else => unreachable,
}
},
.function,
.varargs_function,
=> {
const info = ty.fnInfo();
assert(!info.is_generic);
const param_kind: Kind = switch (kind) {
.forward, .forward_parameter => .forward_parameter,
.complete, .parameter, .global => .parameter,
@ -1707,14 +1690,19 @@ pub const CType = extern union {
]u8 = undefined;
const c_fields = cty.cast(Payload.Fields).?.data;
const zig_ty_tag = ty.zigTypeTag();
var c_field_i: usize = 0;
for (0..ty.structFieldCount()) |field_i| {
for (0..switch (zig_ty_tag) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if (ty.structFieldIsComptime(field_i) or
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
defer c_field_i += 1;
const c_field = &c_fields[c_field_i];
c_field_i += 1;
if (!self.eqlRecurse(field_ty, c_field.type, switch (self.kind) {
.forward, .forward_parameter => .forward,
@ -1725,8 +1713,11 @@ pub const CType = extern union {
u8,
if (ty.isSimpleTuple())
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else
ty.structFieldName(field_i),
else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i),
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
},
mem.span(c_field.name),
) or Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align" !=
c_field.alignas.@"align") return false;
@ -1764,6 +1755,7 @@ pub const CType = extern union {
if (ty.zigTypeTag() != .Fn) return false;
const info = ty.fnInfo();
assert(!info.is_generic);
const data = cty.cast(Payload.Function).?.data;
const param_kind: Kind = switch (self.kind) {
.forward, .forward_parameter => .forward_parameter,
@ -1824,29 +1816,30 @@ pub const CType = extern union {
var name_buf: [
std.fmt.count("f{}", .{std.math.maxInt(usize)})
]u8 = undefined;
const zig_ty_tag = ty.zigTypeTag();
for (0..switch (ty.zigTypeTag()) {
.Struct => ty.structFieldCount(),
.Union => ty.unionFields().count(),
else => unreachable,
}) |field_i| {
const field_ty = ty.structFieldType(field_i);
if (ty.structFieldIsComptime(field_i) or
if ((zig_ty_tag == .Struct and ty.structFieldIsComptime(field_i)) or
!field_ty.hasRuntimeBitsIgnoreComptime()) continue;
self.updateHasherRecurse(
hasher,
ty.structFieldType(field_i),
switch (self.kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
.global => .global,
.payload => unreachable,
},
);
self.updateHasherRecurse(hasher, field_ty, switch (self.kind) {
.forward, .forward_parameter => .forward,
.complete, .parameter => .complete,
.global => .global,
.payload => unreachable,
});
hasher.update(if (ty.isSimpleTuple())
std.fmt.bufPrint(&name_buf, "f{}", .{field_i}) catch unreachable
else
ty.structFieldName(field_i));
else switch (zig_ty_tag) {
.Struct => ty.structFieldName(field_i),
.Union => ty.unionFields().keys()[field_i],
else => unreachable,
});
autoHash(
hasher,
Payload.Fields.AlignAs.fieldAlign(ty, field_i, target).@"align",
@ -1878,6 +1871,7 @@ pub const CType = extern union {
.varargs_function,
=> {
const info = ty.fnInfo();
assert(!info.is_generic);
const param_kind: Kind = switch (self.kind) {
.forward, .forward_parameter => .forward_parameter,
.complete, .parameter, .global => .parameter,

View File

@ -247,8 +247,8 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
const abi_define = abiDefine(comp);
// Covers defines, zig.h, ctypes, asm, lazy fwd, lazy code.
try f.all_buffers.ensureUnusedCapacity(gpa, 6);
// Covers defines, zig.h, ctypes, asm, lazy fwd.
try f.all_buffers.ensureUnusedCapacity(gpa, 5);
if (abi_define) |buf| f.appendBufAssumeCapacity(buf);
f.appendBufAssumeCapacity(zig_h);
@ -263,8 +263,8 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
f.appendBufAssumeCapacity(asm_buf.items);
}
const lazy_indices = f.all_buffers.items.len;
f.all_buffers.items.len += 2;
const lazy_index = f.all_buffers.items.len;
f.all_buffers.items.len += 1;
try self.flushErrDecls(&f.lazy_db);
@ -297,6 +297,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
{
// We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes.
// This ensures that every lazy CType.Index exactly matches the global CType.Index.
assert(f.ctypes.count() == 0);
try self.flushCTypes(&f, .none, f.lazy_db.ctypes);
@ -305,30 +306,22 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
try self.flushCTypes(&f, entry.key_ptr.toOptional(), entry.value_ptr.ctypes);
}
{
f.all_buffers.items[lazy_indices + 0] = .{
.iov_base = if (f.lazy_db.fwd_decl.items.len > 0) f.lazy_db.fwd_decl.items.ptr else "",
.iov_len = f.lazy_db.fwd_decl.items.len,
};
f.file_size += f.lazy_db.fwd_decl.items.len;
f.all_buffers.items[lazy_indices + 1] = .{
.iov_base = if (f.lazy_db.code.items.len > 0) f.lazy_db.code.items.ptr else "",
.iov_len = f.lazy_db.code.items.len,
};
f.file_size += f.lazy_db.code.items.len;
}
f.all_buffers.items[ctypes_index] = .{
.iov_base = if (f.ctypes_buf.items.len > 0) f.ctypes_buf.items.ptr else "",
.iov_len = f.ctypes_buf.items.len,
};
f.file_size += f.ctypes_buf.items.len;
f.all_buffers.items[lazy_index] = .{
.iov_base = if (f.lazy_db.fwd_decl.items.len > 0) f.lazy_db.fwd_decl.items.ptr else "",
.iov_len = f.lazy_db.fwd_decl.items.len,
};
f.file_size += f.lazy_db.fwd_decl.items.len;
// Now the code.
try f.all_buffers.ensureUnusedCapacity(gpa, decl_values.len);
for (decl_values) |decl|
f.appendBufAssumeCapacity(decl.code.items);
try f.all_buffers.ensureUnusedCapacity(gpa, 1 + decl_values.len);
f.appendBufAssumeCapacity(f.lazy_db.code.items);
for (decl_values) |decl| f.appendBufAssumeCapacity(decl.code.items);
const file = self.base.file.?;
try file.setEndPos(f.file_size);

View File

@ -723,6 +723,7 @@ pub fn supportsFunctionAlignment(target: std.Target) bool {
// Reports whether the given compiler backend can honor guaranteed tail calls
// for `target`. The LLVM-based backends defer to the LLVM-specific check;
// this change adds unconditional support for the C backend (stage2_c), which
// this commit implements via the `zig_always_tail` macro in zig.h. All other
// self-hosted backends report no support.
pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool {
switch (backend) {
.stage1, .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
.stage2_c => return true,
else => return false,
}
}

View File

@ -48,7 +48,6 @@ fn testParentFieldPtrFirst(a: *const bool) !void {
test "@fieldParentPtr untagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testFieldParentPtrUnion(&bar.c);
@ -75,7 +74,6 @@ fn testFieldParentPtrUnion(c: *const i32) !void {
test "@fieldParentPtr tagged union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testFieldParentPtrTaggedUnion(&bar_tagged.c);
@ -102,7 +100,6 @@ fn testFieldParentPtrTaggedUnion(c: *const i32) !void {
test "@fieldParentPtr extern union" {
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
try testFieldParentPtrExternUnion(&bar_extern.c);

View File

@ -603,7 +603,6 @@ test "packed struct initialized in bitcast" {
test "pointer to container level packed struct field" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

View File

@ -507,7 +507,6 @@ test "ptrCast comptime known slice to C pointer" {
}
test "ptrToInt on a generic function" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO

View File

@ -96,10 +96,9 @@ fn doNothingWithFirstArg(args: anytype) void {
test "simple variadic function" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) {
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@ -112,6 +111,12 @@ test "simple variadic function" {
return @cVaArg(&ap, c_int);
}
// C-ABI variadic helper added so the test can exercise @cVaStart/@cVaArg/@cVaEnd
// with at least one fixed parameter present. The fixed argument is ignored;
// the function returns the first variadic argument as a c_int. (The comment at
// L666 notes why: pre-C23 C does not allow varargs without a preceding
// runtime parameter, so the C backend needs this form instead of `simple`.)
fn compatible(_: c_int, ...) callconv(.C) c_int {
var ap = @cVaStart();
defer @cVaEnd(&ap);
return @cVaArg(&ap, c_int);
}
fn add(count: c_int, ...) callconv(.C) c_int {
var ap = @cVaStart();
defer @cVaEnd(&ap);
@ -124,8 +129,13 @@ test "simple variadic function" {
}
};
try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0)));
try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024)));
if (builtin.zig_backend != .stage2_c) {
// pre C23 doesn't support varargs without a preceding runtime arg.
try std.testing.expectEqual(@as(c_int, 0), S.simple(@as(c_int, 0)));
try std.testing.expectEqual(@as(c_int, 1024), S.simple(@as(c_int, 1024)));
}
try std.testing.expectEqual(@as(c_int, 0), S.compatible(undefined, @as(c_int, 0)));
try std.testing.expectEqual(@as(c_int, 1024), S.compatible(undefined, @as(c_int, 1024)));
try std.testing.expectEqual(@as(c_int, 0), S.add(0));
try std.testing.expectEqual(@as(c_int, 1), S.add(1, @as(c_int, 1)));
try std.testing.expectEqual(@as(c_int, 3), S.add(2, @as(c_int, 1), @as(c_int, 2)));
@ -134,10 +144,9 @@ test "simple variadic function" {
test "variadic functions" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) {
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}
@ -178,10 +187,9 @@ test "variadic functions" {
test "copy VaList" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos and builtin.zig_backend == .stage2_llvm) {
if (builtin.cpu.arch == .aarch64 and builtin.os.tag != .macos) {
// https://github.com/ziglang/zig/issues/14096
return error.SkipZigTest;
}