Merge pull request #14691 from jacobly0/ctype

Andrew Kelley · 2023-02-22 11:06:13 -05:00 · committed by GitHub
commit c9e02d3e69
18 changed files with 6469 additions and 2037 deletions


@@ -569,6 +569,7 @@ set(ZIG_STAGE2_SOURCES
     "${CMAKE_SOURCE_DIR}/src/clang_options_data.zig"
     "${CMAKE_SOURCE_DIR}/src/codegen.zig"
     "${CMAKE_SOURCE_DIR}/src/codegen/c.zig"
+    "${CMAKE_SOURCE_DIR}/src/codegen/c/type.zig"
     "${CMAKE_SOURCE_DIR}/src/codegen/llvm.zig"
     "${CMAKE_SOURCE_DIR}/src/codegen/llvm/bindings.zig"
     "${CMAKE_SOURCE_DIR}/src/glibc.zig"
@@ -784,7 +785,7 @@ set_target_properties(zig2 PROPERTIES
     COMPILE_FLAGS ${ZIG2_COMPILE_FLAGS}
     LINK_FLAGS ${ZIG2_LINK_FLAGS}
 )
-target_include_directories(zig2 PUBLIC "${CMAKE_SOURCE_DIR}/lib")
+target_include_directories(zig2 PUBLIC "${CMAKE_SOURCE_DIR}/stage1")
 target_link_libraries(zig2 LINK_PUBLIC zigcpp)

 if(MSVC)


@@ -509,8 +509,39 @@ fn addWasiUpdateStep(b: *std.Build, version: [:0]const u8) !void {
     run_opt.addArg("-o");
     run_opt.addFileSourceArg(.{ .path = "stage1/zig1.wasm" });

+    const CopyFileStep = struct {
+        const Step = std.Build.Step;
+        const FileSource = std.Build.FileSource;
+        const CopyFileStep = @This();
+
+        step: Step,
+        builder: *std.Build,
+        source: FileSource,
+        dest_rel_path: []const u8,
+
+        pub fn init(builder: *std.Build, source: FileSource, dest_rel_path: []const u8) CopyFileStep {
+            return CopyFileStep{
+                .builder = builder,
+                .step = Step.init(.custom, builder.fmt("install {s} to {s}", .{ source.getDisplayName(), dest_rel_path }), builder.allocator, make),
+                .source = source.dupe(builder),
+                .dest_rel_path = builder.dupePath(dest_rel_path),
+            };
+        }
+
+        fn make(step: *Step) !void {
+            const self = @fieldParentPtr(CopyFileStep, "step", step);
+            const full_src_path = self.source.getPath(self.builder);
+            const full_dest_path = self.builder.pathFromRoot(self.dest_rel_path);
+            try self.builder.updateFile(full_src_path, full_dest_path);
+        }
+    };
+
+    const copy_zig_h = try b.allocator.create(CopyFileStep);
+    copy_zig_h.* = CopyFileStep.init(b, .{ .path = "lib/zig.h" }, "stage1/zig.h");
+
     const update_zig1_step = b.step("update-zig1", "Update stage1/zig1.wasm");
     update_zig1_step.dependOn(&run_opt.step);
+    update_zig1_step.dependOn(&copy_zig_h.step);
 }

 fn addCompilerStep(
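The CopyFileStep above uses the embedded-step pattern that custom build steps relied on at the time: the Step is stored inline as a field, and make() recovers the enclosing struct with @fieldParentPtr. A minimal self-contained sketch of that pattern, where this Step type is a stand-in and not the real std.Build.Step:

    const std = @import("std");

    // Stand-in for std.Build.Step: just a make callback.
    const Step = struct {
        makeFn: *const fn (*Step) anyerror!void,
    };

    const PrintStep = struct {
        step: Step,
        message: []const u8,

        fn make(step: *Step) !void {
            // Recover the enclosing PrintStep from the address of its `step` field.
            const self = @fieldParentPtr(PrintStep, "step", step);
            std.debug.print("{s}\n", .{self.message});
        }
    };

    pub fn main() !void {
        var print_step = PrintStep{
            .step = .{ .makeFn = PrintStep.make },
            .message = "hello from a custom step",
        };
        try print_step.step.makeFn(&print_step.step);
    }

(The three-argument @fieldParentPtr matches the Zig 0.11-dev era of this diff.)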


@@ -508,7 +508,7 @@ pub fn HashMap(
         /// If a new entry needs to be stored, this function asserts there
         /// is enough capacity to store it.
         pub fn getOrPutAssumeCapacityAdapted(self: *Self, key: anytype, ctx: anytype) GetOrPutResult {
-            return self.unmanaged.getOrPutAssumeCapacityAdapted(self.allocator, key, ctx);
+            return self.unmanaged.getOrPutAssumeCapacityAdapted(key, ctx);
         }

         pub fn getOrPutValue(self: *Self, key: K, value: V) Allocator.Error!Entry {
@@ -2130,7 +2130,7 @@ test "std.hash_map getOrPutAdapted" {
     try testing.expectEqual(map.count(), keys.len);

     inline for (keys, 0..) |key_str, i| {
-        const result = try map.getOrPutAdapted(key_str, AdaptedContext{});
+        const result = map.getOrPutAssumeCapacityAdapted(key_str, AdaptedContext{});
         try testing.expect(result.found_existing);
         try testing.expectEqual(real_keys[i], result.key_ptr.*);
         try testing.expectEqual(@as(u64, i) * 2, result.value_ptr.*);
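The first hunk fixes the managed wrapper, which had been forwarding self.allocator to an unmanaged method whose signature is (key, ctx); the second updates the test to exercise the fixed method. A minimal usage sketch, assuming a StringHashMap with capacity reserved up front (adapted getOrPut leaves a new key undefined, so the caller fills both pointers):

    const std = @import("std");

    test "getOrPutAssumeCapacityAdapted takes no allocator" {
        var map = std.StringHashMap(u32).init(std.testing.allocator);
        defer map.deinit();

        // Reserve capacity first; the assume-capacity variant cannot
        // allocate, which is why it needs no allocator argument.
        try map.ensureUnusedCapacity(1);

        const gop = map.getOrPutAssumeCapacityAdapted(
            @as([]const u8, "key"),
            std.hash_map.StringContext{},
        );
        if (!gop.found_existing) {
            // Adapted variants do not copy the key for us.
            gop.key_ptr.* = "key";
            gop.value_ptr.* = 42;
        }

        try std.testing.expectEqual(@as(u32, 42), map.get("key").?);
    }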


@@ -433,15 +433,9 @@ pub fn MultiArrayList(comptime S: type) type {
         }

         fn capacityInBytes(capacity: usize) usize {
-            if (builtin.zig_backend == .stage2_c) {
-                var bytes: usize = 0;
-                for (sizes.bytes) |size| bytes += size * capacity;
-                return bytes;
-            } else {
-                const sizes_vector: @Vector(sizes.bytes.len, usize) = sizes.bytes;
-                const capacity_vector = @splat(sizes.bytes.len, capacity);
-                return @reduce(.Add, capacity_vector * sizes_vector);
-            }
+            comptime var elem_bytes: usize = 0;
+            inline for (sizes.bytes) |size| elem_bytes += size;
+            return elem_bytes * capacity;
         }

         fn allocatedBytes(self: Self) []align(@alignOf(S)) u8 {
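The scalar form sums the per-field sizes once at comptime, produces the same result as the removed @Vector/@reduce form, and compiles under the C backend, which previously needed the special case. A standalone sketch of the equivalence, using hypothetical per-field sizes and the two-argument @splat of this diff's era:

    const std = @import("std");

    // Hypothetical per-field byte sizes; in MultiArrayList these come
    // from `sizes.bytes`, which is comptime-known.
    const field_sizes = [_]usize{ 8, 4, 1 };

    test "scalar and vector capacityInBytes agree" {
        const capacity: usize = 16;

        // New form: fold the element size at comptime.
        comptime var elem_bytes: usize = 0;
        inline for (field_sizes) |size| elem_bytes += size;
        const scalar = elem_bytes * capacity;

        // Old form: elementwise multiply, then horizontal reduce.
        const sizes_vector: @Vector(field_sizes.len, usize) = field_sizes;
        const capacity_vector = @splat(field_sizes.len, capacity);
        const vectorized = @reduce(.Add, capacity_vector * sizes_vector);

        try std.testing.expectEqual(scalar, vectorized);
    }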

lib/zig.h (1619 changes)

File diff suppressed because it is too large.


@@ -3325,24 +3325,20 @@ fn processOneJob(comp: *Compilation, job: Job) !void {
             const decl_emit_h = emit_h.declPtr(decl_index);
             const fwd_decl = &decl_emit_h.fwd_decl;
             fwd_decl.shrinkRetainingCapacity(0);
-            var typedefs_arena = std.heap.ArenaAllocator.init(gpa);
-            defer typedefs_arena.deinit();
+            var ctypes_arena = std.heap.ArenaAllocator.init(gpa);
+            defer ctypes_arena.deinit();

             var dg: c_codegen.DeclGen = .{
                 .gpa = gpa,
                 .module = module,
                 .error_msg = null,
-                .decl_index = decl_index,
+                .decl_index = decl_index.toOptional(),
                 .decl = decl,
                 .fwd_decl = fwd_decl.toManaged(gpa),
-                .typedefs = c_codegen.TypedefMap.initContext(gpa, .{ .mod = module }),
-                .typedefs_arena = typedefs_arena.allocator(),
+                .ctypes = .{},
             };
             defer {
-                for (dg.typedefs.values()) |typedef| {
-                    module.gpa.free(typedef.rendered);
-                }
-                dg.typedefs.deinit();
+                dg.ctypes.deinit(gpa);
                 dg.fwd_decl.deinit();
             }
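This hunk (and the link/C.zig changes below) leans on the std idiom of temporarily promoting an unmanaged container to a managed one and moving it back when done; fwd_decl.toManaged(gpa) above is one instance. A minimal sketch of that round trip:

    const std = @import("std");

    test "toManaged/moveToUnmanaged round trip" {
        const gpa = std.testing.allocator;

        var fwd_decl: std.ArrayListUnmanaged(u8) = .{};
        defer fwd_decl.deinit(gpa);

        // Promote: bundle the allocator in so callees need not pass it.
        var managed = fwd_decl.toManaged(gpa);
        try managed.appendSlice("zig_extern void start(void);\n");

        // Demote: hand the (possibly reallocated) buffer back to the field.
        fwd_decl = managed.moveToUnmanaged();

        try std.testing.expect(std.mem.startsWith(u8, fwd_decl.items, "zig_extern"));
    }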

File diff suppressed because it is too large.

src/codegen/c/type.zig (new file, 1919 lines)

File diff suppressed because it is too large.


@@ -22,27 +22,22 @@ base: link.File,
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together.
 decl_table: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, DeclBlock) = .{},

-/// Stores Type/Value data for `typedefs` to reference.
-/// Accumulates allocations and then there is a periodic garbage collection after flush().
-arena: std.heap.ArenaAllocator,
-
 /// Per-declaration data.
 const DeclBlock = struct {
     code: std.ArrayListUnmanaged(u8) = .{},
     fwd_decl: std.ArrayListUnmanaged(u8) = .{},
-    /// Each Decl stores a mapping of Zig Types to corresponding C types, for every
-    /// Zig Type used by the Decl. In flush(), we iterate over each Decl
-    /// and emit the typedef code for all types, making sure to not emit the same thing twice.
-    /// Any arena memory the Type points to lives in the `arena` field of `C`.
-    typedefs: codegen.TypedefMap.Unmanaged = .{},
+    /// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate
+    /// over each `Decl` and generate the definition for each used `CType` once.
+    ctypes: codegen.CType.Store = .{},
+    /// Key and Value storage use the ctype arena.
+    lazy_fns: codegen.LazyFnMap = .{},

     fn deinit(db: *DeclBlock, gpa: Allocator) void {
-        db.code.deinit(gpa);
+        db.lazy_fns.deinit(gpa);
+        db.ctypes.deinit(gpa);
         db.fwd_decl.deinit(gpa);
-        for (db.typedefs.values()) |typedef| {
-            gpa.free(typedef.rendered);
-        }
-        db.typedefs.deinit(gpa);
+        db.code.deinit(gpa);
         db.* = undefined;
     }
 };
@@ -64,7 +59,6 @@ pub fn openPath(gpa: Allocator, sub_path: []const u8, options: link.Options) !*C
     errdefer gpa.destroy(c_file);

     c_file.* = C{
-        .arena = std.heap.ArenaAllocator.init(gpa),
         .base = .{
             .tag = .c,
             .options = options,
@@ -83,8 +77,6 @@ pub fn deinit(self: *C) void {
         db.deinit(gpa);
     }
     self.decl_table.deinit(gpa);
-
-    self.arena.deinit();
 }

 pub fn freeDecl(self: *C, decl_index: Module.Decl.Index) void {
@@ -99,124 +91,122 @@ pub fn updateFunc(self: *C, module: *Module, func: *Module.Fn, air: Air, liveness: Liveness) !void {
     const tracy = trace(@src());
     defer tracy.end();

+    const gpa = self.base.allocator;
+
     const decl_index = func.owner_decl;
-    const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index);
+    const gop = try self.decl_table.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
         gop.value_ptr.* = .{};
     }
+    const ctypes = &gop.value_ptr.ctypes;
+    const lazy_fns = &gop.value_ptr.lazy_fns;
     const fwd_decl = &gop.value_ptr.fwd_decl;
-    const typedefs = &gop.value_ptr.typedefs;
     const code = &gop.value_ptr.code;
+    ctypes.clearRetainingCapacity(gpa);
+    lazy_fns.clearRetainingCapacity();
     fwd_decl.shrinkRetainingCapacity(0);
-    for (typedefs.values()) |typedef| {
-        module.gpa.free(typedef.rendered);
-    }
-    typedefs.clearRetainingCapacity();
     code.shrinkRetainingCapacity(0);

     var function: codegen.Function = .{
-        .value_map = codegen.CValueMap.init(module.gpa),
+        .value_map = codegen.CValueMap.init(gpa),
         .air = air,
         .liveness = liveness,
         .func = func,
         .object = .{
             .dg = .{
-                .gpa = module.gpa,
+                .gpa = gpa,
                 .module = module,
                 .error_msg = null,
-                .decl_index = decl_index,
+                .decl_index = decl_index.toOptional(),
                 .decl = module.declPtr(decl_index),
-                .fwd_decl = fwd_decl.toManaged(module.gpa),
-                .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }),
-                .typedefs_arena = self.arena.allocator(),
+                .fwd_decl = fwd_decl.toManaged(gpa),
+                .ctypes = ctypes.*,
             },
-            .code = code.toManaged(module.gpa),
+            .code = code.toManaged(gpa),
             .indent_writer = undefined, // set later so we can get a pointer to object.code
         },
-        .arena = std.heap.ArenaAllocator.init(module.gpa),
+        .lazy_fns = lazy_fns.*,
+        .arena = std.heap.ArenaAllocator.init(gpa),
     };
     function.object.indent_writer = .{ .underlying_writer = function.object.code.writer() };
-    defer function.deinit(module.gpa);
+    defer function.deinit();

     codegen.genFunc(&function) catch |err| switch (err) {
         error.AnalysisFail => {
-            try module.failed_decls.put(module.gpa, decl_index, function.object.dg.error_msg.?);
+            try module.failed_decls.put(gpa, decl_index, function.object.dg.error_msg.?);
             return;
         },
         else => |e| return e,
     };

+    ctypes.* = function.object.dg.ctypes.move();
+    lazy_fns.* = function.lazy_fns.move();
     fwd_decl.* = function.object.dg.fwd_decl.moveToUnmanaged();
-    typedefs.* = function.object.dg.typedefs.unmanaged;
-    function.object.dg.typedefs.unmanaged = .{};
     code.* = function.object.code.moveToUnmanaged();

     // Free excess allocated memory for this Decl.
-    fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len);
-    code.shrinkAndFree(module.gpa, code.items.len);
+    ctypes.shrinkAndFree(gpa, ctypes.count());
+    lazy_fns.shrinkAndFree(gpa, lazy_fns.count());
+    fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
+    code.shrinkAndFree(gpa, code.items.len);
 }

 pub fn updateDecl(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
     const tracy = trace(@src());
     defer tracy.end();

-    const gop = try self.decl_table.getOrPut(self.base.allocator, decl_index);
+    const gpa = self.base.allocator;
+
+    const gop = try self.decl_table.getOrPut(gpa, decl_index);
     if (!gop.found_existing) {
         gop.value_ptr.* = .{};
     }
+    const ctypes = &gop.value_ptr.ctypes;
     const fwd_decl = &gop.value_ptr.fwd_decl;
-    const typedefs = &gop.value_ptr.typedefs;
     const code = &gop.value_ptr.code;
+    ctypes.clearRetainingCapacity(gpa);
     fwd_decl.shrinkRetainingCapacity(0);
-    for (typedefs.values()) |value| {
-        module.gpa.free(value.rendered);
-    }
-    typedefs.clearRetainingCapacity();
     code.shrinkRetainingCapacity(0);

     const decl = module.declPtr(decl_index);

     var object: codegen.Object = .{
         .dg = .{
-            .gpa = module.gpa,
+            .gpa = gpa,
             .module = module,
             .error_msg = null,
-            .decl_index = decl_index,
+            .decl_index = decl_index.toOptional(),
             .decl = decl,
-            .fwd_decl = fwd_decl.toManaged(module.gpa),
-            .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }),
-            .typedefs_arena = self.arena.allocator(),
+            .fwd_decl = fwd_decl.toManaged(gpa),
+            .ctypes = ctypes.*,
         },
-        .code = code.toManaged(module.gpa),
+        .code = code.toManaged(gpa),
         .indent_writer = undefined, // set later so we can get a pointer to object.code
     };
     object.indent_writer = .{ .underlying_writer = object.code.writer() };
     defer {
         object.code.deinit();
-        for (object.dg.typedefs.values()) |typedef| {
-            module.gpa.free(typedef.rendered);
-        }
-        object.dg.typedefs.deinit();
+        object.dg.ctypes.deinit(object.dg.gpa);
         object.dg.fwd_decl.deinit();
     }

     codegen.genDecl(&object) catch |err| switch (err) {
         error.AnalysisFail => {
-            try module.failed_decls.put(module.gpa, decl_index, object.dg.error_msg.?);
+            try module.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
             return;
         },
         else => |e| return e,
     };

+    ctypes.* = object.dg.ctypes.move();
     fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
-    typedefs.* = object.dg.typedefs.unmanaged;
-    object.dg.typedefs.unmanaged = .{};
     code.* = object.code.moveToUnmanaged();

     // Free excess allocated memory for this Decl.
-    fwd_decl.shrinkAndFree(module.gpa, fwd_decl.items.len);
-    code.shrinkAndFree(module.gpa, code.items.len);
+    ctypes.shrinkAndFree(gpa, ctypes.count());
+    fwd_decl.shrinkAndFree(gpa, fwd_decl.items.len);
+    code.shrinkAndFree(gpa, code.items.len);
 }

 pub fn updateDeclLineNumber(self: *C, module: *Module, decl_index: Module.Decl.Index) !void {
@@ -246,7 +236,7 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
     sub_prog_node.activate();
     defer sub_prog_node.end();

-    const gpa = comp.gpa;
+    const gpa = self.base.allocator;
     const module = self.base.options.module.?;

     // This code path happens exclusively with -ofmt=c. The flush logic for
@@ -257,30 +247,28 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
     const abi_define = abiDefine(comp);

-    // Covers defines, zig.h, typedef, and asm.
-    var buf_count: usize = 2;
-    if (abi_define != null) buf_count += 1;
-    try f.all_buffers.ensureUnusedCapacity(gpa, buf_count);
+    // Covers defines, zig.h, ctypes, asm, lazy fwd, lazy code.
+    try f.all_buffers.ensureUnusedCapacity(gpa, 6);

     if (abi_define) |buf| f.appendBufAssumeCapacity(buf);
     f.appendBufAssumeCapacity(zig_h);

-    const typedef_index = f.all_buffers.items.len;
+    const ctypes_index = f.all_buffers.items.len;
     f.all_buffers.items.len += 1;

     {
-        var asm_buf = f.asm_buf.toManaged(module.gpa);
-        defer asm_buf.deinit();
+        var asm_buf = f.asm_buf.toManaged(gpa);
+        defer f.asm_buf = asm_buf.moveToUnmanaged();

-        try codegen.genGlobalAsm(module, &asm_buf);
-        f.asm_buf = asm_buf.moveToUnmanaged();
-        f.appendBufAssumeCapacity(f.asm_buf.items);
+        try codegen.genGlobalAsm(module, asm_buf.writer());
+        f.appendBufAssumeCapacity(asm_buf.items);
     }

-    try self.flushErrDecls(&f);
+    const lazy_indices = f.all_buffers.items.len;
+    f.all_buffers.items.len += 2;
+    try self.flushErrDecls(&f.lazy_db);

-    // Typedefs, forward decls, and non-functions first.
+    // `CType`s, forward decls, and non-functions first.
     // Unlike other backends, the .c code we are emitting is order-dependent. Therefore
     // we must traverse the set of Decls that we are emitting according to their dependencies.
     // Our strategy is to populate a set of remaining decls, pop Decls one by one,
@@ -307,11 +295,35 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
         }
     }

-    f.all_buffers.items[typedef_index] = .{
-        .iov_base = if (f.typedef_buf.items.len > 0) f.typedef_buf.items.ptr else "",
-        .iov_len = f.typedef_buf.items.len,
-    };
-    f.file_size += f.typedef_buf.items.len;
+    {
+        // We need to flush lazy ctypes after flushing all decls but before flushing any decl ctypes.
+        assert(f.ctypes.count() == 0);
+        try self.flushCTypes(&f, .none, f.lazy_db.ctypes);
+
+        var it = self.decl_table.iterator();
+        while (it.next()) |entry|
+            try self.flushCTypes(&f, entry.key_ptr.toOptional(), entry.value_ptr.ctypes);
+    }
+
+    {
+        f.all_buffers.items[lazy_indices + 0] = .{
+            .iov_base = if (f.lazy_db.fwd_decl.items.len > 0) f.lazy_db.fwd_decl.items.ptr else "",
+            .iov_len = f.lazy_db.fwd_decl.items.len,
+        };
+        f.file_size += f.lazy_db.fwd_decl.items.len;
+
+        f.all_buffers.items[lazy_indices + 1] = .{
+            .iov_base = if (f.lazy_db.code.items.len > 0) f.lazy_db.code.items.ptr else "",
+            .iov_len = f.lazy_db.code.items.len,
+        };
+        f.file_size += f.lazy_db.code.items.len;
+    }
+
+    f.all_buffers.items[ctypes_index] = .{
+        .iov_base = if (f.ctypes_buf.items.len > 0) f.ctypes_buf.items.ptr else "",
+        .iov_len = f.ctypes_buf.items.len,
+    };
+    f.file_size += f.ctypes_buf.items.len;

     // Now the code.
     try f.all_buffers.ensureUnusedCapacity(gpa, decl_values.len);
@@ -324,22 +336,23 @@ pub fn flushModule(self: *C, comp: *Compilation, prog_node: *std.Progress.Node)
 }

 const Flush = struct {
-    err_decls: DeclBlock = .{},
     remaining_decls: std.AutoArrayHashMapUnmanaged(Module.Decl.Index, void) = .{},
-    typedefs: Typedefs = .{},
-    typedef_buf: std.ArrayListUnmanaged(u8) = .{},
+    ctypes: codegen.CType.Store = .{},
+    ctypes_map: std.ArrayListUnmanaged(codegen.CType.Index) = .{},
+    ctypes_buf: std.ArrayListUnmanaged(u8) = .{},
+    lazy_db: DeclBlock = .{},
+    lazy_fns: LazyFns = .{},
     asm_buf: std.ArrayListUnmanaged(u8) = .{},
     /// We collect a list of buffers to write, and write them all at once with pwritev 😎
     all_buffers: std.ArrayListUnmanaged(std.os.iovec_const) = .{},
     /// Keeps track of the total bytes of `all_buffers`.
     file_size: u64 = 0,

-    const Typedefs = std.HashMapUnmanaged(
-        Type,
-        void,
-        Type.HashContext64,
-        std.hash_map.default_max_load_percentage,
-    );
+    const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, void);

     fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void {
         if (buf.len == 0) return;
@@ -349,10 +362,13 @@ const Flush = struct {
     fn deinit(f: *Flush, gpa: Allocator) void {
         f.all_buffers.deinit(gpa);
-        f.typedef_buf.deinit(gpa);
-        f.typedefs.deinit(gpa);
+        f.asm_buf.deinit(gpa);
+        f.lazy_fns.deinit(gpa);
+        f.lazy_db.deinit(gpa);
+        f.ctypes_buf.deinit(gpa);
+        f.ctypes_map.deinit(gpa);
+        f.ctypes.deinit(gpa);
         f.remaining_decls.deinit(gpa);
-        f.err_decls.deinit(gpa);
     }
 };
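The pwritev 😎 comment is the design note worth keeping in mind: flushing renders many independent byte buffers (defines, zig.h, ctypes, per-decl forward decls and code) and submits them as one vectored positional write instead of concatenating them. A minimal sketch of that batching, using std.fs.File.pwritevAll with illustrative buffer contents:

    const std = @import("std");

    fn iov(s: []const u8) std.os.iovec_const {
        return .{ .iov_base = s.ptr, .iov_len = s.len };
    }

    pub fn main() !void {
        var file = try std.fs.cwd().createFile("out.c", .{});
        defer file.close();

        // Each pre-rendered piece stays in its own buffer; the kernel
        // stitches them together in a single vectored write at offset 0.
        const bufs = [_]std.os.iovec_const{
            iov("// forward declarations\n"),
            iov("int main(void) { return 0; }\n"),
        };
        try file.pwritevAll(&bufs, 0);
    }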
@@ -360,53 +376,116 @@ const FlushDeclError = error{
     OutOfMemory,
 };

-fn flushTypedefs(self: *C, f: *Flush, typedefs: codegen.TypedefMap.Unmanaged) FlushDeclError!void {
-    if (typedefs.count() == 0) return;
+fn flushCTypes(
+    self: *C,
+    f: *Flush,
+    decl_index: Module.Decl.OptionalIndex,
+    decl_ctypes: codegen.CType.Store,
+) FlushDeclError!void {
     const gpa = self.base.allocator;
-    const module = self.base.options.module.?;
+    const mod = self.base.options.module.?;

-    try f.typedefs.ensureUnusedCapacityContext(gpa, @intCast(u32, typedefs.count()), .{
-        .mod = module,
-    });
-    var it = typedefs.iterator();
-    while (it.next()) |new| {
-        const gop = f.typedefs.getOrPutAssumeCapacityContext(new.key_ptr.*, .{
-            .mod = module,
-        });
-        if (!gop.found_existing) {
-            try f.typedef_buf.appendSlice(gpa, new.value_ptr.rendered);
-        }
-    }
-}
+    const decl_ctypes_len = decl_ctypes.count();
+    f.ctypes_map.clearRetainingCapacity();
+    try f.ctypes_map.ensureTotalCapacity(gpa, decl_ctypes_len);
+
+    var global_ctypes = f.ctypes.promote(gpa);
+    defer f.ctypes.demote(global_ctypes);
+
+    var ctypes_buf = f.ctypes_buf.toManaged(gpa);
+    defer f.ctypes_buf = ctypes_buf.moveToUnmanaged();
+    const writer = ctypes_buf.writer();
+
+    const slice = decl_ctypes.set.map.entries.slice();
+    for (slice.items(.key), 0..) |decl_cty, decl_i| {
+        const Context = struct {
+            arena: Allocator,
+            ctypes_map: []codegen.CType.Index,
+            cached_hash: codegen.CType.Store.Set.Map.Hash,
+            idx: codegen.CType.Index,
+
+            pub fn hash(ctx: @This(), _: codegen.CType) codegen.CType.Store.Set.Map.Hash {
+                return ctx.cached_hash;
+            }
+            pub fn eql(ctx: @This(), lhs: codegen.CType, rhs: codegen.CType, _: usize) bool {
+                return lhs.eqlContext(rhs, ctx);
+            }
+            pub fn eqlIndex(
+                ctx: @This(),
+                lhs_idx: codegen.CType.Index,
+                rhs_idx: codegen.CType.Index,
+            ) bool {
+                if (lhs_idx < codegen.CType.Tag.no_payload_count or
+                    rhs_idx < codegen.CType.Tag.no_payload_count) return lhs_idx == rhs_idx;
+                const lhs_i = lhs_idx - codegen.CType.Tag.no_payload_count;
+                if (lhs_i >= ctx.ctypes_map.len) return false;
+                return ctx.ctypes_map[lhs_i] == rhs_idx;
+            }
+            pub fn copyIndex(ctx: @This(), idx: codegen.CType.Index) codegen.CType.Index {
+                if (idx < codegen.CType.Tag.no_payload_count) return idx;
+                return ctx.ctypes_map[idx - codegen.CType.Tag.no_payload_count];
+            }
+        };
+        const decl_idx = @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + decl_i);
+        const ctx = Context{
+            .arena = global_ctypes.arena.allocator(),
+            .ctypes_map = f.ctypes_map.items,
+            .cached_hash = decl_ctypes.indexToHash(decl_idx),
+            .idx = decl_idx,
+        };
+        const gop = try global_ctypes.set.map.getOrPutContextAdapted(gpa, decl_cty, ctx, .{
+            .store = &global_ctypes.set,
+        });
+        const global_idx =
+            @intCast(codegen.CType.Index, codegen.CType.Tag.no_payload_count + gop.index);
+        f.ctypes_map.appendAssumeCapacity(global_idx);
+        if (!gop.found_existing) {
+            errdefer _ = global_ctypes.set.map.pop();
+            gop.key_ptr.* = try decl_cty.copyContext(ctx);
+        }
+        if (std.debug.runtime_safety) {
+            const global_cty = &global_ctypes.set.map.entries.items(.key)[gop.index];
+            assert(global_cty == gop.key_ptr);
+            assert(decl_cty.eqlContext(global_cty.*, ctx));
+            assert(decl_cty.hash(decl_ctypes.set) == global_cty.hash(global_ctypes.set));
+        }
+        try codegen.genTypeDecl(
+            mod,
+            writer,
+            global_ctypes.set,
+            global_idx,
+            decl_index,
+            decl_ctypes.set,
+            decl_idx,
+            gop.found_existing,
+        );
+    }
+}

-fn flushErrDecls(self: *C, f: *Flush) FlushDeclError!void {
-    const module = self.base.options.module.?;
+fn flushErrDecls(self: *C, db: *DeclBlock) FlushDeclError!void {
+    const gpa = self.base.allocator;

-    const fwd_decl = &f.err_decls.fwd_decl;
-    const typedefs = &f.err_decls.typedefs;
-    const code = &f.err_decls.code;
+    const fwd_decl = &db.fwd_decl;
+    const ctypes = &db.ctypes;
+    const code = &db.code;

     var object = codegen.Object{
         .dg = .{
-            .gpa = module.gpa,
-            .module = module,
+            .gpa = gpa,
+            .module = self.base.options.module.?,
             .error_msg = null,
-            .decl_index = undefined,
-            .decl = undefined,
-            .fwd_decl = fwd_decl.toManaged(module.gpa),
-            .typedefs = typedefs.promoteContext(module.gpa, .{ .mod = module }),
-            .typedefs_arena = self.arena.allocator(),
+            .decl_index = .none,
+            .decl = null,
+            .fwd_decl = fwd_decl.toManaged(gpa),
+            .ctypes = ctypes.*,
         },
-        .code = code.toManaged(module.gpa),
+        .code = code.toManaged(gpa),
         .indent_writer = undefined, // set later so we can get a pointer to object.code
     };
     object.indent_writer = .{ .underlying_writer = object.code.writer() };
     defer {
         object.code.deinit();
-        for (object.dg.typedefs.values()) |typedef| {
-            module.gpa.free(typedef.rendered);
-        }
-        object.dg.typedefs.deinit();
+        object.dg.ctypes.deinit(gpa);
         object.dg.fwd_decl.deinit();
     }
@@ -416,14 +495,58 @@ fn flushErrDecls(self: *C, db: *DeclBlock) FlushDeclError!void {
     };

     fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
-    typedefs.* = object.dg.typedefs.unmanaged;
-    object.dg.typedefs.unmanaged = .{};
+    ctypes.* = object.dg.ctypes.move();
     code.* = object.code.moveToUnmanaged();
+}

-    try self.flushTypedefs(f, typedefs.*);
-    try f.all_buffers.ensureUnusedCapacity(self.base.allocator, 1);
-    f.appendBufAssumeCapacity(fwd_decl.items);
-    f.appendBufAssumeCapacity(code.items);
+fn flushLazyFn(self: *C, db: *DeclBlock, lazy_fn: codegen.LazyFnMap.Entry) FlushDeclError!void {
+    const gpa = self.base.allocator;
+
+    const fwd_decl = &db.fwd_decl;
+    const ctypes = &db.ctypes;
+    const code = &db.code;
+
+    var object = codegen.Object{
+        .dg = .{
+            .gpa = gpa,
+            .module = self.base.options.module.?,
+            .error_msg = null,
+            .decl_index = .none,
+            .decl = null,
+            .fwd_decl = fwd_decl.toManaged(gpa),
+            .ctypes = ctypes.*,
+        },
+        .code = code.toManaged(gpa),
+        .indent_writer = undefined, // set later so we can get a pointer to object.code
+    };
+    object.indent_writer = .{ .underlying_writer = object.code.writer() };
+    defer {
+        object.code.deinit();
+        object.dg.ctypes.deinit(gpa);
+        object.dg.fwd_decl.deinit();
+    }
+
+    codegen.genLazyFn(&object, lazy_fn) catch |err| switch (err) {
+        error.AnalysisFail => unreachable,
+        else => |e| return e,
+    };
+
+    fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
+    ctypes.* = object.dg.ctypes.move();
+    code.* = object.code.moveToUnmanaged();
+}
+
+fn flushLazyFns(self: *C, f: *Flush, lazy_fns: codegen.LazyFnMap) FlushDeclError!void {
+    const gpa = self.base.allocator;
+
+    try f.lazy_fns.ensureUnusedCapacity(gpa, @intCast(Flush.LazyFns.Size, lazy_fns.count()));
+
+    var it = lazy_fns.iterator();
+    while (it.next()) |entry| {
+        const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
+        if (gop.found_existing) continue;
+        gop.value_ptr.* = {};
+        try self.flushLazyFn(&f.lazy_db, entry);
+    }
 }

 /// Assumes `decl` was in the `remaining_decls` set, and has already been removed.
@@ -433,8 +556,8 @@ fn flushDecl(
     decl_index: Module.Decl.Index,
     export_names: std.StringHashMapUnmanaged(void),
 ) FlushDeclError!void {
-    const module = self.base.options.module.?;
-    const decl = module.declPtr(decl_index);
+    const gpa = self.base.allocator;
+    const decl = self.base.options.module.?.declPtr(decl_index);
     // Before flushing any particular Decl we must ensure its
     // dependencies are already flushed, so that the order in the .c
     // file comes out correctly.
@@ -445,10 +568,9 @@ fn flushDecl(
     }

     const decl_block = self.decl_table.getPtr(decl_index).?;
-    const gpa = self.base.allocator;

-    try self.flushTypedefs(f, decl_block.typedefs);
-    try f.all_buffers.ensureUnusedCapacity(gpa, 2);
+    try self.flushLazyFns(f, decl_block.lazy_fns);
+    try f.all_buffers.ensureUnusedCapacity(gpa, 1);
     if (!(decl.isExtern() and export_names.contains(mem.span(decl.name))))
         f.appendBufAssumeCapacity(decl_block.fwd_decl.items);
 }
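flushCTypes above is the heart of the PR: each declaration's local CType store is merged into one global store via an adapted getOrPut keyed on a precomputed hash, every per-decl index is remapped to a global index through ctypes_map, and a definition is rendered only when found_existing is false. A much-simplified sketch of that remap-and-emit-once strategy, with plain strings standing in for CTypes rather than the compiler's actual data structures:

    const std = @import("std");

    test "remap local indices into a global pool, emitting each type once" {
        const gpa = std.testing.allocator;

        var global = std.StringArrayHashMap(void).init(gpa);
        defer global.deinit();
        var rendered = std.ArrayList(u8).init(gpa);
        defer rendered.deinit();

        // Types referenced while flushing; duplicates are common.
        const decl_types = [_][]const u8{ "uint8_t", "uint32_t", "uint8_t" };

        // local index -> global index, like f.ctypes_map.
        var ctypes_map = std.ArrayList(usize).init(gpa);
        defer ctypes_map.deinit();

        for (decl_types) |ty| {
            const gop = try global.getOrPut(ty);
            try ctypes_map.append(gop.index);
            if (!gop.found_existing) {
                // Only the first occurrence of a type renders a definition.
                try rendered.writer().print("typedef-for-{s};\n", .{ty});
            }
        }

        try std.testing.expectEqual(@as(usize, 2), global.count());
        try std.testing.expectEqualSlices(usize, &.{ 0, 1, 0 }, ctypes_map.items);
    }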

stage1/zig.h (new file, 2486 lines)

File diff suppressed because it is too large.


@@ -551,7 +551,11 @@ test "align(N) on functions" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+    // This is not supported on MSVC
+    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) {
+        return error.SkipZigTest;
+    }

     // function alignment is a compile error on wasm32/wasm64
     if (native_arch == .wasm32 or native_arch == .wasm64) return error.SkipZigTest;


@@ -7,6 +7,7 @@ const is_x86_64_linux = builtin.cpu.arch == .x86_64 and builtin.os.tag == .linux
 comptime {
     if (builtin.zig_backend != .stage2_arm and
         builtin.zig_backend != .stage2_aarch64 and
+        !(builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) and // MSVC doesn't support inline assembly
         is_x86_64_linux)
     {
         asm (
@@ -23,7 +24,8 @@ test "module level assembly" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

     if (is_x86_64_linux) {
         try expect(this_is_my_alias() == 1234);
@@ -36,7 +38,8 @@ test "output constraint modifiers" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

     // This is only testing compilation.
     var a: u32 = 3;
@@ -58,7 +61,8 @@ test "alternative constraints" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

     // Make sure we allow commas as a separator for alternative constraints.
     var a: u32 = 3;
@@ -75,7 +79,8 @@ test "sized integer/float in asm input" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

     asm volatile (""
         :
@@ -125,7 +130,8 @@ test "struct/array/union types as input values" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
+
+    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

     asm volatile (""
         :
@@ -151,6 +157,8 @@ test "asm modifiers (AArch64)" {
     if (builtin.target.cpu.arch != .aarch64) return error.SkipZigTest;
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
+
+    if (builtin.zig_backend == .stage2_c and builtin.os.tag == .windows) return error.SkipZigTest; // MSVC doesn't support inline assembly

     var x: u32 = 15;
     const double = asm ("add %[ret:w], %[in:w], %[in:w]"
         : [ret] "=r" (-> u32),


@@ -13,7 +13,6 @@ test "int comparison elision" {
     // TODO: support int types > 128 bits wide in other backends
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@@ -7,7 +7,6 @@ test "strlit to vector" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO

     const strlit = "0123456789abcdef0123456789ABCDEF";
     const vec_from_strlit: @Vector(32, u8) = strlit.*;


@@ -1463,7 +1463,6 @@ test "vector integer addition" {
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

     const S = struct {


@@ -1330,7 +1330,6 @@ test "struct field init value is size of the struct" {
 }

 test "under-aligned struct field" {
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO


@@ -75,7 +75,6 @@ test "vector int operators" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

     const S = struct {
@@ -178,7 +177,6 @@ test "tuple to vector" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

     if (builtin.zig_backend == .stage2_llvm and builtin.cpu.arch == .aarch64) {
@@ -943,7 +941,6 @@ test "multiplication-assignment operator with an array operand" {
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO

     const S = struct {
@@ -1247,7 +1244,6 @@ test "array operands to shuffle are coerced to vectors" {
 test "load packed vector element" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
@@ -1260,7 +1256,6 @@ test "load packed vector element" {
 test "store packed vector element" {
     if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
-    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
     if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO


@@ -959,7 +959,7 @@ pub fn addCases(ctx: *TestContext) !void {
         \\    _ = a;
         \\}
     ,
-        \\zig_extern void start(zig_u8 const a0);
+        \\zig_extern void start(uint8_t const a0);
         \\
     );
     ctx.h("header with multiple param function", linux_x64,
@@ -967,19 +967,19 @@ pub fn addCases(ctx: *TestContext) !void {
         \\    _ = a; _ = b; _ = c;
         \\}
     ,
-        \\zig_extern void start(zig_u8 const a0, zig_u8 const a1, zig_u8 const a2);
+        \\zig_extern void start(uint8_t const a0, uint8_t const a1, uint8_t const a2);
         \\
     );
     ctx.h("header with u32 param function", linux_x64,
        \\export fn start(a: u32) void{ _ = a; }
     ,
-        \\zig_extern void start(zig_u32 const a0);
+        \\zig_extern void start(uint32_t const a0);
         \\
     );
     ctx.h("header with usize param function", linux_x64,
        \\export fn start(a: usize) void{ _ = a; }
     ,
-        \\zig_extern void start(zig_usize const a0);
+        \\zig_extern void start(uintptr_t const a0);
         \\
     );
     ctx.h("header with bool param function", linux_x64,
@@ -993,7 +993,7 @@ pub fn addCases(ctx: *TestContext) !void {
         \\    unreachable;
         \\}
     ,
-        \\zig_extern zig_noreturn start(void);
+        \\zig_extern zig_noreturn void start(void);
         \\
     );
     ctx.h("header with multiple functions", linux_x64,
@@ -1009,7 +1009,7 @@ pub fn addCases(ctx: *TestContext) !void {
     ctx.h("header with multiple includes", linux_x64,
        \\export fn start(a: u32, b: usize) void{ _ = a; _ = b; }
     ,
-        \\zig_extern void start(zig_u32 const a0, zig_usize const a1);
+        \\zig_extern void start(uint32_t const a0, uintptr_t const a1);
        \\
     );
 }
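These expectations track the rewritten lib/zig.h, which now prefers standard <stdint.h> names (uint8_t, uint32_t, uintptr_t) in emitted headers over the bespoke zig_u8/zig_u32/zig_usize typedefs. For reference, the Zig side of one of these harness-driven cases is just an exported function; a minimal sketch, with the expected header line shown as a comment:

    // Emitted C header declaration expected by the test above:
    //   zig_extern void start(uint32_t const a0);
    export fn start(a: u32) void {
        _ = a;
    }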