cbe: reapply writer changes

Andrew Kelley 2025-07-02 17:18:28 -07:00
parent 6963a1c7b9
commit 494819be91
2 changed files with 657 additions and 616 deletions

(diff of the first changed file suppressed because it is too large)


@@ -25,34 +25,34 @@ base: link.File,
/// This linker backend does not try to incrementally link output C source code.
/// Instead, it tracks all declarations in this table, and iterates over it
/// in the flush function, stitching pre-rendered pieces of C code together.
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock) = .empty,
navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, AvBlock),
/// All the string bytes of rendered C code, all squished into one array.
/// While in progress, a separate buffer is used, and then when finished, the
/// buffer is copied into this one.
string_bytes: std.ArrayListUnmanaged(u8) = .empty,
string_bytes: std.ArrayListUnmanaged(u8),
/// Tracks all the anonymous decls that are used by all the decls so they can
/// be rendered during flush().
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock) = .empty,
uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, AvBlock),
/// Sparse set of uavs that are overaligned. Underaligned anon decls are
/// lowered the same as ABI-aligned anon decls. The keys here are a subset of
/// the keys of `uavs`.
aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment) = .empty,
aligned_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, Alignment),
exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock) = .empty,
exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock) = .empty,
exported_navs: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, ExportedBlock),
exported_uavs: std.AutoArrayHashMapUnmanaged(InternPool.Index, ExportedBlock),
/// Optimization, `updateDecl` reuses this buffer rather than creating a new
/// one with every call.
fwd_decl_buf: std.ArrayListUnmanaged(u8) = .empty,
fwd_decl_buf: []u8,
/// Optimization, `updateDecl` reuses this buffer rather than creating a new
/// one with every call.
code_buf: std.ArrayListUnmanaged(u8) = .empty,
code_header_buf: []u8,
/// Optimization, `updateDecl` reuses this buffer rather than creating a new
/// one with every call.
code_buf: []u8,
/// Optimization, `flush` reuses this buffer rather than creating a new
/// one with every call.
lazy_fwd_decl_buf: std.ArrayListUnmanaged(u8) = .empty,
/// Optimization, `flush` reuses this buffer rather than creating a new
/// one with every call.
lazy_code_buf: std.ArrayListUnmanaged(u8) = .empty,
scratch_buf: []u32,
/// A reference into `string_bytes`.
const String = extern struct {
@@ -67,11 +67,11 @@ const String = extern struct {
/// Per-declaration data.
pub const AvBlock = struct {
code: String = String.empty,
fwd_decl: String = String.empty,
fwd_decl: String = .empty,
code: String = .empty,
/// Each `Decl` stores a set of used `CType`s. In `flush()`, we iterate
/// over each `Decl` and generate the definition for each used `CType` once.
ctype_pool: codegen.CType.Pool = codegen.CType.Pool.empty,
ctype_pool: codegen.CType.Pool = .empty,
/// May contain string references to ctype_pool
lazy_fns: codegen.LazyFnMap = .{},
@@ -84,20 +84,21 @@ pub const AvBlock = struct {
/// Per-exported-symbol data.
pub const ExportedBlock = struct {
fwd_decl: String = String.empty,
fwd_decl: String = .empty,
};
pub fn getString(this: C, s: String) []const u8 {
return this.string_bytes.items[s.start..][0..s.len];
}
pub fn addString(this: *C, s: []const u8) Allocator.Error!String {
pub fn addString(this: *C, writers: []const *std.io.Writer.Allocating) Allocator.Error!String {
const comp = this.base.comp;
const gpa = comp.gpa;
try this.string_bytes.appendSlice(gpa, s);
const start = this.string_bytes.items.len;
for (writers) |writer| try this.string_bytes.appendSlice(gpa, writer.getWritten());
return .{
.start = @intCast(this.string_bytes.items.len - s.len),
.len = @intCast(s.len),
.start = @intCast(start),
.len = @intCast(this.string_bytes.items.len - start),
};
}
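For reference, a minimal standalone re-creation of the new `addString` contract, built only from what the hunk above shows; the `String` struct, `gpa`, and the test values here are illustrative, not the linker's own:

const std = @import("std");

const String = struct { start: u32, len: u32 };

/// Concatenate the written bytes of several allocating writers into one
/// backing array and return the resulting span.
fn addString(
    gpa: std.mem.Allocator,
    string_bytes: *std.ArrayListUnmanaged(u8),
    writers: []const *std.io.Writer.Allocating,
) error{OutOfMemory}!String {
    const start = string_bytes.items.len;
    for (writers) |writer| try string_bytes.appendSlice(gpa, writer.getWritten());
    return .{
        .start = @intCast(start),
        .len = @intCast(string_bytes.items.len - start),
    };
}

test addString {
    const gpa = std.testing.allocator;
    var string_bytes: std.ArrayListUnmanaged(u8) = .empty;
    defer string_bytes.deinit(gpa);
    var fwd: std.io.Writer.Allocating = .init(gpa);
    defer fwd.deinit();
    var code: std.io.Writer.Allocating = .init(gpa);
    defer code.deinit();
    try fwd.writer.writeAll("static int x;\n");
    try code.writer.writeAll("int x = 1;\n");
    const s = try addString(gpa, &string_bytes, &.{ &fwd, &code });
    try std.testing.expectEqualStrings(
        "static int x;\nint x = 1;\n",
        string_bytes.items[s.start..][0..s.len],
    );
}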
@@ -147,6 +148,16 @@ pub fn createEmpty(
.file = file,
.build_id = options.build_id,
},
.navs = .empty,
.string_bytes = .empty,
.uavs = .empty,
.aligned_uavs = .empty,
.exported_navs = .empty,
.exported_uavs = .empty,
.fwd_decl_buf = &.{},
.code_header_buf = &.{},
.code_buf = &.{},
.scratch_buf = &.{},
};
return c_file;
@@ -170,10 +181,10 @@ pub fn deinit(self: *C) void {
self.exported_uavs.deinit(gpa);
self.string_bytes.deinit(gpa);
self.fwd_decl_buf.deinit(gpa);
self.code_buf.deinit(gpa);
self.lazy_fwd_decl_buf.deinit(gpa);
self.lazy_code_buf.deinit(gpa);
gpa.free(self.fwd_decl_buf);
gpa.free(self.code_header_buf);
gpa.free(self.code_buf);
gpa.free(self.scratch_buf);
}
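The slice fields freed above follow a lend/reclaim cycle: the retained capacity is handed to a `std.io.Writer.Allocating` for the duration of an update, taken back in a defer via `toArrayList().allocatedSlice()`, and only freed here in `deinit`. A minimal sketch of that cycle, simplified to start from an empty buffer rather than the `initOwnedSlice` hand-off used in the hunks below:

const std = @import("std");

test "lend/reclaim a retained buffer" {
    const gpa = std.testing.allocator;
    var fwd_decl_buf: []u8 = &.{}; // owned capacity, carried across calls
    defer gpa.free(fwd_decl_buf); // final cleanup, as in deinit()
    var aw: std.io.Writer.Allocating = .init(gpa);
    // Reclaim whatever capacity the writer grew instead of freeing it:
    defer fwd_decl_buf = aw.toArrayList().allocatedSlice();
    try aw.writer.writeAll("zig_extern void foo(void);\n");
    try std.testing.expectEqualStrings("zig_extern void foo(void);\n", aw.getWritten());
}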
pub fn updateFunc(
@@ -196,18 +207,14 @@ pub fn updateFunc(
};
gop.value_ptr.code = try self.addString(mir.c.code);
gop.value_ptr.fwd_decl = try self.addString(mir.c.fwd_decl);
gop.value_ptr.code_header = try self.addString(mir.c.code_header);
try self.addUavsFromCodegen(&mir.c.uavs);
}
fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void {
fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) link.File.FlushError!void {
const gpa = self.base.comp.gpa;
const uav = self.uavs.keys()[i];
const fwd_decl = &self.fwd_decl_buf;
const code = &self.code_buf;
fwd_decl.clearRetainingCapacity();
code.clearRetainingCapacity();
var object: codegen.Object = .{
.dg = .{
.gpa = gpa,
@@ -217,21 +224,24 @@ fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void {
.pass = .{ .uav = uav },
.is_naked_fn = false,
.expected_block = null,
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = codegen.CType.Pool.empty,
.scratch = .{},
.fwd_decl = undefined,
.ctype_pool = .empty,
.scratch = .initBuffer(self.scratch_buf),
.uavs = .empty,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
.code_header = undefined,
.code = undefined,
.indent_counter = 0,
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
object.dg.fwd_decl.initOwnedSlice(gpa, self.fwd_decl_buf);
object.code.initOwnedSlice(gpa, self.code_buf);
defer {
object.dg.uavs.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
object.dg.ctype_pool.deinit(object.dg.gpa);
object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
self.fwd_decl_buf = object.dg.fwd_decl.toArrayList().allocatedSlice();
self.code_buf = object.code.toArrayList().allocatedSlice();
self.scratch_buf = object.dg.scratch.allocatedSlice();
}
try object.dg.ctype_pool.init(gpa);
@@ -243,15 +253,15 @@ fn updateUav(self: *C, pt: Zcu.PerThread, i: usize) !void {
//try zcu.failed_decls.put(gpa, decl_index, object.dg.error_msg.?);
//return;
},
else => |e| return e,
error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
};
try self.addUavsFromCodegen(&object.dg.uavs);
object.dg.ctype_pool.freeUnusedCapacity(gpa);
self.uavs.values()[i] = .{
.code = try self.addString(object.code.items),
.fwd_decl = try self.addString(object.dg.fwd_decl.items),
.fwd_decl = try self.addString(&.{&object.dg.fwd_decl}),
.code = try self.addString(&.{&object.code}),
.ctype_pool = object.dg.ctype_pool.move(),
};
}
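The `scratch` list follows the same reuse pattern through `ArrayListUnmanaged.initBuffer`: each call adopts the previously allocated capacity at length zero, and the defer stores the possibly-grown capacity back into `scratch_buf`. A minimal sketch, assuming the buffer is only ever grown by the same `gpa`:

const std = @import("std");

test "reuse scratch capacity across calls" {
    const gpa = std.testing.allocator;
    var scratch_buf: []u32 = &.{};
    defer gpa.free(scratch_buf);
    for (0..2) |_| {
        // Adopt the previously allocated capacity, starting at length zero:
        var scratch: std.ArrayListUnmanaged(u32) = .initBuffer(scratch_buf);
        defer scratch_buf = scratch.allocatedSlice();
        try scratch.append(gpa, 42);
    }
}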
@@ -277,12 +287,8 @@ pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) l
errdefer _ = self.navs.pop();
if (!gop.found_existing) gop.value_ptr.* = .{};
const ctype_pool = &gop.value_ptr.ctype_pool;
const fwd_decl = &self.fwd_decl_buf;
const code = &self.code_buf;
try ctype_pool.init(gpa);
ctype_pool.clearRetainingCapacity();
fwd_decl.clearRetainingCapacity();
code.clearRetainingCapacity();
var object: codegen.Object = .{
.dg = .{
@@ -293,22 +299,25 @@ pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) l
.pass = .{ .nav = nav_index },
.is_naked_fn = false,
.expected_block = null,
.fwd_decl = fwd_decl.toManaged(gpa),
.fwd_decl = undefined,
.ctype_pool = ctype_pool.*,
.scratch = .{},
.scratch = .initBuffer(self.scratch_buf),
.uavs = .empty,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
.code_header = undefined,
.code = undefined,
.indent_counter = 0,
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
object.dg.fwd_decl.initOwnedSlice(gpa, self.fwd_decl_buf);
object.code.initOwnedSlice(gpa, self.code_buf);
defer {
object.dg.uavs.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = object.dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
self.fwd_decl_buf = object.dg.fwd_decl.toArrayList().allocatedSlice();
self.code_buf = object.code.toArrayList().allocatedSlice();
self.scratch_buf = object.dg.scratch.allocatedSlice();
}
codegen.genDecl(&object) catch |err| switch (err) {
@@ -316,10 +325,10 @@ pub fn updateNav(self: *C, pt: Zcu.PerThread, nav_index: InternPool.Nav.Index) l
error.CodegenFail => return,
error.OutOfMemory => |e| return e,
},
else => |e| return e,
error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
};
gop.value_ptr.code = try self.addString(object.code.items);
gop.value_ptr.fwd_decl = try self.addString(object.dg.fwd_decl.items);
gop.value_ptr.fwd_decl = try self.addString(&.{&object.dg.fwd_decl});
gop.value_ptr.code = try self.addString(&.{&object.code});
try self.addUavsFromCodegen(&object.dg.uavs);
}
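The `error.WriteFailed, error.OutOfMemory => return error.OutOfMemory` arms above rely on the destination writers being allocating: the only way such a writer fails to accept bytes is a failed allocation, so `WriteFailed` can be collapsed into `OutOfMemory` without losing information. A minimal sketch of the idiom:

const std = @import("std");

fn render(aw: *std.io.Writer.Allocating) error{OutOfMemory}![]const u8 {
    // An allocating writer only fails when growing its buffer fails.
    aw.writer.writeAll("int main(void) { return 0; }\n") catch |err| switch (err) {
        error.WriteFailed => return error.OutOfMemory,
    };
    return aw.getWritten();
}

test render {
    var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
    defer aw.deinit();
    try std.testing.expect((try render(&aw)).len > 0);
}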
@@ -331,19 +340,14 @@ pub fn updateLineNumber(self: *C, pt: Zcu.PerThread, ti_id: InternPool.TrackedIn
_ = ti_id;
}
fn abiDefines(self: *C, target: *const std.Target) !std.ArrayList(u8) {
const gpa = self.base.comp.gpa;
var defines = std.ArrayList(u8).init(gpa);
errdefer defines.deinit();
const writer = defines.writer();
fn abiDefines(w: *std.io.Writer, target: std.Target) !void {
switch (target.abi) {
.msvc, .itanium => try writer.writeAll("#define ZIG_TARGET_ABI_MSVC\n"),
.msvc, .itanium => try w.writeAll("#define ZIG_TARGET_ABI_MSVC\n"),
else => {},
}
try writer.print("#define ZIG_TARGET_MAX_INT_ALIGNMENT {d}\n", .{
try w.print("#define ZIG_TARGET_MAX_INT_ALIGNMENT {d}\n", .{
target.cMaxIntAlignment(),
});
return defines;
}
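`abiDefines` no longer allocates and returns an `std.ArrayList(u8)`; it renders into whatever `*std.io.Writer` the caller supplies. A minimal sketch of driving the new shape through an allocating writer; the function body is abbreviated (the `ZIG_TARGET_MAX_INT_ALIGNMENT` line is omitted):

const std = @import("std");

fn abiDefines(w: *std.io.Writer, target: std.Target) !void {
    switch (target.abi) {
        .msvc, .itanium => try w.writeAll("#define ZIG_TARGET_ABI_MSVC\n"),
        else => {},
    }
}

test abiDefines {
    var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
    defer aw.deinit();
    try abiDefines(&aw.writer, @import("builtin").target);
    // Only MSVC-style ABIs produce output in this abbreviated version.
}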
pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.Progress.Node) link.File.FlushError!void {
@@ -374,37 +378,47 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P
// emit-h is in `flushEmitH` below.
var f: Flush = .{
.ctype_pool = codegen.CType.Pool.empty,
.lazy_ctype_pool = codegen.CType.Pool.empty,
.ctype_pool = .empty,
.ctype_global_from_decl_map = .empty,
.ctypes = .empty,
.lazy_ctype_pool = .empty,
.lazy_fns = .empty,
.lazy_fwd_decl = .empty,
.lazy_code = .empty,
.all_buffers = .empty,
.file_size = 0,
};
defer f.deinit(gpa);
const abi_defines = try self.abiDefines(zcu.getTarget());
defer abi_defines.deinit();
var abi_defines_aw: std.io.Writer.Allocating = .init(gpa);
defer abi_defines_aw.deinit();
abiDefines(&abi_defines_aw.writer, zcu.getTarget()) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
// Covers defines, zig.h, ctypes, asm, lazy fwd.
try f.all_buffers.ensureUnusedCapacity(gpa, 5);
f.appendBufAssumeCapacity(abi_defines.items);
f.appendBufAssumeCapacity(abi_defines_aw.getWritten());
f.appendBufAssumeCapacity(zig_h);
const ctypes_index = f.all_buffers.items.len;
f.all_buffers.items.len += 1;
{
var asm_buf = f.asm_buf.toManaged(gpa);
defer f.asm_buf = asm_buf.moveToUnmanaged();
try codegen.genGlobalAsm(zcu, asm_buf.writer());
f.appendBufAssumeCapacity(asm_buf.items);
}
var asm_aw: std.io.Writer.Allocating = .init(gpa);
defer asm_aw.deinit();
codegen.genGlobalAsm(zcu, &asm_aw.writer) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
f.appendBufAssumeCapacity(asm_aw.getWritten());
const lazy_index = f.all_buffers.items.len;
f.all_buffers.items.len += 1;
self.lazy_fwd_decl_buf.clearRetainingCapacity();
self.lazy_code_buf.clearRetainingCapacity();
try f.lazy_ctype_pool.init(gpa);
try self.flushErrDecls(pt, &f.lazy_ctype_pool);
try self.flushErrDecls(pt, &f);
// Unlike other backends, the .c code we are emitting has order-dependent decls.
// `CType`s, forward decls, and non-functions first.
@@ -462,22 +476,15 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P
}
}
f.all_buffers.items[ctypes_index] = .{
.base = if (f.ctypes_buf.items.len > 0) f.ctypes_buf.items.ptr else "",
.len = f.ctypes_buf.items.len,
};
f.file_size += f.ctypes_buf.items.len;
f.all_buffers.items[ctypes_index] = f.ctypes.items;
f.file_size += f.ctypes.items.len;
const lazy_fwd_decl_len = self.lazy_fwd_decl_buf.items.len;
f.all_buffers.items[lazy_index] = .{
.base = if (lazy_fwd_decl_len > 0) self.lazy_fwd_decl_buf.items.ptr else "",
.len = lazy_fwd_decl_len,
};
f.file_size += lazy_fwd_decl_len;
f.all_buffers.items[lazy_index] = f.lazy_fwd_decl.items;
f.file_size += f.lazy_fwd_decl.items.len;
// Now the code.
try f.all_buffers.ensureUnusedCapacity(gpa, 1 + (self.uavs.count() + self.navs.count()) * 2);
f.appendBufAssumeCapacity(self.lazy_code_buf.items);
f.appendBufAssumeCapacity(f.lazy_code.items);
for (self.uavs.keys(), self.uavs.values()) |uav, av_block| f.appendCodeAssumeCapacity(
if (self.exported_uavs.contains(uav)) .default else switch (ip.indexToKey(uav)) {
.@"extern" => .zig_extern,
@@ -493,31 +500,35 @@ pub fn flush(self: *C, arena: Allocator, tid: Zcu.PerThread.Id, prog_node: std.P
const file = self.base.file.?;
file.setEndPos(f.file_size) catch |err| return diags.fail("failed to allocate file: {s}", .{@errorName(err)});
file.pwritevAll(f.all_buffers.items, 0) catch |err| return diags.fail("failed to write to '{f}': {s}", .{
self.base.emit, @errorName(err),
});
var fw = file.writer(&.{});
var w = &fw.interface;
w.writeVecAll(f.all_buffers.items) catch |err| switch (err) {
error.WriteFailed => return diags.fail("failed to write to '{f}': {s}", .{
self.base.emit, @errorName(fw.err.?),
}),
};
}
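`flush` now collects plain `[]const u8` slices and hands them to `Writer.writeVecAll` through an unbuffered `File.Writer`, replacing the old `pwritevAll` on raw iovecs. A minimal sketch of the same vectored write against a scratch file; the file name and contents are illustrative:

const std = @import("std");

test "vectored write through File.Writer" {
    var tmp = std.testing.tmpDir(.{});
    defer tmp.cleanup();
    const file = try tmp.dir.createFile("out.c", .{});
    defer file.close();
    var fw = file.writer(&.{}); // empty buffer: writes go straight to the file
    var bufs: [2][]const u8 = .{ "#include <stdint.h>\n", "int live = 1;\n" };
    fw.interface.writeVecAll(&bufs) catch |err| switch (err) {
        // The underlying I/O error lives in fw.err; WriteFailed is the signal.
        error.WriteFailed => return fw.err.?,
    };
}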
const Flush = struct {
ctype_pool: codegen.CType.Pool,
ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType) = .empty,
ctypes_buf: std.ArrayListUnmanaged(u8) = .empty,
ctype_global_from_decl_map: std.ArrayListUnmanaged(codegen.CType),
ctypes: std.ArrayListUnmanaged(u8),
lazy_ctype_pool: codegen.CType.Pool,
lazy_fns: LazyFns = .{},
asm_buf: std.ArrayListUnmanaged(u8) = .empty,
lazy_fns: LazyFns,
lazy_fwd_decl: std.ArrayListUnmanaged(u8),
lazy_code: std.ArrayListUnmanaged(u8),
/// We collect a list of buffers to write, and write them all at once with pwritev 😎
all_buffers: std.ArrayListUnmanaged(std.posix.iovec_const) = .empty,
all_buffers: std.ArrayListUnmanaged([]const u8),
/// Keeps track of the total bytes of `all_buffers`.
file_size: u64 = 0,
file_size: u64,
const LazyFns = std.AutoHashMapUnmanaged(codegen.LazyFnKey, void);
fn appendBufAssumeCapacity(f: *Flush, buf: []const u8) void {
if (buf.len == 0) return;
f.all_buffers.appendAssumeCapacity(.{ .base = buf.ptr, .len = buf.len });
f.all_buffers.appendAssumeCapacity(buf);
f.file_size += buf.len;
}
@@ -532,14 +543,15 @@ const Flush = struct {
}
fn deinit(f: *Flush, gpa: Allocator) void {
f.all_buffers.deinit(gpa);
f.asm_buf.deinit(gpa);
f.lazy_fns.deinit(gpa);
f.lazy_ctype_pool.deinit(gpa);
f.ctypes_buf.deinit(gpa);
f.ctype_pool.deinit(gpa);
assert(f.ctype_global_from_decl_map.items.len == 0);
f.ctype_global_from_decl_map.deinit(gpa);
f.ctype_pool.deinit(gpa);
f.ctypes.deinit(gpa);
f.lazy_ctype_pool.deinit(gpa);
f.lazy_fns.deinit(gpa);
f.lazy_fwd_decl.deinit(gpa);
f.lazy_code.deinit(gpa);
f.all_buffers.deinit(gpa);
}
};
@@ -562,9 +574,9 @@ fn flushCTypes(
try global_from_decl_map.ensureTotalCapacity(gpa, decl_ctype_pool.items.len);
defer global_from_decl_map.clearRetainingCapacity();
var ctypes_buf = f.ctypes_buf.toManaged(gpa);
defer f.ctypes_buf = ctypes_buf.moveToUnmanaged();
const writer = ctypes_buf.writer();
var ctypes_aw: std.io.Writer.Allocating = .fromArrayList(gpa, &f.ctypes);
const ctypes_bw = &ctypes_aw.writer;
defer f.ctypes = ctypes_aw.toArrayList();
for (0..decl_ctype_pool.items.len) |decl_ctype_pool_index| {
const PoolAdapter = struct {
@@ -591,26 +603,25 @@ fn flushCTypes(
PoolAdapter{ .global_from_decl_map = global_from_decl_map.items },
);
global_from_decl_map.appendAssumeCapacity(global_ctype);
try codegen.genTypeDecl(
codegen.genTypeDecl(
zcu,
writer,
ctypes_bw,
global_ctype_pool,
global_ctype,
pass,
decl_ctype_pool,
decl_ctype,
found_existing,
);
) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
}
}
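`flushCTypes` borrows the capacity of `f.ctypes` through `Allocating.fromArrayList` and gives it back with `toArrayList`, so rendered type declarations accumulate across calls without copying. A minimal sketch of that round-trip:

const std = @import("std");

test "ArrayList round-trip through Writer.Allocating" {
    const gpa = std.testing.allocator;
    var ctypes: std.ArrayListUnmanaged(u8) = .empty;
    defer ctypes.deinit(gpa);
    for ([_][]const u8{ "typedef uint32_t zig_u32;\n", "typedef uint64_t zig_u64;\n" }) |line| {
        // Borrow the list's buffer and existing contents for the writes:
        var aw: std.io.Writer.Allocating = .fromArrayList(gpa, &ctypes);
        defer ctypes = aw.toArrayList();
        try aw.writer.writeAll(line);
    }
    try std.testing.expect(std.mem.endsWith(u8, ctypes.items, "zig_u64;\n"));
}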
fn flushErrDecls(self: *C, pt: Zcu.PerThread, ctype_pool: *codegen.CType.Pool) FlushDeclError!void {
fn flushErrDecls(self: *C, pt: Zcu.PerThread, f: *Flush) FlushDeclError!void {
const gpa = self.base.comp.gpa;
const fwd_decl = &self.lazy_fwd_decl_buf;
const code = &self.lazy_code_buf;
var object = codegen.Object{
var object: codegen.Object = .{
.dg = .{
.gpa = gpa,
.pt = pt,
@@ -619,27 +630,30 @@ fn flushErrDecls(self: *C, pt: Zcu.PerThread, ctype_pool: *codegen.CType.Pool) F
.pass = .flush,
.is_naked_fn = false,
.expected_block = null,
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = ctype_pool.*,
.scratch = .{},
.fwd_decl = undefined,
.ctype_pool = f.lazy_ctype_pool,
.scratch = .initBuffer(self.scratch_buf),
.uavs = .empty,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
.code_header = undefined,
.code = undefined,
.indent_counter = 0,
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
_ = object.dg.fwd_decl.fromArrayList(gpa, &f.lazy_fwd_decl);
_ = object.code.fromArrayList(gpa, &f.lazy_code);
defer {
object.dg.uavs.deinit(gpa);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = object.dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
f.lazy_ctype_pool = object.dg.ctype_pool.move();
f.lazy_ctype_pool.freeUnusedCapacity(gpa);
f.lazy_fwd_decl = object.dg.fwd_decl.toArrayList();
f.lazy_code = object.code.toArrayList();
self.scratch_buf = object.dg.scratch.allocatedSlice();
}
codegen.genErrDecls(&object) catch |err| switch (err) {
error.AnalysisFail => unreachable,
else => |e| return e,
error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
};
try self.addUavsFromCodegen(&object.dg.uavs);
@@ -649,16 +663,13 @@ fn flushLazyFn(
self: *C,
pt: Zcu.PerThread,
mod: *Module,
ctype_pool: *codegen.CType.Pool,
f: *Flush,
lazy_ctype_pool: *const codegen.CType.Pool,
lazy_fn: codegen.LazyFnMap.Entry,
) FlushDeclError!void {
const gpa = self.base.comp.gpa;
const fwd_decl = &self.lazy_fwd_decl_buf;
const code = &self.lazy_code_buf;
var object = codegen.Object{
var object: codegen.Object = .{
.dg = .{
.gpa = gpa,
.pt = pt,
@@ -667,29 +678,32 @@ fn flushLazyFn(
.pass = .flush,
.is_naked_fn = false,
.expected_block = null,
.fwd_decl = fwd_decl.toManaged(gpa),
.ctype_pool = ctype_pool.*,
.scratch = .{},
.fwd_decl = undefined,
.ctype_pool = f.lazy_ctype_pool,
.scratch = .initBuffer(self.scratch_buf),
.uavs = .empty,
},
.code = code.toManaged(gpa),
.indent_writer = undefined, // set later so we can get a pointer to object.code
.code_header = undefined,
.code = undefined,
.indent_counter = 0,
};
object.indent_writer = .{ .underlying_writer = object.code.writer() };
_ = object.dg.fwd_decl.fromArrayList(gpa, &f.lazy_fwd_decl);
_ = object.code.fromArrayList(gpa, &f.lazy_code);
defer {
// If this assert trips just handle the anon_decl_deps the same as
// `updateFunc()` does.
assert(object.dg.uavs.count() == 0);
fwd_decl.* = object.dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = object.dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
object.dg.scratch.deinit(gpa);
code.* = object.code.moveToUnmanaged();
f.lazy_ctype_pool = object.dg.ctype_pool.move();
f.lazy_ctype_pool.freeUnusedCapacity(gpa);
f.lazy_fwd_decl = object.dg.fwd_decl.toArrayList();
f.lazy_code = object.code.toArrayList();
self.scratch_buf = object.dg.scratch.allocatedSlice();
}
codegen.genLazyFn(&object, lazy_ctype_pool, lazy_fn) catch |err| switch (err) {
error.AnalysisFail => unreachable,
else => |e| return e,
error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
};
}
@@ -709,7 +723,7 @@ fn flushLazyFns(
const gop = f.lazy_fns.getOrPutAssumeCapacity(entry.key_ptr.*);
if (gop.found_existing) continue;
gop.value_ptr.* = {};
try self.flushLazyFn(pt, mod, &f.lazy_ctype_pool, lazy_ctype_pool, entry);
try self.flushLazyFn(pt, mod, f, lazy_ctype_pool, entry);
}
}
@@ -802,8 +816,6 @@ pub fn updateExports(
},
};
const ctype_pool = &decl_block.ctype_pool;
const fwd_decl = &self.fwd_decl_buf;
fwd_decl.clearRetainingCapacity();
var dg: codegen.DeclGen = .{
.gpa = gpa,
.pt = pt,
@@ -812,20 +824,24 @@ pub fn updateExports(
.pass = pass,
.is_naked_fn = false,
.expected_block = null,
.fwd_decl = fwd_decl.toManaged(gpa),
.fwd_decl = undefined,
.ctype_pool = decl_block.ctype_pool,
.scratch = .{},
.scratch = .initBuffer(self.scratch_buf),
.uavs = .empty,
};
dg.fwd_decl.initOwnedSlice(gpa, self.fwd_decl_buf);
defer {
assert(dg.uavs.count() == 0);
fwd_decl.* = dg.fwd_decl.moveToUnmanaged();
ctype_pool.* = dg.ctype_pool.move();
ctype_pool.freeUnusedCapacity(gpa);
dg.scratch.deinit(gpa);
self.fwd_decl_buf = dg.fwd_decl.toArrayList().allocatedSlice();
self.scratch_buf = dg.scratch.allocatedSlice();
}
try codegen.genExports(&dg, exported, export_indices);
exported_block.* = .{ .fwd_decl = try self.addString(dg.fwd_decl.items) };
codegen.genExports(&dg, exported, export_indices) catch |err| switch (err) {
error.WriteFailed, error.OutOfMemory => return error.OutOfMemory,
};
exported_block.* = .{ .fwd_decl = try self.addString(&.{&dg.fwd_decl}) };
}
pub fn deleteExport(