update compiler source to new APIs

Andrew Kelley 2025-07-01 10:52:53 -07:00
parent fac5fe57be
commit f71d97e4cb
18 changed files with 383 additions and 379 deletions

View File

@ -1921,9 +1921,10 @@ pub fn reader(file: File, buffer: []u8) Reader {
/// Positional is more threadsafe, since the global seek position is not
/// affected, but when such syscalls are not available, preemptively choosing
/// `Reader.Mode.streaming` will skip a failed syscall.
pub fn readerStreaming(file: File) Reader {
pub fn readerStreaming(file: File, buffer: []u8) Reader {
return .{
.file = file,
.interface = Reader.initInterface(buffer),
.mode = .streaming,
.seek_err = error.Unseekable,
};

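Call sites migrate by supplying the buffer at construction time. A minimal sketch, assuming a `file: std.fs.File` in scope (the buffer size is arbitrary):

    var read_buffer: [4096]u8 = undefined;
    var file_reader = file.readerStreaming(&read_buffer);
    // The generic stream lives in the `interface` field, mirroring the
    // `.interface = Reader.initInterface(buffer)` initialization above:
    const reader: *std.io.Reader = &file_reader.interface;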
View File

@ -126,8 +126,8 @@ pub fn fixed(buffer: []u8) Writer {
};
}
pub fn hashed(w: *Writer, hasher: anytype) Hashed(@TypeOf(hasher)) {
return .{ .out = w, .hasher = hasher };
pub fn hashed(w: *Writer, hasher: anytype, buffer: []u8) Hashed(@TypeOf(hasher)) {
return .initHasher(w, hasher, buffer);
}
pub const failing: Writer = .{
@ -1969,20 +1969,25 @@ pub fn Hashed(comptime Hasher: type) type {
return struct {
out: *Writer,
hasher: Hasher,
interface: Writer,
writer: Writer,
pub fn init(out: *Writer) @This() {
pub fn init(out: *Writer, buffer: []u8) @This() {
return .initHasher(out, .{}, buffer);
}
pub fn initHasher(out: *Writer, hasher: Hasher, buffer: []u8) @This() {
return .{
.out = out,
.hasher = .{},
.interface = .{
.hasher = hasher,
.writer = .{
.buffer = buffer,
.vtable = &.{@This().drain},
},
};
}
fn drain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
const this: *@This() = @alignCast(@fieldParentPtr("interface", w));
const this: *@This() = @alignCast(@fieldParentPtr("writer", w));
if (data.len == 0) {
const buf = w.buffered();
try this.out.writeAll(buf);

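Constructing a `Hashed` writer now also requires a drain buffer. A hedged sketch of the new call shape, assuming `out: *std.io.Writer` is available and using Sha256 as a stand-in for the generic hasher:

    var hash_buffer: [256]u8 = undefined;
    var hashed = out.hashed(std.crypto.hash.sha2.Sha256.init(.{}), &hash_buffer);
    try hashed.writer.writeAll("some bytes"); // hashed as the buffer drains to `out`
    try hashed.writer.flush();
    var digest: [std.crypto.hash.sha2.Sha256.digest_length]u8 = undefined;
    hashed.hasher.final(&digest);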
View File

@ -2689,7 +2689,7 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
const is_hit = man.hit() catch |err| switch (err) {
error.CacheCheckFailed => switch (man.diagnostic) {
.none => unreachable,
.manifest_create, .manifest_read, .manifest_lock, .manifest_seek => |e| return comp.setMiscFailure(
.manifest_create, .manifest_read, .manifest_lock => |e| return comp.setMiscFailure(
.check_whole_cache,
"failed to check cache: {s} {s}",
.{ @tagName(man.diagnostic), @errorName(e) },

View File

@ -1881,23 +1881,23 @@ pub const NullTerminatedString = enum(u32) {
const FormatData = struct {
string: NullTerminatedString,
ip: *const InternPool,
id: bool,
};
fn format(
data: FormatData,
comptime specifier: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
fn format(data: FormatData, writer: *std.io.Writer) std.io.Writer.Error!void {
const slice = data.string.toSlice(data.ip);
if (comptime std.mem.eql(u8, specifier, "")) {
if (!data.id) {
try writer.writeAll(slice);
} else if (comptime std.mem.eql(u8, specifier, "i")) {
} else {
try writer.print("{f}", .{std.zig.fmtIdP(slice)});
} else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'");
}
}
pub fn fmt(string: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(format) {
return .{ .data = .{ .string = string, .ip = ip } };
pub fn fmt(string: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(FormatData, format) {
return .{ .data = .{ .string = string, .ip = ip, .id = false } };
}
pub fn fmtId(string: NullTerminatedString, ip: *const InternPool) std.fmt.Formatter(FormatData, format) {
return .{ .data = .{ .string = string, .ip = ip, .id = true } };
}
const debug_state = InternPool.debug_state;
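The shape of this change recurs throughout the commit: `std.fmt.Formatter` is now parameterized by the data type as well as the format function, format functions take a concrete `*std.io.Writer` instead of `anytype` plus a comptime specifier, and call sites opt in with the `{f}` specifier. A self-contained sketch with hypothetical names:

    const std = @import("std");

    const FormatThing = struct { name: []const u8 };

    fn formatThing(data: FormatThing, w: *std.io.Writer) std.io.Writer.Error!void {
        try w.writeAll(data.name);
    }

    pub fn fmtThing(name: []const u8) std.fmt.Formatter(FormatThing, formatThing) {
        return .{ .data = .{ .name = name } };
    }

    // Usage at a call site:
    // std.debug.print("{f}\n", .{fmtThing("example")});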
@ -9750,7 +9750,7 @@ fn finishFuncInstance(
const fn_namespace = fn_owner_nav.analysis.?.namespace;
// TODO: improve this name
const nav_name = try ip.getOrPutStringFmt(gpa, tid, "{}__anon_{d}", .{
const nav_name = try ip.getOrPutStringFmt(gpa, tid, "{f}__anon_{d}", .{
fn_owner_nav.name.fmt(ip), @intFromEnum(func_index),
}, .no_embedded_nulls);
const nav_index = try ip.createNav(gpa, tid, .{
@ -11259,8 +11259,9 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
}
fn dumpAllFallible(ip: *const InternPool) anyerror!void {
var bw = std.io.bufferedWriter(std.fs.File.stderr().deprecatedWriter());
const w = bw.writer();
var buffer: [4096]u8 = undefined;
const stderr_bw = std.debug.lockStderrWriter(&buffer);
defer std.debug.unlockStderrWriter();
for (ip.locals, 0..) |*local, tid| {
const items = local.shared.items.view();
for (
@ -11269,12 +11270,12 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
0..,
) |tag, data, index| {
const i = Index.Unwrapped.wrap(.{ .tid = @enumFromInt(tid), .index = @intCast(index) }, ip);
try w.print("${d} = {s}(", .{ i, @tagName(tag) });
try stderr_bw.print("${d} = {s}(", .{ i, @tagName(tag) });
switch (tag) {
.removed => {},
.simple_type => try w.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
.simple_value => try w.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),
.simple_type => try stderr_bw.print("{s}", .{@tagName(@as(SimpleType, @enumFromInt(@intFromEnum(i))))}),
.simple_value => try stderr_bw.print("{s}", .{@tagName(@as(SimpleValue, @enumFromInt(@intFromEnum(i))))}),
.type_int_signed,
.type_int_unsigned,
@ -11347,17 +11348,16 @@ fn dumpAllFallible(ip: *const InternPool) anyerror!void {
.func_coerced,
.union_value,
.memoized_call,
=> try w.print("{d}", .{data}),
=> try stderr_bw.print("{d}", .{data}),
.opt_null,
.type_slice,
.only_possible_value,
=> try w.print("${d}", .{data}),
=> try stderr_bw.print("${d}", .{data}),
}
try w.writeAll(")\n");
try stderr_bw.writeAll(")\n");
}
}
try bw.flush();
}
pub fn dumpGenericInstances(ip: *const InternPool, allocator: Allocator) void {
@ -11369,9 +11369,6 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
var bw = std.io.bufferedWriter(std.fs.File.stderr().deprecatedWriter());
const w = bw.writer();
var instances: std.AutoArrayHashMapUnmanaged(Index, std.ArrayListUnmanaged(Index)) = .empty;
for (ip.locals, 0..) |*local, tid| {
const items = local.shared.items.view().slice();
@ -11394,6 +11391,10 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
}
}
var buffer: [4096]u8 = undefined;
const stderr_bw = std.debug.lockStderrWriter(&buffer);
defer std.debug.unlockStderrWriter();
const SortContext = struct {
values: []std.ArrayListUnmanaged(Index),
pub fn lessThan(ctx: @This(), a_index: usize, b_index: usize) bool {
@ -11405,23 +11406,21 @@ pub fn dumpGenericInstancesFallible(ip: *const InternPool, allocator: Allocator)
var it = instances.iterator();
while (it.next()) |entry| {
const generic_fn_owner_nav = ip.getNav(ip.funcDeclInfo(entry.key_ptr.*).owner_nav);
try w.print("{} ({}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
try stderr_bw.print("{f} ({}): \n", .{ generic_fn_owner_nav.name.fmt(ip), entry.value_ptr.items.len });
for (entry.value_ptr.items) |index| {
const unwrapped_index = index.unwrap(ip);
const func = ip.extraFuncInstance(unwrapped_index.tid, unwrapped_index.getExtra(ip), unwrapped_index.getData(ip));
const owner_nav = ip.getNav(func.owner_nav);
try w.print(" {}: (", .{owner_nav.name.fmt(ip)});
try stderr_bw.print(" {f}: (", .{owner_nav.name.fmt(ip)});
for (func.comptime_args.get(ip)) |arg| {
if (arg != .none) {
const key = ip.indexToKey(arg);
try w.print(" {} ", .{key});
try stderr_bw.print(" {} ", .{key});
}
}
try w.writeAll(")\n");
try stderr_bw.writeAll(")\n");
}
}
try bw.flush();
}
pub fn getNav(ip: *const InternPool, index: Nav.Index) Nav {

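The buffered, locked stderr pattern above replaces the old `bufferedWriter` plus explicit `bw.flush()`; note that the flush call disappears, with unlocking taking over that responsibility. Minimal sketch:

    var buffer: [4096]u8 = undefined;
    const stderr = std.debug.lockStderrWriter(&buffer);
    defer std.debug.unlockStderrWriter();
    try stderr.print("{d} items\n", .{42});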
View File

@ -1320,7 +1320,7 @@ fn indexPackFirstPass(
index_entries: *std.AutoHashMapUnmanaged(Oid, IndexEntry),
pending_deltas: *std.ArrayListUnmanaged(IndexEntry),
) !Oid {
var pack_buffered_reader = std.io.bufferedReader(pack.reader());
var pack_buffered_reader = std.io.bufferedReader(pack.deprecatedReader());
var pack_counting_reader = std.io.countingReader(pack_buffered_reader.reader());
var pack_hashed_reader = std.compress.hashedReader(pack_counting_reader.reader(), Oid.Hasher.init(format));
const pack_reader = pack_hashed_reader.reader();
@ -1400,7 +1400,7 @@ fn indexPackHashDelta(
if (cache.get(base_offset)) |base_object| break base_object;
try pack.seekTo(base_offset);
base_header = try EntryHeader.read(format, pack.reader());
base_header = try EntryHeader.read(format, pack.deprecatedReader());
switch (base_header) {
.ofs_delta => |ofs_delta| {
try delta_offsets.append(allocator, base_offset);
@ -1411,7 +1411,7 @@ fn indexPackHashDelta(
base_offset = (index_entries.get(ref_delta.base_object) orelse return null).offset;
},
else => {
const base_data = try readObjectRaw(allocator, pack.reader(), base_header.uncompressedLength());
const base_data = try readObjectRaw(allocator, pack.deprecatedReader(), base_header.uncompressedLength());
errdefer allocator.free(base_data);
const base_object: Object = .{ .type = base_header.objectType(), .data = base_data };
try cache.put(allocator, base_offset, base_object);
@ -1448,8 +1448,8 @@ fn resolveDeltaChain(
const delta_offset = delta_offsets[i];
try pack.seekTo(delta_offset);
const delta_header = try EntryHeader.read(format, pack.reader());
const delta_data = try readObjectRaw(allocator, pack.reader(), delta_header.uncompressedLength());
const delta_header = try EntryHeader.read(format, pack.deprecatedReader());
const delta_data = try readObjectRaw(allocator, pack.deprecatedReader(), delta_header.uncompressedLength());
defer allocator.free(delta_data);
var delta_stream = std.io.fixedBufferStream(delta_data);
const delta_reader = delta_stream.reader();

View File

@ -4307,15 +4307,19 @@ pub fn navFileScope(zcu: *Zcu, nav: InternPool.Nav.Index) *File {
return zcu.fileByIndex(zcu.navFileScopeIndex(nav));
}
pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Formatter(formatAnalUnit) {
pub fn fmtAnalUnit(zcu: *Zcu, unit: AnalUnit) std.fmt.Formatter(FormatAnalUnit, formatAnalUnit) {
return .{ .data = .{ .unit = unit, .zcu = zcu } };
}
pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(formatDependee) {
pub fn fmtDependee(zcu: *Zcu, d: InternPool.Dependee) std.fmt.Formatter(FormatDependee, formatDependee) {
return .{ .data = .{ .dependee = d, .zcu = zcu } };
}
fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = .{ fmt, options };
const FormatAnalUnit = struct {
unit: AnalUnit,
zcu: *Zcu,
};
fn formatAnalUnit(data: FormatAnalUnit, writer: *std.io.Writer) std.io.Writer.Error!void {
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.unit.unwrap()) {
@ -4338,8 +4342,10 @@ fn formatAnalUnit(data: struct { unit: AnalUnit, zcu: *Zcu }, comptime fmt: []co
.memoized_state => return writer.writeAll("memoized_state"),
}
}
fn formatDependee(data: struct { dependee: InternPool.Dependee, zcu: *Zcu }, comptime fmt: []const u8, options: std.fmt.FormatOptions, writer: anytype) !void {
_ = .{ fmt, options };
const FormatDependee = struct { dependee: InternPool.Dependee, zcu: *Zcu };
fn formatDependee(data: FormatDependee, writer: *std.io.Writer) std.io.Writer.Error!void {
const zcu = data.zcu;
const ip = &zcu.intern_pool;
switch (data.dependee) {

View File

@ -80,18 +80,19 @@ fn dumpStatusReport() !void {
var fba = std.heap.FixedBufferAllocator.init(&crash_heap);
const allocator = fba.allocator();
const stderr = std.fs.File.stderr().deprecatedWriter();
var stderr_fw = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_fw.interface;
const block: *Sema.Block = anal.block;
const zcu = anal.sema.pt.zcu;
const file, const src_base_node = Zcu.LazySrcLoc.resolveBaseNode(block.src_base_inst, zcu) orelse {
const file = zcu.fileByIndex(block.src_base_inst.resolveFile(&zcu.intern_pool));
try stderr.print("Analyzing lost instruction in file '{}'. This should not happen!\n\n", .{file.path.fmt(zcu.comp)});
try stderr.print("Analyzing lost instruction in file '{f}'. This should not happen!\n\n", .{file.path.fmt(zcu.comp)});
return;
};
try stderr.writeAll("Analyzing ");
try stderr.print("Analyzing '{}'\n", .{file.path.fmt(zcu.comp)});
try stderr.print("Analyzing '{f}'\n", .{file.path.fmt(zcu.comp)});
print_zir.renderInstructionContext(
allocator,
@ -107,7 +108,7 @@ fn dumpStatusReport() !void {
};
try stderr.print(
\\ For full context, use the command
\\ zig ast-check -t {}
\\ zig ast-check -t {f}
\\
\\
, .{file.path.fmt(zcu.comp)});
@ -116,7 +117,7 @@ fn dumpStatusReport() !void {
while (parent) |curr| {
fba.reset();
const cur_block_file = zcu.fileByIndex(curr.block.src_base_inst.resolveFile(&zcu.intern_pool));
try stderr.print(" in {}\n", .{cur_block_file.path.fmt(zcu.comp)});
try stderr.print(" in {f}\n", .{cur_block_file.path.fmt(zcu.comp)});
_, const cur_block_src_base_node = Zcu.LazySrcLoc.resolveBaseNode(curr.block.src_base_inst, zcu) orelse {
try stderr.writeAll(" > [lost instruction; this should not happen]\n");
parent = curr.parent;
@ -139,7 +140,7 @@ fn dumpStatusReport() !void {
parent = curr.parent;
}
try stderr.writeAll("\n");
try stderr.writeByte('\n');
}
var crash_heap: [16 * 4096]u8 = undefined;
@ -268,11 +269,12 @@ const StackContext = union(enum) {
debug.dumpCurrentStackTrace(ct.ret_addr);
},
.exception => |context| {
debug.dumpStackTraceFromBase(context);
var stderr_fw = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_fw.interface;
debug.dumpStackTraceFromBase(context, stderr);
},
.not_supported => {
const stderr = std.fs.File.stderr().deprecatedWriter();
stderr.writeAll("Stack trace not supported on this platform.\n") catch {};
std.fs.File.stderr().writeAll("Stack trace not supported on this platform.\n") catch {};
},
}
}
@ -379,7 +381,8 @@ const PanicSwitch = struct {
state.recover_stage = .release_mutex;
const stderr = std.fs.File.stderr().deprecatedWriter();
var stderr_fw = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_fw.interface;
if (builtin.single_threaded) {
stderr.print("panic: ", .{}) catch goTo(releaseMutex, .{state});
} else {
@ -406,7 +409,8 @@ const PanicSwitch = struct {
recover(state, trace, stack, msg);
state.recover_stage = .release_mutex;
const stderr = std.fs.File.stderr().deprecatedWriter();
var stderr_fw = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_fw.interface;
stderr.writeAll("\nOriginal Error:\n") catch {};
goTo(reportStack, .{state});
}
@ -477,7 +481,8 @@ const PanicSwitch = struct {
recover(state, trace, stack, msg);
state.recover_stage = .silent_abort;
const stderr = std.fs.File.stderr().deprecatedWriter();
var stderr_fw = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_fw.interface;
stderr.writeAll("Aborting...\n") catch {};
goTo(abort, .{});
}
@ -505,7 +510,8 @@ const PanicSwitch = struct {
// lower the verbosity, and restore it at the end if we don't panic.
state.recover_verbosity = .message_only;
const stderr = std.fs.File.stderr().deprecatedWriter();
var stderr_fw = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_fw.interface;
stderr.writeAll("\nPanicked during a panic: ") catch {};
stderr.writeAll(msg) catch {};
stderr.writeAll("\nInner panic stack:\n") catch {};
@ -519,10 +525,11 @@ const PanicSwitch = struct {
.message_only => {
state.recover_verbosity = .silent;
const stderr = std.fs.File.stderr().deprecatedWriter();
var stderr_fw = std.fs.File.stderr().writer(&.{});
const stderr = &stderr_fw.interface;
stderr.writeAll("\nPanicked while dumping inner panic stack: ") catch {};
stderr.writeAll(msg) catch {};
stderr.writeAll("\n") catch {};
stderr.writeByte('\n') catch {};
// If we succeed, restore all the way to dumping the stack.
state.recover_verbosity = .message_and_stack;

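The recurring substitution in this file: a stack-allocated `std.fs.File.Writer` whose `interface` field is the generic `std.io.Writer`. Passing an empty buffer keeps it unbuffered, matching the old `deprecatedWriter()` behavior. Sketch:

    var stderr_fw = std.fs.File.stderr().writer(&.{}); // empty buffer: unbuffered
    const stderr = &stderr_fw.interface; // *std.io.Writer
    stderr.writeAll("panic: ") catch {};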
View File

@ -78,6 +78,7 @@ pub const Env = enum {
.ast_gen,
.sema,
.legalize,
.c_compiler,
.llvm_backend,
.c_backend,
.wasm_backend,
@ -127,6 +128,7 @@ pub const Env = enum {
.clang_command,
.cc_command,
.translate_c_command,
.c_compiler,
=> true,
else => false,
},
@ -248,6 +250,8 @@ pub const Feature = enum {
sema,
legalize,
c_compiler,
llvm_backend,
c_backend,
wasm_backend,

View File

@ -2660,24 +2660,17 @@ fn formatSymtab(
}
}
pub fn fmtPath(self: Object) std.fmt.Formatter(formatPath) {
pub fn fmtPath(self: Object) std.fmt.Formatter(Object, formatPath) {
return .{ .data = self };
}
fn formatPath(
object: Object,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = unused_fmt_string;
_ = options;
fn formatPath(object: Object, writer: *std.io.Writer) std.io.Writer.Error!void {
if (object.in_archive) |ar| {
try writer.print("{}({s})", .{
try writer.print("{f}({s})", .{
@as(Path, ar.path), object.path.basename(),
});
} else {
try writer.print("{}", .{@as(Path, object.path)});
try writer.print("{f}", .{@as(Path, object.path)});
}
}

View File

@ -10,18 +10,11 @@ pub const File = union(enum) {
};
}
pub fn fmtPath(file: File) std.fmt.Formatter(formatPath) {
pub fn fmtPath(file: File) std.fmt.Formatter(File, formatPath) {
return .{ .data = file };
}
fn formatPath(
file: File,
comptime unused_fmt_string: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = unused_fmt_string;
_ = options;
fn formatPath(file: File, writer: *std.io.Writer) std.io.Writer.Error!void {
switch (file) {
.zig_object => |zo| try writer.writeAll(zo.basename),
.internal => try writer.writeAll("internal"),

View File

@ -117,7 +117,7 @@ pub fn updateNav(self: *SpirV, pt: Zcu.PerThread, nav: InternPool.Nav.Index) lin
}
const ip = &pt.zcu.intern_pool;
log.debug("lowering nav {}({d})", .{ ip.getNav(nav).fqn.fmt(ip), nav });
log.debug("lowering nav {f}({d})", .{ ip.getNav(nav).fqn.fmt(ip), nav });
try self.object.updateNav(pt, nav);
}
@ -203,10 +203,10 @@ pub fn flush(
// We need to export the list of error names somewhere so that we can pretty-print them in the
// executor. This is not really an important thing though, so we can just dump it in any old
// nonsemantic instruction. For now, just put it in OpSourceExtension with a special name.
var error_info = std.ArrayList(u8).init(self.object.gpa);
var error_info: std.io.Writer.Allocating = .init(self.object.gpa);
defer error_info.deinit();
try error_info.appendSlice("zig_errors:");
error_info.writer.writeAll("zig_errors:") catch return error.OutOfMemory;
const ip = &self.base.comp.zcu.?.intern_pool;
for (ip.global_error_set.getNamesFromMainThread()) |name| {
// Errors can contain pretty much any character - to encode them in a string we must escape
@ -214,9 +214,9 @@ pub fn flush(
// name if it contains no strange characters is nice for debugging. URI encoding fits the bill.
// We're using : as separator, which is a reserved character.
try error_info.append(':');
try std.Uri.Component.percentEncode(
error_info.writer(),
error_info.writer.writeByte(':') catch return error.OutOfMemory;
std.Uri.Component.percentEncode(
&error_info.writer,
name.toSlice(ip),
struct {
fn isValidChar(c: u8) bool {
@ -226,10 +226,10 @@ pub fn flush(
};
}
}.isValidChar,
);
) catch return error.OutOfMemory;
}
try spv.sections.debug_strings.emit(gpa, .OpSourceExtension, .{
.extension = error_info.items,
.extension = error_info.getWritten(),
});
const module = try spv.finalize(arena);

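`std.io.Writer.Allocating` takes over the role `std.ArrayList(u8)` played as a growable byte sink; its writes fail only with `error.WriteFailed` (allocation failure), which this file maps back to `error.OutOfMemory`. A sketch, assuming a `gpa: std.mem.Allocator`:

    var sink: std.io.Writer.Allocating = .init(gpa);
    defer sink.deinit();
    sink.writer.writeAll("zig_errors:") catch return error.OutOfMemory;
    sink.writer.writeByte(':') catch return error.OutOfMemory;
    const bytes = sink.getWritten(); // accumulated contents, like `list.items`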
View File

@ -110,7 +110,7 @@ const ModuleInfo = struct {
.TypeDeclaration, .ConstantCreation => {
const entry = try entities.getOrPut(result_id);
if (entry.found_existing) {
log.err("type or constant {} has duplicate definition", .{result_id});
log.err("type or constant {f} has duplicate definition", .{result_id});
return error.DuplicateId;
}
entry.value_ptr.* = entity;

View File

@ -92,7 +92,7 @@ const ModuleInfo = struct {
const entry_point: ResultId = @enumFromInt(inst.operands[1]);
const entry = try entry_points.getOrPut(entry_point);
if (entry.found_existing) {
log.err("Entry point type {} has duplicate definition", .{entry_point});
log.err("Entry point type {f} has duplicate definition", .{entry_point});
return error.DuplicateId;
}
},
@ -103,7 +103,7 @@ const ModuleInfo = struct {
const entry = try fn_types.getOrPut(fn_type);
if (entry.found_existing) {
log.err("Function type {} has duplicate definition", .{fn_type});
log.err("Function type {f} has duplicate definition", .{fn_type});
return error.DuplicateId;
}
@ -135,7 +135,7 @@ const ModuleInfo = struct {
},
.OpFunction => {
if (maybe_current_function) |current_function| {
log.err("OpFunction {} does not have an OpFunctionEnd", .{current_function});
log.err("OpFunction {f} does not have an OpFunctionEnd", .{current_function});
return error.InvalidPhysicalFormat;
}
@ -154,7 +154,7 @@ const ModuleInfo = struct {
};
const entry = try functions.getOrPut(current_function);
if (entry.found_existing) {
log.err("Function {} has duplicate definition", .{current_function});
log.err("Function {f} has duplicate definition", .{current_function});
return error.DuplicateId;
}
@ -162,7 +162,7 @@ const ModuleInfo = struct {
try callee_store.appendSlice(calls.keys());
const fn_type = fn_types.get(fn_ty_id) orelse {
log.err("Function {} has invalid OpFunction type", .{current_function});
log.err("Function {f} has invalid OpFunction type", .{current_function});
return error.InvalidId;
};
@ -187,7 +187,7 @@ const ModuleInfo = struct {
}
if (maybe_current_function) |current_function| {
log.err("OpFunction {} does not have an OpFunctionEnd", .{current_function});
log.err("OpFunction {f} does not have an OpFunctionEnd", .{current_function});
return error.InvalidPhysicalFormat;
}
@ -222,7 +222,7 @@ const ModuleInfo = struct {
seen: *std.DynamicBitSetUnmanaged,
) !void {
const index = self.functions.getIndex(id) orelse {
log.err("function calls invalid function {}", .{id});
log.err("function calls invalid function {f}", .{id});
return error.InvalidId;
};
@ -261,7 +261,7 @@ const ModuleInfo = struct {
seen: *std.DynamicBitSetUnmanaged,
) !void {
const index = self.invocation_globals.getIndex(id) orelse {
log.err("invalid invocation global {}", .{id});
log.err("invalid invocation global {f}", .{id});
return error.InvalidId;
};
@ -276,7 +276,7 @@ const ModuleInfo = struct {
}
const initializer = self.functions.get(info.initializer) orelse {
log.err("invocation global {} has invalid initializer {}", .{ id, info.initializer });
log.err("invocation global {f} has invalid initializer {f}", .{ id, info.initializer });
return error.InvalidId;
};

View File

@ -128,7 +128,7 @@ const ModuleInfo = struct {
switch (inst.opcode) {
.OpFunction => {
if (maybe_current_function) |current_function| {
log.err("OpFunction {} does not have an OpFunctionEnd", .{current_function});
log.err("OpFunction {f} does not have an OpFunctionEnd", .{current_function});
return error.InvalidPhysicalFormat;
}
@ -145,7 +145,7 @@ const ModuleInfo = struct {
};
const entry = try functions.getOrPut(current_function);
if (entry.found_existing) {
log.err("Function {} has duplicate definition", .{current_function});
log.err("Function {f} has duplicate definition", .{current_function});
return error.DuplicateId;
}
@ -163,7 +163,7 @@ const ModuleInfo = struct {
}
if (maybe_current_function) |current_function| {
log.err("OpFunction {} does not have an OpFunctionEnd", .{current_function});
log.err("OpFunction {f} does not have an OpFunctionEnd", .{current_function});
return error.InvalidPhysicalFormat;
}
@ -184,7 +184,7 @@ const AliveMarker = struct {
fn markAlive(self: *AliveMarker, result_id: ResultId) BinaryModule.ParseError!void {
const index = self.info.result_id_to_code_offset.getIndex(result_id) orelse {
log.err("undefined result-id {}", .{result_id});
log.err("undefined result-id {f}", .{result_id});
return error.InvalidId;
};

View File

@ -65,6 +65,9 @@ pub fn wasi_cwd() std.os.wasi.fd_t {
const fatal = std.process.fatal;
/// This can be global since stdout is a singleton.
var stdio_buffer: [4096]u8 = undefined;
/// Shaming all the locations that inappropriately use an O(N) search algorithm.
/// Please delete this and fix the compilation errors!
pub const @"bad O(N)" = void;
@ -352,7 +355,7 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
} else if (mem.eql(u8, cmd, "env")) {
dev.check(.env_command);
verifyLibcxxCorrectlyLinked();
return @import("print_env.zig").cmdEnv(arena, cmd_args, fs.File.stdout().deprecatedWriter());
return @import("print_env.zig").cmdEnv(arena, cmd_args);
} else if (mem.eql(u8, cmd, "reduce")) {
return jitCmd(gpa, arena, cmd_args, .{
.cmd_name = "reduce",
@ -1806,6 +1809,7 @@ fn buildOutputType(
} else manifest_file = arg;
},
.assembly, .assembly_with_cpp, .c, .cpp, .h, .hpp, .hm, .hmm, .ll, .bc, .m, .mm => {
dev.check(.c_compiler);
try create_module.c_source_files.append(arena, .{
// Populated after module creation.
.owner = undefined,
@ -1816,6 +1820,7 @@ fn buildOutputType(
});
},
.rc => {
dev.check(.win32_resource);
try create_module.rc_source_files.append(arena, .{
// Populated after module creation.
.owner = undefined,
@ -3301,6 +3306,7 @@ fn buildOutputType(
defer thread_pool.deinit();
for (create_module.c_source_files.items) |*src| {
dev.check(.c_compiler);
if (!mem.eql(u8, src.src_path, "-")) continue;
const ext = src.ext orelse
@ -3325,13 +3331,17 @@ fn buildOutputType(
// for the hashing algorithm here and in the cache are the same.
// We are providing our own cache key, because this file has nothing
// to do with the cache manifest.
var hasher = Cache.Hasher.init("0123456789abcdef");
var w = io.multiWriter(.{ f.writer(), hasher.writer() });
var fifo = std.fifo.LinearFifo(u8, .{ .Static = 4096 }).init();
try fifo.pump(fs.File.stdin().reader(), w.writer());
var file_writer = f.writer(&.{});
var buffer: [1000]u8 = undefined;
var hasher = file_writer.interface.hashed(Cache.Hasher.init("0123456789abcdef"), &buffer);
var stdin_reader = fs.File.stdin().readerStreaming(&.{});
_ = hasher.writer.sendFileAll(&stdin_reader, .unlimited) catch |err| switch (err) {
error.WriteFailed => fatal("failed to write {s}: {s}", .{ dump_path, file_writer.err.? }),
else => fatal("failed to pipe stdin to {s}: {s}", .{ dump_path, err }),
};
try hasher.writer.flush();
var bin_digest: Cache.BinDigest = undefined;
hasher.final(&bin_digest);
const bin_digest: Cache.BinDigest = hasher.hasher.finalResult();
const sub_path = try std.fmt.allocPrint(arena, "tmp" ++ sep ++ "{x}-stdin{s}", .{
&bin_digest, ext.canonicalName(target),
@ -3505,7 +3515,7 @@ fn buildOutputType(
if (t.arch == target.cpu.arch and t.os == target.os.tag) {
// If there's a `glibc_min`, there's also an `os_ver`.
if (t.glibc_min) |glibc_min| {
std.log.info("zig can provide libc for related target {s}-{s}.{}-{s}.{d}.{d}", .{
std.log.info("zig can provide libc for related target {s}-{s}.{f}-{s}.{d}.{d}", .{
@tagName(t.arch),
@tagName(t.os),
t.os_ver.?,
@ -3514,7 +3524,7 @@ fn buildOutputType(
glibc_min.minor,
});
} else if (t.os_ver) |os_ver| {
std.log.info("zig can provide libc for related target {s}-{s}.{}-{s}", .{
std.log.info("zig can provide libc for related target {s}-{s}.{f}-{s}", .{
@tagName(t.arch),
@tagName(t.os),
os_ver,
@ -5480,7 +5490,7 @@ fn jitCmd(
defer comp.destroy();
if (options.server) {
var server = std.zig.Server{
var server: std.zig.Server = .{
.out = fs.File.stdout(),
.in = undefined, // won't be receiving messages
.receive_fifo = undefined, // won't be receiving messages
@ -6064,6 +6074,8 @@ fn cmdAstCheck(
const tree = try Ast.parse(arena, source, mode);
var stdout_writer = fs.File.stdout().writer(&stdio_buffer);
const stdout_bw = &stdout_writer.interface;
switch (mode) {
.zig => {
const zir = try AstGen.generate(arena, tree);
@ -6106,31 +6118,30 @@ fn cmdAstCheck(
const extra_bytes = zir.extra.len * @sizeOf(u32);
const total_bytes = @sizeOf(Zir) + instruction_bytes + extra_bytes +
zir.string_bytes.len * @sizeOf(u8);
const stdout = fs.File.stdout();
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
// zig fmt: off
try stdout.deprecatedWriter().print(
\\# Source bytes: {}
\\# Tokens: {} ({})
\\# AST Nodes: {} ({})
\\# Total ZIR bytes: {}
\\# Instructions: {d} ({})
try stdout_bw.print(
\\# Source bytes: {Bi}
\\# Tokens: {} ({Bi})
\\# AST Nodes: {} ({Bi})
\\# Total ZIR bytes: {Bi}
\\# Instructions: {d} ({Bi})
\\# String Table Bytes: {}
\\# Extra Data Items: {d} ({})
\\# Extra Data Items: {d} ({Bi})
\\
, .{
fmtIntSizeBin(source.len),
tree.tokens.len, fmtIntSizeBin(token_bytes),
tree.nodes.len, fmtIntSizeBin(tree_bytes),
fmtIntSizeBin(total_bytes),
zir.instructions.len, fmtIntSizeBin(instruction_bytes),
fmtIntSizeBin(zir.string_bytes.len),
zir.extra.len, fmtIntSizeBin(extra_bytes),
source.len,
tree.tokens.len, token_bytes,
tree.nodes.len, tree_bytes,
total_bytes,
zir.instructions.len, instruction_bytes,
zir.string_bytes.len,
zir.extra.len, extra_bytes,
});
// zig fmt: on
}
try @import("print_zir.zig").renderAsTextToFile(arena, tree, zir, fs.File.stdout());
try @import("print_zir.zig").renderAsText(arena, tree, zir, stdout_bw);
try stdout_bw.flush();
if (zir.hasCompileErrors()) {
process.exit(1);
@ -6157,7 +6168,8 @@ fn cmdAstCheck(
fatal("-t option only available in builds of zig with debug extensions", .{});
}
try @import("print_zoir.zig").renderToFile(zoir, arena, fs.File.stdout());
try @import("print_zoir.zig").renderToWriter(zoir, arena, stdout_bw);
try stdout_bw.flush();
return cleanExit();
},
}
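The `{Bi}` specifier replaces the `std.fmt.fmtIntSizeBin` wrapper: the integer is passed directly and rendered as a binary-prefixed byte size. A self-contained sketch (output shape is an assumption; the specifier usage mirrors the hunk above):

    const std = @import("std");

    pub fn main() !void {
        var buf: [64]u8 = undefined;
        // Formats the count with binary units (KiB, MiB, ...), no wrapper needed.
        const s = try std.fmt.bufPrint(&buf, "{Bi}", .{@as(u64, 10 * 1024 * 1024)});
        std.debug.print("{s}\n", .{s});
    }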
@ -6185,8 +6197,7 @@ fn cmdDetectCpu(args: []const []const u8) !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
const stdout = fs.File.stdout().deprecatedWriter();
try stdout.writeAll(detect_cpu_usage);
try fs.File.stdout().writeAll(detect_cpu_usage);
return cleanExit();
} else if (mem.eql(u8, arg, "--llvm")) {
use_llvm = true;
@ -6278,11 +6289,11 @@ fn detectNativeCpuWithLLVM(
}
fn printCpu(cpu: std.Target.Cpu) !void {
var bw = io.bufferedWriter(fs.File.stdout().deprecatedWriter());
const stdout = bw.writer();
var stdout_writer = fs.File.stdout().writer(&stdio_buffer);
const stdout_bw = &stdout_writer.interface;
if (cpu.model.llvm_name) |llvm_name| {
try stdout.print("{s}\n", .{llvm_name});
try stdout_bw.print("{s}\n", .{llvm_name});
}
const all_features = cpu.arch.allFeaturesList();
@ -6291,10 +6302,10 @@ fn printCpu(cpu: std.Target.Cpu) !void {
const index: std.Target.Cpu.Feature.Set.Index = @intCast(index_usize);
const is_enabled = cpu.features.isEnabled(index);
const plus_or_minus = "-+"[@intFromBool(is_enabled)];
try stdout.print("{c}{s}\n", .{ plus_or_minus, llvm_name });
try stdout_bw.print("{c}{s}\n", .{ plus_or_minus, llvm_name });
}
try bw.flush();
try stdout_bw.flush();
}
fn cmdDumpLlvmInts(
@ -6327,16 +6338,14 @@ fn cmdDumpLlvmInts(
const dl = tm.createTargetDataLayout();
const context = llvm.Context.create();
var bw = io.bufferedWriter(fs.File.stdout().deprecatedWriter());
const stdout = bw.writer();
var stdout_writer = fs.File.stdout().writer(&stdio_buffer);
const stdout_bw = &stdout_writer.interface;
for ([_]u16{ 1, 8, 16, 32, 64, 128, 256 }) |bits| {
const int_type = context.intType(bits);
const alignment = dl.abiAlignmentOfType(int_type);
try stdout.print("LLVMABIAlignmentOfType(i{d}) == {d}\n", .{ bits, alignment });
try stdout_bw.print("LLVMABIAlignmentOfType(i{d}) == {d}\n", .{ bits, alignment });
}
try bw.flush();
try stdout_bw.flush();
return cleanExit();
}
@ -6358,6 +6367,8 @@ fn cmdDumpZir(
defer f.close();
const zir = try Zcu.loadZirCache(arena, f);
var stdout_writer = fs.File.stdout().writer(&stdio_buffer);
const stdout_bw = &stdout_writer.interface;
{
const instruction_bytes = zir.instructions.len *
@ -6367,25 +6378,24 @@ fn cmdDumpZir(
const extra_bytes = zir.extra.len * @sizeOf(u32);
const total_bytes = @sizeOf(Zir) + instruction_bytes + extra_bytes +
zir.string_bytes.len * @sizeOf(u8);
const stdout = fs.File.stdout();
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
// zig fmt: off
try stdout.deprecatedWriter().print(
\\# Total ZIR bytes: {}
\\# Instructions: {d} ({})
\\# String Table Bytes: {}
\\# Extra Data Items: {d} ({})
try stdout_bw.print(
\\# Total ZIR bytes: {Bi}
\\# Instructions: {d} ({Bi})
\\# String Table Bytes: {Bi}
\\# Extra Data Items: {d} ({Bi})
\\
, .{
fmtIntSizeBin(total_bytes),
zir.instructions.len, fmtIntSizeBin(instruction_bytes),
fmtIntSizeBin(zir.string_bytes.len),
zir.extra.len, fmtIntSizeBin(extra_bytes),
total_bytes,
zir.instructions.len, instruction_bytes,
zir.string_bytes.len,
zir.extra.len, extra_bytes,
});
// zig fmt: on
}
return @import("print_zir.zig").renderAsTextToFile(arena, null, zir, fs.File.stdout());
try @import("print_zir.zig").renderAsText(arena, null, zir, stdout_bw);
try stdout_bw.flush();
}
/// This is only enabled for debug builds.
@ -6443,19 +6453,19 @@ fn cmdChangelist(
var inst_map: std.AutoHashMapUnmanaged(Zir.Inst.Index, Zir.Inst.Index) = .empty;
try Zcu.mapOldZirToNew(arena, old_zir, new_zir, &inst_map);
var bw = io.bufferedWriter(fs.File.stdout().deprecatedWriter());
const stdout = bw.writer();
var stdout_writer = fs.File.stdout().writer(&stdio_buffer);
const stdout_bw = &stdout_writer.interface;
{
try stdout.print("Instruction mappings:\n", .{});
try stdout_bw.print("Instruction mappings:\n", .{});
var it = inst_map.iterator();
while (it.next()) |entry| {
try stdout.print(" %{d} => %{d}\n", .{
try stdout_bw.print(" %{d} => %{d}\n", .{
@intFromEnum(entry.key_ptr.*),
@intFromEnum(entry.value_ptr.*),
});
}
}
try bw.flush();
try stdout_bw.flush();
}
fn eatIntPrefix(arg: []const u8, base: u8) []const u8 {
@ -6717,13 +6727,10 @@ fn accessFrameworkPath(
for (&[_][]const u8{ ".tbd", ".dylib", "" }) |ext| {
test_path.clearRetainingCapacity();
try test_path.writer().print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
framework_dir_path,
framework_name,
framework_name,
ext,
try test_path.print("{s}" ++ sep ++ "{s}.framework" ++ sep ++ "{s}{s}", .{
framework_dir_path, framework_name, framework_name, ext,
});
try checked_paths.writer().print("\n {s}", .{test_path.items});
try checked_paths.print("\n {s}", .{test_path.items});
fs.cwd().access(test_path.items, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| fatal("unable to search for {s} framework '{s}': {s}", .{
@ -6793,8 +6800,7 @@ fn cmdFetch(
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
const stdout = fs.File.stdout().deprecatedWriter();
try stdout.writeAll(usage_fetch);
try fs.File.stdout().writeAll(usage_fetch);
return cleanExit();
} else if (mem.eql(u8, arg, "--global-cache-dir")) {
if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
@ -6907,7 +6913,9 @@ fn cmdFetch(
const name = switch (save) {
.no => {
try fs.File.stdout().deprecatedWriter().print("{s}\n", .{package_hash_slice});
var stdout = fs.File.stdout().writer(&stdio_buffer);
try stdout.interface.print("{s}\n", .{package_hash_slice});
try stdout.interface.flush();
return cleanExit();
},
.yes, .exact => |name| name: {
@ -6943,7 +6951,7 @@ fn cmdFetch(
var saved_path_or_url = path_or_url;
if (fetch.latest_commit) |latest_commit| resolved: {
const latest_commit_hex = try std.fmt.allocPrint(arena, "{}", .{latest_commit});
const latest_commit_hex = try std.fmt.allocPrint(arena, "{f}", .{latest_commit});
var uri = try std.Uri.parse(path_or_url);
@ -6956,7 +6964,7 @@ fn cmdFetch(
std.log.info("resolved ref '{s}' to commit {s}", .{ target_ref, latest_commit_hex });
// include the original refspec in a query parameter, could be used to check for updates
uri.query = .{ .percent_encoded = try std.fmt.allocPrint(arena, "ref={%}", .{fragment}) };
uri.query = .{ .percent_encoded = try std.fmt.allocPrint(arena, "ref={f%}", .{fragment}) };
} else {
std.log.info("resolved to commit {s}", .{latest_commit_hex});
}
@ -6965,7 +6973,7 @@ fn cmdFetch(
uri.fragment = .{ .raw = latest_commit_hex };
switch (save) {
.yes => saved_path_or_url = try std.fmt.allocPrint(arena, "{}", .{uri}),
.yes => saved_path_or_url = try std.fmt.allocPrint(arena, "{f}", .{uri}),
.no, .exact => {}, // keep the original URL
}
}

View File

@ -4,7 +4,7 @@ const introspect = @import("introspect.zig");
const Allocator = std.mem.Allocator;
const fatal = std.process.fatal;
pub fn cmdEnv(arena: Allocator, args: []const []const u8, stdout: std.fs.File.Writer) !void {
pub fn cmdEnv(arena: Allocator, args: []const []const u8) !void {
_ = args;
const cwd_path = try introspect.getResolvedCwd(arena);
const self_exe_path = try std.fs.selfExePathAlloc(arena);
@ -21,7 +21,7 @@ pub fn cmdEnv(arena: Allocator, args: []const []const u8, stdout: std.fs.File.Wr
const host = try std.zig.system.resolveTargetQuery(.{});
const triple = try host.zigTriple(arena);
var bw = std.io.bufferedWriter(stdout);
var bw = std.io.bufferedWriter(std.fs.File.stdout().deprecatedWriter());
const w = bw.writer();
var jws = std.json.writeStream(w, .{ .whitespace = .indent_1 });

File diff suppressed because it is too large

View File

@ -1,13 +1,8 @@
pub fn renderToFile(zoir: Zoir, arena: Allocator, f: std.fs.File) (std.fs.File.WriteError || Allocator.Error)!void {
var bw = std.io.bufferedWriter(f.writer());
try renderToWriter(zoir, arena, bw.writer());
try bw.flush();
}
pub const Error = error{ WriteFailed, OutOfMemory };
pub fn renderToWriter(zoir: Zoir, arena: Allocator, w: anytype) (@TypeOf(w).Error || Allocator.Error)!void {
pub fn renderToWriter(zoir: Zoir, arena: Allocator, w: *Writer) Error!void {
assert(!zoir.hasCompileErrors());
const fmtIntSizeBin = std.fmt.fmtIntSizeBin;
const bytes_per_node = comptime n: {
var n: usize = 0;
for (@typeInfo(Zoir.Node.Repr).@"struct".fields) |f| {
@ -23,42 +18,42 @@ pub fn renderToWriter(zoir: Zoir, arena: Allocator, w: anytype) (@TypeOf(w).Erro
// zig fmt: off
try w.print(
\\# Nodes: {} ({})
\\# Extra Data Items: {} ({})
\\# BigInt Limbs: {} ({})
\\# String Table Bytes: {}
\\# Total ZON Bytes: {}
\\# Nodes: {} ({Bi})
\\# Extra Data Items: {} ({Bi})
\\# BigInt Limbs: {} ({Bi})
\\# String Table Bytes: {Bi}
\\# Total ZON Bytes: {Bi}
\\
, .{
zoir.nodes.len, fmtIntSizeBin(node_bytes),
zoir.extra.len, fmtIntSizeBin(extra_bytes),
zoir.limbs.len, fmtIntSizeBin(limb_bytes),
fmtIntSizeBin(string_bytes),
fmtIntSizeBin(node_bytes + extra_bytes + limb_bytes + string_bytes),
zoir.nodes.len, node_bytes,
zoir.extra.len, extra_bytes,
zoir.limbs.len, limb_bytes,
string_bytes,
node_bytes + extra_bytes + limb_bytes + string_bytes,
});
// zig fmt: on
var pz: PrintZon = .{
.w = w.any(),
.w = w,
.arena = arena,
.zoir = zoir,
.indent = 0,
};
return @errorCast(pz.renderRoot());
return pz.renderRoot();
}
const PrintZon = struct {
w: std.io.AnyWriter,
w: *Writer,
arena: Allocator,
zoir: Zoir,
indent: u32,
fn renderRoot(pz: *PrintZon) anyerror!void {
fn renderRoot(pz: *PrintZon) Error!void {
try pz.renderNode(.root);
try pz.w.writeByte('\n');
}
fn renderNode(pz: *PrintZon, node: Zoir.Node.Index) anyerror!void {
fn renderNode(pz: *PrintZon, node: Zoir.Node.Index) Error!void {
const zoir = pz.zoir;
try pz.w.print("%{d} = ", .{@intFromEnum(node)});
switch (node.get(zoir)) {
@ -110,9 +105,7 @@ const PrintZon = struct {
fn newline(pz: *PrintZon) !void {
try pz.w.writeByte('\n');
for (0..pz.indent) |_| {
try pz.w.writeByteNTimes(' ', 2);
}
try pz.w.splatByteAll(' ', 2 * pz.indent);
}
};
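`splatByteAll` writes a repeated byte in one call, replacing the old loop over `writeByteNTimes`. A sketch of the indentation idiom, assuming `w: *std.io.Writer` and an `indent` count in scope:

    try w.writeByte('\n');
    try w.splatByteAll(' ', 2 * indent); // two spaces per indent level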
@ -120,3 +113,4 @@ const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Zoir = std.zig.Zoir;
const Writer = std.io.Writer;