Merge remote-tracking branch 'origin/master' into wrangle-writer-buffering

This commit is contained in:
Andrew Kelley 2025-07-10 16:11:10 -07:00
commit 6e6c68d889
364 changed files with 7430 additions and 28684 deletions

View File

@ -537,45 +537,6 @@ set(ZIG_STAGE2_SOURCES
src/Value.zig
src/Zcu.zig
src/Zcu/PerThread.zig
src/arch/aarch64/CodeGen.zig
src/arch/aarch64/Emit.zig
src/arch/aarch64/Mir.zig
src/arch/aarch64/abi.zig
src/arch/aarch64/bits.zig
src/arch/arm/CodeGen.zig
src/arch/arm/Emit.zig
src/arch/arm/Mir.zig
src/arch/arm/abi.zig
src/arch/arm/bits.zig
src/arch/powerpc/CodeGen.zig
src/arch/riscv64/abi.zig
src/arch/riscv64/bits.zig
src/arch/riscv64/CodeGen.zig
src/arch/riscv64/Emit.zig
src/arch/riscv64/encoding.zig
src/arch/riscv64/Lower.zig
src/arch/riscv64/Mir.zig
src/arch/riscv64/mnem.zig
src/arch/sparc64/CodeGen.zig
src/arch/sparc64/Emit.zig
src/arch/sparc64/Mir.zig
src/arch/sparc64/abi.zig
src/arch/sparc64/bits.zig
src/arch/wasm/CodeGen.zig
src/arch/wasm/Emit.zig
src/arch/wasm/Mir.zig
src/arch/wasm/abi.zig
src/arch/x86/bits.zig
src/arch/x86_64/CodeGen.zig
src/arch/x86_64/Disassembler.zig
src/arch/x86_64/Emit.zig
src/arch/x86_64/Encoding.zig
src/arch/x86_64/Lower.zig
src/arch/x86_64/Mir.zig
src/arch/x86_64/abi.zig
src/arch/x86_64/bits.zig
src/arch/x86_64/encoder.zig
src/arch/x86_64/encodings.zon
src/clang.zig
src/clang_options.zig
src/clang_options_data.zig

View File

@ -415,7 +415,18 @@ pub fn build(b: *std.Build) !void {
test_step.dependOn(check_fmt);
const test_cases_step = b.step("test-cases", "Run the main compiler test cases");
try tests.addCases(b, test_cases_step, test_filters, test_target_filters, target, .{
try tests.addCases(b, test_cases_step, target, .{
.test_filters = test_filters,
.test_target_filters = test_target_filters,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
.skip_windows = skip_windows,
.skip_macos = skip_macos,
.skip_linux = skip_linux,
.skip_llvm = skip_llvm,
.skip_libc = skip_libc,
}, .{
.skip_translate_c = skip_translate_c,
.skip_run_translated_c = skip_run_translated_c,
}, .{
@ -439,6 +450,7 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the behavior tests",
.optimize_modes = optimization_modes,
.include_paths = &.{},
.windows_libs = &.{},
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -448,8 +460,8 @@ pub fn build(b: *std.Build) !void {
.skip_linux = skip_linux,
.skip_llvm = skip_llvm,
.skip_libc = skip_libc,
// 2923515904 was observed on an x86_64-linux-gnu host.
.max_rss = 3100000000,
// 3888779264 was observed on an x86_64-linux-gnu host.
.max_rss = 4000000000,
}));
test_modules_step.dependOn(tests.addModuleTests(b, .{
@ -461,6 +473,7 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the @cImport tests",
.optimize_modes = optimization_modes,
.include_paths = &.{"test/c_import"},
.windows_libs = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -481,6 +494,7 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the compiler_rt tests",
.optimize_modes = optimization_modes,
.include_paths = &.{},
.windows_libs = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -502,6 +516,7 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the zigc tests",
.optimize_modes = optimization_modes,
.include_paths = &.{},
.windows_libs = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -523,6 +538,12 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the standard library tests",
.optimize_modes = optimization_modes,
.include_paths = &.{},
.windows_libs = &.{
"advapi32",
"crypt32",
"iphlpapi",
"ws2_32",
},
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -720,6 +741,12 @@ fn addCompilerMod(b: *std.Build, options: AddCompilerModOptions) *std.Build.Modu
compiler_mod.addImport("aro", aro_mod);
compiler_mod.addImport("aro_translate_c", aro_translate_c_mod);
if (options.target.result.os.tag == .windows) {
compiler_mod.linkSystemLibrary("advapi32", .{});
compiler_mod.linkSystemLibrary("crypt32", .{});
compiler_mod.linkSystemLibrary("ws2_32", .{});
}
return compiler_mod;
}
@ -1417,6 +1444,10 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath {
}),
});
if (b.graph.host.result.os.tag == .windows) {
doctest_exe.root_module.linkSystemLibrary("advapi32", .{});
}
var dir = b.build_root.handle.openDir("doc/langref", .{ .iterate = true }) catch |err| {
std.debug.panic("unable to open '{f}doc/langref' directory: {s}", .{
b.build_root, @errorName(err),

View File

@ -12,7 +12,7 @@ CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
export PATH="$HOME/deps/wasmtime-v29.0.0-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-9.2.0-rc1/bin:$HOME/local/bin:$PATH"
export PATH="$HOME/deps/wasmtime-v29.0.0-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-10.0.2/bin:$HOME/local/bin:$PATH"
# Make the `zig version` number consistent.
# This will affect the cmake command below.

View File

@ -12,7 +12,7 @@ CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
export PATH="$HOME/deps/wasmtime-v29.0.0-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-9.2.0-rc1/bin:$HOME/local/bin:$PATH"
export PATH="$HOME/deps/wasmtime-v29.0.0-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-10.0.2/bin:$HOME/local/bin:$PATH"
# Make the `zig version` number consistent.
# This will affect the cmake command below.

View File

@ -12,7 +12,7 @@ CACHE_BASENAME="zig+llvm+lld+clang-$TARGET-0.15.0-dev.233+7c85dc460"
PREFIX="$HOME/deps/$CACHE_BASENAME"
ZIG="$PREFIX/bin/zig"
export PATH="$HOME/deps/wasmtime-v29.0.0-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-9.2.0-rc1/bin:$HOME/local/bin:$PATH"
export PATH="$HOME/deps/wasmtime-v29.0.0-$ARCH-linux:$HOME/deps/qemu-linux-x86_64-10.0.2/bin:$HOME/local/bin:$PATH"
# Make the `zig version` number consistent.
# This will affect the cmake command below.

View File

@ -374,7 +374,8 @@
<p>
Most of the time, it is more appropriate to write to stderr rather than stdout, and
whether or not the message is successfully written to the stream is irrelevant.
For this common case, there is a simpler API:
Also, formatted printing often comes in handy. For this common case,
there is a simpler API:
</p>
{#code|hello_again.zig#}
@ -3842,37 +3843,6 @@ void do_a_thing(struct Foo *foo) {
{#header_close#}
{#header_close#}
{#header_open|usingnamespace#}
<p>
{#syntax#}usingnamespace{#endsyntax#} is a declaration that mixes all the public
declarations of the operand, which must be a {#link|struct#}, {#link|union#}, {#link|enum#},
or {#link|opaque#}, into the namespace:
</p>
{#code|test_usingnamespace.zig#}
<p>
{#syntax#}usingnamespace{#endsyntax#} has an important use case when organizing the public
API of a file or package. For example, one might have <code class="file">c.zig</code> with all of the
{#link|C imports|Import from C Header File#}:
</p>
{#syntax_block|zig|c.zig#}
pub usingnamespace @cImport({
@cInclude("epoxy/gl.h");
@cInclude("GLFW/glfw3.h");
@cDefine("STBI_ONLY_PNG", "");
@cDefine("STBI_NO_STDIO", "");
@cInclude("stb_image.h");
});
{#end_syntax_block#}
<p>
The above example demonstrates using {#syntax#}pub{#endsyntax#} to qualify the
{#syntax#}usingnamespace{#endsyntax#} additionally makes the imported declarations
{#syntax#}pub{#endsyntax#}. This can be used to forward declarations, giving precise control
over what declarations a given file exposes.
</p>
{#header_close#}
{#header_open|comptime#}
<p>
Zig places importance on the concept of whether an expression is known at compile-time.
@ -4279,16 +4249,9 @@ pub fn print(self: *Writer, arg0: []const u8, arg1: i32) !void {
{#header_close#}
{#header_open|Async Functions#}
<p>Async functions regressed with the release of 0.11.0. Their future in
the Zig language is unclear due to multiple unsolved problems:</p>
<ul>
<li>LLVM's lack of ability to optimize them.</li>
<li>Third-party debuggers' lack of ability to debug them.</li>
<li><a href="https://github.com/ziglang/zig/issues/5913">The cancellation problem</a>.</li>
<li>Async function pointers preventing the stack size from being known.</li>
</ul>
<p>These problems are surmountable, but it will take time. The Zig team
is currently focused on other priorities.</p>
<p>Async functions regressed with the release of 0.11.0. The current plan is to
reintroduce them as a lower level primitive that powers I/O implementations.</p>
<p>Tracking issue: <a href="https://github.com/ziglang/zig/issues/23446">Proposal: stackless coroutines as low-level primitives</a></p>
{#header_close#}
{#header_open|Builtin Functions|2col#}
@ -6552,7 +6515,7 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
</p>
<ul>
<li>If a call to {#syntax#}@import{#endsyntax#} is analyzed, the file being imported is analyzed.</li>
<li>If a type (including a file) is analyzed, all {#syntax#}comptime{#endsyntax#}, {#syntax#}usingnamespace{#endsyntax#}, and {#syntax#}export{#endsyntax#} declarations within it are analyzed.</li>
<li>If a type (including a file) is analyzed, all {#syntax#}comptime{#endsyntax#} and {#syntax#}export{#endsyntax#} declarations within it are analyzed.</li>
<li>If a type (including a file) is analyzed, and the compilation is for a {#link|test|Zig Test#}, and the module the type is within is the root module of the compilation, then all {#syntax#}test{#endsyntax#} declarations within it are also analyzed.</li>
<li>If a reference to a named declaration (i.e. a usage of it) is analyzed, the declaration being referenced is analyzed. Declarations are order-independent, so this reference may be above or below the declaration being referenced, or even in another file entirely.</li>
</ul>
@ -7372,29 +7335,6 @@ fn readU32Be() u32 {}
</ul>
</td>
</tr>
<tr>
<th scope="row">
<pre>{#syntax#}async{#endsyntax#}</pre>
</th>
<td>
{#syntax#}async{#endsyntax#} can be used before a function call to get a pointer to the function's frame when it suspends.
<ul>
<li>See also {#link|Async Functions#}</li>
</ul>
</td>
</tr>
<tr>
<th scope="row">
<pre>{#syntax#}await{#endsyntax#}</pre>
</th>
<td>
{#syntax#}await{#endsyntax#} can be used to suspend the current function until the frame provided after the {#syntax#}await{#endsyntax#} completes.
{#syntax#}await{#endsyntax#} copies the value returned from the target function's frame to the caller.
<ul>
<li>See also {#link|Async Functions#}</li>
</ul>
</td>
</tr>
<tr>
<th scope="row">
<pre>{#syntax#}break{#endsyntax#}</pre>
@ -7812,18 +7752,6 @@ fn readU32Be() u32 {}
</ul>
</td>
</tr>
<tr>
<th scope="row">
<pre>{#syntax#}usingnamespace{#endsyntax#}</pre>
</th>
<td>
{#syntax#}usingnamespace{#endsyntax#} is a top-level declaration that imports all the public declarations of the operand,
which must be a struct, union, or enum, into the current scope.
<ul>
<li>See also {#link|usingnamespace#}</li>
</ul>
</td>
</tr>
<tr>
<th scope="row">
<pre>{#syntax#}var{#endsyntax#}</pre>
@ -7893,7 +7821,6 @@ ComptimeDecl <- KEYWORD_comptime Block
Decl
<- (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE? / KEYWORD_inline / KEYWORD_noinline)? FnProto (SEMICOLON / Block)
/ (KEYWORD_export / KEYWORD_extern STRINGLITERALSINGLE?)? KEYWORD_threadlocal? GlobalVarDecl
/ KEYWORD_usingnamespace Expr SEMICOLON
FnProto <- KEYWORD_fn IDENTIFIER? LPAREN ParamDeclList RPAREN ByteAlign? AddrSpace? LinkSection? CallConv? EXCLAMATIONMARK? TypeExpr
@ -8006,8 +7933,7 @@ TypeExpr <- PrefixTypeOp* ErrorUnionExpr
ErrorUnionExpr <- SuffixExpr (EXCLAMATIONMARK TypeExpr)?
SuffixExpr
<- KEYWORD_async PrimaryTypeExpr SuffixOp* FnCallArguments
/ PrimaryTypeExpr (SuffixOp / FnCallArguments)*
<- PrimaryTypeExpr (SuffixOp / FnCallArguments)*
PrimaryTypeExpr
<- BUILTINIDENTIFIER FnCallArguments
@ -8183,7 +8109,6 @@ PrefixOp
/ MINUSPERCENT
/ AMPERSAND
/ KEYWORD_try
/ KEYWORD_await
PrefixTypeOp
<- QUESTIONMARK
@ -8404,8 +8329,6 @@ KEYWORD_and <- 'and' end_of_word
KEYWORD_anyframe <- 'anyframe' end_of_word
KEYWORD_anytype <- 'anytype' end_of_word
KEYWORD_asm <- 'asm' end_of_word
KEYWORD_async <- 'async' end_of_word
KEYWORD_await <- 'await' end_of_word
KEYWORD_break <- 'break' end_of_word
KEYWORD_callconv <- 'callconv' end_of_word
KEYWORD_catch <- 'catch' end_of_word
@ -8442,14 +8365,13 @@ KEYWORD_threadlocal <- 'threadlocal' end_of_word
KEYWORD_try <- 'try' end_of_word
KEYWORD_union <- 'union' end_of_word
KEYWORD_unreachable <- 'unreachable' end_of_word
KEYWORD_usingnamespace <- 'usingnamespace' end_of_word
KEYWORD_var <- 'var' end_of_word
KEYWORD_volatile <- 'volatile' end_of_word
KEYWORD_while <- 'while' end_of_word
keyword <- KEYWORD_addrspace / KEYWORD_align / KEYWORD_allowzero / KEYWORD_and
/ KEYWORD_anyframe / KEYWORD_anytype / KEYWORD_asm / KEYWORD_async
/ KEYWORD_await / KEYWORD_break / KEYWORD_callconv / KEYWORD_catch
/ KEYWORD_anyframe / KEYWORD_anytype / KEYWORD_asm
/ KEYWORD_break / KEYWORD_callconv / KEYWORD_catch
/ KEYWORD_comptime / KEYWORD_const / KEYWORD_continue / KEYWORD_defer
/ KEYWORD_else / KEYWORD_enum / KEYWORD_errdefer / KEYWORD_error / KEYWORD_export
/ KEYWORD_extern / KEYWORD_fn / KEYWORD_for / KEYWORD_if
@ -8458,7 +8380,7 @@ keyword <- KEYWORD_addrspace / KEYWORD_align / KEYWORD_allowzero / KEYWORD_and
/ KEYWORD_pub / KEYWORD_resume / KEYWORD_return / KEYWORD_linksection
/ KEYWORD_struct / KEYWORD_suspend / KEYWORD_switch / KEYWORD_test
/ KEYWORD_threadlocal / KEYWORD_try / KEYWORD_union / KEYWORD_unreachable
/ KEYWORD_usingnamespace / KEYWORD_var / KEYWORD_volatile / KEYWORD_while
/ KEYWORD_var / KEYWORD_volatile / KEYWORD_while
{#end_syntax_block#}
{#header_close#}
{#header_open|Zen#}

View File

@ -17,7 +17,7 @@ pub fn main() !void {
.maximum = 0.20,
};
const category = threshold.categorize(0.90);
try std.io.getStdOut().writeAll(@tagName(category));
try std.fs.File.stdout().writeAll(@tagName(category));
}
const std = @import("std");

View File

@ -1,8 +1,7 @@
const std = @import("std");
pub fn main() !void {
const stdout = std.io.getStdOut().writer();
try stdout.print("Hello, {s}!\n", .{"world"});
try std.fs.File.stdout().writeAll("Hello, World!\n");
}
// exe=succeed

View File

@ -1,7 +1,7 @@
const std = @import("std");
pub fn main() void {
std.debug.print("Hello, world!\n", .{});
std.debug.print("Hello, {s}!\n", .{"World"});
}
// exe=succeed

View File

@ -1,8 +0,0 @@
test "using std namespace" {
const S = struct {
usingnamespace @import("std");
};
try S.testing.expect(true);
}
// test

View File

@ -1432,7 +1432,7 @@ fn getFileContents(comp: *Compilation, path: []const u8, limit: ?u32) ![]const u
defer buf.deinit();
const max = limit orelse std.math.maxInt(u32);
file.reader().readAllArrayList(&buf, max) catch |e| switch (e) {
file.deprecatedReader().readAllArrayList(&buf, max) catch |e| switch (e) {
error.StreamTooLong => if (limit == null) return e,
else => return e,
};

View File

@ -1,4 +1,5 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = mem.Allocator;
const mem = std.mem;
const Source = @import("Source.zig");
@ -323,12 +324,13 @@ pub fn addExtra(
pub fn render(comp: *Compilation, config: std.io.tty.Config) void {
if (comp.diagnostics.list.items.len == 0) return;
var m = defaultMsgWriter(config);
var buffer: [1000]u8 = undefined;
var m = defaultMsgWriter(config, &buffer);
defer m.deinit();
renderMessages(comp, &m);
}
pub fn defaultMsgWriter(config: std.io.tty.Config) MsgWriter {
return MsgWriter.init(config);
pub fn defaultMsgWriter(config: std.io.tty.Config, buffer: []u8) MsgWriter {
return MsgWriter.init(config, buffer);
}
pub fn renderMessages(comp: *Compilation, m: anytype) void {
@ -449,12 +451,7 @@ pub fn renderMessage(comp: *Compilation, m: anytype, msg: Message) void {
},
.normalized => {
const f = struct {
pub fn f(
bytes: []const u8,
comptime _: []const u8,
_: std.fmt.FormatOptions,
writer: anytype,
) !void {
pub fn f(bytes: []const u8, writer: *std.io.Writer) std.io.Writer.Error!void {
var it: std.unicode.Utf8Iterator = .{
.bytes = bytes,
.i = 0,
@ -464,22 +461,16 @@ pub fn renderMessage(comp: *Compilation, m: anytype, msg: Message) void {
try writer.writeByte(@intCast(codepoint));
} else if (codepoint < 0xFFFF) {
try writer.writeAll("\\u");
try std.fmt.formatInt(codepoint, 16, .upper, .{
.fill = '0',
.width = 4,
}, writer);
try writer.printInt(codepoint, 16, .upper, .{ .fill = '0', .width = 4 });
} else {
try writer.writeAll("\\U");
try std.fmt.formatInt(codepoint, 16, .upper, .{
.fill = '0',
.width = 8,
}, writer);
try writer.printInt(codepoint, 16, .upper, .{ .fill = '0', .width = 8 });
}
}
}
}.f;
printRt(m, prop.msg, .{"{s}"}, .{
std.fmt.Formatter(f){ .data = msg.extra.normalized },
printRt(m, prop.msg, .{"{f}"}, .{
std.fmt.Formatter([]const u8, f){ .data = msg.extra.normalized },
});
},
.none, .offset => m.write(prop.msg),
@ -535,32 +526,31 @@ fn tagKind(d: *Diagnostics, tag: Tag, langopts: LangOpts) Kind {
}
const MsgWriter = struct {
w: *std.fs.File.Writer,
writer: *std.io.Writer,
config: std.io.tty.Config,
fn init(config: std.io.tty.Config, buffer: []u8) MsgWriter {
std.debug.lockStdErr();
return .{
.w = std.fs.stderr().writer(buffer),
.writer = std.debug.lockStderrWriter(buffer),
.config = config,
};
}
pub fn deinit(m: *MsgWriter) void {
m.w.flush() catch {};
std.debug.unlockStdErr();
std.debug.unlockStderrWriter();
m.* = undefined;
}
pub fn print(m: *MsgWriter, comptime fmt: []const u8, args: anytype) void {
m.w.writer().print(fmt, args) catch {};
m.writer.print(fmt, args) catch {};
}
fn write(m: *MsgWriter, msg: []const u8) void {
m.w.writer().writeAll(msg) catch {};
m.writer.writeAll(msg) catch {};
}
fn setColor(m: *MsgWriter, color: std.io.tty.Color) void {
m.config.setColor(m.w.writer(), color) catch {};
m.config.setColor(m.writer, color) catch {};
}
fn location(m: *MsgWriter, path: []const u8, line: u32, col: u32) void {

View File

@ -519,7 +519,7 @@ fn option(arg: []const u8, name: []const u8) ?[]const u8 {
fn addSource(d: *Driver, path: []const u8) !Source {
if (mem.eql(u8, "-", path)) {
const stdin = std.io.getStdIn().reader();
const stdin = std.fs.File.stdin().deprecatedReader();
const input = try stdin.readAllAlloc(d.comp.gpa, std.math.maxInt(u32));
defer d.comp.gpa.free(input);
return d.comp.addSourceFromBuffer("<stdin>", input);
@ -541,7 +541,7 @@ pub fn fatal(d: *Driver, comptime fmt: []const u8, args: anytype) error{ FatalEr
}
pub fn renderErrors(d: *Driver) void {
Diagnostics.render(d.comp, d.detectConfig(std.io.getStdErr()));
Diagnostics.render(d.comp, d.detectConfig(std.fs.File.stderr()));
}
pub fn detectConfig(d: *Driver, file: std.fs.File) std.io.tty.Config {
@ -591,7 +591,7 @@ pub fn main(d: *Driver, tc: *Toolchain, args: []const []const u8, comptime fast_
var macro_buf = std.ArrayList(u8).init(d.comp.gpa);
defer macro_buf.deinit();
const std_out = std.io.getStdOut().writer();
const std_out = std.fs.File.stdout().deprecatedWriter();
if (try parseArgs(d, std_out, macro_buf.writer(), args)) return;
const linking = !(d.only_preprocess or d.only_syntax or d.only_compile or d.only_preprocess_and_compile);
@ -686,10 +686,10 @@ fn processSource(
std.fs.cwd().createFile(some, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ some, errorDescription(er) })
else
std.io.getStdOut();
std.fs.File.stdout();
defer if (d.output_name != null) file.close();
var buf_w = std.io.bufferedWriter(file.writer());
var buf_w = std.io.bufferedWriter(file.deprecatedWriter());
pp.prettyPrintTokens(buf_w.writer(), dump_mode) catch |er|
return d.fatal("unable to write result: {s}", .{errorDescription(er)});
@ -704,8 +704,8 @@ fn processSource(
defer tree.deinit();
if (d.verbose_ast) {
const stdout = std.io.getStdOut();
var buf_writer = std.io.bufferedWriter(stdout.writer());
const stdout = std.fs.File.stdout();
var buf_writer = std.io.bufferedWriter(stdout.deprecatedWriter());
tree.dump(d.detectConfig(stdout), buf_writer.writer()) catch {};
buf_writer.flush() catch {};
}
@ -734,8 +734,8 @@ fn processSource(
defer ir.deinit(d.comp.gpa);
if (d.verbose_ir) {
const stdout = std.io.getStdOut();
var buf_writer = std.io.bufferedWriter(stdout.writer());
const stdout = std.fs.File.stdout();
var buf_writer = std.io.bufferedWriter(stdout.deprecatedWriter());
ir.dump(d.comp.gpa, d.detectConfig(stdout), buf_writer.writer()) catch {};
buf_writer.flush() catch {};
}
@ -806,10 +806,10 @@ fn processSource(
}
fn dumpLinkerArgs(items: []const []const u8) !void {
const stdout = std.io.getStdOut().writer();
const stdout = std.fs.File.stdout().deprecatedWriter();
for (items, 0..) |item, i| {
if (i > 0) try stdout.writeByte(' ');
try stdout.print("\"{}\"", .{std.zig.fmtEscapes(item)});
try stdout.print("\"{f}\"", .{std.zig.fmtString(item)});
}
try stdout.writeByte('\n');
}

View File

@ -500,8 +500,8 @@ fn checkDeprecatedUnavailable(p: *Parser, ty: Type, usage_tok: TokenIndex, decl_
const w = p.strings.writer();
const msg_str = p.comp.interner.get(@"error".msg.ref()).bytes;
try w.print("call to '{s}' declared with attribute error: {}", .{
p.tokSlice(@"error".__name_tok), std.zig.fmtEscapes(msg_str),
try w.print("call to '{s}' declared with attribute error: {f}", .{
p.tokSlice(@"error".__name_tok), std.zig.fmtString(msg_str),
});
const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
try p.errStr(.error_attribute, usage_tok, str);
@ -512,8 +512,8 @@ fn checkDeprecatedUnavailable(p: *Parser, ty: Type, usage_tok: TokenIndex, decl_
const w = p.strings.writer();
const msg_str = p.comp.interner.get(warning.msg.ref()).bytes;
try w.print("call to '{s}' declared with attribute warning: {}", .{
p.tokSlice(warning.__name_tok), std.zig.fmtEscapes(msg_str),
try w.print("call to '{s}' declared with attribute warning: {f}", .{
p.tokSlice(warning.__name_tok), std.zig.fmtString(msg_str),
});
const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
try p.errStr(.warning_attribute, usage_tok, str);
@ -542,7 +542,7 @@ fn errDeprecated(p: *Parser, tag: Diagnostics.Tag, tok_i: TokenIndex, msg: ?Valu
try w.writeAll(reason);
if (msg) |m| {
const str = p.comp.interner.get(m.ref()).bytes;
try w.print(": {}", .{std.zig.fmtEscapes(str)});
try w.print(": {f}", .{std.zig.fmtString(str)});
}
const str = try p.comp.diagnostics.arena.allocator().dupe(u8, p.strings.items[strings_top..]);
return p.errStr(tag, tok_i, str);

View File

@ -811,7 +811,7 @@ fn verboseLog(pp: *Preprocessor, raw: RawToken, comptime fmt: []const u8, args:
const source = pp.comp.getSource(raw.source);
const line_col = source.lineCol(.{ .id = raw.source, .line = raw.line, .byte_offset = raw.start });
const stderr = std.io.getStdErr().writer();
const stderr = std.fs.File.stderr().deprecatedWriter();
var buf_writer = std.io.bufferedWriter(stderr);
const writer = buf_writer.writer();
defer buf_writer.flush() catch {};
@ -3262,7 +3262,8 @@ fn printLinemarker(
// containing the same bytes as the input regardless of encoding.
else => {
try w.writeAll("\\x");
try std.fmt.formatInt(byte, 16, .lower, .{ .width = 2, .fill = '0' }, w);
// TODO try w.printInt(byte, 16, .lower, .{ .width = 2, .fill = '0' });
try w.print("{x:0>2}", .{byte});
},
};
try w.writeByte('"');

View File

@ -961,7 +961,7 @@ pub fn print(v: Value, ty: Type, comp: *const Compilation, w: anytype) @TypeOf(w
switch (key) {
.null => return w.writeAll("nullptr_t"),
.int => |repr| switch (repr) {
inline else => |x| return w.print("{d}", .{x}),
inline .u64, .i64, .big_int => |x| return w.print("{d}", .{x}),
},
.float => |repr| switch (repr) {
.f16 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}),
@ -982,7 +982,7 @@ pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: any
const without_null = bytes[0 .. bytes.len - @intFromEnum(size)];
try w.writeByte('"');
switch (size) {
.@"1" => try w.print("{}", .{std.zig.fmtEscapes(without_null)}),
.@"1" => try w.print("{f}", .{std.zig.fmtString(without_null)}),
.@"2" => {
var items: [2]u16 = undefined;
var i: usize = 0;

View File

@ -171,7 +171,7 @@ pub fn addRelocation(elf: *Elf, name: []const u8, section_kind: Object.Section,
/// strtab
/// section headers
pub fn finish(elf: *Elf, file: std.fs.File) !void {
var buf_writer = std.io.bufferedWriter(file.writer());
var buf_writer = std.io.bufferedWriter(file.deprecatedWriter());
const w = buf_writer.writer();
var num_sections: std.elf.Elf64_Half = additional_sections;

View File

@ -1781,7 +1781,8 @@ test "Macro matching" {
fn renderErrorsAndExit(comp: *aro.Compilation) noreturn {
defer std.process.exit(1);
var writer = aro.Diagnostics.defaultMsgWriter(std.io.tty.detectConfig(std.io.getStdErr()));
var buffer: [1000]u8 = undefined;
var writer = aro.Diagnostics.defaultMsgWriter(std.io.tty.detectConfig(std.fs.File.stderr()), &buffer);
defer writer.deinit(); // writer deinit must run *before* exit so that stderr is flushed
var saw_error = false;
@ -1824,6 +1825,6 @@ pub fn main() !void {
defer tree.deinit(gpa);
const formatted = try tree.render(arena);
try std.io.getStdOut().writeAll(formatted);
try std.fs.File.stdout().writeAll(formatted);
return std.process.cleanExit();
}

View File

@ -849,7 +849,7 @@ const Context = struct {
fn addIdentifier(c: *Context, bytes: []const u8) Allocator.Error!TokenIndex {
if (std.zig.primitives.isPrimitive(bytes))
return c.addTokenFmt(.identifier, "@\"{s}\"", .{bytes});
return c.addTokenFmt(.identifier, "{p}", .{std.zig.fmtId(bytes)});
return c.addTokenFmt(.identifier, "{f}", .{std.zig.fmtIdFlags(bytes, .{ .allow_primitive = true })});
}
fn listToSpan(c: *Context, list: []const NodeIndex) Allocator.Error!NodeSubRange {
@ -1201,7 +1201,7 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const compile_error_tok = try c.addToken(.builtin, "@compileError");
_ = try c.addToken(.l_paren, "(");
const err_msg_tok = try c.addTokenFmt(.string_literal, "\"{}\"", .{std.zig.fmtEscapes(payload.mangled)});
const err_msg_tok = try c.addTokenFmt(.string_literal, "\"{f}\"", .{std.zig.fmtString(payload.mangled)});
const err_msg = try c.addNode(.{
.tag = .string_literal,
.main_token = err_msg_tok,
@ -2116,7 +2116,7 @@ fn renderRecord(c: *Context, node: Node) !NodeIndex {
defer c.gpa.free(members);
for (payload.fields, 0..) |field, i| {
const name_tok = try c.addTokenFmt(.identifier, "{p}", .{std.zig.fmtId(field.name)});
const name_tok = try c.addTokenFmt(.identifier, "{f}", .{std.zig.fmtIdFlags(field.name, .{ .allow_primitive = true })});
_ = try c.addToken(.colon, ":");
const type_expr = try renderNode(c, field.type);
@ -2205,7 +2205,7 @@ fn renderFieldAccess(c: *Context, lhs: NodeIndex, field_name: []const u8) !NodeI
.main_token = try c.addToken(.period, "."),
.data = .{ .node_and_token = .{
lhs,
try c.addTokenFmt(.identifier, "{p}", .{std.zig.fmtId(field_name)}),
try c.addTokenFmt(.identifier, "{f}", .{std.zig.fmtIdFlags(field_name, .{ .allow_primitive = true })}),
} },
});
}
@ -2681,7 +2681,7 @@ fn renderVar(c: *Context, node: Node) !NodeIndex {
_ = try c.addToken(.l_paren, "(");
const res = try c.addNode(.{
.tag = .string_literal,
.main_token = try c.addTokenFmt(.string_literal, "\"{}\"", .{std.zig.fmtEscapes(some)}),
.main_token = try c.addTokenFmt(.string_literal, "\"{f}\"", .{std.zig.fmtString(some)}),
.data = undefined,
});
_ = try c.addToken(.r_paren, ")");
@ -2765,7 +2765,7 @@ fn renderFunc(c: *Context, node: Node) !NodeIndex {
_ = try c.addToken(.l_paren, "(");
const res = try c.addNode(.{
.tag = .string_literal,
.main_token = try c.addTokenFmt(.string_literal, "\"{}\"", .{std.zig.fmtEscapes(some)}),
.main_token = try c.addTokenFmt(.string_literal, "\"{f}\"", .{std.zig.fmtString(some)}),
.data = undefined,
});
_ = try c.addToken(.r_paren, ")");

View File

@ -255,7 +255,7 @@ pub fn main() !void {
builder.verbose_llvm_ir = "-";
} else if (mem.startsWith(u8, arg, "--verbose-llvm-ir=")) {
builder.verbose_llvm_ir = arg["--verbose-llvm-ir=".len..];
} else if (mem.eql(u8, arg, "--verbose-llvm-bc=")) {
} else if (mem.startsWith(u8, arg, "--verbose-llvm-bc=")) {
builder.verbose_llvm_bc = arg["--verbose-llvm-bc=".len..];
} else if (mem.eql(u8, arg, "--verbose-cimport")) {
builder.verbose_cimport = true;
@ -719,6 +719,8 @@ fn runStepNames(
if (test_fail_count > 0) w.print("; {d} failed", .{test_fail_count}) catch {};
if (test_leak_count > 0) w.print("; {d} leaked", .{test_leak_count}) catch {};
w.writeAll("\n") catch {};
// Print a fancy tree with build results.
var step_stack_copy = try step_stack.clone(gpa);
defer step_stack_copy.deinit(gpa);

View File

@ -40,7 +40,7 @@ pub fn main() !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
const stdout = std.io.getStdOut().writer();
const stdout = std.fs.File.stdout().deprecatedWriter();
try stdout.writeAll(usage_libc);
return std.process.cleanExit();
} else if (mem.eql(u8, arg, "-target")) {
@ -97,7 +97,7 @@ pub fn main() !void {
fatal("no include dirs detected for target {s}", .{zig_target});
}
var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
var bw = std.io.bufferedWriter(std.fs.File.stdout().deprecatedWriter());
var writer = bw.writer();
for (libc_dirs.libc_include_dir_list) |include_dir| {
try writer.writeAll(include_dir);
@ -125,7 +125,7 @@ pub fn main() !void {
};
defer libc.deinit(gpa);
var bw = std.io.bufferedWriter(std.io.getStdOut().writer());
var bw = std.io.bufferedWriter(std.fs.File.stdout().deprecatedWriter());
try libc.render(bw.writer());
try bw.flush();
}

View File

@ -54,7 +54,7 @@ fn cmdObjCopy(
fatal("unexpected positional argument: '{s}'", .{arg});
}
} else if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
return std.io.getStdOut().writeAll(usage);
return std.fs.File.stdout().writeAll(usage);
} else if (mem.eql(u8, arg, "-O") or mem.eql(u8, arg, "--output-target")) {
i += 1;
if (i >= args.len) fatal("expected another argument after '{s}'", .{arg});
@ -227,8 +227,8 @@ fn cmdObjCopy(
if (listen) {
var server = try Server.init(.{
.gpa = gpa,
.in = std.io.getStdIn(),
.out = std.io.getStdOut(),
.in = .stdin(),
.out = .stdout(),
.zig_version = builtin.zig_version_string,
});
defer server.deinit();
@ -635,11 +635,11 @@ const HexWriter = struct {
const payload_bytes = self.getPayloadBytes();
assert(payload_bytes.len <= MAX_PAYLOAD_LEN);
const line = try std.fmt.bufPrint(&outbuf, ":{0X:0>2}{1X:0>4}{2X:0>2}{3s}{4X:0>2}" ++ linesep, .{
const line = try std.fmt.bufPrint(&outbuf, ":{0X:0>2}{1X:0>4}{2X:0>2}{3X}{4X:0>2}" ++ linesep, .{
@as(u8, @intCast(payload_bytes.len)),
self.address,
@intFromEnum(self.payload),
std.fmt.fmtSliceHexUpper(payload_bytes),
payload_bytes,
self.checksum(),
});
try file.writeAll(line);
@ -1495,7 +1495,7 @@ const ElfFileHelper = struct {
if (size < prefix.len) return null;
try in_file.seekTo(offset);
var section_reader = std.io.limitedReader(in_file.reader(), size);
var section_reader = std.io.limitedReader(in_file.deprecatedReader(), size);
// allocate as large as decompressed data. if the compression doesn't fit, keep the data uncompressed.
const compressed_data = try allocator.alignedAlloc(u8, .@"8", @intCast(size));

View File

@ -68,7 +68,7 @@ pub fn main() !void {
const arg = args[i];
if (mem.startsWith(u8, arg, "-")) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
const stdout = std.io.getStdOut().writer();
const stdout = std.fs.File.stdout().deprecatedWriter();
try stdout.writeAll(usage);
return std.process.cleanExit();
} else if (mem.eql(u8, arg, "--")) {

View File

@ -160,12 +160,6 @@ fn walkMember(w: *Walk, decl: Ast.Node.Index) Error!void {
try walkExpression(w, decl);
},
.@"usingnamespace" => {
try w.transformations.append(.{ .delete_node = decl });
const expr = ast.nodeData(decl).node;
try walkExpression(w, expr);
},
.global_var_decl,
.local_var_decl,
.simple_var_decl,
@ -335,7 +329,6 @@ fn walkExpression(w: *Walk, node: Ast.Node.Index) Error!void {
.address_of,
.@"try",
.@"resume",
.@"await",
.deref,
=> {
return walkExpression(w, ast.nodeData(node).node);
@ -379,12 +372,8 @@ fn walkExpression(w: *Walk, node: Ast.Node.Index) Error!void {
.call_one,
.call_one_comma,
.async_call_one,
.async_call_one_comma,
.call,
.call_comma,
.async_call,
.async_call_comma,
=> {
var buf: [1]Ast.Node.Index = undefined;
return walkCall(w, ast.fullCall(&buf, node).?);
@ -525,7 +514,6 @@ fn walkExpression(w: *Walk, node: Ast.Node.Index) Error!void {
.local_var_decl => unreachable,
.simple_var_decl => unreachable,
.aligned_var_decl => unreachable,
.@"usingnamespace" => unreachable,
.test_decl => unreachable,
.asm_output => unreachable,
.asm_input => unreachable,

View File

@ -125,13 +125,12 @@ pub const Diagnostics = struct {
}
pub fn renderToStdErr(self: *Diagnostics, args: []const []const u8, config: std.io.tty.Config) void {
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
const stderr = std.io.getStdErr().writer();
const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
self.renderToWriter(args, stderr, config) catch return;
}
pub fn renderToWriter(self: *Diagnostics, args: []const []const u8, writer: anytype, config: std.io.tty.Config) !void {
pub fn renderToWriter(self: *Diagnostics, args: []const []const u8, writer: *std.io.Writer, config: std.io.tty.Config) !void {
for (self.errors.items) |err_details| {
try renderErrorMessage(writer, config, err_details, args);
}
@ -1403,7 +1402,7 @@ test parsePercent {
try std.testing.expectError(error.InvalidFormat, parsePercent("~1"));
}
pub fn renderErrorMessage(writer: anytype, config: std.io.tty.Config, err_details: Diagnostics.ErrorDetails, args: []const []const u8) !void {
pub fn renderErrorMessage(writer: *std.io.Writer, config: std.io.tty.Config, err_details: Diagnostics.ErrorDetails, args: []const []const u8) !void {
try config.setColor(writer, .dim);
try writer.writeAll("<cli>");
try config.setColor(writer, .reset);
@ -1481,27 +1480,27 @@ pub fn renderErrorMessage(writer: anytype, config: std.io.tty.Config, err_detail
try writer.writeByte('\n');
try config.setColor(writer, .green);
try writer.writeByteNTimes(' ', prefix.len);
try writer.splatByteAll(' ', prefix.len);
// Special case for when the option is *only* a prefix (e.g. invalid option: -)
if (err_details.arg_span.prefix_len == arg_with_name.len) {
try writer.writeByteNTimes('^', err_details.arg_span.prefix_len);
try writer.splatByteAll('^', err_details.arg_span.prefix_len);
} else {
try writer.writeByteNTimes('~', err_details.arg_span.prefix_len);
try writer.writeByteNTimes(' ', err_details.arg_span.name_offset - err_details.arg_span.prefix_len);
try writer.splatByteAll('~', err_details.arg_span.prefix_len);
try writer.splatByteAll(' ', err_details.arg_span.name_offset - err_details.arg_span.prefix_len);
if (!err_details.arg_span.point_at_next_arg and err_details.arg_span.value_offset == 0) {
try writer.writeByte('^');
try writer.writeByteNTimes('~', name_slice.len - 1);
try writer.splatByteAll('~', name_slice.len - 1);
} else if (err_details.arg_span.value_offset > 0) {
try writer.writeByteNTimes('~', err_details.arg_span.value_offset - err_details.arg_span.name_offset);
try writer.splatByteAll('~', err_details.arg_span.value_offset - err_details.arg_span.name_offset);
try writer.writeByte('^');
if (err_details.arg_span.value_offset < arg_with_name.len) {
try writer.writeByteNTimes('~', arg_with_name.len - err_details.arg_span.value_offset - 1);
try writer.splatByteAll('~', arg_with_name.len - err_details.arg_span.value_offset - 1);
}
} else if (err_details.arg_span.point_at_next_arg) {
try writer.writeByteNTimes('~', arg_with_name.len - err_details.arg_span.name_offset + 1);
try writer.splatByteAll('~', arg_with_name.len - err_details.arg_span.name_offset + 1);
try writer.writeByte('^');
if (next_arg_len > 0) {
try writer.writeByteNTimes('~', next_arg_len - 1);
try writer.splatByteAll('~', next_arg_len - 1);
}
}
}

View File

@ -570,7 +570,7 @@ pub const Compiler = struct {
switch (predefined_type) {
.GROUP_ICON, .GROUP_CURSOR => {
// Check for animated icon first
if (ani.isAnimatedIcon(file.reader())) {
if (ani.isAnimatedIcon(file.deprecatedReader())) {
// Animated icons are just put into the resource unmodified,
// and the resource type changes to ANIICON/ANICURSOR
@ -586,14 +586,14 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
try file.seekTo(0);
try writeResourceData(writer, file.reader(), header.data_size);
try writeResourceData(writer, file.deprecatedReader(), header.data_size);
return;
}
// isAnimatedIcon moved the file cursor so reset to the start
try file.seekTo(0);
const icon_dir = ico.read(self.allocator, file.reader(), try file.getEndPos()) catch |err| switch (err) {
const icon_dir = ico.read(self.allocator, file.deprecatedReader(), try file.getEndPos()) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => |e| {
return self.iconReadError(
@ -672,7 +672,7 @@ pub const Compiler = struct {
}
try file.seekTo(entry.data_offset_from_start_of_file);
var header_bytes = file.reader().readBytesNoEof(16) catch {
var header_bytes = file.deprecatedReader().readBytesNoEof(16) catch {
return self.iconReadError(
error.UnexpectedEOF,
filename_utf8,
@ -803,7 +803,7 @@ pub const Compiler = struct {
}
try file.seekTo(entry.data_offset_from_start_of_file);
try writeResourceDataNoPadding(writer, file.reader(), entry.data_size_in_bytes);
try writeResourceDataNoPadding(writer, file.deprecatedReader(), entry.data_size_in_bytes);
try writeDataPadding(writer, full_data_size);
if (self.state.icon_id == std.math.maxInt(u16)) {
@ -859,7 +859,7 @@ pub const Compiler = struct {
header.applyMemoryFlags(node.common_resource_attributes, self.source);
const file_size = try file.getEndPos();
const bitmap_info = bmp.read(file.reader(), file_size) catch |err| {
const bitmap_info = bmp.read(file.deprecatedReader(), file_size) catch |err| {
const filename_string_index = try self.diagnostics.putString(filename_utf8);
return self.addErrorDetailsAndFail(.{
.err = .bmp_read_error,
@ -922,7 +922,7 @@ pub const Compiler = struct {
header.data_size = bmp_bytes_to_write;
try header.write(writer, self.errContext(node.id));
try file.seekTo(bmp.file_header_len);
const file_reader = file.reader();
const file_reader = file.deprecatedReader();
try writeResourceDataNoPadding(writer, file_reader, bitmap_info.dib_header_size);
if (bitmap_info.getBitmasksByteLen() > 0) {
try writeResourceDataNoPadding(writer, file_reader, bitmap_info.getBitmasksByteLen());
@ -968,7 +968,7 @@ pub const Compiler = struct {
header.data_size = @intCast(file_size);
try header.write(writer, self.errContext(node.id));
var header_slurping_reader = headerSlurpingReader(148, file.reader());
var header_slurping_reader = headerSlurpingReader(148, file.deprecatedReader());
try writeResourceData(writer, header_slurping_reader.reader(), header.data_size);
try self.state.font_dir.add(self.arena, FontDir.Font{
@ -1002,7 +1002,7 @@ pub const Compiler = struct {
// We now know that the data size will fit in a u32
header.data_size = @intCast(data_size);
try header.write(writer, self.errContext(node.id));
try writeResourceData(writer, file.reader(), header.data_size);
try writeResourceData(writer, file.deprecatedReader(), header.data_size);
}
fn iconReadError(
@ -2947,7 +2947,7 @@ pub fn HeaderSlurpingReader(comptime size: usize, comptime ReaderType: anytype)
slurped_header: [size]u8 = [_]u8{0x00} ** size,
pub const Error = ReaderType.Error;
pub const Reader = std.io.Reader(*@This(), Error, read);
pub const Reader = std.io.GenericReader(*@This(), Error, read);
pub fn read(self: *@This(), buf: []u8) Error!usize {
const amt = try self.child_reader.read(buf);
@ -2981,7 +2981,7 @@ pub fn LimitedWriter(comptime WriterType: type) type {
bytes_left: u64,
pub const Error = error{NoSpaceLeft} || WriterType.Error;
pub const Writer = std.io.Writer(*Self, Error, write);
pub const Writer = std.io.GenericWriter(*Self, Error, write);
const Self = @This();

View File

@ -1,4 +1,5 @@
const std = @import("std");
const assert = std.debug.assert;
const Token = @import("lex.zig").Token;
const SourceMappings = @import("source_mapping.zig").SourceMappings;
const utils = @import("utils.zig");
@ -61,16 +62,15 @@ pub const Diagnostics = struct {
}
pub fn renderToStdErr(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, tty_config: std.io.tty.Config, source_mappings: ?SourceMappings) void {
std.debug.lockStdErr();
defer std.debug.unlockStdErr();
const stderr = std.io.getStdErr().writer();
const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
for (self.errors.items) |err_details| {
renderErrorMessage(stderr, tty_config, cwd, err_details, source, self.strings.items, source_mappings) catch return;
}
}
pub fn renderToStdErrDetectTTY(self: *Diagnostics, cwd: std.fs.Dir, source: []const u8, source_mappings: ?SourceMappings) void {
const tty_config = std.io.tty.detectConfig(std.io.getStdErr());
const tty_config = std.io.tty.detectConfig(std.fs.File.stderr());
return self.renderToStdErr(cwd, source, tty_config, source_mappings);
}
@ -409,15 +409,7 @@ pub const ErrorDetails = struct {
failed_to_open_cwd,
};
fn formatToken(
ctx: TokenFormatContext,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
writer: anytype,
) !void {
_ = fmt;
_ = options;
fn formatToken(ctx: TokenFormatContext, writer: *std.io.Writer) std.io.Writer.Error!void {
switch (ctx.token.id) {
.eof => return writer.writeAll(ctx.token.id.nameForErrorDisplay()),
else => {},
@ -441,7 +433,7 @@ pub const ErrorDetails = struct {
code_page: SupportedCodePage,
};
fn fmtToken(self: ErrorDetails, source: []const u8) std.fmt.Formatter(formatToken) {
fn fmtToken(self: ErrorDetails, source: []const u8) std.fmt.Formatter(TokenFormatContext, formatToken) {
return .{ .data = .{
.token = self.token,
.code_page = self.code_page,
@ -452,7 +444,7 @@ pub const ErrorDetails = struct {
pub fn render(self: ErrorDetails, writer: anytype, source: []const u8, strings: []const []const u8) !void {
switch (self.err) {
.unfinished_string_literal => {
return writer.print("unfinished string literal at '{s}', expected closing '\"'", .{self.fmtToken(source)});
return writer.print("unfinished string literal at '{f}', expected closing '\"'", .{self.fmtToken(source)});
},
.string_literal_too_long => {
return writer.print("string literal too long (max is currently {} characters)", .{self.extra.number});
@ -466,10 +458,14 @@ pub const ErrorDetails = struct {
.hint => return,
},
.illegal_byte => {
return writer.print("character '{s}' is not allowed", .{std.fmt.fmtSliceEscapeUpper(self.token.slice(source))});
return writer.print("character '{f}' is not allowed", .{
std.ascii.hexEscape(self.token.slice(source), .upper),
});
},
.illegal_byte_outside_string_literals => {
return writer.print("character '{s}' is not allowed outside of string literals", .{std.fmt.fmtSliceEscapeUpper(self.token.slice(source))});
return writer.print("character '{f}' is not allowed outside of string literals", .{
std.ascii.hexEscape(self.token.slice(source), .upper),
});
},
.illegal_codepoint_outside_string_literals => {
// This is somewhat hacky, but we know that:
@ -527,26 +523,26 @@ pub const ErrorDetails = struct {
return writer.print("unsupported code page '{s} (id={})' in #pragma code_page", .{ @tagName(code_page), number });
},
.unfinished_raw_data_block => {
return writer.print("unfinished raw data block at '{s}', expected closing '}}' or 'END'", .{self.fmtToken(source)});
return writer.print("unfinished raw data block at '{f}', expected closing '}}' or 'END'", .{self.fmtToken(source)});
},
.unfinished_string_table_block => {
return writer.print("unfinished STRINGTABLE block at '{s}', expected closing '}}' or 'END'", .{self.fmtToken(source)});
return writer.print("unfinished STRINGTABLE block at '{f}', expected closing '}}' or 'END'", .{self.fmtToken(source)});
},
.expected_token => {
return writer.print("expected '{s}', got '{s}'", .{ self.extra.expected.nameForErrorDisplay(), self.fmtToken(source) });
return writer.print("expected '{s}', got '{f}'", .{ self.extra.expected.nameForErrorDisplay(), self.fmtToken(source) });
},
.expected_something_else => {
try writer.writeAll("expected ");
try self.extra.expected_types.writeCommaSeparated(writer);
return writer.print("; got '{s}'", .{self.fmtToken(source)});
return writer.print("; got '{f}'", .{self.fmtToken(source)});
},
.resource_type_cant_use_raw_data => switch (self.type) {
.err, .warning => try writer.print("expected '<filename>', found '{s}' (resource type '{s}' can't use raw data)", .{ self.fmtToken(source), self.extra.resource.nameForErrorDisplay() }),
.note => try writer.print("if '{s}' is intended to be a filename, it must be specified as a quoted string literal", .{self.fmtToken(source)}),
.err, .warning => try writer.print("expected '<filename>', found '{f}' (resource type '{s}' can't use raw data)", .{ self.fmtToken(source), self.extra.resource.nameForErrorDisplay() }),
.note => try writer.print("if '{f}' is intended to be a filename, it must be specified as a quoted string literal", .{self.fmtToken(source)}),
.hint => return,
},
.id_must_be_ordinal => {
try writer.print("id of resource type '{s}' must be an ordinal (u16), got '{s}'", .{ self.extra.resource.nameForErrorDisplay(), self.fmtToken(source) });
try writer.print("id of resource type '{s}' must be an ordinal (u16), got '{f}'", .{ self.extra.resource.nameForErrorDisplay(), self.fmtToken(source) });
},
.name_or_id_not_allowed => {
try writer.print("name or id is not allowed for resource type '{s}'", .{self.extra.resource.nameForErrorDisplay()});
@ -562,7 +558,7 @@ pub const ErrorDetails = struct {
try writer.writeAll("ASCII character not equivalent to virtual key code");
},
.empty_menu_not_allowed => {
try writer.print("empty menu of type '{s}' not allowed", .{self.fmtToken(source)});
try writer.print("empty menu of type '{f}' not allowed", .{self.fmtToken(source)});
},
.rc_would_miscompile_version_value_padding => switch (self.type) {
.err, .warning => return writer.print("the padding before this quoted string value would be miscompiled by the Win32 RC compiler", .{}),
@ -627,7 +623,7 @@ pub const ErrorDetails = struct {
.string_already_defined => switch (self.type) {
.err, .warning => {
const language = self.extra.string_and_language.language;
return writer.print("string with id {d} (0x{X}) already defined for language {}", .{ self.extra.string_and_language.id, self.extra.string_and_language.id, language });
return writer.print("string with id {d} (0x{X}) already defined for language {f}", .{ self.extra.string_and_language.id, self.extra.string_and_language.id, language });
},
.note => return writer.print("previous definition of string with id {d} (0x{X}) here", .{ self.extra.string_and_language.id, self.extra.string_and_language.id }),
.hint => return,
@ -642,7 +638,7 @@ pub const ErrorDetails = struct {
try writer.print("unable to open file '{s}': {s}", .{ strings[self.extra.file_open_error.filename_string_index], @tagName(self.extra.file_open_error.err) });
},
.invalid_accelerator_key => {
try writer.print("invalid accelerator key '{s}': {s}", .{ self.fmtToken(source), @tagName(self.extra.accelerator_error.err) });
try writer.print("invalid accelerator key '{f}': {s}", .{ self.fmtToken(source), @tagName(self.extra.accelerator_error.err) });
},
.accelerator_type_required => {
try writer.writeAll("accelerator type [ASCII or VIRTKEY] required when key is an integer");
@ -898,7 +894,7 @@ fn cellCount(code_page: SupportedCodePage, source: []const u8, start_index: usiz
const truncated_str = "<...truncated...>";
pub fn renderErrorMessage(writer: anytype, tty_config: std.io.tty.Config, cwd: std.fs.Dir, err_details: ErrorDetails, source: []const u8, strings: []const []const u8, source_mappings: ?SourceMappings) !void {
pub fn renderErrorMessage(writer: *std.io.Writer, tty_config: std.io.tty.Config, cwd: std.fs.Dir, err_details: ErrorDetails, source: []const u8, strings: []const []const u8, source_mappings: ?SourceMappings) !void {
if (err_details.type == .hint) return;
const source_line_start = err_details.token.getLineStartForErrorDisplay(source);
@ -981,10 +977,10 @@ pub fn renderErrorMessage(writer: anytype, tty_config: std.io.tty.Config, cwd: s
try tty_config.setColor(writer, .green);
const num_spaces = truncated_visual_info.point_offset - truncated_visual_info.before_len;
try writer.writeByteNTimes(' ', num_spaces);
try writer.writeByteNTimes('~', truncated_visual_info.before_len);
try writer.splatByteAll(' ', num_spaces);
try writer.splatByteAll('~', truncated_visual_info.before_len);
try writer.writeByte('^');
try writer.writeByteNTimes('~', truncated_visual_info.after_len);
try writer.splatByteAll('~', truncated_visual_info.after_len);
try writer.writeByte('\n');
try tty_config.setColor(writer, .reset);

View File

@ -237,7 +237,9 @@ pub const Lexer = struct {
}
pub fn dump(self: *Self, token: *const Token) void {
std.debug.print("{s}:{d}: {s}\n", .{ @tagName(token.id), token.line_number, std.fmt.fmtSliceEscapeLower(token.slice(self.buffer)) });
std.debug.print("{s}:{d}: {f}\n", .{
@tagName(token.id), token.line_number, std.ascii.hexEscape(token.slice(self.buffer), .lower),
});
}
pub const LexMethod = enum {

View File

@ -22,14 +22,14 @@ pub fn main() !void {
defer arena_state.deinit();
const arena = arena_state.allocator();
const stderr = std.io.getStdErr();
const stderr = std.fs.File.stderr();
const stderr_config = std.io.tty.detectConfig(stderr);
const args = try std.process.argsAlloc(allocator);
defer std.process.argsFree(allocator, args);
if (args.len < 2) {
try renderErrorMessage(stderr.writer(), stderr_config, .err, "expected zig lib dir as first argument", .{});
try renderErrorMessage(std.debug.lockStderrWriter(&.{}), stderr_config, .err, "expected zig lib dir as first argument", .{});
std.process.exit(1);
}
const zig_lib_dir = args[1];
@ -44,7 +44,7 @@ pub fn main() !void {
var error_handler: ErrorHandler = switch (zig_integration) {
true => .{
.server = .{
.out = std.io.getStdOut(),
.out = std.fs.File.stdout(),
.in = undefined, // won't be receiving messages
.receive_fifo = undefined, // won't be receiving messages
},
@ -81,15 +81,15 @@ pub fn main() !void {
defer options.deinit();
if (options.print_help_and_exit) {
const stdout = std.io.getStdOut();
try cli.writeUsage(stdout.writer(), "zig rc");
const stdout = std.fs.File.stdout();
try cli.writeUsage(stdout.deprecatedWriter(), "zig rc");
return;
}
// Don't allow verbose when integrating with Zig via stdout
options.verbose = false;
const stdout_writer = std.io.getStdOut().writer();
const stdout_writer = std.fs.File.stdout().deprecatedWriter();
if (options.verbose) {
try options.dumpVerbose(stdout_writer);
try stdout_writer.writeByte('\n');
@ -290,7 +290,7 @@ pub fn main() !void {
};
defer depfile.close();
const depfile_writer = depfile.writer();
const depfile_writer = depfile.deprecatedWriter();
var depfile_buffered_writer = std.io.bufferedWriter(depfile_writer);
switch (options.depfile_fmt) {
.json => {
@ -343,7 +343,7 @@ pub fn main() !void {
switch (err) {
error.DuplicateResource => {
const duplicate_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "duplicate resource [id: {}, type: {}, language: {}]", .{
try error_handler.emitMessage(allocator, .err, "duplicate resource [id: {f}, type: {f}, language: {f}]", .{
duplicate_resource.name_value,
fmtResourceType(duplicate_resource.type_value),
duplicate_resource.language,
@ -352,7 +352,7 @@ pub fn main() !void {
error.ResourceDataTooLong => {
const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "resource has a data length that is too large to be written into a coff section", .{});
try error_handler.emitMessage(allocator, .note, "the resource with the invalid size is [id: {}, type: {}, language: {}]", .{
try error_handler.emitMessage(allocator, .note, "the resource with the invalid size is [id: {f}, type: {f}, language: {f}]", .{
overflow_resource.name_value,
fmtResourceType(overflow_resource.type_value),
overflow_resource.language,
@ -361,7 +361,7 @@ pub fn main() !void {
error.TotalResourceDataTooLong => {
const overflow_resource = resources.list.items[cvtres_diagnostics.duplicate_resource];
try error_handler.emitMessage(allocator, .err, "total resource data exceeds the maximum of the coff 'size of raw data' field", .{});
try error_handler.emitMessage(allocator, .note, "size overflow occurred when attempting to write this resource: [id: {}, type: {}, language: {}]", .{
try error_handler.emitMessage(allocator, .note, "size overflow occurred when attempting to write this resource: [id: {f}, type: {f}, language: {f}]", .{
overflow_resource.name_value,
fmtResourceType(overflow_resource.type_value),
overflow_resource.language,
@ -471,7 +471,7 @@ const IoStream = struct {
allocator: std.mem.Allocator,
};
pub const WriteError = std.mem.Allocator.Error || std.fs.File.WriteError;
pub const Writer = std.io.Writer(WriterContext, WriteError, write);
pub const Writer = std.io.GenericWriter(WriterContext, WriteError, write);
pub fn write(ctx: WriterContext, bytes: []const u8) WriteError!usize {
switch (ctx.self.*) {
@ -645,7 +645,9 @@ const ErrorHandler = union(enum) {
},
.tty => {
// extra newline to separate this line from the aro errors
try renderErrorMessage(std.io.getStdErr().writer(), self.tty, .err, "{s}\n", .{fail_msg});
const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
try renderErrorMessage(stderr, self.tty, .err, "{s}\n", .{fail_msg});
aro.Diagnostics.render(comp, self.tty);
},
}
@ -690,7 +692,9 @@ const ErrorHandler = union(enum) {
try server.serveErrorBundle(error_bundle);
},
.tty => {
try renderErrorMessage(std.io.getStdErr().writer(), self.tty, msg_type, format, args);
const stderr = std.debug.lockStderrWriter(&.{});
defer std.debug.unlockStderrWriter();
try renderErrorMessage(stderr, self.tty, msg_type, format, args);
},
}
}

View File

@ -1,4 +1,5 @@
const std = @import("std");
const assert = std.debug.assert;
const rc = @import("rc.zig");
const ResourceType = rc.ResourceType;
const CommonResourceAttributes = rc.CommonResourceAttributes;
@ -163,14 +164,7 @@ pub const Language = packed struct(u16) {
return @bitCast(self);
}
pub fn format(
language: Language,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = fmt;
_ = options;
pub fn format(language: Language, w: *std.io.Writer) std.io.Writer.Error!void {
const language_id = language.asInt();
const language_name = language_name: {
if (std.enums.fromInt(lang.LanguageId, language_id)) |lang_enum_val| {
@ -181,7 +175,7 @@ pub const Language = packed struct(u16) {
}
break :language_name "<UNKNOWN>";
};
try out_stream.print("{s} (0x{X})", .{ language_name, language_id });
try w.print("{s} (0x{X})", .{ language_name, language_id });
}
};
@ -445,47 +439,33 @@ pub const NameOrOrdinal = union(enum) {
}
}
pub fn format(
self: NameOrOrdinal,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = fmt;
_ = options;
pub fn format(self: NameOrOrdinal, w: *std.io.Writer) !void {
switch (self) {
.name => |name| {
try out_stream.print("{s}", .{std.unicode.fmtUtf16Le(name)});
try w.print("{f}", .{std.unicode.fmtUtf16Le(name)});
},
.ordinal => |ordinal| {
try out_stream.print("{d}", .{ordinal});
try w.print("{d}", .{ordinal});
},
}
}
fn formatResourceType(
self: NameOrOrdinal,
comptime fmt: []const u8,
options: std.fmt.FormatOptions,
out_stream: anytype,
) !void {
_ = fmt;
_ = options;
fn formatResourceType(self: NameOrOrdinal, w: *std.io.Writer) std.io.Writer.Error!void {
switch (self) {
.name => |name| {
try out_stream.print("{s}", .{std.unicode.fmtUtf16Le(name)});
try w.print("{f}", .{std.unicode.fmtUtf16Le(name)});
},
.ordinal => |ordinal| {
if (std.enums.tagName(RT, @enumFromInt(ordinal))) |predefined_type_name| {
try out_stream.print("{s}", .{predefined_type_name});
try w.print("{s}", .{predefined_type_name});
} else {
try out_stream.print("{d}", .{ordinal});
try w.print("{d}", .{ordinal});
}
},
}
}
pub fn fmtResourceType(type_value: NameOrOrdinal) std.fmt.Formatter(formatResourceType) {
pub fn fmtResourceType(type_value: NameOrOrdinal) std.fmt.Formatter(NameOrOrdinal, formatResourceType) {
return .{ .data = type_value };
}
};

View File

@ -86,7 +86,7 @@ pub const ErrorMessageType = enum { err, warning, note };
/// Used for generic colored errors/warnings/notes, more context-specific error messages
/// are handled elsewhere.
pub fn renderErrorMessage(writer: anytype, config: std.io.tty.Config, msg_type: ErrorMessageType, comptime format: []const u8, args: anytype) !void {
pub fn renderErrorMessage(writer: *std.io.Writer, config: std.io.tty.Config, msg_type: ErrorMessageType, comptime format: []const u8, args: anytype) !void {
switch (msg_type) {
.err => {
try config.setColor(writer, .bold);

View File

@ -303,7 +303,7 @@ pub fn mainSimple() anyerror!void {
var failed: u64 = 0;
// we don't want to bring in File and Writer if the backend doesn't support it
const stderr = if (comptime enable_print) std.io.getStdErr() else {};
const stderr = if (comptime enable_print) std.fs.File.stderr() else {};
for (builtin.test_functions) |test_fn| {
if (test_fn.func()) |_| {
@ -330,7 +330,7 @@ pub fn mainSimple() anyerror!void {
passed += 1;
}
if (enable_print and print_summary) {
stderr.writer().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
stderr.deprecatedWriter().print("{} passed, {} skipped, {} failed\n", .{ passed, skipped, failed }) catch {};
}
if (failed != 0) std.process.exit(1);
}

View File

@ -86,6 +86,26 @@ fn clear_cache(start: usize, end: usize) callconv(.c) void {
const result = std.os.linux.syscall3(.cacheflush, start, end - start, flags);
std.debug.assert(result == 0);
exportIt();
} else if (os == .netbsd and mips) {
// Replace with https://github.com/ziglang/zig/issues/23904 in the future.
const cfa: extern struct {
va: usize,
nbytes: usize,
whichcache: u32,
} = .{
.va = start,
.nbytes = end - start,
.whichcache = 3, // ICACHE | DCACHE
};
asm volatile (
\\ syscall
:
: [_] "{$2}" (165), // nr = SYS_sysarch
[_] "{$4}" (0), // op = MIPS_CACHEFLUSH
[_] "{$5}" (&cfa), // args = &cfa
: "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15", "$24", "$25", "hi", "lo", "memory"
);
exportIt();
} else if (mips and os == .openbsd) {
// TODO
//cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE);

View File

@ -18,7 +18,7 @@ const gcc_word = usize;
pub const panic = common.panic;
comptime {
if (builtin.link_libc and (builtin.abi.isAndroid() or builtin.os.tag == .openbsd)) {
if (builtin.link_libc and (builtin.abi.isAndroid() or builtin.abi.isOpenHarmony() or builtin.os.tag == .openbsd)) {
@export(&__emutls_get_address, .{ .name = "__emutls_get_address", .linkage = common.linkage, .visibility = common.visibility });
}
}

View File

@ -238,12 +238,8 @@ pub const File = struct {
.call_one,
.call_one_comma,
.async_call_one,
.async_call_one_comma,
.call,
.call_comma,
.async_call,
.async_call_comma,
=> {
var buf: [1]Ast.Node.Index = undefined;
return categorize_call(file_index, node, ast.fullCall(&buf, node).?);
@ -450,7 +446,7 @@ fn parse(file_name: []const u8, source: []u8) Oom!Ast {
error.WriteFailed => return error.OutOfMemory,
};
}
log.err("{s}:{}:{}: {s}", .{ file_name, err_loc.line + 1, err_loc.column + 1, rendered_err.items });
log.err("{s}:{d}:{d}: {s}", .{ file_name, err_loc.line + 1, err_loc.column + 1, rendered_err.items });
}
return Ast.parse(gpa, "", .zig);
}
@ -577,7 +573,6 @@ fn struct_decl(
},
.@"comptime",
.@"usingnamespace",
=> try w.expr(&namespace.base, parent_decl, ast.nodeData(member).node),
.test_decl => try w.expr(&namespace.base, parent_decl, ast.nodeData(member).opt_token_and_node[1]),
@ -649,7 +644,6 @@ fn expr(w: *Walk, scope: *Scope, parent_decl: Decl.Index, node: Ast.Node.Index)
const ast = w.file.get_ast();
switch (ast.nodeTag(node)) {
.root => unreachable, // Top-level declaration.
.@"usingnamespace" => unreachable, // Top-level declaration.
.test_decl => unreachable, // Top-level declaration.
.container_field_init => unreachable, // Top-level declaration.
.container_field_align => unreachable, // Top-level declaration.
@ -749,7 +743,6 @@ fn expr(w: *Walk, scope: *Scope, parent_decl: Decl.Index, node: Ast.Node.Index)
.@"comptime",
.@"nosuspend",
.@"suspend",
.@"await",
.@"resume",
.@"try",
=> try expr(w, scope, parent_decl, ast.nodeData(node).node),
@ -812,12 +805,8 @@ fn expr(w: *Walk, scope: *Scope, parent_decl: Decl.Index, node: Ast.Node.Index)
.call_one,
.call_one_comma,
.async_call_one,
.async_call_one_comma,
.call,
.call_comma,
.async_call,
.async_call_comma,
=> {
var buf: [1]Ast.Node.Index = undefined;
const full = ast.fullCall(&buf, node).?;

View File

@ -101,8 +101,6 @@ pub fn fileSourceHtml(
.keyword_align,
.keyword_and,
.keyword_asm,
.keyword_async,
.keyword_await,
.keyword_break,
.keyword_catch,
.keyword_comptime,
@ -139,7 +137,6 @@ pub fn fileSourceHtml(
.keyword_try,
.keyword_union,
.keyword_unreachable,
.keyword_usingnamespace,
.keyword_var,
.keyword_volatile,
.keyword_allowzero,

View File

@ -143,7 +143,7 @@ fn mainImpl() !void {
var parser = try Parser.init(gpa);
defer parser.deinit();
var stdin_buf = std.io.bufferedReader(std.io.getStdIn().reader());
var stdin_buf = std.io.bufferedReader(std.fs.File.stdin().deprecatedReader());
var line_buf = std.ArrayList(u8).init(gpa);
defer line_buf.deinit();
while (stdin_buf.reader().streamUntilDelimiter(line_buf.writer(), '\n', null)) {
@ -158,7 +158,7 @@ fn mainImpl() !void {
var doc = try parser.endInput();
defer doc.deinit(gpa);
var stdout_buf = std.io.bufferedWriter(std.io.getStdOut().writer());
var stdout_buf = std.io.bufferedWriter(std.fs.File.stdout().deprecatedWriter());
try doc.render(stdout_buf.writer());
try stdout_buf.flush();
}

View File

@ -9,7 +9,8 @@ pub const std_options = std.Options{
.logFn = logOverride,
};
var log_file: ?std.fs.File = null;
var log_file_buffer: [256]u8 = undefined;
var log_file_writer: ?std.fs.File.Writer = null;
fn logOverride(
comptime level: std.log.Level,
@ -17,15 +18,17 @@ fn logOverride(
comptime format: []const u8,
args: anytype,
) void {
const f = if (log_file) |f| f else f: {
const fw = if (log_file_writer) |*f| f else f: {
const f = fuzzer.cache_dir.createFile("tmp/libfuzzer.log", .{}) catch
@panic("failed to open fuzzer log file");
log_file = f;
break :f f;
log_file_writer = f.writer(&log_file_buffer);
break :f &log_file_writer.?;
};
const prefix1 = comptime level.asText();
const prefix2 = if (scope == .default) ": " else "(" ++ @tagName(scope) ++ "): ";
f.writer().print(prefix1 ++ prefix2 ++ format ++ "\n", args) catch @panic("failed to write to fuzzer log");
fw.interface.print(prefix1 ++ prefix2 ++ format ++ "\n", args) catch
@panic("failed to write to fuzzer log");
fw.interface.flush() catch @panic("failed to flush fuzzer log");
}
/// Helps determine run uniqueness in the face of recursion.
@ -226,18 +229,18 @@ const Fuzzer = struct {
.read = true,
}) catch |e| switch (e) {
error.PathAlreadyExists => continue,
else => fatal("unable to create '{}{d}: {s}", .{ f.corpus_directory, i, @errorName(err) }),
else => fatal("unable to create '{f}{d}: {s}", .{ f.corpus_directory, i, @errorName(err) }),
};
errdefer input_file.close();
// Initialize the mmap for the current input.
f.input = MemoryMappedList.create(input_file, 0, std.heap.page_size_max) catch |e| {
fatal("unable to init memory map for input at '{}{d}': {s}", .{
fatal("unable to init memory map for input at '{f}{d}': {s}", .{
f.corpus_directory, i, @errorName(e),
});
};
break;
},
else => fatal("unable to read '{}{d}': {s}", .{ f.corpus_directory, i, @errorName(err) }),
else => fatal("unable to read '{f}{d}': {s}", .{ f.corpus_directory, i, @errorName(err) }),
};
errdefer gpa.free(input);
f.corpus.append(gpa, .{
@ -263,7 +266,7 @@ const Fuzzer = struct {
const sub_path = try std.fmt.allocPrint(gpa, "f/{s}", .{f.unit_test_name});
f.corpus_directory = .{
.handle = f.cache_dir.makeOpenPath(sub_path, .{}) catch |err|
fatal("unable to open corpus directory 'f/{s}': {s}", .{ sub_path, @errorName(err) }),
fatal("unable to open corpus directory 'f/{s}': {t}", .{ sub_path, err }),
.path = sub_path,
};
initNextInput(f);

View File

@ -5,7 +5,7 @@ pub fn bufferedPrint() !void {
// Stdout is for the actual output of your application, for example if you
// are implementing gzip, then only the compressed bytes should be sent to
// stdout, not any debugging messages.
const stdout_file = std.io.getStdOut().writer();
const stdout_file = std.fs.File.stdout().deprecatedWriter();
// Buffering can improve performance significantly in print-heavy programs.
var bw = std.io.bufferedWriter(stdout_file);
const stdout = bw.writer();

View File

@ -1,51 +0,0 @@
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "floorf.S"
.text
.p2align 4,,15
.globl __MINGW_USYMBOL(floorf)
.def __MINGW_USYMBOL(floorf); .scl 2; .type 32; .endef
#ifdef __x86_64__
.seh_proc __MINGW_USYMBOL(floorf)
#endif
__MINGW_USYMBOL(floorf):
#if defined(_AMD64_) || defined(__x86_64__)
subq $40, %rsp
.seh_stackalloc 40
.seh_endprologue
unpcklps %xmm0, %xmm0
cvtps2pd %xmm0, %xmm0
call floor
unpcklpd %xmm0, %xmm0
cvtpd2ps %xmm0, %xmm0
addq $40, %rsp
ret
.seh_endproc
.def __MINGW_USYMBOL(floor); .scl 2; .type 32; .endef
#elif defined(_X86_) || defined(__i386__)
flds 4(%esp)
subl $8,%esp
fstcw 4(%esp) /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
movl $0x400,%edx /* round towards -oo */
orl 4(%esp),%edx
andl $0xf7ff,%edx
movl %edx,(%esp)
fldcw (%esp) /* load modified control word */
frndint /* round */
fldcw 4(%esp) /* restore original control word */
addl $8,%esp
ret
#endif

View File

@ -1,63 +0,0 @@
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#include <_mingw_mac.h>
.file "floorl.S"
.text
#ifdef __x86_64__
.align 8
#else
.align 4
#endif
.globl __MINGW_USYMBOL(floorl)
.def __MINGW_USYMBOL(floorl); .scl 2; .type 32; .endef
__MINGW_USYMBOL(floorl):
#if defined(_AMD64_) || defined(__x86_64__)
fldt (%rdx)
subq $24,%rsp
fstcw 8(%rsp) /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
movl $0x400,%edx /* round towards -oo */
orl 8(%rsp),%edx
andl $0xf7ff,%edx
movl %edx,(%rsp)
fldcw (%rsp) /* load modified control word */
frndint /* round */
fldcw 8(%rsp) /* restore original control word */
addq $24,%rsp
movq %rcx,%rax
movq $0,8(%rcx)
fstpt (%rcx)
ret
#elif defined(_X86_) || defined(__i386__)
fldt 4(%esp)
subl $8,%esp
fstcw 4(%esp) /* store fpu control word */
/* We use here %edx although only the low 1 bits are defined.
But none of the operations should care and they are faster
than the 16 bit operations. */
movl $0x400,%edx /* round towards -oo */
orl 4(%esp),%edx
andl $0xf7ff,%edx
movl %edx,(%esp)
fldcw (%esp) /* load modified control word */
frndint /* round */
fldcw 4(%esp) /* restore original control word */
addl $8,%esp
ret
#endif

View File

@ -1,7 +0,0 @@
#include <math.h>
double floor(double x)
{
__asm__ ("frintm %d0, %d1" : "=w"(x) : "w"(x));
return x;
}

View File

@ -1,7 +0,0 @@
#include <math.h>
float floorf(float x)
{
__asm__ ("frintm %s0, %s1" : "=w"(x) : "w"(x));
return x;
}

View File

@ -1,31 +0,0 @@
#include "libm.h"
#if FLT_EVAL_METHOD==0 || FLT_EVAL_METHOD==1
#define EPS DBL_EPSILON
#elif FLT_EVAL_METHOD==2
#define EPS LDBL_EPSILON
#endif
static const double_t toint = 1/EPS;
double floor(double x)
{
union {double f; uint64_t i;} u = {x};
int e = u.i >> 52 & 0x7ff;
double_t y;
if (e >= 0x3ff+52 || x == 0)
return x;
/* y = int(x) - x, where int(x) is an integer neighbor of x */
if (u.i >> 63)
y = x - toint + toint - x;
else
y = x + toint - toint - x;
/* special case because of non-nearest rounding modes */
if (e <= 0x3ff-1) {
FORCE_EVAL(y);
return u.i >> 63 ? -1 : 0;
}
if (y > 0)
return x + y - 1;
return x + y;
}

View File

@ -1,27 +0,0 @@
#include "libm.h"
float floorf(float x)
{
union {float f; uint32_t i;} u = {x};
int e = (int)(u.i >> 23 & 0xff) - 0x7f;
uint32_t m;
if (e >= 23)
return x;
if (e >= 0) {
m = 0x007fffff >> e;
if ((u.i & m) == 0)
return x;
FORCE_EVAL(x + 0x1p120f);
if (u.i >> 31)
u.i += m;
u.i &= ~m;
} else {
FORCE_EVAL(x + 0x1p120f);
if (u.i >> 31 == 0)
u.i = 0;
else if (u.i << 1)
u.f = -1.0;
}
return u.f;
}

View File

@ -1,34 +0,0 @@
#include "libm.h"
#if LDBL_MANT_DIG == 53 && LDBL_MAX_EXP == 1024
long double floorl(long double x)
{
return floor(x);
}
#elif (LDBL_MANT_DIG == 64 || LDBL_MANT_DIG == 113) && LDBL_MAX_EXP == 16384
static const long double toint = 1/LDBL_EPSILON;
long double floorl(long double x)
{
union ldshape u = {x};
int e = u.i.se & 0x7fff;
long double y;
if (e >= 0x3fff+LDBL_MANT_DIG-1 || x == 0)
return x;
/* y = int(x) - x, where int(x) is an integer neighbor of x */
if (u.i.se >> 15)
y = x - toint + toint - x;
else
y = x + toint - toint - x;
/* special case because of non-nearest rounding modes */
if (e <= 0x3fff-1) {
FORCE_EVAL(y);
return u.i.se >> 15 ? -1 : 0;
}
if (y > 0)
return x + y - 1;
return x + y;
}
#endif

View File

@ -1,20 +1,5 @@
.global floorf
.type floorf,@function
floorf:
flds 4(%esp)
jmp 1f
/* zig patch: removed `floorl` and `floorf` in favor of using zig compiler_rt's implementations */
.global floorl
.type floorl,@function
floorl:
fldt 4(%esp)
jmp 1f
.global floor
.type floor,@function
floor:
fldl 4(%esp)
1: mov $0x7,%al
1: fstcw 4(%esp)
mov 5(%esp),%ah
mov %al,5(%esp)

View File

@ -1 +0,0 @@
# see floor.s

View File

@ -1 +0,0 @@
# see floor.s

View File

@ -1,15 +0,0 @@
#include <math.h>
#ifdef _ARCH_PWR5X
double floor(double x)
{
__asm__ ("frim %0, %1" : "=d"(x) : "d"(x));
return x;
}
#else
#include "../floor.c"
#endif

View File

@ -1,15 +0,0 @@
#include <math.h>
#ifdef _ARCH_PWR5X
float floorf(float x)
{
__asm__ ("frim %0, %1" : "=f"(x) : "f"(x));
return x;
}
#else
#include "../floorf.c"
#endif

View File

@ -1,15 +0,0 @@
#include <math.h>
#if defined(__HTM__) || __ARCH__ >= 9
double floor(double x)
{
__asm__ ("fidbra %0, 7, %1, 4" : "=f"(x) : "f"(x));
return x;
}
#else
#include "../floor.c"
#endif

View File

@ -1,15 +0,0 @@
#include <math.h>
#if defined(__HTM__) || __ARCH__ >= 9
float floorf(float x)
{
__asm__ ("fiebra %0, 7, %1, 4" : "=f"(x) : "f"(x));
return x;
}
#else
#include "../floorf.c"
#endif

View File

@ -1,15 +0,0 @@
#include <math.h>
#if defined(__HTM__) || __ARCH__ >= 9
long double floorl(long double x)
{
__asm__ ("fixbra %0, 7, %1, 4" : "=f"(x) : "f"(x));
return x;
}
#else
#include "../floorl.c"
#endif

View File

@ -2466,10 +2466,9 @@ pub const GeneratedFile = struct {
pub fn getPath2(gen: GeneratedFile, src_builder: *Build, asking_step: ?*Step) []const u8 {
return gen.path orelse {
std.debug.lockStdErr();
const stderr = std.io.getStdErr();
dumpBadGetPathHelp(gen.step, stderr, src_builder, asking_step) catch {};
std.debug.unlockStdErr();
const w = debug.lockStderrWriter(&.{});
dumpBadGetPathHelp(gen.step, w, .detect(.stderr()), src_builder, asking_step) catch {};
debug.unlockStderrWriter();
@panic("misconfigured build script");
};
}
@ -2676,10 +2675,9 @@ pub const LazyPath = union(enum) {
var file_path: Cache.Path = .{
.root_dir = Cache.Directory.cwd(),
.sub_path = gen.file.path orelse {
std.debug.lockStdErr();
const stderr: fs.File = .stderr();
dumpBadGetPathHelp(gen.file.step, stderr, src_builder, asking_step) catch {};
std.debug.unlockStdErr();
const w = debug.lockStderrWriter(&.{});
dumpBadGetPathHelp(gen.file.step, w, .detect(.stderr()), src_builder, asking_step) catch {};
debug.unlockStderrWriter();
@panic("misconfigured build script");
},
};
@ -2769,17 +2767,16 @@ fn dumpBadDirnameHelp(
const w = debug.lockStderrWriter(&.{});
defer debug.unlockStderrWriter();
const stderr: fs.File = .stderr();
try w.print(msg, args);
const tty_config = std.io.tty.detectConfig(stderr);
const tty_config = std.io.tty.detectConfig(.stderr());
if (fail_step) |s| {
tty_config.setColor(w, .red) catch {};
try stderr.writeAll(" The step was created by this stack trace:\n");
try w.writeAll(" The step was created by this stack trace:\n");
tty_config.setColor(w, .reset) catch {};
s.dump(stderr);
s.dump(w, tty_config);
}
if (asking_step) |as| {
@ -2787,24 +2784,23 @@ fn dumpBadDirnameHelp(
try w.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name});
tty_config.setColor(w, .reset) catch {};
as.dump(stderr);
as.dump(w, tty_config);
}
tty_config.setColor(w, .red) catch {};
try stderr.writeAll(" Hope that helps. Proceeding to panic.\n");
try w.writeAll(" Hope that helps. Proceeding to panic.\n");
tty_config.setColor(w, .reset) catch {};
}
/// In this function the stderr mutex has already been locked.
pub fn dumpBadGetPathHelp(
s: *Step,
stderr: fs.File,
w: *std.io.Writer,
tty_config: std.io.tty.Config,
src_builder: *Build,
asking_step: ?*Step,
) anyerror!void {
var fw = stderr.writer(&.{});
const bw = &fw.interface;
try bw.print(
try w.print(
\\getPath() was called on a GeneratedFile that wasn't built yet.
\\ source package path: {s}
\\ Is there a missing Step dependency on step '{s}'?
@ -2814,22 +2810,21 @@ pub fn dumpBadGetPathHelp(
s.name,
});
const tty_config = std.io.tty.detectConfig(stderr);
tty_config.setColor(&bw, .red) catch {};
try stderr.writeAll(" The step was created by this stack trace:\n");
tty_config.setColor(&bw, .reset) catch {};
tty_config.setColor(w, .red) catch {};
try w.writeAll(" The step was created by this stack trace:\n");
tty_config.setColor(w, .reset) catch {};
s.dump(stderr);
s.dump(w, tty_config);
if (asking_step) |as| {
tty_config.setColor(&bw, .red) catch {};
try bw.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name});
tty_config.setColor(&bw, .reset) catch {};
tty_config.setColor(w, .red) catch {};
try w.print(" The step '{s}' that is missing a dependency on the above step was created by this stack trace:\n", .{as.name});
tty_config.setColor(w, .reset) catch {};
as.dump(stderr);
as.dump(w, tty_config);
}
tty_config.setColor(&bw, .red) catch {};
try stderr.writeAll(" Hope that helps. Proceeding to panic.\n");
tty_config.setColor(&bw, .reset) catch {};
tty_config.setColor(w, .red) catch {};
try w.writeAll(" Hope that helps. Proceeding to panic.\n");
tty_config.setColor(w, .reset) catch {};
}
pub const InstallDir = union(enum) {
@ -2866,11 +2861,6 @@ pub fn makeTempPath(b: *Build) []const u8 {
return result_path;
}
/// Deprecated; use `std.fmt.hex` instead.
pub fn hex64(x: u64) [16]u8 {
return std.fmt.hex(x);
}
/// A pair of target query and fully resolved target.
/// This type is generally required by build system API that need to be given a
/// target. The query is kept because the Zig toolchain needs to know which parts

View File

@ -2,6 +2,18 @@
//! This is not a general-purpose cache. It is designed to be fast and simple,
//! not to withstand attacks using specially-crafted input.
const Cache = @This();
const std = @import("std");
const builtin = @import("builtin");
const crypto = std.crypto;
const fs = std.fs;
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.cache);
gpa: Allocator,
manifest_dir: fs.Dir,
hash: HashHelper = .{},
@ -21,18 +33,6 @@ pub const Path = @import("Cache/Path.zig");
pub const Directory = @import("Cache/Directory.zig");
pub const DepTokenizer = @import("Cache/DepTokenizer.zig");
const Cache = @This();
const std = @import("std");
const builtin = @import("builtin");
const crypto = std.crypto;
const fs = std.fs;
const assert = std.debug.assert;
const testing = std.testing;
const mem = std.mem;
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
const log = std.log.scoped(.cache);
pub fn addPrefix(cache: *Cache, directory: Directory) void {
cache.prefixes_buffer[cache.prefixes_len] = directory;
cache.prefixes_len += 1;
@ -1118,25 +1118,12 @@ pub const Manifest = struct {
if (self.manifest_dirty) {
self.manifest_dirty = false;
const gpa = self.cache.gpa;
var contents: std.ArrayListUnmanaged(u8) = .empty;
defer contents.deinit(gpa);
try contents.appendSlice(gpa, manifest_header ++ "\n");
for (self.files.keys()) |file| {
try contents.print(gpa, "{d} {d} {d} {x} {d} {s}\n", .{
file.stat.size,
file.stat.inode,
file.stat.mtime,
&file.bin_digest,
file.prefixed_path.prefix,
file.prefixed_path.sub_path,
});
}
try manifest_file.setEndPos(contents.items.len);
var pos: usize = 0;
while (pos < contents.items.len) pos += try manifest_file.pwrite(contents.items[pos..], pos);
var buffer: [4000]u8 = undefined;
var fw = manifest_file.writer(&buffer);
writeDirtyManifestToStream(self, &fw) catch |err| switch (err) {
error.WriteFailed => return fw.err.?,
else => |e| return e,
};
}
if (self.want_shared_lock) {
@ -1144,6 +1131,21 @@ pub const Manifest = struct {
}
}
fn writeDirtyManifestToStream(self: *Manifest, fw: *fs.File.Writer) !void {
try fw.interface.writeAll(manifest_header ++ "\n");
for (self.files.keys()) |file| {
try fw.interface.print("{d} {d} {d} {x} {d} {s}\n", .{
file.stat.size,
file.stat.inode,
file.stat.mtime,
&file.bin_digest,
file.prefixed_path.prefix,
file.prefixed_path.sub_path,
});
}
try fw.end();
}
fn downgradeToSharedLock(self: *Manifest) !void {
if (!self.have_exclusive_lock) return;

View File

@ -1,5 +1,6 @@
const Directory = @This();
const std = @import("../../std.zig");
const assert = std.debug.assert;
const fs = std.fs;
const fmt = std.fmt;
const Allocator = std.mem.Allocator;
@ -55,11 +56,10 @@ pub fn closeAndFree(self: *Directory, gpa: Allocator) void {
self.* = undefined;
}
pub fn format(self: Directory, w: *std.io.Writer, comptime fmt_string: []const u8) !void {
if (fmt_string.len != 0) fmt.invalidFmtError(fmt_string, self);
pub fn format(self: Directory, writer: *std.io.Writer) std.io.Writer.Error!void {
if (self.path) |p| {
try w.writeAll(p);
try w.writeAll(fs.path.sep_str);
try writer.writeAll(p);
try writer.writeAll(fs.path.sep_str);
}
}

View File

@ -1,3 +1,10 @@
const Path = @This();
const std = @import("../../std.zig");
const assert = std.debug.assert;
const fs = std.fs;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;
root_dir: Cache.Directory,
/// The path, relative to the root dir, that this `Path` represents.
/// Empty string means the root_dir is the path.
@ -137,46 +144,55 @@ pub fn toString(p: Path, allocator: Allocator) Allocator.Error![]u8 {
}
pub fn toStringZ(p: Path, allocator: Allocator) Allocator.Error![:0]u8 {
return std.fmt.allocPrintZ(allocator, "{f}", .{p});
return std.fmt.allocPrintSentinel(allocator, "{f}", .{p}, 0);
}
pub fn format(self: Path, w: *std.io.Writer, comptime fmt_string: []const u8) !void {
if (fmt_string.len == 1) {
// Quote-escape the string.
const stringEscape = std.zig.stringEscape;
const f = switch (fmt_string[0]) {
'q' => "",
'\'' => "\'",
else => @compileError("unsupported format string: " ++ fmt_string),
};
if (self.root_dir.path) |p| {
try stringEscape(p, w, f);
if (self.sub_path.len > 0) try stringEscape(fs.path.sep_str, w, f);
}
if (self.sub_path.len > 0) {
try stringEscape(self.sub_path, w, f);
}
return;
pub fn fmtEscapeString(path: Path) std.fmt.Formatter(Path, formatEscapeString) {
return .{ .data = path };
}
pub fn formatEscapeString(path: Path, writer: *std.io.Writer) std.io.Writer.Error!void {
if (path.root_dir.path) |p| {
try std.zig.stringEscape(p, writer);
if (path.sub_path.len > 0) try std.zig.stringEscape(fs.path.sep_str, writer);
}
if (fmt_string.len > 0)
std.fmt.invalidFmtError(fmt_string, self);
if (path.sub_path.len > 0) {
try std.zig.stringEscape(path.sub_path, writer);
}
}
pub fn fmtEscapeChar(path: Path) std.fmt.Formatter(Path, formatEscapeChar) {
return .{ .data = path };
}
pub fn formatEscapeChar(path: Path, writer: *std.io.Writer) std.io.Writer.Error!void {
if (path.root_dir.path) |p| {
try std.zig.charEscape(p, writer);
if (path.sub_path.len > 0) try std.zig.charEscape(fs.path.sep_str, writer);
}
if (path.sub_path.len > 0) {
try std.zig.charEscape(path.sub_path, writer);
}
}
pub fn format(self: Path, writer: *std.io.Writer) std.io.Writer.Error!void {
if (std.fs.path.isAbsolute(self.sub_path)) {
try w.writeAll(self.sub_path);
try writer.writeAll(self.sub_path);
return;
}
if (self.root_dir.path) |p| {
try w.writeAll(p);
try writer.writeAll(p);
if (self.sub_path.len > 0) {
try w.writeAll(fs.path.sep_str);
try w.writeAll(self.sub_path);
try writer.writeAll(fs.path.sep_str);
try writer.writeAll(self.sub_path);
}
return;
}
if (self.sub_path.len > 0) {
try w.writeAll(self.sub_path);
try writer.writeAll(self.sub_path);
return;
}
try w.writeByte('.');
try writer.writeByte('.');
}
pub fn eql(self: Path, other: Path) bool {
@ -218,9 +234,3 @@ pub const TableAdapter = struct {
return a.eql(b);
}
};
const Path = @This();
const std = @import("../../std.zig");
const fs = std.fs;
const Allocator = std.mem.Allocator;
const Cache = std.Build.Cache;

View File

@ -124,9 +124,10 @@ fn rebuildTestsWorkerRunFallible(run: *Step.Run, ttyconf: std.io.tty.Config, par
const show_stderr = compile.step.result_stderr.len > 0;
if (show_error_msgs or show_compile_errors or show_stderr) {
const bw = std.debug.lockStderrWriter(&.{});
var buf: [256]u8 = undefined;
const w = std.debug.lockStderrWriter(&buf);
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, bw, false) catch {};
build_runner.printErrorMessages(gpa, &compile.step, .{ .ttyconf = ttyconf }, w, false) catch {};
}
const rebuilt_bin_path = result catch |err| switch (err) {
@ -151,9 +152,10 @@ fn fuzzWorkerRun(
run.rerunInFuzzMode(web_server, unit_test_index, prog_node) catch |err| switch (err) {
error.MakeFailed => {
const bw = std.debug.lockStderrWriter(&.{});
var buf: [256]u8 = undefined;
const w = std.debug.lockStderrWriter(&buf);
defer std.debug.unlockStderrWriter();
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, bw, false) catch {};
build_runner.printErrorMessages(gpa, &run.step, .{ .ttyconf = ttyconf }, w, false) catch {};
return;
},
else => {

View File

@ -176,7 +176,7 @@ fn serveFile(
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
const file_contents = ws.zig_lib_directory.handle.readFileAlloc(name, gpa, .limited(10 * 1024 * 1024)) catch |err| {
log.err("failed to read '{f}{s}': {s}", .{ ws.zig_lib_directory, name, @errorName(err) });
log.err("failed to read '{f}{s}': {t}", .{ ws.zig_lib_directory, name, err });
return error.AlreadyReported;
};
defer gpa.free(file_contents);

View File

@ -186,7 +186,7 @@ pub const IncludeDir = union(enum) {
.embed_path => |lazy_path| {
// Special case: this is a single arg.
const resolved = lazy_path.getPath3(b, asking_step);
const arg = b.fmt("--embed-dir={}", .{resolved});
const arg = b.fmt("--embed-dir={f}", .{resolved});
return zig_args.append(arg);
},
};
@ -572,7 +572,7 @@ pub fn appendZigProcessFlags(
try zig_args.append(switch (unwind_tables) {
.none => "-fno-unwind-tables",
.sync => "-funwind-tables",
.@"async" => "-fasync-unwind-tables",
.async => "-fasync-unwind-tables",
});
}

View File

@ -286,28 +286,25 @@ pub fn cast(step: *Step, comptime T: type) ?*T {
}
/// For debugging purposes, prints identifying information about this Step.
pub fn dump(step: *Step, file: std.fs.File) void {
var fw = file.writer(&.{});
const bw = &fw.interface;
const tty_config = std.io.tty.detectConfig(file);
pub fn dump(step: *Step, w: *std.io.Writer, tty_config: std.io.tty.Config) void {
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
bw.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
w.print("Unable to dump stack trace: Unable to open debug info: {s}\n", .{
@errorName(err),
}) catch {};
return;
};
if (step.getStackTrace()) |stack_trace| {
bw.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
std.debug.writeStackTrace(stack_trace, &bw, debug_info, tty_config) catch |err| {
bw.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch {};
w.print("name: '{s}'. creation stack trace:\n", .{step.name}) catch {};
std.debug.writeStackTrace(stack_trace, w, debug_info, tty_config) catch |err| {
w.print("Unable to dump stack trace: {s}\n", .{@errorName(err)}) catch {};
return;
};
} else {
const field = "debug_stack_frames_count";
comptime assert(@hasField(Build, field));
tty_config.setColor(&bw, .yellow) catch {};
bw.print("name: '{s}'. no stack trace collected for this step, see std.Build." ++ field ++ "\n", .{step.name}) catch {};
tty_config.setColor(&bw, .reset) catch {};
tty_config.setColor(w, .yellow) catch {};
w.print("name: '{s}'. no stack trace collected for this step, see std.Build." ++ field ++ "\n", .{step.name}) catch {};
tty_config.setColor(w, .reset) catch {};
}
}
@ -483,9 +480,9 @@ pub fn evalZigProcess(
pub fn installFile(s: *Step, src_lazy_path: Build.LazyPath, dest_path: []const u8) !std.fs.Dir.PrevStatus {
const b = s.owner;
const src_path = src_lazy_path.getPath3(b, s);
try handleVerbose(b, null, &.{ "install", "-C", b.fmt("{}", .{src_path}), dest_path });
try handleVerbose(b, null, &.{ "install", "-C", b.fmt("{f}", .{src_path}), dest_path });
return src_path.root_dir.handle.updateFile(src_path.sub_path, std.fs.cwd(), dest_path, .{}) catch |err| {
return s.fail("unable to update file from '{}' to '{s}': {s}", .{
return s.fail("unable to update file from '{f}' to '{s}': {s}", .{
src_path, dest_path, @errorName(err),
});
};

View File

@ -230,16 +230,11 @@ const ComputeCompareExpected = struct {
literal: u64,
},
pub fn format(
value: ComputeCompareExpected,
bw: *Writer,
comptime fmt: []const u8,
) !void {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, value);
try bw.print("{s} ", .{@tagName(value.op)});
pub fn format(value: ComputeCompareExpected, w: *Writer) Writer.Error!void {
try w.print("{t} ", .{value.op});
switch (value.value) {
.variable => |name| try bw.writeAll(name),
.literal => |x| try bw.print("{x}", .{x}),
.variable => |name| try w.writeAll(name),
.literal => |x| try w.print("{x}", .{x}),
}
}
};
@ -571,7 +566,9 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
null,
.of(u64),
null,
) catch |err| return step.fail("unable to read '{f'}': {s}", .{ src_path, @errorName(err) });
) catch |err| return step.fail("unable to read '{f}': {s}", .{
std.fmt.alt(src_path, .formatEscapeChar), @errorName(err),
});
var vars: std.StringHashMap(u64) = .init(gpa);
for (check_object.checks.items) |chk| {
@ -606,7 +603,7 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
// we either format message string with escaped codes, or not to aid debugging
// the failed test.
const fmtMessageString = struct {
fn fmtMessageString(kind: Check.Kind, msg: []const u8) std.fmt.Formatter(formatMessageString) {
fn fmtMessageString(kind: Check.Kind, msg: []const u8) std.fmt.Formatter(Ctx, formatMessageString) {
return .{ .data = .{
.kind = kind,
.msg = msg,
@ -618,15 +615,10 @@ fn make(step: *Step, make_options: Step.MakeOptions) !void {
msg: []const u8,
};
fn formatMessageString(
ctx: Ctx,
bw: *Writer,
comptime unused_fmt_string: []const u8,
) !void {
_ = unused_fmt_string;
fn formatMessageString(ctx: Ctx, w: *Writer) !void {
switch (ctx.kind) {
.dump_section => try bw.print("{f}", .{std.fmt.fmtSliceEscapeLower(ctx.msg)}),
else => try bw.writeAll(ctx.msg),
.dump_section => try w.print("{f}", .{std.ascii.hexEscape(ctx.msg, .lower)}),
else => try w.writeAll(ctx.msg),
}
}
}.fmtMessageString;
@ -882,9 +874,9 @@ const MachODumper = struct {
try bw.writeByte('\n');
}
fn dumpLoadCommand(lc: macho.LoadCommandIterator.LoadCommand, index: usize, bw: *Writer) !void {
fn dumpLoadCommand(lc: macho.LoadCommandIterator.LoadCommand, index: usize, writer: *Writer) !void {
// print header first
try bw.print(
try writer.print(
\\LC {d}
\\cmd {s}
\\cmdsize {d}
@ -893,8 +885,8 @@ const MachODumper = struct {
switch (lc.cmd()) {
.SEGMENT_64 => {
const seg = lc.cast(macho.segment_command_64).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\segname {s}
\\vmaddr {x}
\\vmsize {x}
@ -909,8 +901,8 @@ const MachODumper = struct {
});
for (lc.getSections()) |sect| {
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\sectname {s}
\\addr {x}
\\size {x}
@ -932,8 +924,8 @@ const MachODumper = struct {
.REEXPORT_DYLIB,
=> {
const dylib = lc.cast(macho.dylib_command).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\name {s}
\\timestamp {d}
\\current version {x}
@ -948,16 +940,16 @@ const MachODumper = struct {
.MAIN => {
const main = lc.cast(macho.entry_point_command).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\entryoff {x}
\\stacksize {x}
, .{ main.entryoff, main.stacksize });
},
.RPATH => {
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\path {s}
, .{
lc.getRpathPathName(),
@ -966,8 +958,8 @@ const MachODumper = struct {
.UUID => {
const uuid = lc.cast(macho.uuid_command).?;
try bw.writeByte('\n');
try bw.print("uuid {x}", .{&uuid.uuid});
try writer.writeByte('\n');
try writer.print("uuid {x}", .{&uuid.uuid});
},
.DATA_IN_CODE,
@ -975,8 +967,8 @@ const MachODumper = struct {
.CODE_SIGNATURE,
=> {
const llc = lc.cast(macho.linkedit_data_command).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\dataoff {x}
\\datasize {x}
, .{ llc.dataoff, llc.datasize });
@ -984,8 +976,8 @@ const MachODumper = struct {
.DYLD_INFO_ONLY => {
const dlc = lc.cast(macho.dyld_info_command).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\rebaseoff {x}
\\rebasesize {x}
\\bindoff {x}
@ -1012,8 +1004,8 @@ const MachODumper = struct {
.SYMTAB => {
const slc = lc.cast(macho.symtab_command).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\symoff {x}
\\nsyms {x}
\\stroff {x}
@ -1028,8 +1020,8 @@ const MachODumper = struct {
.DYSYMTAB => {
const dlc = lc.cast(macho.dysymtab_command).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\ilocalsym {x}
\\nlocalsym {x}
\\iextdefsym {x}
@ -1052,8 +1044,8 @@ const MachODumper = struct {
.BUILD_VERSION => {
const blc = lc.cast(macho.build_version_command).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\platform {s}
\\minos {d}.{d}.{d}
\\sdk {d}.{d}.{d}
@ -1069,12 +1061,12 @@ const MachODumper = struct {
blc.ntools,
});
for (lc.getBuildVersionTools()) |tool| {
try bw.writeByte('\n');
try writer.writeByte('\n');
switch (tool.tool) {
.CLANG, .SWIFT, .LD, .LLD, .ZIG => try bw.print("tool {s}\n", .{@tagName(tool.tool)}),
else => |x| try bw.print("tool {d}\n", .{@intFromEnum(x)}),
.CLANG, .SWIFT, .LD, .LLD, .ZIG => try writer.print("tool {s}\n", .{@tagName(tool.tool)}),
else => |x| try writer.print("tool {d}\n", .{@intFromEnum(x)}),
}
try bw.print(
try writer.print(
\\version {d}.{d}.{d}
, .{
tool.version >> 16,
@ -1090,8 +1082,8 @@ const MachODumper = struct {
.VERSION_MIN_TVOS,
=> {
const vlc = lc.cast(macho.version_min_command).?;
try bw.writeByte('\n');
try bw.print(
try writer.writeByte('\n');
try writer.print(
\\version {d}.{d}.{d}
\\sdk {d}.{d}.{d}
, .{
@ -1943,58 +1935,58 @@ const ElfDumper = struct {
try bw.print("entry {x}\n", .{ctx.hdr.e_entry});
}
fn dumpPhdrs(ctx: ObjectContext, bw: *Writer) !void {
fn dumpPhdrs(ctx: ObjectContext, writer: *Writer) !void {
if (ctx.phdrs.len == 0) return;
try bw.writeAll("program headers\n");
try writer.writeAll("program headers\n");
for (ctx.phdrs, 0..) |phdr, phndx| {
try bw.print("phdr {d}\n", .{phndx});
try bw.print("type {f}\n", .{fmtPhType(phdr.p_type)});
try bw.print("vaddr {x}\n", .{phdr.p_vaddr});
try bw.print("paddr {x}\n", .{phdr.p_paddr});
try bw.print("offset {x}\n", .{phdr.p_offset});
try bw.print("memsz {x}\n", .{phdr.p_memsz});
try bw.print("filesz {x}\n", .{phdr.p_filesz});
try bw.print("align {x}\n", .{phdr.p_align});
try writer.print("phdr {d}\n", .{phndx});
try writer.print("type {f}\n", .{fmtPhType(phdr.p_type)});
try writer.print("vaddr {x}\n", .{phdr.p_vaddr});
try writer.print("paddr {x}\n", .{phdr.p_paddr});
try writer.print("offset {x}\n", .{phdr.p_offset});
try writer.print("memsz {x}\n", .{phdr.p_memsz});
try writer.print("filesz {x}\n", .{phdr.p_filesz});
try writer.print("align {x}\n", .{phdr.p_align});
{
const flags = phdr.p_flags;
try bw.writeAll("flags");
if (flags > 0) try bw.writeByte(' ');
try writer.writeAll("flags");
if (flags > 0) try writer.writeByte(' ');
if (flags & elf.PF_R != 0) {
try bw.writeByte('R');
try writer.writeByte('R');
}
if (flags & elf.PF_W != 0) {
try bw.writeByte('W');
try writer.writeByte('W');
}
if (flags & elf.PF_X != 0) {
try bw.writeByte('E');
try writer.writeByte('E');
}
if (flags & elf.PF_MASKOS != 0) {
try bw.writeAll("OS");
try writer.writeAll("OS");
}
if (flags & elf.PF_MASKPROC != 0) {
try bw.writeAll("PROC");
try writer.writeAll("PROC");
}
try bw.writeByte('\n');
try writer.writeByte('\n');
}
}
}
fn dumpShdrs(ctx: ObjectContext, bw: *Writer) !void {
fn dumpShdrs(ctx: ObjectContext, writer: *Writer) !void {
if (ctx.shdrs.len == 0) return;
try bw.writeAll("section headers\n");
try writer.writeAll("section headers\n");
for (ctx.shdrs, 0..) |shdr, shndx| {
try bw.print("shdr {d}\n", .{shndx});
try bw.print("name {s}\n", .{ctx.getSectionName(shndx)});
try bw.print("type {f}\n", .{fmtShType(shdr.sh_type)});
try bw.print("addr {x}\n", .{shdr.sh_addr});
try bw.print("offset {x}\n", .{shdr.sh_offset});
try bw.print("size {x}\n", .{shdr.sh_size});
try bw.print("addralign {x}\n", .{shdr.sh_addralign});
try writer.print("shdr {d}\n", .{shndx});
try writer.print("name {s}\n", .{ctx.getSectionName(shndx)});
try writer.print("type {f}\n", .{fmtShType(shdr.sh_type)});
try writer.print("addr {x}\n", .{shdr.sh_addr});
try writer.print("offset {x}\n", .{shdr.sh_offset});
try writer.print("size {x}\n", .{shdr.sh_size});
try writer.print("addralign {x}\n", .{shdr.sh_addralign});
// TODO dump formatted sh_flags
}
}
@ -2263,16 +2255,11 @@ const ElfDumper = struct {
return str[0..std.mem.indexOfScalar(u8, str, 0).?];
}
fn fmtShType(sh_type: u32) std.fmt.Formatter(formatShType) {
fn fmtShType(sh_type: u32) std.fmt.Formatter(u32, formatShType) {
return .{ .data = sh_type };
}
fn formatShType(
sh_type: u32,
bw: *Writer,
comptime unused_fmt_string: []const u8,
) !void {
_ = unused_fmt_string;
fn formatShType(sh_type: u32, writer: *Writer) Writer.Error!void {
const name = switch (sh_type) {
elf.SHT_NULL => "NULL",
elf.SHT_PROGBITS => "PROGBITS",
@ -2298,26 +2285,21 @@ const ElfDumper = struct {
elf.SHT_GNU_VERNEED => "VERNEED",
elf.SHT_GNU_VERSYM => "VERSYM",
else => if (elf.SHT_LOOS <= sh_type and sh_type < elf.SHT_HIOS) {
return try bw.print("LOOS+0x{x}", .{sh_type - elf.SHT_LOOS});
return try writer.print("LOOS+0x{x}", .{sh_type - elf.SHT_LOOS});
} else if (elf.SHT_LOPROC <= sh_type and sh_type < elf.SHT_HIPROC) {
return try bw.print("LOPROC+0x{x}", .{sh_type - elf.SHT_LOPROC});
return try writer.print("LOPROC+0x{x}", .{sh_type - elf.SHT_LOPROC});
} else if (elf.SHT_LOUSER <= sh_type and sh_type < elf.SHT_HIUSER) {
return try bw.print("LOUSER+0x{x}", .{sh_type - elf.SHT_LOUSER});
return try writer.print("LOUSER+0x{x}", .{sh_type - elf.SHT_LOUSER});
} else "UNKNOWN",
};
try bw.writeAll(name);
try writer.writeAll(name);
}
fn fmtPhType(ph_type: u32) std.fmt.Formatter(formatPhType) {
fn fmtPhType(ph_type: u32) std.fmt.Formatter(u32, formatPhType) {
return .{ .data = ph_type };
}
fn formatPhType(
ph_type: u32,
bw: *Writer,
comptime unused_fmt_string: []const u8,
) !void {
_ = unused_fmt_string;
fn formatPhType(ph_type: u32, writer: *Writer) Writer.Error!void {
const p_type = switch (ph_type) {
elf.PT_NULL => "NULL",
elf.PT_LOAD => "LOAD",
@ -2332,12 +2314,12 @@ const ElfDumper = struct {
elf.PT_GNU_STACK => "GNU_STACK",
elf.PT_GNU_RELRO => "GNU_RELRO",
else => if (elf.PT_LOOS <= ph_type and ph_type < elf.PT_HIOS) {
return try bw.print("LOOS+0x{x}", .{ph_type - elf.PT_LOOS});
return try writer.print("LOOS+0x{x}", .{ph_type - elf.PT_LOOS});
} else if (elf.PT_LOPROC <= ph_type and ph_type < elf.PT_HIPROC) {
return try bw.print("LOPROC+0x{x}", .{ph_type - elf.PT_LOPROC});
return try writer.print("LOPROC+0x{x}", .{ph_type - elf.PT_LOPROC});
} else "UNKNOWN",
};
try bw.writeAll(p_type);
try writer.writeAll(p_type);
}
};

View File

@ -1017,20 +1017,16 @@ fn getGeneratedFilePath(compile: *Compile, comptime tag_name: []const u8, asking
const maybe_path: ?*GeneratedFile = @field(compile, tag_name);
const generated_file = maybe_path orelse {
std.debug.lockStdErr();
const stderr: fs.File = .stderr();
std.Build.dumpBadGetPathHelp(&compile.step, stderr, compile.step.owner, asking_step) catch {};
const w = std.debug.lockStderrWriter(&.{});
std.Build.dumpBadGetPathHelp(&compile.step, w, .detect(.stderr()), compile.step.owner, asking_step) catch {};
std.debug.unlockStderrWriter();
@panic("missing emit option for " ++ tag_name);
};
const path = generated_file.path orelse {
std.debug.lockStdErr();
const stderr: fs.File = .stderr();
std.Build.dumpBadGetPathHelp(&compile.step, stderr, compile.step.owner, asking_step) catch {};
const w = std.debug.lockStderrWriter(&.{});
std.Build.dumpBadGetPathHelp(&compile.step, w, .detect(.stderr()), compile.step.owner, asking_step) catch {};
std.debug.unlockStderrWriter();
@panic(tag_name ++ " is null. Is there a missing step dependency?");
};

View File

@ -198,7 +198,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
var aw: std.io.Writer.Allocating = .init(gpa);
defer aw.deinit();
const bw = &aw.interface;
const bw = &aw.writer;
const header_text = "This file was generated by ConfigHeader using the Zig Build System.";
const c_generated_line = "/* " ++ header_text ++ " */\n";
@ -335,7 +335,7 @@ fn render_autoconf_at(
) !void {
const build = step.owner;
const allocator = build.allocator;
const bw = &aw.interface;
const bw = &aw.writer;
const used = allocator.alloc(bool, values.count()) catch @panic("OOM");
for (used) |*u| u.* = false;
@ -553,7 +553,7 @@ fn renderValueC(bw: *Writer, name: []const u8, value: Value) !void {
.int => |i| try bw.print("#define {s} {d}\n", .{ name, i }),
.ident => |ident| try bw.print("#define {s} {s}\n", .{ name, ident }),
// TODO: use C-specific escaping instead of zig string literals
.string => |string| try bw.print("#define {s} \"{f}\"\n", .{ name, std.zig.fmtEscapes(string) }),
.string => |string| try bw.print("#define {s} \"{f}\"\n", .{ name, std.zig.fmtString(string) }),
}
}
@ -565,7 +565,7 @@ fn renderValueNasm(bw: *Writer, name: []const u8, value: Value) !void {
.int => |i| try bw.print("%define {s} {d}\n", .{ name, i }),
.ident => |ident| try bw.print("%define {s} {s}\n", .{ name, ident }),
// TODO: use nasm-specific escaping instead of zig string literals
.string => |string| try bw.print("%define {s} \"{f}\"\n", .{ name, std.zig.fmtEscapes(string) }),
.string => |string| try bw.print("%define {s} \"{f}\"\n", .{ name, std.zig.fmtString(string) }),
}
}
@ -753,17 +753,17 @@ fn testReplaceVariablesAutoconfAt(
expected: []const u8,
values: std.StringArrayHashMap(Value),
) !void {
var output: std.io.Writer.Allocating = .init(allocator);
defer output.deinit();
var aw: std.io.Writer.Allocating = .init(allocator);
defer aw.deinit();
const used = try allocator.alloc(bool, values.count());
for (used) |*u| u.* = false;
defer allocator.free(used);
try expand_variables_autoconf_at(&output.interface, contents, values, used);
try expand_variables_autoconf_at(&aw.writer, contents, values, used);
for (used) |u| if (!u) return error.UnusedValue;
try std.testing.expectEqualStrings(expected, output.getWritten());
try std.testing.expectEqualStrings(expected, aw.getWritten());
}
fn testReplaceVariablesCMake(

View File

@ -62,7 +62,7 @@ fn printType(
for (value) |slice| {
try out.appendNTimes(gpa, ' ', indent);
try out.print(gpa, " \"{f}\",\n", .{std.zig.fmtEscapes(slice)});
try out.print(gpa, " \"{f}\",\n", .{std.zig.fmtString(slice)});
}
if (name != null) {
@ -76,28 +76,28 @@ fn printType(
[]const u8 => {
if (name) |some| {
try out.print(gpa, "pub const {f}: []const u8 = \"{f}\";", .{
std.zig.fmtId(some), std.zig.fmtEscapes(value),
std.zig.fmtId(some), std.zig.fmtString(value),
});
} else {
try out.print(gpa, "\"{f}\",", .{std.zig.fmtEscapes(value)});
try out.print(gpa, "\"{f}\",", .{std.zig.fmtString(value)});
}
return out.appendSlice(gpa, "\n");
},
[:0]const u8 => {
if (name) |some| {
try out.print(gpa, "pub const {f}: [:0]const u8 = \"{f}\";", .{ std.zig.fmtId(some), std.zig.fmtEscapes(value) });
try out.print(gpa, "pub const {f}: [:0]const u8 = \"{f}\";", .{ std.zig.fmtId(some), std.zig.fmtString(value) });
} else {
try out.print(gpa, "\"{f}\",", .{std.zig.fmtEscapes(value)});
try out.print(gpa, "\"{f}\",", .{std.zig.fmtString(value)});
}
return out.appendSlice(gpa, "\n");
},
?[]const u8 => {
if (name) |some| {
try out.print(gpa, "pub const {}: ?[]const u8 = ", .{std.zig.fmtId(some)});
try out.print(gpa, "pub const {f}: ?[]const u8 = ", .{std.zig.fmtId(some)});
}
if (value) |payload| {
try out.print(gpa, "\"{f}\"", .{std.zig.fmtEscapes(payload)});
try out.print(gpa, "\"{f}\"", .{std.zig.fmtString(payload)});
} else {
try out.appendSlice(gpa, "null");
}
@ -111,11 +111,11 @@ fn printType(
},
?[:0]const u8 => {
if (name) |some| {
try out.print(gpa, "pub const {}: ?[:0]const u8 = ", .{std.zig.fmtId(some)});
try out.print(gpa, "pub const {f}: ?[:0]const u8 = ", .{std.zig.fmtId(some)});
}
if (value) |payload| {
try out.print(gpa, "\"{f}\"", .{std.zig.fmtEscapes(payload)});
try out.print(gpa, "\"{f}\"", .{std.zig.fmtString(payload)});
} else {
try out.appendSlice(gpa, "null");
}
@ -142,11 +142,11 @@ fn printType(
if (value.pre) |some| {
try out.appendNTimes(gpa, ' ', indent);
try out.print(gpa, " .pre = \"{f}\",\n", .{std.zig.fmtEscapes(some)});
try out.print(gpa, " .pre = \"{f}\",\n", .{std.zig.fmtString(some)});
}
if (value.build) |some| {
try out.appendNTimes(gpa, ' ', indent);
try out.print(gpa, " .build = \"{f}\",\n", .{std.zig.fmtEscapes(some)});
try out.print(gpa, " .build = \"{f}\",\n", .{std.zig.fmtString(some)});
}
if (name != null) {
@ -162,7 +162,7 @@ fn printType(
switch (@typeInfo(T)) {
.array => {
if (name) |some| {
try out.print(gpa, "pub const {}: {s} = ", .{ std.zig.fmtId(some), @typeName(T) });
try out.print(gpa, "pub const {f}: {s} = ", .{ std.zig.fmtId(some), @typeName(T) });
}
try out.print(gpa, "{s} {{\n", .{@typeName(T)});
@ -186,7 +186,7 @@ fn printType(
}
if (name) |some| {
try out.print(gpa, "pub const {}: {s} = ", .{ std.zig.fmtId(some), @typeName(T) });
try out.print(gpa, "pub const {f}: {s} = ", .{ std.zig.fmtId(some), @typeName(T) });
}
try out.print(gpa, "&[_]{s} {{\n", .{@typeName(p.child)});
@ -206,7 +206,7 @@ fn printType(
},
.optional => {
if (name) |some| {
try out.print(gpa, "pub const {}: {s} = ", .{ std.zig.fmtId(some), @typeName(T) });
try out.print(gpa, "pub const {f}: {s} = ", .{ std.zig.fmtId(some), @typeName(T) });
}
if (value) |inner| {
@ -243,10 +243,10 @@ fn printType(
try printEnum(options, out, T, info, indent);
if (name) |some| {
try out.print(gpa, "pub const {f}: {f} = .{fp_};\n", .{
try out.print(gpa, "pub const {f}: {f} = .{f};\n", .{
std.zig.fmtId(some),
std.zig.fmtId(@typeName(T)),
std.zig.fmtId(@tagName(value)),
std.zig.fmtIdFlags(@tagName(value), .{ .allow_underscore = true, .allow_primitive = true }),
});
}
return;
@ -295,7 +295,9 @@ fn printEnum(
inline for (val.fields) |field| {
try out.appendNTimes(gpa, ' ', indent);
try out.print(gpa, " {fp} = {d},\n", .{ std.zig.fmtId(field.name), field.value });
try out.print(gpa, " {f} = {d},\n", .{
std.zig.fmtIdFlags(field.name, .{ .allow_primitive = true }), field.value,
});
}
if (!val.is_exhaustive) {
@ -313,7 +315,7 @@ fn printStruct(options: *Options, out: *std.ArrayListUnmanaged(u8), comptime T:
if (gop.found_existing) return;
try out.appendNTimes(gpa, ' ', indent);
try out.print(gpa, "pub const {} = ", .{std.zig.fmtId(@typeName(T))});
try out.print(gpa, "pub const {f} = ", .{std.zig.fmtId(@typeName(T))});
switch (val.layout) {
.@"extern" => try out.appendSlice(gpa, "extern struct"),
@ -330,9 +332,15 @@ fn printStruct(options: *Options, out: *std.ArrayListUnmanaged(u8), comptime T:
// If the type name doesn't contains a '.' the type is from zig builtins.
if (std.mem.containsAtLeast(u8, type_name, 1, ".")) {
try out.print(gpa, " {p_}: {}", .{ std.zig.fmtId(field.name), std.zig.fmtId(type_name) });
try out.print(gpa, " {f}: {f}", .{
std.zig.fmtIdFlags(field.name, .{ .allow_underscore = true, .allow_primitive = true }),
std.zig.fmtId(type_name),
});
} else {
try out.print(gpa, " {p_}: {s}", .{ std.zig.fmtId(field.name), type_name });
try out.print(gpa, " {f}: {s}", .{
std.zig.fmtIdFlags(field.name, .{ .allow_underscore = true, .allow_primitive = true }),
type_name,
});
}
if (field.defaultValue()) |default_value| {
@ -377,7 +385,9 @@ fn printStructValue(
} else {
inline for (struct_val.fields) |field| {
try out.appendNTimes(gpa, ' ', indent);
try out.print(gpa, " .{p_} = ", .{std.zig.fmtId(field.name)});
try out.print(gpa, " .{f} = ", .{
std.zig.fmtIdFlags(field.name, .{ .allow_primitive = true, .allow_underscore = true }),
});
const field_name = @field(val, field.name);
switch (@typeInfo(@TypeOf(field_name))) {
@ -405,7 +415,8 @@ pub fn addOptionPath(
name: []const u8,
path: LazyPath,
) void {
options.args.append(.{
const arena = options.step.owner.allocator;
options.args.append(arena, .{
.name = options.step.owner.dupe(name),
.path = path.dupe(options.step.owner),
}) catch @panic("OOM");

View File

@ -1015,16 +1015,17 @@ fn populateGeneratedPaths(
}
}
fn formatTerm(term: ?std.process.Child.Term, w: *std.io.Writer, comptime fmt: []const u8) !void {
comptime assert(fmt.len == 0);
fn formatTerm(term: ?std.process.Child.Term, w: *std.io.Writer) std.io.Writer.Error!void {
if (term) |t| switch (t) {
.Exited => |code| try w.print("exited with code {}", .{code}),
.Signal => |sig| try w.print("terminated with signal {}", .{sig}),
.Stopped => |sig| try w.print("stopped with signal {}", .{sig}),
.Unknown => |code| try w.print("terminated for unknown reason with code {}", .{code}),
} else try w.writeAll("exited with any code");
.Exited => |code| try w.print("exited with code {d}", .{code}),
.Signal => |sig| try w.print("terminated with signal {d}", .{sig}),
.Stopped => |sig| try w.print("stopped with signal {d}", .{sig}),
.Unknown => |code| try w.print("terminated for unknown reason with code {d}", .{code}),
} else {
try w.writeAll("exited with any code");
}
}
fn fmtTerm(term: ?std.process.Child.Term) std.fmt.Formatter(formatTerm) {
fn fmtTerm(term: ?std.process.Child.Term) std.fmt.Formatter(?std.process.Child.Term, formatTerm) {
return .{ .data = term };
}

View File

@ -659,7 +659,7 @@ const Os = switch (builtin.os.tag) {
path.root_dir.handle.fd
else
posix.openat(path.root_dir.handle.fd, path.sub_path, dir_open_flags, 0) catch |err| {
fatal("failed to open directory {}: {s}", .{ path, @errorName(err) });
fatal("failed to open directory {f}: {s}", .{ path, @errorName(err) });
};
// Empirically the dir has to stay open or else no events are triggered.
errdefer if (!skip_open_dir) posix.close(dir_fd);

View File

@ -150,15 +150,10 @@ fn parseNum(text: []const u8) error{ InvalidVersion, Overflow }!usize {
};
}
pub fn format(
self: Version,
bw: *std.io.Writer,
comptime fmt: []const u8,
) !void {
if (fmt.len != 0) std.fmt.invalidFmtError(fmt, self);
try bw.print("{d}.{d}.{d}", .{ self.major, self.minor, self.patch });
if (self.pre) |pre| try bw.print("-{s}", .{pre});
if (self.build) |build| try bw.print("+{s}", .{build});
pub fn format(self: Version, w: *std.io.Writer) std.io.Writer.Error!void {
try w.print("{d}.{d}.{d}", .{ self.major, self.minor, self.patch });
if (self.pre) |pre| try w.print("-{s}", .{pre});
if (self.build) |build| try w.print("+{s}", .{build});
}
const expect = std.testing.expect;
@ -200,7 +195,7 @@ test format {
"1.0.0+0.build.1-rc.10000aaa-kk-0.1",
"5.4.0-1018-raspi",
"5.7.123",
}) |valid| try std.testing.expectFmt(valid, "{}", .{try parse(valid)});
}) |valid| try std.testing.expectFmt(valid, "{f}", .{try parse(valid)});
// Invalid version strings should be rejected.
for ([_][]const u8{
@ -267,12 +262,12 @@ test format {
// Valid version string that may overflow.
const big_valid = "99999999999999999999999.999999999999999999.99999999999999999";
if (parse(big_valid)) |ver| {
try std.testing.expectFmt(big_valid, "{}", .{ver});
try std.testing.expectFmt(big_valid, "{f}", .{ver});
} else |err| try expect(err == error.Overflow);
// Invalid version string that may overflow.
const big_invalid = "99999999999999999999999.999999999999999999.99999999999999999----RC-SNAPSHOT.12.09.1--------------------------------..12";
if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {}", .{ver}) else |_| {}
if (parse(big_invalid)) |ver| std.debug.panic("expected error, found {f}", .{ver}) else |_| {}
}
test "precedence" {

View File

@ -301,24 +301,13 @@ pub const Os = struct {
/// This function is defined to serialize a Zig source code representation of this
/// type, that, when parsed, will deserialize into the same data.
pub fn format(ver: WindowsVersion, bw: *std.io.Writer, comptime fmt_str: []const u8) std.io.Writer.Error!void {
const maybe_name = std.enums.tagName(WindowsVersion, ver);
if (comptime std.mem.eql(u8, fmt_str, "s")) {
if (maybe_name) |name|
try bw.print(".{s}", .{name})
else
try bw.print(".{d}", .{@intFromEnum(ver)});
} else if (comptime std.mem.eql(u8, fmt_str, "c")) {
if (maybe_name) |name|
try bw.print(".{s}", .{name})
else
try bw.print("@enumFromInt(0x{X:0>8})", .{@intFromEnum(ver)});
} else if (fmt_str.len == 0) {
if (maybe_name) |name|
try bw.print("WindowsVersion.{s}", .{name})
else
try bw.print("WindowsVersion(0x{X:0>8})", .{@intFromEnum(ver)});
} else std.fmt.invalidFmtError(fmt_str, ver);
pub fn format(wv: WindowsVersion, w: *std.io.Writer) std.io.Writer.Error!void {
if (std.enums.tagName(WindowsVersion, wv)) |name| {
var vecs: [2][]const u8 = .{ ".", name };
return w.writeVecAll(&vecs);
} else {
return w.print("@enumFromInt(0x{X:0>8})", .{wv});
}
}
};
@ -1686,7 +1675,7 @@ pub const Cpu = struct {
pub fn fromCallingConvention(cc: std.builtin.CallingConvention.Tag) []const Arch {
return switch (cc) {
.auto,
.@"async",
.async,
.naked,
.@"inline",
=> unreachable,

View File

@ -165,10 +165,18 @@ pub fn setName(self: Thread, name: []const u8) SetNameError!void {
const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()});
const file = try std.fs.cwd().openFile(path, .{ .mode = .write_only });
defer file.close();
<<<<<<< HEAD
var fw = file.writer(&.{});
fw.interface.writeAll(name) catch |err| switch (err) {
error.WriteFailed => return fw.err.?,
};
||||||| edf785db0f
try file.writer().writeAll(name);
=======
try file.deprecatedWriter().writeAll(name);
>>>>>>> origin/master
return;
},
.windows => {
@ -280,11 +288,23 @@ pub fn getName(self: Thread, buffer_ptr: *[max_name_len:0]u8) GetNameError!?[]co
const path = try std.fmt.bufPrint(&buf, "/proc/self/task/{d}/comm", .{self.getHandle()});
const file = try std.fs.cwd().openFile(path, .{});
defer file.close();
<<<<<<< HEAD
var fr = file.reader(&.{});
const n = fr.interface.readSliceShort(buffer_ptr[0 .. max_name_len + 1]) catch |err| switch (err) {
error.ReadFailed => return fr.err.?,
};
return if (n == 0) null else buffer[0 .. n - 1];
||||||| edf785db0f
const data_len = try file.reader().readAll(buffer_ptr[0 .. max_name_len + 1]);
return if (data_len >= 1) buffer[0 .. data_len - 1] else null;
=======
const data_len = try file.deprecatedReader().readAll(buffer_ptr[0 .. max_name_len + 1]);
return if (data_len >= 1) buffer[0 .. data_len - 1] else null;
>>>>>>> origin/master
},
.windows => {
const buf_capacity = @sizeOf(windows.UNICODE_STRING) + (@sizeOf(u16) * max_name_len);
@ -1164,7 +1184,7 @@ const LinuxThreadImpl = struct {
fn getCurrentId() Id {
return tls_thread_id orelse {
const tid = @as(u32, @bitCast(linux.gettid()));
const tid: u32 = @bitCast(linux.gettid());
tls_thread_id = tid;
return tid;
};

View File

@ -3,11 +3,9 @@
const std = @import("std.zig");
const testing = std.testing;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Writer = std.io.Writer;
const Uri = @This();
const Allocator = std.mem.Allocator;
const Writer = std.io.Writer;
scheme: []const u8,
user: ?Component = null,
@ -65,7 +63,7 @@ pub const Component = union(enum) {
return switch (component) {
.raw => |raw| raw,
.percent_encoded => |percent_encoded| if (std.mem.indexOfScalar(u8, percent_encoded, '%')) |_|
try std.fmt.bufPrint(buffer, "{fraw}", .{component})
try std.fmt.bufPrint(buffer, "{f}", .{std.fmt.alt(component, .formatRaw)})
else
percent_encoded,
};
@ -85,16 +83,9 @@ pub const Component = union(enum) {
};
}
pub fn format(component: Component, bw: *Writer, comptime fmt: []const u8) Writer.Error!void {
if (fmt.len == 0) {
try bw.print("std.Uri.Component{{ .{s} = \"{}\" }}", .{
@tagName(component),
std.zig.fmtEscapes(switch (component) {
.raw, .percent_encoded => |string| string,
}),
});
} else if (comptime std.mem.eql(u8, fmt, "raw")) switch (component) {
.raw => |raw| try bw.writeAll(raw),
pub fn formatRaw(component: Component, w: *Writer) Writer.Error!void {
switch (component) {
.raw => |raw| try w.writeAll(raw),
.percent_encoded => |percent_encoded| {
var start: usize = 0;
var index: usize = 0;
@ -103,51 +94,75 @@ pub const Component = union(enum) {
if (percent_encoded.len - index < 2) continue;
const percent_encoded_char =
std.fmt.parseInt(u8, percent_encoded[index..][0..2], 16) catch continue;
try bw.print("{s}{c}", .{
try w.print("{s}{c}", .{
percent_encoded[start..percent],
percent_encoded_char,
});
start = percent + 3;
index = percent + 3;
}
try bw.writeAll(percent_encoded[start..]);
try w.writeAll(percent_encoded[start..]);
},
} else if (comptime std.mem.eql(u8, fmt, "%")) switch (component) {
.raw => |raw| try percentEncode(bw, raw, isUnreserved),
.percent_encoded => |percent_encoded| try bw.writeAll(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "user")) switch (component) {
.raw => |raw| try percentEncode(bw, raw, isUserChar),
.percent_encoded => |percent_encoded| try bw.writeAll(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "password")) switch (component) {
.raw => |raw| try percentEncode(bw, raw, isPasswordChar),
.percent_encoded => |percent_encoded| try bw.writeAll(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "host")) switch (component) {
.raw => |raw| try percentEncode(bw, raw, isHostChar),
.percent_encoded => |percent_encoded| try bw.writeAll(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "path")) switch (component) {
.raw => |raw| try percentEncode(bw, raw, isPathChar),
.percent_encoded => |percent_encoded| try bw.writeAll(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "query")) switch (component) {
.raw => |raw| try percentEncode(bw, raw, isQueryChar),
.percent_encoded => |percent_encoded| try bw.writeAll(percent_encoded),
} else if (comptime std.mem.eql(u8, fmt, "fragment")) switch (component) {
.raw => |raw| try percentEncode(bw, raw, isFragmentChar),
.percent_encoded => |percent_encoded| try bw.writeAll(percent_encoded),
} else @compileError("invalid format string '" ++ fmt ++ "'");
}
}
pub fn percentEncode(
bw: *Writer,
raw: []const u8,
comptime isValidChar: fn (u8) bool,
) Writer.Error!void {
pub fn formatEscaped(component: Component, w: *Writer) Writer.Error!void {
switch (component) {
.raw => |raw| try percentEncode(w, raw, isUnreserved),
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
}
}
pub fn formatUser(component: Component, w: *Writer) Writer.Error!void {
switch (component) {
.raw => |raw| try percentEncode(w, raw, isUserChar),
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
}
}
pub fn formatPassword(component: Component, w: *Writer) Writer.Error!void {
switch (component) {
.raw => |raw| try percentEncode(w, raw, isPasswordChar),
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
}
}
pub fn formatHost(component: Component, w: *Writer) Writer.Error!void {
switch (component) {
.raw => |raw| try percentEncode(w, raw, isHostChar),
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
}
}
pub fn formatPath(component: Component, w: *Writer) Writer.Error!void {
switch (component) {
.raw => |raw| try percentEncode(w, raw, isPathChar),
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
}
}
pub fn formatQuery(component: Component, w: *Writer) Writer.Error!void {
switch (component) {
.raw => |raw| try percentEncode(w, raw, isQueryChar),
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
}
}
pub fn formatFragment(component: Component, w: *Writer) Writer.Error!void {
switch (component) {
.raw => |raw| try percentEncode(w, raw, isFragmentChar),
.percent_encoded => |percent_encoded| try w.writeAll(percent_encoded),
}
}
pub fn percentEncode(w: *Writer, raw: []const u8, comptime isValidChar: fn (u8) bool) Writer.Error!void {
var start: usize = 0;
for (raw, 0..) |char, index| {
if (isValidChar(char)) continue;
try bw.print("{s}%{X:0>2}", .{ raw[start..index], char });
try w.print("{s}%{X:0>2}", .{ raw[start..index], char });
start = index + 1;
}
try bw.writeAll(raw[start..]);
try w.writeAll(raw[start..]);
}
};
@ -264,76 +279,91 @@ pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
return uri;
}
pub const WriteToStreamOptions = struct {
/// When true, include the scheme part of the URI.
scheme: bool = false,
/// When true, include the user and password part of the URI. Ignored if `authority` is false.
authentication: bool = false,
/// When true, include the authority part of the URI.
authority: bool = false,
/// When true, include the path part of the URI.
path: bool = false,
/// When true, include the query part of the URI. Ignored when `path` is false.
query: bool = false,
/// When true, include the fragment part of the URI. Ignored when `path` is false.
fragment: bool = false,
/// When true, include the port part of the URI. Ignored when `port` is null.
port: bool = true,
};
pub fn format(uri: *const Uri, writer: *Writer) Writer.Error!void {
return writeToStream(uri, writer, .all);
}
pub fn writeToStream(uri: Uri, options: WriteToStreamOptions, bw: *Writer) Writer.Error!void {
if (options.scheme) {
try bw.print("{s}:", .{uri.scheme});
if (options.authority and uri.host != null) {
try bw.writeAll("//");
pub fn writeToStream(uri: *const Uri, writer: *Writer, flags: Format.Flags) Writer.Error!void {
if (flags.scheme) {
try writer.print("{s}:", .{uri.scheme});
if (flags.authority and uri.host != null) {
try writer.writeAll("//");
}
}
if (options.authority) {
if (options.authentication and uri.host != null) {
if (flags.authority) {
if (flags.authentication and uri.host != null) {
if (uri.user) |user| {
try bw.print("{fuser}", .{user});
try user.formatUser(writer);
if (uri.password) |password| {
try bw.print(":{fpassword}", .{password});
try writer.writeByte(':');
try password.formatPassword(writer);
}
try bw.writeByte('@');
try writer.writeByte('@');
}
}
if (uri.host) |host| {
try bw.print("{fhost}", .{host});
if (options.port) {
if (uri.port) |port| try bw.print(":{d}", .{port});
try host.formatHost(writer);
if (flags.port) {
if (uri.port) |port| try writer.print(":{d}", .{port});
}
}
}
if (options.path) {
try bw.print("{fpath}", .{
if (uri.path.isEmpty()) Uri.Component{ .percent_encoded = "/" } else uri.path,
});
if (options.query) {
if (uri.query) |query| try bw.print("?{fquery}", .{query});
if (flags.path) {
const uri_path: Component = if (uri.path.isEmpty()) .{ .percent_encoded = "/" } else uri.path;
try uri_path.formatPath(writer);
if (flags.query) {
if (uri.query) |query| {
try writer.writeByte('?');
try query.formatQuery(writer);
}
}
if (options.fragment) {
if (uri.fragment) |fragment| try bw.print("#{ffragment}", .{fragment});
if (flags.fragment) {
if (uri.fragment) |fragment| {
try writer.writeByte('#');
try fragment.formatFragment(writer);
}
}
}
}
pub fn format(uri: Uri, bw: *Writer, comptime fmt: []const u8) Writer.Error!void {
const scheme = comptime std.mem.indexOfScalar(u8, fmt, ';') != null or fmt.len == 0;
const authentication = comptime std.mem.indexOfScalar(u8, fmt, '@') != null or fmt.len == 0;
const authority = comptime std.mem.indexOfScalar(u8, fmt, '+') != null or fmt.len == 0;
const path = comptime std.mem.indexOfScalar(u8, fmt, '/') != null or fmt.len == 0;
const query = comptime std.mem.indexOfScalar(u8, fmt, '?') != null or fmt.len == 0;
const fragment = comptime std.mem.indexOfScalar(u8, fmt, '#') != null or fmt.len == 0;
pub const Format = struct {
uri: *const Uri,
flags: Flags = .{},
return writeToStream(uri, .{
.scheme = scheme,
.authentication = authentication,
.authority = authority,
.path = path,
.query = query,
.fragment = fragment,
}, bw);
pub const Flags = struct {
/// When true, include the scheme part of the URI.
scheme: bool = false,
/// When true, include the user and password part of the URI. Ignored if `authority` is false.
authentication: bool = false,
/// When true, include the authority part of the URI.
authority: bool = false,
/// When true, include the path part of the URI.
path: bool = false,
/// When true, include the query part of the URI. Ignored when `path` is false.
query: bool = false,
/// When true, include the fragment part of the URI. Ignored when `path` is false.
fragment: bool = false,
/// When true, include the port part of the URI. Ignored when `port` is null.
port: bool = true,
pub const all: Flags = .{
.scheme = true,
.authentication = true,
.authority = true,
.path = true,
.query = true,
.fragment = true,
.port = true,
};
};
pub fn default(f: Format, writer: *Writer) Writer.Error!void {
return writeToStream(f.uri, writer, f.flags);
}
};
pub fn fmt(uri: *const Uri, flags: Format.Flags) std.fmt.Formatter(Format, Format.default) {
return .{ .data = .{ .uri = uri, .flags = flags } };
}
/// The return value will contain strings pointing into the original `text`.
@ -464,9 +494,8 @@ test remove_dot_segments {
fn merge_paths(base: Component, new: []u8, aux_buf: *[]u8) error{NoSpaceLeft}!Component {
var aux: Writer = .fixed(aux_buf.*);
if (!base.isEmpty()) {
aux.print("{fpath}", .{base}) catch return error.NoSpaceLeft;
aux.end = std.mem.lastIndexOfScalar(u8, aux.buffered(), '/') orelse
return remove_dot_segments(new);
base.formatPath(&aux) catch return error.NoSpaceLeft;
aux.end = std.mem.lastIndexOfScalar(u8, aux.buffered(), '/') orelse return remove_dot_segments(new);
}
aux.print("/{s}", .{new}) catch return error.NoSpaceLeft;
const merged_path = remove_dot_segments(aux.buffered());
@ -745,8 +774,11 @@ test "Special test" {
test "URI percent encoding" {
try std.testing.expectFmt(
"%5C%C3%B6%2F%20%C3%A4%C3%B6%C3%9F%20~~.adas-https%3A%2F%2Fcanvas%3A123%2F%23ads%26%26sad",
"{%}",
.{Component{ .raw = "\\ö/ äöß ~~.adas-https://canvas:123/#ads&&sad" }},
"{f}",
.{std.fmt.alt(
@as(Component, .{ .raw = "\\ö/ äöß ~~.adas-https://canvas:123/#ads&&sad" }),
.formatEscaped,
)},
);
}
@ -755,7 +787,10 @@ test "URI percent decoding" {
const expected = "\\ö/ äöß ~~.adas-https://canvas:123/#ads&&sad";
var input = "%5C%C3%B6%2F%20%C3%A4%C3%B6%C3%9F%20~~.adas-https%3A%2F%2Fcanvas%3A123%2F%23ads%26%26sad".*;
try std.testing.expectFmt(expected, "{fraw}", .{Component{ .percent_encoded = &input }});
try std.testing.expectFmt(expected, "{f}", .{std.fmt.alt(
@as(Component, .{ .percent_encoded = &input }),
.formatRaw,
)});
var output: [expected.len]u8 = undefined;
try std.testing.expectEqualStrings(percentDecodeBackwards(&output, &input), expected);
@ -767,7 +802,10 @@ test "URI percent decoding" {
const expected = "/abc%";
var input = expected.*;
try std.testing.expectFmt(expected, "{fraw}", .{Component{ .percent_encoded = &input }});
try std.testing.expectFmt(expected, "{f}", .{std.fmt.alt(
@as(Component, .{ .percent_encoded = &input }),
.formatRaw,
)});
var output: [expected.len]u8 = undefined;
try std.testing.expectEqualStrings(percentDecodeBackwards(&output, &input), expected);
@ -781,7 +819,9 @@ test "URI query encoding" {
const parsed = try Uri.parse(address);
// format the URI to percent encode it
try std.testing.expectFmt("/?response-content-type=application%2Foctet-stream", "{/?}", .{parsed});
try std.testing.expectFmt("/?response-content-type=application%2Foctet-stream", "{f}", .{
parsed.fmt(.{ .path = true, .query = true }),
});
}
test "format" {
@ -795,7 +835,9 @@ test "format" {
.query = null,
.fragment = null,
};
try std.testing.expectFmt("file:/foo/bar/baz", "{;/?#}", .{uri});
try std.testing.expectFmt("file:/foo/bar/baz", "{f}", .{
uri.fmt(.{ .scheme = true, .path = true, .query = true, .fragment = true }),
});
}
test "URI malformed input" {

View File

@ -339,9 +339,10 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
}
pub fn print(self: *Self, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
const gpa = self.allocator;
var unmanaged = self.moveToUnmanaged();
try unmanaged.print(self.allocator, fmt, args);
self.* = unmanaged.toManaged(self.allocator);
defer self.* = unmanaged.toManaged(gpa);
try unmanaged.print(gpa, fmt, args);
}
/// Append a value to the list `n` times.
@ -907,7 +908,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
try self.ensureUnusedCapacity(gpa, fmt.len);
var aw: std.io.Writer.Allocating = .fromArrayList(gpa, self);
defer self.* = aw.toArrayList();
return aw.interface.print(fmt, args) catch |err| switch (err) {
return aw.writer.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.OutOfMemory,
};
}

View File

@ -10,6 +10,10 @@
const std = @import("std");
pub const lowercase = "abcdefghijklmnopqrstuvwxyz";
pub const uppercase = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
pub const letters = lowercase ++ uppercase;
/// The C0 control codes of the ASCII encoding.
///
/// See also: https://en.wikipedia.org/wiki/C0_and_C1_control_codes and `isControl`
@ -435,3 +439,44 @@ pub fn orderIgnoreCase(lhs: []const u8, rhs: []const u8) std.math.Order {
pub fn lessThanIgnoreCase(lhs: []const u8, rhs: []const u8) bool {
return orderIgnoreCase(lhs, rhs) == .lt;
}
pub const HexEscape = struct {
bytes: []const u8,
charset: *const [16]u8,
pub const upper_charset = "0123456789ABCDEF";
pub const lower_charset = "0123456789abcdef";
pub fn format(se: HexEscape, w: *std.io.Writer) std.io.Writer.Error!void {
const charset = se.charset;
var buf: [4]u8 = undefined;
buf[0] = '\\';
buf[1] = 'x';
for (se.bytes) |c| {
if (std.ascii.isPrint(c)) {
try w.writeByte(c);
} else {
buf[2] = charset[c >> 4];
buf[3] = charset[c & 15];
try w.writeAll(&buf);
}
}
}
};
/// Replaces non-ASCII bytes with hex escapes.
pub fn hexEscape(bytes: []const u8, case: std.fmt.Case) std.fmt.Formatter(HexEscape, HexEscape.format) {
return .{ .data = .{ .bytes = bytes, .charset = switch (case) {
.lower => HexEscape.lower_charset,
.upper => HexEscape.upper_charset,
} } };
}
test hexEscape {
try std.testing.expectFmt("abc 123", "{f}", .{hexEscape("abc 123", .lower)});
try std.testing.expectFmt("ab\\xffc", "{f}", .{hexEscape("ab\xffc", .lower)});
try std.testing.expectFmt("abc 123", "{f}", .{hexEscape("abc 123", .upper)});
try std.testing.expectFmt("ab\\xFFc", "{f}", .{hexEscape("ab\xffc", .upper)});
}

View File

@ -108,7 +108,7 @@ pub const Base64Encoder = struct {
}
}
// dest must be compatible with std.io.Writer's writeAll interface
// dest must be compatible with std.io.GenericWriter's writeAll interface
pub fn encodeWriter(encoder: *const Base64Encoder, dest: anytype, source: []const u8) !void {
var chunker = window(u8, source, 3, 3);
while (chunker.next()) |chunk| {
@ -118,8 +118,8 @@ pub const Base64Encoder = struct {
}
}
// destWriter must be compatible with std.io.Writer's writeAll interface
// sourceReader must be compatible with std.io.Reader's read interface
// destWriter must be compatible with std.io.GenericWriter's writeAll interface
// sourceReader must be compatible with `std.io.GenericReader` read interface
pub fn encodeFromReaderToWriter(encoder: *const Base64Encoder, destWriter: anytype, sourceReader: anytype) !void {
while (true) {
var tempSource: [3]u8 = undefined;

View File

@ -277,7 +277,7 @@ pub fn BoundedArrayAligned(
@compileError("The Writer interface is only defined for BoundedArray(u8, ...) " ++
"but the given type is BoundedArray(" ++ @typeName(T) ++ ", ...)")
else
std.io.Writer(*Self, error{Overflow}, appendWrite);
std.io.GenericWriter(*Self, error{Overflow}, appendWrite);
/// Initializes a writer which will write into the array.
pub fn writer(self: *Self) Writer {
@ -285,7 +285,7 @@ pub fn BoundedArrayAligned(
}
/// Same as `appendSlice` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.Writer` API.
/// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
fn appendWrite(self: *Self, m: []const u8) error{Overflow}!usize {
try self.appendSlice(m);
return m.len;

View File

@ -34,23 +34,21 @@ pub const StackTrace = struct {
index: usize,
instruction_addresses: []usize,
pub fn format(st: StackTrace, bw: *std.io.Writer, comptime fmt: []const u8) !void {
comptime if (fmt.len != 0) unreachable;
pub fn format(self: StackTrace, writer: *std.io.Writer) std.io.Writer.Error!void {
// TODO: re-evaluate whether to use format() methods at all.
// Until then, avoid an error when using DebugAllocator with WebAssembly
// where it tries to call detectTTYConfig here.
if (builtin.os.tag == .freestanding) return 0;
const debug_info = std.debug.getSelfDebugInfo() catch |err| {
return bw.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{
return writer.print("\nUnable to print stack trace: Unable to open debug info: {s}\n", .{
@errorName(err),
});
};
const tty_config = std.io.tty.detectConfig(.stderr());
try bw.writeAll("\n");
std.debug.writeStackTrace(st, bw, debug_info, tty_config) catch |err| {
try bw.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
const tty_config = std.io.tty.detectConfig(std.fs.File.stderr());
try writer.writeAll("\n");
std.debug.writeStackTrace(self, writer, debug_info, tty_config) catch |err| {
try writer.print("Unable to print stack trace: {s}\n", .{@errorName(err)});
};
}
};
@ -195,8 +193,6 @@ pub const CallingConvention = union(enum(u8)) {
pub const C: CallingConvention = .c;
/// Deprecated; use `.naked`.
pub const Naked: CallingConvention = .naked;
/// Deprecated; use `.@"async"`.
pub const Async: CallingConvention = .@"async";
/// Deprecated; use `.@"inline"`.
pub const Inline: CallingConvention = .@"inline";
/// Deprecated; use `.x86_64_interrupt`, `.x86_interrupt`, or `.avr_interrupt`.
@ -244,7 +240,7 @@ pub const CallingConvention = union(enum(u8)) {
/// The calling convention of a function that can be called with `async` syntax. An `async` call
/// of a runtime-known function must target a function with this calling convention.
/// Comptime-known functions with other calling conventions may be coerced to this one.
@"async",
async,
/// Functions with this calling convention have no prologue or epilogue, making the function
/// uncallable in regular Zig code. This can be useful when integrating with assembly.
@ -847,7 +843,7 @@ pub const LinkMode = enum {
pub const UnwindTables = enum {
none,
sync,
@"async",
async,
};
/// This data structure is used by the Zig language code generation and
@ -862,32 +858,23 @@ pub const WasiExecModel = enum {
pub const CallModifier = enum {
/// Equivalent to function call syntax.
auto,
/// Equivalent to async keyword used with function call syntax.
async_kw,
/// Prevents tail call optimization. This guarantees that the return
/// address will point to the callsite, as opposed to the callsite's
/// callsite. If the call is otherwise required to be tail-called
/// or inlined, a compile error is emitted instead.
never_tail,
/// Guarantees that the call will not be inlined. If the call is
/// otherwise required to be inlined, a compile error is emitted instead.
never_inline,
/// Asserts that the function call will not suspend. This allows a
/// non-async function to call an async function.
no_async,
no_suspend,
/// Guarantees that the call will be generated with tail call optimization.
/// If this is not possible, a compile error is emitted instead.
always_tail,
/// Guarantees that the call will be inlined at the callsite.
/// If this is not possible, a compile error is emitted instead.
always_inline,
/// Evaluates the call at compile-time. If the call cannot be completed at
/// compile-time, a compile error is emitted instead.
compile_time,

View File

@ -10412,7 +10412,10 @@ pub const sigfillset = switch (native_os) {
};
pub const sigaddset = private.sigaddset;
pub const sigemptyset = private.sigemptyset;
pub const sigemptyset = switch (native_os) {
.netbsd => private.__sigemptyset14,
else => private.sigemptyset,
};
pub const sigdelset = private.sigdelset;
pub const sigismember = private.sigismember;
@ -11268,6 +11271,7 @@ const private = struct {
extern "c" fn __msync13(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
extern "c" fn __nanosleep50(rqtp: *const timespec, rmtp: ?*timespec) c_int;
extern "c" fn __sigaction14(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
extern "c" fn __sigemptyset14(set: ?*sigset_t) c_int;
extern "c" fn __sigfillset14(set: ?*sigset_t) c_int;
extern "c" fn __sigprocmask14(how: c_int, noalias set: ?*const sigset_t, noalias oset: ?*sigset_t) c_int;
extern "c" fn __socket30(domain: c_uint, sock_type: c_uint, protocol: c_uint) c_int;

View File

@ -34,7 +34,7 @@ pub fn Decompress(comptime ReaderType: type) type {
const Self = @This();
pub const Error = ReaderType.Error || block.Decoder(ReaderType).Error;
pub const Reader = std.io.Reader(*Self, Error, read);
pub const Reader = std.io.GenericReader(*Self, Error, read);
allocator: Allocator,
block_decoder: block.Decoder(ReaderType),

View File

@ -27,7 +27,7 @@ pub fn Decoder(comptime ReaderType: type) type {
ReaderType.Error ||
DecodeError ||
Allocator.Error;
pub const Reader = std.io.Reader(*Self, Error, read);
pub const Reader = std.io.GenericReader(*Self, Error, read);
allocator: Allocator,
inner_reader: ReaderType,

View File

@ -45,7 +45,7 @@ pub fn prependSlice(self: *ArrayListReverse, data: []const u8) Error!void {
self.data.ptr = begin;
}
pub const Writer = std.io.Writer(*ArrayListReverse, Error, prependSliceSize);
pub const Writer = std.io.GenericWriter(*ArrayListReverse, Error, prependSliceSize);
/// Warning: This writer writes backwards. `fn print` will NOT work as expected.
pub fn writer(self: *ArrayListReverse) Writer {
return .{ .context = self };

View File

@ -383,12 +383,28 @@ fn Sha2x32(comptime iv: Iv32, digest_bits: comptime_int) type {
for (&d.s, v) |*dv, vv| dv.* +%= vv;
}
<<<<<<< HEAD
pub fn writer(this: *@This(), buffer: []u8) Writer {
return .{
.context = this,
.vtable = &.{ .drain = drain },
.buffer = buffer,
};
||||||| edf785db0f
pub const Error = error{};
pub const Writer = std.io.Writer(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);
return bytes.len;
=======
pub const Error = error{};
pub const Writer = std.io.GenericWriter(*Self, Error, write);
fn write(self: *Self, bytes: []const u8) Error!usize {
self.update(bytes);
return bytes.len;
>>>>>>> origin/master
}
fn drain(w: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize {

View File

@ -222,7 +222,8 @@ pub fn unlockStderrWriter() void {
/// Print to stderr, unbuffered, and silently returning on failure. Intended
/// for use in "printf debugging". Use `std.log` functions for proper logging.
pub fn print(comptime fmt: []const u8, args: anytype) void {
const bw = lockStderrWriter(&.{});
var buffer: [32]u8 = undefined;
const bw = lockStderrWriter(&buffer);
defer unlockStderrWriter();
nosuspend bw.print(fmt, args) catch return;
}
@ -307,7 +308,7 @@ test dumpHexFallible {
var aw: std.io.Writer.Allocating = .init(std.testing.allocator);
defer aw.deinit();
try dumpHexFallible(&aw.interface, .no_color, bytes);
try dumpHexFallible(&aw.writer, .no_color, bytes);
const expected = try std.fmt.allocPrint(std.testing.allocator,
\\{x:0>[2]} 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF .."3DUfw........
\\{x:0>[2]} 01 12 13 ...
@ -1228,9 +1229,9 @@ fn printLineFromFileAnyOs(writer: *Writer, source_location: SourceLocation) !voi
}
test printLineFromFileAnyOs {
var output = std.ArrayList(u8).init(std.testing.allocator);
defer output.deinit();
const output_stream = output.writer();
var aw: Writer.Allocating = .init(std.testing.allocator);
defer aw.deinit();
const output_stream = &aw.writer;
const allocator = std.testing.allocator;
const join = std.fs.path.join;
@ -1252,8 +1253,8 @@ test printLineFromFileAnyOs {
try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings("no new lines in this file, but one is printed anyway\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings("no new lines in this file, but one is printed anyway\n", aw.getWritten());
aw.clearRetainingCapacity();
}
{
const path = try fs.path.join(allocator, &.{ test_dir_path, "three_lines.zig" });
@ -1268,12 +1269,12 @@ test printLineFromFileAnyOs {
});
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings("1\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings("1\n", aw.getWritten());
aw.clearRetainingCapacity();
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 3, .column = 0 });
try expectEqualStrings("3\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings("3\n", aw.getWritten());
aw.clearRetainingCapacity();
}
{
const file = try test_dir.dir.createFile("line_overlaps_page_boundary.zig", .{});
@ -1282,14 +1283,17 @@ test printLineFromFileAnyOs {
defer allocator.free(path);
const overlap = 10;
var writer = file.writer();
var buf: [16]u8 = undefined;
var file_writer = file.writer(&buf);
const writer = &file_writer.interface;
try writer.splatByteAll('a', std.heap.page_size_min - overlap);
try writer.writeByte('\n');
try writer.splatByteAll('a', overlap);
try writer.flush();
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
try expectEqualStrings(("a" ** overlap) ++ "\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings(("a" ** overlap) ++ "\n", aw.getWritten());
aw.clearRetainingCapacity();
}
{
const file = try test_dir.dir.createFile("file_ends_on_page_boundary.zig", .{});
@ -1297,12 +1301,13 @@ test printLineFromFileAnyOs {
const path = try fs.path.join(allocator, &.{ test_dir_path, "file_ends_on_page_boundary.zig" });
defer allocator.free(path);
var writer = file.writer();
var file_writer = file.writer(&.{});
const writer = &file_writer.interface;
try writer.splatByteAll('a', std.heap.page_size_max);
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings(("a" ** std.heap.page_size_max) ++ "\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings(("a" ** std.heap.page_size_max) ++ "\n", aw.getWritten());
aw.clearRetainingCapacity();
}
{
const file = try test_dir.dir.createFile("very_long_first_line_spanning_multiple_pages.zig", .{});
@ -1310,24 +1315,25 @@ test printLineFromFileAnyOs {
const path = try fs.path.join(allocator, &.{ test_dir_path, "very_long_first_line_spanning_multiple_pages.zig" });
defer allocator.free(path);
var writer = file.writer();
var file_writer = file.writer(&.{});
const writer = &file_writer.interface;
try writer.splatByteAll('a', 3 * std.heap.page_size_max);
try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "\n", aw.getWritten());
aw.clearRetainingCapacity();
try writer.writeAll("a\na");
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "a\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "a\n", aw.getWritten());
aw.clearRetainingCapacity();
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
try expectEqualStrings("a\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings("a\n", aw.getWritten());
aw.clearRetainingCapacity();
}
{
const file = try test_dir.dir.createFile("file_of_newlines.zig", .{});
@ -1335,18 +1341,19 @@ test printLineFromFileAnyOs {
const path = try fs.path.join(allocator, &.{ test_dir_path, "file_of_newlines.zig" });
defer allocator.free(path);
var writer = file.writer();
var file_writer = file.writer(&.{});
const writer = &file_writer.interface;
const real_file_start = 3 * std.heap.page_size_min;
try writer.splatByteAll('\n', real_file_start);
try writer.writeAll("abc\ndef");
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = real_file_start + 1, .column = 0 });
try expectEqualStrings("abc\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings("abc\n", aw.getWritten());
aw.clearRetainingCapacity();
try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = real_file_start + 2, .column = 0 });
try expectEqualStrings("def\n", output.items);
output.clearRetainingCapacity();
try expectEqualStrings("def\n", aw.getWritten());
aw.clearRetainingCapacity();
}
}
@ -1597,10 +1604,10 @@ test "manage resources correctly" {
// self-hosted debug info is still too buggy
if (builtin.zig_backend != .stage2_llvm) return error.SkipZigTest;
const writer = std.io.null_writer;
var discarding: std.io.Writer.Discarding = .init(&.{});
var di = try SelfInfo.open(testing.allocator);
defer di.deinit();
try printSourceAtAddress(&di, writer, showMyTrace(), io.tty.detectConfig(.stderr()));
try printSourceAtAddress(&di, &discarding.writer, showMyTrace(), io.tty.detectConfig(.stderr()));
}
noinline fn showMyTrace() usize {

View File

@ -395,7 +395,7 @@ const Msf = struct {
streams: []MsfStream,
fn init(allocator: Allocator, file: File) !Msf {
const in = file.reader();
const in = file.deprecatedReader();
const superblock = try in.takeStruct(pdb.SuperBlock);
@ -514,7 +514,7 @@ const MsfStream = struct {
var offset = self.pos % self.block_size;
try self.in_file.seekTo(block * self.block_size + offset);
const in = self.in_file.reader();
const in = self.in_file.deprecatedReader();
var size: usize = 0;
var rem_buffer = buffer;
@ -562,7 +562,7 @@ const MsfStream = struct {
return block * self.block_size + offset;
}
pub fn reader(self: *MsfStream) std.io.Reader(*MsfStream, Error, read) {
pub fn reader(self: *MsfStream) std.io.GenericReader(*MsfStream, Error, read) {
return .{ .context = self };
}
};

View File

@ -508,6 +508,7 @@ pub const Header = struct {
};
}
<<<<<<< HEAD
pub const ReadError = std.io.Reader.Error || ParseError;
pub fn read(r: *std.io.Reader) ReadError!Header {
@ -515,6 +516,19 @@ pub const Header = struct {
const result = try parse(@ptrCast(buf));
r.toss(if (result.is_64) @sizeOf(Elf64_Ehdr) else @sizeOf(Elf32_Ehdr));
return result;
||||||| edf785db0f
pub fn read(parse_source: anytype) !Header {
var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined;
try parse_source.seekableStream().seekTo(0);
try parse_source.reader().readNoEof(&hdr_buf);
return Header.parse(&hdr_buf);
=======
pub fn read(parse_source: anytype) !Header {
var hdr_buf: [@sizeOf(Elf64_Ehdr)]u8 align(@alignOf(Elf64_Ehdr)) = undefined;
try parse_source.seekableStream().seekTo(0);
try parse_source.deprecatedReader().readNoEof(&hdr_buf);
return Header.parse(&hdr_buf);
>>>>>>> origin/master
}
pub const ParseError = error{
@ -590,14 +604,92 @@ pub const ProgramHeaderIterator = struct {
if (it.index >= it.elf_header.phnum) return null;
defer it.index += 1;
<<<<<<< HEAD
if (it.elf_header.is_64) {
var phdr: Elf64_Phdr = undefined;
const offset = it.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * it.index;
try it.file_reader.seekTo(offset);
try it.file_reader.interface.readSlice(@ptrCast(&phdr));
if (it.elf_header.endian != native_endian)
||||||| edf785db0f
if (self.elf_header.is_64) {
var phdr: Elf64_Phdr = undefined;
const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
try self.parse_source.seekableStream().seekTo(offset);
try self.parse_source.reader().readNoEof(mem.asBytes(&phdr));
// ELF endianness matches native endianness.
if (self.elf_header.endian == native_endian) return phdr;
// Convert fields to native endianness.
=======
if (self.elf_header.is_64) {
var phdr: Elf64_Phdr = undefined;
const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
try self.parse_source.seekableStream().seekTo(offset);
try self.parse_source.deprecatedReader().readNoEof(mem.asBytes(&phdr));
// ELF endianness matches native endianness.
if (self.elf_header.endian == native_endian) return phdr;
// Convert fields to native endianness.
>>>>>>> origin/master
mem.byteSwapAllFields(Elf64_Phdr, &phdr);
<<<<<<< HEAD
return phdr;
||||||| edf785db0f
return phdr;
}
var phdr: Elf32_Phdr = undefined;
const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
try self.parse_source.seekableStream().seekTo(offset);
try self.parse_source.reader().readNoEof(mem.asBytes(&phdr));
// ELF endianness does NOT match native endianness.
if (self.elf_header.endian != native_endian) {
// Convert fields to native endianness.
mem.byteSwapAllFields(Elf32_Phdr, &phdr);
}
// Convert 32-bit header to 64-bit.
return Elf64_Phdr{
.p_type = phdr.p_type,
.p_offset = phdr.p_offset,
.p_vaddr = phdr.p_vaddr,
.p_paddr = phdr.p_paddr,
.p_filesz = phdr.p_filesz,
.p_memsz = phdr.p_memsz,
.p_flags = phdr.p_flags,
.p_align = phdr.p_align,
};
=======
return phdr;
}
var phdr: Elf32_Phdr = undefined;
const offset = self.elf_header.phoff + @sizeOf(@TypeOf(phdr)) * self.index;
try self.parse_source.seekableStream().seekTo(offset);
try self.parse_source.deprecatedReader().readNoEof(mem.asBytes(&phdr));
// ELF endianness does NOT match native endianness.
if (self.elf_header.endian != native_endian) {
// Convert fields to native endianness.
mem.byteSwapAllFields(Elf32_Phdr, &phdr);
}
// Convert 32-bit header to 64-bit.
return Elf64_Phdr{
.p_type = phdr.p_type,
.p_offset = phdr.p_offset,
.p_vaddr = phdr.p_vaddr,
.p_paddr = phdr.p_paddr,
.p_filesz = phdr.p_filesz,
.p_memsz = phdr.p_memsz,
.p_flags = phdr.p_flags,
.p_align = phdr.p_align,
};
>>>>>>> origin/master
}
var phdr: Elf32_Phdr = undefined;
@ -624,9 +716,23 @@ pub const SectionHeaderIterator = struct {
file_reader: *std.fs.File.Reader,
index: usize = 0,
<<<<<<< HEAD
pub fn next(it: *SectionHeaderIterator) !?Elf64_Shdr {
if (it.index >= it.elf_header.shnum) return null;
defer it.index += 1;
||||||| edf785db0f
if (self.elf_header.is_64) {
var shdr: Elf64_Shdr = undefined;
const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
try self.parse_source.seekableStream().seekTo(offset);
try self.parse_source.reader().readNoEof(mem.asBytes(&shdr));
=======
if (self.elf_header.is_64) {
var shdr: Elf64_Shdr = undefined;
const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
try self.parse_source.seekableStream().seekTo(offset);
try self.parse_source.deprecatedReader().readNoEof(mem.asBytes(&shdr));
>>>>>>> origin/master
if (it.elf_header.is_64) {
var shdr: Elf64_Shdr = undefined;
@ -635,7 +741,65 @@ pub const SectionHeaderIterator = struct {
try it.file_reader.interface.readSlice(@ptrCast(&shdr));
if (it.elf_header.endian != native_endian)
mem.byteSwapAllFields(Elf64_Shdr, &shdr);
<<<<<<< HEAD
return shdr;
||||||| edf785db0f
return shdr;
}
var shdr: Elf32_Shdr = undefined;
const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
try self.parse_source.seekableStream().seekTo(offset);
try self.parse_source.reader().readNoEof(mem.asBytes(&shdr));
// ELF endianness does NOT match native endianness.
if (self.elf_header.endian != native_endian) {
// Convert fields to native endianness.
mem.byteSwapAllFields(Elf32_Shdr, &shdr);
}
// Convert 32-bit header to 64-bit.
return Elf64_Shdr{
.sh_name = shdr.sh_name,
.sh_type = shdr.sh_type,
.sh_flags = shdr.sh_flags,
.sh_addr = shdr.sh_addr,
.sh_offset = shdr.sh_offset,
.sh_size = shdr.sh_size,
.sh_link = shdr.sh_link,
.sh_info = shdr.sh_info,
.sh_addralign = shdr.sh_addralign,
.sh_entsize = shdr.sh_entsize,
};
=======
return shdr;
}
var shdr: Elf32_Shdr = undefined;
const offset = self.elf_header.shoff + @sizeOf(@TypeOf(shdr)) * self.index;
try self.parse_source.seekableStream().seekTo(offset);
try self.parse_source.deprecatedReader().readNoEof(mem.asBytes(&shdr));
// ELF endianness does NOT match native endianness.
if (self.elf_header.endian != native_endian) {
// Convert fields to native endianness.
mem.byteSwapAllFields(Elf32_Shdr, &shdr);
}
// Convert 32-bit header to 64-bit.
return Elf64_Shdr{
.sh_name = shdr.sh_name,
.sh_type = shdr.sh_type,
.sh_flags = shdr.sh_flags,
.sh_addr = shdr.sh_addr,
.sh_offset = shdr.sh_offset,
.sh_size = shdr.sh_size,
.sh_link = shdr.sh_link,
.sh_info = shdr.sh_info,
.sh_addralign = shdr.sh_addralign,
.sh_entsize = shdr.sh_entsize,
};
>>>>>>> origin/master
}
var shdr: Elf32_Shdr = undefined;

File diff suppressed because it is too large Load Diff

View File

@ -1,3 +1,20 @@
const builtin = @import("builtin");
const Os = std.builtin.Os;
const native_os = builtin.os.tag;
const is_windows = native_os == .windows;
const File = @This();
const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const posix = std.posix;
const io = std.io;
const math = std.math;
const assert = std.debug.assert;
const linux = std.os.linux;
const windows = std.os.windows;
const maxInt = std.math.maxInt;
const Alignment = std.mem.Alignment;
/// The OS-specific file descriptor or file handle.
handle: Handle,
@ -844,7 +861,7 @@ pub fn write(self: File, bytes: []const u8) WriteError!usize {
return posix.write(self.handle, bytes);
}
/// One-shot alternative to `std.io.Writer.writeAll` via `writer`.
/// Deprecated in favor of `Writer`.
pub fn writeAll(self: File, bytes: []const u8) WriteError!void {
var index: usize = 0;
while (index < bytes.len) {
@ -900,6 +917,8 @@ pub const Reader = struct {
file: File,
err: ?ReadError = null,
mode: Reader.Mode = .positional,
/// Tracks the true seek position in the file. To obtain the logical
/// position, subtract the buffer size from this value.
pos: u64 = 0,
size: ?u64 = null,
size_err: ?GetEndPosError = null,
@ -1008,7 +1027,7 @@ pub const Reader = struct {
};
var remaining = std.math.cast(u64, offset) orelse return seek_err;
while (remaining > 0) {
const n = discard(&r.interface, .limited(remaining)) catch |err| {
const n = discard(&r.interface, .limited64(remaining)) catch |err| {
r.seek_err = err;
return err;
};
@ -1043,7 +1062,7 @@ pub const Reader = struct {
const max_buffers_len = 16;
fn stream(io_reader: *std.io.Reader, w: *std.io.Writer, limit: std.io.Limit) std.io.Reader.StreamError!usize {
const r: *Reader = @fieldParentPtr("interface", io_reader);
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
switch (r.mode) {
.positional, .streaming => return w.sendFile(r, limit) catch |write_err| switch (write_err) {
error.Unimplemented => {
@ -1067,10 +1086,14 @@ pub const Reader = struct {
const n = posix.preadv(r.file.handle, dest, r.pos) catch |err| switch (err) {
error.Unseekable => {
r.mode = r.mode.toStreaming();
if (r.pos != 0) r.seekBy(@intCast(r.pos)) catch {
r.mode = .failure;
return error.ReadFailed;
};
const pos = r.pos;
if (pos != 0) {
r.pos = 0;
r.seekBy(@intCast(pos)) catch {
r.mode = .failure;
return error.ReadFailed;
};
}
return 0;
},
else => |e| {
@ -1113,7 +1136,7 @@ pub const Reader = struct {
}
fn discard(io_reader: *std.io.Reader, limit: std.io.Limit) std.io.Reader.Error!usize {
const r: *Reader = @fieldParentPtr("interface", io_reader);
const r: *Reader = @alignCast(@fieldParentPtr("interface", io_reader));
const file = r.file;
const pos = r.pos;
switch (r.mode) {
@ -1195,10 +1218,14 @@ pub const Reader = struct {
const n = r.file.pread(dest, r.pos) catch |err| switch (err) {
error.Unseekable => {
r.mode = r.mode.toStreaming();
if (r.pos != 0) r.seekBy(@intCast(r.pos)) catch {
r.mode = .failure;
return error.ReadFailed;
};
const pos = r.pos;
if (pos != 0) {
r.pos = 0;
r.seekBy(@intCast(pos)) catch {
r.mode = .failure;
return error.ReadFailed;
};
}
return 0;
},
else => |e| {
@ -1246,6 +1273,8 @@ pub const Writer = struct {
file: File,
err: ?WriteError = null,
mode: Writer.Mode = .positional,
/// Tracks the true seek position in the file. To obtain the logical
/// position, add the buffer size to this value.
pos: u64 = 0,
sendfile_err: ?SendfileError = null,
copy_file_range_err: ?CopyFileRangeError = null,
@ -1308,110 +1337,162 @@ pub const Writer = struct {
};
}
pub fn drain(io_writer: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const w: *Writer = @fieldParentPtr("interface", io_writer);
pub fn drain(io_w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
const handle = w.file.handle;
const buffered = io_writer.buffered();
var splat_buffer: [256]u8 = undefined;
if (is_windows) {
var i: usize = 0;
while (i < buffered.len) {
const n = windows.WriteFile(handle, buffered[i..], null) catch |err| {
w.err = err;
w.pos += i;
_ = io_writer.consume(i);
return error.WriteFailed;
};
i += n;
if (data.len > 0 and buffered.len - i < n) {
w.pos += i;
return io_writer.consume(i);
const buffered = io_w.buffered();
if (is_windows) switch (w.mode) {
.positional, .positional_reading => {
if (buffered.len != 0) {
const n = windows.WriteFile(handle, buffered, w.pos) catch |err| {
w.err = err;
return error.WriteFailed;
};
w.pos += n;
return io_w.consume(n);
}
}
if (i != 0 or data.len == 0 or (data.len == 1 and splat == 0)) {
w.pos += i;
return io_writer.consume(i);
}
const n = windows.WriteFile(handle, data[0], null) catch |err| {
w.err = err;
return 0;
};
w.pos += n;
return n;
}
if (data.len == 0) {
var i: usize = 0;
while (i < buffered.len) {
i += std.posix.write(handle, buffered) catch |err| {
for (data[0 .. data.len - 1]) |buf| {
if (buf.len == 0) continue;
const n = windows.WriteFile(handle, buf, w.pos) catch |err| {
w.err = err;
return error.WriteFailed;
};
w.pos += n;
return io_w.consume(n);
}
const pattern = data[data.len - 1];
if (pattern.len == 0 or splat == 0) return 0;
const n = windows.WriteFile(handle, pattern, w.pos) catch |err| {
w.err = err;
w.pos += i;
_ = io_writer.consume(i);
return error.WriteFailed;
};
}
w.pos += i;
return io_writer.consumeAll();
}
w.pos += n;
return io_w.consume(n);
},
.streaming, .streaming_reading => {
if (buffered.len != 0) {
const n = windows.WriteFile(handle, buffered, null) catch |err| {
w.err = err;
return error.WriteFailed;
};
w.pos += n;
return io_w.consume(n);
}
for (data[0 .. data.len - 1]) |buf| {
if (buf.len == 0) continue;
const n = windows.WriteFile(handle, buf, null) catch |err| {
w.err = err;
return error.WriteFailed;
};
w.pos += n;
return io_w.consume(n);
}
const pattern = data[data.len - 1];
if (pattern.len == 0 or splat == 0) return 0;
const n = windows.WriteFile(handle, pattern, null) catch |err| {
std.debug.print("windows write file failed3: {t}\n", .{err});
w.err = err;
return error.WriteFailed;
};
w.pos += n;
return io_w.consume(n);
},
.failure => return error.WriteFailed,
};
var iovecs: [max_buffers_len]std.posix.iovec_const = undefined;
var len: usize = 0;
if (buffered.len > 0) {
iovecs[len] = .{ .base = buffered.ptr, .len = buffered.len };
len += 1;
}
for (data) |d| {
for (data[0 .. data.len - 1]) |d| {
if (d.len == 0) continue;
if (iovecs.len - len == 0) break;
iovecs[len] = .{ .base = d.ptr, .len = d.len };
len += 1;
if (iovecs.len - len == 0) break;
}
switch (splat) {
0 => if (data[data.len - 1].len != 0) {
len -= 1;
const pattern = data[data.len - 1];
if (iovecs.len - len != 0) switch (splat) {
0 => {},
1 => if (pattern.len != 0) {
iovecs[len] = .{ .base = pattern.ptr, .len = pattern.len };
len += 1;
},
1 => {},
else => switch (data[data.len - 1].len) {
else => switch (pattern.len) {
0 => {},
1 => {
const splat_buffer_candidate = io_w.buffer[io_w.end..];
var backup_buffer: [64]u8 = undefined;
const splat_buffer = if (splat_buffer_candidate.len >= backup_buffer.len)
splat_buffer_candidate
else
&backup_buffer;
const memset_len = @min(splat_buffer.len, splat);
const buf = splat_buffer[0..memset_len];
@memset(buf, data[data.len - 1][0]);
iovecs[len - 1] = .{ .base = buf.ptr, .len = buf.len };
var remaining_splat = splat - buf.len;
while (remaining_splat > splat_buffer.len and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = splat_buffer.len };
remaining_splat -= splat_buffer.len;
len += 1;
}
if (remaining_splat > 0 and len < iovecs.len) {
iovecs[len] = .{ .base = &splat_buffer, .len = remaining_splat };
len += 1;
}
return std.posix.writev(handle, iovecs[0..len]) catch |err| {
w.err = err;
return error.WriteFailed;
};
},
else => for (0..splat - 1) |_| {
if (iovecs.len - len == 0) break;
iovecs[len] = .{ .base = data[data.len - 1].ptr, .len = data[data.len - 1].len };
@memset(buf, pattern[0]);
iovecs[len] = .{ .base = buf.ptr, .len = buf.len };
len += 1;
var remaining_splat = splat - buf.len;
while (remaining_splat > splat_buffer.len and iovecs.len - len != 0) {
assert(buf.len == splat_buffer.len);
iovecs[len] = .{ .base = splat_buffer.ptr, .len = splat_buffer.len };
len += 1;
remaining_splat -= splat_buffer.len;
}
if (remaining_splat > 0 and iovecs.len - len != 0) {
iovecs[len] = .{ .base = splat_buffer.ptr, .len = remaining_splat };
len += 1;
}
},
else => for (0..splat) |_| {
iovecs[len] = .{ .base = pattern.ptr, .len = pattern.len };
len += 1;
if (iovecs.len - len == 0) break;
},
},
}
const n = std.posix.writev(handle, iovecs[0..len]) catch |err| {
w.err = err;
return error.WriteFailed;
};
w.pos += n;
return io_writer.consume(n);
if (len == 0) return 0;
switch (w.mode) {
.positional, .positional_reading => {
const n = std.posix.pwritev(handle, iovecs[0..len], w.pos) catch |err| switch (err) {
error.Unseekable => {
w.mode = w.mode.toStreaming();
const pos = w.pos;
if (pos != 0) {
w.pos = 0;
w.seekTo(@intCast(pos)) catch {
w.mode = .failure;
return error.WriteFailed;
};
}
return 0;
},
else => |e| {
w.err = e;
return error.WriteFailed;
},
};
w.pos += n;
return io_w.consume(n);
},
.streaming, .streaming_reading => {
const n = std.posix.writev(handle, iovecs[0..len]) catch |err| {
w.err = err;
return error.WriteFailed;
};
w.pos += n;
return io_w.consume(n);
},
.failure => return error.WriteFailed,
}
}
pub fn sendFile(
io_writer: *std.io.Writer,
io_w: *std.io.Writer,
file_reader: *Reader,
limit: std.io.Limit,
) std.io.Writer.FileError!usize {
const w: *Writer = @fieldParentPtr("interface", io_writer);
const w: *Writer = @alignCast(@fieldParentPtr("interface", io_w));
const out_fd = w.file.handle;
const in_fd = file_reader.file.handle;
// TODO try using copy_file_range on FreeBSD
@ -1422,7 +1503,7 @@ pub const Writer = struct {
if (w.sendfile_err != null) break :sf;
// Linux sendfile does not support headers.
const buffered = limit.slice(file_reader.interface.buffer);
if (io_writer.end != 0 or buffered.len != 0) return drain(io_writer, &.{buffered}, 1);
if (io_w.end != 0 or buffered.len != 0) return drain(io_w, &.{buffered}, 1);
const max_count = 0x7ffff000; // Avoid EINVAL.
var off: std.os.linux.off_t = undefined;
const off_ptr: ?*std.os.linux.off_t, const count: usize = switch (file_reader.mode) {
@ -1446,10 +1527,14 @@ pub const Writer = struct {
const n = std.os.linux.wrapped.sendfile(out_fd, in_fd, off_ptr, count) catch |err| switch (err) {
error.Unseekable => {
file_reader.mode = file_reader.mode.toStreaming();
if (file_reader.pos != 0) file_reader.seekBy(@intCast(file_reader.pos)) catch {
file_reader.mode = .failure;
return error.ReadFailed;
};
const pos = file_reader.pos;
if (pos != 0) {
file_reader.pos = 0;
file_reader.seekBy(@intCast(pos)) catch {
file_reader.mode = .failure;
return error.ReadFailed;
};
}
return 0;
},
else => |e| {
@ -1465,21 +1550,21 @@ pub const Writer = struct {
w.pos += n;
return n;
}
const copy_file_range_fn = switch (native_os) {
const copy_file_range = switch (native_os) {
.freebsd => std.os.freebsd.copy_file_range,
.linux => if (std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 })) std.os.linux.wrapped.copy_file_range else null,
else => null,
.linux => if (std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 })) std.os.linux.wrapped.copy_file_range else {},
else => {},
};
if (copy_file_range_fn) |copy_file_range| cfr: {
if (@TypeOf(copy_file_range) != void) cfr: {
if (w.copy_file_range_err != null) break :cfr;
const buffered = limit.slice(file_reader.interface.buffer);
if (io_writer.end != 0 or buffered.len != 0) return drain(io_writer, &.{buffered}, 1);
if (io_w.end != 0 or buffered.len != 0) return drain(io_w, &.{buffered}, 1);
var off_in: i64 = undefined;
var off_out: i64 = undefined;
const off_in_ptr: ?*i64 = switch (file_reader.mode) {
.positional_reading, .streaming_reading => return error.Unimplemented,
.positional => p: {
off_in = file_reader.pos;
off_in = @intCast(file_reader.pos);
break :p &off_in;
},
.streaming => null,
@ -1488,7 +1573,7 @@ pub const Writer = struct {
const off_out_ptr: ?*i64 = switch (w.mode) {
.positional_reading, .streaming_reading => return error.Unimplemented,
.positional => p: {
off_out = w.pos;
off_out = @intCast(w.pos);
break :p &off_out;
},
.streaming => null,
@ -1542,19 +1627,35 @@ pub const Writer = struct {
}
pub fn seekTo(w: *Writer, offset: u64) SeekError!void {
if (w.seek_err) |err| return err;
switch (w.mode) {
.positional, .positional_reading => {
w.pos = offset;
},
.streaming, .streaming_reading => {
if (w.seek_err) |err| return err;
posix.lseek_SET(w.file.handle, offset) catch |err| {
w.seek_err = err;
return err;
};
w.pos = offset;
},
.failure => return w.seek_err.?,
}
}
pub const EndError = SetEndPosError || std.io.Writer.Error;
/// Flushes any buffered data and sets the end position of the file.
///
/// If not overwriting existing contents, then calling `interface.flush`
/// directly is sufficient.
///
/// Flush failure is handled by setting `err` so that it can be handled
/// along with other write failures.
pub fn end(w: *Writer) EndError!void {
try w.interface.flush();
return w.file.setEndPos(w.pos);
}
};
/// Defaults to positional reading; falls back to streaming.
@ -1568,9 +1669,10 @@ pub fn reader(file: File, buffer: []u8) Reader {
/// Positional is more threadsafe, since the global seek position is not
/// affected, but when such syscalls are not available, preemptively choosing
/// `Reader.Mode.streaming` will skip a failed syscall.
pub fn readerStreaming(file: File) Reader {
pub fn readerStreaming(file: File, buffer: []u8) Reader {
return .{
.file = file,
.interface = Reader.initInterface(buffer),
.mode = .streaming,
.seek_err = error.Unseekable,
};
@ -1753,20 +1855,3 @@ pub fn downgradeLock(file: File) LockError!void {
};
}
}
const builtin = @import("builtin");
const Os = std.builtin.Os;
const native_os = builtin.os.tag;
const is_windows = native_os == .windows;
const File = @This();
const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const posix = std.posix;
const io = std.io;
const math = std.math;
const assert = std.debug.assert;
const linux = std.os.linux;
const windows = std.os.windows;
const maxInt = std.math.maxInt;
const Alignment = std.mem.Alignment;

View File

@ -146,30 +146,28 @@ pub fn joinZ(allocator: Allocator, paths: []const []const u8) ![:0]u8 {
return out[0 .. out.len - 1 :0];
}
pub fn fmtJoin(paths: []const []const u8) std.fmt.Formatter(formatJoin) {
pub fn fmtJoin(paths: []const []const u8) std.fmt.Formatter([]const []const u8, formatJoin) {
return .{ .data = paths };
}
fn formatJoin(paths: []const []const u8, bw: *std.io.Writer, comptime fmt: []const u8) !void {
comptime assert(fmt.len == 0);
fn formatJoin(paths: []const []const u8, w: *std.io.Writer) std.io.Writer.Error!void {
const first_path_idx = for (paths, 0..) |p, idx| {
if (p.len != 0) break idx;
} else return;
try bw.writeAll(paths[first_path_idx]); // first component
try w.writeAll(paths[first_path_idx]); // first component
var prev_path = paths[first_path_idx];
for (paths[first_path_idx + 1 ..]) |this_path| {
if (this_path.len == 0) continue; // skip empty components
const prev_sep = isSep(prev_path[prev_path.len - 1]);
const this_sep = isSep(this_path[0]);
if (!prev_sep and !this_sep) {
try bw.writeByte(sep);
try w.writeByte(sep);
}
if (prev_sep and this_sep) {
try bw.writeAll(this_path[1..]); // skip redundant separator
try w.writeAll(this_path[1..]); // skip redundant separator
} else {
try bw.writeAll(this_path);
try w.writeAll(this_path);
}
prev_path = this_path;
}

View File

@ -1798,11 +1798,11 @@ test "walker" {
var num_walked: usize = 0;
while (try walker.next()) |entry| {
testing.expect(expected_basenames.has(entry.basename)) catch |err| {
std.debug.print("found unexpected basename: {s}\n", .{std.fmt.fmtSliceEscapeLower(entry.basename)});
std.debug.print("found unexpected basename: {f}\n", .{std.ascii.hexEscape(entry.basename, .lower)});
return err;
};
testing.expect(expected_paths.has(entry.path)) catch |err| {
std.debug.print("found unexpected path: {s}\n", .{std.fmt.fmtSliceEscapeLower(entry.path)});
std.debug.print("found unexpected path: {f}\n", .{std.ascii.hexEscape(entry.path, .lower)});
return err;
};
// make sure that the entry.dir is the containing dir

View File

@ -287,7 +287,7 @@ fn rawCAlloc(
) ?[*]u8 {
_ = context;
_ = return_address;
assert(alignment.compare(.lte, comptime .fromByteUnits(@alignOf(std.c.max_align_t))));
assert(alignment.compare(.lte, .of(std.c.max_align_t)));
// Note that this pointer cannot be aligncasted to max_align_t because if
// len is < max_align_t then the alignment can be smaller. For example, if
// max_align_t is 16, but the user requests 8 bytes, there is no built-in

View File

@ -42,7 +42,7 @@ pub const ArenaAllocator = struct {
data: usize,
node: std.SinglyLinkedList.Node = .{},
};
const BufNode_alignment: Alignment = .fromByteUnits(@alignOf(BufNode));
const BufNode_alignment: Alignment = .of(BufNode);
pub fn init(child_allocator: Allocator) ArenaAllocator {
return (State{}).promote(child_allocator);

View File

@ -1054,7 +1054,7 @@ const TraceKind = enum {
free,
};
const test_config = Config{};
const test_config: Config = .{};
test "small allocations - free in same order" {
var gpa = DebugAllocator(test_config){};

View File

@ -42,8 +42,8 @@ pub const Method = enum(u64) {
return x;
}
pub fn write(self: Method, w: anytype) !void {
const bytes = std.mem.asBytes(&@intFromEnum(self));
pub fn format(self: Method, w: *std.io.Writer) std.io.Writer.Error!void {
const bytes: []const u8 = @ptrCast(&@intFromEnum(self));
const str = std.mem.sliceTo(bytes, 0);
try w.writeAll(str);
}

View File

@ -920,7 +920,7 @@ pub const Request = struct {
.authority = connection.proxied,
.path = true,
.query = true,
}, w);
});
}
try w.writeByte(' ');
try w.writeAll(@tagName(r.version));
@ -1280,9 +1280,18 @@ pub const basic_authorization = struct {
}
pub fn valueLengthFromUri(uri: Uri) usize {
// TODO don't abuse formatted printing to count percent encoded characters
const user_len = std.fmt.count("{fuser}", .{uri.user orelse Uri.Component.empty});
const password_len = std.fmt.count("{fpassword}", .{uri.password orelse Uri.Component.empty});
const user: Uri.Component = uri.user orelse .empty;
const password: Uri.Component = uri.password orelse .empty;
var dw: std.io.Writer.Discarding = .init(&.{});
user.formatUser(&dw.writer) catch unreachable; // discarding
const user_len = dw.count + dw.writer.end;
dw.count = 0;
dw.writer.end = 0;
password.formatPassword(&dw.writer) catch unreachable; // discarding
const password_len = dw.count + dw.writer.end;
return valueLength(@intCast(user_len), @intCast(password_len));
}

View File

@ -405,10 +405,8 @@ test "general client/server API coverage" {
fn handleRequest(request: *http.Server.Request, listen_port: u16) !void {
const log = std.log.scoped(.server);
log.info("{} {s} {s}", .{
request.head.method,
@tagName(request.head.version),
request.head.target,
log.info("{f} {s} {s}", .{
request.head.method, @tagName(request.head.version), request.head.target,
});
const gpa = std.testing.allocator;

View File

@ -19,6 +19,12 @@ pub const Limit = enum(usize) {
return @enumFromInt(n);
}
/// Any value greater than `std.math.maxInt(usize)` is interpreted to mean
/// `.unlimited`.
pub fn limited64(n: u64) Limit {
return @enumFromInt(@min(n, std.math.maxInt(usize)));
}
pub fn countVec(data: []const []const u8) Limit {
var total: usize = 0;
for (data) |d| total += d.len;
@ -33,6 +39,10 @@ pub const Limit = enum(usize) {
return @min(n, @intFromEnum(l));
}
pub fn minInt64(l: Limit, n: u64) usize {
return @min(n, @intFromEnum(l));
}
pub fn slice(l: Limit, s: []u8) []u8 {
return s[0..l.minInt(s.len)];
}

View File

@ -0,0 +1,386 @@
context: *const anyopaque,
readFn: *const fn (context: *const anyopaque, buffer: []u8) anyerror!usize,
pub const Error = anyerror;
/// Performs a single low-level read through the type-erased vtable pointer.
///
/// Returns the number of bytes read. It may be less than buffer.len.
/// If the number of bytes read is 0, it means end of stream.
/// End of stream is not an error condition.
pub fn read(self: Self, buffer: []u8) anyerror!usize {
    return self.readFn(self.context, buffer);
}
/// Fills `buffer` as far as the stream allows. Returns the number of bytes
/// read; a result smaller than `buffer.len` means the stream reached the
/// end. Reaching the end of a stream is not an error condition.
pub fn readAll(self: Self, buffer: []u8) anyerror!usize {
    return self.readAtLeast(buffer, buffer.len);
}
/// Reads until at least `len` bytes of `buffer` are filled, invoking the
/// underlying read function the minimal number of times. Returns the number
/// of bytes read; a result smaller than `len` means the stream reached the
/// end. Reaching the end of the stream is not an error condition.
pub fn readAtLeast(self: Self, buffer: []u8, len: usize) anyerror!usize {
    assert(len <= buffer.len);
    var total: usize = 0;
    while (total < len) {
        const n = try self.read(buffer[total..]);
        if (n == 0) break; // end of stream
        total += n;
    }
    return total;
}
/// Fills `buf` completely, or returns `error.EndOfStream` if the stream
/// ends before `buf.len` bytes could be read.
pub fn readNoEof(self: Self, buf: []u8) anyerror!void {
    if (try self.readAll(buf) < buf.len) return error.EndOfStream;
}
/// Appends to the `std.ArrayList` contents by reading from the stream
/// until end of stream is found.
/// If the number of bytes appended would exceed `max_append_size`,
/// `error.StreamTooLong` is returned
/// and the `std.ArrayList` has exactly `max_append_size` bytes appended.
pub fn readAllArrayList(
    self: Self,
    array_list: *std.ArrayList(u8),
    max_append_size: usize,
) anyerror!void {
    // Delegates to the aligned variant with natural (null) alignment.
    return self.readAllArrayListAligned(null, array_list, max_append_size);
}
/// Aligned variant of `readAllArrayList`: appends stream contents to
/// `array_list` until end of stream. If more than `max_append_size` bytes
/// would be appended, the list is trimmed to exactly `max_append_size`
/// appended bytes and `error.StreamTooLong` is returned.
pub fn readAllArrayListAligned(
    self: Self,
    comptime alignment: ?Alignment,
    array_list: *std.ArrayListAligned(u8, alignment),
    max_append_size: usize,
) anyerror!void {
    try array_list.ensureTotalCapacity(@min(max_append_size, 4096));
    const original_len = array_list.items.len;
    var start_index: usize = original_len;
    while (true) {
        // Expose all spare capacity as items so it can be read into directly.
        array_list.expandToCapacity();
        const dest_slice = array_list.items[start_index..];
        const bytes_read = try self.readAll(dest_slice);
        start_index += bytes_read;
        if (start_index - original_len > max_append_size) {
            array_list.shrinkAndFree(original_len + max_append_size);
            return error.StreamTooLong;
        }
        if (bytes_read != dest_slice.len) {
            // Short read means end of stream; trim the unread capacity items.
            array_list.shrinkAndFree(start_index);
            return;
        }
        // This will trigger ArrayList to expand superlinearly at whatever its growth rate is.
        try array_list.ensureTotalCapacity(start_index + 1);
    }
}
/// Allocates enough memory to hold all the contents of the stream. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readAllAlloc(self: Self, allocator: mem.Allocator, max_size: usize) anyerror![]u8 {
    var list = std.ArrayList(u8).init(allocator);
    defer list.deinit();
    try self.readAllArrayList(&list, max_size);
    return try list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
/// Replaces the `std.ArrayList` contents by reading from the stream until `delimiter` is found.
/// Does not include the delimiter in the result.
/// If the `std.ArrayList` length would exceed `max_size`, `error.StreamTooLong` is returned and the
/// `std.ArrayList` is populated with `max_size` bytes from the stream.
pub fn readUntilDelimiterArrayList(
    self: Self,
    array_list: *std.ArrayList(u8),
    delimiter: u8,
    max_size: usize,
) anyerror!void {
    // Discard previous contents but keep the existing allocation.
    array_list.shrinkRetainingCapacity(0);
    try self.streamUntilDelimiter(array_list.writer(), delimiter, max_size);
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's writer instead.
/// Allocates enough memory to read until `delimiter`. If the allocated
/// memory would be greater than `max_size`, returns `error.StreamTooLong`.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterAlloc(
    self: Self,
    allocator: mem.Allocator,
    delimiter: u8,
    max_size: usize,
) anyerror![]u8 {
    var list = std.ArrayList(u8).init(allocator);
    defer list.deinit();
    try self.streamUntilDelimiter(list.writer(), delimiter, max_size);
    return try list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
/// Reads from the stream until specified byte is found. If the buffer is not
/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
/// If end-of-stream is found, `error.EndOfStream` is returned.
/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
/// delimiter byte is written to the output buffer but is not included
/// in the returned slice.
pub fn readUntilDelimiter(self: Self, buf: []u8, delimiter: u8) anyerror![]u8 {
    var fbs = std.io.fixedBufferStream(buf);
    try self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len);
    const output = fbs.getWritten();
    // On success fewer than `buf.len` bytes were written (a full buffer
    // returns StreamTooLong above), so `output.len` is in bounds here.
    buf[output.len] = delimiter; // emulating old behaviour
    return output;
}
/// Deprecated: use `streamUntilDelimiter` with ArrayList's (or any other's) writer instead.
/// Allocates enough memory to read until `delimiter` or end-of-stream.
/// If the allocated memory would be greater than `max_size`, returns
/// `error.StreamTooLong`. If end-of-stream is found, returns the rest
/// of the stream. If this function is called again after that, returns
/// null.
/// Caller owns returned memory.
/// If this function returns an error, the contents from the stream read so far are lost.
pub fn readUntilDelimiterOrEofAlloc(
    self: Self,
    allocator: mem.Allocator,
    delimiter: u8,
    max_size: usize,
) anyerror!?[]u8 {
    var array_list = std.ArrayList(u8).init(allocator);
    defer array_list.deinit();
    self.streamUntilDelimiter(array_list.writer(), delimiter, max_size) catch |err| switch (err) {
        // End of stream with nothing read at all maps to null; a partial
        // tail falls through and is returned below.
        error.EndOfStream => if (array_list.items.len == 0) {
            return null;
        },
        else => |e| return e,
    };
    return try array_list.toOwnedSlice();
}
/// Deprecated: use `streamUntilDelimiter` with FixedBufferStream's writer instead.
/// Reads from the stream until specified byte is found. If the buffer is not
/// large enough to hold the entire contents, `error.StreamTooLong` is returned.
/// If end-of-stream is found, returns the rest of the stream. If this
/// function is called again after that, returns null.
/// Returns a slice of the stream data, with ptr equal to `buf.ptr`. The
/// delimiter byte is written to the output buffer but is not included
/// in the returned slice.
pub fn readUntilDelimiterOrEof(self: Self, buf: []u8, delimiter: u8) anyerror!?[]u8 {
    var fbs = std.io.fixedBufferStream(buf);
    self.streamUntilDelimiter(fbs.writer(), delimiter, fbs.buffer.len) catch |err| switch (err) {
        // End of stream with nothing written maps to null; a partial tail
        // falls through and is returned below.
        error.EndOfStream => if (fbs.getWritten().len == 0) {
            return null;
        },
        else => |e| return e,
    };
    const output = fbs.getWritten();
    // On success/EOF fewer than `buf.len` bytes were written, so
    // `output.len` is in bounds here.
    buf[output.len] = delimiter; // emulating old behaviour
    return output;
}
/// Appends to `writer` by reading from the stream until `delimiter` is
/// found. The delimiter itself is consumed but not written.
/// If `optional_max_size` is not null and the number of written bytes would
/// exceed it, returns `error.StreamTooLong` and stops appending.
/// If `optional_max_size` is null, appending is unbounded.
pub fn streamUntilDelimiter(
    self: Self,
    writer: anytype,
    delimiter: u8,
    optional_max_size: ?usize,
) anyerror!void {
    if (optional_max_size) |max_size| {
        var written: usize = 0;
        while (written < max_size) : (written += 1) {
            const byte: u8 = try self.readByte();
            if (byte == delimiter) return;
            try writer.writeByte(byte);
        }
        return error.StreamTooLong;
    }
    // Unbounded mode: there is no byte budget, so `error.StreamTooLong`
    // cannot occur; the loop only exits via delimiter or a read error.
    while (true) {
        const byte: u8 = try self.readByte();
        if (byte == delimiter) return;
        try writer.writeByte(byte);
    }
}
/// Reads and discards bytes from the stream until `delimiter` is found,
/// consuming the delimiter as well.
/// If end-of-stream is reached first, this function succeeds.
pub fn skipUntilDelimiterOrEof(self: Self, delimiter: u8) anyerror!void {
    while (true) {
        const b = self.readByte() catch |err| switch (err) {
            error.EndOfStream => return,
            else => |e| return e,
        };
        if (b == delimiter) break;
    }
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
pub fn readByte(self: Self) anyerror!u8 {
    var buf: [1]u8 = undefined;
    if (try self.read(&buf) == 0) return error.EndOfStream;
    return buf[0];
}
/// Same as `readByte` except the returned byte is signed.
pub fn readByteSigned(self: Self) anyerror!i8 {
    const byte = try self.readByte();
    return @bitCast(byte);
}
/// Reads exactly `num_bytes` bytes and returns them as an array, or
/// `error.EndOfStream` if the stream ends early.
/// `num_bytes` must be comptime-known.
pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes]u8 {
    var result: [num_bytes]u8 = undefined;
    try self.readNoEof(result[0..]);
    return result;
}
/// Reads bytes until `bounded.len` is equal to `num_bytes`,
/// or the stream ends.
///
/// * it is assumed that `num_bytes` will not exceed `bounded.capacity()`
pub fn readIntoBoundedBytes(
    self: Self,
    comptime num_bytes: usize,
    bounded: *std.BoundedArray(u8, num_bytes),
) anyerror!void {
    while (bounded.len < num_bytes) {
        // get at most the number of bytes free in the bounded array
        const bytes_read = try self.read(bounded.unusedCapacitySlice());
        // A zero-length read means end of stream; stop short.
        if (bytes_read == 0) return;
        // bytes_read will never be larger than @TypeOf(bounded.len)
        // due to `self.read` being bounded by `bounded.unusedCapacitySlice()`
        bounded.len += @as(@TypeOf(bounded.len), @intCast(bytes_read));
    }
}
/// Reads at most `num_bytes` and returns them as a bounded array.
pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) anyerror!std.BoundedArray(u8, num_bytes) {
    var out: std.BoundedArray(u8, num_bytes) = .{};
    try self.readIntoBoundedBytes(num_bytes, &out);
    return out;
}
/// Reads a byte-multiple-sized integer `T` from the stream with the given
/// endianness, or returns `error.EndOfStream`.
pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
    const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
    return mem.readInt(T, &bytes, endian);
}
/// Reads `size` bytes from the stream and decodes them as a `ReturnType`
/// integer with the given endianness. `size` may be smaller than
/// `@sizeOf(ReturnType)`, allowing variable-width encodings.
pub fn readVarInt(
    self: Self,
    comptime ReturnType: type,
    endian: std.builtin.Endian,
    size: usize,
) anyerror!ReturnType {
    assert(size <= @sizeOf(ReturnType));
    var bytes_buf: [@sizeOf(ReturnType)]u8 = undefined;
    const bytes = bytes_buf[0..size];
    try self.readNoEof(bytes);
    return mem.readVarInt(ReturnType, bytes, endian);
}
/// Optional parameters for `skipBytes`
pub const SkipBytesOptions = struct {
    // Size of the stack scratch buffer used while discarding bytes.
    buf_size: usize = 512,
};
// `num_bytes` is a `u64` to match `off_t`
/// Reads `num_bytes` bytes from the stream and discards them, erroring with
/// `error.EndOfStream` if the stream ends first.
pub fn skipBytes(self: Self, num_bytes: u64, comptime options: SkipBytesOptions) anyerror!void {
    var trash: [options.buf_size]u8 = undefined;
    var left = num_bytes;
    while (left != 0) {
        const chunk = @min(left, options.buf_size);
        try self.readNoEof(trash[0..chunk]);
        left -= chunk;
    }
}
/// Reads `slice.len` bytes from the stream and returns whether they equal
/// the passed slice. All `slice.len` bytes are consumed even on mismatch.
pub fn isBytes(self: Self, slice: []const u8) anyerror!bool {
    var matches = true;
    for (slice) |expected| {
        if (try self.readByte() != expected) matches = false;
    }
    return matches;
}
/// Reads `@sizeOf(T)` bytes from the stream and reinterprets them as a `T`.
pub fn readStruct(self: Self, comptime T: type) anyerror!T {
    // Only extern and packed structs have defined in-memory layout.
    comptime assert(@typeInfo(T).@"struct".layout != .auto);
    var res: [1]T = undefined;
    try self.readNoEof(mem.sliceAsBytes(res[0..]));
    return res[0];
}
/// Like `readStruct`, but byte-swaps every field when `endian` differs from
/// the host endianness.
pub fn readStructEndian(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
    var value = try self.readStruct(T);
    if (endian != native_endian) mem.byteSwapAllFields(T, &value);
    return value;
}
/// Reads an integer with the same size as the given enum's tag type. If the integer matches
/// an enum tag, casts the integer to the enum tag and returns it. Otherwise, returns an `error.InvalidValue`.
/// TODO optimization taking advantage of most fields being in order
pub fn readEnum(self: Self, comptime Enum: type, endian: std.builtin.Endian) anyerror!Enum {
    const E = error{
        /// An integer was read, but it did not match any of the tags in the supplied enum.
        InvalidValue,
    };
    const type_info = @typeInfo(Enum).@"enum";
    const tag = try self.readInt(type_info.tag_type, endian);
    // Linear scan over the enum's fields, unrolled at comptime.
    inline for (std.meta.fields(Enum)) |field| {
        if (tag == field.value) {
            return @field(Enum, field.name);
        }
    }
    return E.InvalidValue;
}
/// Reads the stream until the end, ignoring all the data.
/// Returns the number of bytes discarded.
pub fn discard(self: Self) anyerror!u64 {
    var sink: [4096]u8 = undefined;
    var total: u64 = 0;
    while (true) {
        const n = try self.read(&sink);
        if (n == 0) return total;
        total += n;
    }
}
const std = @import("../std.zig");
const Self = @This();
const math = std.math;
const assert = std.debug.assert;
const mem = std.mem;
const testing = std.testing;
const native_endian = @import("builtin").target.cpu.arch.endian();
const Alignment = std.mem.Alignment;
test {
_ = @import("Reader/test.zig");
}

View File

@ -0,0 +1,109 @@
const std = @import("../std.zig");
const assert = std.debug.assert;
const mem = std.mem;
const native_endian = @import("builtin").target.cpu.arch.endian();
context: *const anyopaque,
writeFn: *const fn (context: *const anyopaque, bytes: []const u8) anyerror!usize,
const Self = @This();
pub const Error = anyerror;
/// Performs a single low-level write through the type-erased vtable
/// pointer. Returns the number of bytes consumed, which may be fewer than
/// `bytes.len`.
pub fn write(self: Self, bytes: []const u8) anyerror!usize {
    return self.writeFn(self.context, bytes);
}
/// Writes all of `bytes`, calling `write` repeatedly until everything has
/// been consumed.
pub fn writeAll(self: Self, bytes: []const u8) anyerror!void {
    var remaining = bytes;
    while (remaining.len != 0) {
        remaining = remaining[try self.write(remaining)..];
    }
}
/// Formats `args` according to `format` and writes the result to the stream.
pub fn print(self: Self, comptime format: []const u8, args: anytype) anyerror!void {
    return std.fmt.format(self, format, args);
}
/// Writes a single byte to the stream.
pub fn writeByte(self: Self, byte: u8) anyerror!void {
    return self.writeAll(&[1]u8{byte});
}
/// Writes `byte` to the stream `n` times, batching through a fixed-size
/// stack buffer to limit the number of underlying write calls.
pub fn writeByteNTimes(self: Self, byte: u8, n: usize) anyerror!void {
    var chunk: [256]u8 = undefined;
    @memset(&chunk, byte);
    var left = n;
    while (left != 0) {
        const amt = @min(left, chunk.len);
        try self.writeAll(chunk[0..amt]);
        left -= amt;
    }
}
/// Writes `bytes` to the stream `n` times.
pub fn writeBytesNTimes(self: Self, bytes: []const u8, n: usize) anyerror!void {
    for (0..n) |_| try self.writeAll(bytes);
}
/// Writes the integer `value` to the stream with the given endianness.
pub inline fn writeInt(self: Self, comptime T: type, value: T, endian: std.builtin.Endian) anyerror!void {
    var bytes: [@divExact(@typeInfo(T).int.bits, 8)]u8 = undefined;
    mem.writeInt(std.math.ByteAlignedInt(@TypeOf(value)), &bytes, value, endian);
    return self.writeAll(&bytes);
}
/// Writes the in-memory bytes of `value` to the stream.
pub fn writeStruct(self: Self, value: anytype) anyerror!void {
    // Only extern and packed structs have defined in-memory layout.
    comptime assert(@typeInfo(@TypeOf(value)).@"struct".layout != .auto);
    return self.writeAll(mem.asBytes(&value));
}
/// Like `writeStruct`, but byte-swaps every field when `endian` differs
/// from the host endianness before writing.
pub fn writeStructEndian(self: Self, value: anytype, endian: std.builtin.Endian) anyerror!void {
    // TODO: make sure this value is not a reference type
    if (endian == native_endian) return self.writeStruct(value);
    var swapped = value;
    mem.byteSwapAllFields(@TypeOf(value), &swapped);
    return self.writeStruct(swapped);
}
/// Copies the entire contents of `file` (from its current position) to the
/// stream through a fixed stack buffer.
pub fn writeFile(self: Self, file: std.fs.File) anyerror!void {
    // TODO: figure out how to adjust std lib abstractions so that this ends up
    // doing sendfile or maybe even copy_file_range under the right conditions.
    var buf: [4000]u8 = undefined;
    while (true) {
        const n = try file.readAll(&buf);
        try self.writeAll(buf[0..n]);
        // A short read from `readAll` means end of file.
        if (n < buf.len) return;
    }
}
/// Helper for bridging to the new `Writer` API while upgrading.
/// Returns an `Adapter` whose `new_interface` field is an unbuffered
/// `std.io.Writer` that forwards all writes to this legacy writer.
pub fn adaptToNewApi(self: *const Self) Adapter {
    return .{
        .derp_writer = self.*,
        .new_interface = .{
            // No buffer: every drain goes straight to the legacy writeFn.
            .buffer = &.{},
            .vtable = &.{ .drain = Adapter.drain },
        },
    };
}
/// Bridges a legacy type-erased writer to the new `std.io.Writer`
/// interface. Obtain via `adaptToNewApi`.
pub const Adapter = struct {
    // The wrapped legacy writer.
    derp_writer: Self,
    // The new-style interface; pass `&adapter.new_interface` to new APIs.
    new_interface: std.io.Writer,
    // The new interface can only report error.WriteFailed; the underlying
    // error, if any, is captured here.
    err: ?Error = null,

    fn drain(w: *std.io.Writer, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
        _ = splat;
        // Recover the Adapter from the embedded interface pointer.
        const a: *@This() = @fieldParentPtr("new_interface", w);
        return a.derp_writer.write(data[0]) catch |err| {
            a.err = err;
            return error.WriteFailed;
        };
    }
};

View File

@ -26,7 +26,8 @@ pub const VTable = struct {
/// Returns the number of bytes written, which will be at minimum `0` and
/// at most `limit`. The number returned, including zero, does not indicate
/// end of stream. `limit` is guaranteed to be at least as large as the
/// buffer capacity of `w`.
/// buffer capacity of `w`, a value whose minimum size is determined by the
/// stream implementation.
///
/// The reader's internal logical seek position moves forward in accordance
/// with the number of bytes returned from this function.
@ -35,7 +36,15 @@ pub const VTable = struct {
/// sizes combined with short reads (returning a value less than `limit`)
/// in order to minimize complexity.
///
/// This function is always called when `buffer` is empty.
/// Although this function is usually called when `buffer` is empty, it is
/// also called when it needs to be filled more due to the API user
/// requesting contiguous memory. In either case, the existing buffer data
/// should be ignored; new data written to `w`.
///
/// In addition to, or instead of writing to `w`, the implementation may
/// choose to store data in `buffer`, modifying `seek` and `end`
/// accordingly. Stream implementations are encouraged to take advantage of
/// this if it simplifies the logic.
stream: *const fn (r: *Reader, w: *Writer, limit: Limit) StreamError!usize,
/// Consumes bytes from the internally tracked stream position without
@ -55,6 +64,8 @@ pub const VTable = struct {
/// The default implementation is based on calling `stream`, borrowing
/// `buffer` to construct a temporary `Writer` and ignoring the written
/// data.
///
/// This function is only called when `buffer` is empty.
discard: *const fn (r: *Reader, limit: Limit) Error!usize = defaultDiscard,
};
@ -102,7 +113,7 @@ const ending_state: Reader = .fixed(&.{});
pub const ending: *Reader = @constCast(&ending_state);
pub fn limited(r: *Reader, limit: Limit, buffer: []u8) Limited {
return Limited.init(r, limit, buffer);
return .init(r, limit, buffer);
}
/// Constructs a `Reader` such that it will read from `buffer` and then end.
@ -128,10 +139,8 @@ pub fn stream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
r.seek += n;
return n;
}
const before = w.count;
const n = try r.vtable.stream(r, w, limit);
assert(n <= @intFromEnum(limit));
assert(w.count == before + n);
return n;
}
@ -154,19 +163,13 @@ pub fn discard(r: *Reader, limit: Limit) Error!usize {
pub fn defaultDiscard(r: *Reader, limit: Limit) Error!usize {
assert(r.seek == 0);
assert(r.end == 0);
var w: Writer = .discarding(r.buffer);
const n = r.stream(&w, limit) catch |err| switch (err) {
var dw: Writer.Discarding = .init(r.buffer);
const n = r.stream(&dw.writer, limit) catch |err| switch (err) {
error.WriteFailed => unreachable,
error.ReadFailed => return error.ReadFailed,
error.EndOfStream => return error.EndOfStream,
};
if (n > @intFromEnum(limit)) {
const over_amt = n - @intFromEnum(limit);
r.seek = w.end - over_amt;
r.end = w.end;
assert(r.end <= w.buffer.len); // limit may be exceeded only by an amount within buffer capacity.
return @intFromEnum(limit);
}
assert(n <= @intFromEnum(limit));
return n;
}
@ -193,7 +196,7 @@ pub fn streamRemaining(r: *Reader, w: *Writer) StreamRemainingError!usize {
/// Consumes the stream until the end, ignoring all the data, returning the
/// number of bytes discarded.
pub fn discardRemaining(r: *Reader) ShortError!usize {
var offset: usize = r.end;
var offset: usize = r.end - r.seek;
r.seek = 0;
r.end = 0;
while (true) {
@ -262,10 +265,9 @@ pub fn appendRemaining(
error.EndOfStream => break,
error.ReadFailed => return error.ReadFailed,
};
if (n >= dest.len) {
if (n > dest.len) {
r.end = n - dest.len;
list.items.len += dest.len;
if (n == dest.len) return;
return error.StreamTooLong;
}
list.items.len += n;
@ -320,22 +322,29 @@ pub fn readVecLimit(r: *Reader, data: []const []u8, limit: Limit) Error!usize {
},
.writer = .{
.buffer = if (first.len >= r.buffer.len) first else r.buffer,
.vtable = &Writer.VectorWrapper.vtable,
.vtable = Writer.VectorWrapper.vtable,
},
};
var n = r.vtable.stream(r, &wrapper.writer, .limited(remaining)) catch |err| switch (err) {
error.WriteFailed => {
assert(!wrapper.used);
if (wrapper.writer.buffer.ptr == first.ptr) {
remaining -= wrapper.writer.end;
} else {
assert(wrapper.writer.end <= r.buffer.len);
r.end = wrapper.writer.end;
}
break;
},
else => |e| return e,
};
if (wrapper.writer.buffer.ptr != first.ptr) {
r.end = n;
if (!wrapper.used) {
if (wrapper.writer.buffer.ptr == first.ptr) {
remaining -= n;
} else {
assert(n <= r.buffer.len);
r.end = n;
}
break;
}
if (n < first.len) {
@ -352,6 +361,7 @@ pub fn readVecLimit(r: *Reader, data: []const []u8, limit: Limit) Error!usize {
remaining -= mid.len;
n -= mid.len;
}
assert(n <= r.buffer.len);
r.end = n;
break;
}
@ -441,7 +451,7 @@ pub fn toss(r: *Reader, n: usize) void {
}
/// Equivalent to `toss(r.bufferedLen())`.
pub fn tossAll(r: *Reader) void {
pub fn tossBuffered(r: *Reader) void {
r.seek = 0;
r.end = 0;
}
@ -553,7 +563,7 @@ pub fn discardShort(r: *Reader, n: usize) ShortError!usize {
/// See also:
/// * `peek`
/// * `readSliceShort`
pub fn readSlice(r: *Reader, buffer: []u8) Error!void {
pub fn readSliceAll(r: *Reader, buffer: []u8) Error!void {
const n = try readSliceShort(r, buffer);
if (n != buffer.len) return error.EndOfStream;
}
@ -567,7 +577,7 @@ pub fn readSlice(r: *Reader, buffer: []u8) Error!void {
/// only if the stream reached the end.
///
/// See also:
/// * `readSlice`
/// * `readSliceAll`
pub fn readSliceShort(r: *Reader, buffer: []u8) ShortError!usize {
const in_buffer = r.buffer[r.seek..r.end];
const copy_len = @min(buffer.len, in_buffer.len);
@ -588,17 +598,16 @@ pub fn readSliceShort(r: *Reader, buffer: []u8) ShortError!usize {
},
.writer = .{
.buffer = if (remaining.len >= r.buffer.len) remaining else r.buffer,
.vtable = &Writer.VectorWrapper.vtable,
.vtable = Writer.VectorWrapper.vtable,
},
};
const n = r.vtable.stream(r, &wrapper.writer, .unlimited) catch |err| switch (err) {
error.WriteFailed => {
if (wrapper.writer.buffer.ptr != remaining.ptr) {
if (!wrapper.used) {
assert(r.seek == 0);
r.seek = remaining.len;
r.end = wrapper.writer.end;
@memcpy(remaining, r.buffer[0..remaining.len]);
return buffer.len;
}
return buffer.len;
},
@ -626,7 +635,7 @@ pub fn readSliceShort(r: *Reader, buffer: []u8) ShortError!usize {
/// comptime-known and matches host endianness.
///
/// See also:
/// * `readSlice`
/// * `readSliceAll`
/// * `readSliceEndianAlloc`
pub inline fn readSliceEndian(
r: *Reader,
@ -634,7 +643,7 @@ pub inline fn readSliceEndian(
buffer: []Elem,
endian: std.builtin.Endian,
) Error!void {
try readSlice(r, @ptrCast(buffer));
try readSliceAll(r, @ptrCast(buffer));
if (native_endian != endian) for (buffer) |*elem| std.mem.byteSwapAllFields(Elem, elem);
}
@ -651,15 +660,16 @@ pub inline fn readSliceEndianAlloc(
) ReadAllocError![]Elem {
const dest = try allocator.alloc(Elem, len);
errdefer allocator.free(dest);
try readSlice(r, @ptrCast(dest));
try readSliceAll(r, @ptrCast(dest));
if (native_endian != endian) for (dest) |*elem| std.mem.byteSwapAllFields(Elem, elem);
return dest;
}
pub fn readSliceAlloc(r: *Reader, allocator: Allocator, len: usize) ReadAllocError![]u8 {
/// Shortcut for calling `readSliceAll` with a buffer provided by `allocator`.
pub fn readAlloc(r: *Reader, allocator: Allocator, len: usize) ReadAllocError![]u8 {
const dest = try allocator.alloc(u8, len);
errdefer allocator.free(dest);
try readSlice(r, dest);
try readSliceAll(r, dest);
return dest;
}
@ -692,6 +702,17 @@ pub fn takeSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel
return result;
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `sentinel` is found, without advancing the seek position.
///
/// Returned slice has a sentinel; end of stream does not count as a delimiter.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `peekDelimiterExclusive`
/// * `peekDelimiterInclusive`
pub fn peekSentinel(r: *Reader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
const result = try r.peekDelimiterInclusive(sentinel);
return result[0 .. result.len - 1 :sentinel];
@ -732,26 +753,21 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
@branchHint(.likely);
return buffer[seek .. end + 1];
}
if (seek > 0) {
const remainder = buffer[seek..];
@memmove(buffer[0..remainder.len], remainder);
r.end = remainder.len;
r.seek = 0;
if (r.vtable.stream == &endingStream) {
// Protect the `@constCast` of `fixed`.
return error.EndOfStream;
}
var writer: Writer = .{
.buffer = r.buffer,
.vtable = &.{ .drain = Writer.fixedDrain },
};
while (r.end < r.buffer.len) {
writer.end = r.end;
const n = r.vtable.stream(r, &writer, .limited(r.buffer.len - r.end)) catch |err| switch (err) {
r.rebase();
while (r.buffer.len - r.end != 0) {
const end_cap = r.buffer[r.end..];
var writer: Writer = .fixed(end_cap);
const n = r.vtable.stream(r, &writer, .limited(end_cap.len)) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,
};
const prev_end = r.end;
r.end = prev_end + n;
if (std.mem.indexOfScalarPos(u8, r.buffer[0..r.end], prev_end, delimiter)) |end| {
return r.buffer[0 .. end + 1];
r.end += n;
if (std.mem.indexOfScalarPos(u8, end_cap[0..n], 0, delimiter)) |end| {
return r.buffer[0 .. r.end - n + end + 1];
}
}
return error.StreamTooLong;
@ -777,9 +793,10 @@ pub fn peekDelimiterInclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
if (r.end == 0) return error.EndOfStream;
r.toss(r.end);
return r.buffer[0..r.end];
const remaining = r.buffer[r.seek..r.end];
if (remaining.len == 0) return error.EndOfStream;
r.toss(remaining.len);
return remaining;
},
else => |e| return e,
};
@ -807,8 +824,10 @@ pub fn takeDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
const result = r.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
if (r.end == 0) return error.EndOfStream;
return r.buffer[0..r.end];
const remaining = r.buffer[r.seek..r.end];
if (remaining.len == 0) return error.EndOfStream;
r.toss(remaining.len);
return remaining;
},
else => |e| return e,
};
@ -818,37 +837,50 @@ pub fn peekDelimiterExclusive(r: *Reader, delimiter: u8) DelimiterError![]u8 {
/// Appends to `w` contents by reading from the stream until `delimiter` is
/// found. Does not write the delimiter itself.
///
/// Returns number of bytes streamed.
pub fn readDelimiter(r: *Reader, w: *Writer, delimiter: u8) StreamError!usize {
const amount, const to = try r.readAny(w, delimiter, .unlimited);
return switch (to) {
.delimiter => amount,
.limit => unreachable,
.end => error.EndOfStream,
/// Returns number of bytes streamed, which may be zero, or error.EndOfStream
/// if the delimiter was not found.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
///
/// See also:
/// * `streamDelimiterEnding`
/// * `streamDelimiterLimit`
pub fn streamDelimiter(r: *Reader, w: *Writer, delimiter: u8) StreamError!usize {
const n = streamDelimiterLimit(r, w, delimiter, .unlimited) catch |err| switch (err) {
error.StreamTooLong => unreachable, // unlimited is passed
else => |e| return e,
};
if (r.seek == r.end) return error.EndOfStream;
return n;
}
/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Succeeds if stream ends before delimiter found.
/// Returns number of bytes streamed, which may be zero. End of stream can be
/// detected by checking if the next byte in the stream is the delimiter.
///
/// Returns number of bytes streamed. The end is not signaled to the writer.
pub fn readDelimiterEnding(
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
///
/// See also:
/// * `streamDelimiter`
/// * `streamDelimiterLimit`
pub fn streamDelimiterEnding(
r: *Reader,
w: *Writer,
delimiter: u8,
) StreamRemainingError!usize {
const amount, const to = try r.readAny(w, delimiter, .unlimited);
return switch (to) {
.delimiter, .end => amount,
.limit => unreachable,
return streamDelimiterLimit(r, w, delimiter, .unlimited) catch |err| switch (err) {
error.StreamTooLong => unreachable, // unlimited is passed
else => |e| return e,
};
}
pub const StreamDelimiterLimitedError = StreamRemainingError || error{
/// Stream ended before the delimiter was found.
EndOfStream,
pub const StreamDelimiterLimitError = error{
ReadFailed,
WriteFailed,
/// The delimiter was not found within the limit.
StreamTooLong,
};
@ -856,65 +888,103 @@ pub const StreamDelimiterLimitedError = StreamRemainingError || error{
/// Appends to `w` contents by reading from the stream until `delimiter` is found.
/// Does not write the delimiter itself.
///
/// Returns number of bytes streamed.
pub fn readDelimiterLimit(
/// Returns number of bytes streamed, which may be zero. End of stream can be
/// detected by checking if the next byte in the stream is the delimiter.
///
/// Asserts buffer capacity of at least one. This function performs better with
/// larger buffers.
pub fn streamDelimiterLimit(
r: *Reader,
w: *Writer,
delimiter: u8,
limit: Limit,
) StreamDelimiterLimitedError!usize {
const amount, const to = try r.readAny(w, delimiter, limit);
return switch (to) {
.delimiter => amount,
.limit => error.StreamTooLong,
.end => error.EndOfStream,
};
}
fn readAny(
r: *Reader,
w: *Writer,
delimiter: ?u8,
limit: Limit,
) StreamRemainingError!struct { usize, enum { delimiter, limit, end } } {
var amount: usize = 0;
var remaining = limit;
while (remaining.nonzero()) {
const available = remaining.slice(r.peekGreedy(1) catch |err| switch (err) {
error.ReadFailed => |e| return e,
error.EndOfStream => return .{ amount, .end },
) StreamDelimiterLimitError!usize {
var remaining = @intFromEnum(limit);
while (remaining != 0) {
const available = Limit.limited(remaining).slice(r.peekGreedy(1) catch |err| switch (err) {
error.ReadFailed => return error.ReadFailed,
error.EndOfStream => return @intFromEnum(limit) - remaining,
});
if (delimiter) |d| if (std.mem.indexOfScalar(u8, available, d)) |delimiter_index| {
if (std.mem.indexOfScalar(u8, available, delimiter)) |delimiter_index| {
try w.writeAll(available[0..delimiter_index]);
r.toss(delimiter_index + 1);
return .{ amount + delimiter_index, .delimiter };
};
r.toss(delimiter_index);
remaining -= delimiter_index;
return @intFromEnum(limit) - remaining;
}
try w.writeAll(available);
r.toss(available.len);
amount += available.len;
remaining = remaining.subtract(available.len).?;
remaining -= available.len;
}
return .{ amount, .limit };
return error.StreamTooLong;
}
/// Reads from the stream until specified byte is found, discarding all data,
/// including the delimiter.
///
/// If end of stream is found, this function succeeds.
pub fn discardDelimiterInclusive(r: *Reader, delimiter: u8) Error!void {
_ = r;
_ = delimiter;
@panic("TODO");
/// Returns number of bytes discarded, or `error.EndOfStream` if the delimiter
/// is not found.
///
/// See also:
/// * `discardDelimiterExclusive`
/// * `discardDelimiterLimit`
pub fn discardDelimiterInclusive(r: *Reader, delimiter: u8) Error!usize {
const n = discardDelimiterLimit(r, delimiter, .unlimited) catch |err| switch (err) {
error.StreamTooLong => unreachable, // unlimited is passed
else => |e| return e,
};
if (r.seek == r.end) return error.EndOfStream;
assert(r.buffer[r.seek] == delimiter);
toss(r, 1);
return n + 1;
}
/// Reads from the stream until specified byte is found, discarding all data,
/// excluding the delimiter.
///
/// Succeeds if stream ends before delimiter found.
pub fn discardDelimiterExclusive(r: *Reader, delimiter: u8) ShortError!void {
_ = r;
_ = delimiter;
@panic("TODO");
/// Returns the number of bytes discarded.
///
/// Succeeds if stream ends before delimiter found. End of stream can be
/// detected by checking if the delimiter is buffered.
///
/// See also:
/// * `discardDelimiterInclusive`
/// * `discardDelimiterLimit`
pub fn discardDelimiterExclusive(r: *Reader, delimiter: u8) ShortError!usize {
return discardDelimiterLimit(r, delimiter, .unlimited) catch |err| switch (err) {
error.StreamTooLong => unreachable, // unlimited is passed
else => |e| return e,
};
}
pub const DiscardDelimiterLimitError = error{
ReadFailed,
/// The delimiter was not found within the limit.
StreamTooLong,
};
/// Reads from the stream until specified byte is found, discarding all data,
/// excluding the delimiter.
///
/// Returns the number of bytes discarded.
///
/// Succeeds if stream ends before delimiter found. End of stream can be
/// detected by checking if the delimiter is buffered.
pub fn discardDelimiterLimit(r: *Reader, delimiter: u8, limit: Limit) DiscardDelimiterLimitError!usize {
var remaining = @intFromEnum(limit);
while (remaining != 0) {
const available = Limit.limited(remaining).slice(r.peekGreedy(1) catch |err| switch (err) {
error.ReadFailed => return error.ReadFailed,
error.EndOfStream => return @intFromEnum(limit) - remaining,
});
if (std.mem.indexOfScalar(u8, available, delimiter)) |delimiter_index| {
r.toss(delimiter_index);
remaining -= delimiter_index;
return @intFromEnum(limit) - remaining;
}
r.toss(available.len);
remaining -= available.len;
}
return error.StreamTooLong;
}
/// Fills the buffer such that it contains at least `n` bytes, without
@ -930,6 +1000,19 @@ pub fn fill(r: *Reader, n: usize) Error!void {
@branchHint(.likely);
return;
}
if (r.seek + n <= r.buffer.len) while (true) {
const end_cap = r.buffer[r.end..];
var writer: Writer = .fixed(end_cap);
r.end += r.vtable.stream(r, &writer, .limited(end_cap.len)) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,
};
if (r.seek + n <= r.end) return;
};
if (r.vtable.stream == &endingStream) {
// Protect the `@constCast` of `fixed`.
return error.EndOfStream;
}
rebaseCapacity(r, n);
var writer: Writer = .{
.buffer = r.buffer,
@ -970,11 +1053,12 @@ pub fn fillMore(r: *Reader) Error!void {
pub fn peekByte(r: *Reader) Error!u8 {
const buffer = r.buffer[0..r.end];
const seek = r.seek;
if (seek >= buffer.len) {
@branchHint(.unlikely);
try fill(r, 1);
if (seek < buffer.len) {
@branchHint(.likely);
return buffer[seek];
}
return buffer[seek];
try fill(r, 1);
return r.buffer[r.seek];
}
/// Reads 1 byte from the stream or returns `error.EndOfStream`.
@ -1009,6 +1093,7 @@ pub fn takeVarInt(r: *Reader, comptime Int: type, endian: std.builtin.Endian, n:
///
/// See also:
/// * `peekStruct`
/// * `takeStructEndian`
pub fn takeStruct(r: *Reader, comptime T: type) Error!*align(1) T {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).@"struct".layout != .auto);
@ -1021,6 +1106,7 @@ pub fn takeStruct(r: *Reader, comptime T: type) Error!*align(1) T {
///
/// See also:
/// * `takeStruct`
/// * `peekStructEndian`
pub fn peekStruct(r: *Reader, comptime T: type) Error!*align(1) T {
// Only extern and packed structs have defined in-memory layout.
comptime assert(@typeInfo(T).@"struct".layout != .auto);
@ -1031,6 +1117,10 @@ pub fn peekStruct(r: *Reader, comptime T: type) Error!*align(1) T {
///
/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
///
/// See also:
/// * `takeStruct`
/// * `peekStructEndian`
pub inline fn takeStructEndian(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
var res = (try r.takeStruct(T)).*;
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
@ -1041,6 +1131,10 @@ pub inline fn takeStructEndian(r: *Reader, comptime T: type, endian: std.builtin
///
/// This function is inline to avoid referencing `std.mem.byteSwapAllFields`
/// when `endian` is comptime-known and matches the host endianness.
///
/// See also:
/// * `takeStructEndian`
/// * `peekStruct`
pub inline fn peekStructEndian(r: *Reader, comptime T: type, endian: std.builtin.Endian) Error!T {
var res = (try r.peekStruct(T)).*;
if (native_endian != endian) std.mem.byteSwapAllFields(T, &res);
@ -1218,146 +1312,295 @@ test fixed {
}
test peek {
return error.Unimplemented;
var r: Reader = .fixed("abc");
try testing.expectEqualStrings("ab", try r.peek(2));
try testing.expectEqualStrings("a", try r.peek(1));
}
test peekGreedy {
return error.Unimplemented;
var r: Reader = .fixed("abc");
try testing.expectEqualStrings("abc", try r.peekGreedy(1));
}
test toss {
return error.Unimplemented;
var r: Reader = .fixed("abc");
r.toss(1);
try testing.expectEqualStrings("bc", r.buffered());
}
test take {
return error.Unimplemented;
var r: Reader = .fixed("abc");
try testing.expectEqualStrings("ab", try r.take(2));
try testing.expectEqualStrings("c", try r.take(1));
}
test takeArray {
return error.Unimplemented;
var r: Reader = .fixed("abc");
try testing.expectEqualStrings("ab", try r.takeArray(2));
try testing.expectEqualStrings("c", try r.takeArray(1));
}
test peekArray {
return error.Unimplemented;
var r: Reader = .fixed("abc");
try testing.expectEqualStrings("ab", try r.peekArray(2));
try testing.expectEqualStrings("a", try r.peekArray(1));
}
test discardAll {
var r: Reader = .fixed("foobar");
try r.discard(3);
try r.discardAll(3);
try testing.expectEqualStrings("bar", try r.take(3));
try r.discard(0);
try testing.expectError(error.EndOfStream, r.discard(1));
try r.discardAll(0);
try testing.expectError(error.EndOfStream, r.discardAll(1));
}
test discardRemaining {
return error.Unimplemented;
var r: Reader = .fixed("foobar");
r.toss(1);
try testing.expectEqual(5, try r.discardRemaining());
try testing.expectEqual(0, try r.discardRemaining());
}
test stream {
return error.Unimplemented;
var out_buffer: [10]u8 = undefined;
var r: Reader = .fixed("foobar");
var w: Writer = .fixed(&out_buffer);
// Short streams are possible with this function but not with fixed.
try testing.expectEqual(2, try r.stream(&w, .limited(2)));
try testing.expectEqualStrings("fo", w.buffered());
try testing.expectEqual(4, try r.stream(&w, .unlimited));
try testing.expectEqualStrings("foobar", w.buffered());
}
test takeSentinel {
return error.Unimplemented;
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab", try r.takeSentinel('\n'));
try testing.expectError(error.EndOfStream, r.takeSentinel('\n'));
try testing.expectEqualStrings("c", try r.peek(1));
}
test peekSentinel {
return error.Unimplemented;
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
try testing.expectEqualStrings("ab", try r.peekSentinel('\n'));
}
test takeDelimiterInclusive {
return error.Unimplemented;
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab\n", try r.takeDelimiterInclusive('\n'));
try testing.expectError(error.EndOfStream, r.takeDelimiterInclusive('\n'));
}
test peekDelimiterInclusive {
return error.Unimplemented;
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n'));
try testing.expectEqualStrings("ab\n", try r.peekDelimiterInclusive('\n'));
r.toss(3);
try testing.expectError(error.EndOfStream, r.peekDelimiterInclusive('\n'));
}
test takeDelimiterExclusive {
return error.Unimplemented;
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab", try r.takeDelimiterExclusive('\n'));
try testing.expectEqualStrings("c", try r.takeDelimiterExclusive('\n'));
try testing.expectError(error.EndOfStream, r.takeDelimiterExclusive('\n'));
}
test peekDelimiterExclusive {
return error.Unimplemented;
var r: Reader = .fixed("ab\nc");
try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
try testing.expectEqualStrings("ab", try r.peekDelimiterExclusive('\n'));
r.toss(3);
try testing.expectEqualStrings("c", try r.peekDelimiterExclusive('\n'));
}
test readDelimiter {
return error.Unimplemented;
test streamDelimiter {
var out_buffer: [10]u8 = undefined;
var r: Reader = .fixed("foo\nbars");
var w: Writer = .fixed(&out_buffer);
try testing.expectEqual(3, try r.streamDelimiter(&w, '\n'));
try testing.expectEqualStrings("foo", w.buffered());
try testing.expectEqual(0, try r.streamDelimiter(&w, '\n'));
r.toss(1);
try testing.expectError(error.EndOfStream, r.streamDelimiter(&w, '\n'));
}
test readDelimiterEnding {
return error.Unimplemented;
test streamDelimiterEnding {
var out_buffer: [10]u8 = undefined;
var r: Reader = .fixed("foo\nbars");
var w: Writer = .fixed(&out_buffer);
try testing.expectEqual(3, try r.streamDelimiterEnding(&w, '\n'));
try testing.expectEqualStrings("foo", w.buffered());
r.toss(1);
try testing.expectEqual(4, try r.streamDelimiterEnding(&w, '\n'));
try testing.expectEqualStrings("foobars", w.buffered());
try testing.expectEqual(0, try r.streamDelimiterEnding(&w, '\n'));
try testing.expectEqual(0, try r.streamDelimiterEnding(&w, '\n'));
}
test readDelimiterLimit {
return error.Unimplemented;
test streamDelimiterLimit {
var out_buffer: [10]u8 = undefined;
var r: Reader = .fixed("foo\nbars");
var w: Writer = .fixed(&out_buffer);
try testing.expectError(error.StreamTooLong, r.streamDelimiterLimit(&w, '\n', .limited(2)));
try testing.expectEqual(1, try r.streamDelimiterLimit(&w, '\n', .limited(3)));
try testing.expectEqualStrings("\n", try r.take(1));
try testing.expectEqual(4, try r.streamDelimiterLimit(&w, '\n', .unlimited));
try testing.expectEqualStrings("foobars", w.buffered());
}
test discardDelimiterExclusive {
return error.Unimplemented;
var r: Reader = .fixed("foob\nar");
try testing.expectEqual(4, try r.discardDelimiterExclusive('\n'));
try testing.expectEqualStrings("\n", try r.take(1));
try testing.expectEqual(2, try r.discardDelimiterExclusive('\n'));
try testing.expectEqual(0, try r.discardDelimiterExclusive('\n'));
}
test discardDelimiterInclusive {
return error.Unimplemented;
var r: Reader = .fixed("foob\nar");
try testing.expectEqual(5, try r.discardDelimiterInclusive('\n'));
try testing.expectError(error.EndOfStream, r.discardDelimiterInclusive('\n'));
}
test discardDelimiterLimit {
var r: Reader = .fixed("foob\nar");
try testing.expectError(error.StreamTooLong, r.discardDelimiterLimit('\n', .limited(4)));
try testing.expectEqual(0, try r.discardDelimiterLimit('\n', .limited(2)));
try testing.expectEqualStrings("\n", try r.take(1));
try testing.expectEqual(2, try r.discardDelimiterLimit('\n', .unlimited));
try testing.expectEqual(0, try r.discardDelimiterLimit('\n', .unlimited));
}
test fill {
return error.Unimplemented;
var r: Reader = .fixed("abc");
try r.fill(1);
try r.fill(3);
}
test takeByte {
return error.Unimplemented;
var r: Reader = .fixed("ab");
try testing.expectEqual('a', try r.takeByte());
try testing.expectEqual('b', try r.takeByte());
try testing.expectError(error.EndOfStream, r.takeByte());
}
test takeByteSigned {
return error.Unimplemented;
var r: Reader = .fixed(&.{ 255, 5 });
try testing.expectEqual(-1, try r.takeByteSigned());
try testing.expectEqual(5, try r.takeByteSigned());
try testing.expectError(error.EndOfStream, r.takeByteSigned());
}
test takeInt {
return error.Unimplemented;
var r: Reader = .fixed(&.{ 0x12, 0x34, 0x56 });
try testing.expectEqual(0x1234, try r.takeInt(u16, .big));
try testing.expectError(error.EndOfStream, r.takeInt(u16, .little));
}
test takeVarInt {
return error.Unimplemented;
var r: Reader = .fixed(&.{ 0x12, 0x34, 0x56 });
try testing.expectEqual(0x123456, try r.takeVarInt(u64, .big, 3));
try testing.expectError(error.EndOfStream, r.takeVarInt(u16, .little, 1));
}
test takeStruct {
return error.Unimplemented;
var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 });
const S = extern struct { a: u8, b: u16 };
switch (native_endian) {
.little => try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.takeStruct(S)).*),
.big => try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.takeStruct(S)).*),
}
try testing.expectError(error.EndOfStream, r.takeStruct(S));
}
test peekStruct {
return error.Unimplemented;
var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 });
const S = extern struct { a: u8, b: u16 };
switch (native_endian) {
.little => {
try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.peekStruct(S)).*);
try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), (try r.peekStruct(S)).*);
},
.big => {
try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.peekStruct(S)).*);
try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), (try r.peekStruct(S)).*);
},
}
}
test takeStructEndian {
return error.Unimplemented;
var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 });
const S = extern struct { a: u8, b: u16 };
try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), try r.takeStructEndian(S, .big));
try testing.expectError(error.EndOfStream, r.takeStructEndian(S, .little));
}
test peekStructEndian {
return error.Unimplemented;
var r: Reader = .fixed(&.{ 0x12, 0x00, 0x34, 0x56 });
const S = extern struct { a: u8, b: u16 };
try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x3456 }), try r.peekStructEndian(S, .big));
try testing.expectEqual(@as(S, .{ .a = 0x12, .b = 0x5634 }), try r.peekStructEndian(S, .little));
}
test takeEnum {
return error.Unimplemented;
var r: Reader = .fixed(&.{ 2, 0, 1 });
const E1 = enum(u8) { a, b, c };
const E2 = enum(u16) { _ };
try testing.expectEqual(E1.c, try r.takeEnum(E1, .little));
try testing.expectEqual(@as(E2, @enumFromInt(0x0001)), try r.takeEnum(E2, .big));
}
test takeLeb128 {
return error.Unimplemented;
var r: Reader = .fixed("\xc7\x9f\x7f\x80");
try testing.expectEqual(-12345, try r.takeLeb128(i64));
try testing.expectEqual(0x80, try r.peekByte());
try testing.expectError(error.EndOfStream, r.takeLeb128(i64));
}
test readSliceShort {
return error.Unimplemented;
var r: Reader = .fixed("HelloFren");
var buf: [5]u8 = undefined;
try testing.expectEqual(5, try r.readSliceShort(&buf));
try testing.expectEqualStrings("Hello", buf[0..5]);
try testing.expectEqual(4, try r.readSliceShort(&buf));
try testing.expectEqualStrings("Fren", buf[0..4]);
try testing.expectEqual(0, try r.readSliceShort(&buf));
}
test readVec {
return error.Unimplemented;
var r: Reader = .fixed(std.ascii.letters);
var flat_buffer: [52]u8 = undefined;
var bufs: [2][]u8 = .{
flat_buffer[0..26],
flat_buffer[26..],
};
// Short reads are possible with this function but not with fixed.
try testing.expectEqual(26 * 2, try r.readVec(&bufs));
try testing.expectEqualStrings(std.ascii.letters[0..26], bufs[0]);
try testing.expectEqualStrings(std.ascii.letters[26..], bufs[1]);
}
test readVecLimit {
var r: Reader = .fixed(std.ascii.letters);
var flat_buffer: [52]u8 = undefined;
var bufs: [2][]u8 = .{
flat_buffer[0..26],
flat_buffer[26..],
};
// Short reads are possible with this function but not with fixed.
try testing.expectEqual(50, try r.readVecLimit(&bufs, .limited(50)));
try testing.expectEqualStrings(std.ascii.letters[0..26], bufs[0]);
try testing.expectEqualStrings(std.ascii.letters[26..50], bufs[1][0..24]);
}
test "expected error.EndOfStream" {
// Unit test inspired by https://github.com/ziglang/zig/issues/17733
var r: std.io.Reader = .fixed("");
try std.testing.expectError(error.EndOfStream, r.readEnum(enum(u8) { a, b }, .little));
try std.testing.expectError(error.EndOfStream, r.isBytes("foo"));
var buffer: [3]u8 = undefined;
var r: std.io.Reader = .fixed(&buffer);
r.end = 0; // capacity 3, but empty
try std.testing.expectError(error.EndOfStream, r.takeEnum(enum(u8) { a, b }, .little));
try std.testing.expectError(error.EndOfStream, r.take(3));
}
fn endingStream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
@ -1389,25 +1632,51 @@ fn failingDiscard(r: *Reader, limit: Limit) Error!usize {
test "readAlloc when the backing reader provides one byte at a time" {
const OneByteReader = struct {
str: []const u8,
curr: usize,
i: usize,
reader: Reader,
fn read(self: *@This(), dest: []u8) usize {
if (self.str.len <= self.curr or dest.len == 0)
return 0;
dest[0] = self.str[self.curr];
self.curr += 1;
fn stream(r: *Reader, w: *Writer, limit: Limit) StreamError!usize {
assert(@intFromEnum(limit) >= 1);
const self: *@This() = @fieldParentPtr("reader", r);
if (self.str.len - self.i == 0) return error.EndOfStream;
try w.writeByte(self.str[self.i]);
self.i += 1;
return 1;
}
};
const str = "This is a test";
var one_byte_stream: OneByteReader = .init(str);
const res = try one_byte_stream.reader().streamReadAlloc(std.testing.allocator, str.len + 1);
var one_byte_stream: OneByteReader = .{
.str = str,
.i = 0,
.reader = .{
.buffer = &.{},
.vtable = &.{ .stream = OneByteReader.stream },
.seek = 0,
.end = 0,
},
};
const res = try one_byte_stream.reader.allocRemaining(std.testing.allocator, .unlimited);
defer std.testing.allocator.free(res);
try std.testing.expectEqualStrings(str, res);
}
test "takeDelimiterInclusive when it rebases" {
const written_line = "ABCDEFGHIJKLMNOPQRSTUVWXYZ\n";
var buffer: [128]u8 = undefined;
var tr: std.testing.Reader = .init(&buffer, &.{
.{ .buffer = written_line },
.{ .buffer = written_line },
.{ .buffer = written_line },
.{ .buffer = written_line },
.{ .buffer = written_line },
.{ .buffer = written_line },
});
const r = &tr.interface;
for (0..6) |_| {
try std.testing.expectEqualStrings(written_line, try r.takeDelimiterInclusive('\n'));
}
}
/// Provides a `Reader` implementation by passing data from an underlying
/// reader through `Hasher.update`.
///

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More