Merge remote-tracking branch 'origin/http' into wrangle-writer-buffering

Andrew Kelley 2025-08-06 22:52:19 -07:00
commit b9e1fef562
87 changed files with 2545 additions and 2346 deletions

View File

@ -50,6 +50,24 @@ jobs:
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/aarch64-linux-release.sh
riscv64-linux-debug:
if: github.event_name == 'push'
timeout-minutes: 420
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-debug.sh
riscv64-linux-release:
if: github.event_name == 'push'
timeout-minutes: 420
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-release.sh
x86_64-macos-release:
runs-on: "macos-13"
env:

View File

@ -1,22 +0,0 @@
name: riscv
on:
workflow_dispatch:
permissions:
contents: read
jobs:
riscv64-linux-debug:
timeout-minutes: 1020
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-debug.sh
riscv64-linux-release:
timeout-minutes: 900
runs-on: [self-hosted, Linux, riscv64]
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Build and Test
run: sh ci/riscv64-linux-release.sh

View File

@ -90,6 +90,7 @@ pub fn build(b: *std.Build) !void {
const skip_non_native = b.option(bool, "skip-non-native", "Main test suite skips non-native builds") orelse false;
const skip_libc = b.option(bool, "skip-libc", "Main test suite skips tests that link libc") orelse false;
const skip_single_threaded = b.option(bool, "skip-single-threaded", "Main test suite skips tests that are single-threaded") orelse false;
const skip_compile_errors = b.option(bool, "skip-compile-errors", "Main test suite skips compile error tests") orelse false;
const skip_translate_c = b.option(bool, "skip-translate-c", "Main test suite skips translate-c tests") orelse false;
const skip_run_translated_c = b.option(bool, "skip-run-translated-c", "Main test suite skips run-translated-c tests") orelse false;
const skip_freebsd = b.option(bool, "skip-freebsd", "Main test suite skips targets with freebsd OS") orelse false;
@ -418,6 +419,7 @@ pub fn build(b: *std.Build) !void {
try tests.addCases(b, test_cases_step, target, .{
.test_filters = test_filters,
.test_target_filters = test_target_filters,
.skip_compile_errors = skip_compile_errors,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
.skip_netbsd = skip_netbsd,
@ -450,7 +452,6 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the behavior tests",
.optimize_modes = optimization_modes,
.include_paths = &.{},
.windows_libs = &.{},
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -473,7 +474,6 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the @cImport tests",
.optimize_modes = optimization_modes,
.include_paths = &.{"test/c_import"},
.windows_libs = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -494,7 +494,6 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the compiler_rt tests",
.optimize_modes = optimization_modes,
.include_paths = &.{},
.windows_libs = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -516,7 +515,6 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the zigc tests",
.optimize_modes = optimization_modes,
.include_paths = &.{},
.windows_libs = &.{},
.skip_single_threaded = true,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -538,12 +536,6 @@ pub fn build(b: *std.Build) !void {
.desc = "Run the standard library tests",
.optimize_modes = optimization_modes,
.include_paths = &.{},
.windows_libs = &.{
"advapi32",
"crypt32",
"iphlpapi",
"ws2_32",
},
.skip_single_threaded = skip_single_threaded,
.skip_non_native = skip_non_native,
.skip_freebsd = skip_freebsd,
@ -741,12 +733,6 @@ fn addCompilerMod(b: *std.Build, options: AddCompilerModOptions) *std.Build.Modu
compiler_mod.addImport("aro", aro_mod);
compiler_mod.addImport("aro_translate_c", aro_translate_c_mod);
if (options.target.result.os.tag == .windows) {
compiler_mod.linkSystemLibrary("advapi32", .{});
compiler_mod.linkSystemLibrary("crypt32", .{});
compiler_mod.linkSystemLibrary("ws2_32", .{});
}
return compiler_mod;
}
@ -1444,10 +1430,6 @@ fn generateLangRef(b: *std.Build) std.Build.LazyPath {
}),
});
if (b.graph.host.result.os.tag == .windows) {
doctest_exe.root_module.linkSystemLibrary("advapi32", .{});
}
var dir = b.build_root.handle.openDir("doc/langref", .{ .iterate = true }) catch |err| {
std.debug.panic("unable to open '{f}doc/langref' directory: {s}", .{
b.build_root, @errorName(err),

View File

@ -49,11 +49,12 @@ unset CXX
ninja install
# No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
stage3-debug/bin/zig build test-cases test-modules test-unit test-standalone test-c-abi test-link test-stack-traces test-asm-link test-llvm-ir \
stage3-debug/bin/zig build test-cases test-modules test-unit test-c-abi test-stack-traces test-asm-link test-llvm-ir \
--maxrss 68719476736 \
-Dstatic-llvm \
-Dskip-non-native \
-Dskip-single-threaded \
-Dskip-compile-errors \
-Dskip-translate-c \
-Dskip-run-translated-c \
-Dtarget=native-native-musl \

View File

@ -49,11 +49,12 @@ unset CXX
ninja install
# No -fqemu and -fwasmtime here as they're covered by the x86_64-linux scripts.
stage3-release/bin/zig build test-cases test-modules test-unit test-standalone test-c-abi test-link test-stack-traces test-asm-link test-llvm-ir \
stage3-release/bin/zig build test-cases test-modules test-unit test-c-abi test-stack-traces test-asm-link test-llvm-ir \
--maxrss 68719476736 \
-Dstatic-llvm \
-Dskip-non-native \
-Dskip-single-threaded \
-Dskip-compile-errors \
-Dskip-translate-c \
-Dskip-run-translated-c \
-Dtarget=native-native-musl \

View File

@ -8,20 +8,22 @@ const Instruction = enum {
};
fn evaluate(initial_stack: []const i32, code: []const Instruction) !i32 {
var stack = try std.BoundedArray(i32, 8).fromSlice(initial_stack);
var buffer: [8]i32 = undefined;
var stack = std.ArrayListUnmanaged(i32).initBuffer(&buffer);
try stack.appendSliceBounded(initial_stack);
var ip: usize = 0;
return vm: switch (code[ip]) {
// Because all code after `continue` is unreachable, this branch does
// not provide a result.
.add => {
try stack.append(stack.pop().? + stack.pop().?);
try stack.appendBounded(stack.pop().? + stack.pop().?);
ip += 1;
continue :vm code[ip];
},
.mul => {
try stack.append(stack.pop().? * stack.pop().?);
try stack.appendBounded(stack.pop().? * stack.pop().?);
ip += 1;
continue :vm code[ip];
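
The replacement above swaps std.BoundedArray for a fixed stack buffer wrapped by ArrayListUnmanaged.initBuffer, where the *Bounded append variants fail instead of allocating. A minimal sketch of that pattern, assuming the appendBounded/appendSliceBounded API used in this diff:

const std = @import("std");

test "fixed-buffer list in place of BoundedArray" {
    var buffer: [4]u8 = undefined;
    var list = std.ArrayListUnmanaged(u8).initBuffer(&buffer);
    try list.appendSliceBounded(&.{ 1, 2, 3 });
    try list.appendBounded(4);
    // The buffer is full, so the next bounded append fails rather than allocating.
    try std.testing.expectError(error.OutOfMemory, list.appendBounded(5));
    try std.testing.expectEqual(@as(?u8, 4), list.pop());
}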

View File

@ -708,7 +708,7 @@ pub const Arguments = blk: {
field.* = .{
.name = decl.name,
.type = @field(attributes, decl.name),
.alignment = 0,
.alignment = @alignOf(@field(attributes, decl.name)),
};
}

View File

@ -502,6 +502,9 @@ pub fn main() !void {
};
}
// Comptime-known guard to prevent the logic below from being included when `!Watch.have_impl`.
if (!Watch.have_impl) unreachable;
try w.update(gpa, run.step_stack.keys());
// Wait until a file system notification arrives. Read all such events
@ -511,7 +514,7 @@ pub fn main() !void {
// recursive dependants.
var caption_buf: [std.Progress.Node.max_name_len]u8 = undefined;
const caption = std.fmt.bufPrint(&caption_buf, "watching {d} directories, {d} processes", .{
w.dir_table.entries.len, countSubProcesses(run.step_stack.keys()),
w.dir_count, countSubProcesses(run.step_stack.keys()),
}) catch &caption_buf;
var debouncing_node = main_progress_node.start(caption, 0);
var in_debounce = false;
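
The added guard relies on `unreachable` after a comptime-known condition: when `Watch.have_impl` is comptime-known false, analysis stops at the `unreachable`, so the watch logic below it is never compiled on unsupported targets. A minimal sketch of the pattern, with a hypothetical `have_impl` condition:

const have_impl = @import("builtin").os.tag == .linux; // hypothetical condition

fn watchLoop() void {
    // When `have_impl` is comptime-known false, control flow provably ends here,
    // so the compiler never analyzes the implementation-specific code below.
    if (!have_impl) unreachable;
    // ... implementation-specific logic ...
}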

View File

@ -1141,6 +1141,8 @@ pub fn parse(allocator: Allocator, args: []const []const u8, diagnostics: *Diagn
}
output_format = .res;
}
} else {
output_format_source = .output_format_arg;
}
options.output_source = .{ .filename = try filepathWithExtension(allocator, options.input_source.filename, output_format.?.extension()) };
} else {
@ -1529,21 +1531,21 @@ fn testParseOutput(args: []const []const u8, expected_output: []const u8) !?Opti
var diagnostics = Diagnostics.init(std.testing.allocator);
defer diagnostics.deinit();
var output = std.ArrayList(u8).init(std.testing.allocator);
var output: std.io.Writer.Allocating = .init(std.testing.allocator);
defer output.deinit();
var options = parse(std.testing.allocator, args, &diagnostics) catch |err| switch (err) {
error.ParseError => {
try diagnostics.renderToWriter(args, output.writer(), .no_color);
try std.testing.expectEqualStrings(expected_output, output.items);
try diagnostics.renderToWriter(args, &output.writer, .no_color);
try std.testing.expectEqualStrings(expected_output, output.getWritten());
return null;
},
else => |e| return e,
};
errdefer options.deinit();
try diagnostics.renderToWriter(args, output.writer(), .no_color);
try std.testing.expectEqualStrings(expected_output, output.items);
try diagnostics.renderToWriter(args, &output.writer, .no_color);
try std.testing.expectEqualStrings(expected_output, output.getWritten());
return options;
}
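
This test helper migrates from ArrayList's writer to the new std.Io.Writer.Allocating: diagnostics render into `&output.writer`, and the accumulated bytes are read back with getWritten() instead of `.items`. A minimal usage sketch, assuming that API:

const std = @import("std");

test "render into an allocating writer" {
    var output: std.Io.Writer.Allocating = .init(std.testing.allocator);
    defer output.deinit();
    try output.writer.print("{d} warnings", .{3});
    try std.testing.expectEqualStrings("3 warnings", output.getWritten());
}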

View File

@ -550,7 +550,7 @@ pub const Compiler = struct {
// so get it here to simplify future usage.
const filename_token = node.filename.getFirstToken();
const file = self.searchForFile(filename_utf8) catch |err| switch (err) {
const file_handle = self.searchForFile(filename_utf8) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => |e| {
const filename_string_index = try self.diagnostics.putString(filename_utf8);
@ -564,13 +564,15 @@ pub const Compiler = struct {
});
},
};
defer file.close();
defer file_handle.close();
var file_buffer: [2048]u8 = undefined;
var file_reader = file_handle.reader(&file_buffer);
if (maybe_predefined_type) |predefined_type| {
switch (predefined_type) {
.GROUP_ICON, .GROUP_CURSOR => {
// Check for animated icon first
if (ani.isAnimatedIcon(file.deprecatedReader())) {
if (ani.isAnimatedIcon(file_reader.interface.adaptToOldInterface())) {
// Animated icons are just put into the resource unmodified,
// and the resource type changes to ANIICON/ANICURSOR
@ -582,18 +584,18 @@ pub const Compiler = struct {
header.type_value.ordinal = @intFromEnum(new_predefined_type);
header.memory_flags = MemoryFlags.defaults(new_predefined_type);
header.applyMemoryFlags(node.common_resource_attributes, self.source);
header.data_size = @intCast(try file.getEndPos());
header.data_size = @intCast(try file_reader.getSize());
try header.write(writer, self.errContext(node.id));
try file.seekTo(0);
try writeResourceData(writer, file.deprecatedReader(), header.data_size);
try file_reader.seekTo(0);
try writeResourceData(writer, &file_reader.interface, header.data_size);
return;
}
// isAnimatedIcon moved the file cursor so reset to the start
try file.seekTo(0);
try file_reader.seekTo(0);
const icon_dir = ico.read(self.allocator, file.deprecatedReader(), try file.getEndPos()) catch |err| switch (err) {
const icon_dir = ico.read(self.allocator, file_reader.interface.adaptToOldInterface(), try file_reader.getSize()) catch |err| switch (err) {
error.OutOfMemory => |e| return e,
else => |e| {
return self.iconReadError(
@ -671,15 +673,15 @@ pub const Compiler = struct {
try writer.writeInt(u16, entry.type_specific_data.cursor.hotspot_y, .little);
}
try file.seekTo(entry.data_offset_from_start_of_file);
var header_bytes = file.deprecatedReader().readBytesNoEof(16) catch {
try file_reader.seekTo(entry.data_offset_from_start_of_file);
var header_bytes = (file_reader.interface.takeArray(16) catch {
return self.iconReadError(
error.UnexpectedEOF,
filename_utf8,
filename_token,
predefined_type,
);
};
}).*;
const image_format = ico.ImageFormat.detect(&header_bytes);
if (!image_format.validate(&header_bytes)) {
@ -802,8 +804,8 @@ pub const Compiler = struct {
},
}
try file.seekTo(entry.data_offset_from_start_of_file);
try writeResourceDataNoPadding(writer, file.deprecatedReader(), entry.data_size_in_bytes);
try file_reader.seekTo(entry.data_offset_from_start_of_file);
try writeResourceDataNoPadding(writer, &file_reader.interface, entry.data_size_in_bytes);
try writeDataPadding(writer, full_data_size);
if (self.state.icon_id == std.math.maxInt(u16)) {
@ -857,9 +859,9 @@ pub const Compiler = struct {
},
.BITMAP => {
header.applyMemoryFlags(node.common_resource_attributes, self.source);
const file_size = try file.getEndPos();
const file_size = try file_reader.getSize();
const bitmap_info = bmp.read(file.deprecatedReader(), file_size) catch |err| {
const bitmap_info = bmp.read(file_reader.interface.adaptToOldInterface(), file_size) catch |err| {
const filename_string_index = try self.diagnostics.putString(filename_utf8);
return self.addErrorDetailsAndFail(.{
.err = .bmp_read_error,
@ -921,18 +923,17 @@ pub const Compiler = struct {
header.data_size = bmp_bytes_to_write;
try header.write(writer, self.errContext(node.id));
try file.seekTo(bmp.file_header_len);
const file_reader = file.deprecatedReader();
try writeResourceDataNoPadding(writer, file_reader, bitmap_info.dib_header_size);
try file_reader.seekTo(bmp.file_header_len);
try writeResourceDataNoPadding(writer, &file_reader.interface, bitmap_info.dib_header_size);
if (bitmap_info.getBitmasksByteLen() > 0) {
try writeResourceDataNoPadding(writer, file_reader, bitmap_info.getBitmasksByteLen());
try writeResourceDataNoPadding(writer, &file_reader.interface, bitmap_info.getBitmasksByteLen());
}
if (bitmap_info.getExpectedPaletteByteLen() > 0) {
try writeResourceDataNoPadding(writer, file_reader, @intCast(bitmap_info.getActualPaletteByteLen()));
try writeResourceDataNoPadding(writer, &file_reader.interface, @intCast(bitmap_info.getActualPaletteByteLen()));
}
try file.seekTo(bitmap_info.pixel_data_offset);
try file_reader.seekTo(bitmap_info.pixel_data_offset);
const pixel_bytes: u32 = @intCast(file_size - bitmap_info.pixel_data_offset);
try writeResourceDataNoPadding(writer, file_reader, pixel_bytes);
try writeResourceDataNoPadding(writer, &file_reader.interface, pixel_bytes);
try writeDataPadding(writer, bmp_bytes_to_write);
return;
},
@ -956,7 +957,7 @@ pub const Compiler = struct {
return;
}
header.applyMemoryFlags(node.common_resource_attributes, self.source);
const file_size = try file.getEndPos();
const file_size = try file_reader.getSize();
if (file_size > std.math.maxInt(u32)) {
return self.addErrorDetailsAndFail(.{
.err = .resource_data_size_exceeds_max,
@ -968,8 +969,9 @@ pub const Compiler = struct {
header.data_size = @intCast(file_size);
try header.write(writer, self.errContext(node.id));
var header_slurping_reader = headerSlurpingReader(148, file.deprecatedReader());
try writeResourceData(writer, header_slurping_reader.reader(), header.data_size);
var header_slurping_reader = headerSlurpingReader(148, file_reader.interface.adaptToOldInterface());
var adapter = header_slurping_reader.reader().adaptToNewApi(&.{});
try writeResourceData(writer, &adapter.new_interface, header.data_size);
try self.state.font_dir.add(self.arena, FontDir.Font{
.id = header.name_value.ordinal,
@ -992,7 +994,7 @@ pub const Compiler = struct {
}
// Fallback to just writing out the entire contents of the file
const data_size = try file.getEndPos();
const data_size = try file_reader.getSize();
if (data_size > std.math.maxInt(u32)) {
return self.addErrorDetailsAndFail(.{
.err = .resource_data_size_exceeds_max,
@ -1002,7 +1004,7 @@ pub const Compiler = struct {
// We now know that the data size will fit in a u32
header.data_size = @intCast(data_size);
try header.write(writer, self.errContext(node.id));
try writeResourceData(writer, file.deprecatedReader(), header.data_size);
try writeResourceData(writer, &file_reader.interface, header.data_size);
}
fn iconReadError(
@ -1250,8 +1252,8 @@ pub const Compiler = struct {
const data_len: u32 = @intCast(data_buffer.items.len);
try self.writeResourceHeader(writer, node.id, node.type, data_len, node.common_resource_attributes, self.state.language);
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
try writeResourceData(writer, data_fbs.reader(), data_len);
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
try writeResourceData(writer, &data_fbs, data_len);
}
pub fn writeResourceHeader(self: *Compiler, writer: anytype, id_token: Token, type_token: Token, data_size: u32, common_resource_attributes: []Token, language: res.Language) !void {
@ -1266,13 +1268,15 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(id_token));
}
pub fn writeResourceDataNoPadding(writer: anytype, data_reader: anytype, data_size: u32) !void {
var limited_reader = std.io.limitedReader(data_reader, data_size);
try limited_reader.reader().readRemaining(writer);
pub fn writeResourceDataNoPadding(writer: anytype, data_reader: *std.Io.Reader, data_size: u32) !void {
var adapted = writer.adaptToNewApi();
var buffer: [128]u8 = undefined;
adapted.new_interface.buffer = &buffer;
try data_reader.streamExact(&adapted.new_interface, data_size);
try adapted.new_interface.flush();
}
pub fn writeResourceData(writer: anytype, data_reader: anytype, data_size: u32) !void {
pub fn writeResourceData(writer: anytype, data_reader: *std.Io.Reader, data_size: u32) !void {
try writeResourceDataNoPadding(writer, data_reader, data_size);
try writeDataPadding(writer, data_size);
}
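
writeResourceDataNoPadding now bridges the old `anytype` writer to the new reader API: the legacy writer is wrapped via adaptToNewApi, given a small stack buffer to stage bytes, and the reader streams exactly `data_size` bytes through it before a final flush. A minimal sketch of the same bridge in isolation, assuming the transitional adaptToNewApi helper:

const std = @import("std");

fn copyExact(legacy_writer: anytype, src: *std.Io.Reader, n: u32) !void {
    var adapted = legacy_writer.adaptToNewApi();
    var buffer: [128]u8 = undefined;
    adapted.new_interface.buffer = &buffer; // give the adapter somewhere to stage bytes
    try src.streamExact(&adapted.new_interface, n);
    try adapted.new_interface.flush(); // drain the staging buffer into the old writer
}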
@ -1337,8 +1341,8 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
try writeResourceData(writer, data_fbs.reader(), data_size);
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
try writeResourceData(writer, &data_fbs, data_size);
}
/// Expects `data_writer` to be a LimitedWriter limited to u32, meaning all writes to
@ -1730,8 +1734,8 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
try writeResourceData(writer, data_fbs.reader(), data_size);
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
try writeResourceData(writer, &data_fbs, data_size);
}
fn writeDialogHeaderAndStrings(
@ -2044,8 +2048,8 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
try writeResourceData(writer, data_fbs.reader(), data_size);
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
try writeResourceData(writer, &data_fbs, data_size);
}
/// Weight and italic carry over from previous FONT statements within a single resource,
@ -2119,8 +2123,8 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
try writeResourceData(writer, data_fbs.reader(), data_size);
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
try writeResourceData(writer, &data_fbs, data_size);
}
/// Expects `data_writer` to be a LimitedWriter limited to u32, meaning all writes to
@ -2384,8 +2388,8 @@ pub const Compiler = struct {
try header.write(writer, self.errContext(node.id));
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
try writeResourceData(writer, data_fbs.reader(), data_size);
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
try writeResourceData(writer, &data_fbs, data_size);
}
/// Expects writer to be a LimitedWriter limited to u16, meaning all writes to
@ -3319,8 +3323,8 @@ pub const StringTable = struct {
// we fully control and know are numbers, so they have a fixed size.
try header.writeAssertNoOverflow(writer);
var data_fbs = std.io.fixedBufferStream(data_buffer.items);
try Compiler.writeResourceData(writer, data_fbs.reader(), data_size);
var data_fbs: std.Io.Reader = .fixed(data_buffer.items);
try Compiler.writeResourceData(writer, &data_fbs, data_size);
}
};

View File

@ -65,7 +65,7 @@ pub const ParseResOptions = struct {
};
/// The returned ParsedResources should be freed by calling its `deinit` function.
pub fn parseRes(allocator: Allocator, reader: anytype, options: ParseResOptions) !ParsedResources {
pub fn parseRes(allocator: Allocator, reader: *std.Io.Reader, options: ParseResOptions) !ParsedResources {
var resources = ParsedResources.init(allocator);
errdefer resources.deinit();
@ -74,7 +74,7 @@ pub fn parseRes(allocator: Allocator, reader: anytype, options: ParseResOptions)
return resources;
}
pub fn parseResInto(resources: *ParsedResources, reader: anytype, options: ParseResOptions) !void {
pub fn parseResInto(resources: *ParsedResources, reader: *std.Io.Reader, options: ParseResOptions) !void {
const allocator = resources.allocator;
var bytes_remaining: u64 = options.max_size;
{
@ -103,43 +103,38 @@ pub const ResourceAndSize = struct {
total_size: u64,
};
pub fn parseResource(allocator: Allocator, reader: anytype, max_size: u64) !ResourceAndSize {
var header_counting_reader = std.io.countingReader(reader);
const header_reader = header_counting_reader.reader();
const data_size = try header_reader.readInt(u32, .little);
const header_size = try header_reader.readInt(u32, .little);
pub fn parseResource(allocator: Allocator, reader: *std.Io.Reader, max_size: u64) !ResourceAndSize {
const data_size = try reader.takeInt(u32, .little);
const header_size = try reader.takeInt(u32, .little);
const total_size: u64 = @as(u64, header_size) + data_size;
if (total_size > max_size) return error.ImpossibleSize;
var header_bytes_available = header_size -| 8;
var type_reader = std.io.limitedReader(header_reader, header_bytes_available);
const type_value = try parseNameOrOrdinal(allocator, type_reader.reader());
const remaining_header_bytes = try reader.take(header_size -| 8);
var remaining_header_reader: std.Io.Reader = .fixed(remaining_header_bytes);
const type_value = try parseNameOrOrdinal(allocator, &remaining_header_reader);
errdefer type_value.deinit(allocator);
header_bytes_available -|= @intCast(type_value.byteLen());
var name_reader = std.io.limitedReader(header_reader, header_bytes_available);
const name_value = try parseNameOrOrdinal(allocator, name_reader.reader());
const name_value = try parseNameOrOrdinal(allocator, &remaining_header_reader);
errdefer name_value.deinit(allocator);
const padding_after_name = numPaddingBytesNeeded(@intCast(header_counting_reader.bytes_read));
try header_reader.skipBytes(padding_after_name, .{ .buf_size = 3 });
const padding_after_name = numPaddingBytesNeeded(@intCast(remaining_header_reader.seek));
try remaining_header_reader.discardAll(padding_after_name);
std.debug.assert(header_counting_reader.bytes_read % 4 == 0);
const data_version = try header_reader.readInt(u32, .little);
const memory_flags: MemoryFlags = @bitCast(try header_reader.readInt(u16, .little));
const language: Language = @bitCast(try header_reader.readInt(u16, .little));
const version = try header_reader.readInt(u32, .little);
const characteristics = try header_reader.readInt(u32, .little);
std.debug.assert(remaining_header_reader.seek % 4 == 0);
const data_version = try remaining_header_reader.takeInt(u32, .little);
const memory_flags: MemoryFlags = @bitCast(try remaining_header_reader.takeInt(u16, .little));
const language: Language = @bitCast(try remaining_header_reader.takeInt(u16, .little));
const version = try remaining_header_reader.takeInt(u32, .little);
const characteristics = try remaining_header_reader.takeInt(u32, .little);
const header_bytes_read = header_counting_reader.bytes_read;
if (header_size != header_bytes_read) return error.HeaderSizeMismatch;
if (remaining_header_reader.seek != remaining_header_reader.end) return error.HeaderSizeMismatch;
const data = try allocator.alloc(u8, data_size);
errdefer allocator.free(data);
try reader.readNoEof(data);
try reader.readSliceAll(data);
const padding_after_data = numPaddingBytesNeeded(@intCast(data_size));
try reader.skipBytes(padding_after_data, .{ .buf_size = 3 });
try reader.discardAll(padding_after_data);
return .{
.resource = .{
@ -156,10 +151,10 @@ pub fn parseResource(allocator: Allocator, reader: anytype, max_size: u64) !Reso
};
}
pub fn parseNameOrOrdinal(allocator: Allocator, reader: anytype) !NameOrOrdinal {
const first_code_unit = try reader.readInt(u16, .little);
pub fn parseNameOrOrdinal(allocator: Allocator, reader: *std.Io.Reader) !NameOrOrdinal {
const first_code_unit = try reader.takeInt(u16, .little);
if (first_code_unit == 0xFFFF) {
const ordinal_value = try reader.readInt(u16, .little);
const ordinal_value = try reader.takeInt(u16, .little);
return .{ .ordinal = ordinal_value };
}
var name_buf = try std.ArrayListUnmanaged(u16).initCapacity(allocator, 16);
@ -167,7 +162,7 @@ pub fn parseNameOrOrdinal(allocator: Allocator, reader: anytype) !NameOrOrdinal
var code_unit = first_code_unit;
while (code_unit != 0) {
try name_buf.append(allocator, std.mem.nativeToLittle(u16, code_unit));
code_unit = try reader.readInt(u16, .little);
code_unit = try reader.takeInt(u16, .little);
}
return .{ .name = try name_buf.toOwnedSliceSentinel(allocator, 0) };
}
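
The parsing above is now driven by *std.Io.Reader: `.fixed` turns a byte slice into a reader, takeInt consumes fixed-width integers, and the `seek`/`end` fields replace the old counting reader for the header-size check. A minimal sketch, assuming that API:

const std = @import("std");

test "parse a little-endian header from a fixed reader" {
    var r: std.Io.Reader = .fixed(&.{ 0x10, 0x00, 0x00, 0x00, 0x02, 0x00 });
    const data_size = try r.takeInt(u32, .little);
    const flags = try r.takeInt(u16, .little);
    try std.testing.expectEqual(@as(u32, 16), data_size);
    try std.testing.expectEqual(@as(u16, 2), flags);
    try std.testing.expect(r.seek == r.end); // all header bytes consumed
}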

View File

@ -1112,7 +1112,7 @@ const CorrespondingLines = struct {
try corresponding_lines.writeLineFromStreamVerbatim(
writer,
corresponding_lines.buffered_reader.reader(),
corresponding_lines.buffered_reader.interface.adaptToOldInterface(),
corresponding_span.start_line,
);
@ -1155,7 +1155,7 @@ const CorrespondingLines = struct {
try self.writeLineFromStreamVerbatim(
writer,
self.buffered_reader.reader(),
self.buffered_reader.interface.adaptToOldInterface(),
self.line_num,
);

View File

@ -14,8 +14,9 @@ pub fn read(allocator: std.mem.Allocator, reader: anytype, max_size: u64) ReadEr
// Some Reader implementations have an empty ReadError error set which would
// cause 'unreachable else' if we tried to use an else in the switch, so we
// need to detect this case and not try to translate to ReadError
const anyerror_reader_errorset = @TypeOf(reader).Error == anyerror;
const empty_reader_errorset = @typeInfo(@TypeOf(reader).Error).error_set == null or @typeInfo(@TypeOf(reader).Error).error_set.?.len == 0;
if (empty_reader_errorset) {
if (empty_reader_errorset and !anyerror_reader_errorset) {
return readAnyError(allocator, reader, max_size) catch |err| switch (err) {
error.EndOfStream => error.UnexpectedEOF,
else => |e| return e,

View File

@ -325,8 +325,8 @@ pub fn main() !void {
std.debug.assert(options.output_format == .coff);
// TODO: Maybe use a buffered file reader instead of reading file into memory -> fbs
var fbs = std.io.fixedBufferStream(res_data.bytes);
break :resources cvtres.parseRes(allocator, fbs.reader(), .{ .max_size = res_data.bytes.len }) catch |err| {
var res_reader: std.Io.Reader = .fixed(res_data.bytes);
break :resources cvtres.parseRes(allocator, &res_reader, .{ .max_size = res_data.bytes.len }) catch |err| {
// TODO: Better errors
try error_handler.emitMessage(allocator, .err, "unable to parse res from '{s}': {s}", .{ res_stream.name, @errorName(err) });
std.process.exit(1);

View File

@ -129,6 +129,11 @@
domSearch.addEventListener('input', onSearchChange, false);
window.addEventListener('keydown', onWindowKeyDown, false);
onHashChange(null);
if (domSearch.value) {
// user started typing a search query while the page was loading
curSearchIndex = -1;
startAsyncSearch();
}
});
});
@ -643,6 +648,7 @@
}
function onHashChange(state) {
// Use a non-null state value to prevent the window scrolling if the user goes back to this history entry.
history.replaceState({}, "");
navigate(location.hash);
if (state == null) window.scrollTo({top: 0});
@ -650,13 +656,11 @@
function onPopState(ev) {
onHashChange(ev.state);
syncDomSearch();
}
function navigate(location_hash) {
updateCurNav(location_hash);
if (domSearch.value !== curNavSearch) {
domSearch.value = curNavSearch;
}
render();
if (imFeelingLucky) {
imFeelingLucky = false;
@ -664,6 +668,12 @@
}
}
function syncDomSearch() {
if (domSearch.value !== curNavSearch) {
domSearch.value = curNavSearch;
}
}
function activateSelectedResult() {
if (domSectSearchResults.classList.contains("hidden")) {
return;

View File

@ -143,13 +143,12 @@ fn mainImpl() !void {
var parser = try Parser.init(gpa);
defer parser.deinit();
var stdin_buf = std.io.bufferedReader(std.fs.File.stdin().deprecatedReader());
var line_buf = std.ArrayList(u8).init(gpa);
defer line_buf.deinit();
while (stdin_buf.reader().streamUntilDelimiter(line_buf.writer(), '\n', null)) {
if (line_buf.getLastOrNull() == '\r') _ = line_buf.pop();
try parser.feedLine(line_buf.items);
line_buf.clearRetainingCapacity();
var stdin_buffer: [1024]u8 = undefined;
var stdin_reader = std.fs.File.stdin().reader(&stdin_buffer);
while (stdin_reader.interface.takeDelimiterExclusive('\n')) |line| {
const trimmed = std.mem.trimRight(u8, line, "\r");
try parser.feedLine(trimmed);
} else |err| switch (err) {
error.EndOfStream => {},
else => |e| return e,

View File

@ -29,13 +29,14 @@ const Node = Document.Node;
const ExtraIndex = Document.ExtraIndex;
const ExtraData = Document.ExtraData;
const StringIndex = Document.StringIndex;
const ArrayList = std.ArrayListUnmanaged;
nodes: Node.List = .{},
extra: std.ArrayListUnmanaged(u32) = .empty,
scratch_extra: std.ArrayListUnmanaged(u32) = .empty,
string_bytes: std.ArrayListUnmanaged(u8) = .empty,
scratch_string: std.ArrayListUnmanaged(u8) = .empty,
pending_blocks: std.ArrayListUnmanaged(Block) = .empty,
extra: ArrayList(u32) = .empty,
scratch_extra: ArrayList(u32) = .empty,
string_bytes: ArrayList(u8) = .empty,
scratch_string: ArrayList(u8) = .empty,
pending_blocks: ArrayList(Block) = .empty,
allocator: Allocator,
const Parser = @This();
@ -86,7 +87,8 @@ const Block = struct {
continuation_indent: usize,
},
table: struct {
column_alignments: std.BoundedArray(Node.TableCellAlignment, max_table_columns) = .{},
column_alignments_buffer: [max_table_columns]Node.TableCellAlignment,
column_alignments_len: usize,
},
heading: struct {
/// Between 1 and 6, inclusive.
@ -354,7 +356,8 @@ const BlockStart = struct {
continuation_indent: usize,
},
table_row: struct {
cells: std.BoundedArray([]const u8, max_table_columns),
cells_buffer: [max_table_columns][]const u8,
cells_len: usize,
},
heading: struct {
/// Between 1 and 6, inclusive.
@ -422,7 +425,8 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
try p.pending_blocks.append(p.allocator, .{
.tag = .table,
.data = .{ .table = .{
.column_alignments = .{},
.column_alignments_buffer = undefined,
.column_alignments_len = 0,
} },
.string_start = p.scratch_string.items.len,
.extra_start = p.scratch_extra.items.len,
@ -431,15 +435,19 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
const current_row = p.scratch_extra.items.len - p.pending_blocks.getLast().extra_start;
if (current_row <= 1) {
if (parseTableHeaderDelimiter(block_start.data.table_row.cells)) |alignments| {
p.pending_blocks.items[p.pending_blocks.items.len - 1].data.table.column_alignments = alignments;
var buffer: [max_table_columns]Node.TableCellAlignment = undefined;
const table_row = &block_start.data.table_row;
if (parseTableHeaderDelimiter(table_row.cells_buffer[0..table_row.cells_len], &buffer)) |alignments| {
const table = &p.pending_blocks.items[p.pending_blocks.items.len - 1].data.table;
@memcpy(table.column_alignments_buffer[0..alignments.len], alignments);
table.column_alignments_len = alignments.len;
if (current_row == 1) {
// We need to go back and mark the header row and its column
// alignments.
const datas = p.nodes.items(.data);
const header_data = datas[p.scratch_extra.getLast()];
for (p.extraChildren(header_data.container.children), 0..) |header_cell, i| {
const alignment = if (i < alignments.len) alignments.buffer[i] else .unset;
const alignment = if (i < alignments.len) alignments[i] else .unset;
const cell_data = &datas[@intFromEnum(header_cell)].table_cell;
cell_data.info.alignment = alignment;
cell_data.info.header = true;
@ -480,8 +488,10 @@ fn appendBlockStart(p: *Parser, block_start: BlockStart) !void {
// available in the BlockStart. We can immediately parse and append
// these children now.
const containing_table = p.pending_blocks.items[p.pending_blocks.items.len - 2];
const column_alignments = containing_table.data.table.column_alignments.slice();
for (block_start.data.table_row.cells.slice(), 0..) |cell_content, i| {
const table = &containing_table.data.table;
const column_alignments = table.column_alignments_buffer[0..table.column_alignments_len];
const table_row = &block_start.data.table_row;
for (table_row.cells_buffer[0..table_row.cells_len], 0..) |cell_content, i| {
const cell_children = try p.parseInlines(cell_content);
const alignment = if (i < column_alignments.len) column_alignments[i] else .unset;
const cell = try p.addNode(.{
@ -523,7 +533,8 @@ fn startBlock(p: *Parser, line: []const u8) !?BlockStart {
return .{
.tag = .table_row,
.data = .{ .table_row = .{
.cells = table_row.cells,
.cells_buffer = table_row.cells_buffer,
.cells_len = table_row.cells_len,
} },
.rest = "",
};
@ -606,7 +617,8 @@ fn startListItem(unindented_line: []const u8) ?ListItemStart {
}
const TableRowStart = struct {
cells: std.BoundedArray([]const u8, max_table_columns),
cells_buffer: [max_table_columns][]const u8,
cells_len: usize,
};
fn startTableRow(unindented_line: []const u8) ?TableRowStart {
@ -615,7 +627,8 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
mem.endsWith(u8, unindented_line, "\\|") or
!mem.endsWith(u8, unindented_line, "|")) return null;
var cells: std.BoundedArray([]const u8, max_table_columns) = .{};
var cells_buffer: [max_table_columns][]const u8 = undefined;
var cells: ArrayList([]const u8) = .initBuffer(&cells_buffer);
const table_row_content = unindented_line[1 .. unindented_line.len - 1];
var cell_start: usize = 0;
var i: usize = 0;
@ -623,7 +636,7 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
switch (table_row_content[i]) {
'\\' => i += 1,
'|' => {
cells.append(table_row_content[cell_start..i]) catch return null;
cells.appendBounded(table_row_content[cell_start..i]) catch return null;
cell_start = i + 1;
},
'`' => {
@ -641,20 +654,21 @@ fn startTableRow(unindented_line: []const u8) ?TableRowStart {
else => {},
}
}
cells.append(table_row_content[cell_start..]) catch return null;
cells.appendBounded(table_row_content[cell_start..]) catch return null;
return .{ .cells = cells };
return .{ .cells_buffer = cells_buffer, .cells_len = cells.items.len };
}
fn parseTableHeaderDelimiter(
row_cells: std.BoundedArray([]const u8, max_table_columns),
) ?std.BoundedArray(Node.TableCellAlignment, max_table_columns) {
var alignments: std.BoundedArray(Node.TableCellAlignment, max_table_columns) = .{};
for (row_cells.slice()) |content| {
row_cells: []const []const u8,
buffer: []Node.TableCellAlignment,
) ?[]Node.TableCellAlignment {
var alignments: ArrayList(Node.TableCellAlignment) = .initBuffer(buffer);
for (row_cells) |content| {
const alignment = parseTableHeaderDelimiterCell(content) orelse return null;
alignments.appendAssumeCapacity(alignment);
}
return alignments;
return alignments.items;
}
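
parseTableHeaderDelimiter now fills a caller-provided buffer and returns a slice of it, replacing the by-value BoundedArray. A minimal sketch of that signature style, with a hypothetical splitCells helper:

const std = @import("std");

/// Hypothetical helper: writes each trimmed cell into `buffer` and returns the
/// initialized prefix, or null if the row has too many cells.
fn splitCells(row: []const u8, buffer: [][]const u8) ?[]const []const u8 {
    var cells = std.ArrayListUnmanaged([]const u8).initBuffer(buffer);
    var it = std.mem.splitScalar(u8, row, '|');
    while (it.next()) |cell| {
        cells.appendBounded(std.mem.trim(u8, cell, " ")) catch return null; // buffer too small
    }
    return cells.items;
}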
fn parseTableHeaderDelimiterCell(content: []const u8) ?Node.TableCellAlignment {
@ -928,8 +942,8 @@ const InlineParser = struct {
parent: *Parser,
content: []const u8,
pos: usize = 0,
pending_inlines: std.ArrayListUnmanaged(PendingInline) = .empty,
completed_inlines: std.ArrayListUnmanaged(CompletedInline) = .empty,
pending_inlines: ArrayList(PendingInline) = .empty,
completed_inlines: ArrayList(CompletedInline) = .empty,
const PendingInline = struct {
tag: Tag,

View File

@ -234,7 +234,7 @@ pub const Previous = struct {
};
pub fn sendUpdate(
fuzz: *Fuzz,
socket: *std.http.WebSocket,
socket: *std.http.Server.WebSocket,
prev: *Previous,
) !void {
fuzz.coverage_mutex.lock();
@ -263,36 +263,36 @@ pub fn sendUpdate(
.string_bytes_len = @intCast(coverage_map.coverage.string_bytes.items.len),
.start_timestamp = coverage_map.start_timestamp,
};
const iovecs: [5]std.posix.iovec_const = .{
makeIov(@ptrCast(&header)),
makeIov(@ptrCast(coverage_map.coverage.directories.keys())),
makeIov(@ptrCast(coverage_map.coverage.files.keys())),
makeIov(@ptrCast(coverage_map.source_locations)),
makeIov(coverage_map.coverage.string_bytes.items),
var iovecs: [5][]const u8 = .{
@ptrCast(&header),
@ptrCast(coverage_map.coverage.directories.keys()),
@ptrCast(coverage_map.coverage.files.keys()),
@ptrCast(coverage_map.source_locations),
coverage_map.coverage.string_bytes.items,
};
try socket.writeMessagev(&iovecs, .binary);
try socket.writeMessageVec(&iovecs, .binary);
}
const header: abi.CoverageUpdateHeader = .{
.n_runs = n_runs,
.unique_runs = unique_runs,
};
const iovecs: [2]std.posix.iovec_const = .{
makeIov(@ptrCast(&header)),
makeIov(@ptrCast(seen_pcs)),
var iovecs: [2][]const u8 = .{
@ptrCast(&header),
@ptrCast(seen_pcs),
};
try socket.writeMessagev(&iovecs, .binary);
try socket.writeMessageVec(&iovecs, .binary);
prev.unique_runs = unique_runs;
}
if (prev.entry_points != coverage_map.entry_points.items.len) {
const header: abi.EntryPointHeader = .init(@intCast(coverage_map.entry_points.items.len));
const iovecs: [2]std.posix.iovec_const = .{
makeIov(@ptrCast(&header)),
makeIov(@ptrCast(coverage_map.entry_points.items)),
var iovecs: [2][]const u8 = .{
@ptrCast(&header),
@ptrCast(coverage_map.entry_points.items),
};
try socket.writeMessagev(&iovecs, .binary);
try socket.writeMessageVec(&iovecs, .binary);
prev.entry_points = coverage_map.entry_points.items.len;
}
@ -448,10 +448,3 @@ fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReporte
}
try coverage_map.entry_points.append(fuzz.ws.gpa, @intCast(index));
}
fn makeIov(s: []const u8) std.posix.iovec_const {
return .{
.base = s.ptr,
.len = s.len,
};
}
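
The websocket update now builds its scatter-gather list from plain `[]const u8` slices and sends them with writeMessageVec, making the makeIov helper unnecessary; @ptrCast reinterprets a pointer-to-struct as its underlying bytes. A minimal sketch under those assumptions (the Header type here is hypothetical):

const std = @import("std");

const Header = extern struct { n_runs: u64, unique_runs: u64 };

fn sendCounts(socket: *std.http.Server.WebSocket, header: *const Header, payload: []const u8) !void {
    var iovecs: [2][]const u8 = .{
        @ptrCast(header), // reinterpret the header struct as bytes
        payload,
    };
    try socket.writeMessageVec(&iovecs, .binary);
}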

View File

@ -1851,7 +1851,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const maybe_output_dir = step.evalZigProcess(
zig_args,
options.progress_node,
(b.graph.incremental == true) and options.watch,
(b.graph.incremental == true) and (options.watch or options.web_server != null),
options.web_server,
options.gpa,
) catch |err| switch (err) {

View File

@ -1,13 +1,18 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
const Watch = @This();
const Step = std.Build.Step;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fatal = std.process.fatal;
const Watch = @This();
const FsEvents = @import("Watch/FsEvents.zig");
dir_table: DirTable,
os: Os,
/// The number to show as the number of directories being watched.
dir_count: usize,
// These fields are common to most implementations so are kept here for simplicity.
// They are `undefined` on implementations which do not use them.
dir_table: DirTable,
generation: Generation,
pub const have_impl = Os != void;
@ -97,6 +102,7 @@ const Os = switch (builtin.os.tag) {
fn init() !Watch {
return .{
.dir_table = .{},
.dir_count = 0,
.os = switch (builtin.os.tag) {
.linux => .{
.handle_table = .{},
@ -273,6 +279,7 @@ const Os = switch (builtin.os.tag) {
}
w.generation +%= 1;
}
w.dir_count = w.dir_table.count();
}
fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
@ -408,6 +415,7 @@ const Os = switch (builtin.os.tag) {
fn init() !Watch {
return .{
.dir_table = .{},
.dir_count = 0,
.os = switch (builtin.os.tag) {
.windows => .{
.handle_table = .{},
@ -572,6 +580,7 @@ const Os = switch (builtin.os.tag) {
}
w.generation +%= 1;
}
w.dir_count = w.dir_table.count();
}
fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
@ -605,7 +614,7 @@ const Os = switch (builtin.os.tag) {
};
}
},
.dragonfly, .freebsd, .netbsd, .openbsd, .ios, .macos, .tvos, .visionos, .watchos => struct {
.dragonfly, .freebsd, .netbsd, .openbsd, .ios, .tvos, .visionos, .watchos => struct {
const posix = std.posix;
kq_fd: i32,
@ -639,6 +648,7 @@ const Os = switch (builtin.os.tag) {
errdefer posix.close(kq_fd);
return .{
.dir_table = .{},
.dir_count = 0,
.os = .{
.kq_fd = kq_fd,
.handles = .empty,
@ -769,6 +779,7 @@ const Os = switch (builtin.os.tag) {
}
w.generation +%= 1;
}
w.dir_count = w.dir_table.count();
}
fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
@ -812,6 +823,28 @@ const Os = switch (builtin.os.tag) {
return any_dirty;
}
},
.macos => struct {
fse: FsEvents,
fn init() !Watch {
return .{
.os = .{ .fse = try .init() },
.dir_count = 0,
.dir_table = undefined,
.generation = undefined,
};
}
fn update(w: *Watch, gpa: Allocator, steps: []const *Step) !void {
try w.os.fse.setPaths(gpa, steps);
w.dir_count = w.os.fse.watch_roots.len;
}
fn wait(w: *Watch, gpa: Allocator, timeout: Timeout) !WaitResult {
return w.os.fse.wait(gpa, switch (timeout) {
.none => null,
.ms => |ms| @as(u64, ms) * std.time.ns_per_ms,
});
}
},
else => void,
};

View File

@ -0,0 +1,493 @@
//! An implementation of file-system watching based on the `FSEventStream` API in macOS.
//! While macOS supports kqueue, it does not allow detecting changes to files without
//! placing watches on each individual file, meaning FD limits are reached incredibly
//! quickly. The File System Events API works differently: it implements *recursive*
//! directory watches, managed by a system service. Rather than being in libc, the API is
//! exposed by the CoreServices framework. To avoid a compile dependency on the framework
//! bundle, we dynamically load CoreServices with `std.DynLib`.
//!
//! While the logic in this file *is* specialized to `std.Build.Watch`, efforts have been
//! made to keep that specialization to a minimum. Other use cases could be served with
//! relatively minimal modifications to the `watch_paths` field and its usages (in
//! particular the `setPaths` function). We avoid using the global GCD dispatch queue in
//! favour of creating our own and synchronizing with an explicit semaphore, meaning this
//! logic is thread-safe and does not affect process-global state.
//!
//! In theory, this API is quite good at avoiding filesystem race conditions. In practice,
//! the logic that would avoid them is currently disabled, because the build system kind
//! of relies on them at the time of writing to avoid redundant work -- see the comment at
//! the top of `wait` for details.
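
A minimal sketch of the dynamic-loading approach described above, with a hypothetical two-symbol table: each field of the struct names one exported symbol, and an `inline for` over the fields resolves them all, failing init if any is missing:

const std = @import("std");

const Symbols = struct {
    FSEventsGetCurrentEventId: *const fn () callconv(.c) u64,
    CFRelease: *const fn (cf: *const anyopaque) callconv(.c) void,
};

fn loadCoreServices() !struct { lib: std.DynLib, symbols: Symbols } {
    var lib = try std.DynLib.open("/System/Library/Frameworks/CoreServices.framework/CoreServices");
    errdefer lib.close();
    var symbols: Symbols = undefined;
    inline for (@typeInfo(Symbols).@"struct".fields) |f| {
        @field(symbols, f.name) = lib.lookup(f.type, f.name) orelse return error.MissingSymbol;
    }
    return .{ .lib = lib, .symbols = symbols };
}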
const enable_debug_logs = false;
core_services: std.DynLib,
resolved_symbols: ResolvedSymbols,
paths_arena: std.heap.ArenaAllocator.State,
/// The roots of the recursive watches. FSEvents has relatively small limits on the number
/// of watched paths, so this slice must not be too long. The paths themselves are allocated
/// into `paths_arena`, but this slice is allocated into the GPA.
watch_roots: [][:0]const u8,
/// All of the paths being watched. Value is the set of steps which depend on the file/directory.
/// Keys and values are in `paths_arena`, but this map is allocated into the GPA.
watch_paths: std.StringArrayHashMapUnmanaged([]const *std.Build.Step),
/// The semaphore we use to block the thread calling `wait` until the callback determines a relevant
/// event has occurred. This is retained across `wait` calls for simplicity and efficiency.
waiting_semaphore: dispatch_semaphore_t,
/// This dispatch queue is created by us and executes serially. It exists exclusively to trigger the
/// callbacks of the FSEventStream we create. This is not in use outside of `wait`, but is retained
/// across `wait` calls for simplicity and efficiency.
dispatch_queue: dispatch_queue_t,
/// In theory, this field avoids race conditions. In practice, it is essentially unused at the time
/// of writing. See the comment at the start of `wait` for details.
since_event: FSEventStreamEventId,
/// All of the symbols we pull from the `dlopen`ed CoreServices framework. If any of these symbols
/// is not present, `init` will close the framework and return an error.
const ResolvedSymbols = struct {
FSEventStreamCreate: *const fn (
allocator: CFAllocatorRef,
callback: FSEventStreamCallback,
ctx: ?*const FSEventStreamContext,
paths_to_watch: CFArrayRef,
since_when: FSEventStreamEventId,
latency: CFTimeInterval,
flags: FSEventStreamCreateFlags,
) callconv(.c) FSEventStreamRef,
FSEventStreamSetDispatchQueue: *const fn (stream: FSEventStreamRef, queue: dispatch_queue_t) callconv(.c) void,
FSEventStreamStart: *const fn (stream: FSEventStreamRef) callconv(.c) bool,
FSEventStreamStop: *const fn (stream: FSEventStreamRef) callconv(.c) void,
FSEventStreamInvalidate: *const fn (stream: FSEventStreamRef) callconv(.c) void,
FSEventStreamRelease: *const fn (stream: FSEventStreamRef) callconv(.c) void,
FSEventStreamGetLatestEventId: *const fn (stream: ConstFSEventStreamRef) callconv(.c) FSEventStreamEventId,
FSEventsGetCurrentEventId: *const fn () callconv(.c) FSEventStreamEventId,
CFRelease: *const fn (cf: *const anyopaque) callconv(.c) void,
CFArrayCreate: *const fn (
allocator: CFAllocatorRef,
values: [*]const usize,
num_values: CFIndex,
call_backs: ?*const CFArrayCallBacks,
) callconv(.c) CFArrayRef,
CFStringCreateWithCString: *const fn (
alloc: CFAllocatorRef,
c_str: [*:0]const u8,
encoding: CFStringEncoding,
) callconv(.c) CFStringRef,
CFAllocatorCreate: *const fn (allocator: CFAllocatorRef, context: *const CFAllocatorContext) callconv(.c) CFAllocatorRef,
kCFAllocatorUseContext: *const CFAllocatorRef,
};
pub fn init() error{ OpenFrameworkFailed, MissingCoreServicesSymbol }!FsEvents {
var core_services = std.DynLib.open("/System/Library/Frameworks/CoreServices.framework/CoreServices") catch
return error.OpenFrameworkFailed;
errdefer core_services.close();
var resolved_symbols: ResolvedSymbols = undefined;
inline for (@typeInfo(ResolvedSymbols).@"struct".fields) |f| {
@field(resolved_symbols, f.name) = core_services.lookup(f.type, f.name) orelse return error.MissingCoreServicesSymbol;
}
return .{
.core_services = core_services,
.resolved_symbols = resolved_symbols,
.paths_arena = .{},
.watch_roots = &.{},
.watch_paths = .empty,
.waiting_semaphore = dispatch_semaphore_create(0),
.dispatch_queue = dispatch_queue_create("zig-watch", .SERIAL),
// Not `.since_now`, because this means we can init `FsEvents` *before* we do work in order
// to notice any changes which happened during said work.
.since_event = resolved_symbols.FSEventsGetCurrentEventId(),
};
}
pub fn deinit(fse: *FsEvents, gpa: Allocator) void {
dispatch_release(fse.waiting_semaphore);
dispatch_release(fse.dispatch_queue);
fse.core_services.close();
gpa.free(fse.watch_roots);
fse.watch_paths.deinit(gpa);
{
var paths_arena = fse.paths_arena.promote(gpa);
paths_arena.deinit();
}
}
pub fn setPaths(fse: *FsEvents, gpa: Allocator, steps: []const *std.Build.Step) !void {
var paths_arena_instance = fse.paths_arena.promote(gpa);
defer fse.paths_arena = paths_arena_instance.state;
const paths_arena = paths_arena_instance.allocator();
const cwd_path = try std.process.getCwdAlloc(gpa);
defer gpa.free(cwd_path);
var need_dirs: std.StringArrayHashMapUnmanaged(void) = .empty;
defer need_dirs.deinit(gpa);
fse.watch_paths.clearRetainingCapacity();
// We take `step` by pointer for a slight memory optimization in a moment.
for (steps) |*step| {
for (step.*.inputs.table.keys(), step.*.inputs.table.values()) |path, *files| {
const resolved_dir = try std.fs.path.resolvePosix(paths_arena, &.{ cwd_path, path.root_dir.path orelse ".", path.sub_path });
try need_dirs.put(gpa, resolved_dir, {});
for (files.items) |file_name| {
const watch_path = if (std.mem.eql(u8, file_name, "."))
resolved_dir
else
try std.fs.path.join(paths_arena, &.{ resolved_dir, file_name });
const gop = try fse.watch_paths.getOrPut(gpa, watch_path);
if (gop.found_existing) {
const old_steps = gop.value_ptr.*;
const new_steps = try paths_arena.alloc(*std.Build.Step, old_steps.len + 1);
@memcpy(new_steps[0..old_steps.len], old_steps);
new_steps[old_steps.len] = step.*;
gop.value_ptr.* = new_steps;
} else {
// This is why we captured `step` by pointer! We can avoid allocating a slice of one
// step in the arena in the common case where a file is referenced by only one step.
gop.value_ptr.* = step[0..1];
}
}
}
}
{
// There's no point watching directories inside other watched ones (e.g. "/foo" and "/foo/bar").
// To eliminate these, we'll re-add directories in lexicographic order, so every directory is
// preceded by its ancestors, with a redundancy check.
const old_dirs = try gpa.dupe([]const u8, need_dirs.keys());
defer gpa.free(old_dirs);
std.mem.sort([]const u8, old_dirs, {}, struct {
fn lessThan(ctx: void, a: []const u8, b: []const u8) bool {
_ = ctx;
return std.mem.lessThan(u8, a, b);
}
}.lessThan);
need_dirs.clearRetainingCapacity();
for (old_dirs) |dir_path| {
var it: std.fs.path.ComponentIterator(.posix, u8) = try .init(dir_path);
while (it.next()) |component| {
if (need_dirs.contains(component.path)) {
// this path is '/foo/bar/qux', but '/foo' or '/foo/bar' was already added
break;
}
} else {
need_dirs.putAssumeCapacityNoClobber(dir_path, {});
}
}
}
// `need_dirs` is now a set of directories to watch with no redundancy. In practice, this is very
// likely to have reduced it to a quite small set (e.g. it'll typically coalesce a full `src/`
// directory into one entry). However, the FSEventStream API has a fairly low undocumented limit
// on total watches (supposedly 4096), so we should handle the case where we exceed it. To be
// safe, because this API can be a little unpredictable, we'll cap ourselves a little *below*
// that known limit.
if (need_dirs.count() > 2048) {
// Fallback: watch the whole filesystem. This is excessive, but... it *works* :P
if (enable_debug_logs) watch_log.debug("too many dirs; recursively watching root", .{});
fse.watch_roots = try gpa.realloc(fse.watch_roots, 1);
fse.watch_roots[0] = "/";
} else {
fse.watch_roots = try gpa.realloc(fse.watch_roots, need_dirs.count());
for (fse.watch_roots, need_dirs.keys()) |*out, in| {
out.* = try paths_arena.dupeZ(u8, in);
}
}
if (enable_debug_logs) {
watch_log.debug("watching {d} paths using {d} recursive watches:", .{ fse.watch_paths.count(), fse.watch_roots.len });
for (fse.watch_roots) |dir_path| {
watch_log.debug("- '{s}'", .{dir_path});
}
}
}
pub fn wait(fse: *FsEvents, gpa: Allocator, timeout_ns: ?u64) error{ OutOfMemory, StartFailed }!std.Build.Watch.WaitResult {
if (fse.watch_roots.len == 0) @panic("nothing to watch");
const rs = fse.resolved_symbols;
// At the time of writing, using `since_event` in the obvious way causes redundant rebuilds
// to occur, because one step modifies a file which is an input to another step. The solution
// to this problem will probably be either:
//
// a) Don't include the output of one step as a watch input of another; only mark external
// files as watch inputs. Or...
//
// b) Note the current event ID when a step begins, and disregard events preceding that ID
// when considering whether to dirty that step in `eventCallback`.
//
// For now, to avoid the redundant rebuilds, we bypass this `since_event` mechanism. This does
// introduce race conditions, but the other `std.Build.Watch` implementations suffer from those
// too at the time of writing, so this is kind of expected.
fse.since_event = .since_now;
const cf_allocator = rs.CFAllocatorCreate(rs.kCFAllocatorUseContext.*, &.{
.version = 0,
.info = @constCast(&gpa),
.retain = null,
.release = null,
.copy_description = null,
.allocate = &cf_alloc_callbacks.allocate,
.reallocate = &cf_alloc_callbacks.reallocate,
.deallocate = &cf_alloc_callbacks.deallocate,
.preferred_size = null,
}) orelse return error.OutOfMemory;
defer rs.CFRelease(cf_allocator);
const cf_paths = try gpa.alloc(?CFStringRef, fse.watch_roots.len);
@memset(cf_paths, null);
defer {
for (cf_paths) |o| if (o) |p| rs.CFRelease(p);
gpa.free(cf_paths);
}
for (fse.watch_roots, cf_paths) |raw_path, *cf_path| {
cf_path.* = rs.CFStringCreateWithCString(cf_allocator, raw_path, .utf8);
}
const cf_paths_array = rs.CFArrayCreate(cf_allocator, @ptrCast(cf_paths), @intCast(cf_paths.len), null);
defer rs.CFRelease(cf_paths_array);
const callback_ctx: EventCallbackCtx = .{
.fse = fse,
.gpa = gpa,
};
const event_stream = rs.FSEventStreamCreate(
null,
&eventCallback,
&.{
.version = 0,
.info = @constCast(&callback_ctx),
.retain = null,
.release = null,
.copy_description = null,
},
cf_paths_array,
fse.since_event,
0.05, // 0.05s latency; higher values increase efficiency by coalescing more events
.{ .watch_root = true, .file_events = true },
);
defer rs.FSEventStreamRelease(event_stream);
rs.FSEventStreamSetDispatchQueue(event_stream, fse.dispatch_queue);
defer rs.FSEventStreamInvalidate(event_stream);
if (!rs.FSEventStreamStart(event_stream)) return error.StartFailed;
defer rs.FSEventStreamStop(event_stream);
const result = dispatch_semaphore_wait(fse.waiting_semaphore, timeout: {
const ns = timeout_ns orelse break :timeout .forever;
break :timeout dispatch_time(.now, @intCast(ns));
});
return switch (result) {
0 => .dirty,
else => .timeout,
};
}
const cf_alloc_callbacks = struct {
const log = std.log.scoped(.cf_alloc);
fn allocate(size: CFIndex, hint: CFOptionFlags, info: ?*const anyopaque) callconv(.c) ?*const anyopaque {
if (enable_debug_logs) log.debug("allocate {d}", .{size});
_ = hint;
const gpa: *const Allocator = @ptrCast(@alignCast(info));
const mem = gpa.alignedAlloc(u8, .of(usize), @intCast(size + @sizeOf(usize))) catch return null;
const metadata: *usize = @ptrCast(mem);
metadata.* = @intCast(size);
return mem[@sizeOf(usize)..].ptr;
}
fn reallocate(ptr: ?*anyopaque, new_size: CFIndex, hint: CFOptionFlags, info: ?*const anyopaque) callconv(.c) ?*const anyopaque {
if (enable_debug_logs) log.debug("reallocate @{*} {d}", .{ ptr, new_size });
_ = hint;
if (ptr == null or new_size == 0) return null; // not a bug: documentation explicitly states that realloc on NULL should return NULL
const gpa: *const Allocator = @ptrCast(@alignCast(info));
const old_base: [*]align(@alignOf(usize)) u8 = @alignCast(@as([*]u8, @ptrCast(ptr)) - @sizeOf(usize));
const old_size = @as(*const usize, @ptrCast(old_base)).*;
const old_mem = old_base[0 .. old_size + @sizeOf(usize)];
const new_mem = gpa.realloc(old_mem, @intCast(new_size + @sizeOf(usize))) catch return null;
const metadata: *usize = @ptrCast(new_mem);
metadata.* = @intCast(new_size);
return new_mem[@sizeOf(usize)..].ptr;
}
fn deallocate(ptr: *anyopaque, info: ?*const anyopaque) callconv(.c) void {
if (enable_debug_logs) log.debug("deallocate @{*}", .{ptr});
const gpa: *const Allocator = @ptrCast(@alignCast(info));
const old_base: [*]align(@alignOf(usize)) u8 = @alignCast(@as([*]u8, @ptrCast(ptr)) - @sizeOf(usize));
const old_size = @as(*const usize, @ptrCast(old_base)).*;
const old_mem = old_base[0 .. old_size + @sizeOf(usize)];
gpa.free(old_mem);
}
};
const EventCallbackCtx = struct {
fse: *FsEvents,
gpa: Allocator,
};
fn eventCallback(
stream: ConstFSEventStreamRef,
client_callback_info: ?*anyopaque,
num_events: usize,
events_paths_ptr: *anyopaque,
events_flags_ptr: [*]const FSEventStreamEventFlags,
events_ids_ptr: [*]const FSEventStreamEventId,
) callconv(.c) void {
const ctx: *const EventCallbackCtx = @ptrCast(@alignCast(client_callback_info));
const fse = ctx.fse;
const gpa = ctx.gpa;
const rs = fse.resolved_symbols;
const events_paths_ptr_casted: [*]const [*:0]const u8 = @ptrCast(@alignCast(events_paths_ptr));
const events_paths = events_paths_ptr_casted[0..num_events];
const events_ids = events_ids_ptr[0..num_events];
const events_flags = events_flags_ptr[0..num_events];
var any_dirty = false;
for (events_paths, events_ids, events_flags) |event_path_nts, event_id, event_flags| {
_ = event_id;
if (event_flags.history_done) continue; // sentinel
const event_path = std.mem.span(event_path_nts);
switch (event_flags.must_scan_sub_dirs) {
false => {
if (fse.watch_paths.get(event_path)) |steps| {
assert(steps.len > 0);
for (steps) |s| dirtyStep(s, gpa, &any_dirty);
}
if (std.fs.path.dirname(event_path)) |event_dirname| {
// Modifying '/foo/bar' triggers the watch on '/foo'.
if (fse.watch_paths.get(event_dirname)) |steps| {
assert(steps.len > 0);
for (steps) |s| dirtyStep(s, gpa, &any_dirty);
}
}
},
true => {
// This is unlikely, but can occasionally happen when bottlenecked: events have been
// coalesced into one. We want to see if any of these events are actually relevant
// to us. The only way we can reasonably do that in this rare edge case is iterate
// the watch paths and see if any is under this directory. That's acceptable because
// we would otherwise kick off a rebuild which would be clearing those paths anyway.
const changed_path = std.fs.path.dirname(event_path) orelse event_path;
for (fse.watch_paths.keys(), fse.watch_paths.values()) |watching_path, steps| {
if (dirStartsWith(watching_path, changed_path)) {
for (steps) |s| dirtyStep(s, gpa, &any_dirty);
}
}
},
}
}
if (any_dirty) {
fse.since_event = rs.FSEventStreamGetLatestEventId(stream);
_ = dispatch_semaphore_signal(fse.waiting_semaphore);
}
}
fn dirtyStep(s: *std.Build.Step, gpa: Allocator, any_dirty: *bool) void {
if (s.state == .precheck_done) return;
s.recursiveReset(gpa);
any_dirty.* = true;
}
fn dirStartsWith(path: []const u8, prefix: []const u8) bool {
if (std.mem.eql(u8, path, prefix)) return true;
if (!std.mem.startsWith(u8, path, prefix)) return false;
if (path[prefix.len] != '/') return false; // `path` is `/foo/barx`, `prefix` is `/foo/bar`
return true; // `path` is `/foo/bar/...`, `prefix` is `/foo/bar`
}
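// Illustrative behavior of the prefix check above (a sketch, not part of
// this commit): an exact match and true children match, while a sibling
// that merely shares the textual prefix does not.
test dirStartsWith {
    try std.testing.expect(dirStartsWith("/foo/bar", "/foo/bar"));
    try std.testing.expect(dirStartsWith("/foo/bar/baz", "/foo/bar"));
    try std.testing.expect(!dirStartsWith("/foo/barx", "/foo/bar"));
}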
const dispatch_time_t = enum(u64) {
now = 0,
forever = std.math.maxInt(u64),
_,
};
extern fn dispatch_time(base: dispatch_time_t, delta_ns: i64) dispatch_time_t;
const dispatch_semaphore_t = *opaque {};
extern fn dispatch_semaphore_create(value: isize) dispatch_semaphore_t;
extern fn dispatch_semaphore_wait(dsema: dispatch_semaphore_t, timeout: dispatch_time_t) isize;
extern fn dispatch_semaphore_signal(dsema: dispatch_semaphore_t) isize;
const dispatch_queue_t = *opaque {};
const dispatch_queue_attr_t = ?*opaque {
const SERIAL: dispatch_queue_attr_t = null;
};
extern fn dispatch_queue_create(label: [*:0]const u8, attr: dispatch_queue_attr_t) dispatch_queue_t;
extern fn dispatch_release(object: *anyopaque) void;
const CFAllocatorRef = ?*const opaque {};
const CFArrayRef = *const opaque {};
const CFStringRef = *const opaque {};
const CFTimeInterval = f64;
const CFIndex = i32;
const CFOptionFlags = enum(u32) { _ };
const CFAllocatorRetainCallBack = *const fn (info: ?*const anyopaque) callconv(.c) *const anyopaque;
const CFAllocatorReleaseCallBack = *const fn (info: ?*const anyopaque) callconv(.c) void;
const CFAllocatorCopyDescriptionCallBack = *const fn (info: ?*const anyopaque) callconv(.c) CFStringRef;
const CFAllocatorAllocateCallBack = *const fn (alloc_size: CFIndex, hint: CFOptionFlags, info: ?*const anyopaque) callconv(.c) ?*const anyopaque;
const CFAllocatorReallocateCallBack = *const fn (ptr: ?*anyopaque, new_size: CFIndex, hint: CFOptionFlags, info: ?*const anyopaque) callconv(.c) ?*const anyopaque;
const CFAllocatorDeallocateCallBack = *const fn (ptr: *anyopaque, info: ?*const anyopaque) callconv(.c) void;
const CFAllocatorPreferredSizeCallBack = *const fn (size: CFIndex, hint: CFOptionFlags, info: ?*const anyopaque) callconv(.c) CFIndex;
const CFAllocatorContext = extern struct {
version: CFIndex,
info: ?*anyopaque,
retain: ?CFAllocatorRetainCallBack,
release: ?CFAllocatorReleaseCallBack,
copy_description: ?CFAllocatorCopyDescriptionCallBack,
allocate: CFAllocatorAllocateCallBack,
reallocate: ?CFAllocatorReallocateCallBack,
deallocate: ?CFAllocatorDeallocateCallBack,
preferred_size: ?CFAllocatorPreferredSizeCallBack,
};
const CFArrayCallBacks = opaque {};
const CFStringEncoding = enum(u32) {
invalid_id = std.math.maxInt(u32),
mac_roman = 0,
windows_latin_1 = 0x500,
iso_latin_1 = 0x201,
next_step_latin = 0xB01,
ascii = 0x600,
unicode = 0x100,
utf8 = 0x8000100,
non_lossy_ascii = 0xBFF,
};
const FSEventStreamRef = *opaque {};
const ConstFSEventStreamRef = *const @typeInfo(FSEventStreamRef).pointer.child;
const FSEventStreamCallback = *const fn (
stream: ConstFSEventStreamRef,
client_callback_info: ?*anyopaque,
num_events: usize,
event_paths: *anyopaque,
event_flags: [*]const FSEventStreamEventFlags,
event_ids: [*]const FSEventStreamEventId,
) callconv(.c) void;
const FSEventStreamContext = extern struct {
version: CFIndex,
info: ?*anyopaque,
retain: ?CFAllocatorRetainCallBack,
release: ?CFAllocatorReleaseCallBack,
copy_description: ?CFAllocatorCopyDescriptionCallBack,
};
const FSEventStreamEventId = enum(u64) {
since_now = std.math.maxInt(u64),
_,
};
const FSEventStreamCreateFlags = packed struct(u32) {
use_cf_types: bool = false,
no_defer: bool = false,
watch_root: bool = false,
ignore_self: bool = false,
file_events: bool = false,
_: u27 = 0,
};
const FSEventStreamEventFlags = packed struct(u32) {
must_scan_sub_dirs: bool,
user_dropped: bool,
kernel_dropped: bool,
event_ids_wrapped: bool,
history_done: bool,
root_changed: bool,
mount: bool,
unmount: bool,
_: u24 = 0,
};
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const watch_log = std.log.scoped(.watch);
const FsEvents = @This();

View File

@ -251,48 +251,44 @@ pub fn now(s: *const WebServer) i64 {
fn accept(ws: *WebServer, connection: std.net.Server.Connection) void {
defer connection.stream.close();
var read_buf: [0x4000]u8 = undefined;
var server: std.http.Server = .init(connection, &read_buf);
var send_buffer: [4096]u8 = undefined;
var recv_buffer: [4096]u8 = undefined;
var connection_reader = connection.stream.reader(&recv_buffer);
var connection_writer = connection.stream.writer(&send_buffer);
var server: http.Server = .init(connection_reader.interface(), &connection_writer.interface);
while (true) {
var request = server.receiveHead() catch |err| switch (err) {
error.HttpConnectionClosing => return,
else => {
log.err("failed to receive http request: {s}", .{@errorName(err)});
return;
},
else => return log.err("failed to receive http request: {t}", .{err}),
};
var ws_send_buf: [0x4000]u8 = undefined;
var ws_recv_buf: [0x4000]u8 align(4) = undefined;
if (std.http.WebSocket.init(&request, &ws_send_buf, &ws_recv_buf) catch |err| {
log.err("failed to initialize websocket connection: {s}", .{@errorName(err)});
return;
}) |ws_init| {
var web_socket = ws_init;
ws.serveWebSocket(&web_socket) catch |err| {
log.err("failed to serve websocket: {s}", .{@errorName(err)});
return;
};
comptime unreachable;
} else {
ws.serveRequest(&request) catch |err| switch (err) {
error.AlreadyReported => return,
else => {
log.err("failed to serve '{s}': {s}", .{ request.head.target, @errorName(err) });
switch (request.upgradeRequested()) {
.websocket => |opt_key| {
const key = opt_key orelse return log.err("missing websocket key", .{});
var web_socket = request.respondWebSocket(.{ .key = key }) catch {
return log.err("failed to respond web socket: {t}", .{connection_writer.err.?});
};
ws.serveWebSocket(&web_socket) catch |err| {
log.err("failed to serve websocket: {t}", .{err});
return;
},
};
};
comptime unreachable;
},
.other => |name| return log.err("unknown upgrade request: {s}", .{name}),
.none => {
ws.serveRequest(&request) catch |err| switch (err) {
error.AlreadyReported => return,
else => {
log.err("failed to serve '{s}': {t}", .{ request.head.target, err });
return;
},
};
},
}
}
}
fn makeIov(s: []const u8) std.posix.iovec_const {
return .{
.base = s.ptr,
.len = s.len,
};
}
fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn {
fn serveWebSocket(ws: *WebServer, sock: *http.Server.WebSocket) !noreturn {
var prev_build_status = ws.build_status.load(.monotonic);
const prev_step_status_bits = try ws.gpa.alloc(u8, ws.step_status_bits.len);
@ -312,11 +308,8 @@ fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn {
.timestamp = ws.now(),
.steps_len = @intCast(ws.all_steps.len),
};
try sock.writeMessagev(&.{
makeIov(@ptrCast(&hello_header)),
makeIov(ws.step_names_trailing),
makeIov(prev_step_status_bits),
}, .binary);
var bufs: [3][]const u8 = .{ @ptrCast(&hello_header), ws.step_names_trailing, prev_step_status_bits };
try sock.writeMessageVec(&bufs, .binary);
}
var prev_fuzz: Fuzz.Previous = .init;
@ -380,7 +373,7 @@ fn serveWebSocket(ws: *WebServer, sock: *std.http.WebSocket) !noreturn {
std.Thread.Futex.timedWait(&ws.update_id, start_update_id, std.time.ns_per_ms * default_update_interval_ms) catch {};
}
}
fn recvWebSocketMessages(ws: *WebServer, sock: *std.http.WebSocket) void {
fn recvWebSocketMessages(ws: *WebServer, sock: *http.Server.WebSocket) void {
while (true) {
const msg = sock.readSmallMessage() catch return;
if (msg.opcode != .binary) continue;
@ -402,7 +395,7 @@ fn recvWebSocketMessages(ws: *WebServer, sock: *std.http.WebSocket) void {
}
}
fn serveRequest(ws: *WebServer, req: *std.http.Server.Request) !void {
fn serveRequest(ws: *WebServer, req: *http.Server.Request) !void {
// Strip an optional leading '/debug' component from the request.
const target: []const u8, const debug: bool = target: {
if (mem.eql(u8, req.head.target, "/debug")) break :target .{ "/", true };
@ -431,7 +424,7 @@ fn serveRequest(ws: *WebServer, req: *std.http.Server.Request) !void {
fn serveLibFile(
ws: *WebServer,
request: *std.http.Server.Request,
request: *http.Server.Request,
sub_path: []const u8,
content_type: []const u8,
) !void {
@ -442,7 +435,7 @@ fn serveLibFile(
}
fn serveClientWasm(
ws: *WebServer,
req: *std.http.Server.Request,
req: *http.Server.Request,
optimize_mode: std.builtin.OptimizeMode,
) !void {
var arena_state: std.heap.ArenaAllocator = .init(ws.gpa);
@ -456,12 +449,12 @@ fn serveClientWasm(
pub fn serveFile(
ws: *WebServer,
request: *std.http.Server.Request,
request: *http.Server.Request,
path: Cache.Path,
content_type: []const u8,
) !void {
const gpa = ws.gpa;
// The desired API is actually sendfile, which will require enhancing std.http.Server.
// The desired API is actually sendfile, which will require enhancing http.Server.
// We load the file with every request so that the user can make changes to the file
// and refresh the HTML page without restarting this server.
const file_contents = path.root_dir.handle.readFileAlloc(gpa, path.sub_path, 10 * 1024 * 1024) catch |err| {
@ -478,14 +471,13 @@ pub fn serveFile(
}
pub fn serveTarFile(
ws: *WebServer,
request: *std.http.Server.Request,
request: *http.Server.Request,
paths: []const Cache.Path,
) !void {
const gpa = ws.gpa;
var send_buf: [0x4000]u8 = undefined;
var response = request.respondStreaming(.{
.send_buffer = &send_buf,
var send_buffer: [0x4000]u8 = undefined;
var response = try request.respondStreaming(&send_buffer, .{
.respond_options = .{
.extra_headers = &.{
.{ .name = "Content-Type", .value = "application/x-tar" },
@ -497,10 +489,7 @@ pub fn serveTarFile(
var cached_cwd_path: ?[]const u8 = null;
defer if (cached_cwd_path) |p| gpa.free(p);
var response_buf: [1024]u8 = undefined;
var adapter = response.writer().adaptToNewApi();
adapter.new_interface.buffer = &response_buf;
var archiver: std.tar.Writer = .{ .underlying_writer = &adapter.new_interface };
var archiver: std.tar.Writer = .{ .underlying_writer = &response.writer };
for (paths) |path| {
var file = path.root_dir.handle.openFile(path.sub_path, .{}) catch |err| {
@ -526,7 +515,6 @@ pub fn serveTarFile(
}
// intentionally not calling `archiver.finishPedantically`
try adapter.new_interface.flush();
try response.end();
}
@ -804,7 +792,7 @@ pub fn wait(ws: *WebServer) RunnerRequest {
}
}
const cache_control_header: std.http.Header = .{
const cache_control_header: http.Header = .{
.name = "Cache-Control",
.value = "max-age=0, must-revalidate",
};
@ -819,5 +807,6 @@ const Build = std.Build;
const Cache = Build.Cache;
const Fuzz = Build.Fuzz;
const abi = Build.abi;
const http = std.http;
const WebServer = @This();

View File

@ -249,33 +249,6 @@ pub fn readBytesNoEof(self: Self, comptime num_bytes: usize) anyerror![num_bytes
return bytes;
}
/// Reads bytes until `bounded.len` is equal to `num_bytes`,
/// or the stream ends.
///
/// * it is assumed that `num_bytes` will not exceed `bounded.capacity()`
pub fn readIntoBoundedBytes(
self: Self,
comptime num_bytes: usize,
bounded: *std.BoundedArray(u8, num_bytes),
) anyerror!void {
while (bounded.len < num_bytes) {
// get at most the number of bytes free in the bounded array
const bytes_read = try self.read(bounded.unusedCapacitySlice());
if (bytes_read == 0) return;
// bytes_read will never be larger than @TypeOf(bounded.len)
// due to `self.read` being bounded by `bounded.unusedCapacitySlice()`
bounded.len += @as(@TypeOf(bounded.len), @intCast(bytes_read));
}
}
/// Reads at most `num_bytes` and returns as a bounded array.
pub fn readBoundedBytes(self: Self, comptime num_bytes: usize) anyerror!std.BoundedArray(u8, num_bytes) {
var result = std.BoundedArray(u8, num_bytes){};
try self.readIntoBoundedBytes(num_bytes, &result);
return result;
}
pub inline fn readInt(self: Self, comptime T: type, endian: std.builtin.Endian) anyerror!T {
const bytes = try self.readBytesNoEof(@divExact(@typeInfo(T).int.bits, 8));
return mem.readInt(T, &bytes, endian);

View File

@ -70,13 +70,14 @@ pub const VTable = struct {
/// Returns number of bytes written to `data`.
///
/// `data` may not have nonzero length.
/// `data` must have nonzero length. `data[0]` may have zero length, in
/// which case the implementation must write to `Reader.buffer`.
///
/// `data` may not contain an alias to `Reader.buffer`.
///
/// `data` is mutable because the implementation may to temporarily modify
/// the fields in order to handle partial reads. Implementations must
/// restore the original value before returning.
/// `data` is mutable because the implementation may temporarily modify the
/// fields in order to handle partial reads. Implementations must restore
/// the original value before returning.
///
/// Implementations may ignore `data`, writing directly to `Reader.buffer`,
/// modifying `seek` and `end` accordingly, and returning 0 from this
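// A minimal sketch (not part of this commit) of a stream wrapper's
// `readVec` honoring this contract, modeled on the `Hashed` change later
// in this diff; `in` is an assumed inner reader:
//
//     fn readVec(r: *Reader, data: [][]u8) Error!usize {
//         const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
//         var vecs: [8][]u8 = undefined;
//         const dest_n, const data_size = try r.writableVector(&vecs, data);
//         const n = try this.in.readVec(vecs[0..dest_n]);
//         if (n > data_size) {
//             // Bytes past the caller's vectors landed in `Reader.buffer`.
//             r.end += n - data_size;
//             return data_size;
//         }
//         return n;
//     }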
@ -366,8 +367,11 @@ pub fn appendRemainingUnlimited(
const buffer_contents = r.buffer[r.seek..r.end];
try list.ensureUnusedCapacity(gpa, buffer_contents.len + bump);
list.appendSliceAssumeCapacity(buffer_contents);
r.seek = 0;
r.end = 0;
// The if statement protects `ending`: its buffer is always empty, and it must not be stored through.
if (r.end != 0) {
r.seek = 0;
r.end = 0;
}
// From here, we leave `buffer` empty, appending directly to `list`.
var writer: Writer = .{
.buffer = undefined,
@ -421,23 +425,29 @@ pub fn readVec(r: *Reader, data: [][]u8) Error!usize {
/// Writes to `Reader.buffer` or `data`, whichever has larger capacity.
pub fn defaultReadVec(r: *Reader, data: [][]u8) Error!usize {
assert(r.seek == r.end);
r.seek = 0;
r.end = 0;
const first = data[0];
const direct = first.len >= r.buffer.len;
if (r.seek == r.end and first.len >= r.buffer.len) {
var writer: Writer = .{
.buffer = first,
.end = 0,
.vtable = &.{ .drain = Writer.fixedDrain },
};
const limit: Limit = .limited(writer.buffer.len - writer.end);
return r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,
};
}
var writer: Writer = .{
.buffer = if (direct) first else r.buffer,
.end = 0,
.buffer = r.buffer,
.end = r.end,
.vtable = &.{ .drain = Writer.fixedDrain },
};
const limit: Limit = .limited(writer.buffer.len - writer.end);
const n = r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
r.end += r.vtable.stream(r, &writer, limit) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,
};
if (direct) return n;
r.end += n;
return 0;
}
@ -1059,17 +1069,8 @@ pub fn fill(r: *Reader, n: usize) Error!void {
/// increasing by a factor of 5 or more.
fn fillUnbuffered(r: *Reader, n: usize) Error!void {
try rebase(r, n);
var writer: Writer = .{
.buffer = r.buffer,
.vtable = &.{ .drain = Writer.fixedDrain },
};
while (r.end < r.seek + n) {
writer.end = r.end;
r.end += r.vtable.stream(r, &writer, .limited(r.buffer.len - r.end)) catch |err| switch (err) {
error.WriteFailed => unreachable,
error.ReadFailed, error.EndOfStream => |e| return e,
};
}
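// A single zero-length vector forces `readVec` implementations to write
// into `Reader.buffer` directly, per the `VTable.readVec` contract.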
var bufs: [1][]u8 = .{""};
while (r.end < r.seek + n) _ = try r.vtable.readVec(r, &bufs);
}
/// Without advancing the seek position, does exactly one underlying read, filling the buffer as
@ -1079,15 +1080,8 @@ fn fillUnbuffered(r: *Reader, n: usize) Error!void {
/// Asserts buffer capacity is at least 1.
pub fn fillMore(r: *Reader) Error!void {
try rebase(r, 1);
var writer: Writer = .{
.buffer = r.buffer,
.end = r.end,
.vtable = &.{ .drain = Writer.fixedDrain },
};
r.end += r.vtable.stream(r, &writer, .limited(r.buffer.len - r.end)) catch |err| switch (err) {
error.WriteFailed => unreachable,
else => |e| return e,
};
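// As in `fillUnbuffered`: the zero-length vector routes the read into
// `Reader.buffer`, per the `VTable.readVec` contract.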
var bufs: [1][]u8 = .{""};
_ = try r.vtable.readVec(r, &bufs);
}
/// Returns the next byte from the stream or returns `error.EndOfStream`.
@ -1315,31 +1309,6 @@ pub fn defaultRebase(r: *Reader, capacity: usize) RebaseError!void {
r.end = data.len;
}
/// Advances the stream and decreases the size of the storage buffer by `n`,
/// returning the range of bytes no longer accessible by `r`.
///
/// This action can be undone by `restitute`.
///
/// Asserts there are at least `n` buffered bytes already.
///
/// Asserts that `r.seek` is zero, i.e. the buffer is in a rebased state.
pub fn steal(r: *Reader, n: usize) []u8 {
assert(r.seek == 0);
assert(n <= r.end);
const stolen = r.buffer[0..n];
r.buffer = r.buffer[n..];
r.end -= n;
return stolen;
}
/// Expands the storage buffer, undoing the effects of `steal`
/// Assumes that `n` does not exceed the total number of stolen bytes.
pub fn restitute(r: *Reader, n: usize) void {
r.buffer = (r.buffer.ptr - n)[0 .. r.buffer.len + n];
r.end += n;
r.seek += n;
}
test fixed {
var r: Reader = .fixed("a\x02");
try testing.expect((try r.takeByte()) == 'a');
@ -1796,18 +1765,26 @@ pub fn Hashed(comptime Hasher: type) type {
fn readVec(r: *Reader, data: [][]u8) Error!usize {
const this: *@This() = @alignCast(@fieldParentPtr("reader", r));
const n = try this.in.readVec(data);
var vecs: [8][]u8 = undefined; // Arbitrarily chosen amount.
const dest_n, const data_size = try r.writableVector(&vecs, data);
const dest = vecs[0..dest_n];
const n = try this.in.readVec(dest);
var remaining: usize = n;
for (data) |slice| {
for (dest) |slice| {
if (remaining < slice.len) {
this.hasher.update(slice[0..remaining]);
return n;
remaining = 0;
break;
} else {
remaining -= slice.len;
this.hasher.update(slice);
}
}
assert(remaining == 0);
if (n > data_size) {
r.end += n - data_size;
return data_size;
}
return n;
}
@ -1824,17 +1801,24 @@ pub fn Hashed(comptime Hasher: type) type {
pub fn writableVectorPosix(r: *Reader, buffer: []std.posix.iovec, data: []const []u8) Error!struct { usize, usize } {
var i: usize = 0;
var n: usize = 0;
for (data) |buf| {
if (buffer.len - i == 0) return .{ i, n };
if (r.seek == r.end) {
for (data) |buf| {
if (buffer.len - i == 0) return .{ i, n };
if (buf.len != 0) {
buffer[i] = .{ .base = buf.ptr, .len = buf.len };
i += 1;
n += buf.len;
}
}
const buf = r.buffer;
if (buf.len != 0) {
r.seek = 0;
r.end = 0;
buffer[i] = .{ .base = buf.ptr, .len = buf.len };
i += 1;
n += buf.len;
}
}
assert(r.seek == r.end);
const buf = r.buffer;
if (buf.len != 0) {
} else {
const buf = r.buffer[r.end..];
buffer[i] = .{ .base = buf.ptr, .len = buf.len };
i += 1;
}
@ -1848,28 +1832,62 @@ pub fn writableVectorWsa(
) Error!struct { usize, usize } {
var i: usize = 0;
var n: usize = 0;
for (data) |buf| {
if (buffer.len - i == 0) return .{ i, n };
if (buf.len == 0) continue;
if (std.math.cast(u32, buf.len)) |len| {
buffer[i] = .{ .buf = buf.ptr, .len = len };
i += 1;
n += len;
continue;
}
buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
i += 1;
n += std.math.maxInt(u32);
return .{ i, n };
}
assert(r.seek == r.end);
const buf = r.buffer;
if (buf.len != 0) {
if (std.math.cast(u32, buf.len)) |len| {
buffer[i] = .{ .buf = buf.ptr, .len = len };
} else {
if (r.seek == r.end) {
for (data) |buf| {
if (buffer.len - i == 0) return .{ i, n };
if (buf.len == 0) continue;
if (std.math.cast(u32, buf.len)) |len| {
buffer[i] = .{ .buf = buf.ptr, .len = len };
i += 1;
n += len;
continue;
}
buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
i += 1;
n += std.math.maxInt(u32);
return .{ i, n };
}
const buf = r.buffer;
if (buf.len != 0) {
r.seek = 0;
r.end = 0;
if (std.math.cast(u32, buf.len)) |len| {
buffer[i] = .{ .buf = buf.ptr, .len = len };
} else {
buffer[i] = .{ .buf = buf.ptr, .len = std.math.maxInt(u32) };
}
i += 1;
}
} else {
buffer[i] = .{
.buf = r.buffer.ptr + r.end,
.len = @min(std.math.maxInt(u32), r.buffer.len - r.end),
};
i += 1;
}
return .{ i, n };
}
pub fn writableVector(r: *Reader, buffer: [][]u8, data: []const []u8) Error!struct { usize, usize } {
var i: usize = 0;
var n: usize = 0;
if (r.seek == r.end) {
for (data) |buf| {
if (buffer.len - i == 0) return .{ i, n };
if (buf.len != 0) {
buffer[i] = buf;
i += 1;
n += buf.len;
}
}
if (r.buffer.len != 0) {
r.seek = 0;
r.end = 0;
buffer[i] = r.buffer;
i += 1;
}
} else {
buffer[i] = r.buffer[r.end..];
i += 1;
}
return .{ i, n };

View File

@ -191,29 +191,87 @@ pub fn writeSplatHeader(
data: []const []const u8,
splat: usize,
) Error!usize {
const new_end = w.end + header.len;
if (new_end <= w.buffer.len) {
@memcpy(w.buffer[w.end..][0..header.len], header);
w.end = new_end;
return header.len + try writeSplat(w, data, splat);
return writeSplatHeaderLimit(w, header, data, splat, .unlimited);
}
/// Equivalent to `writeSplatHeader` but writes at most `limit` bytes.
pub fn writeSplatHeaderLimit(
w: *Writer,
header: []const u8,
data: []const []const u8,
splat: usize,
limit: Limit,
) Error!usize {
var remaining = @intFromEnum(limit);
{
const copy_len = @min(header.len, w.buffer.len - w.end, remaining);
if (header.len - copy_len != 0) return writeSplatHeaderLimitFinish(w, header, data, splat, remaining);
@memcpy(w.buffer[w.end..][0..copy_len], header[0..copy_len]);
w.end += copy_len;
remaining -= copy_len;
}
var vecs: [8][]const u8 = undefined; // Arbitrarily chosen size.
var i: usize = 1;
vecs[0] = header;
for (data[0 .. data.len - 1]) |buf| {
if (buf.len == 0) continue;
vecs[i] = buf;
i += 1;
if (vecs.len - i == 0) break;
for (data[0 .. data.len - 1], 0..) |buf, i| {
const copy_len = @min(buf.len, w.buffer.len - w.end, remaining);
if (buf.len - copy_len != 0) return @intFromEnum(limit) - remaining +
try writeSplatHeaderLimitFinish(w, &.{}, data[i..], splat, remaining);
@memcpy(w.buffer[w.end..][0..copy_len], buf[0..copy_len]);
w.end += copy_len;
remaining -= copy_len;
}
const pattern = data[data.len - 1];
const new_splat = s: {
if (pattern.len == 0 or vecs.len - i == 0) break :s 1;
const splat_n = pattern.len * splat;
if (splat_n > @min(w.buffer.len - w.end, remaining)) {
const buffered_n = @intFromEnum(limit) - remaining;
const written = try writeSplatHeaderLimitFinish(w, &.{}, data[data.len - 1 ..][0..1], splat, remaining);
return buffered_n + written;
}
for (0..splat) |_| {
@memcpy(w.buffer[w.end..][0..pattern.len], pattern);
w.end += pattern.len;
}
remaining -= splat_n;
return @intFromEnum(limit) - remaining;
}
fn writeSplatHeaderLimitFinish(
w: *Writer,
header: []const u8,
data: []const []const u8,
splat: usize,
limit: usize,
) Error!usize {
var remaining = limit;
var vecs: [8][]const u8 = undefined;
var i: usize = 0;
v: {
if (header.len != 0) {
const copy_len = @min(header.len, remaining);
vecs[i] = header[0..copy_len];
i += 1;
remaining -= copy_len;
if (remaining == 0) break :v;
}
for (data[0 .. data.len - 1]) |buf| if (buf.len != 0) {
const copy_len = @min(buf.len, remaining);
vecs[i] = buf[0..copy_len];
i += 1;
remaining -= copy_len;
if (remaining == 0) break :v;
if (vecs.len - i == 0) break :v;
};
const pattern = data[data.len - 1];
if (splat == 1) {
vecs[i] = pattern[0..@min(remaining, pattern.len)];
i += 1;
break :v;
}
vecs[i] = pattern;
i += 1;
break :s splat;
};
return w.vtable.drain(w, vecs[0..i], new_splat);
return w.vtable.drain(w, (&vecs)[0..i], @min(remaining / pattern.len, splat));
}
return w.vtable.drain(w, (&vecs)[0..i], 1);
}
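// A small demonstration of the splat contract (a sketch; the buffer size
// and byte values are made up): the final data element acts as a pattern
// that is written `splat` times after the header.
test "writeSplatHeader repeats the trailing pattern" {
    var buf: [32]u8 = undefined;
    var w: Writer = .fixed(&buf);
    const n = try w.writeSplatHeader("hdr: ", &.{ "ab", "-" }, 3);
    try std.testing.expectEqual(@as(usize, 10), n);
    try std.testing.expectEqualStrings("hdr: ab---", w.buffered());
}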
test "writeSplatHeader splatting avoids buffer aliasing temptation" {

View File

@ -45,9 +45,9 @@ test "write a file, read it, then delete it" {
const expected_file_size: u64 = "begin".len + data.len + "end".len;
try expectEqual(expected_file_size, file_size);
var buf_stream = io.bufferedReader(file.deprecatedReader());
const st = buf_stream.reader();
const contents = try st.readAllAlloc(std.testing.allocator, 2 * 1024);
var file_buffer: [1024]u8 = undefined;
var file_reader = file.reader(&file_buffer);
const contents = try file_reader.interface.allocRemaining(std.testing.allocator, .limited(2 * 1024));
defer std.testing.allocator.free(contents);
try expect(mem.eql(u8, contents[0.."begin".len], "begin"));

View File

@ -1006,7 +1006,7 @@ fn serializeIpc(start_serialized_len: usize, serialized_buffer: *Serialized.Buff
continue;
}
const src = pipe_buf[m.remaining_read_trash_bytes..n];
std.mem.copyForwards(u8, &pipe_buf, src);
@memmove(pipe_buf[0..src.len], src);
m.remaining_read_trash_bytes = 0;
bytes_read = src.len;
continue;

View File

@ -405,7 +405,7 @@ pub const Os = struct {
.fuchsia => .{
.semver = .{
.min = .{ .major = 1, .minor = 0, .patch = 0 },
.max = .{ .major = 26, .minor = 0, .patch = 0 },
.max = .{ .major = 27, .minor = 0, .patch = 0 },
},
},
.hermit => .{
@ -446,7 +446,7 @@ pub const Os = struct {
break :blk default_min;
},
.max = .{ .major = 6, .minor = 13, .patch = 4 },
.max = .{ .major = 6, .minor = 16, .patch = 0 },
},
.glibc = blk: {
// For 32-bit targets that traditionally used 32-bit time, we require
@ -519,7 +519,7 @@ pub const Os = struct {
break :blk default_min;
},
.max = .{ .major = 14, .minor = 2, .patch = 0 },
.max = .{ .major = 14, .minor = 3, .patch = 0 },
},
},
.netbsd => .{
@ -549,38 +549,38 @@ pub const Os = struct {
.driverkit => .{
.semver = .{
.min = .{ .major = 19, .minor = 0, .patch = 0 },
.max = .{ .major = 24, .minor = 4, .patch = 0 },
.min = .{ .major = 20, .minor = 0, .patch = 0 },
.max = .{ .major = 25, .minor = 0, .patch = 0 },
},
},
.macos => .{
.semver = .{
.min = .{ .major = 13, .minor = 0, .patch = 0 },
.max = .{ .major = 15, .minor = 4, .patch = 1 },
.max = .{ .major = 15, .minor = 6, .patch = 0 },
},
},
.ios => .{
.semver = .{
.min = .{ .major = 15, .minor = 0, .patch = 0 },
.max = .{ .major = 18, .minor = 4, .patch = 1 },
.max = .{ .major = 18, .minor = 6, .patch = 0 },
},
},
.tvos => .{
.semver = .{
.min = .{ .major = 15, .minor = 0, .patch = 0 },
.max = .{ .major = 18, .minor = 4, .patch = 1 },
.max = .{ .major = 18, .minor = 5, .patch = 0 },
},
},
.visionos => .{
.semver = .{
.min = .{ .major = 1, .minor = 0, .patch = 0 },
.max = .{ .major = 2, .minor = 4, .patch = 1 },
.max = .{ .major = 2, .minor = 5, .patch = 0 },
},
},
.watchos => .{
.semver = .{
.min = .{ .major = 7, .minor = 0, .patch = 0 },
.max = .{ .major = 11, .minor = 4, .patch = 0 },
.min = .{ .major = 8, .minor = 0, .patch = 0 },
.max = .{ .major = 11, .minor = 6, .patch = 0 },
},
},
@ -614,7 +614,7 @@ pub const Os = struct {
.amdhsa => .{
.semver = .{
.min = .{ .major = 5, .minor = 0, .patch = 0 },
.max = .{ .major = 6, .minor = 4, .patch = 0 },
.max = .{ .major = 6, .minor = 4, .patch = 2 },
},
},
.amdpal => .{
@ -626,7 +626,7 @@ pub const Os = struct {
.cuda => .{
.semver = .{
.min = .{ .major = 11, .minor = 0, .patch = 1 },
.max = .{ .major = 12, .minor = 9, .patch = 0 },
.max = .{ .major = 12, .minor = 9, .patch = 1 },
},
},
.nvcl,
@ -646,7 +646,7 @@ pub const Os = struct {
.vulkan => .{
.semver = .{
.min = .{ .major = 1, .minor = 2, .patch = 0 },
.max = .{ .major = 1, .minor = 4, .patch = 313 },
.max = .{ .major = 1, .minor = 4, .patch = 321 },
},
},
};
@ -697,57 +697,6 @@ pub const Os = struct {
=> |field| @field(os.version_range, @tagName(field)).isAtLeast(ver),
};
}
/// On Darwin, we always link libSystem which contains libc.
/// Similarly on FreeBSD and NetBSD we always link system libc
/// since this is the stable syscall interface.
pub fn requiresLibC(os: Os) bool {
return switch (os.tag) {
.aix,
.driverkit,
.macos,
.ios,
.tvos,
.watchos,
.visionos,
.dragonfly,
.openbsd,
.haiku,
.solaris,
.illumos,
.serenity,
=> true,
.linux,
.windows,
.freebsd,
.netbsd,
.freestanding,
.fuchsia,
.ps3,
.zos,
.rtems,
.cuda,
.nvcl,
.amdhsa,
.ps4,
.ps5,
.mesa3d,
.contiki,
.amdpal,
.hermit,
.hurd,
.wasi,
.emscripten,
.uefi,
.opencl,
.opengl,
.vulkan,
.plan9,
.other,
=> false,
};
}
};
pub const aarch64 = @import("Target/aarch64.zig");
@ -2055,6 +2004,61 @@ pub inline fn isWasiLibC(target: *const Target) bool {
return target.os.tag == .wasi and target.abi.isMusl();
}
/// Does this target require linking libc? This may be the case if the target has an unstable
/// syscall interface, for example.
pub fn requiresLibC(target: *const Target) bool {
return switch (target.os.tag) {
.aix,
.driverkit,
.macos,
.ios,
.tvos,
.watchos,
.visionos,
.dragonfly,
.openbsd,
.haiku,
.solaris,
.illumos,
.serenity,
=> true,
// Android API levels prior to 29 did not have native TLS support. For these API levels, TLS
// is implemented through calls to `__emutls_get_address`. We provide this function in
// compiler-rt, but it's implemented by way of `pthread_key_create` et al, so linking libc
// is required.
.linux => target.abi.isAndroid() and target.os.version_range.linux.android < 29,
.windows,
.freebsd,
.netbsd,
.freestanding,
.fuchsia,
.ps3,
.zos,
.rtems,
.cuda,
.nvcl,
.amdhsa,
.ps4,
.ps5,
.mesa3d,
.contiki,
.amdpal,
.hermit,
.hurd,
.wasi,
.emscripten,
.uefi,
.opencl,
.opengl,
.vulkan,
.plan9,
.other,
=> false,
};
}
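// Illustrative check of the new Android rule (a sketch; the triple and API
// level are hypothetical): API level 28 predates native TLS, so the emutls
// path requires libc.
test "android below API 29 requires libc" {
    const query = try Query.parse(.{
        .arch_os_abi = "aarch64-linux.3.10...4.4.1-android.28",
    });
    const target = try std.zig.system.resolveTargetQuery(query);
    try std.testing.expect(target.requiresLibC());
}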
pub const DynamicLinker = struct {
/// Contains the memory used to store the dynamic linker path. This field
/// should not be used directly. See `get` and `set`. This field exists so

View File

@ -423,7 +423,7 @@ pub fn zigTriple(self: Query, gpa: Allocator) Allocator.Error![]u8 {
try formatVersion(v, gpa, &result);
},
.windows => |v| {
try result.print(gpa, "{d}", .{v});
try result.print(gpa, "{f}", .{v});
},
}
}
@ -437,7 +437,7 @@ pub fn zigTriple(self: Query, gpa: Allocator) Allocator.Error![]u8 {
.windows => |v| {
// This is counting on a custom format() function defined on `WindowsVersion`
// to add a leading '.' so that the triple ends up with a total of three dots.
try result.print(gpa, "..{d}", .{v});
try result.print(gpa, "..{f}", .{v});
},
}
}
@ -729,4 +729,20 @@ test parse {
defer std.testing.allocator.free(text);
try std.testing.expectEqualSlices(u8, "aarch64-linux.3.10...4.4.1-android.30", text);
}
{
const query = try Query.parse(.{
.arch_os_abi = "x86-windows.xp...win8-msvc",
});
const target = try std.zig.system.resolveTargetQuery(query);
try std.testing.expect(target.cpu.arch == .x86);
try std.testing.expect(target.os.tag == .windows);
try std.testing.expect(target.os.version_range.windows.min == .xp);
try std.testing.expect(target.os.version_range.windows.max == .win8);
try std.testing.expect(target.abi == .msvc);
const text = try query.zigTriple(std.testing.allocator);
defer std.testing.allocator.free(text);
try std.testing.expectEqualSlices(u8, "x86-windows.xp...win8-msvc", text);
}
}

View File

@ -377,7 +377,8 @@ pub fn parse(text: []const u8) ParseError!Uri {
pub const ResolveInPlaceError = ParseError || error{NoSpaceLeft};
/// Resolves a URI against a base URI, conforming to RFC 3986, Section 5.
/// Resolves a URI against a base URI, conforming to
/// [RFC 3986, Section 5](https://www.rfc-editor.org/rfc/rfc3986#section-5)
///
/// Assumes new location is already copied to the beginning of `aux_buf.*`.
/// Parses that new location as a URI, and then resolves the path in place.

View File

@ -158,7 +158,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
assert(self.items.len < self.capacity);
self.items.len += 1;
mem.copyBackwards(T, self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
@memmove(self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
self.items[i] = item;
}
@ -216,7 +216,7 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?mem.Alignment) ty
assert(self.capacity >= new_len);
const to_move = self.items[index..];
self.items.len = new_len;
mem.copyBackwards(T, self.items[index + count ..], to_move);
@memmove(self.items[index + count ..][0..to_move.len], to_move);
const result = self.items[index..][0..count];
@memset(result, undefined);
return result;
@ -624,6 +624,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
/// Initialize with externally-managed memory. The buffer determines the
/// capacity, and the length is set to zero.
///
/// When initialized this way, all functions that accept an Allocator
/// argument cause illegal behavior.
pub fn initBuffer(buffer: Slice) Self {
@ -705,18 +706,37 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
}
/// Insert `item` at index `i`. Moves `list[i .. list.len]` to higher indices to make room.
/// If in` is equal to the length of the list this operation is equivalent to append.
///
/// If `i` is equal to the length of the list this operation is equivalent to append.
///
/// This operation is O(N).
///
/// Asserts that the list has capacity for one additional item.
///
/// Asserts that the index is in bounds or equal to the length.
pub fn insertAssumeCapacity(self: *Self, i: usize, item: T) void {
assert(self.items.len < self.capacity);
self.items.len += 1;
mem.copyBackwards(T, self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
@memmove(self.items[i + 1 .. self.items.len], self.items[i .. self.items.len - 1]);
self.items[i] = item;
}
/// Insert `item` at index `i`, moving `list[i .. list.len]` to higher indices to make room.
///
/// If `i` is equal to the length of the list this operation is equivalent to append.
///
/// This operation is O(N).
///
/// If the list lacks unused capacity for the additional item, returns
/// `error.OutOfMemory`.
///
/// Asserts that the index is in bounds or equal to the length.
pub fn insertBounded(self: *Self, i: usize, item: T) error{OutOfMemory}!void {
if (self.capacity - self.items.len == 0) return error.OutOfMemory;
return insertAssumeCapacity(self, i, item);
}
/// Add `count` new elements at position `index`, which have
/// `undefined` values. Returns a slice pointing to the newly allocated
/// elements, which becomes invalid after various `ArrayList`
@ -749,12 +769,29 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
assert(self.capacity >= new_len);
const to_move = self.items[index..];
self.items.len = new_len;
mem.copyBackwards(T, self.items[index + count ..], to_move);
@memmove(self.items[index + count ..][0..to_move.len], to_move);
const result = self.items[index..][0..count];
@memset(result, undefined);
return result;
}
/// Add `count` new elements at position `index`, which have
/// `undefined` values, returning a slice pointing to the newly
/// allocated elements, which becomes invalid after various `ArrayList`
/// operations.
///
/// Invalidates pre-existing pointers to elements at and after `index`, but
/// does not invalidate any before that.
///
/// If the list lacks unused capacity for the additional items, returns
/// `error.OutOfMemory`.
///
/// Asserts that the index is in bounds or equal to the length.
pub fn addManyAtBounded(self: *Self, index: usize, count: usize) error{OutOfMemory}![]T {
if (self.capacity - self.items.len < count) return error.OutOfMemory;
return addManyAtAssumeCapacity(self, index, count);
}
/// Insert slice `items` at index `i` by moving `list[i .. list.len]` to make room.
/// This operation is O(N).
/// Invalidates pre-existing pointers to elements at and after `index`.
@ -798,7 +835,9 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
}
/// Grows or shrinks the list as necessary.
///
/// Never invalidates element pointers.
///
/// Asserts the capacity is enough for additional items.
pub fn replaceRangeAssumeCapacity(self: *Self, start: usize, len: usize, new_items: []const T) void {
const after_range = start + len;
@ -815,16 +854,24 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
} else {
const extra = range.len - new_items.len;
@memcpy(range[0..new_items.len], new_items);
std.mem.copyForwards(
T,
self.items[after_range - extra ..],
self.items[after_range..],
);
const src = self.items[after_range..];
@memmove(self.items[after_range - extra ..][0..src.len], src);
@memset(self.items[self.items.len - extra ..], undefined);
self.items.len -= extra;
}
}
/// Grows or shrinks the list as necessary.
///
/// Never invalidates element pointers.
///
/// If the unused capacity is insufficient for additional items,
/// returns `error.OutOfMemory`.
pub fn replaceRangeBounded(self: *Self, start: usize, len: usize, new_items: []const T) error{OutOfMemory}!void {
if (self.capacity - self.items.len < new_items.len -| len) return error.OutOfMemory;
return replaceRangeAssumeCapacity(self, start, len, new_items);
}
/// Extend the list by 1 element. Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
pub fn append(self: *Self, gpa: Allocator, item: T) Allocator.Error!void {
@ -833,12 +880,25 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
}
/// Extend the list by 1 element.
///
/// Never invalidates element pointers.
///
/// Asserts that the list can hold one additional item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
self.addOneAssumeCapacity().* = item;
}
/// Extend the list by 1 element.
///
/// Never invalidates element pointers.
///
/// If the list lacks unused capacity for the additional item, returns
/// `error.OutOfMemory`.
pub fn appendBounded(self: *Self, item: T) error{OutOfMemory}!void {
if (self.capacity - self.items.len == 0) return error.OutOfMemory;
return appendAssumeCapacity(self, item);
}
/// Remove the element at index `i` from the list and return its value.
/// Invalidates pointers to the last element.
/// This operation is O(N).
@ -873,6 +933,7 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
}
/// Append the slice of items to the list.
///
/// Asserts that the list can hold the additional items.
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const old_len = self.items.len;
@ -882,6 +943,14 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
@memcpy(self.items[old_len..][0..items.len], items);
}
/// Append the slice of items to the list.
///
/// If the list lacks unused capacity for the additional items, returns `error.OutOfMemory`.
pub fn appendSliceBounded(self: *Self, items: []const T) error{OutOfMemory}!void {
if (self.capacity - self.items.len < items.len) return error.OutOfMemory;
return appendSliceAssumeCapacity(self, items);
}
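// Together with `initBuffer`, the `*Bounded` variants cover the
// allocation-free, fixed-capacity use case (a sketch; the names and sizes
// are illustrative):
test "appendBounded family on a fixed buffer" {
    var storage: [4]u8 = undefined;
    var list: std.ArrayListUnmanaged(u8) = .initBuffer(&storage);
    try list.appendBounded('a');
    try list.appendSliceBounded("bc");
    try std.testing.expectEqualStrings("abc", list.items);
    // Capacity is fixed at 4, so exceeding it fails instead of growing.
    try std.testing.expectError(error.OutOfMemory, list.appendSliceBounded("de"));
}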
/// Append the slice of items to the list. Allocates more
/// memory as necessary. Only call this function if a call to `appendSlice` instead would
/// be a compile error.
@ -892,8 +961,10 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
}
/// Append an unaligned slice of items to the list.
/// Only call this function if a call to `appendSliceAssumeCapacity`
/// instead would be a compile error.
///
/// Intended to be used only when `appendSliceAssumeCapacity` would be
/// a compile error.
///
/// Asserts that the list can hold the additional items.
pub fn appendUnalignedSliceAssumeCapacity(self: *Self, items: []align(1) const T) void {
const old_len = self.items.len;
@ -903,6 +974,18 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
@memcpy(self.items[old_len..][0..items.len], items);
}
/// Append an unaligned slice of items to the list.
///
/// Intended to be used only when `appendSliceAssumeCapacity` would be
/// a compile error.
///
/// If the list lacks unused capacity for the additional items, returns
/// `error.OutOfMemory`.
pub fn appendUnalignedSliceBounded(self: *Self, items: []align(1) const T) error{OutOfMemory}!void {
if (self.capacity - self.items.len < items.len) return error.OutOfMemory;
return appendUnalignedSliceAssumeCapacity(self, items);
}
pub fn print(self: *Self, gpa: Allocator, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
comptime assert(T == u8);
try self.ensureUnusedCapacity(gpa, fmt.len);
@ -920,6 +1003,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
self.items.len += w.end;
}
pub fn printBounded(self: *Self, comptime fmt: []const u8, args: anytype) error{OutOfMemory}!void {
comptime assert(T == u8);
var w: std.io.Writer = .fixed(self.unusedCapacitySlice());
w.print(fmt, args) catch return error.OutOfMemory;
self.items.len += w.end;
}
/// Append a value to the list `n` times.
/// Allocates more memory as necessary.
/// Invalidates element pointers if additional memory is needed.
@ -932,9 +1022,12 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
}
/// Append a value to the list `n` times.
///
/// Never invalidates element pointers.
///
/// The function is inline so that a comptime-known `value` parameter will
/// have better memset codegen in case it has a repeated byte pattern.
///
/// Asserts that the list can hold the additional items.
pub inline fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const new_len = self.items.len + n;
@ -943,6 +1036,22 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
self.items.len = new_len;
}
/// Append a value to the list `n` times.
///
/// Never invalidates element pointers.
///
/// The function is inline so that a comptime-known `value` parameter will
/// have better memset codegen in case it has a repeated byte pattern.
///
/// If the list lacks unused capacity for the additional items, returns
/// `error.OutOfMemory`.
pub inline fn appendNTimesBounded(self: *Self, value: T, n: usize) error{OutOfMemory}!void {
const new_len = self.items.len + n;
if (self.capacity < new_len) return error.OutOfMemory;
@memset(self.items.ptr[self.items.len..new_len], value);
self.items.len = new_len;
}
/// Adjust the list length to `new_len`.
/// Additional elements contain the value `undefined`.
/// Invalidates element pointers if additional memory is needed.
@ -1068,8 +1177,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
}
/// Increase length by 1, returning pointer to the new item.
///
/// Never invalidates element pointers.
///
/// The returned element pointer becomes invalid when the list is resized.
///
/// Asserts that the list can hold one additional item.
pub fn addOneAssumeCapacity(self: *Self) *T {
assert(self.items.len < self.capacity);
@ -1078,6 +1190,18 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
return &self.items[self.items.len - 1];
}
/// Increase length by 1, returning pointer to the new item.
///
/// Never invalidates element pointers.
///
/// The returned element pointer becomes invalid when the list is resized.
///
/// If the list lacks unused capacity for the additional item, returns `error.OutOfMemory`.
pub fn addOneBounded(self: *Self) error{OutOfMemory}!*T {
if (self.capacity - self.items.len < 1) return error.OutOfMemory;
return addOneAssumeCapacity(self);
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is an array pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
@ -1088,9 +1212,13 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
///
/// The return value is an array pointing to the newly allocated elements.
///
/// Never invalidates element pointers.
///
/// The returned pointer becomes invalid when the list is resized.
///
/// Asserts that the list can hold the additional items.
pub fn addManyAsArrayAssumeCapacity(self: *Self, comptime n: usize) *[n]T {
assert(self.items.len + n <= self.capacity);
@ -1099,6 +1227,21 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
///
/// The return value is an array pointing to the newly allocated elements.
///
/// Never invalidates element pointers.
///
/// The returned pointer becomes invalid when the list is resized.
///
/// If the list lacks unused capacity for the additional items, returns
/// `error.OutOfMemory`.
pub fn addManyAsArrayBounded(self: *Self, comptime n: usize) error{OutOfMemory}!*[n]T {
if (self.capacity - self.items.len < n) return error.OutOfMemory;
return addManyAsArrayAssumeCapacity(self, n);
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// The returned pointer becomes invalid when the list is resized.
@ -1109,10 +1252,12 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
return self.items[prev_len..][0..n];
}
/// Resize the array, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the newly allocated elements.
/// Never invalidates element pointers.
/// The returned pointer becomes invalid when the list is resized.
/// Resizes the array, adding `n` new elements, which have `undefined`
/// values, returning a slice pointing to the newly allocated elements.
///
/// Never invalidates element pointers. The returned pointer becomes
/// invalid when the list is resized.
///
/// Asserts that the list can hold the additional items.
pub fn addManyAsSliceAssumeCapacity(self: *Self, n: usize) []T {
assert(self.items.len + n <= self.capacity);
@ -1121,6 +1266,19 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?mem.Alig
return self.items[prev_len..][0..n];
}
/// Resizes the array, adding `n` new elements, which have `undefined`
/// values, returning a slice pointing to the newly allocated elements.
///
/// Never invalidates element pointers. The returned pointer becomes
/// invalid when the list is resized.
///
/// If the list lacks unused capacity for the additional items, returns
/// `error.OutOfMemory`.
pub fn addManyAsSliceBounded(self: *Self, n: usize) error{OutOfMemory}![]T {
if (self.capacity - self.items.len < n) return error.OutOfMemory;
return addManyAsSliceAssumeCapacity(self, n);
}
/// Remove and return the last element from the list.
/// If the list is empty, returns `null`.
/// Invalidates pointers to last element.

View File

@ -118,22 +118,6 @@ pub const Base64Encoder = struct {
}
}
// destWriter must be compatible with std.io.GenericWriter's writeAll interface
// sourceReader must be compatible with `std.io.GenericReader` read interface
pub fn encodeFromReaderToWriter(encoder: *const Base64Encoder, destWriter: anytype, sourceReader: anytype) !void {
while (true) {
var tempSource: [3]u8 = undefined;
const bytesRead = try sourceReader.read(&tempSource);
if (bytesRead == 0) {
break;
}
var temp: [5]u8 = undefined;
const s = encoder.encode(&temp, tempSource[0..bytesRead]);
try destWriter.writeAll(s);
}
}
/// dest.len must be at least the value returned by calcSize.
pub fn encode(encoder: *const Base64Encoder, dest: []u8, source: []const u8) []const u8 {
const out_len = encoder.calcSize(source.len);
@ -517,17 +501,13 @@ fn testAllApis(codecs: Codecs, expected_decoded: []const u8, expected_encoded: [
var buffer: [0x100]u8 = undefined;
const encoded = codecs.Encoder.encode(&buffer, expected_decoded);
try testing.expectEqualSlices(u8, expected_encoded, encoded);
}
{
// stream encode
var list = try std.BoundedArray(u8, 0x100).init(0);
try codecs.Encoder.encodeWriter(list.writer(), expected_decoded);
try testing.expectEqualSlices(u8, expected_encoded, list.slice());
// reader to writer encode
var stream = std.io.fixedBufferStream(expected_decoded);
list = try std.BoundedArray(u8, 0x100).init(0);
try codecs.Encoder.encodeFromReaderToWriter(list.writer(), stream.reader());
try testing.expectEqualSlices(u8, expected_encoded, list.slice());
var buffer: [0x100]u8 = undefined;
var writer: std.Io.Writer = .fixed(&buffer);
try codecs.Encoder.encodeWriter(&writer, expected_decoded);
try testing.expectEqualSlices(u8, expected_encoded, writer.buffered());
}
// Base64Decoder

View File

@ -1,412 +0,0 @@
const std = @import("std.zig");
const assert = std.debug.assert;
const mem = std.mem;
const testing = std.testing;
const Alignment = std.mem.Alignment;
/// A structure with an array and a length, that can be used as a slice.
///
/// Useful to pass around small arrays whose exact size is only known at
/// runtime, but whose maximum size is known at comptime, without requiring
/// an `Allocator`.
///
/// ```zig
/// var actual_size = 32;
/// var a = try BoundedArray(u8, 64).init(actual_size);
/// var slice = a.slice(); // a slice of the 64-byte array
/// var a_clone = a; // creates a copy - the structure doesn't use any internal pointers
/// ```
pub fn BoundedArray(comptime T: type, comptime buffer_capacity: usize) type {
return BoundedArrayAligned(T, .of(T), buffer_capacity);
}
/// A structure with an array, length and alignment, that can be used as a
/// slice.
///
/// Useful to pass around small explicitly-aligned arrays whose exact size is
/// only known at runtime, but whose maximum size is known at comptime, without
/// requiring an `Allocator`.
/// ```zig
/// var a = try BoundedArrayAligned(u8, 16, 2).init(0);
/// try a.append(255);
/// try a.append(255);
/// const b = @ptrCast(*const [1]u16, a.constSlice().ptr);
/// try testing.expectEqual(@as(u16, 65535), b[0]);
/// ```
pub fn BoundedArrayAligned(
comptime T: type,
comptime alignment: Alignment,
comptime buffer_capacity: usize,
) type {
return struct {
const Self = @This();
buffer: [buffer_capacity]T align(alignment.toByteUnits()) = undefined,
len: usize = 0,
/// Set the actual length of the slice.
/// Returns error.Overflow if it exceeds the length of the backing array.
pub fn init(len: usize) error{Overflow}!Self {
if (len > buffer_capacity) return error.Overflow;
return Self{ .len = len };
}
/// View the internal array as a slice whose size was previously set.
pub fn slice(self: anytype) switch (@TypeOf(&self.buffer)) {
*align(alignment.toByteUnits()) [buffer_capacity]T => []align(alignment.toByteUnits()) T,
*align(alignment.toByteUnits()) const [buffer_capacity]T => []align(alignment.toByteUnits()) const T,
else => unreachable,
} {
return self.buffer[0..self.len];
}
/// View the internal array as a constant slice whose size was previously set.
pub fn constSlice(self: *const Self) []align(alignment.toByteUnits()) const T {
return self.slice();
}
/// Adjust the slice's length to `len`.
/// Does not initialize added items if any.
pub fn resize(self: *Self, len: usize) error{Overflow}!void {
if (len > buffer_capacity) return error.Overflow;
self.len = len;
}
/// Remove all elements from the slice.
pub fn clear(self: *Self) void {
self.len = 0;
}
/// Copy the content of an existing slice.
pub fn fromSlice(m: []const T) error{Overflow}!Self {
var list = try init(m.len);
@memcpy(list.slice(), m);
return list;
}
/// Return the element at index `i` of the slice.
pub fn get(self: Self, i: usize) T {
return self.constSlice()[i];
}
/// Set the value of the element at index `i` of the slice.
pub fn set(self: *Self, i: usize, item: T) void {
self.slice()[i] = item;
}
/// Return the maximum length of a slice.
pub fn capacity(self: Self) usize {
return self.buffer.len;
}
/// Check that the slice can hold at least `additional_count` items.
pub fn ensureUnusedCapacity(self: Self, additional_count: usize) error{Overflow}!void {
if (self.len + additional_count > buffer_capacity) {
return error.Overflow;
}
}
/// Increase length by 1, returning a pointer to the new item.
pub fn addOne(self: *Self) error{Overflow}!*T {
try self.ensureUnusedCapacity(1);
return self.addOneAssumeCapacity();
}
/// Increase length by 1, returning pointer to the new item.
/// Asserts that there is space for the new item.
pub fn addOneAssumeCapacity(self: *Self) *T {
assert(self.len < buffer_capacity);
self.len += 1;
return &self.slice()[self.len - 1];
}
/// Resize the slice, adding `n` new elements, which have `undefined` values.
/// The return value is a pointer to the array of uninitialized elements.
pub fn addManyAsArray(self: *Self, comptime n: usize) error{Overflow}!*align(alignment.toByteUnits()) [n]T {
const prev_len = self.len;
try self.resize(self.len + n);
return self.slice()[prev_len..][0..n];
}
/// Resize the slice, adding `n` new elements, which have `undefined` values.
/// The return value is a slice pointing to the uninitialized elements.
pub fn addManyAsSlice(self: *Self, n: usize) error{Overflow}![]align(alignment.toByteUnits()) T {
const prev_len = self.len;
try self.resize(self.len + n);
return self.slice()[prev_len..][0..n];
}
/// Remove and return the last element from the slice, or return `null` if the slice is empty.
pub fn pop(self: *Self) ?T {
if (self.len == 0) return null;
const item = self.get(self.len - 1);
self.len -= 1;
return item;
}
/// Return a slice of only the extra capacity after items.
/// This can be useful for writing directly into it.
/// Note that such an operation must be followed up with a
/// call to `resize()`
pub fn unusedCapacitySlice(self: *Self) []align(alignment.toByteUnits()) T {
return self.buffer[self.len..];
}
/// Insert `item` at index `i` by moving `slice[i .. slice.len]` to make room.
/// This operation is O(N).
pub fn insert(
self: *Self,
i: usize,
item: T,
) error{Overflow}!void {
if (i > self.len) {
return error.Overflow;
}
_ = try self.addOne();
var s = self.slice();
mem.copyBackwards(T, s[i + 1 .. s.len], s[i .. s.len - 1]);
self.buffer[i] = item;
}
/// Insert slice `items` at index `i` by moving `slice[i .. slice.len]` to make room.
/// This operation is O(N).
pub fn insertSlice(self: *Self, i: usize, items: []const T) error{Overflow}!void {
try self.ensureUnusedCapacity(items.len);
self.len += items.len;
mem.copyBackwards(T, self.slice()[i + items.len .. self.len], self.constSlice()[i .. self.len - items.len]);
@memcpy(self.slice()[i..][0..items.len], items);
}
/// Replace range of elements `slice[start..][0..len]` with `new_items`.
/// Grows slice if `len < new_items.len`.
/// Shrinks slice if `len > new_items.len`.
pub fn replaceRange(
self: *Self,
start: usize,
len: usize,
new_items: []const T,
) error{Overflow}!void {
const after_range = start + len;
var range = self.slice()[start..after_range];
if (range.len == new_items.len) {
@memcpy(range[0..new_items.len], new_items);
} else if (range.len < new_items.len) {
const first = new_items[0..range.len];
const rest = new_items[range.len..];
@memcpy(range[0..first.len], first);
try self.insertSlice(after_range, rest);
} else {
@memcpy(range[0..new_items.len], new_items);
const after_subrange = start + new_items.len;
for (self.constSlice()[after_range..], 0..) |item, i| {
self.slice()[after_subrange..][i] = item;
}
self.len -= len - new_items.len;
}
}
/// Extend the slice by 1 element.
pub fn append(self: *Self, item: T) error{Overflow}!void {
const new_item_ptr = try self.addOne();
new_item_ptr.* = item;
}
/// Extend the slice by 1 element, asserting the capacity is already
/// enough to store the new item.
pub fn appendAssumeCapacity(self: *Self, item: T) void {
const new_item_ptr = self.addOneAssumeCapacity();
new_item_ptr.* = item;
}
/// Remove the element at index `i`, shift elements after index
/// `i` forward, and return the removed element.
/// Asserts the slice has at least one item.
/// This operation is O(N).
pub fn orderedRemove(self: *Self, i: usize) T {
const newlen = self.len - 1;
if (newlen == i) return self.pop().?;
const old_item = self.get(i);
for (self.slice()[i..newlen], 0..) |*b, j| b.* = self.get(i + 1 + j);
self.set(newlen, undefined);
self.len = newlen;
return old_item;
}
/// Remove the element at the specified index and return it.
/// The empty slot is filled from the end of the slice.
/// This operation is O(1).
pub fn swapRemove(self: *Self, i: usize) T {
if (self.len - 1 == i) return self.pop().?;
const old_item = self.get(i);
self.set(i, self.pop().?);
return old_item;
}
/// Append the slice of items to the slice.
pub fn appendSlice(self: *Self, items: []const T) error{Overflow}!void {
try self.ensureUnusedCapacity(items.len);
self.appendSliceAssumeCapacity(items);
}
/// Append the slice of items to the slice, asserting the capacity is already
/// enough to store the new items.
pub fn appendSliceAssumeCapacity(self: *Self, items: []const T) void {
const old_len = self.len;
self.len += items.len;
@memcpy(self.slice()[old_len..][0..items.len], items);
}
/// Append a value to the slice `n` times.
/// Allocates more memory as necessary.
pub fn appendNTimes(self: *Self, value: T, n: usize) error{Overflow}!void {
const old_len = self.len;
try self.resize(old_len + n);
@memset(self.slice()[old_len..self.len], value);
}
/// Append a value to the slice `n` times.
/// Asserts the capacity is enough.
pub fn appendNTimesAssumeCapacity(self: *Self, value: T, n: usize) void {
const old_len = self.len;
self.len += n;
assert(self.len <= buffer_capacity);
@memset(self.slice()[old_len..self.len], value);
}
pub const Writer = if (T != u8)
@compileError("The Writer interface is only defined for BoundedArray(u8, ...) " ++
"but the given type is BoundedArray(" ++ @typeName(T) ++ ", ...)")
else
std.io.GenericWriter(*Self, error{Overflow}, appendWrite);
/// Initializes a writer which will write into the array.
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
/// Same as `appendSlice` except it returns the number of bytes written, which is always the same
/// as `m.len`. The purpose of this function existing is to match `std.io.GenericWriter` API.
fn appendWrite(self: *Self, m: []const u8) error{Overflow}!usize {
try self.appendSlice(m);
return m.len;
}
};
}
test BoundedArray {
var a = try BoundedArray(u8, 64).init(32);
try testing.expectEqual(a.capacity(), 64);
try testing.expectEqual(a.slice().len, 32);
try testing.expectEqual(a.constSlice().len, 32);
try a.resize(48);
try testing.expectEqual(a.len, 48);
const x = [_]u8{1} ** 10;
a = try BoundedArray(u8, 64).fromSlice(&x);
try testing.expectEqualSlices(u8, &x, a.constSlice());
var a2 = a;
try testing.expectEqualSlices(u8, a.constSlice(), a2.constSlice());
a2.set(0, 0);
try testing.expect(a.get(0) != a2.get(0));
try testing.expectError(error.Overflow, a.resize(100));
try testing.expectError(error.Overflow, BoundedArray(u8, x.len - 1).fromSlice(&x));
try a.resize(0);
try a.ensureUnusedCapacity(a.capacity());
(try a.addOne()).* = 0;
try a.ensureUnusedCapacity(a.capacity() - 1);
try testing.expectEqual(a.len, 1);
const uninitialized = try a.addManyAsArray(4);
try testing.expectEqual(uninitialized.len, 4);
try testing.expectEqual(a.len, 5);
try a.append(0xff);
try testing.expectEqual(a.len, 6);
try testing.expectEqual(a.pop(), 0xff);
a.appendAssumeCapacity(0xff);
try testing.expectEqual(a.len, 6);
try testing.expectEqual(a.pop(), 0xff);
try a.resize(1);
try testing.expectEqual(a.pop(), 0);
try testing.expectEqual(a.pop(), null);
var unused = a.unusedCapacitySlice();
@memset(unused[0..8], 2);
unused[8] = 3;
unused[9] = 4;
try testing.expectEqual(unused.len, a.capacity());
try a.resize(10);
try a.insert(5, 0xaa);
try testing.expectEqual(a.len, 11);
try testing.expectEqual(a.get(5), 0xaa);
try testing.expectEqual(a.get(9), 3);
try testing.expectEqual(a.get(10), 4);
try a.insert(11, 0xbb);
try testing.expectEqual(a.len, 12);
try testing.expectEqual(a.pop(), 0xbb);
try a.appendSlice(&x);
try testing.expectEqual(a.len, 11 + x.len);
try a.appendNTimes(0xbb, 5);
try testing.expectEqual(a.len, 11 + x.len + 5);
try testing.expectEqual(a.pop(), 0xbb);
a.appendNTimesAssumeCapacity(0xcc, 5);
try testing.expectEqual(a.len, 11 + x.len + 5 - 1 + 5);
try testing.expectEqual(a.pop(), 0xcc);
try testing.expectEqual(a.len, 29);
try a.replaceRange(1, 20, &x);
try testing.expectEqual(a.len, 29 + x.len - 20);
try a.insertSlice(0, &x);
try testing.expectEqual(a.len, 29 + x.len - 20 + x.len);
try a.replaceRange(1, 5, &x);
try testing.expectEqual(a.len, 29 + x.len - 20 + x.len + x.len - 5);
try a.append(10);
try testing.expectEqual(a.pop(), 10);
try a.append(20);
const removed = a.orderedRemove(5);
try testing.expectEqual(removed, 1);
try testing.expectEqual(a.len, 34);
a.set(0, 0xdd);
a.set(a.len - 1, 0xee);
const swapped = a.swapRemove(0);
try testing.expectEqual(swapped, 0xdd);
try testing.expectEqual(a.get(0), 0xee);
const added_slice = try a.addManyAsSlice(3);
try testing.expectEqual(added_slice.len, 3);
try testing.expectEqual(a.len, 36);
while (a.pop()) |_| {}
const w = a.writer();
const s = "hello, this is a test string";
try w.writeAll(s);
try testing.expectEqualStrings(s, a.constSlice());
}
test "BoundedArrayAligned" {
var a = try BoundedArrayAligned(u8, .@"16", 4).init(0);
try a.append(0);
try a.append(0);
try a.append(255);
try a.append(255);
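// The .@"16" alignment of the backing buffer is what makes the pointer
// cast below to the wider element type (u16) well-defined.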
const b = @as(*const [2]u16, @ptrCast(a.constSlice().ptr));
try testing.expectEqual(@as(u16, 0), b[0]);
try testing.expectEqual(@as(u16, 65535), b[1]);
}

View File

@ -6970,11 +6970,11 @@ pub const utsname = switch (native_os) {
domainname: [256:0]u8,
},
.macos => extern struct {
sysname: [256:0]u8,
nodename: [256:0]u8,
release: [256:0]u8,
version: [256:0]u8,
machine: [256:0]u8,
sysname: [255:0]u8,
nodename: [255:0]u8,
release: [255:0]u8,
version: [255:0]u8,
machine: [255:0]u8,
},
// https://github.com/SerenityOS/serenity/blob/d794ed1de7a46482272683f8dc4c858806390f29/Kernel/API/POSIX/sys/utsname.h#L17-L23
.serenity => extern struct {
@ -6984,7 +6984,7 @@ pub const utsname = switch (native_os) {
version: [UTSNAME_ENTRY_LEN:0]u8,
machine: [UTSNAME_ENTRY_LEN:0]u8,
const UTSNAME_ENTRY_LEN = 65;
const UTSNAME_ENTRY_LEN = 64;
},
else => void,
};
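// Hedged usage sketch (assumes `std.posix.uname`, which returns this
// struct): each [255:0]u8 field holds 255 bytes plus a 0 sentinel, 256
// bytes total, matching darwin's fixed 256-byte utsname entries.
//
// const uts = std.posix.uname();
// const release = std.mem.sliceTo(&uts.release, 0);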

View File

@ -373,7 +373,7 @@ fn streamInner(d: *Decompress, w: *Writer, limit: std.Io.Limit) (Error || Reader
d.state = .{ .stored_block = @intCast(remaining_len - n) };
}
w.advance(n);
return n;
return @intFromEnum(limit) - remaining + n;
},
.fixed_block => {
while (remaining > 0) {
@ -603,7 +603,7 @@ fn tossBitsEnding(d: *Decompress, n: u4) !void {
error.EndOfStream => unreachable,
};
d.next_bits = next_int >> needed_bits;
d.remaining_bits = @intCast(@as(usize, n) * 8 -| @as(usize, needed_bits));
d.remaining_bits = @intCast(@as(usize, buffered_n) * 8 -| @as(usize, needed_bits));
}
fn takeBitsRuntime(d: *Decompress, n: u4) !u16 {
@ -1265,6 +1265,7 @@ fn testDecompress(container: Container, compressed: []const u8, expected_plain:
defer aw.deinit();
var decompress: Decompress = .init(&in, container, &.{});
_ = try decompress.reader.streamRemaining(&aw.writer);
const decompressed_len = try decompress.reader.streamRemaining(&aw.writer);
try testing.expectEqual(expected_plain.len, decompressed_len);
try testing.expectEqualSlices(u8, expected_plain, aw.getWritten());
}

View File

@ -58,7 +58,7 @@ pub fn Ecdsa(comptime Curve: type, comptime Hash: type) type {
pub const PublicKey = struct {
/// Length (in bytes) of a compressed sec1-encoded key.
pub const compressed_sec1_encoded_length = 1 + Curve.Fe.encoded_length;
/// Length (in bytes) of a compressed sec1-encoded key.
/// Length (in bytes) of an uncompressed sec1-encoded key.
pub const uncompressed_sec1_encoded_length = 1 + 2 * Curve.Fe.encoded_length;
p: Curve,

View File

@ -8,8 +8,8 @@ const mem = std.mem;
const crypto = std.crypto;
const assert = std.debug.assert;
const Certificate = std.crypto.Certificate;
const Reader = std.io.Reader;
const Writer = std.io.Writer;
const Reader = std.Io.Reader;
const Writer = std.Io.Writer;
const max_ciphertext_len = tls.max_ciphertext_len;
const hmacExpandLabel = tls.hmacExpandLabel;
@ -27,6 +27,8 @@ reader: Reader,
/// The encrypted stream from the client to the server. Bytes are pushed here
/// via `writer`.
///
/// The buffer is asserted to have capacity at least `min_buffer_len`.
output: *Writer,
/// The plaintext stream from the client to the server.
writer: Writer,
@ -122,7 +124,6 @@ pub const Options = struct {
/// the amount of data expected, such as HTTP with the Content-Length header.
allow_truncation_attacks: bool = false,
write_buffer: []u8,
/// Asserted to have capacity at least `min_buffer_len`.
read_buffer: []u8,
/// Populated when `error.TlsAlert` is returned from `init`.
alert: ?*tls.Alert = null,
@ -185,6 +186,7 @@ const InitError = error{
/// `input` is asserted to have buffer capacity at least `min_buffer_len`.
pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client {
assert(input.buffer.len >= min_buffer_len);
assert(output.buffer.len >= min_buffer_len);
const host = switch (options.host) {
.no_verification => "",
.explicit => |host| host,
@ -278,6 +280,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
{
var iovecs: [2][]const u8 = .{ cleartext_header, host };
try output.writeVecAll(iovecs[0..if (host.len == 0) 1 else 2]);
try output.flush();
}
var tls_version: tls.ProtocolVersion = undefined;
@ -328,7 +331,9 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
var cleartext_bufs: [2][tls.max_ciphertext_inner_record_len]u8 = undefined;
fragment: while (true) {
// Ensure the input buffer pointer is stable in this scope.
input.rebaseCapacity(tls.max_ciphertext_record_len);
input.rebase(tls.max_ciphertext_record_len) catch |err| switch (err) {
error.EndOfStream => {}, // We have assurance the remainder of stream can be buffered.
};
const record_header = input.peek(tls.record_header_len) catch |err| switch (err) {
error.EndOfStream => return error.TlsConnectionTruncated,
error.ReadFailed => return error.ReadFailed,
@ -761,6 +766,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
&client_verify_msg,
};
try output.writeVecAll(&all_msgs_vec);
try output.flush();
},
}
write_seq += 1;
@ -826,6 +832,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
&finished_msg,
};
try output.writeVecAll(&all_msgs_vec);
try output.flush();
const client_secret = hkdfExpandLabel(P.Hkdf, pv.master_secret, "c ap traffic", &handshake_hash, P.Hash.digest_length);
const server_secret = hkdfExpandLabel(P.Hkdf, pv.master_secret, "s ap traffic", &handshake_hash, P.Hash.digest_length);
@ -875,7 +882,7 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
.buffer = options.write_buffer,
.vtable = &.{
.drain = drain,
.sendFile = Writer.unimplementedSendFile,
.flush = flush,
},
},
.tls_version = tls_version,
@ -908,32 +915,57 @@ pub fn init(input: *Reader, output: *Writer, options: Options) InitError!Client
}
fn drain(w: *Writer, data: []const []const u8, splat: usize) Writer.Error!usize {
const c: *Client = @fieldParentPtr("writer", w);
if (true) @panic("update to use the buffer and flush");
const sliced_data = if (splat == 0) data[0..data.len -| 1] else data;
const c: *Client = @alignCast(@fieldParentPtr("writer", w));
const output = c.output;
const ciphertext_buf = try output.writableSliceGreedy(min_buffer_len);
var total_clear: usize = 0;
var ciphertext_end: usize = 0;
for (sliced_data) |buf| {
const prepared = prepareCiphertextRecord(c, ciphertext_buf[ciphertext_end..], buf, .application_data);
total_clear += prepared.cleartext_len;
ciphertext_end += prepared.ciphertext_end;
if (total_clear < buf.len) break;
var total_clear: usize = 0;
done: {
{
const buf = w.buffered();
const prepared = prepareCiphertextRecord(c, ciphertext_buf[ciphertext_end..], buf, .application_data);
total_clear += prepared.cleartext_len;
ciphertext_end += prepared.ciphertext_end;
if (prepared.cleartext_len < buf.len) break :done;
}
for (data[0 .. data.len - 1]) |buf| {
if (buf.len < min_buffer_len) break :done;
const prepared = prepareCiphertextRecord(c, ciphertext_buf[ciphertext_end..], buf, .application_data);
total_clear += prepared.cleartext_len;
ciphertext_end += prepared.ciphertext_end;
if (prepared.cleartext_len < buf.len) break :done;
}
const buf = data[data.len - 1];
for (0..splat) |_| {
if (buf.len < min_buffer_len) break :done;
const prepared = prepareCiphertextRecord(c, ciphertext_buf[ciphertext_end..], buf, .application_data);
total_clear += prepared.cleartext_len;
ciphertext_end += prepared.ciphertext_end;
if (prepared.cleartext_len < buf.len) break :done;
}
}
output.advance(ciphertext_end);
return total_clear;
return w.consume(total_clear);
}
fn flush(w: *Writer) Writer.Error!void {
const c: *Client = @alignCast(@fieldParentPtr("writer", w));
const output = c.output;
const ciphertext_buf = try output.writableSliceGreedy(min_buffer_len);
const prepared = prepareCiphertextRecord(c, ciphertext_buf, w.buffered(), .application_data);
output.advance(prepared.ciphertext_end);
w.end = 0;
}
/// Sends a `close_notify` alert, which is necessary for the server to
/// distinguish between a properly finished TLS session, or a truncation
/// attack.
pub fn end(c: *Client) Writer.Error!void {
try flush(&c.writer);
const output = c.output;
const ciphertext_buf = try output.writableSliceGreedy(min_buffer_len);
const prepared = prepareCiphertextRecord(c, ciphertext_buf, &tls.close_notify_alert, .alert);
output.advance(prepared.cleartext_len);
return prepared.ciphertext_end;
output.advance(prepared.ciphertext_end);
}
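// Hedged call-order sketch (names from this file; buffer sizing is an
// assumption): after `init` succeeds, plaintext is written through
// `client.writer`, flushed, and the session is closed with `end`.
//
// var client = try Client.init(input, output, options);
// try client.writer.writeAll("GET / HTTP/1.1\r\nhost: example.com\r\n\r\n");
// try client.writer.flush();
// try client.end();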
fn prepareCiphertextRecord(
@ -1043,8 +1075,8 @@ pub fn eof(c: Client) bool {
return c.received_close_notify;
}
fn stream(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
const c: *Client = @fieldParentPtr("reader", r);
fn stream(r: *Reader, w: *Writer, limit: std.Io.Limit) Reader.StreamError!usize {
const c: *Client = @alignCast(@fieldParentPtr("reader", r));
if (c.eof()) return error.EndOfStream;
const input = c.input;
// If at least one full encrypted record is not buffered, read once.

View File

@ -502,6 +502,13 @@ pub const Header = struct {
};
}
pub fn iterateProgramHeadersBuffer(h: Header, buf: []const u8) ProgramHeaderBufferIterator {
return .{
.elf_header = h,
.buf = buf,
};
}
pub fn iterateSectionHeaders(h: Header, file_reader: *std.fs.File.Reader) SectionHeaderIterator {
return .{
.elf_header = h,
@ -509,6 +516,13 @@ pub const Header = struct {
};
}
pub fn iterateSectionHeadersBuffer(h: Header, buf: []const u8) SectionHeaderBufferIterator {
return .{
.elf_header = h,
.buf = buf,
};
}
pub const ReadError = std.Io.Reader.Error || error{
InvalidElfMagic,
InvalidElfVersion,
@ -570,29 +584,48 @@ pub const ProgramHeaderIterator = struct {
if (it.index >= it.elf_header.phnum) return null;
defer it.index += 1;
if (it.elf_header.is_64) {
const offset = it.elf_header.phoff + @sizeOf(Elf64_Phdr) * it.index;
try it.file_reader.seekTo(offset);
const phdr = try it.file_reader.interface.takeStruct(Elf64_Phdr, it.elf_header.endian);
return phdr;
}
const offset = it.elf_header.phoff + @sizeOf(Elf32_Phdr) * it.index;
const offset = it.elf_header.phoff + (if (it.elf_header.is_64) @sizeOf(Elf64_Phdr) else @sizeOf(Elf32_Phdr)) * it.index;
try it.file_reader.seekTo(offset);
const phdr = try it.file_reader.interface.takeStruct(Elf32_Phdr, it.elf_header.endian);
return .{
.p_type = phdr.p_type,
.p_offset = phdr.p_offset,
.p_vaddr = phdr.p_vaddr,
.p_paddr = phdr.p_paddr,
.p_filesz = phdr.p_filesz,
.p_memsz = phdr.p_memsz,
.p_flags = phdr.p_flags,
.p_align = phdr.p_align,
};
return takePhdr(&it.file_reader.interface, it.elf_header);
}
};
pub const ProgramHeaderBufferIterator = struct {
elf_header: Header,
buf: []const u8,
index: usize = 0,
pub fn next(it: *ProgramHeaderBufferIterator) !?Elf64_Phdr {
if (it.index >= it.elf_header.phnum) return null;
defer it.index += 1;
const offset = it.elf_header.phoff + (if (it.elf_header.is_64) @sizeOf(Elf64_Phdr) else @sizeOf(Elf32_Phdr)) * it.index;
var reader = std.Io.Reader.fixed(it.buf[offset..]);
return takePhdr(&reader, it.elf_header);
}
};
fn takePhdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Phdr {
if (elf_header.is_64) {
const phdr = try reader.takeStruct(Elf64_Phdr, elf_header.endian);
return phdr;
}
const phdr = try reader.takeStruct(Elf32_Phdr, elf_header.endian);
return .{
.p_type = phdr.p_type,
.p_offset = phdr.p_offset,
.p_vaddr = phdr.p_vaddr,
.p_paddr = phdr.p_paddr,
.p_filesz = phdr.p_filesz,
.p_memsz = phdr.p_memsz,
.p_flags = phdr.p_flags,
.p_align = phdr.p_align,
};
}
pub const SectionHeaderIterator = struct {
elf_header: Header,
file_reader: *std.fs.File.Reader,
@ -602,29 +635,50 @@ pub const SectionHeaderIterator = struct {
if (it.index >= it.elf_header.shnum) return null;
defer it.index += 1;
if (it.elf_header.is_64) {
try it.file_reader.seekTo(it.elf_header.shoff + @sizeOf(Elf64_Shdr) * it.index);
const shdr = try it.file_reader.interface.takeStruct(Elf64_Shdr, it.elf_header.endian);
return shdr;
}
const offset = it.elf_header.shoff + (if (it.elf_header.is_64) @sizeOf(Elf64_Shdr) else @sizeOf(Elf32_Shdr)) * it.index;
try it.file_reader.seekTo(offset);
try it.file_reader.seekTo(it.elf_header.shoff + @sizeOf(Elf32_Shdr) * it.index);
const shdr = try it.file_reader.interface.takeStruct(Elf32_Shdr, it.elf_header.endian);
return .{
.sh_name = shdr.sh_name,
.sh_type = shdr.sh_type,
.sh_flags = shdr.sh_flags,
.sh_addr = shdr.sh_addr,
.sh_offset = shdr.sh_offset,
.sh_size = shdr.sh_size,
.sh_link = shdr.sh_link,
.sh_info = shdr.sh_info,
.sh_addralign = shdr.sh_addralign,
.sh_entsize = shdr.sh_entsize,
};
return takeShdr(&it.file_reader.interface, it.elf_header);
}
};
pub const SectionHeaderBufferIterator = struct {
elf_header: Header,
buf: []const u8,
index: usize = 0,
pub fn next(it: *SectionHeaderBufferIterator) !?Elf64_Shdr {
if (it.index >= it.elf_header.shnum) return null;
defer it.index += 1;
const offset = it.elf_header.shoff + (if (it.elf_header.is_64) @sizeOf(Elf64_Shdr) else @sizeOf(Elf32_Shdr)) * it.index;
var reader = std.Io.Reader.fixed(it.buf[offset..]);
return takeShdr(&reader, it.elf_header);
}
};
fn takeShdr(reader: *std.Io.Reader, elf_header: Header) !?Elf64_Shdr {
if (elf_header.is_64) {
const shdr = try reader.takeStruct(Elf64_Shdr, elf_header.endian);
return shdr;
}
const shdr = try reader.takeStruct(Elf32_Shdr, elf_header.endian);
return .{
.sh_name = shdr.sh_name,
.sh_type = shdr.sh_type,
.sh_flags = shdr.sh_flags,
.sh_addr = shdr.sh_addr,
.sh_offset = shdr.sh_offset,
.sh_size = shdr.sh_size,
.sh_link = shdr.sh_link,
.sh_info = shdr.sh_info,
.sh_addralign = shdr.sh_addralign,
.sh_entsize = shdr.sh_entsize,
};
}
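// Hedged usage sketch for the new in-memory iterators (assumes `header` was
// parsed from `file_bytes` elsewhere):
//
// var it = header.iterateProgramHeadersBuffer(file_bytes);
// while (try it.next()) |phdr| {
//     if (phdr.p_type == PT_LOAD) {
//         // inspect the loadable segment
//     }
// }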
pub const ELFCLASSNONE = 0;
pub const ELFCLASS32 = 1;
pub const ELFCLASS64 = 2;

View File

@ -1111,7 +1111,16 @@ pub const Reader = struct {
if (is_windows) {
// Unfortunately, `ReadFileScatter` cannot be used since it
// requires page alignment.
return readPositional(r, data[0]);
assert(io_reader.seek == io_reader.end);
io_reader.seek = 0;
io_reader.end = 0;
const first = data[0];
if (first.len >= io_reader.buffer.len) {
return readPositional(r, first);
} else {
io_reader.end += try readPositional(r, io_reader.buffer);
return 0;
}
}
var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
@ -1141,8 +1150,7 @@ pub const Reader = struct {
}
r.pos += n;
if (n > data_size) {
io_reader.seek = 0;
io_reader.end = n - data_size;
io_reader.end += n - data_size;
return data_size;
}
return n;
@ -1151,7 +1159,16 @@ pub const Reader = struct {
if (is_windows) {
// Unfortunately, `ReadFileScatter` cannot be used since it
// requires page alignment.
return readStreaming(r, data[0]);
assert(io_reader.seek == io_reader.end);
io_reader.seek = 0;
io_reader.end = 0;
const first = data[0];
if (first.len >= io_reader.buffer.len) {
return readStreaming(r, first);
} else {
io_reader.end += try readStreaming(r, io_reader.buffer);
return 0;
}
}
var iovecs_buffer: [max_buffers_len]posix.iovec = undefined;
const dest_n, const data_size = try io_reader.writableVectorPosix(&iovecs_buffer, data);
@ -1167,8 +1184,7 @@ pub const Reader = struct {
}
r.pos += n;
if (n > data_size) {
io_reader.seek = 0;
io_reader.end = n - data_size;
io_reader.end += n - data_size;
return data_size;
}
return n;
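// Sketch of the rule above: when the caller's first slice is at least as
// large as the Reader's internal buffer, read directly into it; otherwise
// fill the internal buffer and return 0 so subsequent reads drain from
// the buffer.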

View File

@ -1,7 +1,7 @@
const builtin = @import("builtin");
const std = @import("std.zig");
const assert = std.debug.assert;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const File = std.fs.File;
pub const Client = @import("http/Client.zig");
@ -20,51 +20,32 @@ pub const Version = enum {
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definition
///
/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
pub const Method = enum(u64) {
GET = parse("GET"),
HEAD = parse("HEAD"),
POST = parse("POST"),
PUT = parse("PUT"),
DELETE = parse("DELETE"),
CONNECT = parse("CONNECT"),
OPTIONS = parse("OPTIONS"),
TRACE = parse("TRACE"),
PATCH = parse("PATCH"),
_,
/// Converts `s` into a type that may be used as a `Method` field.
/// Asserts that `s` is 24 or fewer bytes.
pub fn parse(s: []const u8) u64 {
var x: u64 = 0;
const len = @min(s.len, @sizeOf(@TypeOf(x)));
@memcpy(std.mem.asBytes(&x)[0..len], s[0..len]);
return x;
}
pub fn format(self: Method, w: *std.io.Writer) std.io.Writer.Error!void {
const bytes: []const u8 = @ptrCast(&@intFromEnum(self));
const str = std.mem.sliceTo(bytes, 0);
try w.writeAll(str);
}
pub const Method = enum {
GET,
HEAD,
POST,
PUT,
DELETE,
CONNECT,
OPTIONS,
TRACE,
PATCH,
/// Returns true if a request of this method is allowed to have a body
/// Actual behavior from servers may vary and should still be checked
pub fn requestHasBody(self: Method) bool {
return switch (self) {
pub fn requestHasBody(m: Method) bool {
return switch (m) {
.POST, .PUT, .PATCH => true,
.GET, .HEAD, .DELETE, .CONNECT, .OPTIONS, .TRACE => false,
else => true,
};
}
/// Returns true if a response to this method is allowed to have a body
/// Actual behavior from clients may vary and should still be checked
pub fn responseHasBody(self: Method) bool {
return switch (self) {
pub fn responseHasBody(m: Method) bool {
return switch (m) {
.GET, .POST, .DELETE, .CONNECT, .OPTIONS, .PATCH => true,
.HEAD, .PUT, .TRACE => false,
else => true,
};
}
@ -73,11 +54,10 @@ pub const Method = enum(u64) {
/// https://developer.mozilla.org/en-US/docs/Glossary/Safe/HTTP
///
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.1
pub fn safe(self: Method) bool {
return switch (self) {
pub fn safe(m: Method) bool {
return switch (m) {
.GET, .HEAD, .OPTIONS, .TRACE => true,
.POST, .PUT, .DELETE, .CONNECT, .PATCH => false,
else => false,
};
}
@ -88,11 +68,10 @@ pub const Method = enum(u64) {
/// https://developer.mozilla.org/en-US/docs/Glossary/Idempotent
///
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.2
pub fn idempotent(self: Method) bool {
return switch (self) {
pub fn idempotent(m: Method) bool {
return switch (m) {
.GET, .HEAD, .PUT, .DELETE, .OPTIONS, .TRACE => true,
.CONNECT, .POST, .PATCH => false,
else => false,
};
}
@ -102,11 +81,10 @@ pub const Method = enum(u64) {
/// https://developer.mozilla.org/en-US/docs/Glossary/cacheable
///
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4.2.3
pub fn cacheable(self: Method) bool {
return switch (self) {
pub fn cacheable(m: Method) bool {
return switch (m) {
.GET, .HEAD => true,
.POST, .PUT, .DELETE, .CONNECT, .OPTIONS, .TRACE, .PATCH => false,
else => false,
};
}
};
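// Hedged sketch: with the packed-u64 representation gone, parsing and
// formatting go through standard enum facilities (both appear later in
// this diff):
//
// const method = std.meta.stringToEnum(Method, "GET") orelse
//     return error.UnknownHttpMethod;
// try w.writeAll(@tagName(method));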
@ -327,11 +305,11 @@ pub const Header = struct {
};
pub const Reader = struct {
in: *std.io.Reader,
in: *std.Io.Reader,
/// This is preallocated memory that might be used by `bodyReader`. That
/// function might return a pointer to this field, or a different
/// `*std.io.Reader`. Advisable to not access this field directly.
interface: std.io.Reader,
/// `*std.Io.Reader`. Advisable to not access this field directly.
interface: std.Io.Reader,
/// Keeps track of whether the stream is ready to accept a new request,
/// making invalid API usage cause assertion failures rather than HTTP
/// protocol violations.
@ -343,10 +321,6 @@ pub const Reader = struct {
/// read from `in`.
trailers: []const u8 = &.{},
body_err: ?BodyError = null,
/// Stolen from `in`.
head_buffer: []u8 = &.{},
pub const max_chunk_header_len = 22;
pub const RemainingChunkLen = enum(u64) {
head = 0,
@ -398,35 +372,34 @@ pub const Reader = struct {
ReadFailed,
};
pub fn restituteHeadBuffer(reader: *Reader) void {
reader.in.restitute(reader.head_buffer.len);
reader.head_buffer.len = 0;
}
/// Buffers the entire head into `head_buffer`, invalidating the previous
/// `head_buffer`, if any.
pub fn receiveHead(reader: *Reader) HeadError!void {
/// Buffers the entire head inside `in`.
///
/// The resulting memory is invalidated by any subsequent consumption of
/// the input stream.
pub fn receiveHead(reader: *Reader) HeadError![]const u8 {
reader.trailers = &.{};
const in = reader.in;
in.restitute(reader.head_buffer.len);
reader.head_buffer.len = 0;
in.rebase();
var hp: HeadParser = .{};
var head_end: usize = 0;
var head_len: usize = 0;
while (true) {
if (head_end >= in.buffer.len) return error.HttpHeadersOversize;
in.fillMore() catch |err| switch (err) {
error.EndOfStream => switch (head_end) {
0 => return error.HttpConnectionClosing,
else => return error.HttpRequestTruncated,
},
error.ReadFailed => return error.ReadFailed,
};
head_end += hp.feed(in.buffered()[head_end..]);
if (in.buffer.len - head_len == 0) return error.HttpHeadersOversize;
const remaining = in.buffered()[head_len..];
if (remaining.len == 0) {
in.fillMore() catch |err| switch (err) {
error.EndOfStream => switch (head_len) {
0 => return error.HttpConnectionClosing,
else => return error.HttpRequestTruncated,
},
error.ReadFailed => return error.ReadFailed,
};
continue;
}
head_len += hp.feed(remaining);
if (hp.state == .finished) {
reader.head_buffer = in.steal(head_end);
reader.state = .received_head;
return;
const head_buffer = in.buffered()[0..head_len];
in.toss(head_len);
return head_buffer;
}
}
}
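// Hedged usage sketch: the returned head aliases `in`'s internal buffer,
// so parse it before consuming the body (mirrors `Server.receiveHead`
// later in this diff):
//
// const head_buffer = try reader.receiveHead();
// const head = try Request.Head.parse(head_buffer);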
@ -442,7 +415,7 @@ pub const Reader = struct {
buffer: []u8,
transfer_encoding: TransferEncoding,
content_length: ?u64,
) *std.io.Reader {
) *std.Io.Reader {
assert(reader.state == .received_head);
switch (transfer_encoding) {
.chunked => {
@ -492,7 +465,7 @@ pub const Reader = struct {
content_encoding: ContentEncoding,
decompressor: *Decompressor,
decompression_buffer: []u8,
) *std.io.Reader {
) *std.Io.Reader {
if (transfer_encoding == .none and content_length == null) {
assert(reader.state == .received_head);
reader.state = .body_none;
@ -501,7 +474,7 @@ pub const Reader = struct {
return reader.in;
},
.deflate => {
decompressor.* = .{ .flate = .init(reader.in, .raw, decompression_buffer) };
decompressor.* = .{ .flate = .init(reader.in, .zlib, decompression_buffer) };
return &decompressor.flate.reader;
},
.gzip => {
@ -520,37 +493,37 @@ pub const Reader = struct {
}
fn contentLengthStream(
io_r: *std.io.Reader,
io_r: *std.Io.Reader,
w: *Writer,
limit: std.io.Limit,
) std.io.Reader.StreamError!usize {
const reader: *Reader = @fieldParentPtr("interface", io_r);
limit: std.Io.Limit,
) std.Io.Reader.StreamError!usize {
const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
const remaining_content_length = &reader.state.body_remaining_content_length;
const remaining = remaining_content_length.*;
if (remaining == 0) {
reader.state = .ready;
return error.EndOfStream;
}
const n = try reader.in.stream(w, limit.min(.limited(remaining)));
const n = try reader.in.stream(w, limit.min(.limited64(remaining)));
remaining_content_length.* = remaining - n;
return n;
}
fn contentLengthDiscard(io_r: *std.io.Reader, limit: std.io.Limit) std.io.Reader.Error!usize {
const reader: *Reader = @fieldParentPtr("interface", io_r);
fn contentLengthDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
const remaining_content_length = &reader.state.body_remaining_content_length;
const remaining = remaining_content_length.*;
if (remaining == 0) {
reader.state = .ready;
return error.EndOfStream;
}
const n = try reader.in.discard(limit.min(.limited(remaining)));
const n = try reader.in.discard(limit.min(.limited64(remaining)));
remaining_content_length.* = remaining - n;
return n;
}
fn chunkedStream(io_r: *std.io.Reader, w: *Writer, limit: std.io.Limit) std.io.Reader.StreamError!usize {
const reader: *Reader = @fieldParentPtr("interface", io_r);
fn chunkedStream(io_r: *std.Io.Reader, w: *Writer, limit: std.Io.Limit) std.Io.Reader.StreamError!usize {
const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
const chunk_len_ptr = switch (reader.state) {
.ready => return error.EndOfStream,
.body_remaining_chunk_len => |*x| x,
@ -573,9 +546,9 @@ pub const Reader = struct {
fn chunkedReadEndless(
reader: *Reader,
w: *Writer,
limit: std.io.Limit,
limit: std.Io.Limit,
chunk_len_ptr: *RemainingChunkLen,
) (BodyError || std.io.Reader.StreamError)!usize {
) (BodyError || std.Io.Reader.StreamError)!usize {
const in = reader.in;
len: switch (chunk_len_ptr.*) {
.head => {
@ -596,7 +569,7 @@ pub const Reader = struct {
}
}
if (cp.chunk_len == 0) return parseTrailers(reader, 0);
const n = try in.stream(w, limit.min(.limited(cp.chunk_len)));
const n = try in.stream(w, limit.min(.limited64(cp.chunk_len)));
chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
return n;
},
@ -612,15 +585,15 @@ pub const Reader = struct {
continue :len .head;
},
else => |remaining_chunk_len| {
const n = try in.stream(w, limit.min(.limited(@intFromEnum(remaining_chunk_len) - 2)));
const n = try in.stream(w, limit.min(.limited64(@intFromEnum(remaining_chunk_len) - 2)));
chunk_len_ptr.* = .init(@intFromEnum(remaining_chunk_len) - n);
return n;
},
}
}
fn chunkedDiscard(io_r: *std.io.Reader, limit: std.io.Limit) std.io.Reader.Error!usize {
const reader: *Reader = @fieldParentPtr("interface", io_r);
fn chunkedDiscard(io_r: *std.Io.Reader, limit: std.Io.Limit) std.Io.Reader.Error!usize {
const reader: *Reader = @alignCast(@fieldParentPtr("interface", io_r));
const chunk_len_ptr = switch (reader.state) {
.ready => return error.EndOfStream,
.body_remaining_chunk_len => |*x| x,
@ -641,9 +614,9 @@ pub const Reader = struct {
fn chunkedDiscardEndless(
reader: *Reader,
limit: std.io.Limit,
limit: std.Io.Limit,
chunk_len_ptr: *RemainingChunkLen,
) (BodyError || std.io.Reader.Error)!usize {
) (BodyError || std.Io.Reader.Error)!usize {
const in = reader.in;
len: switch (chunk_len_ptr.*) {
.head => {
@ -664,7 +637,7 @@ pub const Reader = struct {
}
}
if (cp.chunk_len == 0) return parseTrailers(reader, 0);
const n = try in.discard(limit.min(.limited(cp.chunk_len)));
const n = try in.discard(limit.min(.limited64(cp.chunk_len)));
chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
return n;
},
@ -680,7 +653,7 @@ pub const Reader = struct {
continue :len .head;
},
else => |remaining_chunk_len| {
const n = try in.discard(limit.min(.limited(remaining_chunk_len.int() - 2)));
const n = try in.discard(limit.min(.limited64(remaining_chunk_len.int() - 2)));
chunk_len_ptr.* = .init(remaining_chunk_len.int() - n);
return n;
},
@ -689,7 +662,7 @@ pub const Reader = struct {
/// Called when the next bytes in the stream are trailers, or "\r\n" to
/// indicate the end of the chunked body.
fn parseTrailers(reader: *Reader, amt_read: usize) (BodyError || std.io.Reader.Error)!usize {
fn parseTrailers(reader: *Reader, amt_read: usize) (BodyError || std.Io.Reader.Error)!usize {
const in = reader.in;
const rn = try in.peekArray(2);
if (rn[0] == '\r' and rn[1] == '\n') {
@ -721,21 +694,21 @@ pub const Reader = struct {
pub const Decompressor = union(enum) {
flate: std.compress.flate.Decompress,
zstd: std.compress.zstd.Decompress,
none: *std.io.Reader,
none: *std.Io.Reader,
pub fn init(
decompressor: *Decompressor,
transfer_reader: *std.io.Reader,
transfer_reader: *std.Io.Reader,
buffer: []u8,
content_encoding: ContentEncoding,
) *std.io.Reader {
) *std.Io.Reader {
switch (content_encoding) {
.identity => {
decompressor.* = .{ .none = transfer_reader };
return transfer_reader;
},
.deflate => {
decompressor.* = .{ .flate = .init(transfer_reader, .raw, buffer) };
decompressor.* = .{ .flate = .init(transfer_reader, .zlib, buffer) };
return &decompressor.flate.reader;
},
.gzip => {
@ -763,7 +736,7 @@ pub const BodyWriter = struct {
/// How many zeroes to reserve for hex-encoded chunk length.
const chunk_len_digits = 8;
const max_chunk_len: usize = std.math.pow(usize, 16, chunk_len_digits) - 1;
const max_chunk_len: usize = std.math.pow(u64, 16, chunk_len_digits) - 1;
const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";
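// For example, a 14-byte chunk is framed as "0000000e\r\n" ++ payload ++
// "\r\n", the length hex-encoded into the reserved digits above.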
comptime {
@ -795,7 +768,7 @@ pub const BodyWriter = struct {
};
pub fn isEliding(w: *const BodyWriter) bool {
return w.writer.vtable.drain == Writer.discardingDrain;
return w.writer.vtable.drain == elidingDrain;
}
/// Sends all buffered data across `BodyWriter.http_protocol_output`.
@ -923,7 +896,7 @@ pub const BodyWriter = struct {
}
pub fn contentLengthDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
const bw: *BodyWriter = @fieldParentPtr("writer", w);
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
assert(!bw.isEliding());
const out = bw.http_protocol_output;
const n = try out.writeSplatHeader(w.buffered(), data, splat);
@ -932,24 +905,64 @@ pub const BodyWriter = struct {
}
pub fn noneDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
const bw: *BodyWriter = @fieldParentPtr("writer", w);
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
assert(!bw.isEliding());
const out = bw.http_protocol_output;
const n = try out.writeSplatHeader(w.buffered(), data, splat);
return w.consume(n);
}
pub fn elidingDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
const slice = data[0 .. data.len - 1];
const pattern = data[slice.len];
var written: usize = pattern.len * splat;
for (slice) |bytes| written += bytes.len;
switch (bw.state) {
.content_length => |*len| len.* -= written + w.end,
else => {},
}
w.end = 0;
return written;
}
pub fn elidingSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
if (File.Handle == void) return error.Unimplemented;
if (builtin.zig_backend == .stage2_aarch64) return error.Unimplemented;
switch (bw.state) {
.content_length => |*len| len.* -= w.end,
else => {},
}
w.end = 0;
if (limit == .nothing) return 0;
if (file_reader.getSize()) |size| {
const n = limit.minInt64(size - file_reader.pos);
if (n == 0) return error.EndOfStream;
file_reader.seekBy(@intCast(n)) catch return error.Unimplemented;
switch (bw.state) {
.content_length => |*len| len.* -= n,
else => {},
}
return n;
} else |_| {
// The error is observable on the `file_reader` instance, and it is
// better to treat the file as a pipe.
return error.Unimplemented;
}
}
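// Eliding sketch: these two vtable entries count body bytes, keeping any
// content-length state consistent, without ever writing them to
// `http_protocol_output`; the server uses them when `elide_body` is set,
// e.g. for HEAD responses.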
/// Returns `null` if size cannot be computed without making any syscalls.
pub fn noneSendFile(w: *Writer, file_reader: *File.Reader, limit: std.io.Limit) Writer.FileError!usize {
const bw: *BodyWriter = @fieldParentPtr("writer", w);
pub fn noneSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
assert(!bw.isEliding());
const out = bw.http_protocol_output;
const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
return w.consume(n);
}
pub fn contentLengthSendFile(w: *Writer, file_reader: *File.Reader, limit: std.io.Limit) Writer.FileError!usize {
const bw: *BodyWriter = @fieldParentPtr("writer", w);
pub fn contentLengthSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
assert(!bw.isEliding());
const out = bw.http_protocol_output;
const n = try out.sendFileHeader(w.buffered(), file_reader, limit);
@ -957,8 +970,8 @@ pub const BodyWriter = struct {
return w.consume(n);
}
pub fn chunkedSendFile(w: *Writer, file_reader: *File.Reader, limit: std.io.Limit) Writer.FileError!usize {
const bw: *BodyWriter = @fieldParentPtr("writer", w);
pub fn chunkedSendFile(w: *Writer, file_reader: *File.Reader, limit: std.Io.Limit) Writer.FileError!usize {
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
assert(!bw.isEliding());
const data_len = Writer.countSendFileLowerBound(w.end, file_reader, limit) orelse {
// If the file size is unknown, we cannot lower to a `sendFile` since we would
@ -1006,7 +1019,7 @@ pub const BodyWriter = struct {
}
pub fn chunkedDrain(w: *Writer, data: []const []const u8, splat: usize) Error!usize {
const bw: *BodyWriter = @fieldParentPtr("writer", w);
const bw: *BodyWriter = @alignCast(@fieldParentPtr("writer", w));
assert(!bw.isEliding());
const out = bw.http_protocol_output;
const data_len = w.end + Writer.countSplat(data, splat);

View File

@ -42,7 +42,7 @@ connection_pool: ConnectionPool = .{},
///
/// If the entire HTTP header cannot fit in this amount of bytes,
/// `error.HttpHeadersOversize` will be returned from `Request.wait`.
read_buffer_size: usize = 4096,
read_buffer_size: usize = 4096 + if (disable_tls) 0 else std.crypto.tls.Client.min_buffer_len,
/// Each `Connection` allocates this amount for the writer buffer.
write_buffer_size: usize = 1024,
@ -82,7 +82,7 @@ pub const ConnectionPool = struct {
var next = pool.free.last;
while (next) |node| : (next = node.prev) {
const connection: *Connection = @fieldParentPtr("pool_node", node);
const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
if (connection.protocol != criteria.protocol) continue;
if (connection.port != criteria.port) continue;
@ -115,8 +115,6 @@ pub const ConnectionPool = struct {
/// Tries to release a connection back to the connection pool.
/// If the connection is marked as closing, it will be closed instead.
///
/// `allocator` must be the same one used to create `connection`.
///
/// Threadsafe.
pub fn release(pool: *ConnectionPool, connection: *Connection) void {
pool.mutex.lock();
@ -127,7 +125,7 @@ pub const ConnectionPool = struct {
if (connection.closing or pool.free_size == 0) return connection.destroy();
if (pool.free_len >= pool.free_size) {
const popped: *Connection = @fieldParentPtr("pool_node", pool.free.popFirst().?);
const popped: *Connection = @alignCast(@fieldParentPtr("pool_node", pool.free.popFirst().?));
pool.free_len -= 1;
popped.destroy();
@ -183,14 +181,14 @@ pub const ConnectionPool = struct {
var next = pool.free.first;
while (next) |node| {
const connection: *Connection = @fieldParentPtr("pool_node", node);
const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
next = node.next;
connection.destroy();
}
next = pool.used.first;
while (next) |node| {
const connection: *Connection = @fieldParentPtr("pool_node", node);
const connection: *Connection = @alignCast(@fieldParentPtr("pool_node", node));
next = node.next;
connection.destroy();
}
@ -306,15 +304,16 @@ pub const Connection = struct {
const host_buffer = base[@sizeOf(Tls)..][0..remote_host.len];
const tls_read_buffer = host_buffer.ptr[host_buffer.len..][0..client.tls_buffer_size];
const tls_write_buffer = tls_read_buffer.ptr[tls_read_buffer.len..][0..client.tls_buffer_size];
const socket_write_buffer = tls_write_buffer.ptr[tls_write_buffer.len..][0..client.write_buffer_size];
assert(base.ptr + alloc_len == socket_write_buffer.ptr + socket_write_buffer.len);
const write_buffer = tls_write_buffer.ptr[tls_write_buffer.len..][0..client.write_buffer_size];
const read_buffer = write_buffer.ptr[write_buffer.len..][0..client.read_buffer_size];
assert(base.ptr + alloc_len == read_buffer.ptr + read_buffer.len);
@memcpy(host_buffer, remote_host);
const tls: *Tls = @ptrCast(base);
tls.* = .{
.connection = .{
.client = client,
.stream_writer = stream.writer(socket_write_buffer),
.stream_reader = stream.reader(&.{}),
.stream_writer = stream.writer(tls_write_buffer),
.stream_reader = stream.reader(tls_read_buffer),
.pool_node = .{},
.port = port,
.host_len = @intCast(remote_host.len),
@ -330,8 +329,8 @@ pub const Connection = struct {
.host = .{ .explicit = remote_host },
.ca = .{ .bundle = client.ca_bundle },
.ssl_key_log = client.ssl_key_log,
.read_buffer = tls_read_buffer,
.write_buffer = tls_write_buffer,
.read_buffer = read_buffer,
.write_buffer = write_buffer,
// This is appropriate for HTTPS because the HTTP headers contain
// the content length which is used to detect truncation attacks.
.allow_truncation_attacks = true,
@ -349,7 +348,8 @@ pub const Connection = struct {
}
fn allocLen(client: *Client, host_len: usize) usize {
return @sizeOf(Tls) + host_len + client.tls_buffer_size + client.tls_buffer_size + client.write_buffer_size;
return @sizeOf(Tls) + host_len + client.tls_buffer_size + client.tls_buffer_size +
client.write_buffer_size + client.read_buffer_size;
}
fn host(tls: *Tls) []u8 {
@ -358,6 +358,21 @@ pub const Connection = struct {
}
};
pub const ReadError = std.crypto.tls.Client.ReadError || std.net.Stream.ReadError;
pub fn getReadError(c: *const Connection) ?ReadError {
return switch (c.protocol) {
.tls => {
if (disable_tls) unreachable;
const tls: *const Tls = @alignCast(@fieldParentPtr("connection", c));
return tls.client.read_err orelse c.stream_reader.getError();
},
.plain => {
return c.stream_reader.getError();
},
};
}
fn getStream(c: *Connection) net.Stream {
return c.stream_reader.getStream();
}
@ -366,11 +381,11 @@ pub const Connection = struct {
return switch (c.protocol) {
.tls => {
if (disable_tls) unreachable;
const tls: *Tls = @fieldParentPtr("connection", c);
const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
return tls.host();
},
.plain => {
const plain: *Plain = @fieldParentPtr("connection", c);
const plain: *Plain = @alignCast(@fieldParentPtr("connection", c));
return plain.host();
},
};
@ -383,11 +398,11 @@ pub const Connection = struct {
switch (c.protocol) {
.tls => {
if (disable_tls) unreachable;
const tls: *Tls = @fieldParentPtr("connection", c);
const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
tls.destroy();
},
.plain => {
const plain: *Plain = @fieldParentPtr("connection", c);
const plain: *Plain = @alignCast(@fieldParentPtr("connection", c));
plain.destroy();
},
}
@ -399,7 +414,7 @@ pub const Connection = struct {
return switch (c.protocol) {
.tls => {
if (disable_tls) unreachable;
const tls: *Tls = @fieldParentPtr("connection", c);
const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
return &tls.client.writer;
},
.plain => &c.stream_writer.interface,
@ -412,7 +427,7 @@ pub const Connection = struct {
return switch (c.protocol) {
.tls => {
if (disable_tls) unreachable;
const tls: *Tls = @fieldParentPtr("connection", c);
const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
return &tls.client.reader;
},
.plain => c.stream_reader.interface(),
@ -422,7 +437,7 @@ pub const Connection = struct {
pub fn flush(c: *Connection) Writer.Error!void {
if (c.protocol == .tls) {
if (disable_tls) unreachable;
const tls: *Tls = @fieldParentPtr("connection", c);
const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
try tls.client.writer.flush();
}
try c.stream_writer.interface.flush();
@ -434,9 +449,8 @@ pub const Connection = struct {
pub fn end(c: *Connection) Writer.Error!void {
if (c.protocol == .tls) {
if (disable_tls) unreachable;
const tls: *Tls = @fieldParentPtr("connection", c);
const tls: *Tls = @alignCast(@fieldParentPtr("connection", c));
try tls.client.end();
try tls.client.writer.flush();
}
try c.stream_writer.interface.flush();
}
@ -444,8 +458,8 @@ pub const Connection = struct {
pub const Response = struct {
request: *Request,
/// Pointers in this struct are invalidated with the next call to
/// `receiveHead`.
/// Pointers in this struct are invalidated when the response body stream
/// is initialized.
head: Head,
pub const Head = struct {
@ -484,10 +498,8 @@ pub const Response = struct {
};
var it = mem.splitSequence(u8, bytes, "\r\n");
const first_line = it.next().?;
if (first_line.len < 12) {
return error.HttpHeadersInvalid;
}
const first_line = it.first();
if (first_line.len < 12) return error.HttpHeadersInvalid;
const version: http.Version = switch (int64(first_line[0..8])) {
int64("HTTP/1.0") => .@"HTTP/1.0",
@ -671,6 +683,16 @@ pub const Response = struct {
try expectEqual(@as(u10, 418), parseInt3("418"));
try expectEqual(@as(u10, 999), parseInt3("999"));
}
/// Help the programmer avoid bugs by calling this when the string
/// memory of `Head` becomes invalidated.
fn invalidateStrings(h: *Head) void {
h.bytes = undefined;
h.reason = undefined;
if (h.location) |*s| s.* = undefined;
if (h.content_type) |*s| s.* = undefined;
if (h.content_disposition) |*s| s.* = undefined;
}
};
/// If compressed body has been negotiated this will return compressed bytes.
@ -683,6 +705,7 @@ pub const Response = struct {
/// See also:
/// * `readerDecompressing`
pub fn reader(response: *Response, buffer: []u8) *Reader {
response.head.invalidateStrings();
const req = response.request;
if (!req.method.responseHasBody()) return .ending;
const head = &response.head;
@ -703,6 +726,7 @@ pub const Response = struct {
decompressor: *http.Decompressor,
decompression_buffer: []u8,
) *Reader {
response.head.invalidateStrings();
const head = &response.head;
return response.request.reader.bodyReaderDecompressing(
head.transfer_encoding,
@ -805,6 +829,11 @@ pub const Request = struct {
unhandled = std.math.maxInt(u16),
_,
pub fn init(n: u16) RedirectBehavior {
assert(n != std.math.maxInt(u16));
return @enumFromInt(n);
}
pub fn subtractOne(rb: *RedirectBehavior) void {
switch (rb.*) {
.not_allowed => unreachable,
@ -821,7 +850,6 @@ pub const Request = struct {
/// Returns the request's `Connection` back to the pool of the `Client`.
pub fn deinit(r: *Request) void {
r.reader.restituteHeadBuffer();
if (r.connection) |connection| {
connection.closing = connection.closing or switch (r.reader.state) {
.ready => false,
@ -856,6 +884,15 @@ pub const Request = struct {
return result;
}
/// Transfers the HTTP head and body over the connection and flushes.
pub fn sendBodyComplete(r: *Request, body: []u8) Writer.Error!void {
r.transfer_encoding = .{ .content_length = body.len };
var bw = try sendBodyUnflushed(r, body);
bw.writer.end = body.len;
try bw.end();
try r.connection.?.flush();
}
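// Hedged usage sketch (the `client.request(...)` shape is assumed from the
// tests later in this diff; note `body` must be mutable):
//
// var req = try client.request(.POST, uri, .{});
// defer req.deinit();
// try req.sendBodyComplete(body_bytes);
// var response = try req.receiveHead(&.{});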
/// Transfers the HTTP head over the connection, which is not flushed until
/// `BodyWriter.flush` or `BodyWriter.end` is called.
///
@ -908,13 +945,13 @@ pub const Request = struct {
const connection = r.connection.?;
const w = connection.writer();
try r.method.write(w);
try w.writeAll(@tagName(r.method));
try w.writeByte(' ');
if (r.method == .CONNECT) {
try uri.writeToStream(.{ .authority = true }, w);
try uri.writeToStream(w, .{ .authority = true });
} else {
try uri.writeToStream(.{
try uri.writeToStream(w, .{
.scheme = connection.proxied,
.authentication = connection.proxied,
.authority = connection.proxied,
@ -928,7 +965,7 @@ pub const Request = struct {
if (try emitOverridableHeader("host: ", r.headers.host, w)) {
try w.writeAll("host: ");
try uri.writeToStream(.{ .authority = true }, w);
try uri.writeToStream(w, .{ .authority = true });
try w.writeAll("\r\n");
}
@ -1043,13 +1080,16 @@ pub const Request = struct {
/// buffer capacity would be exceeded, `error.HttpRedirectLocationOversize`
/// is returned instead. This buffer may be empty if no redirects are to be
/// handled.
///
/// If this fails with `error.ReadFailed` then the `Connection.getReadError`
/// method of `r.connection` can be used to get more detailed information.
pub fn receiveHead(r: *Request, redirect_buffer: []u8) ReceiveHeadError!Response {
var aux_buf = redirect_buffer;
while (true) {
try r.reader.receiveHead();
const head_buffer = try r.reader.receiveHead();
const response: Response = .{
.request = r,
.head = Response.Head.parse(r.reader.head_buffer) catch return error.HttpHeadersInvalid,
.head = Response.Head.parse(head_buffer) catch return error.HttpHeadersInvalid,
};
const head = &response.head;
@ -1121,7 +1161,6 @@ pub const Request = struct {
_ = reader.discardRemaining() catch |err| switch (err) {
error.ReadFailed => return r.reader.body_err.?,
};
r.reader.restituteHeadBuffer();
}
const new_uri = r.uri.resolveInPlace(location.len, aux_buf) catch |err| switch (err) {
error.UnexpectedCharacter => return error.HttpRedirectLocationInvalid,
@ -1298,16 +1337,17 @@ pub const basic_authorization = struct {
pub fn value(uri: Uri, out: []u8) []u8 {
var bw: Writer = .fixed(out);
write(uri, &bw) catch unreachable;
return bw.getWritten();
return bw.buffered();
}
pub fn write(uri: Uri, out: *Writer) Writer.Error!void {
var buf: [max_user_len + ":".len + max_password_len]u8 = undefined;
var buf: [max_user_len + 1 + max_password_len]u8 = undefined;
var w: Writer = .fixed(&buf);
w.print("{fuser}:{fpassword}", .{
uri.user orelse Uri.Component.empty,
uri.password orelse Uri.Component.empty,
}) catch unreachable;
const user: Uri.Component = uri.user orelse .empty;
const password: Uri.Component = uri.password orelse .empty;
user.formatUser(&w) catch unreachable;
w.writeByte(':') catch unreachable;
password.formatPassword(&w) catch unreachable;
try out.print("Basic {b64}", .{w.buffered()});
}
};
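// For example, user "user" with password "pass" encodes as
// "Basic dXNlcjpwYXNz" (base64 of "user:pass"), per RFC 7617.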
@ -1697,6 +1737,7 @@ pub const FetchError = Uri.ParseError || RequestError || Request.ReceiveHeadErro
StreamTooLong,
/// TODO provide optional diagnostics when this occurs or break into more error codes
WriteFailed,
UnsupportedCompressionMethod,
};
/// Perform a one-shot HTTP request with the provided options.
@ -1748,7 +1789,8 @@ pub fn fetch(client: *Client, options: FetchOptions) FetchError!FetchResult {
const decompress_buffer: []u8 = switch (response.head.content_encoding) {
.identity => &.{},
.zstd => options.decompress_buffer orelse try client.allocator.alloc(u8, std.compress.zstd.default_window_len),
else => options.decompress_buffer orelse try client.allocator.alloc(u8, 8 * 1024),
.deflate, .gzip => options.decompress_buffer orelse try client.allocator.alloc(u8, std.compress.flate.max_window_len),
.compress => return error.UnsupportedCompressionMethod,
};
defer if (options.decompress_buffer == null) client.allocator.free(decompress_buffer);

View File

@ -6,7 +6,8 @@ const mem = std.mem;
const Uri = std.Uri;
const assert = std.debug.assert;
const testing = std.testing;
const Writer = std.io.Writer;
const Writer = std.Io.Writer;
const Reader = std.Io.Reader;
const Server = @This();
@ -21,7 +22,7 @@ reader: http.Reader,
/// header, otherwise `receiveHead` returns `error.HttpHeadersOversize`.
///
/// The returned `Server` is ready for `receiveHead` to be called.
pub fn init(in: *std.io.Reader, out: *Writer) Server {
pub fn init(in: *Reader, out: *Writer) Server {
return .{
.reader = .{
.in = in,
@ -33,33 +34,31 @@ pub fn init(in: *std.io.Reader, out: *Writer) Server {
};
}
pub fn deinit(s: *Server) void {
s.reader.restituteHeadBuffer();
}
pub const ReceiveHeadError = http.Reader.HeadError || error{
/// Client sent headers that did not conform to the HTTP protocol.
///
/// To find out more detailed diagnostics, `http.Reader.head_buffer` can be
/// To find out more detailed diagnostics, `Request.head_buffer` can be
/// passed directly to `Request.Head.parse`.
HttpHeadersInvalid,
};
pub fn receiveHead(s: *Server) ReceiveHeadError!Request {
try s.reader.receiveHead();
const head_buffer = try s.reader.receiveHead();
return .{
.server = s,
.head_buffer = head_buffer,
// No need to track the returned error here since users can repeat the
// parse with the header buffer to get detailed diagnostics.
.head = Request.Head.parse(s.reader.head_buffer) catch return error.HttpHeadersInvalid,
.head = Request.Head.parse(head_buffer) catch return error.HttpHeadersInvalid,
};
}
pub const Request = struct {
server: *Server,
/// Pointers in this struct are invalidated with the next call to
/// `receiveHead`.
/// Pointers in this struct are invalidated when the request body stream is
/// initialized.
head: Head,
head_buffer: []const u8,
respond_err: ?RespondError = null,
pub const RespondError = error{
@ -98,10 +97,9 @@ pub const Request = struct {
const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse
return error.HttpHeadersInvalid;
if (method_end > 24) return error.HttpHeadersInvalid;
const method_str = first_line[0..method_end];
const method: http.Method = @enumFromInt(http.Method.parse(method_str));
const method = std.meta.stringToEnum(http.Method, first_line[0..method_end]) orelse
return error.UnknownHttpMethod;
const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse
return error.HttpHeadersInvalid;
@ -225,11 +223,19 @@ pub const Request = struct {
inline fn int64(array: *const [8]u8) u64 {
return @bitCast(array.*);
}
/// Help the programmer avoid bugs by calling this when the string
/// memory of `Head` becomes invalidated.
fn invalidateStrings(h: *Head) void {
h.target = undefined;
if (h.expect) |*s| s.* = undefined;
if (h.content_type) |*s| s.* = undefined;
}
};
pub fn iterateHeaders(r: *Request) http.HeaderIterator {
pub fn iterateHeaders(r: *const Request) http.HeaderIterator {
assert(r.server.reader.state == .received_head);
return http.HeaderIterator.init(r.server.reader.head_buffer);
return http.HeaderIterator.init(r.head_buffer);
}
test iterateHeaders {
@ -244,7 +250,6 @@ pub const Request = struct {
.reader = .{
.in = undefined,
.state = .received_head,
.head_buffer = @constCast(request_bytes),
.interface = undefined,
},
.out = undefined,
@ -253,6 +258,7 @@ pub const Request = struct {
var request: Request = .{
.server = &server,
.head = undefined,
.head_buffer = @constCast(request_bytes),
};
var it = request.iterateHeaders();
@ -435,10 +441,8 @@ pub const Request = struct {
for (o.extra_headers) |header| {
assert(header.name.len != 0);
try out.writeAll(header.name);
try out.writeAll(": ");
try out.writeAll(header.value);
try out.writeAll("\r\n");
var bufs: [4][]const u8 = .{ header.name, ": ", header.value, "\r\n" };
try out.writeVecAll(&bufs);
}
try out.writeAll("\r\n");
@ -453,7 +457,13 @@ pub const Request = struct {
return if (elide_body) .{
.http_protocol_output = request.server.out,
.state = state,
.writer = .discarding(buffer),
.writer = .{
.buffer = buffer,
.vtable = &.{
.drain = http.BodyWriter.elidingDrain,
.sendFile = http.BodyWriter.elidingSendFile,
},
},
} else .{
.http_protocol_output = request.server.out,
.state = state,
@ -484,10 +494,11 @@ pub const Request = struct {
none,
};
/// Does not invalidate `request.head`.
pub fn upgradeRequested(request: *const Request) UpgradeRequest {
switch (request.head.version) {
.@"HTTP/1.0" => return null,
.@"HTTP/1.1" => if (request.head.method != .GET) return null,
.@"HTTP/1.0" => return .none,
.@"HTTP/1.1" => if (request.head.method != .GET) return .none,
}
var sec_websocket_key: ?[]const u8 = null;
@ -515,7 +526,7 @@ pub const Request = struct {
/// The header is not guaranteed to be sent until `WebSocket.flush` is
/// called on the returned struct.
pub fn respondWebSocket(request: *Request, options: WebSocketOptions) Writer.Error!WebSocket {
pub fn respondWebSocket(request: *Request, options: WebSocketOptions) ExpectContinueError!WebSocket {
if (request.head.expect != null) return error.HttpExpectationFailed;
const out = request.server.out;
@ -534,16 +545,14 @@ pub const Request = struct {
try out.print("{s} {d} {s}\r\n", .{ @tagName(version), @intFromEnum(status), phrase });
try out.writeAll("connection: upgrade\r\nupgrade: websocket\r\nsec-websocket-accept: ");
const base64_digest = try out.writableArray(28);
assert(std.base64.standard.Encoder.encode(&base64_digest, &digest).len == base64_digest.len);
assert(std.base64.standard.Encoder.encode(base64_digest, &digest).len == base64_digest.len);
out.advance(base64_digest.len);
try out.writeAll("\r\n");
for (options.extra_headers) |header| {
assert(header.name.len != 0);
try out.writeAll(header.name);
try out.writeAll(": ");
try out.writeAll(header.value);
try out.writeAll("\r\n");
var bufs: [4][]const u8 = .{ header.name, ": ", header.value, "\r\n" };
try out.writeVecAll(&bufs);
}
try out.writeAll("\r\n");
@ -564,7 +573,7 @@ pub const Request = struct {
///
/// See `readerExpectNone` for an infallible alternative that cannot write
/// to the server output stream.
pub fn readerExpectContinue(request: *Request, buffer: []u8) ExpectContinueError!*std.io.Reader {
pub fn readerExpectContinue(request: *Request, buffer: []u8) ExpectContinueError!*Reader {
const flush = request.head.expect != null;
try writeExpectContinue(request);
if (flush) try request.server.out.flush();
@ -576,9 +585,12 @@ pub const Request = struct {
/// this function.
///
/// Asserts that this function is only called once.
pub fn readerExpectNone(request: *Request, buffer: []u8) *std.io.Reader {
///
/// Invalidates the string memory inside `Head`.
pub fn readerExpectNone(request: *Request, buffer: []u8) *Reader {
assert(request.server.reader.state == .received_head);
assert(request.head.expect == null);
request.head.invalidateStrings();
if (!request.head.method.requestHasBody()) return .ending;
return request.server.reader.bodyReader(buffer, request.head.transfer_encoding, request.head.content_length);
}
@ -640,7 +652,7 @@ pub const Request = struct {
/// See https://tools.ietf.org/html/rfc6455
pub const WebSocket = struct {
key: []const u8,
input: *std.io.Reader,
input: *Reader,
output: *Writer,
pub const Header0 = packed struct(u8) {
@ -677,6 +689,8 @@ pub const WebSocket = struct {
UnexpectedOpCode,
MessageTooBig,
MissingMaskBit,
ReadFailed,
EndOfStream,
};
pub const SmallMessage = struct {
@ -691,8 +705,9 @@ pub const WebSocket = struct {
pub fn readSmallMessage(ws: *WebSocket) ReadSmallTextMessageError!SmallMessage {
const in = ws.input;
while (true) {
const h0 = in.takeStruct(Header0);
const h1 = in.takeStruct(Header1);
const header = try in.takeArray(2);
const h0: Header0 = @bitCast(header[0]);
const h1: Header1 = @bitCast(header[1]);
switch (h0.opcode) {
.text, .binary, .pong, .ping => {},
@ -732,47 +747,49 @@ pub const WebSocket = struct {
}
pub fn writeMessage(ws: *WebSocket, data: []const u8, op: Opcode) Writer.Error!void {
try writeMessageVecUnflushed(ws, &.{data}, op);
var bufs: [1][]const u8 = .{data};
try writeMessageVecUnflushed(ws, &bufs, op);
try ws.output.flush();
}
pub fn writeMessageUnflushed(ws: *WebSocket, data: []const u8, op: Opcode) Writer.Error!void {
try writeMessageVecUnflushed(ws, &.{data}, op);
var bufs: [1][]const u8 = .{data};
try writeMessageVecUnflushed(ws, &bufs, op);
}
pub fn writeMessageVec(ws: *WebSocket, data: []const []const u8, op: Opcode) Writer.Error!void {
pub fn writeMessageVec(ws: *WebSocket, data: [][]const u8, op: Opcode) Writer.Error!void {
try writeMessageVecUnflushed(ws, data, op);
try ws.output.flush();
}
pub fn writeMessageVecUnflushed(ws: *WebSocket, data: []const []const u8, op: Opcode) Writer.Error!void {
pub fn writeMessageVecUnflushed(ws: *WebSocket, data: [][]const u8, op: Opcode) Writer.Error!void {
const total_len = l: {
var total_len: u64 = 0;
for (data) |iovec| total_len += iovec.len;
break :l total_len;
};
const out = ws.output;
try out.writeStruct(@as(Header0, .{
try out.writeByte(@bitCast(@as(Header0, .{
.opcode = op,
.fin = true,
}));
})));
switch (total_len) {
0...125 => try out.writeStruct(@as(Header1, .{
0...125 => try out.writeByte(@bitCast(@as(Header1, .{
.payload_len = @enumFromInt(total_len),
.mask = false,
})),
}))),
126...0xffff => {
try out.writeStruct(@as(Header1, .{
try out.writeByte(@bitCast(@as(Header1, .{
.payload_len = .len16,
.mask = false,
}));
})));
try out.writeInt(u16, @intCast(total_len), .big);
},
else => {
try out.writeStruct(@as(Header1, .{
try out.writeByte(@bitCast(@as(Header1, .{
.payload_len = .len64,
.mask = false,
}));
})));
try out.writeInt(u64, total_len, .big);
},
}
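The switch above is the RFC 6455 server-side length encoding (servers never set the mask bit). A worked example, assuming a 300-byte payload:

// 300 > 125, so the second header byte is
// Header1{ .payload_len = .len16, .mask = false } == 0x7E,
// followed by the big-endian u16 0x012C (300).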

View File

@ -65,23 +65,22 @@ test "trailers" {
try req.sendBodiless();
var response = try req.receiveHead(&.{});
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
{
var it = response.head.iterateHeaders();
const header = it.next().?;
try expectEqualStrings("transfer-encoding", header.name);
try expectEqualStrings("chunked", header.value);
try expectEqual(null, it.next());
}
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
{
var it = response.head.iterateHeaders();
const header = it.next().?;
try expect(!it.is_trailer);
try expectEqualStrings("transfer-encoding", header.name);
try expectEqualStrings("chunked", header.value);
try expectEqual(null, it.next());
}
{
var it = response.iterateTrailers();
const header = it.next().?;
try expect(it.is_trailer);
try expectEqualStrings("X-Checksum", header.name);
try expectEqualStrings("aaaa", header.value);
try expectEqual(null, it.next());
@ -183,7 +182,11 @@ test "echo content server" {
if (request.head.expect) |expect_header_value| {
if (mem.eql(u8, expect_header_value, "garbage")) {
try expectError(error.HttpExpectationFailed, request.readerExpectContinue(&.{}));
try request.respond("", .{ .keep_alive = false });
request.head.expect = null;
try request.respond("", .{
.keep_alive = false,
.status = .expectation_failed,
});
continue;
}
}
@ -204,12 +207,14 @@ test "echo content server" {
// request.head.target,
//});
const body = try (try request.readerExpectContinue(&.{})).allocRemaining(std.testing.allocator, .limited(8192));
try expect(mem.startsWith(u8, request.head.target, "/echo-content"));
try expectEqualStrings("text/plain", request.head.content_type.?);
// head strings expire here
const body = try (try request.readerExpectContinue(&.{})).allocRemaining(std.testing.allocator, .unlimited);
defer std.testing.allocator.free(body);
try expect(mem.startsWith(u8, request.head.target, "/echo-content"));
try expectEqualStrings("Hello, World!\n", body);
try expectEqualStrings("text/plain", request.head.content_type.?);
var response = try request.respondStreaming(&.{}, .{
.content_length = switch (request.head.transfer_encoding) {
@ -273,7 +278,6 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
for (0..500) |i| {
try w.print("{d}, ah ha ha!\n", .{i});
}
try expectEqual(7390, w.count);
try w.flush();
try response.end();
try expectEqual(.closing, server.reader.state);
@ -291,7 +295,7 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
var stream_reader = stream.reader(&tiny_buffer);
const response = try stream_reader.interface().allocRemaining(gpa, .limited(8192));
const response = try stream_reader.interface().allocRemaining(gpa, .unlimited);
defer gpa.free(response);
var expected_response = std.ArrayList(u8).init(gpa);
@ -362,7 +366,7 @@ test "receiving arbitrary http headers from the client" {
var tiny_buffer: [1]u8 = undefined; // allows allocRemaining to detect limit exceeded
var stream_reader = stream.reader(&tiny_buffer);
const response = try stream_reader.interface().allocRemaining(gpa, .limited(8192));
const response = try stream_reader.interface().allocRemaining(gpa, .unlimited);
defer gpa.free(response);
var expected_response = std.ArrayList(u8).init(gpa);
@ -407,18 +411,19 @@ test "general client/server API coverage" {
fn handleRequest(request: *http.Server.Request, listen_port: u16) !void {
const log = std.log.scoped(.server);
log.info("{f} {s} {s}", .{
request.head.method, @tagName(request.head.version), request.head.target,
});
const gpa = std.testing.allocator;
const body = try (try request.readerExpectContinue(&.{})).allocRemaining(gpa, .limited(8192));
log.info("{t} {t} {s}", .{ request.head.method, request.head.version, request.head.target });
const target = try gpa.dupe(u8, request.head.target);
defer gpa.free(target);
const reader = (try request.readerExpectContinue(&.{}));
const body = try reader.allocRemaining(gpa, .unlimited);
defer gpa.free(body);
if (mem.startsWith(u8, request.head.target, "/get")) {
if (mem.startsWith(u8, target, "/get")) {
var response = try request.respondStreaming(&.{}, .{
.content_length = if (mem.indexOf(u8, request.head.target, "?chunked") == null)
.content_length = if (mem.indexOf(u8, target, "?chunked") == null)
14
else
null,
@ -433,7 +438,7 @@ test "general client/server API coverage" {
try w.writeAll("World!\n");
try response.end();
// Writing again would cause an assertion failure.
} else if (mem.startsWith(u8, request.head.target, "/large")) {
} else if (mem.startsWith(u8, target, "/large")) {
var response = try request.respondStreaming(&.{}, .{
.content_length = 14 * 1024 + 14 * 10,
});
@ -447,7 +452,8 @@ test "general client/server API coverage" {
try w.writeAll("Hello, World!\n");
}
try w.writeAll("Hello, World!\n" ** 1024);
var vec: [1][]const u8 = .{"Hello, World!\n"};
try w.writeSplatAll(&vec, 1024);
i = 0;
while (i < 5) : (i += 1) {
@ -455,7 +461,7 @@ test "general client/server API coverage" {
}
try response.end();
} else if (mem.eql(u8, request.head.target, "/redirect/1")) {
} else if (mem.eql(u8, target, "/redirect/1")) {
var response = try request.respondStreaming(&.{}, .{
.respond_options = .{
.status = .found,
@ -469,14 +475,14 @@ test "general client/server API coverage" {
try w.writeAll("Hello, ");
try w.writeAll("Redirected!\n");
try response.end();
} else if (mem.eql(u8, request.head.target, "/redirect/2")) {
} else if (mem.eql(u8, target, "/redirect/2")) {
try request.respond("Hello, Redirected!\n", .{
.status = .found,
.extra_headers = &.{
.{ .name = "location", .value = "/redirect/1" },
},
});
} else if (mem.eql(u8, request.head.target, "/redirect/3")) {
} else if (mem.eql(u8, target, "/redirect/3")) {
const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}/redirect/2", .{
listen_port,
});
@ -488,23 +494,23 @@ test "general client/server API coverage" {
.{ .name = "location", .value = location },
},
});
} else if (mem.eql(u8, request.head.target, "/redirect/4")) {
} else if (mem.eql(u8, target, "/redirect/4")) {
try request.respond("Hello, Redirected!\n", .{
.status = .found,
.extra_headers = &.{
.{ .name = "location", .value = "/redirect/3" },
},
});
} else if (mem.eql(u8, request.head.target, "/redirect/5")) {
} else if (mem.eql(u8, target, "/redirect/5")) {
try request.respond("Hello, Redirected!\n", .{
.status = .found,
.extra_headers = &.{
.{ .name = "location", .value = "/%2525" },
},
});
} else if (mem.eql(u8, request.head.target, "/%2525")) {
} else if (mem.eql(u8, target, "/%2525")) {
try request.respond("Encoded redirect successful!\n", .{});
} else if (mem.eql(u8, request.head.target, "/redirect/invalid")) {
} else if (mem.eql(u8, target, "/redirect/invalid")) {
const invalid_port = try getUnusedTcpPort();
const location = try std.fmt.allocPrint(gpa, "http://127.0.0.1:{d}", .{invalid_port});
defer gpa.free(location);
@ -515,7 +521,7 @@ test "general client/server API coverage" {
.{ .name = "location", .value = location },
},
});
} else if (mem.eql(u8, request.head.target, "/empty")) {
} else if (mem.eql(u8, target, "/empty")) {
try request.respond("", .{
.extra_headers = &.{
.{ .name = "empty", .value = "" },
@ -556,11 +562,12 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
try expectEqualStrings("text/plain", response.head.content_type.?);
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
try expectEqualStrings("text/plain", response.head.content_type.?);
}
// connection has been kept alive
@ -579,7 +586,7 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192 * 1024));
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqual(@as(usize, 14 * 1024 + 14 * 10), body.len);
@ -601,12 +608,13 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
try expectEqualStrings("text/plain", response.head.content_type.?);
try expectEqual(14, response.head.content_length.?);
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("", body);
try expectEqualStrings("text/plain", response.head.content_type.?);
try expectEqual(14, response.head.content_length.?);
}
// connection has been kept alive
@ -625,11 +633,12 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
try expectEqualStrings("text/plain", response.head.content_type.?);
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
try expectEqualStrings("text/plain", response.head.content_type.?);
}
// connection has been kept alive
@ -648,12 +657,13 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
try expectEqualStrings("text/plain", response.head.content_type.?);
try expect(response.head.transfer_encoding == .chunked);
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("", body);
try expectEqualStrings("text/plain", response.head.content_type.?);
try expect(response.head.transfer_encoding == .chunked);
}
// connection has been kept alive
@ -674,11 +684,12 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
try expectEqualStrings("text/plain", response.head.content_type.?);
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
try expectEqualStrings("text/plain", response.head.content_type.?);
}
// connection has been closed
@ -703,11 +714,6 @@ test "general client/server API coverage" {
try std.testing.expectEqual(.ok, response.head.status);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
defer gpa.free(body);
try expectEqualStrings("", body);
var it = response.head.iterateHeaders();
{
const header = it.next().?;
@ -715,6 +721,12 @@ test "general client/server API coverage" {
try expectEqualStrings("content-length", header.name);
try expectEqualStrings("0", header.value);
}
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("", body);
{
const header = it.next().?;
try expect(!it.is_trailer);
@ -740,7 +752,7 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
@ -762,7 +774,7 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
@ -784,7 +796,7 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
@ -825,7 +837,7 @@ test "general client/server API coverage" {
try req.sendBodiless();
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Encoded redirect successful!\n", body);
@ -915,7 +927,7 @@ test "Server streams both reading and writing" {
try body_writer.writer.writeAll("fish");
try body_writer.end();
const body = try response.reader(&.{}).allocRemaining(std.testing.allocator, .limited(8192));
const body = try response.reader(&.{}).allocRemaining(std.testing.allocator, .unlimited);
defer std.testing.allocator.free(body);
try expectEqualStrings("ONE FISH", body);
@ -947,7 +959,7 @@ fn echoTests(client: *http.Client, port: u16) !void {
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
@ -980,7 +992,7 @@ fn echoTests(client: *http.Client, port: u16) !void {
var response = try req.receiveHead(&redirect_buffer);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
@ -1034,7 +1046,7 @@ fn echoTests(client: *http.Client, port: u16) !void {
var response = try req.receiveHead(&redirect_buffer);
try expectEqual(.ok, response.head.status);
const body = try response.reader(&.{}).allocRemaining(gpa, .limited(8192));
const body = try response.reader(&.{}).allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("Hello, World!\n", body);
@ -1175,7 +1187,7 @@ test "redirect to different connection" {
var response = try req.receiveHead(&redirect_buffer);
var reader = response.reader(&.{});
const body = try reader.allocRemaining(gpa, .limited(8192));
const body = try reader.allocRemaining(gpa, .unlimited);
defer gpa.free(body);
try expectEqualStrings("good job, you pass", body);

View File

@ -1710,7 +1710,7 @@ pub const Mutable = struct {
if (xy_trailing != 0 and r.limbs[r.len - 1] != 0) {
// Manually shift here since we know it's limb-aligned.
mem.copyBackwards(Limb, r.limbs[xy_trailing..], r.limbs[0..r.len]);
@memmove(r.limbs[xy_trailing..][0..r.len], r.limbs[0..r.len]);
@memset(r.limbs[0..xy_trailing], 0);
r.len += xy_trailing;
}
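`@memmove` accepts overlapping source and destination in either direction, which is what lets it replace the direction-sensitive `copyBackwards`/`copyForwards` calls here and in the Windows code later in this commit. A tiny illustration:

var buf = [_]u8{ 'a', 'b', 'c', 'd', 'e' };
@memmove(buf[1..4], buf[0..3]); // overlapping shift right; buf is now "aabce"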
@ -3836,8 +3836,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) usize {
std.debug.assert(@intFromPtr(r.ptr) >= @intFromPtr(a.ptr));
if (shift == 0) {
if (a.ptr != r.ptr)
std.mem.copyBackwards(Limb, r[0..a.len], a);
if (a.ptr != r.ptr) @memmove(r[0..a.len], a);
return a.len;
}
if (shift >= limb_bits) {
@ -3891,8 +3890,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) usize {
if (shift == 0) {
std.debug.assert(r.len >= a.len);
if (a.ptr != r.ptr)
std.mem.copyForwards(Limb, r[0..a.len], a);
if (a.ptr != r.ptr) @memmove(r[0..a.len], a);
return a.len;
}
if (shift >= limb_bits) {

View File

@ -939,7 +939,7 @@ fn CreateUniqueTuple(comptime N: comptime_int, comptime types: [N]type) type {
.type = T,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
.alignment = @alignOf(T),
};
}
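Several hunks in this commit make the same substitution of `@alignOf(T)` for `.alignment = 0` in reified struct fields; the inference (an assumption from the pattern, not stated in the diff) is that zero no longer stands in for natural alignment here. The shape in isolation:

const field: std.builtin.Type.StructField = .{
    .name = "x",
    .type = u64,
    .default_value_ptr = null,
    .is_comptime = false,
    .alignment = @alignOf(u64),
};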

View File

@ -1944,7 +1944,7 @@ pub const Stream = struct {
pub const Error = ReadError;
pub fn getStream(r: *const Reader) Stream {
return r.stream;
return r.net_stream;
}
pub fn getError(r: *const Reader) ?Error {

View File

@ -1014,6 +1014,44 @@ pub fn munmap(address: [*]const u8, length: usize) usize {
return syscall2(.munmap, @intFromPtr(address), length);
}
pub fn mlock(address: [*]const u8, length: usize) usize {
return syscall2(.mlock, @intFromPtr(address), length);
}
pub fn munlock(address: [*]const u8, length: usize) usize {
return syscall2(.munlock, @intFromPtr(address), length);
}
pub const MLOCK = packed struct(u32) {
ONFAULT: bool = false,
_1: u31 = 0,
};
pub fn mlock2(address: [*]const u8, length: usize, flags: MLOCK) usize {
return syscall3(.mlock2, @intFromPtr(address), length, @as(u32, @bitCast(flags)));
}
pub const MCL = if (native_arch.isSPARC() or native_arch.isPowerPC()) packed struct(u32) {
_0: u13 = 0,
CURRENT: bool = false,
FUTURE: bool = false,
ONFAULT: bool = false,
_4: u16 = 0,
} else packed struct(u32) {
CURRENT: bool = false,
FUTURE: bool = false,
ONFAULT: bool = false,
_3: u29 = 0,
};
pub fn mlockall(flags: MCL) usize {
return syscall1(.mlockall, @as(u32, @bitCast(flags)));
}
pub fn munlockall() usize {
return syscall0(.munlockall);
}
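A hedged usage sketch of the new wrappers; the raw syscall returns are errno-style `usize` values, with checking elided for brevity. Note the SPARC/PowerPC `MCL` variant starts its bits at position 13, matching kernel headers where MCL_CURRENT is 0x2000.

const linux = std.os.linux;
var page: [4096]u8 align(4096) = undefined;
_ = linux.mlock2(&page, page.len, .{ .ONFAULT = true });
_ = linux.mlockall(.{ .CURRENT = true, .FUTURE = true });
_ = linux.munlockall();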
pub fn poll(fds: [*]pollfd, n: nfds_t, timeout: i32) usize {
if (@hasField(SYS, "poll")) {
return syscall3(.poll, @intFromPtr(fds), n, @as(u32, @bitCast(timeout)));

View File

@ -1332,7 +1332,7 @@ pub fn GetFinalPathNameByHandle(
// dropping the \Device\Mup\ and making sure the path begins with \\
if (mem.eql(u16, device_name_u16, std.unicode.utf8ToUtf16LeStringLiteral("Mup"))) {
out_buffer[0] = '\\';
mem.copyForwards(u16, out_buffer[1..][0..file_name_u16.len], file_name_u16);
@memmove(out_buffer[1..][0..file_name_u16.len], file_name_u16);
return out_buffer[0 .. 1 + file_name_u16.len];
}
@ -1400,7 +1400,7 @@ pub fn GetFinalPathNameByHandle(
if (out_buffer.len < drive_letter.len + file_name_u16.len) return error.NameTooLong;
@memcpy(out_buffer[0..drive_letter.len], drive_letter);
mem.copyForwards(u16, out_buffer[drive_letter.len..][0..file_name_u16.len], file_name_u16);
@memmove(out_buffer[drive_letter.len..][0..file_name_u16.len], file_name_u16);
const total_len = drive_letter.len + file_name_u16.len;
// Validate that DOS does not contain any spurious nul bytes.
@ -1449,12 +1449,7 @@ pub fn GetFinalPathNameByHandle(
// to copy backwards. We also need to do this before copying the volume path because
// it could overwrite the file_name_u16 memory.
const file_name_dest = out_buffer[volume_path.len..][0..file_name_u16.len];
const file_name_byte_offset = @intFromPtr(file_name_u16.ptr) - @intFromPtr(out_buffer.ptr);
const file_name_index = file_name_byte_offset / @sizeOf(u16);
if (volume_path.len > file_name_index)
mem.copyBackwards(u16, file_name_dest, file_name_u16)
else
mem.copyForwards(u16, file_name_dest, file_name_u16);
@memmove(file_name_dest, file_name_u16);
@memcpy(out_buffer[0..volume_path.len], volume_path);
const total_len = volume_path.len + file_name_u16.len;

View File

@ -901,11 +901,6 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
if (dir_buf.items.len > 0) try dir_buf.append(self.allocator, fs.path.sep);
try dir_buf.appendSlice(self.allocator, app_dir);
}
if (dir_buf.items.len > 0) {
// Need to normalize the path, openDirW can't handle things like double backslashes
const normalized_len = windows.normalizePath(u16, dir_buf.items) catch return error.BadPathName;
dir_buf.shrinkRetainingCapacity(normalized_len);
}
windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, &cmd_line_cache, envp_ptr, cwd_w_ptr, flags, &siStartInfo, &piProcInfo) catch |no_path_err| {
const original_err = switch (no_path_err) {
@ -930,10 +925,6 @@ fn spawnWindows(self: *ChildProcess) SpawnError!void {
while (it.next()) |search_path| {
dir_buf.clearRetainingCapacity();
try dir_buf.appendSlice(self.allocator, search_path);
// Need to normalize the path, some PATH values can contain things like double
// backslashes which openDirW can't handle
const normalized_len = windows.normalizePath(u16, dir_buf.items) catch continue;
dir_buf.shrinkRetainingCapacity(normalized_len);
if (windowsCreateProcessPathExt(self.allocator, &dir_buf, &app_buf, PATHEXT, &cmd_line_cache, envp_ptr, cwd_w_ptr, flags, &siStartInfo, &piProcInfo)) {
break :run;

View File

@ -9,8 +9,6 @@ pub const AutoArrayHashMapUnmanaged = array_hash_map.AutoArrayHashMapUnmanaged;
pub const AutoHashMap = hash_map.AutoHashMap;
pub const AutoHashMapUnmanaged = hash_map.AutoHashMapUnmanaged;
pub const BitStack = @import("BitStack.zig");
pub const BoundedArray = @import("bounded_array.zig").BoundedArray;
pub const BoundedArrayAligned = @import("bounded_array.zig").BoundedArrayAligned;
pub const Build = @import("Build.zig");
pub const BufMap = @import("buf_map.zig").BufMap;
pub const BufSet = @import("buf_set.zig").BufSet;

View File

@ -5386,6 +5386,9 @@ fn unionDeclInner(
return astgen.failNode(member_node, "union field missing type", .{});
}
if (member.ast.align_expr.unwrap()) |align_expr| {
if (layout == .@"packed") {
return astgen.failNode(align_expr, "unable to override alignment of packed union fields", .{});
}
const align_inst = try expr(&block_scope, &block_scope.base, coerced_align_ri, align_expr);
wip_members.appendToField(@intFromEnum(align_inst));
any_aligned_fields = true;
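An assumed minimal reproduction of what the new check rejects:

const U = packed union {
    a: u32 align(8), // error: unable to override alignment of packed union fields
    b: u16,
};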

View File

@ -8533,18 +8533,19 @@ pub const Metadata = enum(u32) {
.type = []const u8,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
.alignment = @alignOf([]const u8),
};
}
fmt_str = fmt_str ++ "(";
inline for (fields[2..], names) |*field, name| {
fmt_str = fmt_str ++ "{[" ++ name ++ "]f}";
const T = std.fmt.Formatter(FormatData, format);
field.* = .{
.name = name,
.type = std.fmt.Formatter(FormatData, format),
.type = T,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
.alignment = @alignOf(T),
};
}
fmt_str = fmt_str ++ ")\n";

View File

@ -2103,6 +2103,8 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.local_zir_cache = local_zir_cache,
.error_limit = error_limit,
.llvm_object = null,
.analysis_roots_buffer = undefined,
.analysis_roots_len = 0,
};
try zcu.init(options.thread_pool.getIdCount());
break :blk zcu;
@ -2183,8 +2185,12 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
.emit_docs = try options.emit_docs.resolve(arena, &options, .docs),
};
comp.windows_libs = try std.StringArrayHashMapUnmanaged(void).init(gpa, options.windows_lib_names, &.{});
errdefer comp.windows_libs.deinit(gpa);
errdefer {
for (comp.windows_libs.keys()) |windows_lib| gpa.free(windows_lib);
comp.windows_libs.deinit(gpa);
}
try comp.windows_libs.ensureUnusedCapacity(gpa, options.windows_lib_names.len);
for (options.windows_lib_names) |windows_lib| comp.windows_libs.putAssumeCapacity(try gpa.dupe(u8, windows_lib), {});
// Prevent some footguns by making the "any" fields of config reflect
// the default Module settings.
@ -2378,7 +2384,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
};
comp.c_object_table.putAssumeCapacityNoClobber(c_object, {});
}
comp.link_task_queue.pending_prelink_tasks += @intCast(comp.c_object_table.count());
// Add a `Win32Resource` for each `rc_source_files` and one for `manifest_file`.
const win32_resource_count =
@ -2386,10 +2391,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (win32_resource_count > 0) {
dev.check(.win32_resource);
try comp.win32_resource_table.ensureTotalCapacity(gpa, win32_resource_count);
// Add this after adding logic to updateWin32Resource to pass the
// result into link.loadInput. loadInput integration is not implemented
// for Windows linking logic yet.
//comp.link_task_queue.pending_prelink_tasks += @intCast(win32_resource_count);
for (options.rc_source_files) |rc_source_file| {
const win32_resource = try gpa.create(Win32Resource);
errdefer gpa.destroy(win32_resource);
@ -2415,13 +2416,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (comp.emit_bin != null and target.ofmt != .c) {
if (!comp.skip_linker_dependencies) {
// These DLLs are always loaded into every Windows process.
if (target.os.tag == .windows and is_exe_or_dyn_lib) {
try comp.windows_libs.ensureUnusedCapacity(gpa, 2);
comp.windows_libs.putAssumeCapacity("kernel32", {});
comp.windows_libs.putAssumeCapacity("ntdll", {});
}
// If we need to build libc for the target, add work items for it.
// We go through the work queue so that building can be done in parallel.
// If linking against host libc installation, instead queue up jobs
@ -2455,62 +2449,51 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
if (musl.needsCrt0(comp.config.output_mode, comp.config.link_mode, comp.config.pie)) |f| {
comp.queued_jobs.musl_crt_file[@intFromEnum(f)] = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
switch (comp.config.link_mode) {
.static => comp.queued_jobs.musl_crt_file[@intFromEnum(musl.CrtFile.libc_a)] = true,
.dynamic => comp.queued_jobs.musl_crt_file[@intFromEnum(musl.CrtFile.libc_so)] = true,
}
comp.link_task_queue.pending_prelink_tasks += 1;
} else if (target.isGnuLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
if (glibc.needsCrt0(comp.config.output_mode)) |f| {
comp.queued_jobs.glibc_crt_file[@intFromEnum(f)] = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
comp.queued_jobs.glibc_shared_objects = true;
comp.link_task_queue.pending_prelink_tasks += glibc.sharedObjectsCount(target);
comp.queued_jobs.glibc_crt_file[@intFromEnum(glibc.CrtFile.libc_nonshared_a)] = true;
comp.link_task_queue.pending_prelink_tasks += 1;
} else if (target.isFreeBSDLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
if (freebsd.needsCrt0(comp.config.output_mode)) |f| {
comp.queued_jobs.freebsd_crt_file[@intFromEnum(f)] = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
comp.queued_jobs.freebsd_shared_objects = true;
comp.link_task_queue.pending_prelink_tasks += freebsd.sharedObjectsCount();
} else if (target.isNetBSDLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
if (netbsd.needsCrt0(comp.config.output_mode)) |f| {
comp.queued_jobs.netbsd_crt_file[@intFromEnum(f)] = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
comp.queued_jobs.netbsd_shared_objects = true;
comp.link_task_queue.pending_prelink_tasks += netbsd.sharedObjectsCount();
} else if (target.isWasiLibC()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
comp.queued_jobs.wasi_libc_crt_file[@intFromEnum(wasi_libc.execModelCrtFile(comp.config.wasi_exec_model))] = true;
comp.queued_jobs.wasi_libc_crt_file[@intFromEnum(wasi_libc.CrtFile.libc_a)] = true;
comp.link_task_queue.pending_prelink_tasks += 2;
} else if (target.isMinGW()) {
if (!std.zig.target.canBuildLibC(target)) return error.LibCUnavailable;
const main_crt_file: mingw.CrtFile = if (is_dyn_lib) .dllcrt2_o else .crt2_o;
comp.queued_jobs.mingw_crt_file[@intFromEnum(main_crt_file)] = true;
comp.queued_jobs.mingw_crt_file[@intFromEnum(mingw.CrtFile.libmingw32_lib)] = true;
comp.link_task_queue.pending_prelink_tasks += 2;
// When linking mingw-w64 there are some import libs we always need.
try comp.windows_libs.ensureUnusedCapacity(gpa, mingw.always_link_libs.len);
for (mingw.always_link_libs) |name| comp.windows_libs.putAssumeCapacity(name, {});
for (mingw.always_link_libs) |name| comp.windows_libs.putAssumeCapacity(try gpa.dupe(u8, name), {});
} else {
return error.LibCUnavailable;
}
@ -2520,7 +2503,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
target.isMinGW())
{
comp.queued_jobs.zigc_lib = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
}
@ -2537,50 +2519,41 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
}
if (comp.wantBuildLibUnwindFromSource()) {
comp.queued_jobs.libunwind = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.link_libcpp) {
comp.queued_jobs.libcxx = true;
comp.queued_jobs.libcxxabi = true;
comp.link_task_queue.pending_prelink_tasks += 2;
}
if (build_options.have_llvm and is_exe_or_dyn_lib and comp.config.any_sanitize_thread) {
comp.queued_jobs.libtsan = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
if (can_build_compiler_rt) {
if (comp.compiler_rt_strat == .lib) {
log.debug("queuing a job to build compiler_rt_lib", .{});
comp.queued_jobs.compiler_rt_lib = true;
comp.link_task_queue.pending_prelink_tasks += 1;
} else if (comp.compiler_rt_strat == .obj) {
log.debug("queuing a job to build compiler_rt_obj", .{});
// In this case we are making a static library, so we ask
// for a compiler-rt object to put in it.
comp.queued_jobs.compiler_rt_obj = true;
comp.link_task_queue.pending_prelink_tasks += 1;
} else if (comp.compiler_rt_strat == .dyn_lib) {
// hack for stage2_x86_64 + coff
log.debug("queuing a job to build compiler_rt_dyn_lib", .{});
comp.queued_jobs.compiler_rt_dyn_lib = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
if (comp.ubsan_rt_strat == .lib) {
log.debug("queuing a job to build ubsan_rt_lib", .{});
comp.queued_jobs.ubsan_rt_lib = true;
comp.link_task_queue.pending_prelink_tasks += 1;
} else if (comp.ubsan_rt_strat == .obj) {
log.debug("queuing a job to build ubsan_rt_obj", .{});
comp.queued_jobs.ubsan_rt_obj = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
if (is_exe_or_dyn_lib and comp.config.any_fuzz) {
log.debug("queuing a job to build libfuzzer", .{});
comp.queued_jobs.fuzzer_lib = true;
comp.link_task_queue.pending_prelink_tasks += 1;
}
}
}
@ -2588,8 +2561,6 @@ pub fn create(gpa: Allocator, arena: Allocator, options: CreateOptions) !*Compil
try comp.link_task_queue.queued_prelink.append(gpa, .load_explicitly_provided);
}
log.debug("queued prelink tasks: {d}", .{comp.link_task_queue.queued_prelink.items.len});
log.debug("pending prelink tasks: {d}", .{comp.link_task_queue.pending_prelink_tasks});
return comp;
}
@ -2608,6 +2579,7 @@ pub fn destroy(comp: *Compilation) void {
comp.c_object_work_queue.deinit();
comp.win32_resource_work_queue.deinit();
for (comp.windows_libs.keys()) |windows_lib| gpa.free(windows_lib);
comp.windows_libs.deinit(gpa);
{
@ -2933,22 +2905,26 @@ pub fn update(comp: *Compilation, main_progress_node: std.Progress.Node) !void {
try comp.appendFileSystemInput(embed_file.path);
}
zcu.analysis_roots.clear();
zcu.analysis_roots_len = 0;
zcu.analysis_roots.appendAssumeCapacity(zcu.std_mod);
zcu.analysis_roots_buffer[zcu.analysis_roots_len] = zcu.std_mod;
zcu.analysis_roots_len += 1;
// Normally we rely on importing std to, in turn, import the root source file in the start code.
// However, the main module is distinct from the root module in tests, so that won't happen there.
if (comp.config.is_test and zcu.main_mod != zcu.std_mod) {
zcu.analysis_roots.appendAssumeCapacity(zcu.main_mod);
zcu.analysis_roots_buffer[zcu.analysis_roots_len] = zcu.main_mod;
zcu.analysis_roots_len += 1;
}
if (zcu.root_mod.deps.get("compiler_rt")) |compiler_rt_mod| {
zcu.analysis_roots.appendAssumeCapacity(compiler_rt_mod);
zcu.analysis_roots_buffer[zcu.analysis_roots_len] = compiler_rt_mod;
zcu.analysis_roots_len += 1;
}
if (zcu.root_mod.deps.get("ubsan_rt")) |ubsan_rt_mod| {
zcu.analysis_roots.appendAssumeCapacity(ubsan_rt_mod);
zcu.analysis_roots_buffer[zcu.analysis_roots_len] = ubsan_rt_mod;
zcu.analysis_roots_len += 1;
}
}
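With `BoundedArray` removed from `std` earlier in this commit, the analysis roots become a fixed buffer plus a length; the `zcu.analysisRoots()` call later in this file presumably slices them, roughly like this assumed sketch:

pub fn analysisRoots(zcu: *Zcu) []*Package.Module {
    return zcu.analysis_roots_buffer[0..zcu.analysis_roots_len];
}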
@ -4404,10 +4380,8 @@ fn performAllTheWork(
comp.link_task_wait_group.reset();
defer comp.link_task_wait_group.wait();
comp.link_prog_node.increaseEstimatedTotalItems(
comp.link_task_queue.queued_prelink.items.len + // already queued prelink tasks
comp.link_task_queue.pending_prelink_tasks, // prelink tasks which will be queued
);
// Already-queued prelink tasks
comp.link_prog_node.increaseEstimatedTotalItems(comp.link_task_queue.queued_prelink.items.len);
comp.link_task_queue.start(comp);
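The precomputed `pending_prelink_tasks` counter is gone; judging by the pattern repeated below, each prelink producer now brackets its own work:

comp.link_task_queue.startPrelinkItem(); // before spawning the worker
// and, as the first statement of the worker:
defer comp.link_task_queue.finishPrelinkItem(comp);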
if (comp.emit_docs != null) {
@ -4423,6 +4397,7 @@ fn performAllTheWork(
// compiler-rt due to LLD bugs as well, e.g.:
//
// https://github.com/llvm/llvm-project/issues/43698#issuecomment-2542660611
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"compiler_rt.zig",
@ -4440,6 +4415,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.compiler_rt_obj and comp.compiler_rt_obj == null) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"compiler_rt.zig",
@ -4458,6 +4434,7 @@ fn performAllTheWork(
// hack for stage2_x86_64 + coff
if (comp.queued_jobs.compiler_rt_dyn_lib and comp.compiler_rt_dyn_lib == null) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"compiler_rt.zig",
@ -4475,6 +4452,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.fuzzer_lib and comp.fuzzer_lib == null) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"fuzzer.zig",
@ -4489,6 +4467,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.ubsan_rt_lib and comp.ubsan_rt_lib == null) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"ubsan_rt.zig",
@ -4505,6 +4484,7 @@ fn performAllTheWork(
}
if (comp.queued_jobs.ubsan_rt_obj and comp.ubsan_rt_obj == null) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildRt, .{
comp,
"ubsan_rt.zig",
@ -4521,40 +4501,49 @@ fn performAllTheWork(
}
if (comp.queued_jobs.glibc_shared_objects) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildGlibcSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.freebsd_shared_objects) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildFreeBSDSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.netbsd_shared_objects) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildNetBSDSharedObjects, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libunwind) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibUnwind, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libcxx) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibCxx, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libcxxabi) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibCxxAbi, .{ comp, main_progress_node });
}
if (comp.queued_jobs.libtsan) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibTsan, .{ comp, main_progress_node });
}
if (comp.queued_jobs.zigc_lib and comp.zigc_static_lib == null) {
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildLibZigC, .{ comp, main_progress_node });
}
for (0..@typeInfo(musl.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.musl_crt_file[i]) {
const tag: musl.CrtFile = @enumFromInt(i);
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildMuslCrtFile, .{ comp, tag, main_progress_node });
}
}
@ -4562,6 +4551,7 @@ fn performAllTheWork(
for (0..@typeInfo(glibc.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.glibc_crt_file[i]) {
const tag: glibc.CrtFile = @enumFromInt(i);
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildGlibcCrtFile, .{ comp, tag, main_progress_node });
}
}
@ -4569,6 +4559,7 @@ fn performAllTheWork(
for (0..@typeInfo(freebsd.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.freebsd_crt_file[i]) {
const tag: freebsd.CrtFile = @enumFromInt(i);
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildFreeBSDCrtFile, .{ comp, tag, main_progress_node });
}
}
@ -4576,6 +4567,7 @@ fn performAllTheWork(
for (0..@typeInfo(netbsd.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.netbsd_crt_file[i]) {
const tag: netbsd.CrtFile = @enumFromInt(i);
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildNetBSDCrtFile, .{ comp, tag, main_progress_node });
}
}
@ -4583,6 +4575,7 @@ fn performAllTheWork(
for (0..@typeInfo(wasi_libc.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.wasi_libc_crt_file[i]) {
const tag: wasi_libc.CrtFile = @enumFromInt(i);
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildWasiLibcCrtFile, .{ comp, tag, main_progress_node });
}
}
@ -4590,6 +4583,7 @@ fn performAllTheWork(
for (0..@typeInfo(mingw.CrtFile).@"enum".fields.len) |i| {
if (comp.queued_jobs.mingw_crt_file[i]) {
const tag: mingw.CrtFile = @enumFromInt(i);
comp.link_task_queue.startPrelinkItem();
comp.link_task_wait_group.spawnManager(buildMingwCrtFile, .{ comp, tag, main_progress_node });
}
}
@ -4661,12 +4655,14 @@ fn performAllTheWork(
}
while (comp.c_object_work_queue.readItem()) |c_object| {
comp.link_task_queue.startPrelinkItem();
comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateCObject, .{
comp, c_object, main_progress_node,
});
}
while (comp.win32_resource_work_queue.readItem()) |win32_resource| {
comp.link_task_queue.startPrelinkItem();
comp.thread_pool.spawnWg(&comp.link_task_wait_group, workerUpdateWin32Resource, .{
comp, win32_resource, main_progress_node,
});
@ -4745,7 +4741,7 @@ fn performAllTheWork(
try zcu.flushRetryableFailures();
// It's analysis time! Queue up our initial analysis.
for (zcu.analysis_roots.slice()) |mod| {
for (zcu.analysisRoots()) |mod| {
try comp.queueJob(.{ .analyze_mod = mod });
}
@ -4769,15 +4765,14 @@ fn performAllTheWork(
}
};
// We aren't going to queue any more prelink tasks.
comp.link_task_queue.finishPrelinkItem(comp);
if (!comp.separateCodegenThreadOk()) {
// Waits until all input files have been parsed.
comp.link_task_wait_group.wait();
comp.link_task_wait_group.reset();
std.log.scoped(.link).debug("finished waiting for link_task_wait_group", .{});
if (comp.link_task_queue.pending_prelink_tasks > 0) {
// Indicates an error occurred preventing prelink phase from completing.
return;
}
}
if (comp.zcu != null) {
@ -5564,6 +5559,7 @@ fn workerUpdateCObject(
c_object: *CObject,
progress_node: std.Progress.Node,
) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
comp.updateCObject(c_object, progress_node) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
@ -5581,6 +5577,7 @@ fn workerUpdateWin32Resource(
win32_resource: *Win32Resource,
progress_node: std.Progress.Node,
) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
comp.updateWin32Resource(win32_resource, progress_node) catch |err| switch (err) {
error.AnalysisFail => return,
else => {
@ -5624,6 +5621,7 @@ fn buildRt(
options: RtOptions,
out: *?CrtFile,
) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
comp.buildOutputFromZig(
root_source_name,
root_name,
@ -5642,6 +5640,7 @@ fn buildRt(
}
fn buildMuslCrtFile(comp: *Compilation, crt_file: musl.CrtFile, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (musl.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.musl_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@ -5653,6 +5652,7 @@ fn buildMuslCrtFile(comp: *Compilation, crt_file: musl.CrtFile, prog_node: std.P
}
fn buildGlibcCrtFile(comp: *Compilation, crt_file: glibc.CrtFile, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (glibc.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.glibc_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@ -5664,6 +5664,7 @@ fn buildGlibcCrtFile(comp: *Compilation, crt_file: glibc.CrtFile, prog_node: std
}
fn buildGlibcSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (glibc.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.glibc_shared_objects = false;
@ -5676,6 +5677,7 @@ fn buildGlibcSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) voi
}
fn buildFreeBSDCrtFile(comp: *Compilation, crt_file: freebsd.CrtFile, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (freebsd.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.freebsd_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@ -5687,6 +5689,7 @@ fn buildFreeBSDCrtFile(comp: *Compilation, crt_file: freebsd.CrtFile, prog_node:
}
fn buildFreeBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (freebsd.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.freebsd_shared_objects = false;
@ -5699,6 +5702,7 @@ fn buildFreeBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) v
}
fn buildNetBSDCrtFile(comp: *Compilation, crt_file: netbsd.CrtFile, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (netbsd.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.netbsd_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@ -5710,6 +5714,7 @@ fn buildNetBSDCrtFile(comp: *Compilation, crt_file: netbsd.CrtFile, prog_node: s
}
fn buildNetBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (netbsd.buildSharedObjects(comp, prog_node)) |_| {
// The job should no longer be queued up since it succeeded.
comp.queued_jobs.netbsd_shared_objects = false;
@ -5722,6 +5727,7 @@ fn buildNetBSDSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) vo
}
fn buildMingwCrtFile(comp: *Compilation, crt_file: mingw.CrtFile, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (mingw.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.mingw_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@ -5733,6 +5739,7 @@ fn buildMingwCrtFile(comp: *Compilation, crt_file: mingw.CrtFile, prog_node: std
}
fn buildWasiLibcCrtFile(comp: *Compilation, crt_file: wasi_libc.CrtFile, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (wasi_libc.buildCrtFile(comp, crt_file, prog_node)) |_| {
comp.queued_jobs.wasi_libc_crt_file[@intFromEnum(crt_file)] = false;
} else |err| switch (err) {
@ -5744,6 +5751,7 @@ fn buildWasiLibcCrtFile(comp: *Compilation, crt_file: wasi_libc.CrtFile, prog_no
}
fn buildLibUnwind(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (libunwind.buildStaticLib(comp, prog_node)) |_| {
comp.queued_jobs.libunwind = false;
} else |err| switch (err) {
@ -5753,6 +5761,7 @@ fn buildLibUnwind(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (libcxx.buildLibCxx(comp, prog_node)) |_| {
comp.queued_jobs.libcxx = false;
} else |err| switch (err) {
@ -5762,6 +5771,7 @@ fn buildLibCxx(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (libcxx.buildLibCxxAbi(comp, prog_node)) |_| {
comp.queued_jobs.libcxxabi = false;
} else |err| switch (err) {
@ -5771,6 +5781,7 @@ fn buildLibCxxAbi(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibTsan(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
if (libtsan.buildTsan(comp, prog_node)) |_| {
comp.queued_jobs.libtsan = false;
} else |err| switch (err) {
@ -5780,6 +5791,7 @@ fn buildLibTsan(comp: *Compilation, prog_node: std.Progress.Node) void {
}
fn buildLibZigC(comp: *Compilation, prog_node: std.Progress.Node) void {
defer comp.link_task_queue.finishPrelinkItem(comp);
comp.buildOutputFromZig(
"c.zig",
"zigc",
@ -7717,6 +7729,7 @@ pub fn queuePrelinkTaskMode(comp: *Compilation, path: Cache.Path, config: *const
/// Only valid to call during `update`. Automatically handles queuing up a
/// linker worker task if there is not already one.
pub fn queuePrelinkTasks(comp: *Compilation, tasks: []const link.PrelinkTask) void {
comp.link_prog_node.increaseEstimatedTotalItems(tasks.len);
comp.link_task_queue.enqueuePrelink(comp, tasks) catch |err| switch (err) {
error.OutOfMemory => return comp.setAllocFailure(),
};
@ -7789,6 +7802,27 @@ fn getCrtPathsInner(
};
}
pub fn addLinkLib(comp: *Compilation, lib_name: []const u8) !void {
// Avoid deadlocking on building import libs such as kernel32.lib.
// This can happen when the user uses `build-exe foo.obj -lkernel32`:
// when we then create a sub-Compilation for zig libc, it also tries to
// build kernel32.lib.
if (comp.skip_linker_dependencies) return;
const target = &comp.root_mod.resolved_target.result;
if (target.os.tag != .windows or target.ofmt == .c) return;
// This happens when an `extern "foo"` function is referenced.
// If we haven't seen this library yet and we're targeting Windows, we need
// to queue up a work item to produce the DLL import library for this.
const gop = try comp.windows_libs.getOrPut(comp.gpa, lib_name);
if (gop.found_existing) return;
{
errdefer _ = comp.windows_libs.pop();
gop.key_ptr.* = try comp.gpa.dupe(u8, lib_name);
}
try comp.queueJob(.{ .windows_import_lib = gop.index });
}
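A hedged usage sketch: a reference to an `extern "ws2_32"` symbol funnels through here; the duped key keeps ownership with `comp.gpa`, and `found_existing` makes repeat calls cheap.

try comp.addLinkLib("ws2_32"); // queues a windows_import_lib job
try comp.addLinkLib("ws2_32"); // no-op: found_existing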
/// This decides the optimization mode for all zig-provided libraries, including
/// compiler-rt, libcxx, libc, libunwind, etc.
pub fn compilerRtOptMode(comp: Compilation) std.builtin.OptimizeMode {

View File

@ -1137,13 +1137,16 @@ const Local = struct {
const elem_info = @typeInfo(Elem).@"struct";
const elem_fields = elem_info.fields;
var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined;
for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{
.name = elem_field.name,
.type = *[len]elem_field.type,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
};
for (&new_fields, elem_fields) |*new_field, elem_field| {
const T = *[len]elem_field.type;
new_field.* = .{
.name = elem_field.name,
.type = T,
.default_value_ptr = null,
.is_comptime = false,
.alignment = @alignOf(T),
};
}
return @Type(.{ .@"struct" = .{
.layout = .auto,
.fields = &new_fields,
@ -1158,22 +1161,25 @@ const Local = struct {
const elem_info = @typeInfo(Elem).@"struct";
const elem_fields = elem_info.fields;
var new_fields: [elem_fields.len]std.builtin.Type.StructField = undefined;
for (&new_fields, elem_fields) |*new_field, elem_field| new_field.* = .{
.name = elem_field.name,
.type = @Type(.{ .pointer = .{
for (&new_fields, elem_fields) |*new_field, elem_field| {
const T = @Type(.{ .pointer = .{
.size = opts.size,
.is_const = opts.is_const,
.is_volatile = false,
.alignment = 0,
.alignment = @alignOf(elem_field.type),
.address_space = .generic,
.child = elem_field.type,
.is_allowzero = false,
.sentinel_ptr = null,
} }),
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
};
} });
new_field.* = .{
.name = elem_field.name,
.type = T,
.default_value_ptr = null,
.is_comptime = false,
.alignment = @alignOf(T),
};
}
return @Type(.{ .@"struct" = .{
.layout = .auto,
.fields = &new_fields,

View File

@ -385,21 +385,23 @@ pub fn run(f: *Fetch) RunError!void {
var resource: Resource = .{ .dir = dir };
return f.runResource(path_or_url, &resource, null);
} else |dir_err| {
var server_header_buffer: [init_resource_buffer_size]u8 = undefined;
const file_err = if (dir_err == error.NotDir) e: {
if (fs.cwd().openFile(path_or_url, .{})) |file| {
var resource: Resource = .{ .file = file };
var resource: Resource = .{ .file = file.reader(&server_header_buffer) };
return f.runResource(path_or_url, &resource, null);
} else |err| break :e err;
} else dir_err;
const uri = std.Uri.parse(path_or_url) catch |uri_err| {
return f.fail(0, try eb.printString(
"'{s}' could not be recognized as a file path ({s}) or an URL ({s})",
.{ path_or_url, @errorName(file_err), @errorName(uri_err) },
"'{s}' could not be recognized as a file path ({t}) or an URL ({t})",
.{ path_or_url, file_err, uri_err },
));
};
var server_header_buffer: [header_buffer_size]u8 = undefined;
var resource = try f.initResource(uri, &server_header_buffer);
var resource: Resource = undefined;
try f.initResource(uri, &resource, &server_header_buffer);
return f.runResource(try uri.path.toRawMaybeAlloc(arena), &resource, null);
}
},
@ -464,8 +466,9 @@ pub fn run(f: *Fetch) RunError!void {
f.location_tok,
try eb.printString("invalid URI: {s}", .{@errorName(err)}),
);
var server_header_buffer: [header_buffer_size]u8 = undefined;
var resource = try f.initResource(uri, &server_header_buffer);
var buffer: [init_resource_buffer_size]u8 = undefined;
var resource: Resource = undefined;
try f.initResource(uri, &resource, &buffer);
return f.runResource(try uri.path.toRawMaybeAlloc(arena), &resource, remote.hash);
}
@ -866,8 +869,8 @@ fn fail(f: *Fetch, msg_tok: std.zig.Ast.TokenIndex, msg_str: u32) RunError {
}
const Resource = union(enum) {
file: fs.File,
http_request: std.http.Client.Request,
file: fs.File.Reader,
http_request: HttpRequest,
git: Git,
dir: fs.Dir,
@ -877,10 +880,16 @@ const Resource = union(enum) {
want_oid: git.Oid,
};
const HttpRequest = struct {
request: std.http.Client.Request,
response: std.http.Client.Response,
buffer: []u8,
};
fn deinit(resource: *Resource) void {
switch (resource.*) {
.file => |*file| file.close(),
.http_request => |*req| req.deinit(),
.file => |*file_reader| file_reader.file.close(),
.http_request => |*http_request| http_request.request.deinit(),
.git => |*git_resource| {
git_resource.fetch_stream.deinit();
git_resource.session.deinit();
@ -890,21 +899,13 @@ const Resource = union(enum) {
resource.* = undefined;
}
fn reader(resource: *Resource) std.io.AnyReader {
return .{
.context = resource,
.readFn = read,
};
}
fn read(context: *const anyopaque, buffer: []u8) anyerror!usize {
const resource: *Resource = @constCast(@ptrCast(@alignCast(context)));
switch (resource.*) {
.file => |*f| return f.read(buffer),
.http_request => |*r| return r.read(buffer),
.git => |*g| return g.fetch_stream.read(buffer),
fn reader(resource: *Resource) *std.Io.Reader {
return switch (resource.*) {
.file => |*file_reader| return &file_reader.interface,
.http_request => |*http_request| return http_request.response.reader(http_request.buffer),
.git => |*g| return &g.fetch_stream.reader,
.dir => unreachable,
}
};
}
};
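A hedged sketch of consuming a `Resource` through the new non-generic reader interface (the allocator name is illustrative):

const r: *std.Io.Reader = resource.reader();
const bytes = try r.allocRemaining(gpa, .unlimited);
defer gpa.free(bytes);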
@ -967,20 +968,22 @@ const FileType = enum {
}
};
const header_buffer_size = 16 * 1024;
const init_resource_buffer_size = git.Packet.max_data_length;
fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Resource {
fn initResource(f: *Fetch, uri: std.Uri, resource: *Resource, reader_buffer: []u8) RunError!void {
const gpa = f.arena.child_allocator;
const arena = f.arena.allocator();
const eb = &f.error_bundle;
if (ascii.eqlIgnoreCase(uri.scheme, "file")) {
const path = try uri.path.toRawMaybeAlloc(arena);
return .{ .file = f.parent_package_root.openFile(path, .{}) catch |err| {
return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {s}", .{
f.parent_package_root, path, @errorName(err),
const file = f.parent_package_root.openFile(path, .{}) catch |err| {
return f.fail(f.location_tok, try eb.printString("unable to open '{f}{s}': {t}", .{
f.parent_package_root, path, err,
}));
} };
};
resource.* = .{ .file = file.reader(reader_buffer) };
return;
}
const http_client = f.job_queue.http_client;
@ -988,37 +991,35 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
if (ascii.eqlIgnoreCase(uri.scheme, "http") or
ascii.eqlIgnoreCase(uri.scheme, "https"))
{
var req = http_client.open(.GET, uri, .{
.server_header_buffer = server_header_buffer,
}) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to connect to server: {s}",
.{@errorName(err)},
));
};
errdefer req.deinit(); // releases more than memory
resource.* = .{ .http_request = .{
.request = http_client.request(.GET, uri, .{}) catch |err|
return f.fail(f.location_tok, try eb.printString("unable to connect to server: {t}", .{err})),
.response = undefined,
.buffer = reader_buffer,
} };
const request = &resource.http_request.request;
errdefer request.deinit();
req.send() catch |err| {
return f.fail(f.location_tok, try eb.printString(
"HTTP request failed: {s}",
.{@errorName(err)},
));
};
req.wait() catch |err| {
return f.fail(f.location_tok, try eb.printString(
"invalid HTTP response: {s}",
.{@errorName(err)},
));
request.sendBodiless() catch |err|
return f.fail(f.location_tok, try eb.printString("HTTP request failed: {t}", .{err}));
var redirect_buffer: [1024]u8 = undefined;
const response = &resource.http_request.response;
response.* = request.receiveHead(&redirect_buffer) catch |err| switch (err) {
error.ReadFailed => {
return f.fail(f.location_tok, try eb.printString("HTTP response read failure: {t}", .{
request.connection.?.getReadError().?,
}));
},
else => |e| return f.fail(f.location_tok, try eb.printString("invalid HTTP response: {t}", .{e})),
};
if (req.response.status != .ok) {
return f.fail(f.location_tok, try eb.printString(
"bad HTTP response code: '{d} {s}'",
.{ @intFromEnum(req.response.status), req.response.status.phrase() orelse "" },
));
}
if (response.head.status != .ok) return f.fail(f.location_tok, try eb.printString(
"bad HTTP response code: '{d} {s}'",
.{ response.head.status, response.head.status.phrase() orelse "" },
));
return .{ .http_request = req };
return;
}
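Pulled out of the control flow above, the new client request lifecycle reads roughly like this (a sketch; the buffer size is illustrative):

var req = try http_client.request(.GET, uri, .{});
defer req.deinit();
try req.sendBodiless();
var redirect_buffer: [1024]u8 = undefined;
var response = try req.receiveHead(&redirect_buffer);
const body = response.reader(reader_buffer);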
if (ascii.eqlIgnoreCase(uri.scheme, "git+http") or
@ -1026,7 +1027,7 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
{
var transport_uri = uri;
transport_uri.scheme = uri.scheme["git+".len..];
var session = git.Session.init(gpa, http_client, transport_uri, server_header_buffer) catch |err| {
var session = git.Session.init(gpa, http_client, transport_uri, reader_buffer) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to discover remote git server capabilities: {s}",
.{@errorName(err)},
@ -1042,16 +1043,12 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
const want_ref_head = try std.fmt.allocPrint(arena, "refs/heads/{s}", .{want_ref});
const want_ref_tag = try std.fmt.allocPrint(arena, "refs/tags/{s}", .{want_ref});
var ref_iterator = session.listRefs(.{
var ref_iterator: git.Session.RefIterator = undefined;
session.listRefs(&ref_iterator, .{
.ref_prefixes = &.{ want_ref, want_ref_head, want_ref_tag },
.include_peeled = true,
.server_header_buffer = server_header_buffer,
}) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to list refs: {s}",
.{@errorName(err)},
));
};
.buffer = reader_buffer,
}) catch |err| return f.fail(f.location_tok, try eb.printString("unable to list refs: {t}", .{err}));
defer ref_iterator.deinit();
while (ref_iterator.next() catch |err| {
return f.fail(f.location_tok, try eb.printString(
@ -1089,25 +1086,21 @@ fn initResource(f: *Fetch, uri: std.Uri, server_header_buffer: []u8) RunError!Re
var want_oid_buf: [git.Oid.max_formatted_length]u8 = undefined;
_ = std.fmt.bufPrint(&want_oid_buf, "{f}", .{want_oid}) catch unreachable;
var fetch_stream = session.fetch(&.{&want_oid_buf}, server_header_buffer) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to create fetch stream: {s}",
.{@errorName(err)},
));
var fetch_stream: git.Session.FetchStream = undefined;
session.fetch(&fetch_stream, &.{&want_oid_buf}, reader_buffer) catch |err| {
return f.fail(f.location_tok, try eb.printString("unable to create fetch stream: {t}", .{err}));
};
errdefer fetch_stream.deinit();
return .{ .git = .{
resource.* = .{ .git = .{
.session = session,
.fetch_stream = fetch_stream,
.want_oid = want_oid,
} };
return;
}
return f.fail(f.location_tok, try eb.printString(
"unsupported URL scheme: {s}",
.{uri.scheme},
));
return f.fail(f.location_tok, try eb.printString("unsupported URL scheme: {s}", .{uri.scheme}));
}
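A mechanical change that recurs throughout this file: `{s}` paired with `@errorName(err)` becomes the `{t}` specifier, which formats an error (or enum) value by its tag name directly. A minimal standalone illustration (the test itself is ours, not part of the change):

const std = @import("std");

test "{t} formats an error by name" {
    var buf: [64]u8 = undefined;
    // "{t}" replaces the "{s}" + @errorName(err) pairs removed above.
    const msg = try std.fmt.bufPrint(&buf, "HTTP request failed: {t}", .{error.ConnectionRefused});
    try std.testing.expectEqualStrings("HTTP request failed: ConnectionRefused", msg);
}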
fn unpackResource(
@@ -1121,9 +1114,11 @@ fn unpackResource(
.file => FileType.fromPath(uri_path) orelse
return f.fail(f.location_tok, try eb.printString("unknown file type: '{s}'", .{uri_path})),
.http_request => |req| ft: {
.http_request => |*http_request| ft: {
const head = &http_request.response.head;
// Content-Type takes first precedence.
const content_type = req.response.content_type orelse
const content_type = head.content_type orelse
return f.fail(f.location_tok, try eb.addString("missing 'Content-Type' header"));
// Extract the MIME type, ignoring charset and boundary directives
@@ -1165,7 +1160,7 @@ fn unpackResource(
}
// Next, the filename from 'content-disposition: attachment' takes precedence.
if (req.response.content_disposition) |cd_header| {
if (head.content_disposition) |cd_header| {
break :ft FileType.fromContentDisposition(cd_header) orelse {
return f.fail(f.location_tok, try eb.printString(
"unsupported Content-Disposition header value: '{s}' for Content-Type=application/octet-stream",
@@ -1176,10 +1171,7 @@ fn unpackResource(
// Finally, the path from the URI is used.
break :ft FileType.fromPath(uri_path) orelse {
return f.fail(f.location_tok, try eb.printString(
"unknown file type: '{s}'",
.{uri_path},
));
return f.fail(f.location_tok, try eb.printString("unknown file type: '{s}'", .{uri_path}));
};
},
@@ -1187,10 +1179,9 @@ fn unpackResource(
.dir => |dir| {
f.recursiveDirectoryCopy(dir, tmp_directory.handle) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to copy directory '{s}': {s}",
.{ uri_path, @errorName(err) },
));
return f.fail(f.location_tok, try eb.printString("unable to copy directory '{s}': {t}", .{
uri_path, err,
}));
};
return .{};
},
@@ -1198,27 +1189,17 @@ fn unpackResource(
switch (file_type) {
.tar => {
var adapter_buffer: [1024]u8 = undefined;
var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
return unpackTarball(f, tmp_directory.handle, &adapter.new_interface);
return unpackTarball(f, tmp_directory.handle, resource.reader());
},
.@"tar.gz" => {
var adapter_buffer: [std.crypto.tls.max_ciphertext_record_len]u8 = undefined;
var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
var flate_buffer: [std.compress.flate.max_window_len]u8 = undefined;
var decompress: std.compress.flate.Decompress = .init(&adapter.new_interface, .gzip, &flate_buffer);
var decompress: std.compress.flate.Decompress = .init(resource.reader(), .gzip, &flate_buffer);
return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
},
.@"tar.xz" => {
const gpa = f.arena.child_allocator;
const reader = resource.reader();
var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, reader);
var dcp = std.compress.xz.decompress(gpa, br.reader()) catch |err| {
return f.fail(f.location_tok, try eb.printString(
"unable to decompress tarball: {s}",
.{@errorName(err)},
));
};
var dcp = std.compress.xz.decompress(gpa, resource.reader().adaptToOldInterface()) catch |err|
return f.fail(f.location_tok, try eb.printString("unable to decompress tarball: {t}", .{err}));
defer dcp.deinit();
var adapter_buffer: [1024]u8 = undefined;
var adapter = dcp.reader().adaptToNewApi(&adapter_buffer);
@@ -1227,9 +1208,7 @@ fn unpackResource(
.@"tar.zst" => {
const window_size = std.compress.zstd.default_window_len;
const window_buffer = try f.arena.allocator().create([window_size]u8);
var adapter_buffer: [std.crypto.tls.max_ciphertext_record_len]u8 = undefined;
var adapter = resource.reader().adaptToNewApi(&adapter_buffer);
var decompress: std.compress.zstd.Decompress = .init(&adapter.new_interface, window_buffer, .{
var decompress: std.compress.zstd.Decompress = .init(resource.reader(), window_buffer, .{
.verify_checksum = false,
});
return try unpackTarball(f, tmp_directory.handle, &decompress.reader);
@@ -1237,12 +1216,15 @@ fn unpackResource(
.git_pack => return unpackGitPack(f, tmp_directory.handle, &resource.git) catch |err| switch (err) {
error.FetchFailed => return error.FetchFailed,
error.OutOfMemory => return error.OutOfMemory,
else => |e| return f.fail(f.location_tok, try eb.printString(
"unable to unpack git files: {s}",
.{@errorName(e)},
)),
else => |e| return f.fail(f.location_tok, try eb.printString("unable to unpack git files: {t}", .{e})),
},
.zip => return unzip(f, tmp_directory.handle, resource.reader()) catch |err| switch (err) {
error.ReadFailed => return f.fail(f.location_tok, try eb.printString(
"failed reading resource: {t}",
.{err},
)),
else => |e| return e,
},
.zip => return try unzip(f, tmp_directory.handle, resource.reader()),
}
}
@@ -1277,99 +1259,69 @@ fn unpackTarball(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) RunError!Un
return res;
}
fn unzip(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackResult {
fn unzip(f: *Fetch, out_dir: fs.Dir, reader: *std.Io.Reader) error{ ReadFailed, OutOfMemory, FetchFailed }!UnpackResult {
// We write the entire contents to a file first because zip files
// must be processed back to front and they could be too large to
// load into memory.
const cache_root = f.job_queue.global_cache;
// TODO: the downside of this solution is that if we get a failure/crash/OOM/power
// outage during this process, we leave behind a zip file, and it is
// difficult to know if/when it can be cleaned up.
// It might be worth using a mechanism that enables other processes
// to see if the owning process of a file is still alive (on Linux this
// can be done with file locks).
// Coupled with this mechanism, we could also use slots (i.e. zig-cache/tmp/0,
// zig-cache/tmp/1, etc.), which would mean that subsequent runs would
// automatically clean up old dead files.
// This could all be done with a simple TmpFile abstraction.
const prefix = "tmp/";
const suffix = ".zip";
const random_bytes_count = 20;
const random_path_len = comptime std.fs.base64_encoder.calcSize(random_bytes_count);
var zip_path: [prefix.len + random_path_len + suffix.len]u8 = undefined;
@memcpy(zip_path[0..prefix.len], prefix);
@memcpy(zip_path[prefix.len + random_path_len ..], suffix);
{
var random_bytes: [random_bytes_count]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
_ = std.fs.base64_encoder.encode(
zip_path[prefix.len..][0..random_path_len],
&random_bytes,
);
}
defer cache_root.handle.deleteFile(&zip_path) catch {};
const eb = &f.error_bundle;
const random_len = @sizeOf(u64) * 2;
{
var zip_file = cache_root.handle.createFile(
&zip_path,
.{},
) catch |err| return f.fail(f.location_tok, try eb.printString(
"failed to create tmp zip file: {s}",
.{@errorName(err)},
));
defer zip_file.close();
var buf: [4096]u8 = undefined;
while (true) {
const len = reader.readAll(&buf) catch |err| return f.fail(f.location_tok, try eb.printString(
"read zip stream failed: {s}",
.{@errorName(err)},
));
if (len == 0) break;
zip_file.deprecatedWriter().writeAll(buf[0..len]) catch |err| return f.fail(f.location_tok, try eb.printString(
"write temporary zip file failed: {s}",
.{@errorName(err)},
));
}
}
var zip_path: [prefix.len + random_len + suffix.len]u8 = undefined;
zip_path[0..prefix.len].* = prefix.*;
zip_path[prefix.len + random_len ..].* = suffix.*;
var zip_file = while (true) {
const random_integer = std.crypto.random.int(u64);
zip_path[prefix.len..][0..random_len].* = std.fmt.hex(random_integer);
break cache_root.handle.createFile(&zip_path, .{
.exclusive = true,
.read = true,
}) catch |err| switch (err) {
error.PathAlreadyExists => continue,
else => |e| return f.fail(
f.location_tok,
try eb.printString("failed to create temporary zip file: {t}", .{e}),
),
};
};
defer zip_file.close();
var zip_file_buffer: [4096]u8 = undefined;
var zip_file_reader = b: {
var zip_file_writer = zip_file.writer(&zip_file_buffer);
_ = reader.streamRemaining(&zip_file_writer.interface) catch |err| switch (err) {
error.ReadFailed => return error.ReadFailed,
error.WriteFailed => return f.fail(
f.location_tok,
try eb.printString("failed writing temporary zip file: {t}", .{err}),
),
};
zip_file_writer.interface.flush() catch |err| return f.fail(
f.location_tok,
try eb.printString("failed writing temporary zip file: {t}", .{err}),
);
break :b zip_file_writer.moveToReader();
};
var diagnostics: std.zip.Diagnostics = .{ .allocator = f.arena.allocator() };
// no need to deinit since we are using an arena allocator
{
var zip_file = cache_root.handle.openFile(
&zip_path,
.{},
) catch |err| return f.fail(f.location_tok, try eb.printString(
"failed to open temporary zip file: {s}",
.{@errorName(err)},
));
defer zip_file.close();
zip_file_reader.seekTo(0) catch |err|
return f.fail(f.location_tok, try eb.printString("failed to seek temporary zip file: {t}", .{err}));
std.zip.extract(out_dir, &zip_file_reader, .{
.allow_backslashes = true,
.diagnostics = &diagnostics,
}) catch |err| return f.fail(f.location_tok, try eb.printString("zip extract failed: {t}", .{err}));
var zip_file_buffer: [1024]u8 = undefined;
var zip_file_reader = zip_file.reader(&zip_file_buffer);
cache_root.handle.deleteFile(&zip_path) catch |err|
return f.fail(f.location_tok, try eb.printString("delete temporary zip failed: {t}", .{err}));
std.zip.extract(out_dir, &zip_file_reader, .{
.allow_backslashes = true,
.diagnostics = &diagnostics,
}) catch |err| return f.fail(f.location_tok, try eb.printString(
"zip extract failed: {s}",
.{@errorName(err)},
));
}
cache_root.handle.deleteFile(&zip_path) catch |err| return f.fail(f.location_tok, try eb.printString(
"delete temporary zip failed: {s}",
.{@errorName(err)},
));
const res: UnpackResult = .{ .root_dir = diagnostics.root_dir };
return res;
return .{ .root_dir = diagnostics.root_dir };
}
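The TODO above suggests using file locks so other processes can tell whether the owner of a temporary file is still alive. A minimal sketch of that idea using only `std.fs` advisory locks; the helper name and slot scheme are hypothetical, not part of this change:

const std = @import("std");

/// Hypothetical slot reclamation: a live owner holds an exclusive lock on its
/// slot file, so a successful tryLock by another process proves the previous
/// owner is gone and the slot file can be reused or deleted.
fn tryReclaimSlot(dir: std.fs.Dir, name: []const u8) !?std.fs.File {
    const file = try dir.createFile(name, .{ .read = true, .truncate = false });
    errdefer file.close();
    if (try file.tryLock(.exclusive)) return file; // previous owner is dead or absent
    file.close(); // still locked: the owning process is alive
    return null;
}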
fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!UnpackResult {
@@ -1387,10 +1339,13 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource.Git) anyerror!U
var pack_file = try pack_dir.createFile("pkg.pack", .{ .read = true });
defer pack_file.close();
var pack_file_buffer: [4096]u8 = undefined;
var fifo = std.fifo.LinearFifo(u8, .{ .Slice = {} }).init(&pack_file_buffer);
try fifo.pump(resource.fetch_stream.reader(), pack_file.deprecatedWriter());
var pack_file_reader = pack_file.reader(&pack_file_buffer);
var pack_file_reader = b: {
var pack_file_writer = pack_file.writer(&pack_file_buffer);
const fetch_reader = &resource.fetch_stream.reader;
_ = try fetch_reader.streamRemaining(&pack_file_writer.interface);
try pack_file_writer.interface.flush();
break :b pack_file_writer.moveToReader();
};
var index_file = try pack_dir.createFile("pkg.idx", .{ .read = true });
defer index_file.close();
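The zip and git-pack paths above now share the same spool-to-disk idiom from the new `std.Io` API: stream the body through a `std.fs.File.Writer`, flush, then reuse the same handle and buffer for reading via `moveToReader`. Factored out as a sketch (the helper is illustrative; the individual calls are the ones used above):

const std = @import("std");

/// Streams everything from `src` into `file`, then converts the writer into a
/// reader over the same fd and buffer, as both call sites above do.
fn spoolToFile(src: *std.Io.Reader, file: std.fs.File, buffer: []u8) !std.fs.File.Reader {
    var file_writer = file.writer(buffer);
    _ = try src.streamRemaining(&file_writer.interface);
    try file_writer.interface.flush();
    // The caller seeks as needed, e.g. back to 0 before std.zip.extract.
    return file_writer.moveToReader();
}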

File diff suppressed because it is too large.


@@ -2631,7 +2631,7 @@ fn reparentOwnedErrorMsg(
const orig_notes = msg.notes.len;
msg.notes = try sema.gpa.realloc(msg.notes, orig_notes + 1);
std.mem.copyBackwards(Zcu.ErrorMsg, msg.notes[1..], msg.notes[0..orig_notes]);
@memmove(msg.notes[1..][0..orig_notes], msg.notes[0..orig_notes]);
msg.notes[0] = .{
.src_loc = msg.src_loc,
.msg = msg.msg,
@@ -2649,7 +2649,13 @@ pub fn analyzeAsAlign(
src: LazySrcLoc,
air_ref: Air.Inst.Ref,
) !Alignment {
const alignment_big = try sema.analyzeAsInt(block, src, air_ref, align_ty, .{ .simple = .@"align" });
const alignment_big = try sema.analyzeAsInt(
block,
src,
air_ref,
align_ty,
.{ .simple = .@"align" },
);
return sema.validateAlign(block, src, alignment_big);
}
@@ -3926,11 +3932,12 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
// Whilst constructing our mapping, we will also initialize optional and error union payloads when
// we encounter the corresponding pointers. For this reason, the ordering of `to_map` matters.
var to_map = try std.ArrayList(Air.Inst.Index).initCapacity(sema.arena, stores.len);
for (stores) |store_inst_idx| {
const store_inst = sema.air_instructions.get(@intFromEnum(store_inst_idx));
const ptr_to_map = switch (store_inst.tag) {
.store, .store_safe => store_inst.data.bin_op.lhs.toIndex().?, // Map the pointer being stored to.
.set_union_tag => continue, // Ignore for now; handled after we map pointers
.set_union_tag => store_inst.data.bin_op.lhs.toIndex().?, // Map the union pointer.
.optional_payload_ptr_set, .errunion_payload_ptr_set => store_inst_idx, // Map the generated pointer itself.
else => unreachable,
};
@@ -4047,13 +4054,12 @@ fn resolveComptimeKnownAllocPtr(sema: *Sema, block: *Block, alloc: Air.Inst.Ref,
const maybe_union_ty = Value.fromInterned(decl_parent_ptr).typeOf(zcu).childType(zcu);
if (zcu.typeToUnion(maybe_union_ty)) |union_obj| {
// As this is a union field, we must store to the pointer now to set the tag.
// If the payload is OPV, there will not be a payload store, so we store that value.
// Otherwise, there will be a payload store to process later, so undef will suffice.
// The payload value will be stored later, so undef is a sufficient payload for now.
const payload_ty: Type = .fromInterned(union_obj.field_types.get(&zcu.intern_pool)[idx]);
const payload_val = try sema.typeHasOnePossibleValue(payload_ty) orelse try pt.undefValue(payload_ty);
const payload_val = try pt.undefValue(payload_ty);
const tag_val = try pt.enumValueFieldIndex(.fromInterned(union_obj.enum_tag_ty), idx);
const store_val = try pt.unionValue(maybe_union_ty, tag_val, payload_val);
try sema.storePtrVal(block, LazySrcLoc.unneeded, Value.fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
try sema.storePtrVal(block, .unneeded, .fromInterned(decl_parent_ptr), store_val, maybe_union_ty);
}
break :ptr (try Value.fromInterned(decl_parent_ptr).ptrField(idx, pt)).toIntern();
},
@@ -8900,6 +8906,14 @@ fn resolveGenericBody(
return sema.resolveConstDefinedValue(block, src, result, reason);
}
/// Given a library name, examines whether it should end up in the
/// `link.File.Options.windows_libs` table (for example, libc is always
/// specified via the dedicated `link_libc` flag instead),
/// and puts it there if it is not already present.
/// It also dupes the library name, which can then be saved as part of the
/// respective `Decl` (either `ExternFn` or `Var`).
/// The lifetime of the duped library name is tied to the lifetime of the `Zcu`.
/// To deallocate, call `deinit` on the respective `Decl` (`ExternFn` or `Var`).
pub fn handleExternLibName(
sema: *Sema,
block: *Block,
@@ -8949,6 +8963,11 @@ pub fn handleExternLibName(
.{ lib_name, lib_name },
);
}
comp.addLinkLib(lib_name) catch |err| {
return sema.fail(block, src_loc, "unable to add link lib '{s}': {s}", .{
lib_name, @errorName(err),
});
};
}
}
@@ -14458,8 +14477,8 @@ fn analyzeTupleMul(
}
}
for (0..factor) |i| {
mem.copyForwards(InternPool.Index, types[tuple_len * i ..], types[0..tuple_len]);
mem.copyForwards(InternPool.Index, values[tuple_len * i ..], values[0..tuple_len]);
@memmove(types[tuple_len * i ..][0..tuple_len], types[0..tuple_len]);
@memmove(values[tuple_len * i ..][0..tuple_len], values[0..tuple_len]);
}
break :rs runtime_src;
};
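Both `@memmove` hunks replace `std.mem.copyBackwards`/`copyForwards`; the builtin picks the correct copy direction for overlapping operands itself. A self-contained illustration:

const std = @import("std");

test "@memmove handles overlap in either direction" {
    var buf = "abcdef".*;
    // Shift left by one (forward copy).
    @memmove(buf[0..5], buf[1..6]);
    try std.testing.expectEqualStrings("bcdeff", &buf);
    buf = "abcdef".*;
    // Shift right by one (backward copy).
    @memmove(buf[1..6], buf[0..5]);
    try std.testing.expectEqualStrings("aabcde", &buf);
}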
@@ -18817,7 +18836,7 @@ fn zirPtrType(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air
const abi_align: Alignment = if (inst_data.flags.has_align) blk: {
const ref: Zir.Inst.Ref = @enumFromInt(sema.code.extra[extra_i]);
extra_i += 1;
const coerced = try sema.coerce(block, .u32, try sema.resolveInst(ref), align_src);
const coerced = try sema.coerce(block, align_ty, try sema.resolveInst(ref), align_src);
const val = try sema.resolveConstDefinedValue(block, align_src, coerced, .{ .simple = .@"align" });
// Check if this happens to be the lazy alignment of our element type, in
// which case we can make this 0 without resolving it.
@@ -20335,15 +20354,11 @@ fn zirReify(
try ip.getOrPutString(gpa, pt.tid, "sentinel_ptr", .no_embedded_nulls),
).?);
if (!try sema.intFitsInType(alignment_val, .u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
if (!try sema.intFitsInType(alignment_val, align_ty, null)) {
return sema.fail(block, src, "alignment must fit in '{f}'", .{align_ty.fmt(pt)});
}
const alignment_val_int = try alignment_val.toUnsignedIntSema(pt);
if (alignment_val_int > 0 and !math.isPowerOfTwo(alignment_val_int)) {
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{alignment_val_int});
}
const abi_align = Alignment.fromByteUnits(alignment_val_int);
const abi_align = try sema.validateAlign(block, src, alignment_val_int);
const elem_ty = child_val.toType();
if (abi_align != .none) {
@@ -20920,8 +20935,6 @@ fn reifyUnion(
std.hash.autoHash(&hasher, opt_tag_type_val.toIntern());
std.hash.autoHash(&hasher, fields_len);
var any_aligns = false;
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(pt, field_idx);
@@ -20930,16 +20943,11 @@ fn reifyUnion(
const field_align_val = try sema.resolveLazyValue(try field_info.fieldValue(pt, 2));
const field_name = try sema.sliceToIpString(block, src, field_name_val, .{ .simple = .union_field_name });
std.hash.autoHash(&hasher, .{
field_name,
field_type_val.toIntern(),
field_align_val.toIntern(),
});
if (field_align_val.toUnsignedInt(zcu) != 0) {
any_aligns = true;
}
}
const tracked_inst = try block.trackZir(inst);
@@ -20956,7 +20964,7 @@ fn reifyUnion(
true => .safety,
false => .none,
},
.any_aligned_fields = any_aligns,
.any_aligned_fields = layout != .@"packed",
.requires_comptime = .unknown,
.assumed_runtime_bits = false,
.assumed_pointer_aligned = false,
@@ -20989,8 +20997,7 @@ fn reifyUnion(
);
wip_ty.setName(ip, type_name.name, type_name.nav);
const field_types = try sema.arena.alloc(InternPool.Index, fields_len);
const field_aligns = if (any_aligns) try sema.arena.alloc(InternPool.Alignment, fields_len) else undefined;
const loaded_union = ip.loadUnionType(wip_ty.index);
const enum_tag_ty, const has_explicit_tag = if (opt_tag_type_val.optionalValue(zcu)) |tag_type_val| tag_ty: {
switch (ip.indexToKey(tag_type_val.toIntern())) {
@@ -21003,11 +21010,12 @@ fn reifyUnion(
const tag_ty_fields_len = enum_tag_ty.enumFieldCount(zcu);
var seen_tags = try std.DynamicBitSetUnmanaged.initEmpty(sema.arena, tag_ty_fields_len);
for (field_types, 0..) |*field_ty, field_idx| {
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(pt, field_idx);
const field_name_val = try field_info.fieldValue(pt, 0);
const field_type_val = try field_info.fieldValue(pt, 1);
const field_alignment_val = try field_info.fieldValue(pt, 2);
// Don't pass a reason; first loop acts as an assertion that this is valid.
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined);
@@ -21024,14 +21032,12 @@ fn reifyUnion(
}
seen_tags.set(enum_index);
field_ty.* = field_type_val.toIntern();
if (any_aligns) {
const byte_align = try (try field_info.fieldValue(pt, 2)).toUnsignedIntSema(pt);
if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
// TODO: better source location
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
}
field_aligns[field_idx] = Alignment.fromByteUnits(byte_align);
loaded_union.field_types.get(ip)[field_idx] = field_type_val.toIntern();
const byte_align = try field_alignment_val.toUnsignedIntSema(pt);
if (layout == .@"packed") {
if (byte_align != 0) return sema.fail(block, src, "alignment of a packed union field must be set to 0", .{});
} else {
loaded_union.field_aligns.get(ip)[field_idx] = try sema.validateAlign(block, src, byte_align);
}
}
@@ -21055,11 +21061,12 @@ fn reifyUnion(
var field_names: std.AutoArrayHashMapUnmanaged(InternPool.NullTerminatedString, void) = .empty;
try field_names.ensureTotalCapacity(sema.arena, fields_len);
for (field_types, 0..) |*field_ty, field_idx| {
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(pt, field_idx);
const field_name_val = try field_info.fieldValue(pt, 0);
const field_type_val = try field_info.fieldValue(pt, 1);
const field_alignment_val = try field_info.fieldValue(pt, 2);
// Don't pass a reason; first loop acts as an assertion that this is valid.
const field_name = try sema.sliceToIpString(block, src, field_name_val, undefined);
@@ -21069,14 +21076,12 @@ fn reifyUnion(
return sema.fail(block, src, "duplicate union field {f}", .{field_name.fmt(ip)});
}
field_ty.* = field_type_val.toIntern();
if (any_aligns) {
const byte_align = try (try field_info.fieldValue(pt, 2)).toUnsignedIntSema(pt);
if (byte_align > 0 and !math.isPowerOfTwo(byte_align)) {
// TODO: better source location
return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
}
field_aligns[field_idx] = Alignment.fromByteUnits(byte_align);
loaded_union.field_types.get(ip)[field_idx] = field_type_val.toIntern();
const byte_align = try field_alignment_val.toUnsignedIntSema(pt);
if (layout == .@"packed") {
if (byte_align != 0) return sema.fail(block, src, "alignment of a packed union field must be set to 0", .{});
} else {
loaded_union.field_aligns.get(ip)[field_idx] = try sema.validateAlign(block, src, byte_align);
}
}
@@ -21085,7 +21090,7 @@ fn reifyUnion(
};
errdefer if (!has_explicit_tag) ip.remove(pt.tid, enum_tag_ty); // remove generated tag type on error
for (field_types) |field_ty_ip| {
for (loaded_union.field_types.get(ip)) |field_ty_ip| {
const field_ty: Type = .fromInterned(field_ty_ip);
if (field_ty.zigTypeTag(zcu) == .@"opaque") {
return sema.failWithOwnedErrorMsg(block, msg: {
@@ -21119,11 +21124,6 @@ fn reifyUnion(
}
}
const loaded_union = ip.loadUnionType(wip_ty.index);
loaded_union.setFieldTypes(ip, field_types);
if (any_aligns) {
loaded_union.setFieldAligns(ip, field_aligns);
}
loaded_union.setTagType(ip, enum_tag_ty);
loaded_union.setStatus(ip, .have_field_types);
@@ -21276,7 +21276,6 @@ fn reifyStruct(
var any_comptime_fields = false;
var any_default_inits = false;
var any_aligned_fields = false;
for (0..fields_len) |field_idx| {
const field_info = try fields_val.elemValue(pt, field_idx);
@@ -21311,11 +21310,6 @@ fn reifyStruct(
if (field_is_comptime) any_comptime_fields = true;
if (field_default_value != .none) any_default_inits = true;
switch (try field_alignment_val.orderAgainstZeroSema(pt)) {
.eq => {},
.gt => any_aligned_fields = true,
.lt => unreachable,
}
}
const tracked_inst = try block.trackZir(inst);
@@ -21327,7 +21321,7 @@ fn reifyStruct(
.requires_comptime = .unknown,
.any_comptime_fields = any_comptime_fields,
.any_default_inits = any_default_inits,
.any_aligned_fields = any_aligned_fields,
.any_aligned_fields = layout != .@"packed",
.inits_resolved = true,
.key = .{ .reified = .{
.zir_index = tracked_inst,
@@ -21371,21 +21365,14 @@ fn reifyStruct(
return sema.fail(block, src, "duplicate struct field name {f}", .{field_name.fmt(ip)});
}
if (any_aligned_fields) {
if (!try sema.intFitsInType(field_alignment_val, .u32, null)) {
return sema.fail(block, src, "alignment must fit in 'u32'", .{});
}
const byte_align = try field_alignment_val.toUnsignedIntSema(pt);
if (byte_align == 0) {
if (layout != .@"packed") {
struct_type.field_aligns.get(ip)[field_idx] = .none;
}
} else {
if (layout == .@"packed") return sema.fail(block, src, "alignment in a packed struct field must be set to 0", .{});
if (!math.isPowerOfTwo(byte_align)) return sema.fail(block, src, "alignment value '{d}' is not a power of two or zero", .{byte_align});
struct_type.field_aligns.get(ip)[field_idx] = Alignment.fromNonzeroByteUnits(byte_align);
}
if (!try sema.intFitsInType(field_alignment_val, align_ty, null)) {
return sema.fail(block, src, "alignment must fit in '{f}'", .{align_ty.fmt(pt)});
}
const byte_align = try field_alignment_val.toUnsignedIntSema(pt);
if (layout == .@"packed") {
if (byte_align != 0) return sema.fail(block, src, "alignment of a packed struct field must be set to 0", .{});
} else {
struct_type.field_aligns.get(ip)[field_idx] = try sema.validateAlign(block, src, byte_align);
}
const field_is_comptime = field_is_comptime_val.toBool();


@@ -268,7 +268,8 @@ nav_val_analysis_queued: std.AutoArrayHashMapUnmanaged(InternPool.Nav.Index, voi
/// These are the modules which we initially queue for analysis in `Compilation.update`.
/// `resolveReferences` will use these as the root of its reachability traversal.
analysis_roots: std.BoundedArray(*Package.Module, 4) = .{},
analysis_roots_buffer: [4]*Package.Module,
analysis_roots_len: usize = 0,
/// This is the cached result of `Zcu.resolveReferences`. It is computed on-demand, and
/// reset to `null` when any semantic analysis occurs (since this invalidates the data).
/// Allocated into `gpa`.
@@ -4013,8 +4014,8 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
// This is not a sufficient size, but a lower bound.
try result.ensureTotalCapacity(gpa, @intCast(zcu.reference_table.count()));
try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
for (zcu.analysis_roots.slice()) |mod| {
try type_queue.ensureTotalCapacity(gpa, zcu.analysis_roots_len);
for (zcu.analysisRoots()) |mod| {
const file = zcu.module_roots.get(mod).?.unwrap() orelse continue;
const root_ty = zcu.fileRootType(file);
if (root_ty == .none) continue;
@@ -4202,6 +4203,10 @@ fn resolveReferencesInner(zcu: *Zcu) !std.AutoHashMapUnmanaged(AnalUnit, ?Resolv
return result;
}
pub fn analysisRoots(zcu: *Zcu) []*Package.Module {
return zcu.analysis_roots_buffer[0..zcu.analysis_roots_len];
}
pub fn fileByIndex(zcu: *const Zcu, file_index: File.Index) *File {
return zcu.intern_pool.filePtr(file_index);
}
@@ -4510,7 +4515,7 @@ pub fn callconvSupported(zcu: *Zcu, cc: std.builtin.CallingConvention) union(enu
},
.stage2_spirv => switch (cc) {
.spirv_device, .spirv_kernel => true,
.spirv_fragment, .spirv_vertex => target.os.tag == .vulkan,
.spirv_fragment, .spirv_vertex => target.os.tag == .vulkan or target.os.tag == .opengl,
else => false,
},
};
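The `std.BoundedArray` field becomes an explicit buffer-plus-length pair read through the new `analysisRoots()` accessor. The same pattern in miniature (illustrative type, not the compiler's):

const std = @import("std");

const Roots = struct {
    buffer: [4]u8 = undefined,
    len: usize = 0,

    /// Equivalent of `Zcu.analysisRoots`: expose only the initialized prefix.
    fn slice(r: *Roots) []u8 {
        return r.buffer[0..r.len];
    }
};

test Roots {
    var r: Roots = .{};
    r.buffer[r.len] = 42; // append by hand; capacity is guaranteed by construction
    r.len += 1;
    try std.testing.expectEqualSlices(u8, &.{42}, r.slice());
}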


@@ -2116,8 +2116,9 @@ pub fn computeAliveFiles(pt: Zcu.PerThread) Allocator.Error!bool {
// multi-threaded environment (where things like file indices could differ between compiler runs).
// The roots of our file liveness analysis will be the analysis roots.
try zcu.alive_files.ensureTotalCapacity(gpa, zcu.analysis_roots.len);
for (zcu.analysis_roots.slice()) |mod| {
const analysis_roots = zcu.analysisRoots();
try zcu.alive_files.ensureTotalCapacity(gpa, analysis_roots.len);
for (analysis_roots) |mod| {
const file_index = zcu.module_roots.get(mod).?.unwrap() orelse continue;
const file = zcu.fileByIndex(file_index);


@@ -33,13 +33,16 @@ pub fn nextInstruction(as: *Assemble) !?Instruction {
var symbols: Symbols: {
const symbols = @typeInfo(@TypeOf(instruction.symbols)).@"struct".fields;
var symbol_fields: [symbols.len]std.builtin.Type.StructField = undefined;
for (&symbol_fields, symbols) |*symbol_field, symbol| symbol_field.* = .{
.name = symbol.name,
.type = zonCast(SymbolSpec, @field(instruction.symbols, symbol.name), .{}).Storage(),
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
};
for (&symbol_fields, symbols) |*symbol_field, symbol| {
const Storage = zonCast(SymbolSpec, @field(instruction.symbols, symbol.name), .{}).Storage();
symbol_field.* = .{
.name = symbol.name,
.type = Storage,
.default_value_ptr = null,
.is_comptime = false,
.alignment = @alignOf(Storage),
};
}
break :Symbols @Type(.{ .@"struct" = .{
.layout = .auto,
.fields = &symbol_fields,


@@ -12210,11 +12210,14 @@ fn lowerFnRetTy(o: *Object, pt: Zcu.PerThread, fn_info: InternPool.Key.FuncType)
},
.riscv64_lp64, .riscv32_ilp32 => switch (riscv_c_abi.classifyType(return_type, zcu)) {
.memory => return .void,
.integer => {
return o.builder.intType(@intCast(return_type.bitSize(zcu)));
},
.integer => return o.builder.intType(@intCast(return_type.bitSize(zcu))),
.double_integer => {
return o.builder.structType(.normal, &.{ .i64, .i64 });
const integer: Builder.Type = switch (zcu.getTarget().cpu.arch) {
.riscv64 => .i64,
.riscv32 => .i32,
else => unreachable,
};
return o.builder.structType(.normal, &.{ integer, integer });
},
.byval => return o.lowerType(pt, return_type),
.fields => {


@@ -977,10 +977,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
pub fn sharedObjectsCount() u8 {
return libs.len;
}
fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
assert(comp.freebsd_so_files == null);
comp.freebsd_so_files = so_files;


@@ -1120,18 +1120,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
pub fn sharedObjectsCount(target: *const std.Target) u8 {
const target_version = target.os.versionRange().gnuLibCVersion() orelse return 0;
var count: u8 = 0;
for (libs) |lib| {
if (lib.removed_in) |rem_in| {
if (target_version.order(rem_in) != .lt) continue;
}
count += 1;
}
return count;
}
fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
const target_version = comp.getTarget().os.versionRange().gnuLibCVersion().?;


@@ -1011,7 +1011,6 @@ const mingw32_winpthreads_src = [_][]const u8{
"winpthreads" ++ path.sep_str ++ "thread.c",
};
// Note: kernel32 and ntdll are always linked even without targeting MinGW-w64.
pub const always_link_libs = [_][]const u8{
"api-ms-win-crt-conio-l1-1-0",
"api-ms-win-crt-convert-l1-1-0",
@@ -1029,6 +1028,8 @@ pub const always_link_libs = [_][]const u8{
"api-ms-win-crt-time-l1-1-0",
"api-ms-win-crt-utility-l1-1-0",
"advapi32",
"kernel32",
"ntdll",
"shell32",
"user32",
};


@@ -642,10 +642,6 @@ pub fn buildSharedObjects(comp: *Compilation, prog_node: std.Progress.Node) anye
});
}
pub fn sharedObjectsCount() u8 {
return libs.len;
}
fn queueSharedObjects(comp: *Compilation, so_files: BuiltSharedObjects) void {
assert(comp.netbsd_so_files == null);
comp.netbsd_so_files = so_files;


@@ -138,18 +138,23 @@ fn finalize(self: *Trie, allocator: Allocator) !void {
defer ordered_nodes.deinit();
try ordered_nodes.ensureTotalCapacityPrecise(self.nodes.items(.is_terminal).len);
var fifo = DeprecatedLinearFifo(Node.Index).init(allocator);
defer fifo.deinit();
{
var fifo: std.ArrayListUnmanaged(Node.Index) = .empty;
defer fifo.deinit(allocator);
try fifo.writeItem(self.root.?);
try fifo.append(allocator, self.root.?);
while (fifo.readItem()) |next_index| {
const edges = &self.nodes.items(.edges)[next_index];
for (edges.items) |edge_index| {
const edge = self.edges.items[edge_index];
try fifo.writeItem(edge.node);
var i: usize = 0;
while (i < fifo.items.len) {
const next_index = fifo.items[i];
i += 1;
const edges = &self.nodes.items(.edges)[next_index];
for (edges.items) |edge_index| {
const edge = self.edges.items[edge_index];
try fifo.append(allocator, edge.node);
}
ordered_nodes.appendAssumeCapacity(next_index);
}
ordered_nodes.appendAssumeCapacity(next_index);
}
var more: bool = true;
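The deprecated `LinearFifo` becomes a plain array list walked with a read cursor: nodes are only appended, so the queue can be one growing slice, and every visited index stays available for `ordered_nodes`. The pattern in isolation:

const std = @import("std");

test "array list as a fifo" {
    const gpa = std.testing.allocator;
    var queue: std.ArrayListUnmanaged(u32) = .empty;
    defer queue.deinit(gpa);
    try queue.append(gpa, 1);
    var i: usize = 0;
    while (i < queue.items.len) {
        const item = queue.items[i];
        i += 1; // "pop" by advancing the cursor; items stay in place
        if (item < 4) try queue.append(gpa, item * 2); // enqueue newly discovered work
    }
    try std.testing.expectEqualSlices(u32, &.{ 1, 2, 4 }, queue.items);
}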


@@ -16,9 +16,9 @@ mutex: std.Thread.Mutex,
/// Validates that only one `flushTaskQueue` thread is running at a time.
flush_safety: std.debug.SafetyLock,
/// This is the number of prelink tasks which are expected but have not yet been enqueued.
/// Guarded by `mutex`.
pending_prelink_tasks: u32,
/// This value is positive while there are still prelink tasks yet to be queued. Once they are
/// all queued, this value becomes 0, and ZCU tasks can be run. Guarded by `mutex`.
prelink_wait_count: u32,
/// Prelink tasks which have been enqueued and are not yet owned by the worker thread.
/// Allocated into `gpa`, guarded by `mutex`.
@@ -59,7 +59,7 @@ state: union(enum) {
/// The link thread is currently running or queued to run.
running,
/// The link thread is not running or queued, because it has exhausted all immediately available
/// tasks. It should be spawned when more tasks are enqueued. If `pending_prelink_tasks` is not
/// tasks. It should be spawned when more tasks are enqueued. If `prelink_wait_count` is not
/// zero, we are specifically waiting for prelink tasks.
finished,
/// The link thread is not running or queued, because it is waiting for this MIR to be populated.
@@ -73,11 +73,11 @@ state: union(enum) {
const max_air_bytes_in_flight = 10 * 1024 * 1024;
/// The initial `Queue` state, containing no tasks, expecting no prelink tasks, and with no running worker thread.
/// The `pending_prelink_tasks` and `queued_prelink` fields may be modified as needed before calling `start`.
/// The `queued_prelink` field may be appended to before calling `start`.
pub const empty: Queue = .{
.mutex = .{},
.flush_safety = .{},
.pending_prelink_tasks = 0,
.prelink_wait_count = undefined, // set in `start`
.queued_prelink = .empty,
.wip_prelink = .empty,
.queued_zcu = .empty,
@@ -100,17 +100,49 @@ pub fn deinit(q: *Queue, comp: *Compilation) void {
}
/// This is expected to be called exactly once, after which the caller must not directly access
/// `queued_prelink` or `pending_prelink_tasks` any longer. This will spawn the link thread if
/// necessary.
/// `queued_prelink` any longer. This will spawn the link thread if necessary.
pub fn start(q: *Queue, comp: *Compilation) void {
assert(q.state == .finished);
assert(q.queued_zcu.items.len == 0);
// Reset this to 1. We can't init it to 1 in `empty`, because it would fall to 0 on successive
// incremental updates, but we still need the initial 1.
q.prelink_wait_count = 1;
if (q.queued_prelink.items.len != 0) {
q.state = .running;
comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
}
/// Every call to this must be paired with a call to `finishPrelinkItem`.
pub fn startPrelinkItem(q: *Queue) void {
q.mutex.lock();
defer q.mutex.unlock();
assert(q.prelink_wait_count > 0); // must not have finished everything already
q.prelink_wait_count += 1;
}
/// This function must be called exactly one more time than `startPrelinkItem` is. The final call
/// indicates that we have finished calling `startPrelinkItem`, so once all pending items finish,
/// we are ready to move on to ZCU tasks.
pub fn finishPrelinkItem(q: *Queue, comp: *Compilation) void {
{
q.mutex.lock();
defer q.mutex.unlock();
q.prelink_wait_count -= 1;
if (q.prelink_wait_count != 0) return;
// The prelink task count dropped to 0; restart the linker thread if necessary.
switch (q.state) {
.wait_for_mir => unreachable, // we've not started zcu tasks yet
.running => return,
.finished => {},
}
assert(q.queued_prelink.items.len == 0);
// Even if there are no ZCU tasks, we must restart the linker thread to make sure
// that `link.File.prelink()` is called.
q.state = .running;
}
comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
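The `prelink_wait_count` protocol is a wait group with an implicit initial reference: `start` resets the count to 1, each `startPrelinkItem` adds one, and the single extra `finishPrelinkItem` releases the initial reference, so the count can only reach 0 once item discovery is over. A standalone analogue (not the compiler's actual type):

const std = @import("std");

const WaitCount = struct {
    mutex: std.Thread.Mutex = .{},
    count: u32 = 1, // the implicit "discovery still open" reference

    fn startItem(w: *WaitCount) void {
        w.mutex.lock();
        defer w.mutex.unlock();
        std.debug.assert(w.count > 0); // must not have finished everything already
        w.count += 1;
    }

    /// Returns true once all items are done and discovery is closed.
    fn finishItem(w: *WaitCount) bool {
        w.mutex.lock();
        defer w.mutex.unlock();
        w.count -= 1;
        return w.count == 0;
    }
};

test WaitCount {
    var w: WaitCount = .{};
    w.startItem(); // one item discovered
    std.debug.assert(!w.finishItem()); // item done, but discovery still open
    std.debug.assert(w.finishItem()); // the extra final call closes discovery
}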
/// Called by codegen workers after they have populated a `ZcuTask.LinkFunc.SharedMir`. If the link
/// thread was waiting for this MIR, it can resume.
pub fn mirReady(q: *Queue, comp: *Compilation, func_index: InternPool.Index, mir: *ZcuTask.LinkFunc.SharedMir) void {
@@ -130,14 +162,14 @@ pub fn mirReady(q: *Queue, comp: *Compilation, func_index: InternPool.Index, mir
comp.thread_pool.spawnWgId(&comp.link_task_wait_group, flushTaskQueue, .{ q, comp });
}
/// Enqueues all prelink tasks in `tasks`. Asserts that they were expected, i.e. that `tasks.len` is
/// less than or equal to `q.pending_prelink_tasks`. Also asserts that `tasks.len` is not 0.
/// Enqueues all prelink tasks in `tasks`. Asserts that they were expected, i.e. that
/// `prelink_wait_count` is not yet 0. Also asserts that `tasks.len` is not 0.
pub fn enqueuePrelink(q: *Queue, comp: *Compilation, tasks: []const PrelinkTask) Allocator.Error!void {
{
q.mutex.lock();
defer q.mutex.unlock();
assert(q.prelink_wait_count > 0);
try q.queued_prelink.appendSlice(comp.gpa, tasks);
q.pending_prelink_tasks -= @intCast(tasks.len);
switch (q.state) {
.wait_for_mir => unreachable, // we've not started zcu tasks yet
.running => return,
@@ -167,7 +199,7 @@ pub fn enqueueZcu(q: *Queue, comp: *Compilation, task: ZcuTask) Allocator.Error!
try q.queued_zcu.append(comp.gpa, task);
switch (q.state) {
.running, .wait_for_mir => return,
.finished => if (q.pending_prelink_tasks != 0) return,
.finished => if (q.prelink_wait_count > 0) return,
}
// Restart the linker thread, unless it would immediately be blocked
if (task == .link_func and task.link_func.mir.status.load(.acquire) == .pending) {
@@ -194,7 +226,7 @@ fn flushTaskQueue(tid: usize, q: *Queue, comp: *Compilation) void {
defer q.mutex.unlock();
std.mem.swap(std.ArrayListUnmanaged(PrelinkTask), &q.queued_prelink, &q.wip_prelink);
if (q.wip_prelink.items.len == 0) {
if (q.pending_prelink_tasks == 0) {
if (q.prelink_wait_count == 0) {
break :prelink; // prelink is done
} else {
// We're expecting more prelink tasks so can't move on to ZCU tasks.


@@ -312,7 +312,6 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
return jitCmd(gpa, arena, cmd_args, .{
.cmd_name = "resinator",
.root_src_path = "resinator/main.zig",
.windows_libs = &.{"advapi32"},
.depend_on_aro = true,
.prepend_zig_lib_dir_path = true,
.server = use_server,
@@ -337,7 +336,6 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
return jitCmd(gpa, arena, cmd_args, .{
.cmd_name = "std",
.root_src_path = "std-docs.zig",
.windows_libs = &.{"ws2_32"},
.prepend_zig_lib_dir_path = true,
.prepend_zig_exe_path = true,
.prepend_global_cache_path = true,
@@ -3659,6 +3657,7 @@ fn buildOutputType(
} else if (target.os.tag == .windows) {
try test_exec_args.appendSlice(arena, &.{
"--subsystem", "console",
"-lkernel32", "-lntdll",
});
}
@@ -3862,8 +3861,7 @@ fn createModule(
.only_compiler_rt => continue,
}
// We currently prefer import libraries provided by MinGW-w64 even for MSVC.
if (target.os.tag == .windows) {
if (target.isMinGW()) {
const exists = mingw.libExists(arena, target, create_module.dirs.zig_lib, lib_name) catch |err| {
fatal("failed to check zig installation for DLL import libs: {s}", .{
@errorName(err),
@@ -4796,7 +4794,8 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
writeSimpleTemplateFile(Package.Manifest.basename,
\\.{{
\\ .name = .{s},
\\ .version = "{s}",
\\ .version = "0.0.1",
\\ .minimum_zig_version = "{s}",
\\ .paths = .{{""}},
\\ .fingerprint = 0x{x},
\\}}
@@ -4811,6 +4810,7 @@ fn cmdInit(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
};
writeSimpleTemplateFile(Package.build_zig_basename,
\\const std = @import("std");
\\
\\pub fn build(b: *std.Build) void {{
\\ _ = b; // stub
\\}}
@@ -4891,6 +4891,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
var fetch_mode: Package.Fetch.JobQueue.Mode = .needed;
var system_pkg_dir_path: ?[]const u8 = null;
var debug_target: ?[]const u8 = null;
var debug_libc_paths_file: ?[]const u8 = null;
const argv_index_exe = child_argv.items.len;
_ = try child_argv.addOne();
@@ -5014,6 +5015,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
} else {
warn("Zig was compiled without debug extensions. --debug-target has no effect.", .{});
}
} else if (mem.eql(u8, arg, "--debug-libc")) {
if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
i += 1;
if (build_options.enable_debug_extensions) {
debug_libc_paths_file = args[i];
} else {
warn("Zig was compiled without debug extensions. --debug-libc has no effect.", .{});
}
} else if (mem.eql(u8, arg, "--verbose-link")) {
verbose_link = true;
} else if (mem.eql(u8, arg, "--verbose-cc")) {
@@ -5101,6 +5110,14 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.is_explicit_dynamic_linker = false,
};
};
// Likewise, `--debug-libc` allows overriding the libc installation.
const libc_installation: ?*const LibCInstallation = lci: {
const paths_file = debug_libc_paths_file orelse break :lci null;
if (!build_options.enable_debug_extensions) unreachable;
const lci = try arena.create(LibCInstallation);
lci.* = try .parse(arena, paths_file, &resolved_target.result);
break :lci lci;
};
process.raiseFileDescriptorLimit();
@@ -5356,15 +5373,8 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
try root_mod.deps.put(arena, "@build", build_mod);
var windows_libs: std.StringArrayHashMapUnmanaged(void) = .empty;
if (resolved_target.result.os.tag == .windows) {
try windows_libs.ensureUnusedCapacity(arena, 2);
windows_libs.putAssumeCapacity("advapi32", {});
windows_libs.putAssumeCapacity("ws2_32", {}); // for `--listen` (web interface)
}
const comp = Compilation.create(gpa, arena, .{
.libc_installation = libc_installation,
.dirs = dirs,
.root_name = "build",
.config = config,
@@ -5385,7 +5395,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
.cache_mode = .whole,
.reference_trace = reference_trace,
.debug_compile_errors = debug_compile_errors,
.windows_lib_names = windows_libs.keys(),
}) catch |err| {
fatal("unable to create compilation: {s}", .{@errorName(err)});
};
@@ -5489,7 +5498,6 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
const JitCmdOptions = struct {
cmd_name: []const u8,
root_src_path: []const u8,
windows_libs: []const []const u8 = &.{},
prepend_zig_lib_dir_path: bool = false,
prepend_global_cache_path: bool = false,
prepend_zig_exe_path: bool = false,
@@ -5606,13 +5614,6 @@ fn jitCmd(
try root_mod.deps.put(arena, "aro", aro_mod);
}
var windows_libs: std.StringArrayHashMapUnmanaged(void) = .empty;
if (resolved_target.result.os.tag == .windows) {
try windows_libs.ensureUnusedCapacity(arena, options.windows_libs.len);
for (options.windows_libs) |lib| windows_libs.putAssumeCapacity(lib, {});
}
const comp = Compilation.create(gpa, arena, .{
.dirs = dirs,
.root_name = options.cmd_name,
@@ -5623,7 +5624,6 @@ fn jitCmd(
.self_exe_path = self_exe_path,
.thread_pool = &thread_pool,
.cache_mode = .whole,
.windows_lib_names = windows_libs.keys(),
}) catch |err| {
fatal("unable to create compilation: {s}", .{@errorName(err)});
};


@@ -20,7 +20,7 @@ pub fn cannotDynamicLink(target: *const std.Target) bool {
/// Similarly on FreeBSD and NetBSD we always link system libc
/// since this is the stable syscall interface.
pub fn osRequiresLibC(target: *const std.Target) bool {
return target.os.requiresLibC();
return target.requiresLibC();
}
pub fn libCNeedsLibUnwind(target: *const std.Target, link_mode: std.builtin.LinkMode) bool {


@@ -318,6 +318,8 @@ test "tuple type with void field" {
test "zero sized struct in tuple handled correctly" {
const State = struct {
const Self = @This();
const Inner = struct {};
data: @Type(.{
.@"struct" = .{
.is_tuple = true,
@@ -325,10 +327,10 @@ test "zero sized struct in tuple handled correctly" {
.decls = &.{},
.fields = &.{.{
.name = "0",
.type = struct {},
.type = Inner,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
.alignment = @alignOf(Inner),
}},
},
}),


@@ -433,8 +433,8 @@ test "Type.Union" {
.layout = .@"packed",
.tag_type = null,
.fields = &.{
.{ .name = "signed", .type = i32, .alignment = @alignOf(i32) },
.{ .name = "unsigned", .type = u32, .alignment = @alignOf(u32) },
.{ .name = "signed", .type = i32, .alignment = 0 },
.{ .name = "unsigned", .type = u32, .alignment = 0 },
},
.decls = &.{},
},
@@ -735,7 +735,7 @@ test "struct field names sliced at comptime from larger string" {
var it = std.mem.tokenizeScalar(u8, text, '\n');
while (it.next()) |name| {
fields = fields ++ &[_]Type.StructField{.{
.alignment = 0,
.alignment = @alignOf(usize),
.name = name ++ "",
.type = usize,
.default_value_ptr = null,
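These test updates track the new rule that a reified field alignment of 0 is only accepted for packed layouts; natural alignment must now be spelled out with `@alignOf`. A standalone sketch of reflect-and-rebuild under that rule:

const std = @import("std");

/// Rebuilds a struct type from reflection, restoring natural alignment
/// explicitly; leaving .alignment at 0 would now be a compile error for
/// auto layout.
fn Rebuilt(comptime T: type) type {
    const src_fields = @typeInfo(T).@"struct".fields;
    var fields: [src_fields.len]std.builtin.Type.StructField = undefined;
    for (&fields, src_fields) |*dst, src| {
        dst.* = src;
        dst.alignment = @alignOf(src.type);
    }
    return @Type(.{ .@"struct" = .{
        .layout = .auto,
        .fields = &fields,
        .decls = &.{},
        .is_tuple = false,
    } });
}

test Rebuilt {
    const P = Rebuilt(struct { x: u32, y: u8 });
    const p: P = .{ .x = 1, .y = 2 };
    try std.testing.expect(p.x == 1 and p.y == 2);
}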


@@ -2311,3 +2311,11 @@ test "set mutable union by switching on same union" {
try expect(val == .bar);
try expect(val.bar == 2);
}
test "initialize empty field of union inside comptime-known struct constant" {
const Inner = union { none: void, some: u8 };
const Wrapper = struct { inner: Inner };
const val: Wrapper = .{ .inner = .{ .none = {} } };
comptime assert(val.inner.none == {});
}


@@ -1,52 +1,80 @@
pub var global_var: i32 align(0) = undefined;
var global_var: i32 align(0) = undefined;
pub export fn a() void {
export fn a() void {
_ = &global_var;
}
pub extern var extern_var: i32 align(0);
extern var extern_var: i32 align(0);
pub export fn b() void {
export fn b() void {
_ = &extern_var;
}
pub export fn c() align(0) void {}
export fn c() align(0) void {}
pub export fn d() void {
export fn d() void {
_ = *align(0) fn () i32;
}
pub export fn e() void {
export fn e() void {
var local_var: i32 align(0) = undefined;
_ = &local_var;
}
pub export fn f() void {
export fn f() void {
_ = *align(0) i32;
}
pub export fn g() void {
export fn g() void {
_ = []align(0) i32;
}
pub export fn h() void {
export fn h() void {
_ = struct { field: i32 align(0) };
}
pub export fn i() void {
export fn i() void {
_ = union { field: i32 align(0) };
}
export fn j() void {
_ = @Type(.{ .@"struct" = .{
.layout = .auto,
.fields = &.{.{
.name = "test",
.type = u32,
.default_value_ptr = null,
.is_comptime = false,
.alignment = 0,
}},
.decls = &.{},
.is_tuple = false,
} });
}
export fn k() void {
_ = @Type(.{ .pointer = .{
.size = .one,
.is_const = false,
.is_volatile = false,
.alignment = 0,
.address_space = .generic,
.child = u32,
.is_allowzero = false,
.sentinel_ptr = null,
} });
}
// error
// backend=stage2
// target=native
//
// :1:31: error: alignment must be >= 1
// :7:38: error: alignment must be >= 1
// :13:25: error: alignment must be >= 1
// :1:27: error: alignment must be >= 1
// :7:34: error: alignment must be >= 1
// :13:21: error: alignment must be >= 1
// :16:16: error: alignment must be >= 1
// :20:30: error: alignment must be >= 1
// :25:16: error: alignment must be >= 1
// :29:17: error: alignment must be >= 1
// :33:35: error: alignment must be >= 1
// :37:34: error: alignment must be >= 1
// :41:9: error: alignment must be >= 1
// :56:9: error: alignment must be >= 1


@@ -11,5 +11,5 @@ export fn entry2() void {
// backend=stage2
// target=native
//
// :2:22: error: expected type 'u32', found 'bool'
// :6:21: error: fractional component prevents float value '12.34' from coercion to type 'u32'
// :2:22: error: expected type 'u29', found 'bool'
// :6:21: error: fractional component prevents float value '12.34' from coercion to type 'u29'


@@ -1,9 +0,0 @@
export fn entry() void {
_ = @Type(.{ .@"struct" = .{ .layout = .@"packed", .fields = &.{
.{ .name = "one", .type = u4, .default_value_ptr = null, .is_comptime = false, .alignment = 2 },
}, .decls = &.{}, .is_tuple = false } });
}
// error
//
// :2:9: error: alignment in a packed struct field must be set to 0


@@ -0,0 +1,9 @@
const U = packed union {
x: f32,
y: u8 align(10),
z: u32,
};
// error
//
// :3:17: error: unable to override alignment of packed union fields


@@ -75,4 +75,5 @@ comptime {
// :16:5: error: tuple field name '3' does not match field index 0
// :30:5: error: comptime field without default initialization value
// :44:5: error: extern struct fields cannot be marked comptime
// :58:5: error: alignment in a packed struct field must be set to 0
// :58:5: error: alignment of a packed struct field must be set to 0


@@ -43,6 +43,6 @@ comptime {
// error
//
// :2:9: error: alignment value '3' is not a power of two or zero
// :14:9: error: alignment value '5' is not a power of two or zero
// :30:9: error: alignment value '7' is not a power of two or zero
// :2:9: error: alignment value '3' is not a power of two
// :14:9: error: alignment value '5' is not a power of two
// :30:9: error: alignment value '7' is not a power of two


@@ -593,6 +593,7 @@ pub fn lowerToTranslateCSteps(
pub const CaseTestOptions = struct {
test_filters: []const []const u8,
test_target_filters: []const []const u8,
skip_compile_errors: bool,
skip_non_native: bool,
skip_freebsd: bool,
skip_netbsd: bool,
@@ -618,6 +619,8 @@ pub fn lowerToBuildSteps(
if (std.mem.indexOf(u8, case.name, test_filter)) |_| break;
} else if (options.test_filters.len > 0) continue;
if (case.case.? == .Error and options.skip_compile_errors) continue;
if (options.skip_non_native and !case.target.query.isNative())
continue;


@@ -31,5 +31,16 @@ pub fn build(b: *std.Build) void {
run.addArtifactArg(child);
run.expectExitCode(0);
// Use a temporary directory within the cache as the CWD to test
// spawning the child using a path that contains a leading `..` component.
const run_relative = b.addRunArtifact(main);
run_relative.addArtifactArg(child);
const write_tmp_dir = b.addWriteFiles();
const tmp_cwd = write_tmp_dir.getDirectory();
run_relative.addDirectoryArg(tmp_cwd);
run_relative.setCwd(tmp_cwd);
run_relative.expectExitCode(0);
test_step.dependOn(&run.step);
test_step.dependOn(&run_relative.step);
}


@@ -11,7 +11,14 @@ pub fn main() !void {
var it = try std.process.argsWithAllocator(gpa);
defer it.deinit();
_ = it.next() orelse unreachable; // skip binary name
const child_path = it.next() orelse unreachable;
const child_path, const needs_free = child_path: {
const child_path = it.next() orelse unreachable;
const cwd_path = it.next() orelse break :child_path .{ child_path, false };
// If there is a third argument, it is the current CWD somewhere within the cache directory.
// In that case, modify the child path in order to test spawning a path with a leading `..` component.
break :child_path .{ try std.fs.path.relative(gpa, cwd_path, child_path), true };
};
defer if (needs_free) gpa.free(child_path);
var child = std.process.Child.init(&.{ child_path, "hello arg" }, gpa);
child.stdin_behavior = .Pipe;
@@ -39,7 +46,12 @@ pub fn main() !void {
},
else => |term| testError("abnormal child exit: {}", .{term}),
}
return if (parent_test_error) error.ParentTestError else {};
if (parent_test_error) return error.ParentTestError;
// Check that FileNotFound is consistent across platforms when trying to spawn an executable that doesn't exist
const missing_child_path = try std.mem.concat(gpa, u8, &.{ child_path, "_intentionally_missing" });
defer gpa.free(missing_child_path);
try std.testing.expectError(error.FileNotFound, std.process.Child.run(.{ .allocator = gpa, .argv = &.{missing_child_path} }));
}
var parent_test_error = false;


@@ -50,10 +50,6 @@ pub fn build(b: *std.Build) void {
});
if (case.link_libc) exe.root_module.link_libc = true;
if (resolved_target.result.os.tag == .windows) {
exe.root_module.linkSystemLibrary("advapi32", .{});
}
_ = exe.getEmittedBin();
step.dependOn(&exe.step);
@@ -70,10 +66,6 @@ pub fn build(b: *std.Build) void {
});
if (case.link_libc) exe.root_module.link_libc = true;
if (resolved_target.result.os.tag == .windows) {
exe.root_module.linkSystemLibrary("advapi32", .{});
}
const run = b.addRunArtifact(exe);
step.dependOn(&run.step);
}


@@ -47,8 +47,6 @@ pub fn build(b: *std.Build) !void {
}),
});
fuzz.root_module.linkSystemLibrary("advapi32", .{});
const fuzz_max_iterations = b.option(u64, "iterations", "The max fuzz iterations (default: 100)") orelse 100;
const fuzz_iterations_arg = std.fmt.allocPrint(b.allocator, "{}", .{fuzz_max_iterations}) catch @panic("oom");


@@ -28,8 +28,6 @@ pub fn build(b: *std.Build) !void {
}),
});
test_exe.root_module.linkSystemLibrary("advapi32", .{});
const run = b.addRunArtifact(test_exe);
run.addArtifactArg(echo_args);
run.expectExitCode(0);
@@ -46,8 +44,6 @@ pub fn build(b: *std.Build) !void {
}),
});
fuzz.root_module.linkSystemLibrary("advapi32", .{});
const fuzz_max_iterations = b.option(u64, "iterations", "The max fuzz iterations (default: 100)") orelse 100;
const fuzz_iterations_arg = std.fmt.allocPrint(b.allocator, "{}", .{fuzz_max_iterations}) catch @panic("oom");


@@ -28,8 +28,6 @@ pub fn build(b: *std.Build) void {
}),
});
main.root_module.linkSystemLibrary("advapi32", .{});
const run = b.addRunArtifact(main);
run.addArtifactArg(hello);
run.expectExitCode(0);


@@ -1,4 +1,5 @@
const std = @import("std");
const windows = std.os.windows;
const utf16Literal = std.unicode.utf8ToUtf16LeStringLiteral;
@@ -39,6 +40,9 @@ pub fn main() anyerror!void {
// No PATH, so it should fail to find anything not in the cwd
try testExecError(error.FileNotFound, allocator, "something_missing");
// make sure we don't get error.BadPath traversing out of cwd with a relative path
try testExecError(error.FileNotFound, allocator, "..\\.\\.\\.\\\\..\\more_missing");
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
utf16Literal("PATH"),
tmp_absolute_path_w,
@@ -149,6 +153,48 @@ pub fn main() anyerror!void {
// If we try to exec but provide a cwd that is an absolute path, the PATH
// should still be searched and the goodbye.exe in something should be found.
try testExecWithCwd(allocator, "goodbye", tmp_absolute_path, "hello from exe\n");
// introduce some extra path separators into the path; these are dealt with inside the spawn call.
const denormed_something_subdir_size = std.mem.replacementSize(u16, something_subdir_abs_path, utf16Literal("\\"), utf16Literal("\\\\\\\\"));
const denormed_something_subdir_abs_path = try allocator.allocSentinel(u16, denormed_something_subdir_size, 0);
defer allocator.free(denormed_something_subdir_abs_path);
_ = std.mem.replace(u16, something_subdir_abs_path, utf16Literal("\\"), utf16Literal("\\\\\\\\"), denormed_something_subdir_abs_path);
const denormed_something_subdir_wtf8 = try std.unicode.wtf16LeToWtf8Alloc(allocator, denormed_something_subdir_abs_path);
defer allocator.free(denormed_something_subdir_wtf8);
// clear the path to ensure that the match comes from the cwd
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
utf16Literal("PATH"),
null,
) == windows.TRUE);
try testExecWithCwd(allocator, "goodbye", denormed_something_subdir_wtf8, "hello from exe\n");
// normalization should also work if the non-normalized path is found in the PATH var.
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
utf16Literal("PATH"),
denormed_something_subdir_abs_path,
) == windows.TRUE);
try testExec(allocator, "goodbye", "hello from exe\n");
// now make sure we can launch executables "outside" of the cwd
var subdir_cwd = try tmp.dir.openDir(denormed_something_subdir_wtf8, .{});
defer subdir_cwd.close();
try tmp.dir.rename("something/goodbye.exe", "hello.exe");
try subdir_cwd.setAsCwd();
// clear the PATH again
std.debug.assert(windows.kernel32.SetEnvironmentVariableW(
utf16Literal("PATH"),
null,
) == windows.TRUE);
// while we're at it make sure non-windows separators work fine
try testExec(allocator, "../hello", "hello from exe\n");
}
fn testExecError(err: anyerror, allocator: std.mem.Allocator, command: []const u8) !void {


@@ -2238,7 +2238,6 @@ const ModuleTestOptions = struct {
desc: []const u8,
optimize_modes: []const OptimizeMode,
include_paths: []const []const u8,
windows_libs: []const []const u8,
skip_single_threaded: bool,
skip_non_native: bool,
skip_freebsd: bool,
@@ -2373,10 +2372,6 @@ pub fn addModuleTests(b: *std.Build, options: ModuleTestOptions) *Step {
for (options.include_paths) |include_path| these_tests.root_module.addIncludePath(b.path(include_path));
if (target.os.tag == .windows) {
for (options.windows_libs) |lib| these_tests.root_module.linkSystemLibrary(lib, .{});
}
const qualified_name = b.fmt("{s}-{s}-{s}-{s}{s}{s}{s}{s}{s}{s}", .{
options.name,
triple_txt,
@@ -2672,10 +2667,6 @@ pub fn addIncrementalTests(b: *std.Build, test_step: *Step) !void {
}),
});
if (b.graph.host.result.os.tag == .windows) {
incr_check.root_module.linkSystemLibrary("advapi32", .{});
}
var dir = try b.build_root.handle.openDir("test/incremental", .{ .iterate = true });
defer dir.close();