mirror of https://github.com/ziglang/zig.git
synced 2026-02-13 04:48:20 +00:00

hello world compiling again

This commit is contained in:
parent af24e722fb
commit 837f2bfc69
@@ -69,13 +69,11 @@ var stdout_buffer: [std.heap.page_size_min]u8 align(std.heap.page_size_min) = un

fn mainServer() !void {
@disableInstrumentation();
var stdin_reader = std.fs.File.stdin().reader();
var stdout_writer = std.fs.File.stdout().writer();
var stdin_buffered_reader = stdin_reader.interface().buffered(&stdin_buffer);
var stdout_buffered_writer = stdout_writer.interface().buffered(&stdout_buffer);
var stdin_reader = std.fs.File.stdin().reader(&stdin_buffer);
var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
var server = try std.zig.Server.init(.{
.in = &stdin_buffered_reader,
.out = &stdout_buffered_writer,
.in = &stdin_reader.interface,
.out = &stdout_writer.interface,
.zig_version = builtin.zig_version_string,
});
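The hunk above captures the shape of the new API: File.reader() and File.writer() now take their buffer directly and expose a generic std.io.Reader / std.io.Writer through an `interface` field, replacing the old interface().buffered(...) wrappers. A minimal sketch of the new calling pattern, assuming only the names visible in this hunk:

const std = @import("std");

pub fn main() !void {
    var stdout_buffer: [4096]u8 = undefined;
    // File.writer() now owns the buffer and embeds a generic std.io.Writer
    // as its `interface` field.
    var stdout_writer = std.fs.File.stdout().writer(&stdout_buffer);
    const out: *std.io.Writer = &stdout_writer.interface;
    try out.writeAll("hello world\n");
    try out.flush();
}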
@@ -1,22 +1,3 @@
//! Inflate decompresses deflate bit stream. Reads compressed data from reader
//! provided in init. Decompressed data are stored in internal hist buffer and
//! can be accesses iterable `next` or reader interface.
//!
//! Container defines header/footer wrapper around deflate bit stream. Can be
//! gzip or zlib.
//!
//! Deflate bit stream consists of multiple blocks. Block can be one of three types:
//! * stored, non compressed, max 64k in size
//! * fixed, huffman codes are predefined
//! * dynamic, huffman code tables are encoded at the block start
//!
//! `step` function runs decoder until internal `hist` buffer is full. Client than needs to read
//! that data in order to proceed with decoding.
//!
//! Allocates 74.5K of internal buffers, most important are:
//! * 64K for history (CircularBuffer)
//! * ~10K huffman decoders (Literal and DistanceDecoder)

const std = @import("../../std.zig");
const flate = std.compress.flate;
const Container = flate.Container;
@@ -24,16 +5,16 @@ const Token = @import("Token.zig");
const testing = std.testing;
const Decompress = @This();
const Writer = std.io.Writer;
const Reader = std.io.Reader;

input: *std.io.Reader,
// Hashes, produces checksum, of uncompressed data for gzip/zlib footer.
input: *Reader,
interface: Reader,
/// Hashes, produces checksum, of uncompressed data for gzip/zlib footer.
hasher: Container.Hasher,

// dynamic block huffman code decoders
lit_dec: LiteralDecoder,
dst_dec: DistanceDecoder,

// current read state
final_block: bool,
state: State,

@@ -68,8 +49,16 @@ pub const Error = Container.Error || error{
MissingEndOfBlockCode,
};

pub fn init(input: *std.io.Reader, container: Container) Decompress {
pub fn init(input: *Reader, container: Container, buffer: []u8) Decompress {
return .{
.interface = .{
// TODO populate discard so that when an amount is discarded that
// includes an entire frame, skip decoding that frame.
.vtable = &.{ .stream = stream },
.buffer = buffer,
.seek = 0,
.end = 0,
},
.input = input,
.hasher = .init(container),
.lit_dec = .{},
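init now takes the caller-supplied buffer for the exported `interface` reader instead of hiding state behind a separate reader() wrapper. A hedged usage sketch, built only from calls that appear elsewhere in this commit (Reader.fixed, allocRemaining, and the `&.{}` buffer used in the ElfModule hunk further down):

const std = @import("std");

// Hedged sketch: inflate a zlib stream held in memory.
fn inflateZlibAlloc(gpa: std.mem.Allocator, compressed: []const u8, max_len: usize) ![]u8 {
    var in: std.io.Reader = .fixed(compressed);
    // Passing `&.{}` means the Decompress keeps no buffer of its own for
    // the exported interface; output goes straight to the consumer.
    var decompress: std.compress.flate.Decompress = .init(&in, .zlib, &.{});
    return decompress.interface.allocRemaining(gpa, .limited(max_len));
}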
@@ -140,13 +129,9 @@ fn decodeSymbol(self: *Decompress, decoder: anytype) !Symbol {
return sym;
}

pub fn read(
context: ?*anyopaque,
bw: *Writer,
limit: std.io.Limit,
) std.io.Reader.StreamError!usize {
const d: *Decompress = @alignCast(@ptrCast(context));
return readInner(d, bw, limit) catch |err| switch (err) {
pub fn stream(r: *Reader, w: *Writer, limit: std.io.Limit) Reader.StreamError!usize {
const d: *Decompress = @alignCast(@fieldParentPtr("interface", r));
return readInner(d, w, limit) catch |err| switch (err) {
error.EndOfStream => return error.EndOfStream,
error.WriteFailed => return error.WriteFailed,
else => |e| {
@@ -158,11 +143,7 @@ pub fn read(
};
}

fn readInner(
d: *Decompress,
bw: *Writer,
limit: std.io.Limit,
) (Error || error{ WriteFailed, EndOfStream })!usize {
fn readInner(d: *Decompress, w: *Writer, limit: std.io.Limit) (Error || Reader.StreamError)!usize {
const in = d.input;
sw: switch (d.state) {
.protocol_header => switch (d.hasher.container()) {
@@ -266,76 +247,75 @@ fn readInner(
}
},
.stored_block => |remaining_len| {
const out = try bw.writableSliceGreedyPreserving(flate.history_len, 1);
const out = try w.writableSliceGreedyPreserving(flate.history_len, 1);
const limited_out = limit.min(.limited(remaining_len)).slice(out);
const n = try d.input.readVec(bw, &.{limited_out});
const n = try d.input.readVec(&.{limited_out});
if (remaining_len - n == 0) {
d.state = if (d.final_block) .protocol_footer else .block_header;
} else {
d.state = .{ .stored_block = remaining_len - n };
d.state = .{ .stored_block = @intCast(remaining_len - n) };
}
bw.advance(n);
w.advance(n);
return n;
},
.fixed_block => {
const start = bw.count;
while (@intFromEnum(limit) > bw.count - start) {
const start = w.count;
while (@intFromEnum(limit) > w.count - start) {
const code = try d.readFixedCode();
switch (code) {
0...255 => try bw.writeBytePreserving(flate.history_len, @intCast(code)),
0...255 => try w.writeBytePreserving(flate.history_len, @intCast(code)),
256 => {
d.state = if (d.final_block) .protocol_footer else .block_header;
return bw.count - start;
return w.count - start;
},
257...285 => {
// Handles fixed block non literal (length) code.
// Length code is followed by 5 bits of distance code.
const rebased_code = code - 257;
const length = try d.decodeLength(rebased_code);
const length = try d.decodeLength(@intCast(code - 257));
const distance = try d.decodeDistance(try d.takeBitsReverseBuffered(u5));
try writeMatch(bw, length, distance);
try writeMatch(w, length, distance);
},
else => return error.InvalidCode,
}
}
d.state = .fixed_block;
return bw.count - start;
return w.count - start;
},
.dynamic_block => {
// In larger archives most blocks are usually dynamic, so decompression
// performance depends on this logic.
const start = bw.count;
while (@intFromEnum(limit) > bw.count - start) {
const start = w.count;
while (@intFromEnum(limit) > w.count - start) {
const sym = try d.decodeSymbol(&d.lit_dec);

switch (sym.kind) {
.literal => d.hist.write(sym.symbol),
.literal => try w.writeBytePreserving(flate.history_len, sym.symbol),
.match => {
// Decode match backreference <length, distance>
const length = try d.decodeLength(sym.symbol);
const dsm = try d.decodeSymbol(&d.dst_dec);
const distance = try d.decodeDistance(dsm.symbol);
try writeMatch(bw, length, distance);
try writeMatch(w, length, distance);
},
.end_of_block => {
d.state = if (d.final_block) .protocol_footer else .block_header;
return bw.count - start;
return w.count - start;
},
}
}
d.state = .dynamic_block;
return bw.count - start;
return w.count - start;
},
.protocol_footer => {
d.alignBitsToByte();
switch (d.hasher.container()) {
switch (d.hasher) {
.gzip => |*gzip| {
if (try reader.read(u32) != gzip.final()) return error.WrongGzipChecksum;
if (try reader.read(u32) != gzip.count) return error.WrongGzipSize;
if (try in.takeInt(u32, .little) != gzip.crc.final()) return error.WrongGzipChecksum;
if (try in.takeInt(u32, .little) != gzip.count) return error.WrongGzipSize;
},
.zlib => |*zlib| {
const chksum: u32 = @byteSwap(zlib.final());
if (try reader.read(u32) != chksum) return error.WrongZlibChecksum;
if (try in.takeInt(u32, .big) != chksum) return error.WrongZlibChecksum;
},
.raw => {},
}
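Replacing the `read` callback with a `stream` vtable entry relies on the generic Reader being embedded as a field, so the implementation recovers its parent with @fieldParentPtr instead of an opaque context pointer. A hedged sketch of the same pattern for a hypothetical wrapper type (not part of this commit), using only calls that appear in this diff:

const std = @import("std");

// Hypothetical CountingReader: forwards bytes from `input` and counts them.
const CountingReader = struct {
    input: *std.io.Reader,
    interface: std.io.Reader,
    bytes_read: usize = 0,

    fn init(input: *std.io.Reader, buffer: []u8) CountingReader {
        return .{
            .input = input,
            .interface = .{
                .vtable = &.{ .stream = stream },
                .buffer = buffer,
                .seek = 0,
                .end = 0,
            },
        };
    }

    fn stream(r: *std.io.Reader, w: *std.io.Writer, limit: std.io.Limit) std.io.Reader.StreamError!usize {
        // Same trick as Decompress.stream above: the generic Reader is an
        // embedded field, so the implementation can reach its parent state.
        const c: *CountingReader = @alignCast(@fieldParentPtr("interface", r));
        const dest = limit.slice(try w.writableSliceGreedy(1));
        const n = try c.input.readVec(&.{dest});
        w.advance(n);
        c.bytes_read += n;
        return n;
    }
};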
@@ -355,15 +335,12 @@ fn writeMatch(bw: *Writer, length: u16, distance: u16) !void {
@panic("TODO");
}

pub fn reader(self: *Decompress, buffer: []u8) std.io.Reader {
return .{
.context = self,
.vtable = &.{ .read = read },
.buffer = buffer,
};
fn takeBits(d: *Decompress, comptime T: type) !T {
_ = d;
@panic("TODO");
}

fn takeBits(d: *Decompress, comptime T: type) !T {
fn takeBitsReverseBuffered(d: *Decompress, comptime T: type) !T {
_ = d;
@panic("TODO");
}
@@ -725,8 +702,8 @@ test "decompress" {
},
};
for (cases) |c| {
var fb: std.io.Reader = .fixed(c.in);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
var fb: Reader = .fixed(c.in);
var aw: Writer.Allocating = .init(testing.allocator);
defer aw.deinit();

var decompress: Decompress = .init(&fb, .raw);
@@ -784,8 +761,8 @@ test "gzip decompress" {
},
};
for (cases) |c| {
var fb: std.io.Reader = .fixed(c.in);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
var fb: Reader = .fixed(c.in);
var aw: Writer.Allocating = .init(testing.allocator);
defer aw.deinit();

var decompress: Decompress = .init(&fb, .gzip);
@@ -812,8 +789,8 @@ test "zlib decompress" {
},
};
for (cases) |c| {
var fb: std.io.Reader = .fixed(c.in);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
var fb: Reader = .fixed(c.in);
var aw: Writer.Allocating = .init(testing.allocator);
defer aw.deinit();

var decompress: Decompress = .init(&fb, .zlib);
@@ -872,8 +849,8 @@ test "fuzzing tests" {
};

inline for (cases, 0..) |c, case_no| {
var in: std.io.Reader = .fixed(@embedFile("testdata/fuzz/" ++ c.input ++ ".input"));
var aw: std.io.Writer.Allocating = .init(testing.allocator);
var in: Reader = .fixed(@embedFile("testdata/fuzz/" ++ c.input ++ ".input"));
var aw: Writer.Allocating = .init(testing.allocator);
defer aw.deinit();
errdefer std.debug.print("test case failed {}\n", .{case_no});

@@ -893,8 +870,8 @@ test "bug 18966" {
const input = @embedFile("testdata/fuzz/bug_18966.input");
const expect = @embedFile("testdata/fuzz/bug_18966.expect");

var in: std.io.Reader = .fixed(input);
var aw: std.io.Writer.Allocating = .init(testing.allocator);
var in: Reader = .fixed(input);
var aw: Writer.Allocating = .init(testing.allocator);
defer aw.deinit();

var decompress: Decompress = .init(&in, .gzip);
@@ -909,7 +886,7 @@ test "reading into empty buffer" {
0b0000_0001, 0b0000_1100, 0x00, 0b1111_0011, 0xff, // deflate fixed buffer header len, nlen
'H', 'e', 'l', 'l', 'o', ' ', 'w', 'o', 'r', 'l', 'd', 0x0a, // non compressed data
};
var in: std.io.Reader = .fixed(input);
var in: Reader = .fixed(input);
var decomp: Decompress = .init(&in, .raw);
var decompress_br = decomp.readable(&.{});
var buf: [0]u8 = undefined;
@@ -200,10 +200,9 @@ pub fn addCertsFromFilePathAbsolute(
gpa: Allocator,
abs_file_path: []const u8,
) AddCertsFromFilePathError!void {
assert(fs.path.isAbsolute(abs_file_path));
var file = try fs.openFileAbsolute(abs_file_path, .{});
defer file.close();
return addCertsFromFile(cb, gpa, file);
var file_reader: fs.File.Reader = .init(try fs.openFileAbsolute(abs_file_path, .{}), &.{});
defer file_reader.file.close();
return addCertsFromFile(cb, gpa, &file_reader, std.time.timestamp());
}

pub fn addCertsFromFilePath(
@@ -212,9 +211,9 @@ pub fn addCertsFromFilePath(
dir: fs.Dir,
sub_file_path: []const u8,
) AddCertsFromFilePathError!void {
var file = try dir.openFile(sub_file_path, .{});
defer file.close();
return addCertsFromFile(cb, gpa, file);
var file_reader: fs.File.Reader = .init(try dir.openFile(sub_file_path, .{}), &.{});
defer file_reader.file.close();
return addCertsFromFile(cb, gpa, &file_reader, std.time.timestamp());
}

pub const AddCertsFromFileError = Allocator.Error ||
@@ -224,10 +223,14 @@ pub const AddCertsFromFileError = Allocator.Error ||
std.base64.Error ||
error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker };

pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFromFileError!void {
var file_reader = file.reader();
/// `file_reader` needs no buffer since it will be read directly into `Bundle`.
pub fn addCertsFromFile(
cb: *Bundle,
gpa: Allocator,
file_reader: *fs.File.Reader,
now_sec: i64,
) AddCertsFromFileError!void {
const size = try file_reader.getSize();
var br = file_reader.interface().unbuffered();

// We borrow `bytes` as a temporary buffer for the base64-encoded data.
// This is possible by computing the decoded length and reserving the space
@@ -238,7 +241,7 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity);
const end_reserved: u32 = @intCast(cb.bytes.items.len + decoded_size_upper_bound);
const buffer = cb.bytes.allocatedSlice()[end_reserved..];
const end_index = br.readSliceShort(buffer) catch |err| switch (err) {
const end_index = file_reader.interface.readSliceShort(buffer) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
};
const encoded_bytes = buffer[0..end_index];
@@ -246,8 +249,6 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
const begin_marker = "-----BEGIN CERTIFICATE-----";
const end_marker = "-----END CERTIFICATE-----";

const now_sec = std.time.timestamp();

var start_index: usize = 0;
while (mem.indexOfPos(u8, encoded_bytes, start_index, begin_marker)) |begin_marker_start| {
const cert_start = begin_marker_start + begin_marker.len;
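Callers now construct the fs.File.Reader themselves (an empty buffer suffices, since the bytes are decoded straight into the bundle) and pass the current timestamp explicitly. A hedged sketch of the updated call shape; the file name is hypothetical:

const std = @import("std");

fn addBundleFromDir(cb: *std.crypto.Certificate.Bundle, gpa: std.mem.Allocator, dir: std.fs.Dir) !void {
    // "ca-bundle.crt" is only an illustrative path.
    var file_reader: std.fs.File.Reader = .init(try dir.openFile("ca-bundle.crt", .{}), &.{});
    defer file_reader.file.close();
    try cb.addCertsFromFile(gpa, &file_reader, std.time.timestamp());
}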
@@ -2240,9 +2240,10 @@ pub const ElfModule = struct {
if (chdr.ch_type != .ZLIB) continue;
const ch_size = chdr.ch_size;

var zlib_stream: std.compress.flate.Decompress = .init(&section_reader, .zlib);
var zlib_stream: std.compress.flate.Decompress = .init(&section_reader, .zlib, &.{});

const decompressed_section = zlib_stream.reader().readRemainingAlloc(gpa, .limited(ch_size)) catch continue;
const decompressed_section = zlib_stream.interface.allocRemaining(gpa, .limited(ch_size)) catch
continue;
if (decompressed_section.len != ch_size) {
gpa.free(decompressed_section);
continue;
@@ -830,11 +830,11 @@ pub const BufPrintError = error{

/// Print a Formatter string into `buf`. Returns a slice of the bytes printed.
pub fn bufPrint(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintError![]u8 {
var bw: Writer = .fixed(buf);
bw.print(fmt, args) catch |err| switch (err) {
var w: Writer = .fixed(buf);
w.print(fmt, args) catch |err| switch (err) {
error.WriteFailed => return error.NoSpaceLeft,
};
return bw.getWritten();
return w.buffered();
}

pub fn bufPrintZ(buf: []u8, comptime fmt: []const u8, args: anytype) BufPrintError![:0]u8 {
@@ -1012,17 +1012,17 @@ test "int.padded" {
test "buffer" {
{
var buf1: [32]u8 = undefined;
var bw: Writer = .fixed(&buf1);
try bw.printValue("", .{}, 1234, std.options.fmt_max_depth);
try std.testing.expectEqualStrings("1234", bw.getWritten());
var w: Writer = .fixed(&buf1);
try w.printValue("", .{}, 1234, std.options.fmt_max_depth);
try std.testing.expectEqualStrings("1234", w.buffered());

bw = .fixed(&buf1);
try bw.printValue("c", .{}, 'a', std.options.fmt_max_depth);
try std.testing.expectEqualStrings("a", bw.getWritten());
w = .fixed(&buf1);
try w.printValue("c", .{}, 'a', std.options.fmt_max_depth);
try std.testing.expectEqualStrings("a", w.buffered());

bw = .fixed(&buf1);
try bw.printValue("b", .{}, 0b1100, std.options.fmt_max_depth);
try std.testing.expectEqualStrings("1100", bw.getWritten());
w = .fixed(&buf1);
try w.printValue("b", .{}, 0b1100, std.options.fmt_max_depth);
try std.testing.expectEqualStrings("1100", w.buffered());
}
}
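getWritten() becomes buffered() on the generic Writer; the fixed-buffer pattern used by bufPrint then looks like this (a small sketch using only functions shown in the hunks above):

const std = @import("std");

test "fixed Writer sketch" {
    var buf: [32]u8 = undefined;
    var w: std.io.Writer = .fixed(&buf);
    try w.print("{d}+{d}={d}", .{ 2, 2, 4 });
    // buffered() returns the bytes written into the fixed buffer so far.
    try std.testing.expectEqualStrings("2+2=4", w.buffered());
}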
@@ -26,6 +26,7 @@ pub fn init(
mode: File.Mode,
dir: Dir,
close_dir_on_deinit: bool,
write_buffer: []u8,
) InitError!AtomicFile {
var rand_buf: [random_bytes_len]u8 = undefined;
var tmp_path_buf: [tmp_path_len:0]u8 = undefined;
@@ -35,16 +36,13 @@ pub fn init(
const tmp_path = fs.base64_encoder.encode(&tmp_path_buf, &rand_buf);
tmp_path_buf[tmp_path.len] = 0;

const file = dir.createFile(
tmp_path,
.{ .mode = mode, .exclusive = true },
) catch |err| switch (err) {
const file = dir.createFile(tmp_path, .{ .mode = mode, .exclusive = true }) catch |err| switch (err) {
error.PathAlreadyExists => continue,
else => |e| return e,
};

return .{
.file_writer = file.writer(),
.file_writer = file.writer(write_buffer),
.tmp_path_buf = tmp_path_buf,
.dest_basename = dest_basename,
.file_open = true,

@@ -2609,12 +2609,15 @@ pub fn updateFile(
try dest_dir.makePath(dirname);
}

var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = actual_mode });
var buffer: [2000]u8 = undefined;
var atomic_file = try dest_dir.atomicFile(dest_path, .{
.mode = actual_mode,
.write_buffer = &buffer,
});
defer atomic_file.deinit();

var src_reader: File.Reader = .initSize(src_file, &.{}, src_stat.size);
var buffer: [2000]u8 = undefined;
var dest_writer = atomic_file.file_writer.writer(&buffer);
const dest_writer = &atomic_file.file_writer.interface;

dest_writer.writeFileAll(&src_reader, .{}) catch |err| switch (err) {
error.ReadFailed => return src_reader.err.?,
@@ -2715,6 +2718,7 @@ fn copy_file(fd_in: posix.fd_t, fd_out: posix.fd_t, maybe_size: ?u64) CopyFileRa
pub const AtomicFileOptions = struct {
mode: File.Mode = File.default_mode,
make_path: bool = false,
write_buffer: []u8,
};

/// Directly access the `.file` field, and then call `AtomicFile.finish` to
@@ -2732,9 +2736,9 @@ pub fn atomicFile(self: Dir, dest_path: []const u8, options: AtomicFileOptions)
else
try self.openDir(dirname, .{});

return AtomicFile.init(fs.path.basename(dest_path), options.mode, dir, true);
return .init(fs.path.basename(dest_path), options.mode, dir, true, options.write_buffer);
} else {
return AtomicFile.init(dest_path, options.mode, self, false);
return .init(dest_path, options.mode, self, false, options.write_buffer);
}
}
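AtomicFileOptions now requires the caller to supply the File.Writer buffer. A hedged sketch of the updated call-site shape (the file name is hypothetical; the field and function names come from the hunks above):

const std = @import("std");

fn writeAtomically(dir: std.fs.Dir, bytes: []const u8) !void {
    var buffer: [4096]u8 = undefined;
    var atomic_file = try dir.atomicFile("config.txt", .{ .write_buffer = &buffer });
    defer atomic_file.deinit();
    try atomic_file.file_writer.interface.writeAll(bytes);
    try atomic_file.file_writer.interface.flush();
    try atomic_file.finish();
}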
@@ -19,10 +19,8 @@ test "trailers" {
const connection = try net_server.accept();
defer connection.stream.close();

var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);

try expectEqual(.ready, server.reader.state);
@@ -104,10 +102,8 @@ test "HTTP server handles a chunked transfer coding request" {
const connection = try net_server.accept();
defer connection.stream.close();

var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
var request = try server.receiveHead();

@@ -1163,10 +1159,8 @@ test "redirect to different connection" {
global.other_port.?,
});

var stream_reader = connection.stream.reader();
var stream_writer = connection.stream.writer();
var connection_br = stream_reader.interface().buffered(&recv_buffer);
var connection_bw = stream_writer.interface().buffered(&send_buffer);
var connection_br = connection.stream.reader(&recv_buffer);
var connection_bw = connection.stream.writer(&send_buffer);
var server = http.Server.init(&connection_br, &connection_bw);
var request = try server.receiveHead();
try expectEqualStrings(request.head.target, "/help");
@@ -13,7 +13,7 @@ const Limit = std.io.Limit;

pub const Limited = @import("Reader/Limited.zig");

context: ?*anyopaque,
context: ?*anyopaque = null,
vtable: *const VTable,
buffer: []u8,
/// Number of bytes which have been consumed from `buffer`.
@@ -214,7 +214,7 @@ pub const LimitedAllocError = Allocator.Error || ShortError || error{StreamTooLo
pub fn allocRemaining(r: *Reader, gpa: Allocator, limit: Limit) LimitedAllocError![]u8 {
var buffer: ArrayList(u8) = .empty;
defer buffer.deinit(gpa);
try appendRemaining(r, gpa, null, &buffer, limit, 1);
try appendRemaining(r, gpa, null, &buffer, limit);
return buffer.toOwnedSlice(gpa);
}

@@ -237,13 +237,13 @@ pub fn appendRemaining(
limit: Limit,
) LimitedAllocError!void {
const buffer = r.buffer;
const buffered = buffer[r.seek..r.end];
const copy_len = limit.minInt(buffered.len);
const buffer_contents = buffer[r.seek..r.end];
const copy_len = limit.minInt(buffer_contents.len);
try list.ensureUnusedCapacity(gpa, copy_len);
@memcpy(list.unusedCapacitySlice()[0..copy_len], buffer[0..copy_len]);
list.items.len += copy_len;
r.seek += copy_len;
if (copy_len == buffered.len) {
if (copy_len == buffer_contents.len) {
r.seek = 0;
r.end = 0;
}
@@ -251,7 +251,7 @@ pub fn appendRemaining(
while (true) {
try list.ensureUnusedCapacity(gpa, 1);
const dest = remaining.slice(list.unusedCapacitySlice());
const additional_buffer = if (@intFromEnum(remaining) == dest.len) buffer else &.{};
const additional_buffer: []u8 = if (@intFromEnum(remaining) == dest.len) buffer else &.{};
const n = readVec(r, &.{ dest, additional_buffer }) catch |err| switch (err) {
error.EndOfStream => break,
error.ReadFailed => return error.ReadFailed,
@@ -276,7 +276,7 @@ pub fn appendRemaining(
/// The reader's internal logical seek position moves forward in accordance
/// with the number of bytes returned from this function.
pub fn readVec(r: *Reader, data: []const []u8) Error!usize {
return readVec(r, data, .unlimited);
return readVecLimit(r, data, .unlimited);
}

/// Equivalent to `readVec` but reads at most `limit` bytes.
@@ -290,9 +290,9 @@ pub fn readVecLimit(r: *Reader, data: []const []u8, limit: Limit) Error!usize {
comptime assert(@intFromEnum(Limit.unlimited) == std.math.maxInt(usize));
var remaining = @intFromEnum(limit);
for (data, 0..) |buf, i| {
const buffered = r.buffer[r.seek..r.end];
const copy_len = @min(buffered.len, buf.len, remaining);
@memcpy(buf[0..copy_len], buffered[0..copy_len]);
const buffer_contents = r.buffer[r.seek..r.end];
const copy_len = @min(buffer_contents.len, buf.len, remaining);
@memcpy(buf[0..copy_len], buffer_contents[0..copy_len]);
r.seek += copy_len;
remaining -= copy_len;
if (remaining == 0) break;
@@ -354,7 +354,7 @@ pub fn readVecLimit(r: *Reader, data: []const []u8, limit: Limit) Error!usize {
return @intFromEnum(limit) - remaining;
}

pub fn bufferContents(r: *Reader) []u8 {
pub fn buffered(r: *Reader) []u8 {
return r.buffer[r.seek..r.end];
}
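The rename of bufferContents to buffered mirrors the Writer side, and the function only exposes buffer[seek..end] without touching the underlying stream. A small hedged sketch of using it together with toss (both appear in this diff):

const std = @import("std");

// Hedged sketch: consume leading spaces that are already buffered,
// without issuing any underlying read.
fn skipBufferedSpaces(r: *std.io.Reader) void {
    const contents = r.buffered();
    var i: usize = 0;
    while (i < contents.len and contents[i] == ' ') i += 1;
    r.toss(i);
}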
@@ -57,8 +57,9 @@ pub const VTable = struct {
/// Copies contents from an open file to the logical sink. `buffer[0..end]`
/// is consumed first, followed by `limit` bytes from `file_reader`.
///
/// Number of bytes actually written is returned, excluding bytes from
/// `buffer`. Bytes from `buffer` are tracked by modifying `end`.
/// Number of bytes logically written is returned. This excludes bytes from
/// `buffer` because they have already been logically written. Number of
/// bytes consumed from `buffer` are tracked by modifying `end`.
///
/// Number of bytes returned may be zero, which does not necessarily mean
/// end-of-stream. A subsequent call may return nonzero, or signal end of
@@ -162,7 +163,11 @@ pub fn writeSplat(w: *Writer, data: []const []const u8, splat: usize) Error!usiz
assert(data.len > 0);
const buffer = w.buffer;
const count = countSplat(0, data, splat);
if (w.end + count > buffer.len) return w.vtable.drain(w, data, splat);
if (w.end + count > buffer.len) {
const n = try w.vtable.drain(w, data, splat);
w.count += n;
return n;
}
w.count += count;
for (data) |bytes| {
@memcpy(buffer[w.end..][0..bytes.len], bytes);
@@ -213,6 +218,18 @@ pub fn flush(w: *Writer) Error!void {
assert(0 == try w.vtable.drain(w, &.{}, 0));
}

/// Calls `VTable.drain` but hides the last `preserve_length` bytes from the
/// implementation, keeping them buffered.
pub fn drainLimited(w: *Writer, preserve_length: usize) Error!void {
const temp_end = w.end -| preserve_length;
const preserved = w.buffer[temp_end..w.end];
w.end = temp_end;
defer w.end += preserved.len;
assert(0 == try w.vtable.drain(w, &.{""}, 1));
assert(w.end <= temp_end + preserved.len);
@memmove(w.buffer[w.end..][0..preserved.len], preserved);
}

pub fn unusedCapacitySlice(w: *const Writer) []u8 {
return w.buffer[w.end..];
}
@@ -246,13 +263,30 @@ pub fn writableSlice(w: *Writer, len: usize) Error![]u8 {
/// If `minimum_length` is zero, this is equivalent to `unusedCapacitySlice`.
pub fn writableSliceGreedy(w: *Writer, minimum_length: usize) Error![]u8 {
assert(w.buffer.len >= minimum_length);
while (true) {
const cap_slice = w.buffer[w.end..];
if (cap_slice.len >= minimum_length) {
@branchHint(.likely);
return cap_slice;
}
while (w.buffer.len - w.end < minimum_length) {
assert(0 == try w.vtable.drain(w, &.{""}, 1));
} else {
@branchHint(.likely);
return w.buffer[w.end..];
}
}

/// Asserts the provided buffer has total capacity enough for `minimum_length`
/// and `preserve_length` combined.
///
/// Does not `advance` the buffer end position.
///
/// When draining the buffer, ensures that at least `preserve_length` bytes
/// remain buffered.
///
/// If `preserve_length` is zero, this is equivalent to `writableSliceGreedy`.
pub fn writableSliceGreedyPreserving(w: *Writer, preserve_length: usize, minimum_length: usize) Error![]u8 {
assert(w.buffer.len >= preserve_length + minimum_length);
while (w.buffer.len - w.end < minimum_length) {
try drainLimited(w, preserve_length);
} else {
@branchHint(.likely);
return w.buffer[w.end..];
}
}
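The new *Preserving helpers exist so a producer like flate can keep the last history_len bytes of its output resident in the writer's buffer for back-references while the rest is allowed to drain. A hedged sketch of the calling pattern, mirroring the stored_block case earlier in this commit:

const std = @import("std");

// Hedged sketch: copy one batch from a reader into a writer while keeping
// the last `history` bytes of output buffered in the writer.
fn pumpPreserving(in: *std.io.Reader, w: *std.io.Writer, history: usize) !usize {
    // Ask for writable space; if the writer must drain to make room, at
    // least `history` bytes stay buffered.
    const out = try w.writableSliceGreedyPreserving(history, 1);
    const n = try in.readVec(&.{out});
    w.advance(n);
    return n;
}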
@@ -376,20 +410,56 @@ pub fn write(w: *Writer, bytes: []const u8) Error!usize {
w.count += bytes.len;
return bytes.len;
}
return w.vtable.drain(w, &.{bytes}, 1);
const n = try w.vtable.drain(w, &.{bytes}, 1);
w.count += n;
return n;
}

/// Calls `write` as many times as necessary such that all of `bytes` are
/// Asserts `buffer` capacity exceeds `preserve_length`.
pub fn writePreserving(w: *Writer, preserve_length: usize, bytes: []const u8) Error!usize {
assert(preserve_length <= w.buffer.len);
if (w.end + bytes.len <= w.buffer.len) {
@branchHint(.likely);
@memcpy(w.buffer[w.end..][0..bytes.len], bytes);
w.end += bytes.len;
w.count += bytes.len;
return bytes.len;
}
const temp_end = w.end -| preserve_length;
const preserved = w.buffer[temp_end..w.end];
w.end = temp_end;
defer w.end += preserved.len;
const n = try w.vtable.drain(w, &.{bytes}, 1);
w.count += n;
assert(w.end <= temp_end + preserved.len);
@memmove(w.buffer[w.end..][0..preserved.len], preserved);
return n;
}

/// Calls `drain` as many times as necessary such that all of `bytes` are
/// transferred.
pub fn writeAll(w: *Writer, bytes: []const u8) Error!void {
var index: usize = 0;
while (index < bytes.len) index += try w.write(bytes[index..]);
}

/// Calls `drain` as many times as necessary such that all of `bytes` are
/// transferred.
///
/// When draining the buffer, ensures that at least `preserve_length` bytes
/// remain buffered.
///
/// Asserts `buffer` capacity exceeds `preserve_length`.
pub fn writeAllPreserving(w: *Writer, preserve_length: usize, bytes: []const u8) Error!void {
var index: usize = 0;
while (index < bytes.len) index += try w.writePreserving(preserve_length, bytes[index..]);
}

pub fn print(w: *Writer, comptime format: []const u8, args: anytype) Error!void {
try std.fmt.format(w, format, args);
}

/// Calls `drain` as many times as necessary such that `byte` is transferred.
pub fn writeByte(w: *Writer, byte: u8) Error!void {
while (w.buffer.len - w.end == 0) {
const n = try w.vtable.drain(w, &.{&.{byte}}, 1);
@@ -405,6 +475,19 @@ pub fn writeByte(w: *Writer, byte: u8) Error!void {
}
}

/// When draining the buffer, ensures that at least `preserve_length` bytes
/// remain buffered.
pub fn writeBytePreserving(w: *Writer, preserve_length: usize, byte: u8) Error!void {
while (w.buffer.len - w.end == 0) {
try drainLimited(w, preserve_length);
} else {
@branchHint(.likely);
w.buffer[w.end] = byte;
w.end += 1;
w.count += 1;
}
}

/// Writes the same byte many times, performing the underlying write call as
/// many times as necessary.
pub fn splatByteAll(w: *Writer, byte: u8, n: usize) Error!void {
@@ -496,9 +579,43 @@ pub fn writeSliceSwap(w: *Writer, Elem: type, slice: []const Elem) Error!void {
/// See `sendFileReading` for an alternative that does not have
/// `error.Unimplemented` in the error set.
pub fn sendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
const end = w.end;
const n = try w.vtable.sendFile(w, file_reader, limit);
return n -| end;
return w.vtable.sendFile(w, file_reader, limit);
}

/// Forwards a `sendFile` to a second `Writer` instance. `w` is only used for
/// its buffer, but it has its `end` and `count` adjusted accordingly depending
/// on how much was consumed.
///
/// Returns how many bytes from `file_reader` were consumed.
pub fn sendFileTo(w: *Writer, other: *Writer, file_reader: *File.Reader, limit: Limit) FileError!usize {
const header = w.buffered();
const new_end = other.end + header.len;
if (new_end <= other.buffer.len) {
@memcpy(other.buffer[other.end..][0..header.len], header);
other.end = new_end;
w.end = 0;
return other.vtable.sendFile(other, file_reader, limit);
}
assert(header.len > 0);
var vec_buf: [2][]const u8 = .{ header, undefined };
var vec_i: usize = 1;
const buffered_contents = limit.slice(file_reader.buffered());
if (buffered_contents.len > 0) {
vec_buf[vec_i] = buffered_contents;
vec_i += 1;
}
const n = try other.vtable.drain(other, vec_buf[0..vec_i], 1);
other.count += n;
if (n < header.len) {
const remaining = w.buffer[n..w.end];
@memmove(w.buffer[0..remaining.len], remaining);
w.end = remaining.len;
return 0;
}
w.end = 0;
const tossed = n - header.len;
file_reader.interface.toss(tossed);
return tossed;
}

/// Asserts nonzero buffer capacity.
@@ -1696,8 +1813,12 @@ pub fn discardingSendFile(w: *Writer, file_reader: *File.Reader, limit: Limit) F
}
}

/// This function is used by `VTable.drain` function implementations to
/// implement partial drains.
/// Removes the first `n` bytes from `buffer` by shifting buffer contents,
/// returning how many bytes are left after consuming the entire buffer, or
/// zero if the entire buffer was not consumed.
///
/// Useful for `VTable.drain` function implementations to implement partial
/// drains.
pub fn consume(w: *Writer, n: usize) usize {
if (n < w.end) {
const remaining = w.buffer[n..w.end];
@@ -2093,7 +2093,7 @@ pub const Stream = struct {
},
.buffer = buffer,
},
.file_writer = .initMode(stream.handle, &.{}, .streaming),
.file_writer = .initMode(.{ .handle = stream.handle }, &.{}, .streaming),
};
}