mirror of https://github.com/ziglang/zig.git (synced 2025-12-15 18:53:07 +00:00)

commit 9dc0b4a98f
parent 6ac7931bec

    back to the anyerror!usize vtable
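The diff below reverts the stream vtable functions from returning result structs (`std.io.Writer.Result` / `std.io.Reader.RwResult`, which bundle `err`, `len`, and `end` fields) back to plain error unions: writer hooks return `anyerror!usize`, reader hooks return `anyerror!Status`, a packed struct carrying a byte count plus an end-of-stream flag. The following is a minimal sketch of the two shapes, using simplified stand-in types and hypothetical function names (`writeSomething`, `readSomething`) rather than the real `std.io` declarations, which may differ in detail:

```zig
const std = @import("std");

// Stand-ins mirroring declarations visible in this diff; the real ones live in
// the std.io Reader/Writer sources and may differ.
const Len = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } });

const Status = packed struct(usize) {
    /// Number of bytes transferred; zero does not mean end of stream.
    len: Len = 0,
    /// Indicates end of stream.
    end: bool = false,
};

// Old shape: every vtable call returned a result struct bundling error,
// length, and end-of-stream, which callers had to unpack field by field.
const OldResult = struct {
    err: anyerror!void = {},
    len: usize = 0,
    end: bool = false,
};

// New shape: plain error unions, so `try` and `catch` work directly.
fn writeSomething(bytes: []const u8) anyerror!usize {
    if (bytes.len == 0) return error.NoSpaceLeft;
    return bytes.len; // pretend everything was written
}

fn readSomething(out: []u8) anyerror!Status {
    _ = out;
    return .{ .len = 0, .end = true }; // pretend the stream ended immediately
}

pub fn main() !void {
    const n = try writeSomething("hello"); // `try` replaces checking `result.err`
    var buf: [16]u8 = undefined;
    const status = try readSomething(&buf);
    std.debug.print("wrote {d} bytes, end of stream: {}\n", .{ n, status.end });
}
```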
@@ -326,6 +326,7 @@ pub fn Inflate(comptime container: Container, comptime Lookahead: type) type {
/// returned bytes means end of stream reached. With limit=0 returns as
/// much data it can. It newer will be more than 65536 bytes, which is
/// size of internal buffer.
/// TODO merge this logic into reader_streamRead and reader_streamReadVec
pub fn get(self: *Self, limit: usize) Error![]const u8 {
while (true) {
const out = self.hist.readAtMost(limit);

@@ -342,31 +343,27 @@ pub fn Inflate(comptime container: Container, comptime Lookahead: type) type {
ctx: ?*anyopaque,
bw: *std.io.BufferedWriter,
limit: std.io.Reader.Limit,
) std.io.Reader.RwResult {
) anyerror!std.io.Reader.Status {
const self: *Self = @alignCast(@ptrCast(ctx));
const out = bw.writableSlice(1) catch |err| return .{ .write_err = err };
const in = self.get(limit.min(out.len)) catch |err| return .{ .read_err = err };
if (in.len == 0) return .{ .read_end = true };
const out = try bw.writableSlice(1);
const in = try self.get(limit.min(out.len));
@memcpy(out[0..in.len], in);
return .{ .len = in.len };
bw.advance(in.len);
return .{ .len = in.len, .end = in.len == 0 };
}

fn reader_streamReadVec(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Result {
fn reader_streamReadVec(ctx: ?*anyopaque, data: []const []u8) anyerror!std.io.Reader.Status {
const self: *Self = @alignCast(@ptrCast(ctx));
var total: usize = 0;
for (data) |buffer| {
if (buffer.len == 0) break;
const out = self.get(buffer.len) catch |err| {
return .{ .len = total, .err = err };
};
if (out.len == 0) break;
@memcpy(buffer[0..out.len], out);
total += out.len;
for (data) |out| {
if (out.len == 0) continue;
const in = try self.get(out.len);
@memcpy(out[0..in.len], in);
return .{ .len = @intCast(in.len), .end = in.len == 0 };
}
return .{ .len = total, .end = total == 0 };
return .{};
}

pub fn streamReadVec(self: *Self, data: []const []u8) std.io.Reader.Result {
pub fn streamReadVec(self: *Self, data: []const []u8) anyerror!std.io.Reader.Status {
return reader_streamReadVec(self, data);
}

@@ -2247,16 +2247,15 @@ pub const ElfModule = struct {
errdefer gpa.free(decompressed_section);

{
var read_index: usize = 0;
var i: usize = 0;
while (true) {
const read_result = zlib_stream.streamReadVec(&.{decompressed_section[read_index..]});
read_result.err catch {
const status = zlib_stream.streamReadVec(&.{decompressed_section[i..]}) catch {
gpa.free(decompressed_section);
continue :shdrs;
};
read_index += read_result.len;
if (read_index == decompressed_section.len) break;
if (read_result.end) {
i += status.len;
if (i == decompressed_section.len) break;
if (status.end) {
gpa.free(decompressed_section);
continue :shdrs;
}
@@ -2034,7 +2034,7 @@ pub const VirtualMachine = struct {
const streams: [2]*std.io.BufferedReader = .{ &cie_stream, &fde_stream };

for (&streams, 0..) |stream, i| {
while (stream.seek < stream.buffer.len) {
while (stream.seek < stream.storageBuffer().len) {
const instruction = try std.debug.Dwarf.call_frame.Instruction.read(stream, addr_size_bytes, endian);
prev_row = try self.step(allocator, cie, i == 0, instruction);
if (pc < fde.pc_begin + self.current_row.offset) return prev_row;

@@ -1499,7 +1499,7 @@ pub fn writeFileAll(self: File, in_file: File, args: WriteFileOptions) WriteFile
error.FileDescriptorNotASocket,
error.NetworkUnreachable,
error.NetworkSubsystemFailed,
=> return self.writeFileAllUnseekable(in_file, args),
=> return self.writeFileUnseekableAll(in_file, args),

else => |e| return e,
};
@@ -1507,53 +1507,11 @@ pub fn writeFileAll(self: File, in_file: File, args: WriteFileOptions) WriteFile

/// Does not try seeking in either of the File parameters.
/// See `writeFileAll` as an alternative to calling this.
pub fn writeFileAllUnseekable(self: File, in_file: File, args: WriteFileOptions) WriteFileError!void {
// TODO make `try @errorCast(...)` work
return @errorCast(writeFileAllUnseekableInner(self, in_file, args));
}

fn writeFileAllUnseekableInner(out_file: File, in_file: File, args: WriteFileOptions) anyerror!void {
const headers = args.headers_and_trailers[0..args.header_count];
const trailers = args.headers_and_trailers[args.header_count..];

try out_file.writevAll(headers);

// Some possible optimizations here:
// * Could writev buffer multiple times if the amount to discard is larger than 4096
// * Could combine discard and read in one readv if amount to discard is small

var buffer: [4096]u8 = undefined;
var remaining = args.in_offset;
while (remaining > 0) {
const n = try in_file.read(buffer[0..@min(buffer.len, remaining)]);
if (n == 0) return error.EndOfStream;
remaining -= n;
}
if (args.in_len) |len| {
remaining = len;
var buffer_index: usize = 0;
while (remaining > 0) {
const n = buffer_index + try in_file.read(buffer[buffer_index..@min(buffer.len, remaining)]);
if (n == 0) return error.EndOfStream;
const written = try out_file.write(buffer[0..n]);
if (written == 0) return error.EndOfStream;
remaining -= written;
std.mem.copyForwards(u8, &buffer, buffer[written..n]);
buffer_index = n - written;
}
} else {
var buffer_index: usize = 0;
while (true) {
const n = buffer_index + try in_file.read(buffer[buffer_index..]);
if (n == 0) break;
const written = try out_file.write(buffer[0..n]);
if (written == 0) return error.EndOfStream;
std.mem.copyForwards(u8, &buffer, buffer[written..n]);
buffer_index = n - written;
}
}

try out_file.writevAll(trailers);
pub fn writeFileUnseekableAll(out_file: File, in_file: File, args: WriteFileOptions) WriteFileError!void {
_ = out_file;
_ = in_file;
_ = args;
@panic("TODO call writeFileUnseekable multiple times");
}

/// Low level function which can fail for OS-specific reasons.
@@ -1635,6 +1593,30 @@ pub fn reader(file: File) std.io.Reader {
};
}

pub fn unseekableReader(file: File) std.io.Reader {
return .{
.context = handleToOpaque(file.handle),
.vtable = .{
.posRead = null,
.posReadVec = null,
.streamRead = reader_streamRead,
.streamReadVec = reader_streamReadVec,
},
};
}

pub fn unstreamableReader(file: File) std.io.Reader {
return .{
.context = handleToOpaque(file.handle),
.vtable = .{
.posRead = reader_posRead,
.posReadVec = reader_posReadVec,
.streamRead = null,
.streamReadVec = null,
},
};
}

pub fn writer(file: File) std.io.Writer {
return .{
.context = handleToOpaque(file.handle),

@@ -1692,12 +1674,12 @@ pub fn reader_streamReadVec(context: ?*anyopaque, data: []const []u8) anyerror!s
};
}

pub fn writer_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Result {
pub fn writer_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const file = opaqueToHandle(context);
var splat_buffer: [256]u8 = undefined;
if (is_windows) {
if (data.len == 1 and splat == 0) return 0;
return .{ .len = windows.WriteFile(file, data[0], null) catch |err| return .{ .err = err } };
return windows.WriteFile(file, data[0], null);
}
var iovecs: [max_buffers_len]std.posix.iovec_const = undefined;
var len: usize = @min(iovecs.len, data.len);
@@ -1706,8 +1688,8 @@ pub fn writer_writeSplat(context: ?*anyopaque, data: []const []const u8, splat:
.len = d.len,
};
switch (splat) {
0 => return .{ .len = std.posix.writev(file, iovecs[0 .. len - 1]) catch |err| return .{ .err = err } },
1 => return .{ .len = std.posix.writev(file, iovecs[0..len]) catch |err| return .{ .err = err } },
0 => return std.posix.writev(file, iovecs[0 .. len - 1]),
1 => return std.posix.writev(file, iovecs[0..len]),
else => {
const pattern = data[data.len - 1];
if (pattern.len == 1) {

@@ -1725,21 +1707,21 @@ pub fn writer_writeSplat(context: ?*anyopaque, data: []const []const u8, splat:
iovecs[len] = .{ .base = &splat_buffer, .len = remaining_splat };
len += 1;
}
return .{ .len = std.posix.writev(file, iovecs[0..len]) catch |err| return .{ .err = err } };
return std.posix.writev(file, iovecs[0..len]);
}
},
}
return .{ .len = std.posix.writev(file, iovecs[0..len]) catch |err| return .{ .err = err } };
return std.posix.writev(file, iovecs[0..len]);
}

pub fn writer_writeFile(
context: ?*anyopaque,
in_file: std.fs.File,
in_offset: u64,
in_offset: std.io.Writer.Offset,
in_len: std.io.Writer.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) std.io.Writer.Result {
) anyerror!usize {
const out_fd = opaqueToHandle(context);
const in_fd = in_file.handle;
const len_int = switch (in_len) {
@@ -1747,6 +1729,26 @@ pub fn writer_writeFile(
.entire_file => 0,
else => in_len.int(),
};
if (native_os == .linux) sf: {
// Linux sendfile does not support headers or trailers but it does
// support a streaming read from in_file.
if (headers_len > 0) return writer_writeSplat(context, headers_and_trailers[0..headers_len], 1);
const max_count = 0x7ffff000; // Avoid EINVAL.
const smaller_len = if (len_int == 0) max_count else @min(len_int, max_count);
var off: std.os.linux.off_t = undefined;
const off_ptr: ?*std.os.linux.off_t = if (in_offset.toInt()) |offset| b: {
off = try std.math.cast(std.os.linux.off_t, offset);
break :b &off;
} else null;
const n = std.os.linux.wrapped.sendfile(out_fd, in_fd, off_ptr, smaller_len) catch |err| switch (err) {
error.UnsupportedOperation => break :sf,
error.Unseekable => break :sf,
error.Unexpected => break :sf,
else => |e| return e,
};
if (in_offset.toInt()) |offset| assert(n == off - offset);
return n;
}
var iovecs_buffer: [max_buffers_len]std.posix.iovec_const = undefined;
const iovecs = iovecs_buffer[0..@min(iovecs_buffer.len, headers_and_trailers.len)];
for (iovecs, headers_and_trailers[0..iovecs.len]) |*v, d| v.* = .{ .base = d.ptr, .len = d.len };

@@ -1783,7 +1785,7 @@ fn writeFileUnseekable(
@panic("TODO writeFileUnseekable");
}

fn handleToOpaque(handle: Handle) *anyopaque {
fn handleToOpaque(handle: Handle) ?*anyopaque {
return switch (@typeInfo(Handle)) {
.pointer => @ptrCast(handle),
.int => @ptrFromInt(@as(u32, @bitCast(handle))),

@@ -1791,7 +1793,7 @@ fn handleToOpaque(handle: Handle) *anyopaque {
};
}

fn opaqueToHandle(userdata: *anyopaque) Handle {
fn opaqueToHandle(userdata: ?*anyopaque) Handle {
return switch (@typeInfo(Handle)) {
.pointer => @ptrCast(userdata),
.int => @intCast(@intFromPtr(userdata)),
@@ -1976,9 +1978,13 @@ pub fn downgradeLock(file: File) LockError!void {
}
}

const builtin = @import("builtin");
const Os = std.builtin.Os;
const native_os = builtin.os.tag;
const is_windows = native_os == .windows;

const File = @This();
const std = @import("../std.zig");
const builtin = @import("builtin");
const Allocator = std.mem.Allocator;
const posix = std.posix;
const io = std.io;

@@ -1986,7 +1992,5 @@ const math = std.math;
const assert = std.debug.assert;
const linux = std.os.linux;
const windows = std.os.windows;
const Os = std.builtin.Os;
const maxInt = std.math.maxInt;
const is_windows = builtin.os.tag == .windows;
const Alignment = std.mem.Alignment;
@@ -23,6 +23,69 @@ pub fn init(br: *BufferedReader, r: Reader, buffer: []u8) void {
br.storage.initFixed(buffer);
}

const eof_writer: std.io.Writer.VTable = .{
.writeSplat = eof_writeSplat,
.writeFile = eof_writeFile,
};
const eof_reader: std.io.Reader.VTable = .{
.posRead = eof_posRead,
.posReadVec = eof_posReadVec,
.streamRead = eof_streamRead,
.streamReadVec = eof_streamReadVec,
};

fn eof_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!Reader.Status {
_ = context;
_ = data;
_ = splat;
return error.NoSpaceLeft;
}

fn eof_writeFile(
context: ?*anyopaque,
file: std.fs.File,
offset: u64,
len: Reader.FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) anyerror!Reader.Status {
_ = context;
_ = file;
_ = offset;
_ = len;
_ = headers_and_trailers;
_ = headers_len;
return error.NoSpaceLeft;
}

fn eof_posRead(ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Reader.Limit, offset: u64) anyerror!Reader.Status {
_ = ctx;
_ = bw;
_ = limit;
_ = offset;
return error.EndOfStream;
}

fn eof_posReadVec(ctx: ?*anyopaque, data: []const []u8, offset: u64) anyerror!Reader.Status {
_ = ctx;
_ = data;
_ = offset;
return error.EndOfStream;
}

fn eof_streamRead(ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Reader.Limit) Reader.Status {
_ = ctx;
_ = bw;
_ = limit;
return error.EndOfStream;
}

fn eof_streamReadVec(ctx: ?*anyopaque, data: []const []u8) Reader.Status {
_ = ctx;
_ = data;
return error.EndOfStream;
}

/// Constructs `br` such that it will read from `buffer` and then end.
pub fn initFixed(br: *BufferedReader, buffer: []const u8) void {
br.* = .{

@@ -31,19 +94,16 @@ pub fn initFixed(br: *BufferedReader, buffer: []const u8) void {
.buffer = .initBuffer(@constCast(buffer)),
.unbuffered_writer = .{
.context = undefined,
.vtable = &std.io.Writer.VTable.eof,
.vtable = &eof_writer,
},
},
.unbuffered_reader = &.{
.context = undefined,
.vtable = &std.io.Reader.VTable.eof,
},
.unbuffered_reader = &.{ .context = undefined, .vtable = &eof_reader },
};
}

pub fn storageBuffer(br: *BufferedReader) []u8 {
assert(br.storage.unbuffered_writer.vtable == &std.io.Writer.VTable.eof);
assert(br.unbuffered_reader.vtable == &std.io.Reader.VTable.eof);
assert(br.storage.unbuffered_writer.vtable == &eof_writer);
assert(br.unbuffered_reader.vtable == &eof_reader);
return br.storage.buffer.allocatedSlice();
}
@@ -43,7 +43,7 @@ const fixed_vtable: Writer.VTable = .{
};

/// Replaces the `BufferedWriter` with a new one that writes to `buffer` and
/// then ends when it is full.
/// returns `error.NoSpaceLeft` when it is full.
pub fn initFixed(bw: *BufferedWriter, buffer: []u8) void {
bw.* = .{
.unbuffered_writer = .{

@@ -86,25 +86,25 @@ pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) anyerror![]u8 {
return cap_slice;
}
const buffer = list.items;
const result = bw.unbuffered_writer.write(buffer);
if (result.len == buffer.len) {
const n = try bw.unbuffered_writer.write(buffer);
if (n == buffer.len) {
@branchHint(.likely);
list.items.len = 0;
try result.err;
return list.unusedCapacitySlice();
}
if (result.len > 0) {
const remainder = buffer[result.len..];
if (n > 0) {
const remainder = buffer[n..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
}
try result.err;
return list.unusedCapacitySlice();
}

/// After calling `writableSlice`, this function tracks how many bytes were written to it.
pub fn advance(bw: *BufferedWriter, n: usize) void {
bw.items.len += n;
const list = &bw.buffer;
list.items.len += n;
assert(list.items.len <= list.capacity);
}

/// The `data` parameter is mutable because this function needs to mutate the

@@ -122,15 +122,15 @@ pub fn writevAll(bw: *BufferedWriter, data: [][]const u8) anyerror!void {
}
}

pub fn writeSplat(bw: *BufferedWriter, data: []const []const u8, splat: usize) Writer.Result {
pub fn writeSplat(bw: *BufferedWriter, data: []const []const u8, splat: usize) anyerror!usize {
return passthru_writeSplat(bw, data, splat);
}

pub fn writev(bw: *BufferedWriter, data: []const []const u8) Writer.Result {
pub fn writev(bw: *BufferedWriter, data: []const []const u8) anyerror!usize {
return passthru_writeSplat(bw, data, 1);
}

fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Writer.Result {
fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
const list = &bw.buffer;
const buffer = list.allocatedSlice();
@@ -156,45 +156,27 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
if (len >= remaining_data.len) {
@branchHint(.likely);
// Made it past the headers, so we can enable splatting.
const result = bw.unbuffered_writer.writeSplat(send_buffers, splat);
const n = result.len;
const n = try bw.unbuffered_writer.writeSplat(send_buffers, splat);
if (n < end) {
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return .{
.err = result.err,
.len = end - start_end,
.end = result.end,
};
return end - start_end;
}
list.items.len = 0;
return .{
.err = result.err,
.len = n - start_end,
.end = result.end,
};
return n - start_end;
}
const result = try bw.unbuffered_writer.writeSplat(send_buffers, 1);
const n = result.len;
const n = try bw.unbuffered_writer.writeSplat(send_buffers, 1);
if (n < end) {
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return .{
.err = result.err,
.len = end - start_end,
.end = result.end,
};
return end - start_end;
}
list.items.len = 0;
return .{
.err = result.err,
.len = n - start_end,
.end = result.end,
};
return n - start_end;
}

const pattern = data[data.len - 1];

@@ -204,7 +186,7 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
// It was added in the loop above; undo it here.
end -= pattern.len;
list.items.len = end;
return .{ .len = end - start_end };
return end - start_end;
}

const remaining_splat = splat - 1;

@@ -212,7 +194,7 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
switch (pattern.len) {
0 => {
list.items.len = end;
return .{ .len = end - start_end };
return end - start_end;
},
1 => {
const new_end = end + remaining_splat;

@@ -220,29 +202,20 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
@branchHint(.likely);
@memset(buffer[end..new_end], pattern[0]);
list.items.len = new_end;
return .{ .len = new_end - start_end };
return new_end - start_end;
}
buffers[0] = buffer[0..end];
buffers[1] = pattern;
const result = bw.unbuffered_writer.writeSplat(buffers[0..2], remaining_splat);
const n = result.len;
const n = try bw.unbuffered_writer.writeSplat(buffers[0..2], remaining_splat);
if (n < end) {
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return .{
.err = result.err,
.len = end - start_end,
.end = result.end,
};
return end - start_end;
}
list.items.len = 0;
return .{
.err = result.err,
.len = n - start_end,
.end = result.end,
};
return n - start_end;
},
else => {
const new_end = end + pattern.len * remaining_splat;

@@ -252,29 +225,20 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
@memcpy(buffer[end..][0..pattern.len], pattern);
}
list.items.len = new_end;
return .{ .len = new_end - start_end };
return new_end - start_end;
}
buffers[0] = buffer[0..end];
buffers[1] = pattern;
const result = bw.unbuffered_writer.writeSplat(buffers[0..2], remaining_splat);
const n = result.len;
const n = try bw.unbuffered_writer.writeSplat(buffers[0..2], remaining_splat);
if (n < end) {
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return .{
.err = result.err,
.len = end - start_end,
.end = result.end,
};
return end - start_end;
}
list.items.len = 0;
return .{
.err = result.err,
.len = n - start_end,
.end = result.end,
};
return n - start_end;
},
}
}
@@ -282,13 +246,12 @@ fn passthru_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: us
/// When this function is called it means the buffer got full, so it's time
/// to return an error. However, we still need to make sure all of the
/// available buffer has been filled.
fn fixed_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Writer.Result {
fn fixed_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
const bw: *BufferedWriter = @alignCast(@ptrCast(context));
const list = &bw.buffer;
const start_len = list.items.len;
for (data) |bytes| {
const dest = list.unusedCapacitySlice();
if (dest.len == 0) return .{ .len = list.items.len - start_len, .end = true };
if (dest.len == 0) return error.NoSpaceLeft;
const len = @min(bytes.len, dest.len);
@memcpy(dest[0..len], bytes[0..len]);
list.items.len += len;

@@ -301,60 +264,43 @@ fn fixed_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize
else => for (0..splat - 1) |i| @memcpy(dest[i * pattern.len ..][0..pattern.len], pattern),
}
list.items.len = list.capacity;
return .{ .len = list.items.len - start_len, .end = true };
return error.NoSpaceLeft;
}

pub fn write(bw: *BufferedWriter, bytes: []const u8) Writer.Result {
pub fn write(bw: *BufferedWriter, bytes: []const u8) anyerror!usize {
const list = &bw.buffer;
const buffer = list.allocatedSlice();
const end = list.items.len;
const new_end = end + bytes.len;
if (new_end > buffer.len) {
var data: [2][]const u8 = .{ buffer[0..end], bytes };
const result = bw.unbuffered_writer.writev(&data);
const n = result.len;
const n = try bw.unbuffered_writer.writev(&data);
if (n < end) {
@branchHint(.unlikely);
const remainder = buffer[n..end];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
list.items.len = remainder.len;
return .{
.err = result.err,
.len = 0,
.end = result.end,
};
return 0;
}
list.items.len = 0;
return .{
.err = result.err,
.len = n - end,
.end = result.end,
};
return n - end;
}
@memcpy(buffer[end..new_end], bytes);
list.items.len = new_end;
return bytes.len;
}

pub fn writeAll(bw: *BufferedWriter, bytes: []const u8) anyerror!void {
if ((try writeUntilEnd(bw, bytes)) != bytes.len) return error.WriteStreamEnd;
}

/// Convenience function that calls `writeAll` and then returns `bytes.len`.
pub fn writeAllCount(bw: *BufferedWriter, bytes: []const u8) anyerror!usize {
try writeAll(bw, bytes);
return bytes.len;
}

/// If the number returned is less than `bytes.len` it indicates end of stream.
pub fn writeUntilEnd(bw: *BufferedWriter, bytes: []const u8) anyerror!usize {
/// Calls `write` as many times as necessary such that all of `bytes` are
/// transferred.
pub fn writeAll(bw: *BufferedWriter, bytes: []const u8) anyerror!void {
var index: usize = 0;
while (true) {
const result = write(bw, bytes[index..]);
try result.err;
index += result.len;
assert(index <= bytes.len);
if (index == bytes.len or result.end) return index;
}
while (index < bytes.len) index += try write(bw, bytes[index..]);
}

pub fn print(bw: *BufferedWriter, comptime format: []const u8, args: anytype) anyerror!void {
@@ -365,65 +311,48 @@ pub fn printCount(bw: *BufferedWriter, comptime format: []const u8, args: anytyp
return std.fmt.format(bw, format, args);
}

pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
if ((try writeByteUntilEnd(bw, byte)) == 0) return error.WriteStreamEnd;
}

/// Returns 0 or 1 indicating how many bytes were written.
pub fn writeByteCount(bw: *BufferedWriter, byte: u8) anyerror!usize {
try writeByte(bw, byte);
return 1;
}

/// Returns 0 or 1 indicating how many bytes were written.
/// `0` means end of stream encountered.
pub fn writeByteUntilEnd(bw: *BufferedWriter, byte: u8) anyerror!usize {
pub fn writeByte(bw: *BufferedWriter, byte: u8) anyerror!void {
const list = &bw.buffer;
const buffer = list.items;
if (buffer.len < list.capacity) {
@branchHint(.likely);
buffer.ptr[buffer.len] = byte;
list.items.len = buffer.len + 1;
return 1;
return;
}
var buffers: [2][]const u8 = .{ buffer, &.{byte} };
while (true) {
const result = bw.unbuffered_writer.writev(&buffers);
try result.err;
const n = result.len;
const n = try bw.unbuffered_writer.writev(&buffers);
if (n == 0) {
@branchHint(.unlikely);
if (result.end) return 0;
continue;
} else if (n >= buffer.len) {
@branchHint(.likely);
if (n > buffer.len) {
@branchHint(.likely);
list.items.len = 0;
return 1;
return;
} else {
buffer[0] = byte;
list.items.len = 1;
return 1;
return;
}
}
const remainder = buffer[n..];
std.mem.copyForwards(u8, buffer[0..remainder.len], remainder);
buffer[remainder.len] = byte;
list.items.len = remainder.len + 1;
return 1;
return;
}
}

/// Writes the same byte many times, performing the underlying write call as
/// many times as necessary, returning `error.WriteStreamEnd` if the byte
/// could not be repeated `n` times.
pub fn splatByteAll(bw: *BufferedWriter, byte: u8, n: usize) anyerror!void {
if ((try splatByteUntilEnd(bw, byte, n)) != n) return error.WriteStreamEnd;
}

/// Writes the same byte many times, performing the underlying write call as
/// many times as necessary, returning `error.WriteStreamEnd` if the byte
/// could not be repeated `n` times, or returning `n` on success.
/// Convenience function that calls `splatByteAll` and then returns `n`.
pub fn splatByteAllCount(bw: *BufferedWriter, byte: u8, n: usize) anyerror!usize {
try splatByteAll(bw, byte, n);
return n;

@@ -431,23 +360,15 @@ pub fn splatByteAllCount(bw: *BufferedWriter, byte: u8, n: usize) anyerror!usize

/// Writes the same byte many times, performing the underlying write call as
/// many times as necessary.
///
/// If the number returned is less than `n` it indicates end of stream.
pub fn splatByteUntilEnd(bw: *BufferedWriter, byte: u8, n: usize) anyerror!usize {
var index: usize = 0;
while (true) {
const result = splatByte(bw, byte, n - index);
try result.err;
index += result.len;
assert(index <= n);
if (index == n or result.end) return index;
}
pub fn splatByteAll(bw: *BufferedWriter, byte: u8, n: usize) anyerror!void {
var remaining: usize = n;
while (remaining > 0) remaining -= try splatByte(bw, byte, remaining);
}

/// Writes the same byte many times, allowing short writes.
///
/// Does maximum of one underlying `Writer.VTable.writeSplat`.
pub fn splatByte(bw: *BufferedWriter, byte: u8, n: usize) Writer.Result {
pub fn splatByte(bw: *BufferedWriter, byte: u8, n: usize) anyerror!usize {
return passthru_writeSplat(bw, &.{&.{byte}}, n);
}
@@ -19,8 +19,8 @@ pub const VTable = struct {
///
/// If this is `null` it is equivalent to always returning
/// `error.Unseekable`.
posRead: ?*const fn (ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) RwResult,
posReadVec: ?*const fn (ctx: ?*anyopaque, data: []const []u8, offset: u64) Result,
posRead: ?*const fn (ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) anyerror!Status,
posReadVec: ?*const fn (ctx: ?*anyopaque, data: []const []u8, offset: u64) anyerror!Status,

/// Writes bytes from the internally tracked stream position to `bw`, or
/// returns `error.Unstreamable`, indicating `posRead` should be used

@@ -37,25 +37,18 @@ pub const VTable = struct {
///
/// If this is `null` it is equivalent to always returning
/// `error.Unstreamable`.
streamRead: ?*const fn (ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit) RwResult,
streamReadVec: ?*const fn (ctx: ?*anyopaque, data: []const []u8) Result,

pub const eof: VTable = .{
.posRead = eof_posRead,
.posReadVec = eof_posReadVec,
.streamRead = eof_streamRead,
.streamReadVec = eof_streamReadVec,
};
streamRead: ?*const fn (ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit) anyerror!Status,
streamReadVec: ?*const fn (ctx: ?*anyopaque, data: []const []u8) anyerror!Status,
};

pub const Result = std.io.Writer.Result;
pub const Len = @Type(.{ .int = .{ .signedness = .unsigned, .bits = @bitSizeOf(usize) - 1 } });

pub const RwResult = struct {
len: usize = 0,
read_err: anyerror!void = {},
write_err: anyerror!void = {},
read_end: bool = false,
write_end: bool = false,
pub const Status = packed struct(usize) {
/// Number of bytes that were transferred. Zero does not mean end of
/// stream.
len: Len = 0,
/// Indicates end of stream.
end: bool = false,
};

pub const Limit = enum(usize) {

@@ -171,31 +164,3 @@ test "when the backing reader provides one byte at a time" {
defer std.testing.allocator.free(res);
try std.testing.expectEqualStrings(str, res);
}

fn eof_posRead(ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit, offset: u64) RwResult {
_ = ctx;
_ = bw;
_ = limit;
_ = offset;
return .{ .end = true };
}

fn eof_posReadVec(ctx: ?*anyopaque, data: []const []u8, offset: u64) Result {
_ = ctx;
_ = data;
_ = offset;
return .{ .end = true };
}

fn eof_streamRead(ctx: ?*anyopaque, bw: *std.io.BufferedWriter, limit: Limit) RwResult {
_ = ctx;
_ = bw;
_ = limit;
return .{ .end = true };
}

fn eof_streamReadVec(ctx: ?*anyopaque, data: []const []u8) Result {
_ = ctx;
_ = data;
return .{ .end = true };
}
@@ -17,7 +17,7 @@ pub const VTable = struct {
/// Number of bytes returned may be zero, which does not mean
/// end-of-stream. A subsequent call may return nonzero, or may signal end
/// of stream via an error.
writeSplat: *const fn (ctx: ?*anyopaque, data: []const []const u8, splat: usize) Result,
writeSplat: *const fn (ctx: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize,

/// Writes contents from an open file. `headers` are written first, then `len`
/// bytes of `file` starting from `offset`, then `trailers`.

@@ -38,23 +38,7 @@ pub const VTable = struct {
/// zero, they can be forwarded directly to `VTable.writev`.
headers_and_trailers: []const []const u8,
headers_len: usize,
) Result,

pub const eof: VTable = .{
.writeSplat = eof_writeSplat,
.writeFile = eof_writeFile,
};
};

pub const Result = struct {
/// Even when a failure occurs, `len` may be nonzero, and `end` may be
/// true.
err: anyerror!void = {},
/// Number of bytes that were transferred. When an error occurs, ideally
/// this will be zero, but may not always be the case.
len: usize = 0,
/// Indicates end of stream.
end: bool = false,
) anyerror!usize,
};

pub const Offset = enum(u64) {

@@ -90,11 +74,11 @@ pub const FileLen = enum(u64) {
}
};

pub fn writev(w: Writer, data: []const []const u8) Result {
pub fn writev(w: Writer, data: []const []const u8) anyerror!usize {
return w.vtable.writeSplat(w.context, data, 1);
}

pub fn writeSplat(w: Writer, data: []const []const u8, splat: usize) Result {
pub fn writeSplat(w: Writer, data: []const []const u8, splat: usize) anyerror!usize {
return w.vtable.writeSplat(w.context, data, splat);
}

@@ -105,7 +89,7 @@ pub fn writeFile(
len: FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) Result {
) anyerror!usize {
return w.vtable.writeFile(w.context, file, offset, len, headers_and_trailers, headers_len);
}

@@ -116,14 +100,14 @@ pub fn unimplemented_writeFile(
len: FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) Result {
) anyerror!usize {
_ = context;
_ = file;
_ = offset;
_ = len;
_ = headers_and_trailers;
_ = headers_len;
return .{ .err = error.Unimplemented };
return error.Unimplemented;
}

pub fn buffered(w: Writer, buffer: []u8) std.io.BufferedWriter {
@@ -146,7 +130,7 @@ pub const @"null": Writer = .{
},
};

fn null_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Result {
fn null_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) anyerror!usize {
_ = context;
const headers = data[0 .. data.len - 1];
const pattern = data[headers.len..];

@@ -162,14 +146,14 @@ fn null_writeFile(
len: FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) Result {
) anyerror!usize {
_ = context;
var n: usize = 0;
if (len == .entire_file) {
const headers = headers_and_trailers[0..headers_len];
for (headers) |bytes| n += bytes.len;
if (offset.toInt()) |off| {
const stat = file.stat() catch |err| return .{ .err = err, .len = n };
const stat = try file.stat();
n += stat.size - off;
for (headers_and_trailers[headers_len..]) |bytes| n += bytes.len;
return .{ .len = n };

@@ -183,27 +167,3 @@ fn null_writeFile(
test @"null" {
try @"null".writeAll("yay");
}

fn eof_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Result {
_ = context;
_ = data;
_ = splat;
return .{ .end = true };
}

fn eof_writeFile(
context: ?*anyopaque,
file: std.fs.File,
offset: u64,
len: FileLen,
headers_and_trailers: []const []const u8,
headers_len: usize,
) Result {
_ = context;
_ = file;
_ = offset;
_ = len;
_ = headers_and_trailers;
_ = headers_len;
return .{ .end = true };
}
@@ -6359,7 +6359,7 @@ pub const SendFileError = PReadError || WriteError || SendError;
pub fn sendfile(
out_fd: fd_t,
in_fd: fd_t,
in_offset: u64,
in_offset: ?u64,
in_len: u64,
headers: []const iovec_const,
trailers: []const iovec_const,

@@ -6390,8 +6390,9 @@ pub fn sendfile(

const sendfile_sym = if (lfs64_abi) system.sendfile64 else system.sendfile;
while (true) {
var offset: off_t = @bitCast(in_offset);
const rc = sendfile_sym(out_fd, in_fd, &offset, adjusted_count);
var offset: off_t = if (in_offset) |o| o else undefined;
const offset_pointer: ?*off_t = if (in_offset) &offset else null;
const rc = sendfile_sym(out_fd, in_fd, offset_pointer, adjusted_count);
switch (errno(rc)) {
.SUCCESS => {
const amt: usize = @bitCast(rc);