update std.http.Server to new API
Rename std.io.BufferedWriter.writableSlice to writableSliceGreedy, and make writableSlice and writableArray advance the buffer end position. Introduce std.io.BufferedWriter.writeSplatLimit, but it's unimplemented.
This commit is contained in:
parent 98f463ad59
commit f333267782
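
A minimal sketch (mine, not part of the commit) of the renamed API, assuming a std.io.BufferedWriter `bw` with enough buffer capacity; `produceBytes` is a hypothetical callback that fills some prefix of the slice it is given:

// writableSliceGreedy(min) returns at least `min` writable bytes and does
// NOT advance the end position; the caller reports progress via advance().
const dest = try bw.writableSliceGreedy(1);
const n = produceBytes(dest); // hypothetical producer
bw.advance(n);

// writableSlice(len) now returns exactly `len` bytes and advances the end
// position itself, so no advance() call is needed afterwards.
const buf = try bw.writableSlice(4);
@memcpy(buf, "\r\n\r\n");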
@@ -90,8 +90,15 @@ pub fn main() !void {
 fn accept(context: *Context, connection: std.net.Server.Connection) void {
     defer connection.stream.close();

-    var read_buffer: [8000]u8 = undefined;
-    var server = std.http.Server.init(connection, &read_buffer);
+    var recv_buffer: [8000]u8 = undefined;
+    var send_buffer: [4000]u8 = undefined;
+    var connection_br: std.io.BufferedReader = undefined;
+    var stream_reader = connection.stream.reader();
+    connection_br.init(stream_reader.interface(), &recv_buffer);
+    var stream_writer = connection.stream.writer();
+    var connection_bw = stream_writer.interface().buffered(&send_buffer);
+    var server = std.http.Server.init(&connection_br, &connection_bw);
+
     while (server.state == .ready) {
         var request = server.receiveHead() catch |err| switch (err) {
             error.HttpConnectionClosing => return,

@@ -160,9 +167,7 @@ fn serveDocsFile(
     defer file.close();
     const content_length = std.math.cast(usize, (try file.stat()).size) orelse return error.FileTooBig;

-    var send_buffer: [4000]u8 = undefined;
-    var response = request.respondStreaming(.{
-        .send_buffer = &send_buffer,
+    var response = try request.respondStreaming(.{
         .content_length = content_length,
         .respond_options = .{
             .extra_headers = &.{

@@ -182,9 +187,7 @@ fn serveDocsFile(
 fn serveSourcesTar(request: *std.http.Server.Request, context: *Context) !void {
     const gpa = context.gpa;

-    var send_buffer: [0x4000]u8 = undefined;
-    var response = request.respondStreaming(.{
-        .send_buffer = &send_buffer,
+    var response = try request.respondStreaming(.{
         .respond_options = .{
             .extra_headers = &.{
                 .{ .name = "content-type", .value = "application/x-tar" },

@@ -349,8 +349,8 @@ pub fn Inflate(comptime container: Container, comptime Lookahead: type) type {
             limit: std.io.Reader.Limit,
         ) std.io.Reader.RwError!usize {
             const self: *Self = @alignCast(@ptrCast(context));
-            const out = try bw.writableSlice(1);
-            const in = self.get(limit.min(out.len)) catch |err| switch (err) {
+            const out = try bw.writableSliceGreedy(1);
+            const in = self.get(limit.minInt(out.len)) catch |err| switch (err) {
                 error.EndOfStream => return error.EndOfStream,
                 error.ReadFailed => return error.ReadFailed,
                 else => |e| {
@@ -925,7 +925,7 @@ fn writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) std.i
     const c: *Client = @alignCast(@ptrCast(context));
     const sliced_data = if (splat == 0) data[0..data.len -| 1] else data;
     const output = &c.output;
-    const ciphertext_buf = try output.writableSlice(min_buffer_len);
+    const ciphertext_buf = try output.writableSliceGreedy(min_buffer_len);
     var total_clear: usize = 0;
     var ciphertext_end: usize = 0;
     for (sliced_data) |buf| {

@@ -943,7 +943,7 @@ fn writeSplat(context: *anyopaque, data: []const []const u8, splat: usize) std.i
 /// attack.
 pub fn end(c: *Client) std.io.Writer.Error!void {
     const output = &c.output;
-    const ciphertext_buf = try output.writableSlice(min_buffer_len);
+    const ciphertext_buf = try output.writableSliceGreedy(min_buffer_len);
     const prepared = prepareCiphertextRecord(c, ciphertext_buf, &tls.close_notify_alert, .alert);
     output.advance(prepared.cleartext_len);
     return prepared.ciphertext_end;

@@ -1063,7 +1063,7 @@ fn read(
     bw: *std.io.BufferedWriter,
     limit: std.io.Reader.Limit,
 ) std.io.Reader.RwError!std.io.Reader.Status {
-    const buf = limit.slice(try bw.writableSlice(1));
+    const buf = limit.slice(try bw.writableSliceGreedy(1));
     const status = try readVec(context, &.{buf});
     bw.advance(status.len);
     return status;
@@ -983,7 +983,7 @@ pub const Reader = struct {
                 }
                 return 0;
             };
-            const new_limit: std.io.Reader.Limit = .limited(limit.min(size - pos));
+            const new_limit = limit.min(.limited(size - pos));
             const n = bw.writeFile(file, .init(pos), new_limit, &.{}, 0) catch |err| switch (err) {
                 error.WriteFailed => return error.WriteFailed,
                 error.Unseekable => {
@@ -14,6 +14,7 @@ const Server = @This();
 /// The reader's buffer must be large enough to store the client's entire HTTP
 /// header, otherwise `receiveHead` returns `error.HttpHeadersOversize`.
 in: *std.io.BufferedReader,
+/// Data from the HTTP server to the HTTP client.
 out: *std.io.BufferedWriter,
 /// Keeps track of whether the Server is ready to accept a new request on the
 /// same connection, and makes invalid API usage cause assertion failures
@@ -479,12 +480,6 @@ pub const Request = struct {
     }

     pub const RespondStreamingOptions = struct {
-        /// An externally managed slice of memory used to batch bytes before
-        /// sending. `respondStreaming` asserts this is large enough to store
-        /// the full HTTP response head.
-        ///
-        /// Must outlive the returned Response.
-        send_buffer: []u8,
         /// If provided, the response will use the content-length header;
         /// otherwise it will use transfer-encoding: chunked.
         content_length: ?u64 = null,

@@ -492,7 +487,7 @@ pub const Request = struct {
         respond_options: RespondOptions = .{},
     };

-    /// The header is buffered but not sent until Response.flush is called.
+    /// The header is buffered but not sent until `Response.flush` is called.
     ///
     /// If the request contains a body and the connection is to be reused,
     /// discards the request body, leaving the Server in the `ready` state. If
@@ -504,69 +499,63 @@ pub const Request = struct {
     /// that flag and skipping any expensive work that would otherwise need to
     /// be done to satisfy the request.
     ///
-    /// Asserts `send_buffer` is large enough to store the entire response header.
     /// Asserts status is not `continue`.
-    pub fn respondStreaming(request: *Request, options: RespondStreamingOptions) Response {
+    pub fn respondStreaming(request: *Request, options: RespondStreamingOptions) std.io.Writer.Error!Response {
         const o = options.respond_options;
         assert(o.status != .@"continue");
         const transfer_encoding_none = (o.transfer_encoding orelse .chunked) == .none;
         const server_keep_alive = !transfer_encoding_none and o.keep_alive;
         const keep_alive = request.discardBody(server_keep_alive);
         const phrase = o.reason orelse o.status.phrase() orelse "";

-        var h = std.ArrayListUnmanaged(u8).initBuffer(options.send_buffer);
+        const out = request.server.out;

         const elide_body = if (request.head.expect != null) eb: {
             // reader() and hence discardBody() above sets expect to null if it
             // is handled. So the fact that it is not null here means unhandled.
-            h.appendSliceAssumeCapacity("HTTP/1.1 417 Expectation Failed\r\n");
-            if (!keep_alive) h.appendSliceAssumeCapacity("connection: close\r\n");
-            h.appendSliceAssumeCapacity("content-length: 0\r\n\r\n");
+            try out.writeAll("HTTP/1.1 417 Expectation Failed\r\n");
+            if (!keep_alive) try out.writeAll("connection: close\r\n");
+            try out.writeAll("content-length: 0\r\n\r\n");
             break :eb true;
         } else eb: {
-            h.printAssumeCapacity("{s} {d} {s}\r\n", .{
+            try out.print("{s} {d} {s}\r\n", .{
                 @tagName(o.version), @intFromEnum(o.status), phrase,
             });

             switch (o.version) {
-                .@"HTTP/1.0" => if (keep_alive) h.appendSliceAssumeCapacity("connection: keep-alive\r\n"),
-                .@"HTTP/1.1" => if (!keep_alive) h.appendSliceAssumeCapacity("connection: close\r\n"),
+                .@"HTTP/1.0" => if (keep_alive) try out.writeAll("connection: keep-alive\r\n"),
+                .@"HTTP/1.1" => if (!keep_alive) try out.writeAll("connection: close\r\n"),
             }

             if (o.transfer_encoding) |transfer_encoding| switch (transfer_encoding) {
-                .chunked => h.appendSliceAssumeCapacity("transfer-encoding: chunked\r\n"),
+                .chunked => try out.writeAll("transfer-encoding: chunked\r\n"),
                 .none => {},
             } else if (options.content_length) |len| {
-                h.printAssumeCapacity("content-length: {d}\r\n", .{len});
+                try out.print("content-length: {d}\r\n", .{len});
             } else {
-                h.appendSliceAssumeCapacity("transfer-encoding: chunked\r\n");
+                try out.writeAll("transfer-encoding: chunked\r\n");
             }

             for (o.extra_headers) |header| {
                 assert(header.name.len != 0);
-                h.appendSliceAssumeCapacity(header.name);
-                h.appendSliceAssumeCapacity(": ");
-                h.appendSliceAssumeCapacity(header.value);
-                h.appendSliceAssumeCapacity("\r\n");
+                try out.writeAll(header.name);
+                try out.writeAll(": ");
+                try out.writeAll(header.value);
+                try out.writeAll("\r\n");
             }

-            h.appendSliceAssumeCapacity("\r\n");
+            try out.writeAll("\r\n");
             break :eb request.head.method == .HEAD;
         };

         return .{
-            .out = request.server.out,
-            .send_buffer = options.send_buffer,
-            .send_buffer_start = 0,
-            .send_buffer_end = h.items.len,
+            .server_output = request.server.out,
             .transfer_encoding = if (o.transfer_encoding) |te| switch (te) {
-                .chunked => .chunked,
+                .chunked => .{ .chunked = .init },
                 .none => .none,
             } else if (options.content_length) |len| .{
                 .content_length = len,
-            } else .chunked,
+            } else .{ .chunked = .init },
             .elide_body = elide_body,
-            .chunk_len = 0,
         };
     }
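
With `send_buffer` gone from `RespondStreamingOptions` and the call now able to fail, a caller looks roughly like this (a sketch assembled from the example hunks above; the header values are placeholders):

var response = try request.respondStreaming(.{
    // Omit .content_length to fall back to transfer-encoding: chunked.
    .content_length = content_length,
    .respond_options = .{
        .extra_headers = &.{
            .{ .name = "content-type", .value = "text/html" },
        },
    },
});
// The response head goes into the server's BufferedWriter; no
// caller-supplied send_buffer is needed anymore.
try response.end();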
@@ -836,20 +825,32 @@ pub const Request = struct {
     };

     pub const Response = struct {
-        out: *std.io.BufferedWriter,
-        send_buffer: []u8,
-        /// Index of the first byte in `send_buffer`.
-        /// This is 0 unless a short write happens in `write`.
-        send_buffer_start: usize,
-        /// Index of the last byte + 1 in `send_buffer`.
-        send_buffer_end: usize,
-        /// `null` means transfer-encoding: chunked.
-        /// As a debugging utility, counts down to zero as bytes are written.
+        /// HTTP protocol to the client.
+        ///
+        /// This is the underlying stream; use `buffered` to create a
+        /// `BufferedWriter` for this `Response`.
+        server_output: *std.io.BufferedWriter,
         transfer_encoding: TransferEncoding,
         elide_body: bool,
-        /// Indicates how much of the end of the `send_buffer` corresponds to a
-        /// chunk. This amount of data will be wrapped by an HTTP chunk header.
-        chunk_len: usize,
+        err: Error!void = {},
+
+        pub const Error = error{
+            /// Attempted to write a file to the stream, an expensive operation
+            /// that should be avoided when `elide_body` is true.
+            UnableToElideBody,
+        };
+        pub const WriteError = std.io.Writer.Error;
+
+        /// How many zeroes to reserve for hex-encoded chunk length.
+        const chunk_len_digits = 8;
+        const max_chunk_len: usize = std.math.pow(usize, 16, chunk_len_digits) - 1;
+        const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";
+
+        comptime {
+            assert(max_chunk_len == std.math.maxInt(u32));
+        }

         pub const TransferEncoding = union(enum) {
             /// End of connection signals the end of the stream.
@@ -857,7 +858,19 @@ pub const Response = struct {
             /// As a debugging utility, counts down to zero as bytes are written.
             content_length: u64,
             /// Each chunk is wrapped in a header and trailer.
-            chunked,
+            chunked: Chunked,
+
+            pub const Chunked = union(enum) {
+                /// Index of the hex-encoded chunk length in the chunk header
+                /// within the buffer of `Response.server_output`.
+                offset: usize,
+                /// We are in the middle of a chunk and this is how many bytes are
+                /// left until the next header. This includes +2 for "\r\n", and
+                /// is zero for the beginning of the stream.
+                chunk_len: usize,
+
+                pub const init: Chunked = .{ .chunk_len = 0 };
+            };
         };

         /// When using content-length, asserts that the amount of data sent matches
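
My reading of how this state union drives the chunked write path further down in this diff (interpretive, not authoritative):

// .chunk_len = 0: no chunk is open. The next write appends the
//     "00000000\r\n" placeholder (chunk_header_template) to the output
//     buffer and switches to .offset.
// .offset: a placeholder chunk header sits in the buffer; once the chunk's
//     size is known (e.g. the buffer must be flushed), writeHex patches the
//     hex digits in place and the state returns to .chunk_len.
// .chunk_len = n > 2: n - 2 body bytes remain in the current chunk; the
//     values 2 and 1 emit the terminating '\r' and '\n' one byte at a time.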
@@ -865,17 +878,17 @@ pub const Response = struct {
         /// Otherwise, transfer-encoding: chunked is being used, and it writes the
         /// end-of-stream message, then flushes the stream to the system.
         /// Respects the value of `elide_body` to omit all data after the headers.
-        pub fn end(r: *Response) std.io.Writer.Error!void {
+        pub fn end(r: *Response) WriteError!void {
             switch (r.transfer_encoding) {
                 .content_length => |len| {
                     assert(len == 0); // Trips when end() called before all bytes written.
-                    try flush_cl(r);
+                    try flushContentLength(r);
                 },
                 .none => {
-                    try flush_cl(r);
+                    try flushContentLength(r);
                 },
                 .chunked => {
-                    try flush_chunked(r, &.{});
+                    try flushChunked(r, &.{});
                 },
             }
             r.* = undefined;
@@ -890,9 +903,9 @@ pub const Response = struct {
         /// flushes the stream to the system.
         /// Respects the value of `elide_body` to omit all data after the headers.
         /// Asserts there are at most 25 trailers.
-        pub fn endChunked(r: *Response, options: EndChunkedOptions) std.io.Writer.Error!void {
+        pub fn endChunked(r: *Response, options: EndChunkedOptions) WriteError!void {
             assert(r.transfer_encoding == .chunked);
-            try flush_chunked(r, options.trailers);
+            try flushChunked(r, options.trailers);
             r.* = undefined;
         }
@@ -900,163 +913,222 @@ pub const Response = struct {
         /// would not exceed the content-length value sent in the HTTP header.
         /// May return 0, which does not indicate end of stream. The caller decides
         /// when the end of stream occurs by calling `end`.
-        pub fn write(r: *Response, bytes: []const u8) std.io.Writer.Error!usize {
+        pub fn write(r: *Response, bytes: []const u8) WriteError!usize {
             switch (r.transfer_encoding) {
-                .content_length, .none => return cl_writeSplat(r, &.{bytes}, 1),
-                .chunked => return chunked_writeSplat(r, &.{bytes}, 1),
+                .content_length, .none => return contentLengthWriteSplat(r, &.{bytes}, 1),
+                .chunked => return chunkedWriteSplat(r, &.{bytes}, 1),
             }
         }

-        fn cl_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
-            _ = splat;
-            return cl_write(context, data[0]); // TODO: try to send all the data
+        fn contentLengthWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
+            const r: *Response = @alignCast(@ptrCast(context));
+            const n = if (r.elide_body) countSplat(data, splat) else try r.server_output.writeSplat(data, splat);
+            r.transfer_encoding.content_length -= n;
+            return n;
+        }
+
+        fn noneWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
+            const r: *Response = @alignCast(@ptrCast(context));
+            if (r.elide_body) return countSplat(data, splat);
+            return r.server_output.writeSplat(data, splat);
+        }
+
+        fn countSplat(data: []const []const u8, splat: usize) usize {
+            if (data.len == 0) return 0;
+            var total: usize = 0;
+            for (data[0 .. data.len - 1]) |buf| total += buf.len;
+            total += data[data.len - 1].len * splat;
+            return total;
         }
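
For instance (an illustrative note, not part of the diff), `countSplat` computes the byte count the splat-style writers would produce:

// data = .{ "ab", "c" } with splat = 4:
// every slice but the last counts once (2 bytes), and the last slice
// counts `splat` times (1 * 4), so countSplat returns 6 — matching "abcccc".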
-        fn cl_writeFile(
-            context: ?*anyopaque,
-            file: std.fs.File,
-            offset: std.io.Writer.Offset,
-            limit: std.io.Writer.Limit,
-            headers_and_trailers: []const []const u8,
-            headers_len: usize,
-        ) std.io.Writer.Error!usize {
-            _ = context;
-            _ = file;
-            _ = offset;
-            _ = limit;
-            _ = headers_and_trailers;
-            _ = headers_len;
-            @panic("TODO");
-        }
-
-        fn cl_write(context: ?*anyopaque, bytes: []const u8) std.io.Writer.Error!usize {
-            const r: *Response = @alignCast(@ptrCast(context));
-
-            var trash: u64 = std.math.maxInt(u64);
-            const len = switch (r.transfer_encoding) {
-                .content_length => |*len| len,
-                else => &trash,
-            };
-
-            if (r.elide_body) {
-                len.* -= bytes.len;
-                return bytes.len;
-            }
-
-            if (bytes.len + r.send_buffer_end > r.send_buffer.len) {
-                const send_buffer_len = r.send_buffer_end - r.send_buffer_start;
-                var iovecs: [2][]const u8 = .{
-                    r.send_buffer[r.send_buffer_start..][0..send_buffer_len],
-                    bytes,
-                };
-                const n = try r.out.writeVec(&iovecs);
-
-                if (n >= send_buffer_len) {
-                    // It was enough to reset the buffer.
-                    r.send_buffer_start = 0;
-                    r.send_buffer_end = 0;
-                    const bytes_n = n - send_buffer_len;
-                    len.* -= bytes_n;
-                    return bytes_n;
-                }
-
-                // It didn't even make it through the existing buffer, let
-                // alone the new bytes provided.
-                r.send_buffer_start += n;
-                return 0;
-            }
-
-            // All bytes can be stored in the remaining space of the buffer.
-            @memcpy(r.send_buffer[r.send_buffer_end..][0..bytes.len], bytes);
-            r.send_buffer_end += bytes.len;
-            len.* -= bytes.len;
-            return bytes.len;
-        }
+        fn elideWriteFile(
+            r: *Response,
+            offset: std.io.Writer.Offset,
+            limit: std.io.Writer.Limit,
+            headers_and_trailers: []const []const u8,
+        ) WriteError!usize {
+            if (offset != .none) {
+                if (countWriteFile(limit, headers_and_trailers)) |n| {
+                    return n;
+                }
+            }
+            r.err = error.UnableToElideBody;
+            return error.WriteFailed;
+        }
-        fn chunked_writeSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) std.io.Writer.Error!usize {
-            _ = splat;
-            return chunked_write(context, data[0]); // TODO: try to send all the data
-        }
+        /// Returns `null` if size cannot be computed without making any syscalls.
+        fn countWriteFile(limit: std.io.Writer.Limit, headers_and_trailers: []const []const u8) ?usize {
+            var total: usize = limit.toInt() orelse return null;
+            for (headers_and_trailers) |buf| total += buf.len;
+            return total;
+        }

-        fn chunked_writeFile(
+        fn noneWriteFile(
             context: ?*anyopaque,
             file: std.fs.File,
             offset: std.io.Writer.Offset,
             limit: std.io.Writer.Limit,
             headers_and_trailers: []const []const u8,
             headers_len: usize,
-        ) std.io.Writer.Error!usize {
-            _ = context;
-            _ = file;
-            _ = offset;
-            _ = limit;
-            _ = headers_and_trailers;
-            _ = headers_len;
-            @panic("TODO"); // TODO lower to a call to writeFile on the output
-        }
-
-        fn chunked_write(context: ?*anyopaque, bytes: []const u8) std.io.Writer.Error!usize {
+        ) std.io.Writer.FileError!usize {
+            if (limit == .nothing) return noneWriteSplat(context, headers_and_trailers, 1);
             const r: *Response = @alignCast(@ptrCast(context));
-            assert(r.transfer_encoding == .chunked);
-
-            if (r.elide_body)
-                return bytes.len;
-
-            if (bytes.len + r.send_buffer_end > r.send_buffer.len) {
-                const send_buffer_len = r.send_buffer_end - r.send_buffer_start;
-                const chunk_len = r.chunk_len + bytes.len;
-                var header_buf: [18]u8 = undefined;
-                const chunk_header = std.fmt.bufPrint(&header_buf, "{x}\r\n", .{chunk_len}) catch unreachable;
-
-                var iovecs: [5][]const u8 = .{
-                    r.send_buffer[r.send_buffer_start .. send_buffer_len - r.chunk_len],
-                    chunk_header,
-                    r.send_buffer[r.send_buffer_end - r.chunk_len ..][0..r.chunk_len],
-                    bytes,
-                    "\r\n",
-                };
-                // TODO make this writev instead of writevAll, which involves
-                // complicating the logic of this function.
-                try r.out.writeVecAll(&iovecs);
-                r.send_buffer_start = 0;
-                r.send_buffer_end = 0;
-                r.chunk_len = 0;
-                return bytes.len;
-            }
-
-            // All bytes can be stored in the remaining space of the buffer.
-            @memcpy(r.send_buffer[r.send_buffer_end..][0..bytes.len], bytes);
-            r.send_buffer_end += bytes.len;
-            r.chunk_len += bytes.len;
-            return bytes.len;
+            if (r.elide_body) return elideWriteFile(r, offset, limit, headers_and_trailers);
+            return r.server_output.writeFile(file, offset, limit, headers_and_trailers, headers_len);
         }
-        /// If using content-length, asserts that writing these bytes to the client
-        /// would not exceed the content-length value sent in the HTTP header.
-        pub fn writeAll(r: *Response, bytes: []const u8) std.io.Writer.Error!void {
-            var index: usize = 0;
-            while (index < bytes.len) {
-                index += try write(r, bytes[index..]);
-            }
-        }
+        fn contentLengthWriteFile(
+            context: ?*anyopaque,
+            file: std.fs.File,
+            offset: std.io.Writer.Offset,
+            limit: std.io.Writer.Limit,
+            headers_and_trailers: []const []const u8,
+            headers_len: usize,
+        ) std.io.Writer.FileError!usize {
+            if (limit == .nothing) return contentLengthWriteSplat(context, headers_and_trailers, 1);
+            const r: *Response = @alignCast(@ptrCast(context));
+            if (r.elide_body) return elideWriteFile(r, offset, limit, headers_and_trailers);
+            const n = try r.server_output.writeFile(file, offset, limit, headers_and_trailers, headers_len);
+            r.transfer_encoding.content_length -= n;
+            return n;
+        }
+
+        fn chunkedWriteFile(
+            context: ?*anyopaque,
+            file: std.fs.File,
+            offset: std.io.Writer.Offset,
+            limit: std.io.Writer.Limit,
+            headers_and_trailers: []const []const u8,
+            headers_len: usize,
+        ) std.io.Writer.FileError!usize {
+            if (limit == .nothing) return chunkedWriteSplat(context, headers_and_trailers, 1);
+            const r: *Response = @alignCast(@ptrCast(context));
+            if (r.elide_body) return elideWriteFile(r, offset, limit, headers_and_trailers);
+            const data_len = countWriteFile(limit, headers_and_trailers) orelse @panic("TODO");
+            const bw = r.server_output;
+            const chunked = &r.transfer_encoding.chunked;
+            state: switch (chunked.*) {
+                .offset => |off| {
+                    // TODO: is it better perf to read small files into the buffer?
+                    const buffered_len = bw.end - off - chunk_header_template.len;
+                    const chunk_len = data_len + buffered_len;
+                    writeHex(bw.buffer[off..][0..chunk_len_digits], chunk_len);
+                    const n = try bw.writeFile(file, offset, limit, headers_and_trailers, headers_len);
+                    chunked.* = .{ .chunk_len = data_len + 2 - n };
+                    return n;
+                },
+                .chunk_len => |chunk_len| {
+                    l: switch (chunk_len) {
+                        0 => {
+                            const header_buf = try bw.writableArray(chunk_header_template.len);
+                            const off = bw.end;
+                            @memcpy(header_buf, chunk_header_template);
+                            chunked.* = .{ .offset = off };
+                            continue :state .{ .offset = off };
+                        },
+                        1 => {
+                            try bw.writeByte('\n');
+                            chunked.chunk_len = 0;
+                            continue :l 0;
+                        },
+                        2 => {
+                            try bw.writeByte('\r');
+                            chunked.chunk_len = 1;
+                            continue :l 1;
+                        },
+                        else => {
+                            const new_limit = limit.min(.limited(chunk_len - 2));
+                            const n = try bw.writeFile(file, offset, new_limit, headers_and_trailers, headers_len);
+                            chunked.chunk_len = chunk_len - n;
+                            return n;
+                        },
+                    }
+                },
+            }
+        }
+        fn chunkedWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
+            const r: *Response = @alignCast(@ptrCast(context));
+            const data_len = countSplat(data, splat);
+            if (r.elide_body) return data_len;
+
+            const bw = r.server_output;
+            const chunked = &r.transfer_encoding.chunked;
+
+            state: switch (chunked.*) {
+                .offset => |offset| {
+                    if (bw.unusedCapacitySlice().len >= data_len) {
+                        assert(data_len == (bw.writeSplat(data, splat) catch unreachable));
+                        return data_len;
+                    }
+                    const buffered_len = bw.end - offset - chunk_header_template.len;
+                    const chunk_len = data_len + buffered_len;
+                    writeHex(bw.buffer[offset..][0..chunk_len_digits], chunk_len);
+                    const n = try bw.writeSplat(data, splat);
+                    chunked.* = .{ .chunk_len = data_len + 2 - n };
+                    return n;
+                },
+                .chunk_len => |chunk_len| {
+                    l: switch (chunk_len) {
+                        0 => {
+                            const header_buf = try bw.writableArray(chunk_header_template.len);
+                            const offset = bw.end;
+                            @memcpy(header_buf, chunk_header_template);
+                            chunked.* = .{ .offset = offset };
+                            continue :state .{ .offset = offset };
+                        },
+                        1 => {
+                            try bw.writeByte('\n');
+                            chunked.chunk_len = 0;
+                            continue :l 0;
+                        },
+                        2 => {
+                            try bw.writeByte('\r');
+                            chunked.chunk_len = 1;
+                            continue :l 1;
+                        },
+                        else => {
+                            const n = try bw.writeSplatLimit(data, splat, .limited(chunk_len - 2));
+                            chunked.chunk_len = chunk_len - n;
+                            return n;
+                        },
+                    }
+                },
+            }
+        }
+        /// Writes an integer as base 16 to `buf`, right-aligned, assuming the
+        /// buffer has already been filled with zeroes.
+        fn writeHex(buf: []u8, x: usize) void {
+            assert(std.mem.allEqual(u8, buf, '0'));
+            const base = 16;
+            var index: usize = buf.len;
+            var a = x;
+            while (a > 0) {
+                const digit = a % base;
+                index -= 1;
+                buf[index] = std.fmt.digitToChar(@intCast(digit), .lower);
+                a /= base;
+            }
+        }
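
For illustration (not in the diff): given the eight-'0' placeholder from chunk_header_template, patching in a chunk length of 0x2ff would behave like this hypothetical snippet:

var buf: [8]u8 = "00000000".*;
writeHex(&buf, 0x2ff);
// buf is now "000002ff": digits are written right-aligned, and the
// leading zeroes reserved by chunk_header_template are left in place.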
         /// Sends all buffered data to the client.
         /// This is redundant after calling `end`.
         /// Respects the value of `elide_body` to omit all data after the headers.
-        pub fn flush(r: *Response) std.io.Writer.Error!void {
+        pub fn flush(r: *Response) Error!void {
             switch (r.transfer_encoding) {
-                .none, .content_length => return flush_cl(r),
-                .chunked => return flush_chunked(r, null),
+                .none, .content_length => return flushContentLength(r),
+                .chunked => return flushChunked(r, null),
             }
         }

-        fn flush_cl(r: *Response) std.io.Writer.Error!void {
+        fn flushContentLength(r: *Response) Error!void {
             try r.out.writeAll(r.send_buffer[r.send_buffer_start..r.send_buffer_end]);
             r.send_buffer_start = 0;
             r.send_buffer_end = 0;
         }

-        fn flush_chunked(r: *Response, end_trailers: ?[]const http.Header) std.io.Writer.Error!void {
+        fn flushChunked(r: *Response, end_trailers: ?[]const http.Header) Error!void {
             const max_trailers = 25;
             if (end_trailers) |trailers| assert(trailers.len <= max_trailers);
             assert(r.transfer_encoding == .chunked);
@@ -1123,17 +1195,21 @@ pub const Response = struct {

         pub fn writer(r: *Response) std.io.Writer {
             return .{
-                .context = r,
                 .vtable = switch (r.transfer_encoding) {
-                    .none, .content_length => &.{
-                        .writeSplat = cl_writeSplat,
-                        .writeFile = cl_writeFile,
+                    .none => &.{
+                        .writeSplat = noneWriteSplat,
+                        .writeFile = noneWriteFile,
                     },
+                    .content_length => &.{
+                        .writeSplat = contentLengthWriteSplat,
+                        .writeFile = contentLengthWriteFile,
+                    },
                     .chunked => &.{
-                        .writeSplat = chunked_writeSplat,
-                        .writeFile = chunked_writeFile,
+                        .writeSplat = chunkedWriteSplat,
+                        .writeFile = chunkedWriteFile,
                     },
                 },
+                .context = r,
             };
         }
     };
@@ -84,12 +84,29 @@ pub fn unusedCapacitySlice(bw: *const BufferedWriter) []u8 {
 }

 /// Asserts the provided buffer has total capacity enough for `len`.
-pub fn writableArray(bw: *BufferedWriter, comptime len: usize) anyerror!*[len]u8 {
-    return (try bw.writableSlice(len))[0..len];
+///
+/// Advances the buffer end position by `len`.
+pub fn writableArray(bw: *BufferedWriter, comptime len: usize) Writer.Error!*[len]u8 {
+    const big_slice = try bw.writableSliceGreedy(len);
+    advance(bw, len);
+    return big_slice[0..len];
+}
+
+/// Asserts the provided buffer has total capacity enough for `len`.
+///
+/// Advances the buffer end position by `len`.
+pub fn writableSlice(bw: *BufferedWriter, len: usize) Writer.Error![]u8 {
+    const big_slice = try bw.writableSliceGreedy(len);
+    advance(bw, len);
+    return big_slice[0..len];
 }

 /// Asserts the provided buffer has total capacity enough for `minimum_length`.
-pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) Writer.Error![]u8 {
+///
+/// Does not `advance` the buffer end position.
+///
+/// If `minimum_length` is zero, this is equivalent to `unusedCapacitySlice`.
+pub fn writableSliceGreedy(bw: *BufferedWriter, minimum_length: usize) Writer.Error![]u8 {
     assert(bw.buffer.len >= minimum_length);
     const cap_slice = bw.buffer[bw.end..];
     if (cap_slice.len >= minimum_length) {
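
A small sketch of the updated contract (mine, not from the diff), assuming a BufferedWriter `bw`:

// writableArray returns a comptime-sized pointer and advances immediately:
const four: *[4]u8 = try bw.writableArray(4);
four.* = "HTTP".*;
// No bw.advance(4) here: writableArray already advanced the end position.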
@@ -111,7 +128,10 @@ pub fn writableSlice(bw: *BufferedWriter, minimum_length: usize) Writer.Error![]
     return bw.buffer[bw.end..];
 }

-/// After calling `writableSlice`, this function tracks how many bytes were written to it.
+/// After calling `writableSliceGreedy`, this function tracks how many bytes
+/// were written to it.
+///
+/// This is not needed when using `writableSlice` or `writableArray`.
 pub fn advance(bw: *BufferedWriter, n: usize) void {
     const new_end = bw.end + n;
     assert(new_end <= bw.buffer.len);
@@ -135,14 +155,34 @@ pub fn writeVecAll(bw: *BufferedWriter, data: [][]const u8) Writer.Error!void {
     }
 }

+/// If the number of bytes to write based on `data` and `splat` fits inside
+/// `unusedCapacitySlice`, this function is guaranteed to not fail, not call
+/// into the underlying writer, and return the full number of bytes.
 pub fn writeSplat(bw: *BufferedWriter, data: []const []const u8, splat: usize) Writer.Error!usize {
     return passthruWriteSplat(bw, data, splat);
 }

+/// If the total number of bytes of `data` fits inside `unusedCapacitySlice`,
+/// this function is guaranteed to not fail, not call into the underlying
+/// writer, and return the total bytes inside `data`.
 pub fn writeVec(bw: *BufferedWriter, data: []const []const u8) Writer.Error!usize {
     return passthruWriteSplat(bw, data, 1);
 }

+/// Equivalent to `writeSplat` but writes at most `limit` bytes.
+pub fn writeSplatLimit(
+    bw: *BufferedWriter,
+    data: []const []const u8,
+    splat: usize,
+    limit: Writer.Limit,
+) Writer.Error!usize {
+    _ = bw;
+    _ = data;
+    _ = splat;
+    _ = limit;
+    @panic("TODO");
+}
+
 fn passthruWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) Writer.Error!usize {
     const bw: *BufferedWriter = @alignCast(@ptrCast(context));
     const buffer = bw.buffer;
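
The `splat` parameter repeats the final slice; for example (an illustrative note, not from the diff):

// All slices except the last are written once; the last is written
// `splat` times.
const data: []const []const u8 = &.{ "Hx", "ab" };
// writeSplat(data, 3) writes "Hxababab": 2 + 2 * 3 = 8 bytes.
// writeSplatLimit(data, 3, .limited(5)) would stop after 5 bytes ("Hxaba"),
// per its doc comment — though its body is still a TODO in this commit.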
@@ -435,6 +475,9 @@ pub fn writeArraySwap(bw: *BufferedWriter, Elem: type, array: []const Elem) Writ
     @panic("TODO");
 }

+/// Unlike `writeSplat` and `writeVec`, this function will call into the
+/// underlying writer even if there is enough buffer capacity for the file
+/// contents.
 pub fn writeFile(
     bw: *BufferedWriter,
     file: std.fs.File,
@@ -1400,7 +1443,7 @@ fn writeMultipleOf7Leb128(bw: *BufferedWriter, value: anytype) Writer.Error!void
     comptime assert(value_info.bits % 7 == 0);
     var remaining = value;
     while (true) {
-        const buffer: []packed struct(u8) { bits: u7, more: bool } = @ptrCast(try bw.writableSlice(1));
+        const buffer: []packed struct(u8) { bits: u7, more: bool } = @ptrCast(try bw.writableSliceGreedy(1));
         for (buffer, 1..) |*byte, len| {
             const more = switch (value_info.signedness) {
                 .signed => remaining >> 6 != remaining >> (value_info.bits - 1),
@@ -77,7 +77,11 @@ pub const Limit = enum(usize) {
         return @enumFromInt(n);
     }

-    pub fn min(l: Limit, n: usize) usize {
+    pub fn min(a: Limit, b: Limit) Limit {
+        return @enumFromInt(@min(@intFromEnum(a), @intFromEnum(b)));
+    }
+
+    pub fn minInt(l: Limit, n: usize) usize {
         return @min(n, @intFromEnum(l));
     }
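
In short, `min` now combines two `Limit` values while the old integer form lives on as `minInt` — a sketch assuming the enum above:

const a: Limit = .limited(100);
const b: Limit = .limited(64);
_ = a.min(b); // Limit: .limited(64), the smaller of the two limits
_ = a.minInt(128); // usize: 100, an integer clamped by the limit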
@@ -33,10 +33,12 @@ pub const VTable = struct {
     writeFile: *const fn (
         ctx: ?*anyopaque,
         file: std.fs.File,
-        /// If this is `none`, `file` will be streamed. Otherwise, it will be
-        /// read positionally without affecting the seek position.
+        /// If this is `none`, `file` will be streamed, affecting the seek
+        /// position. Otherwise, it will be read positionally without affecting
+        /// the seek position.
         offset: Offset,
-        /// Maximum amount of bytes to read from the file.
+        /// Maximum amount of bytes to read from the file. Implementations may
+        /// assume that the file size does not exceed this amount.
         limit: Limit,
         /// Headers and trailers must be passed together so that in case `len` is
         /// zero, they can be forwarded directly to `VTable.writeVec`.
@@ -2344,11 +2344,11 @@ pub const Const = struct {

         const max_str_len = self.sizeInBaseUpperBound(base);
         const limbs_len = calcToStringLimbsBufferLen(self.limbs.len, base);
-        if (bw.writableSlice(max_str_len + @alignOf(Limb) - 1 + @sizeOf(Limb) * limbs_len)) |buf| {
+        if (bw.writableSliceGreedy(max_str_len + @alignOf(Limb) - 1 + @sizeOf(Limb) * limbs_len)) |buf| {
             const limbs: [*]Limb = @alignCast(@ptrCast(std.mem.alignPointer(buf[max_str_len..].ptr, @alignOf(Limb))));
             bw.advance(self.toString(buf[0..max_str_len], base, case, limbs[0..limbs_len]));
             return;
-        } else |_| if (bw.writableSlice(max_str_len)) |buf| {
+        } else |_| if (bw.writableSliceGreedy(max_str_len)) |buf| {
             const available_len = 64;
             var limbs: [calcToStringLimbsBufferLen(available_len, base)]Limb = undefined;
             if (limbs.len >= limbs_len) {
@@ -750,7 +750,7 @@ pub fn connectUnixSocket(path: []const u8) !Stream {
     );
     errdefer Stream.close(.{ .handle = sockfd });

-    var addr = try std.net.Address.initUnix(path);
+    var addr = try Address.initUnix(path);
     try posix.connect(sockfd, &addr.any, addr.getOsSockLen());

     return .{ .handle = sockfd };
@@ -1859,7 +1859,7 @@ pub const Stream = struct {
         bw: *std.io.BufferedWriter,
         limit: std.io.Reader.Limit,
     ) std.io.Reader.Error!usize {
-        const buf = limit.slice(try bw.writableSlice(1));
+        const buf = limit.slice(try bw.writableSliceGreedy(1));
         const status = try windows_readVec(context, &.{buf});
         bw.advance(status.len);
         return status;
@@ -2080,7 +2080,11 @@ pub const Stream = struct {
         return switch (native_os) {
             .windows => .{ .impl = stream },
             else => .{ .impl = .{
-                .fr = std.fs.File.reader(.{ .handle = stream.handle }),
+                .fr = .{
+                    .file = .{ .handle = stream.handle },
+                    .mode = .streaming,
+                    .seek_err = error.Unseekable,
+                },
                 .err = {},
             } },
         };
@@ -2090,7 +2094,10 @@ pub const Stream = struct {
         return switch (native_os) {
             .windows => .{ .impl = stream },
             else => .{ .impl = .{
-                .fw = std.fs.File.writer(.{ .handle = stream.handle }),
+                .fw = .{
+                    .file = .{ .handle = stream.handle },
+                    .mode = .streaming,
+                },
                 .err = {},
             } },
         };
@@ -2101,10 +2108,10 @@ pub const Stream = struct {

 pub const Server = struct {
     listen_address: Address,
-    stream: std.net.Stream,
+    stream: Stream,

     pub const Connection = struct {
-        stream: std.net.Stream,
+        stream: Stream,
         address: Address,
     };
@@ -386,8 +386,7 @@ pub fn generateSymbolInner(
             const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
             var space: Value.BigIntSpace = undefined;
             const int_val = val.toBigInt(&space, zcu);
-            int_val.writeTwosComplement((try bw.writableSlice(abi_size))[0..abi_size], endian);
-            bw.advance(abi_size);
+            int_val.writeTwosComplement((try bw.writableSlice(abi_size)), endian);
         },
         .err => |err| {
             const int = try pt.getErrorValue(err.name);
@@ -498,7 +497,7 @@ pub fn generateSymbolInner(
         .vector_type => |vector_type| {
             const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
             if (vector_type.child == .bool_type) {
-                const buffer = (try bw.writableSlice(abi_size))[0..abi_size];
+                const buffer = try bw.writableSlice(abi_size);
                 @memset(buffer, 0xaa);
                 var index: usize = 0;
                 const len = math.cast(usize, vector_type.len) orelse return error.Overflow;
@@ -535,7 +534,6 @@ pub fn generateSymbolInner(
                     },
                 }) byte.* |= mask else byte.* &= ~mask;
             }
-            bw.advance(abi_size);
         } else {
             switch (aggregate.storage) {
                 .bytes => |bytes| try bw.writeAll(bytes.toSlice(vector_type.len, ip)),
@@ -592,7 +590,7 @@ pub fn generateSymbolInner(
         .@"packed" => {
             const abi_size = math.cast(usize, ty.abiSize(zcu)) orelse return error.Overflow;
             const current_end, const current_count = .{ bw.end, bw.count };
-            const buffer = (try bw.writableSlice(abi_size))[0..abi_size];
+            const buffer = try bw.writableSlice(abi_size);
             @memset(buffer, 0);
             var bits: u16 = 0;
@@ -628,7 +626,6 @@ pub fn generateSymbolInner(
                 }
                 bits += @intCast(Type.fromInterned(field_ty).bitSize(zcu));
             }
-            bw.advance(abi_size);
         },
         .auto, .@"extern" => {
             const struct_begin = bw.count;
@@ -659,8 +659,7 @@ const Unit = struct {
         .eq => {
             // no length will ever work, so undercount and futz with the leb encoding to make up the missing byte
             op_len_bytes += 1;
-            std.leb.writeUnsignedExtended((bw.writableSlice(op_len_bytes) catch unreachable)[0..op_len_bytes], len - extended_op_bytes - op_len_bytes);
-            bw.advance(op_len_bytes);
+            std.leb.writeUnsignedExtended((bw.writableSlice(op_len_bytes) catch unreachable), len - extended_op_bytes - op_len_bytes);
             break;
         },
         .gt => op_len_bytes += 1,
@@ -849,8 +848,7 @@ const Entry = struct {
         .eq => {
             // no length will ever work, so undercount and futz with the leb encoding to make up the missing byte
             block_len_bytes += 1;
-            std.leb.writeUnsignedExtended((try bw.writableSlice(block_len_bytes))[0..block_len_bytes], len - abbrev_code_bytes - block_len_bytes);
-            bw.advance(block_len_bytes);
+            std.leb.writeUnsignedExtended((try bw.writableSlice(block_len_bytes)), len - abbrev_code_bytes - block_len_bytes);
             break;
         },
         .gt => block_len_bytes += 1,
@@ -870,10 +868,9 @@ const Entry = struct {
             // no length will ever work, so undercount and futz with the leb encoding to make up the missing byte
             op_len_bytes += 1;
             std.leb.writeUnsignedExtended(
-                (bw.writableSlice(op_len_bytes) catch unreachable)[0..op_len_bytes],
+                (bw.writableSlice(op_len_bytes) catch unreachable),
                 len - extended_op_bytes - op_len_bytes,
             );
-            bw.advance(op_len_bytes);
             break;
         },
         .gt => op_len_bytes += 1,
@@ -2009,7 +2006,7 @@ pub const WipNav = struct {
             .signed => abbrev_code.sdata,
             .unsigned => abbrev_code.udata,
         });
-        _ = try dibw.writableSlice(std.math.divCeil(usize, bits, 7) catch unreachable);
+        _ = try dibw.writableSliceGreedy(std.math.divCeil(usize, bits, 7) catch unreachable);
         var bit: usize = 0;
         var carry: u1 = 1;
         while (bit < bits) {
@@ -2033,7 +2030,7 @@ pub const WipNav = struct {
         const bytes = @max(ty.abiSize(zcu), std.math.divCeil(usize, bits, 8) catch unreachable);
         try dibw.writeLeb128(bytes);
         big_int.writeTwosComplement(
-            try dibw.writableSlice(@intCast(bytes)),
+            try dibw.writableSliceGreedy(@intCast(bytes)),
             wip_nav.dwarf.endian,
         );
         dibw.advance(@intCast(bytes));
@@ -6083,8 +6080,7 @@ fn writeInt(dwarf: *Dwarf, buf: []u8, int: u64) void {
 }

 fn writeIntTo(dwarf: *Dwarf, bw: *std.io.BufferedWriter, len: usize, int: u64) !void {
-    dwarf.writeInt((try bw.writableSlice(len))[0..len], int);
-    bw.advance(len);
+    dwarf.writeInt(try bw.writableSlice(len), int);
 }

 fn resolveReloc(dwarf: *Dwarf, source: u64, target: u64, size: u32) RelocError!void {
@@ -1254,7 +1254,6 @@ const vec_section_header_size = section_header_size + size_header_size;
 fn reserveVecSectionHeader(bw: *std.io.BufferedWriter) std.io.Writer.Error!u32 {
     const offset = bw.count;
     _ = try bw.writableSlice(vec_section_header_size);
-    bw.advance(vec_section_header_size);
     return @intCast(offset);
 }
@@ -1275,7 +1274,6 @@ const section_header_size = 1 + size_header_size;
 fn reserveSectionHeader(bw: *std.io.BufferedWriter) std.io.Writer.Error!u32 {
     const offset = bw.count;
     _ = try bw.writableSlice(section_header_size);
-    bw.advance(section_header_size);
     return @intCast(offset);
 }
@@ -1290,7 +1288,6 @@ const size_header_size = 5;
 fn reserveSizeHeader(bw: *std.io.BufferedWriter) std.io.Writer.Error!u32 {
     const offset = bw.count;
     _ = try bw.writableSlice(size_header_size);
-    bw.advance(size_header_size);
     return @intCast(offset);
 }
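
These reserve-*-Header helpers show the pattern the rename enables: `writableSlice` now both reserves and advances, so a header can be skipped over and patched once the section size is known. Roughly (my sketch, not from the diff; `header_size` is a placeholder):

const offset = bw.count; // remember where the header will go
_ = try bw.writableSlice(header_size); // reserve and advance in one call
// ... emit the section contents ...
// Later, index back to `offset` and fill in the real header, e.g. the
// section's final byte length.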
@@ -38,7 +38,7 @@ pub fn writeAddend(
     bw: *std.io.BufferedWriter,
 ) std.io.Writer.Error!void {
     const n = @divExact(@bitSizeOf(Int), 8);
-    var V: Int = mem.readInt(Int, (try bw.writableSlice(n))[0..n], .little);
+    var V: Int = mem.readInt(Int, (try bw.writableSliceGreedy(n))[0..n], .little);
     const addend: Int = @truncate(value);
     switch (op) {
         .add => V +|= addend, // TODO: I think saturating arithmetic is correct here