std.http: rewrite

WIP

commit 81b0d14e2b
parent 396464ee6b

lib/std/Uri.zig | 218
@@ -1,6 +1,13 @@
//! Uniform Resource Identifier (URI) parsing roughly adhering to <https://tools.ietf.org/html/rfc3986>.
//! Does not do perfect grammar and character class checking, but should be robust against URIs in the wild.

const std = @import("std.zig");
const testing = std.testing;
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;

const Uri = @This();

scheme: []const u8,
user: ?Component = null,
password: ?Component = null,
@@ -10,6 +17,32 @@ path: Component = Component.empty,
query: ?Component = null,
fragment: ?Component = null,

pub const host_name_max = 255;

/// Returned value may point into `buffer` or be the original string.
///
/// Suggested buffer length: `host_name_max`.
///
/// See also:
/// * `getHostAlloc`
pub fn getHost(uri: Uri, buffer: []u8) error{ UriMissingHost, UriHostTooLong }![]const u8 {
    const component = uri.host orelse return error.UriMissingHost;
    return component.toRaw(buffer) catch |err| switch (err) {
        error.NoSpaceLeft => return error.UriHostTooLong,
    };
}

/// Returned value may point into `buffer` or be the original string.
///
/// See also:
/// * `getHost`
pub fn getHostAlloc(uri: Uri, arena: Allocator) error{ UriMissingHost, UriHostTooLong, OutOfMemory }![]const u8 {
    const component = uri.host orelse return error.UriMissingHost;
    const result = try component.toRawMaybeAlloc(arena);
    if (result.len > host_name_max) return error.UriHostTooLong;
    return result;
}

pub const Component = union(enum) {
    /// Invalid characters in this component must be percent encoded
    /// before being printed as part of a URI.
@@ -26,11 +59,22 @@ pub const Component = union(enum) {
        };
    }

    /// Returned value may point into `buffer` or be the original string.
    pub fn toRaw(component: Component, buffer: []u8) error{NoSpaceLeft}![]const u8 {
        return switch (component) {
            .raw => |raw| raw,
            .percent_encoded => |percent_encoded| if (std.mem.indexOfScalar(u8, percent_encoded, '%')) |_|
                try std.fmt.bufPrint(buffer, "{fraw}", .{component})
            else
                percent_encoded,
        };
    }

    /// Allocates the result with `arena` only if needed, so the result should not be freed.
    pub fn toRawMaybeAlloc(
        component: Component,
        arena: std.mem.Allocator,
    ) std.mem.Allocator.Error![]const u8 {
        arena: Allocator,
    ) Allocator.Error![]const u8 {
        return switch (component) {
            .raw => |raw| raw,
            .percent_encoded => |percent_encoded| if (std.mem.indexOfScalar(u8, percent_encoded, '%')) |_|
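Aside: a hedged usage sketch of the buffer-based host accessor introduced above. The URL and test harness are illustrative only, and this assumes the post-rewrite `std.Uri` API shown in this diff.

```zig
test "getHost sketch" {
    const uri = try std.Uri.parse("https://example.com/download");
    // `host_name_max` bounds the host, so a fixed stack buffer suffices;
    // the returned slice may point into `buf` or into the original text.
    var buf: [std.Uri.host_name_max]u8 = undefined;
    try std.testing.expectEqualStrings("example.com", try uri.getHost(&buf));
}
```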
@@ -144,17 +188,15 @@ pub const ParseError = error{ UnexpectedCharacter, InvalidFormat, InvalidPort };
/// The return value will contain strings pointing into the original `text`.
/// Each component that is provided will be non-`null`.
pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
    var reader = SliceReader{ .slice = text };

    var uri: Uri = .{ .scheme = scheme, .path = undefined };
    var i: usize = 0;

    if (reader.peekPrefix("//")) a: { // authority part
        std.debug.assert(reader.get().? == '/');
        std.debug.assert(reader.get().? == '/');

        const authority = reader.readUntil(isAuthoritySeparator);
    if (std.mem.startsWith(u8, text, "//")) a: {
        i = std.mem.indexOfAnyPos(u8, text, 2, &authority_sep) orelse text.len;
        const authority = text[2..i];
        if (authority.len == 0) {
            if (reader.peekPrefix("/")) break :a else return error.InvalidFormat;
            if (!std.mem.startsWith(u8, text[2..], "/")) return error.InvalidFormat;
            break :a;
        }

        var start_of_host: usize = 0;
@@ -204,16 +246,18 @@ pub fn parseAfterScheme(scheme: []const u8, text: []const u8) ParseError!Uri {
        uri.host = .{ .percent_encoded = authority[start_of_host..end_of_host] };
    }

    uri.path = .{ .percent_encoded = reader.readUntil(isPathSeparator) };
    const path_start = i;
    i = std.mem.indexOfAnyPos(u8, text, path_start, &path_sep) orelse text.len;
    uri.path = .{ .percent_encoded = text[path_start..i] };

    if ((reader.peek() orelse 0) == '?') { // query part
        std.debug.assert(reader.get().? == '?');
        uri.query = .{ .percent_encoded = reader.readUntil(isQuerySeparator) };
    if (std.mem.startsWith(u8, text[i..], "?")) {
        const query_start = i + 1;
        i = std.mem.indexOfScalarPos(u8, text, query_start, '#') orelse text.len;
        uri.query = .{ .percent_encoded = text[query_start..i] };
    }

    if ((reader.peek() orelse 0) == '#') { // fragment part
        std.debug.assert(reader.get().? == '#');
        uri.fragment = .{ .percent_encoded = reader.readUntilEof() };
    if (std.mem.startsWith(u8, text[i..], "#")) {
        uri.fragment = .{ .percent_encoded = text[i + 1 ..] };
    }

    return uri;
@@ -291,41 +335,33 @@ pub fn format(uri: Uri, bw: *std.io.BufferedWriter, comptime fmt: []const u8) st
    }, bw);
}

/// Parses the URI or returns an error.
/// The return value will contain strings pointing into the
/// original `text`. Each component that is provided, will be non-`null`.
/// The return value will contain strings pointing into the original `text`.
/// Each component that is provided will be non-`null`.
pub fn parse(text: []const u8) ParseError!Uri {
    var reader: SliceReader = .{ .slice = text };
    const scheme = reader.readWhile(isSchemeChar);

    // after the scheme, a ':' must appear
    if (reader.get()) |c| {
        if (c != ':')
            return error.UnexpectedCharacter;
    } else {
        return error.InvalidFormat;
    }

    return parseAfterScheme(scheme, reader.readUntilEof());
    const end = for (text, 0..) |byte, i| {
        if (!isSchemeChar(byte)) break i;
    } else text.len;
    // After the scheme, a ':' must appear.
    if (end >= text.len) return error.InvalidFormat;
    if (text[end] != ':') return error.UnexpectedCharacter;
    return parseAfterScheme(text[0..end], text[end + 1 ..]);
}

pub const ResolveInPlaceError = ParseError || error{NoSpaceLeft};

/// Resolves a URI against a base URI, conforming to RFC 3986, Section 5.
/// Copies `new` to the beginning of `aux_buf.*`, allowing the slices to overlap,
/// then parses `new` as a URI, and then resolves the path in place.
///
/// Assumes new location is already copied to the beginning of `aux_buf.*`.
/// Parses that new location as a URI, and then resolves the path in place.
///
/// If a merge needs to take place, the newly constructed path will be stored
/// in `aux_buf.*` just after the copied `new`, and `aux_buf.*` will be modified
/// to only contain the remaining unused space.
pub fn resolve_inplace(base: Uri, new: []const u8, aux_buf: *[]u8) ResolveInPlaceError!Uri {
    std.mem.copyForwards(u8, aux_buf.*, new);
    // At this point, new is an invalid pointer.
    const new_mut = aux_buf.*[0..new.len];
    aux_buf.* = aux_buf.*[new.len..];

    const new_parsed = parse(new_mut) catch |err|
        (parseAfterScheme("", new_mut) catch return err);
    // As you can see above, `new_mut` is not a const pointer.
/// in `aux_buf.*` just after the copied location, and `aux_buf.*` will be
/// modified to only contain the remaining unused space.
pub fn resolveInPlace(base: Uri, new_len: usize, aux_buf: *[]u8) ResolveInPlaceError!Uri {
    const new = aux_buf.*[0..new_len];
    const new_parsed = parse(new) catch |err| (parseAfterScheme("", new) catch return err);
    aux_buf.* = aux_buf.*[new_len..];
    // As you can see above, `new` is not a const pointer.
    const new_path: []u8 = @constCast(new_parsed.path.percent_encoded);

    if (new_parsed.scheme.len > 0) return .{
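Aside: a hedged sketch of the new `resolveInPlace` calling convention described above — the caller now copies the redirect location to the front of the auxiliary buffer before the call. The helper name and buffer handling are illustrative, not part of the diff, and assume `aux_buf` is large enough.

```zig
fn resolveSketch(base: std.Uri, location: []const u8, aux_buf: *[]u8) !std.Uri {
    // New contract: `location` must already sit at the start of `aux_buf.*`.
    std.mem.copyForwards(u8, aux_buf.*, location);
    // On return, `aux_buf.*` has been shrunk to the remaining unused space.
    return std.Uri.resolveInPlace(base, location.len, aux_buf);
}
```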
@@ -438,59 +474,6 @@ fn merge_paths(base: Component, new: []u8, aux_buf: *[]u8) error{NoSpaceLeft}!Co
    return merged_path;
}

const SliceReader = struct {
    const Self = @This();

    slice: []const u8,
    offset: usize = 0,

    fn get(self: *Self) ?u8 {
        if (self.offset >= self.slice.len)
            return null;
        const c = self.slice[self.offset];
        self.offset += 1;
        return c;
    }

    fn peek(self: Self) ?u8 {
        if (self.offset >= self.slice.len)
            return null;
        return self.slice[self.offset];
    }

    fn readWhile(self: *Self, comptime predicate: fn (u8) bool) []const u8 {
        const start = self.offset;
        var end = start;
        while (end < self.slice.len and predicate(self.slice[end])) {
            end += 1;
        }
        self.offset = end;
        return self.slice[start..end];
    }

    fn readUntil(self: *Self, comptime predicate: fn (u8) bool) []const u8 {
        const start = self.offset;
        var end = start;
        while (end < self.slice.len and !predicate(self.slice[end])) {
            end += 1;
        }
        self.offset = end;
        return self.slice[start..end];
    }

    fn readUntilEof(self: *Self) []const u8 {
        const start = self.offset;
        self.offset = self.slice.len;
        return self.slice[start..];
    }

    fn peekPrefix(self: Self, prefix: []const u8) bool {
        if (self.offset + prefix.len > self.slice.len)
            return false;
        return std.mem.eql(u8, self.slice[self.offset..][0..prefix.len], prefix);
    }
};

/// scheme = ALPHA *( ALPHA / DIGIT / "+" / "-" / "." )
fn isSchemeChar(c: u8) bool {
    return switch (c) {
@@ -499,19 +482,6 @@ fn isSchemeChar(c: u8) bool {
    };
}

/// reserved = gen-delims / sub-delims
fn isReserved(c: u8) bool {
    return isGenLimit(c) or isSubLimit(c);
}

/// gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
fn isGenLimit(c: u8) bool {
    return switch (c) {
        ':', ',', '?', '#', '[', ']', '@' => true,
        else => false,
    };
}

/// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
///            / "*" / "+" / "," / ";" / "="
fn isSubLimit(c: u8) bool {
@@ -551,26 +521,8 @@ fn isQueryChar(c: u8) bool {

const isFragmentChar = isQueryChar;

fn isAuthoritySeparator(c: u8) bool {
    return switch (c) {
        '/', '?', '#' => true,
        else => false,
    };
}

fn isPathSeparator(c: u8) bool {
    return switch (c) {
        '?', '#' => true,
        else => false,
    };
}

fn isQuerySeparator(c: u8) bool {
    return switch (c) {
        '#' => true,
        else => false,
    };
}
const authority_sep: [3]u8 = .{ '/', '?', '#' };
const path_sep: [2]u8 = .{ '?', '#' };

test "basic" {
    const parsed = try parse("https://ziglang.org/download");
@@ -851,7 +803,3 @@ test "URI malformed input" {
    try std.testing.expectError(error.InvalidFormat, std.Uri.parse("http://]@["));
    try std.testing.expectError(error.InvalidFormat, std.Uri.parse("http://lo]s\x85hc@[/8\x10?0Q"));
}

const std = @import("std.zig");
const testing = std.testing;
const Uri = @This();
lib/std/http.zig | 752
@@ -1,6 +1,9 @@
const builtin = @import("builtin");
const std = @import("std.zig");
const assert = std.debug.assert;

pub const Client = @import("http/Client.zig");
pub const Server = @import("http/Server.zig");
pub const protocol = @import("http/protocol.zig");
pub const HeadParser = @import("http/HeadParser.zig");
pub const ChunkParser = @import("http/ChunkParser.zig");
pub const HeaderIterator = @import("http/HeaderIterator.zig");
@@ -77,7 +80,9 @@ pub const Method = enum(u64) {
    };
}

/// An HTTP method is idempotent if an identical request can be made once or several times in a row with the same effect while leaving the server in the same state.
/// An HTTP method is idempotent if an identical request can be made once
/// or several times in a row with the same effect while leaving the server
/// in the same state.
///
/// https://developer.mozilla.org/en-US/docs/Glossary/Idempotent
///
@@ -90,7 +95,8 @@ pub const Method = enum(u64) {
    };
}

/// A cacheable response is an HTTP response that can be cached, that is stored to be retrieved and used later, saving a new request to the server.
/// A cacheable response can be stored to be retrieved and used later,
/// saving a new request to the server.
///
/// https://developer.mozilla.org/en-US/docs/Glossary/cacheable
///
@@ -282,10 +288,10 @@ pub const Status = enum(u10) {
    }
};

/// compression is intentionally omitted here since it is handled in `ContentEncoding`.
pub const TransferEncoding = enum {
    chunked,
    none,
    // compression is intentionally omitted here, as std.http.Client stores it as content-encoding
};

pub const ContentEncoding = enum {
@@ -308,18 +314,740 @@ pub const Header = struct {
    value: []const u8,
};

const builtin = @import("builtin");
const std = @import("std.zig");
pub const Reader = struct {
    in: *std.io.BufferedReader,
    /// Keeps track of whether the stream is ready to accept a new request,
    /// making invalid API usage cause assertion failures rather than HTTP
    /// protocol violations.
    state: State,
    /// Number of bytes of HTTP trailers. These are at the end of a
    /// transfer-encoding: chunked message.
    trailers_len: usize = 0,
    body_state: union {
        none: void,
        remaining_content_length: u64,
        remaining_chunk_len: RemainingChunkLen,
    },
    body_err: ?BodyError = null,
    /// Stolen from `in`.
    head_buffer: []u8 = &.{},

    pub const max_chunk_header_len = 22;

    pub const RemainingChunkLen = enum(u64) {
        head = 0,
        n = 1,
        rn = 2,
        done = std.math.maxInt(u64),
        _,

        pub fn init(integer: u64) RemainingChunkLen {
            return @enumFromInt(integer);
        }

        pub fn int(rcl: RemainingChunkLen) u64 {
            return @intFromEnum(rcl);
        }
    };
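Aside: the enum above doubles as an integer. Values 0, 1, and 2 are the sentinel states `head`, `n`, and `rn`; any larger value means that many bytes remain, counting the chunk's trailing "\r\n". A small illustrative check, not part of the diff:

```zig
test "RemainingChunkLen encoding sketch" {
    const Len = std.http.Reader.RemainingChunkLen;
    // A chunk with 10 data bytes left is stored as 10 + 2; the +2 covers
    // the "\r\n" that terminates every chunk.
    try std.testing.expectEqual(@as(u64, 12), Len.init(10 + 2).int());
    // Draining down to exactly 2 leaves only the "\r\n" (.rn state).
    try std.testing.expectEqual(Len.rn, Len.init(2));
}
```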
    pub const State = enum {
        /// The stream is available to be used for the first time, or reused.
        ready,
        receiving_head,
        received_head,
        receiving_body,
        /// The stream would be eligible for another HTTP request, however the
        /// client and server did not negotiate a persistent connection.
        closing,
    };

    pub const BodyError = error{
        HttpChunkInvalid,
        HttpHeadersOversize,
    };

    pub const HeadError = error{
        /// Too many bytes of HTTP headers.
        ///
        /// The HTTP specification suggests responding with a 431 status code
        /// before closing the connection.
        HttpHeadersOversize,
        /// Partial HTTP request was received but the connection was closed
        /// before fully receiving the headers.
        HttpRequestTruncated,
        /// The client sent 0 bytes of headers before closing the stream. This
        /// happens when a keep-alive connection is finally closed.
        HttpConnectionClosing,
        /// Transitive error occurred reading from `in`.
        ReadFailed,
    };

    /// Buffers the entire head into `head_buffer`, invalidating the previous
    /// `head_buffer`, if any.
    pub fn receiveHead(reader: *Reader) HeadError!void {
        const in = reader.in;
        in.restitute(reader.head_buffer.len);
        in.rebase();
        var hp: HeadParser = .{};
        var head_end: usize = 0;
        while (true) {
            if (head_end >= in.buffer.len) return error.HttpHeadersOversize;
            const buf = in.peekGreedy(head_end + 1) catch |err| switch (err) {
                error.EndOfStream => switch (head_end) {
                    0 => return error.HttpConnectionClosing,
                    else => return error.HttpRequestTruncated,
                },
                error.ReadFailed => return error.ReadFailed,
            };
            head_end += hp.feed(buf[head_end..]);
            if (hp.state == .finished) {
                reader.head_buffer = in.steal(head_end);
                return;
            }
        }
    }
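Aside: a hedged sketch of the keep-alive loop this error set is shaped for, assuming the `Reader` API in this diff; `handleRequest` is a hypothetical application callback.

```zig
fn handleRequest(head: []const u8) !void {
    _ = head; // application logic would parse the head and respond here
}

fn serveConnection(reader: *std.http.Reader) !void {
    while (true) {
        reader.receiveHead() catch |err| switch (err) {
            // Normal end of a keep-alive connection: the peer sent 0 bytes.
            error.HttpConnectionClosing => return,
            else => |e| return e,
        };
        // `head_buffer` holds the complete head until the next receiveHead.
        try handleRequest(reader.head_buffer);
    }
}
```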
    /// Asserts only called once and after `receiveHead`.
    pub fn interface(reader: *Reader, transfer_encoding: TransferEncoding, content_length: ?u64) std.io.Reader {
        assert(reader.state == .received_head);
        reader.state = .receiving_body;
        switch (transfer_encoding) {
            .chunked => {
                reader.body_state = .{ .remaining_chunk_len = .head };
                return .{
                    .context = reader,
                    .vtable = &.{
                        .read = &chunkedRead,
                        .readVec = &chunkedReadVec,
                        .discard = &chunkedDiscard,
                    },
                };
            },
            .none => {
                if (content_length) |len| {
                    reader.body_state = .{ .remaining_content_length = len };
                    return .{
                        .context = reader,
                        .vtable = &.{
                            .read = &contentLengthRead,
                            .readVec = &contentLengthReadVec,
                            .discard = &contentLengthDiscard,
                        },
                    };
                } else {
                    return reader.in.reader();
                }
            },
        }
    }
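Aside: the dispatch above picks one of three body framings. A minimal call-site sketch, assuming the vtable-based `std.io.Reader` used throughout this diff; in practice `te` and `len` come from the parsed head.

```zig
fn bodyReader(r: *std.http.Reader, te: std.http.TransferEncoding, len: ?u64) std.io.Reader {
    // .chunked       -> chunk-decoding callbacks
    // .none with len -> content-length-counting callbacks
    // .none, no len  -> the connection's raw reader (body ends with stream)
    return r.interface(te, len);
}
```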
    fn contentLengthRead(
        ctx: ?*anyopaque,
        bw: *std.io.BufferedWriter,
        limit: std.io.Reader.Limit,
    ) std.io.Reader.RwError!usize {
        const reader: *Reader = @alignCast(@ptrCast(ctx));
        const remaining_content_length = &reader.body_state.remaining_content_length;
        const remaining = remaining_content_length.*;
        if (remaining == 0) {
            reader.state = .ready;
            return error.EndOfStream;
        }
        const n = try reader.in.read(bw, limit.min(.limited(remaining)));
        remaining_content_length.* = remaining - n;
        return n;
    }

    fn contentLengthReadVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
        const reader: *Reader = @alignCast(@ptrCast(context));
        const remaining_content_length = &reader.body_state.remaining_content_length;
        const remaining = remaining_content_length.*;
        if (remaining == 0) {
            reader.state = .ready;
            return error.EndOfStream;
        }
        const n = try reader.in.readVecLimit(data, .limited(remaining));
        remaining_content_length.* = remaining - n;
        return n;
    }

    fn contentLengthDiscard(ctx: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
        const reader: *Reader = @alignCast(@ptrCast(ctx));
        const remaining_content_length = &reader.body_state.remaining_content_length;
        const remaining = remaining_content_length.*;
        if (remaining == 0) {
            reader.state = .ready;
            return error.EndOfStream;
        }
        const n = try reader.in.discard(limit.min(.limited(remaining)));
        remaining_content_length.* = remaining - n;
        return n;
    }

    fn chunkedRead(
        ctx: ?*anyopaque,
        bw: *std.io.BufferedWriter,
        limit: std.io.Reader.Limit,
    ) std.io.Reader.RwError!usize {
        const reader: *Reader = @alignCast(@ptrCast(ctx));
        const chunk_len_ptr = &reader.body_state.remaining_chunk_len;
        const in = reader.in;
        len: switch (chunk_len_ptr.*) {
            .head => {
                var cp: ChunkParser = .init;
                const i = cp.feed(in.bufferContents());
                switch (cp.state) {
                    .invalid => return reader.failBody(error.HttpChunkInvalid),
                    .data => {
                        if (i > max_chunk_header_len) return reader.failBody(error.HttpChunkInvalid);
                        in.toss(i);
                    },
                    else => {
                        try in.fill(max_chunk_header_len);
                        const next_i = cp.feed(in.bufferContents()[i..]);
                        if (cp.state != .data) return reader.failBody(error.HttpChunkInvalid);
                        const header_len = i + next_i;
                        if (header_len > max_chunk_header_len) return reader.failBody(error.HttpChunkInvalid);
                        in.toss(header_len);
                    },
                }
                if (cp.chunk_len == 0) return parseTrailers(reader, 0);
                const n = try in.read(bw, limit.min(.limited(cp.chunk_len)));
                chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
                return n;
            },
            .n => {
                if ((try in.peekByte()) != '\n') return reader.failBody(error.HttpChunkInvalid);
                in.toss(1);
                continue :len .head;
            },
            .rn => {
                const rn = try in.peekArray(2);
                if (rn[0] != '\r' or rn[1] != '\n') return reader.failBody(error.HttpChunkInvalid);
                in.toss(2);
                continue :len .head;
            },
            else => |remaining_chunk_len| {
                const n = try in.read(bw, limit.min(.limited(@intFromEnum(remaining_chunk_len) - 2)));
                chunk_len_ptr.* = .init(@intFromEnum(remaining_chunk_len) - n);
                return n;
            },
            .done => return error.EndOfStream,
        }
    }

    fn chunkedReadVec(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
        const reader: *Reader = @alignCast(@ptrCast(ctx));
        const chunk_len_ptr = &reader.body_state.remaining_chunk_len;
        const in = reader.in;
        var already_requested_more = false;
        var amt_read: usize = 0;
        data: for (data) |d| {
            len: switch (chunk_len_ptr.*) {
                .head => {
                    var cp: ChunkParser = .init;
                    const available_buffer = in.bufferContents();
                    const i = cp.feed(available_buffer);
                    if (cp.state == .invalid) return reader.failBody(error.HttpChunkInvalid);
                    if (i == available_buffer.len) {
                        if (already_requested_more) {
                            chunk_len_ptr.* = .head;
                            return amt_read;
                        }
                        already_requested_more = true;
                        try in.fill(max_chunk_header_len);
                        const next_i = cp.feed(in.bufferContents()[i..]);
                        if (cp.state != .data) return reader.failBody(error.HttpChunkInvalid);
                        const header_len = i + next_i;
                        if (header_len > max_chunk_header_len) return reader.failBody(error.HttpChunkInvalid);
                        in.toss(header_len);
                    } else {
                        if (i > max_chunk_header_len) return reader.failBody(error.HttpChunkInvalid);
                        in.toss(i);
                    }
                    if (cp.chunk_len == 0) return parseTrailers(reader, amt_read);
                    continue :len .init(cp.chunk_len + 2);
                },
                .n => {
                    if (in.bufferContents().len < 1) already_requested_more = true;
                    if ((try in.takeByte()) != '\n') return reader.failBody(error.HttpChunkInvalid);
                    continue :len .head;
                },
                .rn => {
                    if (in.bufferContents().len < 2) already_requested_more = true;
                    const rn = try in.takeArray(2);
                    if (rn[0] != '\r' or rn[1] != '\n') return reader.failBody(error.HttpChunkInvalid);
                    continue :len .head;
                },
                else => |remaining_chunk_len| {
                    const available_buffer = in.bufferContents();
                    const copy_len = @min(available_buffer.len, d.len, remaining_chunk_len.int() - 2);
                    @memcpy(d[0..copy_len], available_buffer[0..copy_len]);
                    amt_read += copy_len;
                    in.toss(copy_len);
                    const next_chunk_len: RemainingChunkLen = .init(remaining_chunk_len.int() - copy_len);
                    if (copy_len == d.len) {
                        chunk_len_ptr.* = next_chunk_len;
                        continue :data;
                    }
                    if (already_requested_more) {
                        chunk_len_ptr.* = next_chunk_len;
                        return amt_read;
                    }
                    already_requested_more = true;
                    try in.fill(3);
                    continue :len next_chunk_len;
                },
                .done => return error.EndOfStream,
            }
        }
        return amt_read;
    }

    fn chunkedDiscard(ctx: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
        const reader: *Reader = @alignCast(@ptrCast(ctx));
        const chunk_len_ptr = &reader.body_state.remaining_chunk_len;
        const in = reader.in;
        len: switch (chunk_len_ptr.*) {
            .head => {
                var cp: ChunkParser = .init;
                const i = cp.feed(in.bufferContents());
                switch (cp.state) {
                    .invalid => return reader.failBody(error.HttpChunkInvalid),
                    .data => {
                        if (i > max_chunk_header_len) return reader.failBody(error.HttpChunkInvalid);
                        in.toss(i);
                    },
                    else => {
                        try in.fill(max_chunk_header_len);
                        const next_i = cp.feed(in.bufferContents()[i..]);
                        if (cp.state != .data) return reader.failBody(error.HttpChunkInvalid);
                        const header_len = i + next_i;
                        if (header_len > max_chunk_header_len) return reader.failBody(error.HttpChunkInvalid);
                        in.toss(header_len);
                    },
                }
                if (cp.chunk_len == 0) return parseTrailers(reader, 0);
                const n = try in.discard(limit.min(.limited(cp.chunk_len)));
                chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
                return n;
            },
            .n => {
                if ((try in.peekByte()) != '\n') return reader.failBody(error.HttpChunkInvalid);
                in.toss(1);
                continue :len .head;
            },
            .rn => {
                const rn = try in.peekArray(2);
                if (rn[0] != '\r' or rn[1] != '\n') return reader.failBody(error.HttpChunkInvalid);
                in.toss(2);
                continue :len .head;
            },
            else => |remaining_chunk_len| {
                const n = try in.discard(limit.min(.limited(remaining_chunk_len.int() - 2)));
                chunk_len_ptr.* = .init(remaining_chunk_len.int() - n);
                return n;
            },
            .done => return error.EndOfStream,
        }
    }

    /// Called when the next bytes in the stream are trailers, or "\r\n" to
    /// indicate the end of a chunked body.
    fn parseTrailers(reader: *Reader, amt_read: usize) std.io.Reader.Error!usize {
        const in = reader.in;
        var hp: HeadParser = .{};
        var trailers_len: usize = 0;
        while (true) {
            if (trailers_len >= in.buffer.len) return reader.failBody(error.HttpHeadersOversize);
            try in.fill(trailers_len + 1);
            trailers_len += hp.feed(in.bufferContents()[trailers_len..]);
            if (hp.state == .finished) {
                reader.body_state.remaining_chunk_len = .done;
                reader.state = .ready;
                reader.trailers_len = trailers_len;
                return amt_read;
            }
        }
    }

    fn failBody(r: *Reader, err: BodyError) error{ReadFailed} {
        r.body_err = err;
        return error.ReadFailed;
    }
};
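Aside: for reference, the wire format those chunked callbacks decode, checked against `ChunkParser` (an existing std type re-exported above). Illustrative test, not part of the diff.

```zig
test "chunk header wire format sketch" {
    // A chunked body looks like "4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n":
    // hex length, CRLF, data, CRLF, repeated, then a zero-length chunk
    // that introduces the (possibly empty) trailers.
    var cp: std.http.ChunkParser = .init;
    const consumed = cp.feed("4\r\nWiki");
    try std.testing.expectEqual(@as(usize, 3), consumed); // "4\r\n"
    try std.testing.expectEqual(@as(u64, 4), cp.chunk_len);
}
```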
/// Request or response body.
pub const BodyWriter = struct {
    /// Until the lifetime of `BodyWriter` ends, it is illegal to modify the
    /// state of this other than via methods of `BodyWriter`.
    http_protocol_output: *std.io.BufferedWriter,
    state: State,
    elide: bool,
    err: Error!void = {},

    pub const Error = error{
        /// Attempted to write a file to the stream, an expensive operation
        /// that should be avoided when `elide` is true.
        UnableToElideBody,
    };
    pub const WriteError = std.io.Writer.Error;

    /// How many zeroes to reserve for hex-encoded chunk length.
    const chunk_len_digits = 8;
    const max_chunk_len: usize = std.math.pow(usize, 16, chunk_len_digits) - 1;
    const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";

    comptime {
        assert(max_chunk_len == std.math.maxInt(u32));
    }

    pub const State = union(enum) {
        /// End of connection signals the end of the stream.
        none,
        /// As a debugging utility, counts down to zero as bytes are written.
        content_length: u64,
        /// Each chunk is wrapped in a header and trailer.
        chunked: Chunked,
        /// Cleanly finished stream; connection can be reused.
        end,

        pub const Chunked = union(enum) {
            /// Index of the hex-encoded chunk length in the chunk header
            /// within the buffer of `BodyWriter.http_protocol_output`.
            offset: usize,
            /// We are in the middle of a chunk and this is how many bytes are
            /// left until the next header. This includes +2 for "\r\n", and
            /// is zero for the beginning of the stream.
            chunk_len: usize,

            pub const init: Chunked = .{ .chunk_len = 0 };
        };
    };

    /// Sends all buffered data across `BodyWriter.http_protocol_output`.
    ///
    /// Some buffered data will remain if transfer-encoding is chunked and the
    /// BodyWriter is mid-chunk.
    pub fn flush(w: *BodyWriter) WriteError!void {
        switch (w.state) {
            .none, .content_length => return w.http_protocol_output.flush(),
            .chunked => |*chunked| switch (chunked.*) {
                .offset => |*offset| {
                    try w.http_protocol_output.flushLimit(.limited(w.http_protocol_output.end - offset.*));
                    offset.* = 0;
                },
                .chunk_len => return w.http_protocol_output.flush(),
            },
        }
    }

    /// When using content-length, asserts that the amount of data sent matches
    /// the value sent in the header, then flushes.
    ///
    /// When using transfer-encoding: chunked, writes the end-of-stream message
    /// with empty trailers, then flushes the stream to the system. Asserts any
    /// started chunk has been completely finished.
    ///
    /// Respects the value of `elide` to omit all data after the headers.
    ///
    /// See also:
    /// * `endUnflushed`
    /// * `endChunked`
    pub fn end(w: *BodyWriter) WriteError!void {
        try endUnflushed(w);
        try w.http_protocol_output.flush();
    }

    /// When using content-length, asserts that the amount of data sent matches
    /// the value sent in the header.
    ///
    /// Otherwise, transfer-encoding: chunked is being used, and it writes the
    /// end-of-stream message with empty trailers.
    ///
    /// Respects the value of `elide` to omit all data after the headers.
    ///
    /// See also:
    /// * `end`
    /// * `endChunked`
    pub fn endUnflushed(w: *BodyWriter) WriteError!void {
        switch (w.state) {
            .content_length => |len| {
                assert(len == 0); // Trips when end() called before all bytes written.
                w.state = .end;
            },
            .none => {},
            .chunked => return endChunked(w, .{}),
        }
    }

    pub const EndChunkedOptions = struct {
        trailers: []const Header = &.{},
    };

    /// Writes the end-of-stream message and any optional trailers.
    ///
    /// Does not flush.
    ///
    /// Asserts that the BodyWriter is using transfer-encoding: chunked.
    ///
    /// Respects the value of `elide` to omit all data after the headers.
    ///
    /// See also:
    /// * `end`
    /// * `endUnflushed`
    pub fn endChunked(w: *BodyWriter, options: EndChunkedOptions) WriteError!void {
        const chunked = &w.state.chunked;
        if (w.elide) {
            w.state = .end;
            return;
        }
        const bw = w.http_protocol_output;
        switch (chunked.*) {
            .offset => |offset| {
                const chunk_len = bw.end - offset - chunk_header_template.len;
                writeHex(bw.buffer[offset..][0..chunk_len_digits], chunk_len);
                try bw.writeAll("\r\n");
            },
            .chunk_len => |chunk_len| switch (chunk_len) {
                0 => {},
                1 => try bw.writeByte('\n'),
                2 => try bw.writeAll("\r\n"),
                else => unreachable, // An earlier write call indicated more data would follow.
            },
        }
        if (options.trailers.len > 0) {
            try bw.writeAll("0\r\n");
            for (options.trailers) |trailer| {
                try bw.writeAll(trailer.name);
                try bw.writeAll(": ");
                try bw.writeAll(trailer.value);
                try bw.writeAll("\r\n");
            }
            try bw.writeAll("\r\n");
        }
        w.state = .end;
    }

    fn contentLengthWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
        const w: *BodyWriter = @alignCast(@ptrCast(context));
        const n = if (w.elide) countSplat(data, splat) else try w.http_protocol_output.writeSplat(data, splat);
        w.state.content_length -= n;
        return n;
    }

    fn noneWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
        const w: *BodyWriter = @alignCast(@ptrCast(context));
        if (w.elide) return countSplat(data, splat);
        return w.http_protocol_output.writeSplat(data, splat);
    }

    fn countSplat(data: []const []const u8, splat: usize) usize {
        if (data.len == 0) return 0;
        var total: usize = 0;
        for (data[0 .. data.len - 1]) |buf| total += buf.len;
        total += data[data.len - 1].len * splat;
        return total;
    }
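Aside: a worked example of the splat accounting above — every slice but the last counts once, and the last counts `splat` times. Illustrative test, assuming it sits next to the private function.

```zig
test "countSplat sketch" {
    // {"ab", "c", "xy"} with splat = 3 counts 2 + 1 + (2 * 3) = 9 bytes.
    const data: []const []const u8 = &.{ "ab", "c", "xy" };
    try std.testing.expectEqual(@as(usize, 9), countSplat(data, 3));
}
```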
    fn elideWriteFile(
        w: *BodyWriter,
        offset: std.io.Writer.Offset,
        limit: std.io.Writer.Limit,
        headers_and_trailers: []const []const u8,
    ) WriteError!usize {
        if (offset != .none) {
            if (countWriteFile(limit, headers_and_trailers)) |n| {
                return n;
            }
        }
        w.err = error.UnableToElideBody;
        return error.WriteFailed;
    }

    /// Returns `null` if size cannot be computed without making any syscalls.
    fn countWriteFile(limit: std.io.Writer.Limit, headers_and_trailers: []const []const u8) ?usize {
        var total: usize = limit.toInt() orelse return null;
        for (headers_and_trailers) |buf| total += buf.len;
        return total;
    }

    fn noneWriteFile(
        context: ?*anyopaque,
        file: std.fs.File,
        offset: std.io.Writer.Offset,
        limit: std.io.Writer.Limit,
        headers_and_trailers: []const []const u8,
        headers_len: usize,
    ) std.io.Writer.FileError!usize {
        if (limit == .nothing) return noneWriteSplat(context, headers_and_trailers, 1);
        const w: *BodyWriter = @alignCast(@ptrCast(context));
        if (w.elide) return elideWriteFile(w, offset, limit, headers_and_trailers);
        return w.http_protocol_output.writeFile(file, offset, limit, headers_and_trailers, headers_len);
    }

    fn contentLengthWriteFile(
        context: ?*anyopaque,
        file: std.fs.File,
        offset: std.io.Writer.Offset,
        limit: std.io.Writer.Limit,
        headers_and_trailers: []const []const u8,
        headers_len: usize,
    ) std.io.Writer.FileError!usize {
        if (limit == .nothing) return contentLengthWriteSplat(context, headers_and_trailers, 1);
        const w: *BodyWriter = @alignCast(@ptrCast(context));
        if (w.elide) return elideWriteFile(w, offset, limit, headers_and_trailers);
        const n = try w.http_protocol_output.writeFile(file, offset, limit, headers_and_trailers, headers_len);
        w.state.content_length -= n;
        return n;
    }

    fn chunkedWriteFile(
        context: ?*anyopaque,
        file: std.fs.File,
        offset: std.io.Writer.Offset,
        limit: std.io.Writer.Limit,
        headers_and_trailers: []const []const u8,
        headers_len: usize,
    ) std.io.Writer.FileError!usize {
        if (limit == .nothing) return chunkedWriteSplat(context, headers_and_trailers, 1);
        const w: *BodyWriter = @alignCast(@ptrCast(context));
        if (w.elide) return elideWriteFile(w, offset, limit, headers_and_trailers);
        const data_len = countWriteFile(limit, headers_and_trailers) orelse @panic("TODO");
        const bw = w.http_protocol_output;
        const chunked = &w.state.chunked;
        state: switch (chunked.*) {
            .offset => |off| {
                // TODO: is it better perf to read small files into the buffer?
                const buffered_len = bw.end - off - chunk_header_template.len;
                const chunk_len = data_len + buffered_len;
                writeHex(bw.buffer[off..][0..chunk_len_digits], chunk_len);
                const n = try bw.writeFile(file, offset, limit, headers_and_trailers, headers_len);
                chunked.* = .{ .chunk_len = data_len + 2 - n };
                return n;
            },
            .chunk_len => |chunk_len| l: switch (chunk_len) {
                0 => {
                    const header_buf = try bw.writableArray(chunk_header_template.len);
                    const off = bw.end;
                    @memcpy(header_buf, chunk_header_template);
                    chunked.* = .{ .offset = off };
                    continue :state .{ .offset = off };
                },
                1 => {
                    try bw.writeByte('\n');
                    chunked.chunk_len = 0;
                    continue :l 0;
                },
                2 => {
                    try bw.writeByte('\r');
                    chunked.chunk_len = 1;
                    continue :l 1;
                },
                else => {
                    const new_limit = limit.min(.limited(chunk_len - 2));
                    const n = try bw.writeFile(file, offset, new_limit, headers_and_trailers, headers_len);
                    chunked.chunk_len = chunk_len - n;
                    return n;
                },
            },
        }
    }

    fn chunkedWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
        const w: *BodyWriter = @alignCast(@ptrCast(context));
        const data_len = countSplat(data, splat);
        if (w.elide) return data_len;

        const bw = w.http_protocol_output;
        const chunked = &w.state.chunked;

        state: switch (chunked.*) {
            .offset => |offset| {
                if (bw.unusedCapacitySlice().len >= data_len) {
                    assert(data_len == (bw.writeSplat(data, splat) catch unreachable));
                    return data_len;
                }
                const buffered_len = bw.end - offset - chunk_header_template.len;
                const chunk_len = data_len + buffered_len;
                writeHex(bw.buffer[offset..][0..chunk_len_digits], chunk_len);
                const n = try bw.writeSplat(data, splat);
                chunked.* = .{ .chunk_len = data_len + 2 - n };
                return n;
            },
            .chunk_len => |chunk_len| l: switch (chunk_len) {
                0 => {
                    const header_buf = try bw.writableArray(chunk_header_template.len);
                    const offset = bw.end;
                    @memcpy(header_buf, chunk_header_template);
                    chunked.* = .{ .offset = offset };
                    continue :state .{ .offset = offset };
                },
                1 => {
                    try bw.writeByte('\n');
                    chunked.chunk_len = 0;
                    continue :l 0;
                },
                2 => {
                    try bw.writeByte('\r');
                    chunked.chunk_len = 1;
                    continue :l 1;
                },
                else => {
                    const n = try bw.writeSplatLimit(data, splat, .limited(chunk_len - 2));
                    chunked.chunk_len = chunk_len - n;
                    return n;
                },
            },
        }
    }

    /// Writes an integer as base 16 to `buf`, right-aligned, assuming the
    /// buffer has already been filled with zeroes.
    fn writeHex(buf: []u8, x: usize) void {
        assert(std.mem.allEqual(u8, buf, '0'));
        const base = 16;
        var index: usize = buf.len;
        var a = x;
        while (a > 0) {
            const digit = a % base;
            index -= 1;
            buf[index] = std.fmt.digitToChar(@intCast(digit), .lower);
            a /= base;
        }
    }
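Aside: a worked example of the backfill trick above — the reserved chunk header is all '0' digits, and the real length is written right-aligned over it once known. Illustrative test, not part of the diff.

```zig
test "writeHex sketch" {
    var buf = [_]u8{'0'} ** 8;
    writeHex(&buf, 0x1234);
    // Right-aligned over the zero padding: "00001234".
    try std.testing.expectEqualStrings("00001234", &buf);
}
```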
    pub fn interface(w: *BodyWriter) std.io.Writer {
        return .{
            .context = w,
            .vtable = switch (w.state) {
                .none => &.{
                    .writeSplat = noneWriteSplat,
                    .writeFile = noneWriteFile,
                },
                .content_length => &.{
                    .writeSplat = contentLengthWriteSplat,
                    .writeFile = contentLengthWriteFile,
                },
                .chunked => &.{
                    .writeSplat = chunkedWriteSplat,
                    .writeFile = chunkedWriteFile,
                },
            },
        };
    }
};

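Aside: a hedged end-to-end sketch of the writer side, assuming the `BodyWriter` API above and that the returned `std.io.Writer` exposes a `writeAll` helper; the connection's buffered output is taken as given.

```zig
fn sendFixedBody(out: *std.io.BufferedWriter, body: []const u8) !void {
    var body_writer: std.http.BodyWriter = .{
        .http_protocol_output = out,
        .state = .{ .content_length = body.len },
        .elide = false,
    };
    var w = body_writer.interface();
    try w.writeAll(body); // counts state.content_length down to zero
    try body_writer.end(); // asserts the full length was written, then flushes
}
```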
test {
    _ = Server;
    _ = Status;
    _ = Method;
    _ = ChunkParser;
    _ = HeadParser;
    _ = WebSocket;

    if (builtin.os.tag != .wasi) {
        _ = Client;
        _ = Method;
        _ = Server;
        _ = Status;
        _ = HeadParser;
        _ = ChunkParser;
        _ = WebSocket;
        _ = @import("http/test.zig");
    }
}
lib/std/http/ChunkParser.zig

@@ -1,5 +1,8 @@
//! Parser for transfer-encoding: chunked.

const ChunkParser = @This();
const std = @import("std");

state: State,
chunk_len: u64,

@@ -97,9 +100,6 @@ pub fn feed(p: *ChunkParser, bytes: []const u8) usize {
    return bytes.len;
}

const ChunkParser = @This();
const std = @import("std");

test feed {
    const testing = std.testing;

File diff suppressed because it is too large
lib/std/http/Server.zig

@@ -1,142 +1,59 @@
//! Blocking HTTP server implementation.
//! Handles a single connection's lifecycle.
//! Handles a single connection lifecycle.

const std = @import("../std.zig");
const http = std.http;
const mem = std.mem;
const net = std.net;
const Uri = std.Uri;
const assert = std.debug.assert;
const testing = std.testing;

const Server = @This();

/// The reader's buffer must be large enough to store the client's entire HTTP
/// header, otherwise `receiveHead` returns `error.HttpHeadersOversize`.
in: *std.io.BufferedReader,
/// Data from the HTTP server to the HTTP client.
out: *std.io.BufferedWriter,
/// Keeps track of whether the Server is ready to accept a new request on the
/// same connection, and makes invalid API usage cause assertion failures
/// rather than HTTP protocol violations.
state: State,
/// Populated when `receiveHead` returns `ReceiveHeadError.HttpHeadersInvalid`.
head_parse_err: ?Request.Head.ParseError = null,

pub const State = enum {
    /// The connection is available to be used for the first time, or reused.
    ready,
    /// An error occurred in `receiveHead`.
    receiving_head,
    /// A Request object has been obtained and from there a Response can be
    /// opened.
    received_head,
    /// The client is uploading something to this Server.
    receiving_body,
    /// The connection is eligible for another HTTP request, however the client
    /// and server did not negotiate a persistent connection.
    closing,
};
/// Internal state managed by this abstraction.
reader: http.Reader,

/// Initialize an HTTP server that can respond to multiple requests on the same
/// connection.
///
/// The buffer of `in` must be large enough to store the client's entire HTTP
/// header, otherwise `receiveHead` returns `error.HttpHeadersOversize`.
///
/// The returned `Server` is ready for `receiveHead` to be called.
pub fn init(in: *std.io.BufferedReader, out: *std.io.BufferedWriter) Server {
    return .{
        .in = in,
        .reader = .{
            .in = in,
            .state = .ready,
        },
        .out = out,
        .state = .ready,
    };
}

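Aside: a hedged sketch of the per-connection loop around this API, assuming the buffered stream pair already exists; error handling beyond connection close is elided, and a real handler would respond to each request.

```zig
fn serveConnection(in: *std.io.BufferedReader, out: *std.io.BufferedWriter) !void {
    var server: std.http.Server = .init(in, out);
    while (true) {
        var request = server.receiveHead() catch |err| switch (err) {
            // Keep-alive peer closed the connection; not an error.
            error.HttpConnectionClosing => return,
            else => |e| return e,
        };
        _ = &request; // a real handler would call request.respondStreaming(...)
    }
}
```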
pub const ReceiveHeadError = error{
    /// Client sent too many bytes of HTTP headers.
    /// The HTTP specification suggests responding with a 431 status code
    /// before closing the connection.
    HttpHeadersOversize,
    /// Client sent headers that did not conform to the HTTP protocol;
    /// `head_parse_err` is populated.
pub const ReceiveHeadError = http.Reader.HeadError || error{
    /// Client sent headers that did not conform to the HTTP protocol.
    ///
    /// To find out more detailed diagnostics, `http.Reader.head_buffer` can be
    /// passed directly to `Request.Head.parse`.
    HttpHeadersInvalid,
    /// Partial HTTP request was received but the connection was closed before
    /// fully receiving the headers.
    HttpRequestTruncated,
    /// The client sent 0 bytes of headers before closing the stream.
    /// In other words, a keep-alive connection was finally closed.
    HttpConnectionClosing,
    /// Transitive error occurred reading from `in`.
    ReadFailed,
};

/// The header bytes reference the internal storage of `in`, which are
/// invalidated with the next call to `receiveHead`.
pub fn receiveHead(s: *Server) ReceiveHeadError!Request {
    assert(s.state == .ready);
    s.state = .received_head;
    errdefer s.state = .receiving_head;

    const in = s.in;
    var hp: http.HeadParser = .{};
    var head_end: usize = 0;

    while (true) {
        if (head_end >= in.buffer.len) return error.HttpHeadersOversize;
        const buf = in.peekGreedy(head_end + 1) catch |err| switch (err) {
            error.EndOfStream => switch (head_end) {
                0 => return error.HttpConnectionClosing,
                else => return error.HttpRequestTruncated,
            },
            error.ReadFailed => return error.ReadFailed,
        };
        head_end += hp.feed(buf[head_end..]);
        if (hp.state == .finished) return .{
            .server = s,
            .head_end = head_end,
            .head = Request.Head.parse(buf[0..head_end]) catch |err| {
                s.head_parse_err = err;
                return error.HttpHeadersInvalid;
            },
            .reader_state = undefined,
        };
    }
pub fn receiveHead(s: *Server) http.Reader.HeadError!Request {
    try s.reader.receiveHead();
    return .{
        .server = s,
        // No need to track the returned error here since users can repeat the
        // parse with the header buffer to get detailed diagnostics.
        .head = Request.Head.parse(s.reader.head_buffer) catch return error.HttpHeadersInvalid,
    };
}

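Aside: the comment above suggests re-running the parse for diagnostics. A sketch of that pattern; the function name is illustrative, not part of the diff.

```zig
fn diagnoseBadHead(server: *std.http.Server) void {
    // After error.HttpHeadersInvalid, re-parse the retained head buffer to
    // recover the specific parse error for logging.
    if (std.http.Server.Request.Head.parse(server.reader.head_buffer)) |_| {
        // Parsed fine; the failure was elsewhere.
    } else |parse_err| {
        std.log.err("invalid request head: {s}", .{@errorName(parse_err)});
    }
}
```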
pub const Request = struct {
|
||||
server: *Server,
|
||||
/// Index into `Server.in` internal buffer.
|
||||
head_end: usize,
|
||||
/// Number of bytes of HTTP trailers. These are at the end of a
|
||||
/// transfer-encoding: chunked message.
|
||||
trailers_len: usize = 0,
|
||||
/// Pointers in this struct are invalidated with the next call to
|
||||
/// `receiveHead`.
|
||||
head: Head,
|
||||
reader_state: union {
|
||||
remaining_content_length: u64,
|
||||
remaining_chunk_len: RemainingChunkLen,
|
||||
},
|
||||
read_err: ?ReadError = null,
|
||||
|
||||
pub const ReadError = error{
|
||||
HttpChunkInvalid,
|
||||
HttpHeadersOversize,
|
||||
};
|
||||
|
||||
pub const max_chunk_header_len = 22;
|
||||
|
||||
pub const RemainingChunkLen = enum(u64) {
|
||||
head = 0,
|
||||
n = 1,
|
||||
rn = 2,
|
||||
done = std.math.maxInt(u64),
|
||||
_,
|
||||
|
||||
pub fn init(integer: u64) RemainingChunkLen {
|
||||
return @enumFromInt(integer);
|
||||
}
|
||||
|
||||
pub fn int(rcl: RemainingChunkLen) u64 {
|
||||
return @intFromEnum(rcl);
|
||||
}
|
||||
};
|
||||
|
||||
pub const Compression = union(enum) {
|
||||
deflate: std.compress.zlib.Decompressor,
|
||||
@ -308,7 +225,7 @@ pub const Request = struct {
|
||||
};
|
||||
|
||||
pub fn iterateHeaders(r: *Request) http.HeaderIterator {
|
||||
return http.HeaderIterator.init(r.server.in.bufferContents()[0..r.head_end]);
|
||||
return http.HeaderIterator.init(r.server.reader.head_buffer);
|
||||
}
|
||||
|
||||
test iterateHeaders {
|
||||
@ -332,10 +249,8 @@ pub const Request = struct {
|
||||
|
||||
var request: Request = .{
|
||||
.server = &server,
|
||||
.head_end = request_bytes.len,
|
||||
.trailers_len = 0,
|
||||
.head = undefined,
|
||||
.reader_state = undefined,
|
||||
};
|
||||
|
||||
var it = request.iterateHeaders();
|
||||
@ -511,7 +426,8 @@ pub const Request = struct {
|
||||
respond_options: RespondOptions = .{},
|
||||
};
|
||||
|
||||
/// The header is not guaranteed to be sent until `Response.flush` is called.
|
||||
/// The header is not guaranteed to be sent until `BodyWriter.flush` or
|
||||
/// `BodyWriter.end` is called.
|
||||
///
|
||||
/// If the request contains a body and the connection is to be reused,
|
||||
/// discards the request body, leaving the Server in the `ready` state. If
|
||||
@ -519,13 +435,13 @@ pub const Request = struct {
|
||||
/// no error is surfaced.
|
||||
///
|
||||
/// HEAD requests are handled transparently by setting the
|
||||
/// `Response.elide_body` flag on the returned `Response`, causing
|
||||
/// `BodyWriter.elide` flag on the returned `BodyWriter`, causing
|
||||
/// the response stream to omit the body. However, it may be worth noticing
|
||||
/// that flag and skipping any expensive work that would otherwise need to
|
||||
/// be done to satisfy the request.
|
||||
///
|
||||
/// Asserts status is not `continue`.
|
||||
pub fn respondStreaming(request: *Request, options: RespondStreamingOptions) std.io.Writer.Error!Response {
|
||||
pub fn respondStreaming(request: *Request, options: RespondStreamingOptions) std.io.Writer.Error!http.BodyWriter {
|
||||
const o = options.respond_options;
|
||||
assert(o.status != .@"continue");
|
||||
const transfer_encoding_none = (o.transfer_encoding orelse .chunked) == .none;
|
||||
@ -573,7 +489,7 @@ pub const Request = struct {
|
||||
};
|
||||
|
||||
return .{
|
||||
.server_output = request.server.out,
|
||||
.http_protocol_output = request.server.out,
|
||||
.transfer_encoding = if (o.transfer_encoding) |te| switch (te) {
|
||||
.chunked => .{ .chunked = .init },
|
||||
.none => .none,
|
||||
@ -584,242 +500,6 @@ pub const Request = struct {
|
||||
};
|
||||
}
|
||||
|
||||
fn contentLengthRead(
|
||||
ctx: ?*anyopaque,
|
||||
bw: *std.io.BufferedWriter,
|
||||
limit: std.io.Reader.Limit,
|
||||
) std.io.Reader.RwError!usize {
|
||||
const request: *Request = @alignCast(@ptrCast(ctx));
|
||||
const remaining_content_length = &request.reader_state.remaining_content_length;
|
||||
const remaining = remaining_content_length.*;
|
||||
const server = request.server;
|
||||
if (remaining == 0) {
|
||||
server.state = .ready;
|
||||
return error.EndOfStream;
|
||||
}
|
||||
const n = try server.in.read(bw, limit.min(.limited(remaining)));
|
||||
const new_remaining = remaining - n;
|
||||
remaining_content_length.* = new_remaining;
|
||||
return n;
|
||||
}
|
||||
|
||||
fn contentLengthReadVec(context: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
|
||||
const request: *Request = @alignCast(@ptrCast(context));
|
||||
const remaining_content_length = &request.reader_state.remaining_content_length;
|
||||
const server = request.server;
|
||||
const remaining = remaining_content_length.*;
|
||||
if (remaining == 0) {
|
||||
server.state = .ready;
|
||||
return error.EndOfStream;
|
||||
}
|
||||
const n = try server.in.readVecLimit(data, .limited(remaining));
|
||||
const new_remaining = remaining - n;
|
||||
remaining_content_length.* = new_remaining;
|
||||
return n;
|
||||
}
|
||||
|
||||
fn contentLengthDiscard(ctx: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
|
||||
const request: *Request = @alignCast(@ptrCast(ctx));
|
||||
const remaining_content_length = &request.reader_state.remaining_content_length;
|
||||
const server = request.server;
|
||||
const remaining = remaining_content_length.*;
|
||||
if (remaining == 0) {
|
||||
server.state = .ready;
|
||||
return error.EndOfStream;
|
||||
}
|
||||
const n = try server.in.discard(limit.min(.limited(remaining)));
|
||||
const new_remaining = remaining - n;
|
||||
remaining_content_length.* = new_remaining;
|
||||
return n;
|
||||
}
|
||||
|
||||
fn chunkedRead(
|
||||
ctx: ?*anyopaque,
|
||||
bw: *std.io.BufferedWriter,
|
||||
limit: std.io.Reader.Limit,
|
||||
) std.io.Reader.RwError!usize {
|
||||
const request: *Request = @alignCast(@ptrCast(ctx));
|
||||
const chunk_len_ptr = &request.reader_state.remaining_chunk_len;
|
||||
const in = request.server.in;
|
||||
len: switch (chunk_len_ptr.*) {
|
||||
.head => {
|
||||
var cp: http.ChunkParser = .init;
|
||||
const i = cp.feed(in.bufferContents());
|
||||
switch (cp.state) {
|
||||
.invalid => return request.failRead(error.HttpChunkInvalid),
|
||||
.data => {
|
||||
if (i > max_chunk_header_len) return request.failRead(error.HttpChunkInvalid);
|
||||
in.toss(i);
|
||||
},
|
||||
else => {
|
||||
try in.fill(max_chunk_header_len);
|
||||
const next_i = cp.feed(in.bufferContents()[i..]);
|
||||
if (cp.state != .data) return request.failRead(error.HttpChunkInvalid);
|
||||
const header_len = i + next_i;
|
||||
if (header_len > max_chunk_header_len) return request.failRead(error.HttpChunkInvalid);
|
||||
in.toss(header_len);
|
||||
},
|
||||
}
|
||||
if (cp.chunk_len == 0) return parseTrailers(request, 0);
|
||||
const n = try in.read(bw, limit.min(.limited(cp.chunk_len)));
|
||||
chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
|
||||
return n;
|
||||
},
|
||||
.n => {
|
||||
if ((try in.peekByte()) != '\n') return request.failRead(error.HttpChunkInvalid);
|
||||
in.toss(1);
|
||||
continue :len .head;
|
||||
},
|
||||
.rn => {
|
||||
const rn = try in.peekArray(2);
|
||||
if (rn[0] != '\r' or rn[1] != '\n') return request.failRead(error.HttpChunkInvalid);
|
||||
in.toss(2);
|
||||
continue :len .head;
|
||||
},
|
||||
else => |remaining_chunk_len| {
|
||||
const n = try in.read(bw, limit.min(.limited(@intFromEnum(remaining_chunk_len) - 2)));
|
||||
chunk_len_ptr.* = .init(@intFromEnum(remaining_chunk_len) - n);
|
||||
return n;
|
||||
},
|
||||
.done => return error.EndOfStream,
|
||||
}
|
||||
}
|
||||
|
||||
fn chunkedReadVec(ctx: ?*anyopaque, data: []const []u8) std.io.Reader.Error!usize {
|
||||
const request: *Request = @alignCast(@ptrCast(ctx));
|
||||
const chunk_len_ptr = &request.reader_state.remaining_chunk_len;
|
||||
const in = request.server.in;
|
||||
var already_requested_more = false;
|
||||
var amt_read: usize = 0;
|
||||
data: for (data) |d| {
|
||||
len: switch (chunk_len_ptr.*) {
|
||||
.head => {
|
||||
var cp: http.ChunkParser = .init;
|
||||
const available_buffer = in.bufferContents();
|
||||
const i = cp.feed(available_buffer);
|
||||
if (cp.state == .invalid) return request.failRead(error.HttpChunkInvalid);
|
||||
if (i == available_buffer.len) {
|
||||
if (already_requested_more) {
|
||||
chunk_len_ptr.* = .head;
|
||||
return amt_read;
|
||||
}
|
||||
already_requested_more = true;
|
||||
try in.fill(max_chunk_header_len);
|
||||
const next_i = cp.feed(in.bufferContents()[i..]);
|
||||
if (cp.state != .data) return request.failRead(error.HttpChunkInvalid);
|
||||
const header_len = i + next_i;
|
||||
if (header_len > max_chunk_header_len) return request.failRead(error.HttpChunkInvalid);
|
||||
in.toss(header_len);
|
||||
} else {
|
||||
if (i > max_chunk_header_len) return request.failRead(error.HttpChunkInvalid);
|
||||
in.toss(i);
|
||||
}
|
||||
if (cp.chunk_len == 0) return parseTrailers(request, amt_read);
|
||||
continue :len .init(cp.chunk_len + 2);
|
||||
},
|
||||
.n => {
|
||||
if (in.bufferContents().len < 1) already_requested_more = true;
|
||||
if ((try in.takeByte()) != '\n') return request.failRead(error.HttpChunkInvalid);
|
||||
continue :len .head;
|
||||
},
|
||||
.rn => {
|
||||
if (in.bufferContents().len < 2) already_requested_more = true;
|
||||
const rn = try in.takeArray(2);
|
||||
if (rn[0] != '\r' or rn[1] != '\n') return request.failRead(error.HttpChunkInvalid);
|
||||
continue :len .head;
|
||||
},
|
||||
else => |remaining_chunk_len| {
|
||||
const available_buffer = in.bufferContents();
|
||||
const copy_len = @min(available_buffer.len, d.len, remaining_chunk_len.int() - 2);
|
||||
@memcpy(d[0..copy_len], available_buffer[0..copy_len]);
|
||||
amt_read += copy_len;
|
||||
in.toss(copy_len);
|
||||
const next_chunk_len: RemainingChunkLen = .init(remaining_chunk_len.int() - copy_len);
|
||||
if (copy_len == d.len) {
|
||||
chunk_len_ptr.* = next_chunk_len;
|
||||
continue :data;
|
||||
}
|
||||
if (already_requested_more) {
|
||||
chunk_len_ptr.* = next_chunk_len;
|
||||
return amt_read;
|
||||
}
|
||||
already_requested_more = true;
|
||||
try in.fill(3);
|
||||
continue :len next_chunk_len;
|
||||
},
|
||||
.done => return error.EndOfStream,
|
||||
}
|
||||
}
|
||||
return amt_read;
|
||||
}
|
||||
|
||||
    fn chunkedDiscard(ctx: ?*anyopaque, limit: std.io.Reader.Limit) std.io.Reader.Error!usize {
        const request: *Request = @alignCast(@ptrCast(ctx));
        const chunk_len_ptr = &request.reader_state.remaining_chunk_len;
        const in = request.server.in;
        len: switch (chunk_len_ptr.*) {
            .head => {
                var cp: http.ChunkParser = .init;
                const i = cp.feed(in.bufferContents());
                switch (cp.state) {
                    .invalid => return request.failRead(error.HttpChunkInvalid),
                    .data => {
                        if (i > max_chunk_header_len) return request.failRead(error.HttpChunkInvalid);
                        in.toss(i);
                    },
                    else => {
                        try in.fill(max_chunk_header_len);
                        const next_i = cp.feed(in.bufferContents()[i..]);
                        if (cp.state != .data) return request.failRead(error.HttpChunkInvalid);
                        const header_len = i + next_i;
                        if (header_len > max_chunk_header_len) return request.failRead(error.HttpChunkInvalid);
                        in.toss(header_len);
                    },
                }
                if (cp.chunk_len == 0) return parseTrailers(request, 0);
                const n = try in.discard(limit.min(.limited(cp.chunk_len)));
                chunk_len_ptr.* = .init(cp.chunk_len + 2 - n);
                return n;
            },
            .n => {
                if ((try in.peekByte()) != '\n') return request.failRead(error.HttpChunkInvalid);
                in.toss(1);
                continue :len .head;
            },
            .rn => {
                const rn = try in.peekArray(2);
                if (rn[0] != '\r' or rn[1] != '\n') return request.failRead(error.HttpChunkInvalid);
                in.toss(2);
                continue :len .head;
            },
            else => |remaining_chunk_len| {
                const n = try in.discard(limit.min(.limited(remaining_chunk_len.int() - 2)));
                chunk_len_ptr.* = .init(remaining_chunk_len.int() - n);
                return n;
            },
            .done => return error.EndOfStream,
        }
    }

    /// Called when the next bytes in the stream are trailers, or "\r\n" to
    /// indicate the end of the chunked body.
    fn parseTrailers(request: *Request, amt_read: usize) std.io.Reader.Error!usize {
        const in = request.server.in;
        var hp: http.HeadParser = .{};
        var trailers_len: usize = 0;
        while (true) {
            if (trailers_len >= in.buffer.len) return request.failRead(error.HttpHeadersOversize);
            try in.fill(trailers_len + 1);
            trailers_len += hp.feed(in.bufferContents()[trailers_len..]);
            if (hp.state == .finished) {
                request.reader_state.remaining_chunk_len = .done;
                request.server.state = .ready;
                request.trailers_len = trailers_len;
                return amt_read;
            }
        }
    }

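    // A trailer section terminates exactly like a header section, which is why it
    // can be driven with the same `http.HeadParser`. A minimal sketch (the trailer
    // name is hypothetical, not part of this commit):
    //
    //     test "trailer section ends like a header section" {
    //         var hp: std.http.HeadParser = .{};
    //         const trailers = "X-Checksum: abc123\r\n\r\n";
    //         try std.testing.expectEqual(trailers.len, hp.feed(trailers));
    //         try std.testing.expectEqual(.finished, hp.state);
    //     }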
    pub const ReaderError = error{
        /// Failed to write "100-continue" to the stream.
        WriteFailed,

@@ -837,10 +517,7 @@ pub const Request = struct {
    ///
    /// Asserts that this function is only called once.
    pub fn reader(request: *Request) ReaderError!std.io.Reader {
        const s = request.server;
        assert(s.state == .received_head);
        s.state = .receiving_body;

        assert(request.server.reader.state == .received_head);
        if (request.head.expect) |expect| {
            if (mem.eql(u8, expect, "100-continue")) {
                try request.server.out.writeAll("HTTP/1.1 100 Continue\r\n\r\n");
@@ -849,36 +526,11 @@ pub const Request = struct {
                return error.HttpExpectationFailed;
            }
        }

        switch (request.head.transfer_encoding) {
            .chunked => {
                request.reader_state = .{ .remaining_chunk_len = .head };
                return .{
                    .context = request,
                    .vtable = &.{
                        .read = &chunkedRead,
                        .readVec = &chunkedReadVec,
                        .discard = &chunkedDiscard,
                    },
                };
            },
            .none => {
                request.reader_state = .{
                    .remaining_content_length = request.head.content_length orelse 0,
                };
                return .{
                    .context = request,
                    .vtable = &.{
                        .read = &contentLengthRead,
                        .readVec = &contentLengthReadVec,
                        .discard = &contentLengthDiscard,
                    },
                };
            },
        }
        return request.server.reader.interface(request.head.transfer_encoding, request.head.content_length);
    }

    /// Returns whether the connection should remain persistent.
    ///
    /// If it would fail, it instead sets the Server state to `receiving_body`
    /// and returns false.
    fn discardBody(request: *Request, keep_alive: bool) bool {
@@ -890,12 +542,12 @@ pub const Request = struct {
        // or the request body.
        // If the connection won't be kept alive, then none of this matters
        // because the connection will be severed after the response is sent.
        const s = request.server;
        if (keep_alive and request.head.keep_alive) switch (s.state) {
        const r = &request.server.reader;
        if (keep_alive and request.head.keep_alive) switch (r.state) {
            .received_head => {
                const r = request.reader() catch return false;
                _ = r.discardRemaining() catch return false;
                assert(s.state == .ready);
                const reader_interface = request.reader() catch return false;
                _ = reader_interface.discardRemaining() catch return false;
                assert(r.state == .ready);
                return true;
            },
            .receiving_body, .ready => return true,
@@ -903,378 +555,10 @@ pub const Request = struct {
        };

        // Avoid clobbering the state in case a reading stream already exists.
        switch (s.state) {
            .received_head => s.state = .closing,
        switch (r.state) {
            .received_head => r.state = .closing,
            else => {},
        }
        return false;
    }

    fn failRead(r: *Request, err: ReadError) error{ReadFailed} {
        r.read_err = err;
        return error.ReadFailed;
    }
};

pub const Response = struct {
    /// HTTP protocol to the client.
    ///
    /// This is the underlying stream; use `buffered` to create a
    /// `BufferedWriter` for this `Response`.
    ///
    /// Until the lifetime of `Response` ends, it is illegal to modify the
    /// state of this other than via methods of `Response`.
    server_output: *std.io.BufferedWriter,
    /// `null` means transfer-encoding: chunked.
    /// As a debugging utility, counts down to zero as bytes are written.
    transfer_encoding: TransferEncoding,
    elide_body: bool,
    err: Error!void = {},

    pub const Error = error{
        /// Attempted to write a file to the stream, an expensive operation
        /// that should be avoided when `elide_body` is true.
        UnableToElideBody,
    };
    pub const WriteError = std.io.Writer.Error;

    /// How many zeroes to reserve for hex-encoded chunk length.
    const chunk_len_digits = 8;
    const max_chunk_len: usize = std.math.pow(usize, 16, chunk_len_digits) - 1;
    const chunk_header_template = ("0" ** chunk_len_digits) ++ "\r\n";

    comptime {
        assert(max_chunk_len == std.math.maxInt(u32));
    }
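    // Layout sketch (inferred from `chunk_header_template` above): each chunk is
    // emitted as
    //
    //     "0000002a\r\n" ++ <0x2a bytes of data> ++ "\r\n"
    //
    // where the eight reserved '0' digits are patched in place by `writeHex` once
    // the chunk's final length is known, so the header can sit in the output
    // buffer before the chunk body is complete.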
    pub const TransferEncoding = union(enum) {
        /// End of connection signals the end of the stream.
        none,
        /// As a debugging utility, counts down to zero as bytes are written.
        content_length: u64,
        /// Each chunk is wrapped in a header and trailer.
        chunked: Chunked,

        pub const Chunked = union(enum) {
            /// Index of the hex-encoded chunk length in the chunk header
            /// within the buffer of `Response.server_output`.
            offset: usize,
            /// We are in the middle of a chunk and this is how many bytes are
            /// left until the next header. This includes +2 for "\r\n", and
            /// is zero for the beginning of the stream.
            chunk_len: usize,

            pub const init: Chunked = .{ .chunk_len = 0 };
        };
    };

    /// Sends all buffered data across `Response.server_output`.
    ///
    /// Some buffered data will remain if transfer-encoding is chunked and the
    /// response is mid-chunk.
    pub fn flush(r: *Response) WriteError!void {
        switch (r.transfer_encoding) {
            .none, .content_length => return r.server_output.flush(),
            .chunked => |*chunked| switch (chunked.*) {
                .offset => |*offset| {
                    try r.server_output.flushLimit(.limited(r.server_output.end - offset.*));
                    offset.* = 0;
                },
                .chunk_len => return r.server_output.flush(),
            },
        }
    }

    /// When using content-length, asserts that the amount of data sent
    /// matches the content-length value provided in the HTTP header, then
    /// flushes.
    ///
    /// When using transfer-encoding: chunked, writes the end-of-stream message
    /// with empty trailers, then flushes the stream to the system. Asserts any
    /// started chunk has been completely finished.
    ///
    /// Respects the value of `elide_body` to omit all data after the headers.
    ///
    /// Sets `r` to undefined.
    ///
    /// See also:
    /// * `endUnflushed`
    /// * `endChunked`
    pub fn end(r: *Response) WriteError!void {
        try endUnflushed(r);
        try r.server_output.flush();
        r.* = undefined;
    }

    /// When using content-length, asserts that the amount of data sent matches
    /// the value sent in the header.
    ///
    /// Otherwise, transfer-encoding: chunked is being used, and it writes the
    /// end-of-stream message with empty trailers.
    ///
    /// Respects the value of `elide_body` to omit all data after the headers.
    ///
    /// See also:
    /// * `end`
    /// * `endChunked`
    pub fn endUnflushed(r: *Response) WriteError!void {
        switch (r.transfer_encoding) {
            .content_length => |len| assert(len == 0), // Trips when end() called before all bytes written.
            .none => {},
            .chunked => try endChunked(r, .{}),
        }
    }

    pub const EndChunkedOptions = struct {
        trailers: []const http.Header = &.{},
    };

    /// Writes the end-of-stream message and any optional trailers.
    ///
    /// Does not flush.
    ///
    /// Asserts that the Response is using transfer-encoding: chunked.
    ///
    /// Respects the value of `elide_body` to omit all data after the headers.
    ///
    /// See also:
    /// * `end`
    /// * `endUnflushed`
    pub fn endChunked(r: *Response, options: EndChunkedOptions) WriteError!void {
        const chunked = &r.transfer_encoding.chunked;
        if (r.elide_body) return;
        const bw = r.server_output;
        switch (chunked.*) {
            .offset => |offset| {
                const chunk_len = bw.end - offset - chunk_header_template.len;
                writeHex(bw.buffer[offset..][0..chunk_len_digits], chunk_len);
                try bw.writeAll("\r\n");
            },
            .chunk_len => |chunk_len| switch (chunk_len) {
                0 => {},
                1 => try bw.writeByte('\n'),
                2 => try bw.writeAll("\r\n"),
                else => unreachable, // An earlier write call indicated more data would follow.
            },
        }
        if (options.trailers.len > 0) {
            try bw.writeAll("0\r\n");
            for (options.trailers) |trailer| {
                try bw.writeAll(trailer.name);
                try bw.writeAll(": ");
                try bw.writeAll(trailer.value);
                try bw.writeAll("\r\n");
            }
            try bw.writeAll("\r\n");
        }
        r.* = undefined;
    }

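    // For reference, the bytes emitted for the end of a chunked body with one
    // trailer look like this on the wire (standard chunked framing; the trailer
    // itself is a hypothetical example):
    //
    //     0\r\n
    //     X-Checksum: abc123\r\n
    //     \r\n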
    fn contentLengthWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
        const r: *Response = @alignCast(@ptrCast(context));
        const n = if (r.elide_body) countSplat(data, splat) else try r.server_output.writeSplat(data, splat);
        r.transfer_encoding.content_length -= n;
        return n;
    }

    fn noneWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
        const r: *Response = @alignCast(@ptrCast(context));
        if (r.elide_body) return countSplat(data, splat);
        return r.server_output.writeSplat(data, splat);
    }

    fn countSplat(data: []const []const u8, splat: usize) usize {
        if (data.len == 0) return 0;
        var total: usize = 0;
        for (data[0 .. data.len - 1]) |buf| total += buf.len;
        total += data[data.len - 1].len * splat;
        return total;
    }

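    // In the splat convention, the final buffer is logically repeated `splat`
    // times. A minimal sketch of the arithmetic above (illustration only):
    //
    //     test countSplat {
    //         // "ab" + "c" + "xy" * 3 = 2 + 1 + 6 = 9 bytes.
    //         const data: []const []const u8 = &.{ "ab", "c", "xy" };
    //         try std.testing.expectEqual(@as(usize, 9), countSplat(data, 3));
    //     }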
    fn elideWriteFile(
        r: *Response,
        offset: std.io.Writer.Offset,
        limit: std.io.Writer.Limit,
        headers_and_trailers: []const []const u8,
    ) WriteError!usize {
        if (offset != .none) {
            if (countWriteFile(limit, headers_and_trailers)) |n| {
                return n;
            }
        }
        r.err = error.UnableToElideBody;
        return error.WriteFailed;
    }

    /// Returns `null` if size cannot be computed without making any syscalls.
    fn countWriteFile(limit: std.io.Writer.Limit, headers_and_trailers: []const []const u8) ?usize {
        var total: usize = limit.toInt() orelse return null;
        for (headers_and_trailers) |buf| total += buf.len;
        return total;
    }

    fn noneWriteFile(
        context: ?*anyopaque,
        file: std.fs.File,
        offset: std.io.Writer.Offset,
        limit: std.io.Writer.Limit,
        headers_and_trailers: []const []const u8,
        headers_len: usize,
    ) std.io.Writer.FileError!usize {
        if (limit == .nothing) return noneWriteSplat(context, headers_and_trailers, 1);
        const r: *Response = @alignCast(@ptrCast(context));
        if (r.elide_body) return elideWriteFile(r, offset, limit, headers_and_trailers);
        return r.server_output.writeFile(file, offset, limit, headers_and_trailers, headers_len);
    }

    fn contentLengthWriteFile(
        context: ?*anyopaque,
        file: std.fs.File,
        offset: std.io.Writer.Offset,
        limit: std.io.Writer.Limit,
        headers_and_trailers: []const []const u8,
        headers_len: usize,
    ) std.io.Writer.FileError!usize {
        if (limit == .nothing) return contentLengthWriteSplat(context, headers_and_trailers, 1);
        const r: *Response = @alignCast(@ptrCast(context));
        if (r.elide_body) return elideWriteFile(r, offset, limit, headers_and_trailers);
        const n = try r.server_output.writeFile(file, offset, limit, headers_and_trailers, headers_len);
        r.transfer_encoding.content_length -= n;
        return n;
    }

    fn chunkedWriteFile(
        context: ?*anyopaque,
        file: std.fs.File,
        offset: std.io.Writer.Offset,
        limit: std.io.Writer.Limit,
        headers_and_trailers: []const []const u8,
        headers_len: usize,
    ) std.io.Writer.FileError!usize {
        if (limit == .nothing) return chunkedWriteSplat(context, headers_and_trailers, 1);
        const r: *Response = @alignCast(@ptrCast(context));
        if (r.elide_body) return elideWriteFile(r, offset, limit, headers_and_trailers);
        const data_len = countWriteFile(limit, headers_and_trailers) orelse @panic("TODO");
        const bw = r.server_output;
        const chunked = &r.transfer_encoding.chunked;
        state: switch (chunked.*) {
            .offset => |off| {
                // TODO: is it better perf to read small files into the buffer?
                const buffered_len = bw.end - off - chunk_header_template.len;
                const chunk_len = data_len + buffered_len;
                writeHex(bw.buffer[off..][0..chunk_len_digits], chunk_len);
                const n = try bw.writeFile(file, offset, limit, headers_and_trailers, headers_len);
                chunked.* = .{ .chunk_len = data_len + 2 - n };
                return n;
            },
            .chunk_len => |chunk_len| l: switch (chunk_len) {
                0 => {
                    const header_buf = try bw.writableArray(chunk_header_template.len);
                    const off = bw.end;
                    @memcpy(header_buf, chunk_header_template);
                    chunked.* = .{ .offset = off };
                    continue :state .{ .offset = off };
                },
                1 => {
                    try bw.writeByte('\n');
                    chunked.chunk_len = 0;
                    continue :l 0;
                },
                2 => {
                    try bw.writeByte('\r');
                    chunked.chunk_len = 1;
                    continue :l 1;
                },
                else => {
                    const new_limit = limit.min(.limited(chunk_len - 2));
                    const n = try bw.writeFile(file, offset, new_limit, headers_and_trailers, headers_len);
                    chunked.chunk_len = chunk_len - n;
                    return n;
                },
            },
        }
    }

    fn chunkedWriteSplat(context: ?*anyopaque, data: []const []const u8, splat: usize) WriteError!usize {
        const r: *Response = @alignCast(@ptrCast(context));
        const data_len = countSplat(data, splat);
        if (r.elide_body) return data_len;

        const bw = r.server_output;
        const chunked = &r.transfer_encoding.chunked;

        state: switch (chunked.*) {
            .offset => |offset| {
                if (bw.unusedCapacitySlice().len >= data_len) {
                    assert(data_len == (bw.writeSplat(data, splat) catch unreachable));
                    return data_len;
                }
                const buffered_len = bw.end - offset - chunk_header_template.len;
                const chunk_len = data_len + buffered_len;
                writeHex(bw.buffer[offset..][0..chunk_len_digits], chunk_len);
                const n = try bw.writeSplat(data, splat);
                chunked.* = .{ .chunk_len = data_len + 2 - n };
                return n;
            },
            .chunk_len => |chunk_len| l: switch (chunk_len) {
                0 => {
                    const header_buf = try bw.writableArray(chunk_header_template.len);
                    const offset = bw.end;
                    @memcpy(header_buf, chunk_header_template);
                    chunked.* = .{ .offset = offset };
                    continue :state .{ .offset = offset };
                },
                1 => {
                    try bw.writeByte('\n');
                    chunked.chunk_len = 0;
                    continue :l 0;
                },
                2 => {
                    try bw.writeByte('\r');
                    chunked.chunk_len = 1;
                    continue :l 1;
                },
                else => {
                    const n = try bw.writeSplatLimit(data, splat, .limited(chunk_len - 2));
                    chunked.chunk_len = chunk_len - n;
                    return n;
                },
            },
        }
    }

    /// Writes an integer as base 16 to `buf`, right-aligned, assuming the
    /// buffer has already been filled with zeroes.
    fn writeHex(buf: []u8, x: usize) void {
        assert(std.mem.allEqual(u8, buf, '0'));
        const base = 16;
        var index: usize = buf.len;
        var a = x;
        while (a > 0) {
            const digit = a % base;
            index -= 1;
            buf[index] = std.fmt.digitToChar(@intCast(digit), .lower);
            a /= base;
        }
    }

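    // A minimal sketch of the expected behavior (illustration only, not part of
    // this commit):
    //
    //     test writeHex {
    //         var buf = [1]u8{'0'} ** 8;
    //         writeHex(&buf, 0xabc);
    //         try std.testing.expectEqualStrings("00000abc", &buf);
    //     }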
    pub fn writer(r: *Response) std.io.Writer {
        return .{
            .context = r,
            .vtable = switch (r.transfer_encoding) {
                .none => &.{
                    .writeSplat = noneWriteSplat,
                    .writeFile = noneWriteFile,
                },
                .content_length => &.{
                    .writeSplat = contentLengthWriteSplat,
                    .writeFile = contentLengthWriteFile,
                },
                .chunked => &.{
                    .writeSplat = chunkedWriteSplat,
                    .writeFile = chunkedWriteFile,
                },
            },
        };
    }
};

@@ -10,7 +10,7 @@ key: []const u8,
request: *std.http.Server.Request,
recv_fifo: std.fifo.LinearFifo(u8, .Slice),
reader: std.io.BufferedReader,
response: std.http.Server.Response,
body_writer: std.http.BodyWriter,
/// Number of bytes that have been peeked but not discarded yet.
outstanding_len: usize,

@@ -58,7 +58,7 @@ pub fn init(
        .key = key,
        .recv_fifo = .init(recv_buffer),
        .reader = (try request.reader()).unbuffered(),
        .response = try request.respondStreaming(.{
        .body_writer = try request.respondStreaming(.{
            .respond_options = .{
                .status = .switching_protocols,
                .extra_headers = &.{
@@ -236,7 +236,7 @@ pub fn writeMessagev(ws: *WebSocket, message: []const std.posix.iovec_const, opc
        },
    };

    var bw = ws.response.writer().unbuffered();
    var bw = ws.body_writer.interface().unbuffered();
    try bw.writeAll(header);
    for (message) |iovec| try bw.writeAll(iovec.base[0..iovec.len]);
    try bw.flush();

@@ -1,449 +0,0 @@
const std = @import("../std.zig");
const builtin = @import("builtin");
const testing = std.testing;
const mem = std.mem;

const assert = std.debug.assert;

pub const State = enum {
    invalid,

    // Begin header and trailer parsing states.

    start,
    seen_n,
    seen_r,
    seen_rn,
    seen_rnr,
    finished,

    // Begin transfer-encoding: chunked parsing states.

    chunk_head_size,
    chunk_head_ext,
    chunk_head_r,
    chunk_data,
    chunk_data_suffix,
    chunk_data_suffix_r,

    /// Returns true if the parser is in a content state (i.e. not waiting for more headers).
    pub fn isContent(self: State) bool {
        return switch (self) {
            .invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => false,
            .finished, .chunk_head_size, .chunk_head_ext, .chunk_head_r, .chunk_data, .chunk_data_suffix, .chunk_data_suffix_r => true,
        };
    }
};

pub const HeadersParser = struct {
    state: State = .start,
    /// A fixed buffer of len `max_header_bytes`.
    /// Pointers into this buffer are not stable until after a message is complete.
    header_bytes_buffer: []u8,
    header_bytes_len: u32,
    next_chunk_length: u64,
    /// `false`: headers. `true`: trailers.
    done: bool,

    /// Initializes the parser with a provided buffer `buf`.
    pub fn init(buf: []u8) HeadersParser {
        return .{
            .header_bytes_buffer = buf,
            .header_bytes_len = 0,
            .done = false,
            .next_chunk_length = 0,
        };
    }

    /// Reinitialize the parser.
    /// Asserts the parser is in the "done" state.
    pub fn reset(hp: *HeadersParser) void {
        assert(hp.done);
        hp.* = .{
            .state = .start,
            .header_bytes_buffer = hp.header_bytes_buffer,
            .header_bytes_len = 0,
            .done = false,
            .next_chunk_length = 0,
        };
    }

    pub fn get(hp: HeadersParser) []u8 {
        return hp.header_bytes_buffer[0..hp.header_bytes_len];
    }

    pub fn findHeadersEnd(r: *HeadersParser, bytes: []const u8) u32 {
        var hp: std.http.HeadParser = .{
            .state = switch (r.state) {
                .start => .start,
                .seen_n => .seen_n,
                .seen_r => .seen_r,
                .seen_rn => .seen_rn,
                .seen_rnr => .seen_rnr,
                .finished => .finished,
                else => unreachable,
            },
        };
        const result = hp.feed(bytes);
        r.state = switch (hp.state) {
            .start => .start,
            .seen_n => .seen_n,
            .seen_r => .seen_r,
            .seen_rn => .seen_rn,
            .seen_rnr => .seen_rnr,
            .finished => .finished,
        };
        return @intCast(result);
    }

    pub fn findChunkedLen(r: *HeadersParser, bytes: []const u8) u32 {
        var cp: std.http.ChunkParser = .{
            .state = switch (r.state) {
                .chunk_head_size => .head_size,
                .chunk_head_ext => .head_ext,
                .chunk_head_r => .head_r,
                .chunk_data => .data,
                .chunk_data_suffix => .data_suffix,
                .chunk_data_suffix_r => .data_suffix_r,
                .invalid => .invalid,
                else => unreachable,
            },
            .chunk_len = r.next_chunk_length,
        };
        const result = cp.feed(bytes);
        r.state = switch (cp.state) {
            .head_size => .chunk_head_size,
            .head_ext => .chunk_head_ext,
            .head_r => .chunk_head_r,
            .data => .chunk_data,
            .data_suffix => .chunk_data_suffix,
            .data_suffix_r => .chunk_data_suffix_r,
            .invalid => .invalid,
        };
        r.next_chunk_length = cp.chunk_len;
        return @intCast(result);
    }

    /// Returns whether or not the parser has finished parsing a complete
    /// message. A message is only complete after the entire body has been read
    /// and any trailing headers have been parsed.
    pub fn isComplete(r: *HeadersParser) bool {
        return r.done and r.state == .finished;
    }

    pub const CheckCompleteHeadError = error{HttpHeadersOversize};

    /// Pushes `in` into the parser. Returns the number of bytes consumed by
    /// the header. Any header bytes are appended to `header_bytes_buffer`.
    pub fn checkCompleteHead(hp: *HeadersParser, in: []const u8) CheckCompleteHeadError!u32 {
        if (hp.state.isContent()) return 0;

        const i = hp.findHeadersEnd(in);
        const data = in[0..i];
        if (hp.header_bytes_len + data.len > hp.header_bytes_buffer.len)
            return error.HttpHeadersOversize;

        @memcpy(hp.header_bytes_buffer[hp.header_bytes_len..][0..data.len], data);
        hp.header_bytes_len += @intCast(data.len);

        return i;
    }

    pub const ReadError = error{
        HttpChunkInvalid,
    };

    /// Reads the body of the message into `buffer`. Returns the number of
    /// bytes placed in the buffer.
    ///
    /// If `skip` is true, the buffer will be unused and the body will be skipped.
    ///
    /// See `std.http.Client.Connection` for an example of `conn`.
    pub fn read(r: *HeadersParser, conn: anytype, buffer: []u8, skip: bool) !usize {
        assert(r.state.isContent());
        if (r.done) return 0;

        var out_index: usize = 0;
        while (true) {
            switch (r.state) {
                .invalid, .start, .seen_n, .seen_r, .seen_rn, .seen_rnr => unreachable,
                .finished => {
                    const data_avail = r.next_chunk_length;

                    if (skip) {
                        conn.fill() catch |err| switch (err) {
                            error.EndOfStream => {
                                r.done = true;
                                return 0;
                            },
                            else => |e| return e,
                        };

                        const nread = @min(conn.peek().len, data_avail);
                        conn.drop(@intCast(nread));
                        r.next_chunk_length -= nread;

                        if (r.next_chunk_length == 0 or nread == 0) r.done = true;

                        return out_index;
                    } else if (out_index < buffer.len) {
                        const out_avail = buffer.len - out_index;

                        const can_read = @as(usize, @intCast(@min(data_avail, out_avail)));
                        const nread = try conn.read(buffer[0..can_read]);
                        r.next_chunk_length -= nread;

                        if (r.next_chunk_length == 0 or nread == 0) r.done = true;

                        return nread;
                    } else {
                        return out_index;
                    }
                },
                .chunk_data_suffix, .chunk_data_suffix_r, .chunk_head_size, .chunk_head_ext, .chunk_head_r => {
                    conn.fill() catch |err| switch (err) {
                        error.EndOfStream => {
                            r.done = true;
                            return 0;
                        },
                        else => |e| return e,
                    };

                    const i = r.findChunkedLen(conn.peek());
                    conn.drop(@intCast(i));

                    switch (r.state) {
                        .invalid => return error.HttpChunkInvalid,
                        .chunk_data => if (r.next_chunk_length == 0) {
                            if (std.mem.eql(u8, conn.peek(), "\r\n")) {
                                r.state = .finished;
                                conn.drop(2);
                            } else {
                                // The trailer section is formatted identically
                                // to the header section.
                                r.state = .seen_rn;
                            }
                            r.done = true;

                            return out_index;
                        },
                        else => return out_index,
                    }

                    continue;
                },
                .chunk_data => {
                    const data_avail = r.next_chunk_length;
                    const out_avail = buffer.len - out_index;

                    if (skip) {
                        conn.fill() catch |err| switch (err) {
                            error.EndOfStream => {
                                r.done = true;
                                return 0;
                            },
                            else => |e| return e,
                        };

                        const nread = @min(conn.peek().len, data_avail);
                        conn.drop(@intCast(nread));
                        r.next_chunk_length -= nread;
                    } else if (out_avail > 0) {
                        const can_read: usize = @intCast(@min(data_avail, out_avail));
                        const nread = try conn.read(buffer[out_index..][0..can_read]);
                        r.next_chunk_length -= nread;
                        out_index += nread;
                    }

                    if (r.next_chunk_length == 0) {
                        r.state = .chunk_data_suffix;
                        continue;
                    }

                    return out_index;
                },
            }
        }
    }
};

inline fn int16(array: *const [2]u8) u16 {
    return @as(u16, @bitCast(array.*));
}

inline fn int24(array: *const [3]u8) u24 {
    return @as(u24, @bitCast(array.*));
}

inline fn int32(array: *const [4]u8) u32 {
    return @as(u32, @bitCast(array.*));
}

inline fn intShift(comptime T: type, x: anytype) T {
    switch (@import("builtin").cpu.arch.endian()) {
        .little => return @as(T, @truncate(x >> (@bitSizeOf(@TypeOf(x)) - @bitSizeOf(T)))),
        .big => return @as(T, @truncate(x)),
    }
}

/// A buffered (and peekable) Connection.
const MockBufferedConnection = struct {
    pub const buffer_size = 0x2000;

    conn: std.io.FixedBufferStream,
    buf: [buffer_size]u8 = undefined,
    start: u16 = 0,
    end: u16 = 0,

    pub fn fill(conn: *MockBufferedConnection) ReadError!void {
        if (conn.end != conn.start) return;

        const nread = try conn.conn.read(conn.buf[0..]);
        if (nread == 0) return error.EndOfStream;
        conn.start = 0;
        conn.end = @as(u16, @truncate(nread));
    }

    pub fn peek(conn: *MockBufferedConnection) []const u8 {
        return conn.buf[conn.start..conn.end];
    }

    pub fn drop(conn: *MockBufferedConnection, num: u16) void {
        conn.start += num;
    }

    pub fn readAtLeast(conn: *MockBufferedConnection, buffer: []u8, len: usize) ReadError!usize {
        var out_index: u16 = 0;
        while (out_index < len) {
            const available = conn.end - conn.start;
            const left = buffer.len - out_index;

            if (available > 0) {
                const can_read = @as(u16, @truncate(@min(available, left)));

                @memcpy(buffer[out_index..][0..can_read], conn.buf[conn.start..][0..can_read]);
                out_index += can_read;
                conn.start += can_read;

                continue;
            }

            if (left > conn.buf.len) {
                // skip the buffer if the output is large enough
                return conn.conn.read(buffer[out_index..]);
            }

            try conn.fill();
        }

        return out_index;
    }

    pub fn read(conn: *MockBufferedConnection, buffer: []u8) ReadError!usize {
        return conn.readAtLeast(buffer, 1);
    }

    pub const ReadError = std.io.FixedBufferStream.ReadError || error{EndOfStream};
    pub const Reader = std.io.Reader(*MockBufferedConnection, ReadError, read);

    pub fn reader(conn: *MockBufferedConnection) Reader {
        return Reader{ .context = conn };
    }
};

test "HeadersParser.read length" {
|
||||
// mock BufferedConnection for read
|
||||
var headers_buf: [256]u8 = undefined;
|
||||
|
||||
var r = HeadersParser.init(&headers_buf);
|
||||
const data = "GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\nHello";
|
||||
|
||||
var conn: MockBufferedConnection = .{
|
||||
.conn = .{ .buffer = data },
|
||||
};
|
||||
|
||||
while (true) { // read headers
|
||||
try conn.fill();
|
||||
|
||||
const nchecked = try r.checkCompleteHead(conn.peek());
|
||||
conn.drop(@intCast(nchecked));
|
||||
|
||||
if (r.state.isContent()) break;
|
||||
}
|
||||
|
||||
var buf: [8]u8 = undefined;
|
||||
|
||||
r.next_chunk_length = 5;
|
||||
const len = try r.read(&conn, &buf, false);
|
||||
try std.testing.expectEqual(@as(usize, 5), len);
|
||||
try std.testing.expectEqualStrings("Hello", buf[0..len]);
|
||||
|
||||
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\nContent-Length: 5\r\n\r\n", r.get());
|
||||
}
|
||||
|
||||
test "HeadersParser.read chunked" {
|
||||
// mock BufferedConnection for read
|
||||
|
||||
var headers_buf: [256]u8 = undefined;
|
||||
var r = HeadersParser.init(&headers_buf);
|
||||
const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\n\r\n";
|
||||
|
||||
var conn: MockBufferedConnection = .{
|
||||
.conn = .{ .buffer = data },
|
||||
};
|
||||
|
||||
while (true) { // read headers
|
||||
try conn.fill();
|
||||
|
||||
const nchecked = try r.checkCompleteHead(conn.peek());
|
||||
conn.drop(@intCast(nchecked));
|
||||
|
||||
if (r.state.isContent()) break;
|
||||
}
|
||||
var buf: [8]u8 = undefined;
|
||||
|
||||
r.state = .chunk_head_size;
|
||||
const len = try r.read(&conn, &buf, false);
|
||||
try std.testing.expectEqual(@as(usize, 5), len);
|
||||
try std.testing.expectEqualStrings("Hello", buf[0..len]);
|
||||
|
||||
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\n", r.get());
|
||||
}
|
||||
|
||||
test "HeadersParser.read chunked trailer" {
|
||||
// mock BufferedConnection for read
|
||||
|
||||
var headers_buf: [256]u8 = undefined;
|
||||
var r = HeadersParser.init(&headers_buf);
|
||||
const data = "GET / HTTP/1.1\r\nHost: localhost\r\n\r\n2\r\nHe\r\n2\r\nll\r\n1\r\no\r\n0\r\nContent-Type: text/plain\r\n\r\n";
|
||||
|
||||
var conn: MockBufferedConnection = .{
|
||||
.conn = .{ .buffer = data },
|
||||
};
|
||||
|
||||
while (true) { // read headers
|
||||
try conn.fill();
|
||||
|
||||
const nchecked = try r.checkCompleteHead(conn.peek());
|
||||
conn.drop(@intCast(nchecked));
|
||||
|
||||
if (r.state.isContent()) break;
|
||||
}
|
||||
var buf: [8]u8 = undefined;
|
||||
|
||||
r.state = .chunk_head_size;
|
||||
const len = try r.read(&conn, &buf, false);
|
||||
try std.testing.expectEqual(@as(usize, 5), len);
|
||||
try std.testing.expectEqualStrings("Hello", buf[0..len]);
|
||||
|
||||
while (true) { // read headers
|
||||
try conn.fill();
|
||||
|
||||
const nchecked = try r.checkCompleteHead(conn.peek());
|
||||
conn.drop(@intCast(nchecked));
|
||||
|
||||
if (r.state.isContent()) break;
|
||||
}
|
||||
|
||||
try std.testing.expectEqualStrings("GET / HTTP/1.1\r\nHost: localhost\r\n\r\nContent-Type: text/plain\r\n\r\n", r.get());
|
||||
}
|
||||
@@ -61,21 +61,18 @@ test "trailers" {
    const uri = try std.Uri.parse(location);

    {
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        var response = try req.receiveHead(&.{});

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);

        var it = req.response.iterateHeaders();
        var it = response.iterateHeaders();
        {
            const header = it.next().?;
            try expect(!it.is_trailer);
@@ -565,20 +562,18 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        var response = try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
        try expectEqualStrings("text/plain", req.response.content_type.?);
        try expectEqualStrings("text/plain", response.head.content_type.?);
    }

    // connection has been kept alive
@@ -590,16 +585,14 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        var response = try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192 * 1024));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192 * 1024));
        defer gpa.free(body);

        try expectEqual(@as(usize, 14 * 1024 + 14 * 10), body.len);
@@ -614,21 +607,19 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.HEAD, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.HEAD, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        var response = try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);

        try expectEqualStrings("", body);
        try expectEqualStrings("text/plain", req.response.content_type.?);
        try expectEqual(14, req.response.content_length.?);
        try expectEqualStrings("text/plain", response.content_type.?);
        try expectEqual(14, response.head.content_length.?);
    }

    // connection has been kept alive
@@ -640,20 +631,18 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        var response = try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
        try expectEqualStrings("text/plain", req.response.content_type.?);
        try expectEqualStrings("text/plain", response.head.content_type.?);
    }

    // connection has been kept alive
@@ -665,14 +654,12 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.HEAD, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.HEAD, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);
@@ -691,15 +678,14 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
            .keep_alive = false,
        });
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);
@@ -717,17 +703,16 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "empty", .value = "" },
            },
        });
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        try req.receiveHead(&redirect_buffer);

        try std.testing.expectEqual(.ok, req.response.status);

@@ -761,14 +746,12 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);
@@ -785,14 +768,12 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);
@@ -809,14 +790,12 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);
@@ -833,14 +812,12 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        req.wait() catch |err| switch (err) {
        try req.sendBodiless();
        req.receiveHead(&redirect_buffer) catch |err| switch (err) {
            error.TooManyHttpRedirects => {},
            else => return err,
        };
@@ -852,14 +829,12 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);
@@ -876,14 +851,12 @@ test "general client/server API coverage" {
        const uri = try std.Uri.parse(location);

        log.info("{s}", .{location});
        var server_header_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        const result = req.wait();
        try req.sendBodiless();
        const result = req.receiveHead(&redirect_buffer);

        // a proxy without an upstream is likely to return a 5xx status.
        if (client.http_proxy == null) {
@@ -910,9 +883,7 @@ test "general client/server API coverage" {
    for (0..total_connections) |i| {
        const headers_buf = try gpa.alloc(u8, 1024);
        try header_bufs.append(headers_buf);
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = headers_buf,
        });
        var req = try client.open(.GET, uri, .{});
        req.response.parser.done = true;
        req.connection.?.closing = false;
        requests[i] = req;
@@ -978,28 +949,26 @@ test "Server streams both reading and writing" {
    var client: http.Client = .{ .allocator = std.testing.allocator };
    defer client.deinit();

    var server_header_buffer: [555]u8 = undefined;
    var redirect_buffer: [555]u8 = undefined;
    var req = try client.open(.POST, .{
        .scheme = "http",
        .host = .{ .raw = "127.0.0.1" },
        .port = test_server.port(),
        .path = .{ .percent_encoded = "/" },
    }, .{
        .server_header_buffer = &server_header_buffer,
    });
    }, .{});
    defer req.deinit();

    req.transfer_encoding = .chunked;
    try req.send();
    try req.wait();
    var body_writer = try req.sendBody();
    var response = try req.receiveHead(&redirect_buffer);

    var w = req.writer().unbuffered();
    var w = body_writer.interface().unbuffered();
    try w.writeAll("one ");
    try w.writeAll("fish");

    try req.finish();

    const body = try req.reader().readRemainingAlloc(std.testing.allocator, .limited(8192));
    const body = try response.reader().readRemainingAlloc(std.testing.allocator, .limited(8192));
    defer std.testing.allocator.free(body);

    try expectEqualStrings("ONE FISH", body);
@@ -1014,9 +983,8 @@ fn echoTests(client: *http.Client, port: u16) !void {
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        var server_header_buffer: [1024]u8 = undefined;
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.POST, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "content-type", .value = "text/plain" },
            },
@@ -1025,15 +993,15 @@ fn echoTests(client: *http.Client, port: u16) !void {

        req.transfer_encoding = .{ .content_length = 14 };

        try req.send();
        var w = req.writer().unbuffered();
        var body_writer = try req.sendBody();
        var w = body_writer.interface().unbuffered();
        try w.writeAll("Hello, ");
        try w.writeAll("World!\n");
        try req.finish();
        try body_writer.end();

        try req.wait();
        var response = try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
@@ -1049,9 +1017,8 @@ fn echoTests(client: *http.Client, port: u16) !void {
            .{port},
        ));

        var server_header_buffer: [1024]u8 = undefined;
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.POST, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "content-type", .value = "text/plain" },
            },
@@ -1060,15 +1027,15 @@ fn echoTests(client: *http.Client, port: u16) !void {

        req.transfer_encoding = .chunked;

        try req.send();
        var w = req.writer().unbuffered();
        var body_writer = try req.sendBody();
        var w = body_writer.interface().unbuffered();
        try w.writeAll("Hello, ");
        try w.writeAll("World!\n");
        try req.finish();
        try body_writer.end();

        try req.wait();
        var response = try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
@@ -1103,9 +1070,8 @@ fn echoTests(client: *http.Client, port: u16) !void {
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        var server_header_buffer: [1024]u8 = undefined;
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.POST, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "expect", .value = "100-continue" },
                .{ .name = "content-type", .value = "text/plain" },
@@ -1115,16 +1081,16 @@ fn echoTests(client: *http.Client, port: u16) !void {

        req.transfer_encoding = .chunked;

        try req.send();
        var w = req.writer().unbuffered();
        var body_writer = try req.sendBody();
        var w = body_writer.interface().unbuffered();
        try w.writeAll("Hello, ");
        try w.writeAll("World!\n");
        try req.finish();
        try body_writer.end();

        try req.wait();
        try expectEqual(.ok, req.response.status);
        var response = try req.receiveHead(&redirect_buffer);
        try expectEqual(.ok, response.head.status);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);

        try expectEqualStrings("Hello, World!\n", body);
@@ -1135,9 +1101,8 @@ fn echoTests(client: *http.Client, port: u16) !void {
        defer gpa.free(location);
        const uri = try std.Uri.parse(location);

        var server_header_buffer: [1024]u8 = undefined;
        var redirect_buffer: [1024]u8 = undefined;
        var req = try client.open(.POST, uri, .{
            .server_header_buffer = &server_header_buffer,
            .extra_headers = &.{
                .{ .name = "content-type", .value = "text/plain" },
                .{ .name = "expect", .value = "garbage" },
@@ -1147,9 +1112,11 @@ fn echoTests(client: *http.Client, port: u16) !void {

        req.transfer_encoding = .chunked;

        try req.send();
        try req.wait();
        try expectEqual(.expectation_failed, req.response.status);
        var body_writer = try req.sendBody();
        try body_writer.flush();
        var response = try req.receiveHead(&redirect_buffer);
        try expectEqual(.expectation_failed, response.head.status);
        _ = try response.reader().discardRemaining();
    }

    _ = try client.fetch(.{
@@ -1255,16 +1222,14 @@ test "redirect to different connection" {
    const uri = try std.Uri.parse(location);

    {
        var server_header_buffer: [666]u8 = undefined;
        var req = try client.open(.GET, uri, .{
            .server_header_buffer = &server_header_buffer,
        });
        var redirect_buffer: [666]u8 = undefined;
        var req = try client.open(.GET, uri, .{});
        defer req.deinit();

        try req.send();
        try req.wait();
        try req.sendBodiless();
        var response = try req.receiveHead(&redirect_buffer);

        const body = try req.reader().readRemainingAlloc(gpa, .limited(8192));
        const body = try response.reader().readRemainingAlloc(gpa, .limited(8192));
        defer gpa.free(body);

        try expectEqualStrings("good job, you pass", body);

@@ -919,6 +919,40 @@ fn takeMultipleOf7Leb128(br: *BufferedReader, comptime Result: type) TakeLeb128E
    }
}

/// Left-aligns data such that `br.seek` becomes zero.
pub fn rebase(br: *BufferedReader) void {
    const data = br.buffer[br.seek..br.end];
    const dest = br.buffer[0..data.len];
    std.mem.copyForwards(u8, dest, data);
    br.seek = 0;
    br.end = data.len;
}

/// Advances the stream and decreases the size of the storage buffer by `n`,
/// returning the range of bytes no longer accessible by `br`.
///
/// This action can be undone by `restitute`.
///
/// Asserts there are at least `n` buffered bytes already.
///
/// Asserts that `br.seek` is zero, i.e. the buffer is in a rebased state.
pub fn steal(br: *BufferedReader, n: usize) []u8 {
    assert(br.seek == 0);
    assert(n <= br.end);
    const stolen = br.buffer[0..n];
    br.buffer = br.buffer[n..];
    br.end -= n;
    return stolen;
}

/// Expands the storage buffer, undoing the effects of `steal`.
/// Assumes that `n` does not exceed the total number of stolen bytes.
pub fn restitute(br: *BufferedReader, n: usize) void {
    br.buffer = (br.buffer.ptr - n)[0 .. br.buffer.len + n];
    br.end += n;
    br.seek += n;
}

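// A worked example of the steal/restitute pair (a sketch, assuming a rebased
// reader whose buffer currently holds "abcd"):
//
//     const prefix = br.steal(2); // returns "ab"; br.buffer now starts at "cd"
//     // ... use `prefix` while `br` keeps operating on the shrunken buffer ...
//     br.restitute(2); // buffer regains "ab"; br.seek advances past those bytes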
test initFixed {
    var br: BufferedReader = undefined;
    br.initFixed("a\x02");
