Mirror of https://github.com/ziglang/zig.git (synced 2026-01-04 04:25:05 +00:00)

Merge pull request #16929 from truemedian/more-http

std.http: handle Expect: 100-continue, improve redirect logic, add Client.fetch for simple requests

Commit f40f81cbfb
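
For orientation, here is a minimal usage sketch of the Client.fetch API added by this commit, modeled on the test added at the bottom of the diff. The URL, allocator choice, and printed output are illustrative assumptions, not part of the commit.

const std = @import("std");

// Sketch only: exercises Client.fetch as declared in this diff.
// The address is a placeholder and error handling is reduced to `try`.
pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();
    const allocator = gpa.allocator();

    var client = std.http.Client{ .allocator = allocator };
    defer client.deinit();

    var res = try client.fetch(allocator, .{
        .location = .{ .url = "http://127.0.0.1:8080/echo-content" }, // placeholder address
        .method = .POST,
        .payload = .{ .string = "Hello, World!\n" },
    });
    defer res.deinit();

    std.debug.print("{d}: {s}\n", .{ @intFromEnum(res.status), res.body orelse "" });
}
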
@@ -1,3 +1,5 @@
const std = @import("std.zig");

pub const Client = @import("http/Client.zig");
pub const Server = @import("http/Server.zig");
pub const protocol = @import("http/protocol.zig");
@@ -14,16 +16,36 @@ pub const Version = enum {
/// https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
/// https://datatracker.ietf.org/doc/html/rfc7231#section-4 Initial definition
/// https://datatracker.ietf.org/doc/html/rfc5789#section-2 PATCH
pub const Method = enum {
GET,
HEAD,
POST,
PUT,
DELETE,
CONNECT,
OPTIONS,
TRACE,
PATCH,
pub const Method = enum(u64) { // TODO: should be u192 or u256, but neither is supported by the C backend, and therefore cannot pass CI
GET = parse("GET"),
HEAD = parse("HEAD"),
POST = parse("POST"),
PUT = parse("PUT"),
DELETE = parse("DELETE"),
CONNECT = parse("CONNECT"),
OPTIONS = parse("OPTIONS"),
TRACE = parse("TRACE"),
PATCH = parse("PATCH"),

_,

/// Converts `s` into a type that may be used as a `Method` field.
/// Asserts that `s` is 24 or fewer bytes.
pub fn parse(s: []const u8) u64 {
var x: u64 = 0;
@memcpy(std.mem.asBytes(&x)[0..s.len], s);
return x;
}

pub fn write(self: Method, w: anytype) !void {
const bytes = std.mem.asBytes(&@intFromEnum(self));
const str = std.mem.sliceTo(bytes, 0);
try w.writeAll(str);
}

pub fn format(value: Method, comptime _: []const u8, _: std.fmt.FormatOptions, writer: anytype) @TypeOf(writer).Error!void {
return try value.write(writer);
}

/// Returns true if a request of this method is allowed to have a body
/// Actual behavior from servers may vary and should still be checked
@@ -31,6 +53,7 @@ pub const Method = enum {
return switch (self) {
.POST, .PUT, .PATCH => true,
.GET, .HEAD, .DELETE, .CONNECT, .OPTIONS, .TRACE => false,
else => true,
};
}

@@ -40,6 +63,7 @@ pub const Method = enum {
return switch (self) {
.GET, .POST, .DELETE, .CONNECT, .OPTIONS, .PATCH => true,
.HEAD, .PUT, .TRACE => false,
else => true,
};
}

@@ -50,6 +74,7 @@ pub const Method = enum {
return switch (self) {
.GET, .HEAD, .OPTIONS, .TRACE => true,
.POST, .PUT, .DELETE, .CONNECT, .PATCH => false,
else => false,
};
}

@@ -60,6 +85,7 @@ pub const Method = enum {
return switch (self) {
.GET, .HEAD, .PUT, .DELETE, .OPTIONS, .TRACE => true,
.CONNECT, .POST, .PATCH => false,
else => false,
};
}

@@ -70,6 +96,7 @@ pub const Method = enum {
return switch (self) {
.GET, .HEAD => true,
.POST, .PUT, .DELETE, .CONNECT, .OPTIONS, .TRACE, .PATCH => false,
else => false,
};
}
};
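
As a side note on the Method change above: parse packs the method's bytes into the enum's integer value, so the named tags keep working while unrecognized methods fall into the non-exhaustive `_` space and can still be written back out verbatim. A small sketch (my own, not part of the commit), assuming the u64-backed version shown above, i.e. method names of at most 8 bytes:

const std = @import("std");
const http = std.http;

test "integer-backed Method round trip (sketch)" {
    // A known method parses to the same packed value as its enum tag.
    const get: http.Method = @enumFromInt(http.Method.parse("GET"));
    try std.testing.expectEqual(http.Method.GET, get);

    // An unknown method keeps its bytes in the integer, so write()
    // can reproduce the original token.
    const custom: http.Method = @enumFromInt(http.Method.parse("QUERY"));
    var buf: [8]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);
    try custom.write(fbs.writer());
    try std.testing.expectEqualStrings("QUERY", fbs.getWritten());
}
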
@@ -269,8 +296,6 @@ pub const Connection = enum {
close,
};

const std = @import("std.zig");

test {
_ = Client;
_ = Method;

@@ -365,8 +365,11 @@ pub const Response = struct {
if (trailing) continue;

if (std.ascii.eqlIgnoreCase(header_name, "content-length")) {
if (res.content_length != null) return error.HttpHeadersInvalid;
res.content_length = std.fmt.parseInt(u64, header_value, 10) catch return error.InvalidContentLength;
const content_length = std.fmt.parseInt(u64, header_value, 10) catch return error.InvalidContentLength;

if (res.content_length != null and res.content_length != content_length) return error.HttpHeadersInvalid;

res.content_length = content_length;
} else if (std.ascii.eqlIgnoreCase(header_name, "transfer-encoding")) {
// Transfer-Encoding: second, first
// Transfer-Encoding: deflate, chunked
@@ -475,6 +478,7 @@ pub const Request = struct {
.zstd => |*zstd| zstd.deinit(),
}

req.headers.deinit();
req.response.headers.deinit();

if (req.response.parser.header_bytes_owned) {
@@ -536,10 +540,12 @@ pub const Request = struct {

/// Send the request to the server.
pub fn start(req: *Request) StartError!void {
if (!req.method.requestHasBody() and req.transfer_encoding != .none) return error.UnsupportedTransferEncoding;

var buffered = std.io.bufferedWriter(req.connection.?.data.writer());
const w = buffered.writer();

try w.writeAll(@tagName(req.method));
try req.method.write(w);
try w.writeByte(' ');

if (req.method == .CONNECT) {
@@ -607,22 +613,29 @@ pub const Request = struct {
}
}

try w.print("{}", .{req.headers});
for (req.headers.list.items) |entry| {
if (entry.value.len == 0) continue;

try w.writeAll(entry.name);
try w.writeAll(": ");
try w.writeAll(entry.value);
try w.writeAll("\r\n");
}

try w.writeAll("\r\n");

try buffered.flush();
}

pub const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;
const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;

pub const TransferReader = std.io.Reader(*Request, TransferReadError, transferRead);
const TransferReader = std.io.Reader(*Request, TransferReadError, transferRead);

pub fn transferReader(req: *Request) TransferReader {
fn transferReader(req: *Request) TransferReader {
return .{ .context = req };
}

pub fn transferRead(req: *Request, buf: []u8) TransferReadError!usize {
fn transferRead(req: *Request, buf: []u8) TransferReadError!usize {
if (req.response.parser.done) return 0;

var index: usize = 0;
@@ -635,13 +648,13 @@ pub const Request = struct {
return index;
}

pub const WaitError = RequestError || StartError || TransferReadError || proto.HeadersParser.CheckCompleteHeadError || Response.ParseError || Uri.ParseError || error{ TooManyHttpRedirects, CannotRedirect, HttpRedirectMissingLocation, CompressionInitializationFailed, CompressionNotSupported };
pub const WaitError = RequestError || StartError || TransferReadError || proto.HeadersParser.CheckCompleteHeadError || Response.ParseError || Uri.ParseError || error{ TooManyHttpRedirects, RedirectRequiresResend, HttpRedirectMissingLocation, CompressionInitializationFailed, CompressionNotSupported };

/// Waits for a response from the server and parses any headers that are sent.
/// This function will block until the final response is received.
///
/// If `handle_redirects` is true and the request has no payload, then this function will automatically follow
/// redirects. If a request payload is present, then this function will error with error.CannotRedirect.
/// redirects. If a request payload is present, then this function will error with error.RedirectRequiresResend.
pub fn wait(req: *Request) WaitError!void {
while (true) { // handle redirects
while (true) { // read headers
@@ -655,17 +668,19 @@ pub const Request = struct {

try req.response.parse(req.response.parser.header_bytes.items, false);

if (req.response.status == .switching_protocols) {
if (req.response.status == .@"continue") {
req.response.parser.done = true; // we're done parsing the continue response, reset to prepare for the real response
req.response.parser.reset();
break;
}

// we're switching protocols, so this connection is no longer doing http
if (req.response.status == .switching_protocols or (req.method == .CONNECT and req.response.status == .ok)) {
req.connection.?.data.closing = false;
req.response.parser.done = true;
}

if (req.method == .CONNECT and req.response.status == .ok) {
req.connection.?.data.closing = false;
req.response.parser.done = true;
}

// we default to using keep-alive if not provided
// we default to using keep-alive if not provided in the client if the server asks for it
const req_connection = req.headers.getFirstValue("connection");
const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);

@@ -697,9 +712,10 @@ pub const Request = struct {
req.response.parser.done = true;
}

if (req.transfer_encoding == .none and req.response.status.class() == .redirect and req.handle_redirects) {
if (req.response.status.class() == .redirect and req.handle_redirects) {
req.response.skip = true;

// skip the body of the redirect response, this will at least leave the connection in a known good state.
const empty = @as([*]u8, undefined)[0..0];
assert(try req.transferRead(empty) == 0); // we're skipping, no buffer is necessary

@@ -715,6 +731,30 @@ pub const Request = struct {
const new_url = Uri.parse(location_duped) catch try Uri.parseWithoutScheme(location_duped);
const resolved_url = try req.uri.resolve(new_url, false, arena);

// is the redirect location on the same domain, or a subdomain of the original request?
const is_same_domain_or_subdomain = std.ascii.endsWithIgnoreCase(resolved_url.host.?, req.uri.host.?) and (resolved_url.host.?.len == req.uri.host.?.len or resolved_url.host.?[resolved_url.host.?.len - req.uri.host.?.len - 1] == '.');

if (resolved_url.host == null or !is_same_domain_or_subdomain or !std.ascii.eqlIgnoreCase(resolved_url.scheme, req.uri.scheme)) {
// we're redirecting to a different domain, strip privileged headers like cookies
_ = req.headers.delete("authorization");
_ = req.headers.delete("www-authenticate");
_ = req.headers.delete("cookie");
_ = req.headers.delete("cookie2");
}

if (req.response.status == .see_other or ((req.response.status == .moved_permanently or req.response.status == .found) and req.method == .POST)) {
// we're redirecting to a GET, so we need to change the method and remove the body
req.method = .GET;
req.transfer_encoding = .none;
_ = req.headers.delete("transfer-encoding");
_ = req.headers.delete("content-length");
_ = req.headers.delete("content-type");
}

if (req.transfer_encoding != .none) {
return error.RedirectRequiresResend; // The request body has already been sent. The request is still in a valid state, but the redirect must be handled manually.
}

try req.redirect(resolved_url);

try req.start();
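
The same-domain-or-subdomain check above is easy to misread; the sketch below restates it as a standalone helper (the function name is mine, the expression mirrors the diff) together with the cases it is meant to distinguish:

const std = @import("std");

// Hypothetical standalone version of the check used before stripping
// privileged headers on redirect: the new host must equal the original host
// or be a subdomain of it (end with ".<original host>").
fn isSameDomainOrSubdomain(new_host: []const u8, original_host: []const u8) bool {
    return std.ascii.endsWithIgnoreCase(new_host, original_host) and
        (new_host.len == original_host.len or
        new_host[new_host.len - original_host.len - 1] == '.');
}

test "redirect domain check (sketch)" {
    try std.testing.expect(isSameDomainOrSubdomain("ziglang.org", "ziglang.org"));
    try std.testing.expect(isSameDomainOrSubdomain("api.ziglang.org", "ziglang.org"));
    // Ends with "ziglang.org" but is a different registrable domain.
    try std.testing.expect(!isSameDomainOrSubdomain("notziglang.org", "ziglang.org"));
}
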
@@ -735,9 +775,6 @@ pub const Request = struct {
};
}

if (req.response.status.class() == .redirect and req.handle_redirects and req.transfer_encoding != .none)
return error.CannotRedirect; // The request body has already been sent. The request is still in a valid state, but the redirect must be handled manually.

break;
}
}
@@ -921,6 +958,40 @@ pub fn connectUnproxied(client: *Client, host: []const u8, port: u16, protocol:
return conn;
}

pub const ConnectUnixError = Allocator.Error || std.os.SocketError || error{ NameTooLong, Unsupported } || std.os.ConnectError;

pub fn connectUnix(client: *Client, path: []const u8) ConnectUnixError!*ConnectionPool.Node {
if (!net.has_unix_sockets) return error.Unsupported;

if (client.connection_pool.findConnection(.{
.host = path,
.port = 0,
.is_tls = false,
})) |node|
return node;

const conn = try client.allocator.create(ConnectionPool.Node);
errdefer client.allocator.destroy(conn);
conn.* = .{ .data = undefined };

const stream = try std.net.connectUnixSocket(path);
errdefer stream.close();

conn.data = .{
.stream = stream,
.tls_client = undefined,
.protocol = .plain,

.host = try client.allocator.dupe(u8, path),
.port = 0,
};
errdefer client.allocator.free(conn.data.host);

client.connection_pool.addUsed(conn);

return conn;
}

// Prevents a dependency loop in request()
const ConnectErrorPartial = ConnectUnproxiedError || error{ UnsupportedUrlScheme, ConnectionRefused };
pub const ConnectError = ConnectErrorPartial || RequestError;
@@ -956,17 +1027,17 @@ pub const RequestError = ConnectUnproxiedError || ConnectErrorPartial || Request
UnsupportedTransferEncoding,
};

pub const Options = struct {
pub const RequestOptions = struct {
version: http.Version = .@"HTTP/1.1",

handle_redirects: bool = true,
max_redirects: u32 = 3,
header_strategy: HeaderStrategy = .{ .dynamic = 16 * 1024 },
header_strategy: StorageStrategy = .{ .dynamic = 16 * 1024 },

/// Must be an already acquired connection.
connection: ?*ConnectionPool.Node = null,

pub const HeaderStrategy = union(enum) {
pub const StorageStrategy = union(enum) {
/// In this case, the client's Allocator will be used to store the
/// entire HTTP header. This value is the maximum total size of
/// HTTP headers allowed, otherwise
@@ -988,8 +1059,12 @@ pub const protocol_map = std.ComptimeStringMap(Connection.Protocol, .{
});

/// Form and send a http request to a server.
///
/// `uri` must remain alive during the entire request.
/// `headers` is cloned and may be freed after this function returns.
///
/// This function is threadsafe.
pub fn request(client: *Client, method: http.Method, uri: Uri, headers: http.Headers, options: Options) RequestError!Request {
pub fn request(client: *Client, method: http.Method, uri: Uri, headers: http.Headers, options: RequestOptions) RequestError!Request {
const protocol = protocol_map.get(uri.scheme) orelse return error.UnsupportedUrlScheme;

const port: u16 = uri.port orelse switch (protocol) {
@@ -1015,7 +1090,7 @@ pub fn request(client: *Client, method: http.Method, uri: Uri, headers: http.Hea
.uri = uri,
.client = client,
.connection = conn,
.headers = headers,
.headers = try headers.clone(client.allocator), // Headers must be cloned to properly handle header transformations in redirects.
.method = method,
.version = options.version,
.redirects_left = options.max_redirects,
@@ -1039,6 +1114,123 @@ pub fn request(client: *Client, method: http.Method, uri: Uri, headers: http.Hea
return req;
}

pub const FetchOptions = struct {
pub const Location = union(enum) {
url: []const u8,
uri: Uri,
};

pub const Payload = union(enum) {
string: []const u8,
file: std.fs.File,
none,
};

pub const ResponseStrategy = union(enum) {
storage: RequestOptions.StorageStrategy,
file: std.fs.File,
none,
};

header_strategy: RequestOptions.StorageStrategy = .{ .dynamic = 16 * 1024 },
response_strategy: ResponseStrategy = .{ .storage = .{ .dynamic = 16 * 1024 * 1024 } },

location: Location,
method: http.Method = .GET,
headers: http.Headers = http.Headers{ .allocator = std.heap.page_allocator, .owned = false },
payload: Payload = .none,
};

pub const FetchResult = struct {
status: http.Status,
body: ?[]const u8 = null,
headers: http.Headers,

allocator: Allocator,
options: FetchOptions,

pub fn deinit(res: *FetchResult) void {
if (res.options.response_strategy == .storage and res.options.response_strategy.storage == .dynamic) {
if (res.body) |body| res.allocator.free(body);
}

res.headers.deinit();
}
};

pub fn fetch(client: *Client, allocator: Allocator, options: FetchOptions) !FetchResult {
const has_transfer_encoding = options.headers.contains("transfer-encoding");
const has_content_length = options.headers.contains("content-length");

if (has_content_length or has_transfer_encoding) return error.UnsupportedHeader;

const uri = switch (options.location) {
.url => |u| try Uri.parse(u),
.uri => |u| u,
};

var req = try request(client, options.method, uri, options.headers, .{
.header_strategy = options.header_strategy,
.handle_redirects = options.payload == .none,
});
defer req.deinit();

{ // Block to maintain lock of file to attempt to prevent a race condition where another process modifies the file while we are reading it.
// This relies on other processes actually obeying the advisory lock, which is not guaranteed.
if (options.payload == .file) try options.payload.file.lock(.shared);
defer if (options.payload == .file) options.payload.file.unlock();

switch (options.payload) {
.string => |str| req.transfer_encoding = .{ .content_length = str.len },
.file => |file| req.transfer_encoding = .{ .content_length = (try file.stat()).size },
.none => {},
}

try req.start();

switch (options.payload) {
.string => |str| try req.writeAll(str),
.file => |file| {
try file.seekTo(0);
var fifo = std.fifo.LinearFifo(u8, .{ .Static = 8192 }).init();
try fifo.pump(file.reader(), req.writer());
},
.none => {},
}

try req.finish();
}

try req.wait();

var res = FetchResult{
.status = req.response.status,
.headers = try req.response.headers.clone(allocator),

.allocator = allocator,
.options = options,
};

switch (options.response_strategy) {
.storage => |storage| switch (storage) {
.dynamic => |max| res.body = try req.reader().readAllAlloc(allocator, max),
.static => |buf| res.body = buf[0..try req.reader().readAll(buf)],
},
.file => |file| {
var fifo = std.fifo.LinearFifo(u8, .{ .Static = 8192 }).init();
try fifo.pump(req.reader(), file.writer());
},
.none => { // Take advantage of request internals to discard the response body and make the connection available for another request.
req.response.skip = true;

const empty = @as([*]u8, undefined)[0..0];
assert(try req.transferRead(empty) == 0); // we're skipping, no buffer is necessary
},
}

return res;
}

test {
const builtin = @import("builtin");
const native_endian = comptime builtin.cpu.arch.endian();
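
Besides buffering the body in memory, FetchOptions.response_strategy above can also stream the body straight into a file. A rough sketch of that mode, assuming a hypothetical output path and URL; only the fetch call itself comes from this diff:

const std = @import("std");

// Sketch: stream the response body to disk instead of allocating it.
// "response.bin" and the URL are placeholders.
fn download(client: *std.http.Client, allocator: std.mem.Allocator) !void {
    const out = try std.fs.cwd().createFile("response.bin", .{});
    defer out.close();

    var res = try client.fetch(allocator, .{
        .location = .{ .url = "http://127.0.0.1:8080/large" },
        .response_strategy = .{ .file = out },
    });
    defer res.deinit();
    // res.body stays null here; the payload was written to `out`.
}
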
@@ -57,6 +57,18 @@ pub const Headers = struct {
return .{ .allocator = allocator };
}

pub fn initList(allocator: Allocator, list: []const Field) !Headers {
var new = Headers.init(allocator);

try new.list.ensureTotalCapacity(allocator, list.len);
try new.index.ensureTotalCapacity(allocator, list.len);
for (list) |field| {
try new.append(field.name, field.value);
}

return new;
}

pub fn deinit(headers: *Headers) void {
headers.deallocateIndexListsAndFields();
headers.index.deinit(headers.allocator);
@@ -78,7 +90,7 @@ pub const Headers = struct {
entry.name = kv.key_ptr.*;
try kv.value_ptr.append(headers.allocator, n);
} else {
const name_duped = if (headers.owned) try headers.allocator.dupe(u8, name) else name;
const name_duped = if (headers.owned) try std.ascii.allocLowerString(headers.allocator, name) else name;
errdefer if (headers.owned) headers.allocator.free(name_duped);

entry.name = name_duped;
@@ -97,6 +109,7 @@ pub const Headers = struct {
return headers.index.contains(name);
}

/// Removes all headers with the given name.
pub fn delete(headers: *Headers, name: []const u8) bool {
if (headers.index.fetchRemove(name)) |kv| {
var index = kv.value;
@@ -268,6 +281,18 @@ pub const Headers = struct {
headers.index.clearRetainingCapacity();
headers.list.clearRetainingCapacity();
}

pub fn clone(headers: Headers, allocator: Allocator) !Headers {
var new = Headers.init(allocator);

try new.list.ensureTotalCapacity(allocator, headers.list.capacity);
try new.index.ensureTotalCapacity(allocator, headers.index.capacity());
for (headers.list.items) |field| {
try new.append(field.name, field.value);
}

return new;
}
};

test "Headers.append" {
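
A small usage sketch of the initList and clone helpers added in the Headers hunks above; the header values and the test name are my own, not part of the commit:

const std = @import("std");
const http = std.http;

test "Headers.initList and clone (sketch)" {
    var headers = try http.Headers.initList(std.testing.allocator, &.{
        .{ .name = "content-type", .value = "text/plain" },
        .{ .name = "accept", .value = "*/*" },
    });
    defer headers.deinit();

    var copy = try headers.clone(std.testing.allocator);
    defer copy.deinit();

    try std.testing.expect(copy.contains("content-type"));
    try std.testing.expect(copy.contains("accept"));
}
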
@@ -185,8 +185,10 @@ pub const Request = struct {
return error.HttpHeadersInvalid;

const method_end = mem.indexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
if (method_end > 24) return error.HttpHeadersInvalid;

const method_str = first_line[0..method_end];
const method = std.meta.stringToEnum(http.Method, method_str) orelse return error.UnknownHttpMethod;
const method: http.Method = @enumFromInt(http.Method.parse(method_str));

const version_start = mem.lastIndexOfScalar(u8, first_line, ' ') orelse return error.HttpHeadersInvalid;
if (version_start == method_end) return error.HttpHeadersInvalid;
@@ -411,59 +413,67 @@ pub const Response = struct {
}
try w.writeAll("\r\n");

if (!res.headers.contains("server")) {
try w.writeAll("Server: zig (std.http)\r\n");
}

if (!res.headers.contains("connection")) {
const req_connection = res.request.headers.getFirstValue("connection");
const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);

if (req_keepalive) {
try w.writeAll("Connection: keep-alive\r\n");
} else {
try w.writeAll("Connection: close\r\n");
}
}

const has_transfer_encoding = res.headers.contains("transfer-encoding");
const has_content_length = res.headers.contains("content-length");

if (!has_transfer_encoding and !has_content_length) {
switch (res.transfer_encoding) {
.chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
.content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
.none => {},
}
if (res.status == .@"continue") {
res.state = .waited; // we still need to send another request after this
} else {
if (has_content_length) {
const content_length = std.fmt.parseInt(u64, res.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;
if (!res.headers.contains("server")) {
try w.writeAll("Server: zig (std.http)\r\n");
}

res.transfer_encoding = .{ .content_length = content_length };
} else if (has_transfer_encoding) {
const transfer_encoding = res.headers.getFirstValue("transfer-encoding").?;
if (std.mem.eql(u8, transfer_encoding, "chunked")) {
res.transfer_encoding = .chunked;
if (!res.headers.contains("connection")) {
const req_connection = res.request.headers.getFirstValue("connection");
const req_keepalive = req_connection != null and !std.ascii.eqlIgnoreCase("close", req_connection.?);

if (req_keepalive) {
try w.writeAll("Connection: keep-alive\r\n");
} else {
return error.UnsupportedTransferEncoding;
try w.writeAll("Connection: close\r\n");
}
}

const has_transfer_encoding = res.headers.contains("transfer-encoding");
const has_content_length = res.headers.contains("content-length");

if (!has_transfer_encoding and !has_content_length) {
switch (res.transfer_encoding) {
.chunked => try w.writeAll("Transfer-Encoding: chunked\r\n"),
.content_length => |content_length| try w.print("Content-Length: {d}\r\n", .{content_length}),
.none => {},
}
} else {
res.transfer_encoding = .none;
if (has_content_length) {
const content_length = std.fmt.parseInt(u64, res.headers.getFirstValue("content-length").?, 10) catch return error.InvalidContentLength;

res.transfer_encoding = .{ .content_length = content_length };
} else if (has_transfer_encoding) {
const transfer_encoding = res.headers.getFirstValue("transfer-encoding").?;
if (std.mem.eql(u8, transfer_encoding, "chunked")) {
res.transfer_encoding = .chunked;
} else {
return error.UnsupportedTransferEncoding;
}
} else {
res.transfer_encoding = .none;
}
}

try w.print("{}", .{res.headers});
}

try w.print("{}", .{res.headers});
if (res.request.method == .HEAD) {
res.transfer_encoding = .none;
}

try w.writeAll("\r\n");

try buffered.flush();
}

pub const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;
const TransferReadError = Connection.ReadError || proto.HeadersParser.ReadError;

pub const TransferReader = std.io.Reader(*Response, TransferReadError, transferRead);
const TransferReader = std.io.Reader(*Response, TransferReadError, transferRead);

pub fn transferReader(res: *Response) TransferReader {
fn transferReader(res: *Response) TransferReader {
return .{ .context = res };
}
@@ -534,9 +534,9 @@ pub const HeadersParser = struct {

if (r.next_chunk_length == 0) r.done = true;

return 0;
} else {
const out_avail = buffer.len;
return out_index;
} else if (out_index < buffer.len) {
const out_avail = buffer.len - out_index;

const can_read = @as(usize, @intCast(@min(data_avail, out_avail)));
const nread = try conn.read(buffer[0..can_read]);
@@ -545,6 +545,8 @@ pub const HeadersParser = struct {
if (r.next_chunk_length == 0) r.done = true;

return nread;
} else {
return out_index;
}
},
.chunk_data_suffix, .chunk_data_suffix_r, .chunk_head_size, .chunk_head_ext, .chunk_head_r => {
@@ -558,6 +560,7 @@ pub const HeadersParser = struct {
.chunk_data => if (r.next_chunk_length == 0) {
if (std.mem.eql(u8, conn.peek(), "\r\n")) {
r.state = .finished;
r.done = true;
} else {
// The trailer section is formatted identically to the header section.
r.state = .seen_rn;
@@ -20,7 +20,19 @@ var server: Server = undefined;
fn handleRequest(res: *Server.Response) !void {
const log = std.log.scoped(.server);

log.info("{s} {s} {s}", .{ @tagName(res.request.method), @tagName(res.request.version), res.request.target });
log.info("{} {s} {s}", .{ res.request.method, @tagName(res.request.version), res.request.target });

if (res.request.headers.contains("expect")) {
if (mem.eql(u8, res.request.headers.getFirstValue("expect").?, "100-continue")) {
res.status = .@"continue";
try res.do();
res.status = .ok;
} else {
res.status = .expectation_failed;
try res.do();
return;
}
}

const body = try res.reader().readAllAlloc(salloc, 8192);
defer salloc.free(body);
@@ -43,6 +55,8 @@ fn handleRequest(res: *Server.Response) !void {
try res.writeAll("Hello, ");
try res.writeAll("World!\n");
try res.finish();
} else {
try testing.expectEqual(res.writeAll("errors"), error.NotWriteable);
}
} else if (mem.startsWith(u8, res.request.target, "/large")) {
res.transfer_encoding = .{ .content_length = 14 * 1024 + 14 * 10 };
@@ -62,7 +76,7 @@ fn handleRequest(res: *Server.Response) !void {
}

try res.finish();
} else if (mem.eql(u8, res.request.target, "/echo-content")) {
} else if (mem.startsWith(u8, res.request.target, "/echo-content")) {
try testing.expectEqualStrings("Hello, World!\n", body);
try testing.expectEqualStrings("text/plain", res.request.headers.getFirstValue("content-type").?);

@@ -571,7 +585,84 @@ pub fn main() !void {
// connection has been kept alive
try testing.expect(client.connection_pool.free_len == 1);

{ // issue 16282
{ // Client.fetch()
var h = http.Headers{ .allocator = calloc };
defer h.deinit();

try h.append("content-type", "text/plain");

const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#fetch", .{port});
defer calloc.free(location);

log.info("{s}", .{location});
var res = try client.fetch(calloc, .{
.location = .{ .url = location },
.method = .POST,
.headers = h,
.payload = .{ .string = "Hello, World!\n" },
});
defer res.deinit();

try testing.expectEqualStrings("Hello, World!\n", res.body.?);
}

{ // expect: 100-continue
var h = http.Headers{ .allocator = calloc };
defer h.deinit();

try h.append("expect", "100-continue");
try h.append("content-type", "text/plain");

const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#expect-100", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);

log.info("{s}", .{location});
var req = try client.request(.POST, uri, h, .{});
defer req.deinit();

req.transfer_encoding = .chunked;

try req.start();
try req.wait();
try testing.expectEqual(http.Status.@"continue", req.response.status);

try req.writeAll("Hello, ");
try req.writeAll("World!\n");
try req.finish();

try req.wait();
try testing.expectEqual(http.Status.ok, req.response.status);

const body = try req.reader().readAllAlloc(calloc, 8192);
defer calloc.free(body);

try testing.expectEqualStrings("Hello, World!\n", body);
}

{ // expect: garbage
var h = http.Headers{ .allocator = calloc };
defer h.deinit();

try h.append("content-type", "text/plain");
try h.append("expect", "garbage");

const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/echo-content#expect-garbage", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);

log.info("{s}", .{location});
var req = try client.request(.POST, uri, h, .{});
defer req.deinit();

req.transfer_encoding = .chunked;

try req.start();
try req.wait();
try testing.expectEqual(http.Status.expectation_failed, req.response.status);
}

{ // issue 16282 *** This test leaves the client in an invalid state, it must be last ***
const location = try std.fmt.allocPrint(calloc, "http://127.0.0.1:{d}/get", .{port});
defer calloc.free(location);
const uri = try std.Uri.parse(location);