update std.net and nail down delimiter APIs

"exclusive" functions still need to report EndOfStream after the last
returned slice
Andrew Kelley 2025-04-23 16:52:03 -07:00
parent 1bb75f9d62
commit 396464ee6b
7 changed files with 455 additions and 386 deletions
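As a minimal sketch of the "exclusive" semantics described in the commit message (the helper is hypothetical; BufferedReader and takeDelimiterExclusive come from the std.io changes below): the last slice before end-of-stream is returned like any other, and only the call after it reports error.EndOfStream.

const std = @import("std");

fn countLines(br: *std.io.BufferedReader) error{ ReadFailed, StreamTooLong }!usize {
    var n: usize = 0;
    while (true) {
        // The final, possibly unterminated line is returned normally; the
        // next call is the one that reports error.EndOfStream.
        _ = br.takeDelimiterExclusive('\n') catch |err| switch (err) {
            error.EndOfStream => return n,
            else => |e| return e,
        };
        n += 1;
    }
}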

View File

@ -681,7 +681,7 @@ pub fn BitReader(comptime T: type) type {
(self.nbits >> 3); // 0 for 0-7, 1 for 8-16, ... same as / 8
var buf: [t_bytes]u8 = [_]u8{0} ** t_bytes;
const bytes_read = self.forward_reader.readShort(buf[0..empty_bytes]) catch 0;
const bytes_read = self.forward_reader.readSliceShort(buf[0..empty_bytes]) catch 0;
if (bytes_read > 0) {
const u: T = std.mem.readInt(T, buf[0..t_bytes], .little);
self.bits |= u << @as(Tshift, @intCast(self.nbits));

View File

@ -225,7 +225,9 @@ pub const AddCertsFromFileError = Allocator.Error ||
error{ CertificateAuthorityBundleTooBig, MissingEndCertificateMarker };
pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFromFileError!void {
const size = try file.getEndPos();
var file_reader = file.reader();
const size = try file_reader.getSize();
var br = file_reader.interface().unbuffered();
// We borrow `bytes` as a temporary buffer for the base64-encoded data.
// This is possible by computing the decoded length and reserving the space
@ -236,7 +238,9 @@ pub fn addCertsFromFile(cb: *Bundle, gpa: Allocator, file: fs.File) AddCertsFrom
try cb.bytes.ensureUnusedCapacity(gpa, needed_capacity);
const end_reserved: u32 = @intCast(cb.bytes.items.len + decoded_size_upper_bound);
const buffer = cb.bytes.allocatedSlice()[end_reserved..];
const end_index = try file.readShort(buffer);
const end_index = br.readSliceShort(buffer) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
};
const encoded_bytes = buffer[0..end_index];
const begin_marker = "-----BEGIN CERTIFICATE-----";
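The hunk above also illustrates the new error-reporting split: the generic reader interface surfaces only error.ReadFailed, while the concrete File.Reader records the underlying cause in its err field. A hedged sketch of that idiom in isolation (the helper name is hypothetical):

const std = @import("std");

fn readIntoBuffer(file: std.fs.File, buffer: []u8) !usize {
    var file_reader = file.reader();
    var br = file_reader.interface().unbuffered();
    return br.readSliceShort(buffer) catch |err| switch (err) {
        // The interface only knows that the read failed; the File.Reader
        // remembers why.
        error.ReadFailed => return file_reader.err.?,
    };
}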

View File

@ -1947,22 +1947,6 @@ pub fn readLinkW(self: Dir, sub_path_w: []const u16, buffer: []u8) ![]u8 {
return windows.ReadLink(self.fd, sub_path_w, buffer);
}
/// Read all of file contents using a preallocated buffer.
/// The returned slice has the same pointer as `buffer`. If the length matches `buffer.len`
/// the situation is ambiguous. It could either mean that the entire file was read, and
/// it exactly fits the buffer, or it could mean the buffer was not big enough for the
/// entire file.
/// On Windows, `file_path` should be encoded as [WTF-8](https://simonsapin.github.io/wtf-8/).
/// On WASI, `file_path` should be encoded as valid UTF-8.
/// On other platforms, `file_path` is an opaque sequence of bytes with no particular encoding.
pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 {
var file = try self.openFile(file_path, .{});
defer file.close();
const end_index = try file.readAll(buffer);
return buffer[0..end_index];
}
pub const ReadFileAllocError = File.OpenError || File.ReadError || Allocator.Error || error{StreamTooLong};
/// Reads all the bytes from the named file. On success, caller owns returned
@ -2046,10 +2030,13 @@ pub fn readFileIntoArrayList(
var file = try dir.openFile(file_path, .{});
defer file.close();
var file_reader = file.reader();
// Apply size hint by adjusting the array list's capacity.
if (size_hint) |size| {
try list.ensureUnusedCapacity(gpa, size);
} else if (file.getEndPos()) |size| {
file_reader.size = size;
} else if (file_reader.getSize()) |size| {
// If the file size doesn't fit in a usize, it will certainly exceed the limit.
try list.ensureUnusedCapacity(gpa, std.math.cast(usize, size) orelse return error.StreamTooLong);
} else |err| switch (err) {
@ -2058,7 +2045,6 @@ pub fn readFileIntoArrayList(
else => |e| return e,
}
var file_reader = file.reader();
file_reader.interface().readRemainingArrayList(gpa, alignment, list, limit) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.StreamTooLong => return error.StreamTooLong,
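With Dir.readFile removed, callers route reads through File.Reader instead. One possible allocating replacement, sketched against the readFileIntoArrayList hunk above (the function name, limit handling, and error mapping are assumptions, not part of this commit):

const std = @import("std");

fn readFileAlloc(gpa: std.mem.Allocator, dir: std.fs.Dir, file_path: []const u8, max_bytes: usize) ![]u8 {
    var file = try dir.openFile(file_path, .{});
    defer file.close();
    var file_reader = file.reader();
    return file_reader.interface().readRemainingAlloc(gpa, .limited(max_bytes)) catch |err| switch (err) {
        error.ReadFailed => return file_reader.err.?,
        else => |e| return e,
    };
}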

View File

@ -799,20 +799,6 @@ pub fn read(self: File, buffer: []u8) ReadError!usize {
return posix.read(self.handle, buffer);
}
/// One-shot alternative to `std.io.BufferedReader.readShort` via `reader`.
///
/// Returns the number of bytes read. If the number read is smaller than `buffer.len`, it
/// means the file reached the end.
pub fn readShort(self: File, buffer: []u8) ReadError!usize {
var index: usize = 0;
while (index != buffer.len) {
const n = try self.read(buffer[index..]);
if (n == 0) break;
index += n;
}
return index;
}
/// On Windows, this function currently does alter the file pointer.
/// https://github.com/ziglang/zig/issues/12783
pub fn pread(self: File, buffer: []u8, offset: u64) PReadError!usize {
@ -948,6 +934,19 @@ pub const Reader = struct {
};
}
pub fn getSize(r: *Reader) GetEndPosError!u64 {
return r.size orelse {
if (r.size_err) |err| return err;
if (r.file.getEndPos()) |size| {
r.size = size;
return size;
} else |err| {
r.size_err = err;
return err;
}
};
}
/// Number of slices to store on the stack, when trying to send as many byte
/// vectors through the underlying read calls as possible.
const max_buffers_len = 16;
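getSize caches both the size and any failure on the reader, so it can be called opportunistically. A small sketch of using it as a capacity hint, mirroring readFileIntoArrayList above (the helper itself is hypothetical):

const std = @import("std");

fn reserveForFile(gpa: std.mem.Allocator, list: *std.ArrayListUnmanaged(u8), file_reader: *std.fs.File.Reader) !void {
    // Unknown size (e.g. a pipe): skip the hint rather than fail.
    const size = file_reader.getSize() catch return;
    const n = std.math.cast(usize, size) orelse return error.StreamTooLong;
    try list.ensureUnusedCapacity(gpa, n);
}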

View File

@ -113,8 +113,9 @@ test "HTTP server handles a chunked transfer coding request" {
try expect(request.head.transfer_encoding == .chunked);
var buf: [128]u8 = undefined;
const n = try (try request.reader()).readAll(&buf);
try expect(mem.eql(u8, buf[0..n], "ABCD"));
var br = (try request.reader()).unbuffered();
const n = try br.readSliceShort(&buf);
try expectEqualStrings("ABCD", buf[0..n]);
try request.respond("message from server!\n", .{
.extra_headers = &.{
@ -143,7 +144,8 @@ test "HTTP server handles a chunked transfer coding request" {
const gpa = std.testing.allocator;
const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
defer stream.close();
var writer = stream.writer().unbuffered();
var stream_writer = stream.writer();
var writer = stream_writer.interface().unbuffered();
try writer.writeAll(request_bytes);
const expected_response =
@ -153,7 +155,8 @@ test "HTTP server handles a chunked transfer coding request" {
"content-type: text/plain\r\n" ++
"\r\n" ++
"message from server!\n";
const response = try stream.reader().readRemainingAlloc(gpa, expected_response.len);
var stream_reader = stream.reader();
const response = try stream_reader.interface().readRemainingAlloc(gpa, .limited(expected_response.len));
defer gpa.free(response);
try expectEqualStrings(expected_response, response);
}
@ -206,7 +209,7 @@ test "echo content server" {
// request.head.target,
//});
const body = try (try request.reader()).readRemainingAlloc(std.testing.allocator, 8192);
const body = try (try request.reader()).readRemainingAlloc(std.testing.allocator, .limited(8192));
defer std.testing.allocator.free(body);
try expect(mem.startsWith(u8, request.head.target, "/echo-content"));
@ -288,10 +291,12 @@ test "Server.Request.respondStreaming non-chunked, unknown content-length" {
const gpa = std.testing.allocator;
const stream = try std.net.tcpConnectToHost(gpa, "127.0.0.1", test_server.port());
defer stream.close();
var writer = stream.writer().unbuffered();
var stream_writer = stream.writer();
var writer = stream_writer.interface().unbuffered();
try writer.writeAll(request_bytes);
const response = try stream.reader().readRemainingAlloc(gpa, 8192);
var stream_reader = stream.reader();
const response = try stream_reader.interface().readRemainingAlloc(gpa, .limited(8192));
defer gpa.free(response);
var expected_response = std.ArrayList(u8).init(gpa);
@ -362,7 +367,8 @@ test "receiving arbitrary http headers from the client" {
var writer = stream_writer.interface().unbuffered();
try writer.writeAll(request_bytes);
const response = try stream.reader().readRemainingAlloc(gpa, .limited(8192));
var stream_reader = stream.reader();
const response = try stream_reader.interface().readRemainingAlloc(gpa, .limited(8192));
defer gpa.free(response);
var expected_response = std.ArrayList(u8).init(gpa);

View File

@ -368,7 +368,7 @@ pub fn readSlice(br: *BufferedReader, buffer: []u8) Reader.Error!void {
/// Returns the number of bytes read, which is less than `buffer.len` if and
/// only if the stream reached the end.
pub fn readShort(br: *BufferedReader, buffer: []u8) Reader.ShortError!usize {
pub fn readSliceShort(br: *BufferedReader, buffer: []u8) Reader.ShortError!usize {
_ = br;
_ = buffer;
@panic("TODO");
@ -467,10 +467,12 @@ pub fn readRemainingArrayList(
}
}
pub const DelimiterInclusiveError = error{
pub const DelimiterError = error{
/// See the `Reader` implementation for detailed diagnostics.
ReadFailed,
/// Stream ended before the delimiter was found.
/// For "inclusive" functions, stream ended before the delimiter was found.
/// For "exclusive" functions, stream ended and there are no more bytes to
/// return.
EndOfStream,
/// The delimiter was not found within a number of bytes matching the
/// capacity of the `BufferedReader`.
@ -488,13 +490,13 @@ pub const DelimiterInclusiveError = error{
/// * `peekSentinel`
/// * `takeDelimiterExclusive`
/// * `takeDelimiterInclusive`
pub fn takeSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterInclusiveError![:sentinel]u8 {
pub fn takeSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
const result = try br.peekSentinel(sentinel);
br.toss(result.len + 1);
return result;
}
pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterInclusiveError![:sentinel]u8 {
pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterError![:sentinel]u8 {
const result = try br.takeDelimiterInclusive(sentinel);
return result[0 .. result.len - 1 :sentinel];
}
@ -510,58 +512,24 @@ pub fn peekSentinel(br: *BufferedReader, comptime sentinel: u8) DelimiterInclusi
/// * `takeSentinel`
/// * `takeDelimiterExclusive`
/// * `peekDelimiterInclusive`
pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterInclusiveError![]u8 {
pub fn takeDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 {
const result = try br.peekDelimiterInclusive(delimiter);
br.toss(result.len);
return result;
}
pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterInclusiveError![]u8 {
return (try br.peekDelimiterInclusiveUnlessEnd(delimiter)) orelse error.EndOfStream;
}
pub const DelimiterExclusiveError = error{
/// See the `Reader` implementation for detailed diagnostics.
ReadFailed,
/// The delimiter was not found within a number of bytes matching the
/// capacity of the `BufferedReader`.
StreamTooLong,
};
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice excludes the delimiter.
///
/// End-of-stream is treated equivalent to a delimiter.
/// Returned slice includes the delimiter as the last byte.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeSentinel`
/// * `takeDelimiterInclusive`
/// * `peekSentinel`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterExclusiveError![]u8 {
const result = br.peekDelimiterInclusiveUnlessEnd(delimiter) catch |err| switch (err) {
error.EndOfStream => {
br.toss(br.end);
return br.buffer[0..br.end];
},
else => |e| return e,
};
br.toss(result.len);
return result[0 .. result.len - 1];
}
pub fn peekDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterExclusiveError![]u8 {
const result = br.peekDelimiterInclusiveUnlessEnd(delimiter) catch |err| switch (err) {
error.EndOfStream => return br.buffer[0..br.end],
else => |e| return e,
};
return result[0 .. result.len - 1];
}
fn peekDelimiterInclusiveUnlessEnd(br: *BufferedReader, delimiter: u8) DelimiterInclusiveError!?[]u8 {
/// * `takeDelimiterInclusive`
pub fn peekDelimiterInclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 {
const buffer = br.buffer[0..br.end];
const seek = br.seek;
if (std.mem.indexOfScalarPos(u8, buffer, seek, delimiter)) |end| {
@ -585,12 +553,70 @@ fn peekDelimiterInclusiveUnlessEnd(br: *BufferedReader, delimiter: u8) Delimiter
return error.StreamTooLong;
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated the same as
/// a delimiter, unless it would result in a zero-length return value, in which
/// case `error.EndOfStream` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `BufferedReader`, `error.StreamTooLong` is returned. In
/// such a case, the stream state is unmodified, as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `takeDelimiterInclusive`
/// * `peekDelimiterExclusive`
pub fn takeDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 {
const result = br.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
if (br.end == 0) return error.EndOfStream;
br.toss(br.end);
return br.buffer[0..br.end];
},
else => |e| return e,
};
br.toss(result.len);
return result[0 .. result.len - 1];
}
/// Returns a slice of the next bytes of buffered data from the stream until
/// `delimiter` is found, without advancing the seek position.
///
/// Returned slice excludes the delimiter. End-of-stream is treated the same as
/// a delimiter, unless it would result in a zero-length return value, in which
/// case `error.EndOfStream` is returned instead.
///
/// If the delimiter is not found within a number of bytes matching the
/// capacity of this `BufferedReader`, `error.StreamTooLong` is returned. In
/// such a case, the stream state is unmodified, as if this function was never
/// called.
///
/// Invalidates previously returned values from `peek`.
///
/// See also:
/// * `peekDelimiterInclusive`
/// * `takeDelimiterExclusive`
pub fn peekDelimiterExclusive(br: *BufferedReader, delimiter: u8) DelimiterError![]u8 {
const result = br.peekDelimiterInclusive(delimiter) catch |err| switch (err) {
error.EndOfStream => {
if (br.end == 0) return error.EndOfStream;
return br.buffer[0..br.end];
},
else => |e| return e,
};
return result[0 .. result.len - 1];
}
/// Appends contents to `bw` by reading from the stream until `delimiter` is
/// found. Does not write the delimiter itself.
///
/// Returns number of bytes streamed.
pub fn streamToDelimiter(br: *BufferedReader, bw: *BufferedWriter, delimiter: u8) Reader.RwError!usize {
const amount, const to = try br.streamToAny(bw, delimiter, .unlimited);
pub fn readDelimiter(br: *BufferedReader, bw: *BufferedWriter, delimiter: u8) Reader.RwError!usize {
const amount, const to = try br.readAny(bw, delimiter, .unlimited);
return switch (to) {
.delimiter => amount,
.limit => unreachable,
@ -604,12 +630,12 @@ pub fn streamToDelimiter(br: *BufferedReader, bw: *BufferedWriter, delimiter: u8
/// Succeeds if stream ends before delimiter found.
///
/// Returns number of bytes streamed. The end is not signaled to the writer.
pub fn streamToDelimiterOrEnd(
pub fn readDelimiterEnding(
br: *BufferedReader,
bw: *BufferedWriter,
delimiter: u8,
) Reader.RwAllError!usize {
const amount, const to = try br.streamToAny(bw, delimiter, .unlimited);
const amount, const to = try br.readAny(bw, delimiter, .unlimited);
return switch (to) {
.delimiter, .end => amount,
.limit => unreachable,
@ -627,13 +653,13 @@ pub const StreamDelimiterLimitedError = Reader.RwAllError || error{
/// Does not write the delimiter itself.
///
/// Returns number of bytes streamed.
pub fn streamToDelimiterOrLimit(
pub fn readDelimiterLimit(
br: *BufferedReader,
bw: *BufferedWriter,
delimiter: u8,
limit: Reader.Limit,
) StreamDelimiterLimitedError!usize {
const amount, const to = try br.streamToAny(bw, delimiter, limit);
const amount, const to = try br.readAny(bw, delimiter, limit);
return switch (to) {
.delimiter => amount,
.limit => error.StreamTooLong,
@ -641,7 +667,7 @@ pub fn streamToDelimiterOrLimit(
};
}
fn streamToAny(
fn readAny(
br: *BufferedReader,
bw: *BufferedWriter,
delimiter: ?u8,
@ -971,15 +997,15 @@ test peekDelimiterExclusive {
return error.Unimplemented;
}
test streamToDelimiter {
test readDelimiter {
return error.Unimplemented;
}
test streamToDelimiterOrEnd {
test readDelimiterEnding {
return error.Unimplemented;
}
test streamToDelimiterOrLimit {
test readDelimiterLimit {
return error.Unimplemented;
}
@ -1035,7 +1061,7 @@ test takeLeb128 {
return error.Unimplemented;
}
test readShort {
test readSliceShort {
return error.Unimplemented;
}
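For reference, a sketch of the renamed streaming helpers against the signatures shown in this file; the record-copying wrappers and the 4096-byte bound are illustrative assumptions:

const std = @import("std");

fn copyRecord(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !usize {
    // Bounds a single record to 4096 bytes; error.StreamTooLong past that.
    return br.readDelimiterLimit(bw, '\n', .limited(4096));
}

fn copyTrailingRecord(br: *std.io.BufferedReader, bw: *std.io.BufferedWriter) !usize {
    // Unlike readDelimiter, this succeeds if the stream ends before a '\n'.
    return br.readDelimiterEnding(bw, '\n');
}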

View File

@ -11,6 +11,8 @@ const io = std.io;
const native_endian = builtin.target.cpu.arch.endian();
const native_os = builtin.os.tag;
const windows = std.os.windows;
const Allocator = std.mem.Allocator;
const ArrayList = std.ArrayListUnmanaged;
// Windows 10 added support for unix sockets in build 17063, redstone 4 is the
// first release to support them.
@ -818,7 +820,7 @@ pub const AddressList = struct {
pub const TcpConnectToHostError = GetAddressListError || TcpConnectToAddressError;
/// All memory allocated with `allocator` will be freed before this function returns.
pub fn tcpConnectToHost(allocator: mem.Allocator, name: []const u8, port: u16) TcpConnectToHostError!Stream {
pub fn tcpConnectToHost(allocator: Allocator, name: []const u8, port: u16) TcpConnectToHostError!Stream {
const list = try getAddressList(allocator, name, port);
defer list.deinit();
@ -851,7 +853,7 @@ pub fn tcpConnectToAddress(address: Address) TcpConnectToAddressError!Stream {
// TODO: Instead of having a massive error set, make the error set have categories, and then
// store the sub-error as a diagnostic value.
const GetAddressListError = std.mem.Allocator.Error || std.fs.File.OpenError || posix.SocketError || posix.BindError || posix.SetSockOptError || error{
const GetAddressListError = Allocator.Error || std.fs.File.OpenError || std.fs.File.ReadError || posix.SocketError || posix.BindError || posix.SetSockOptError || error{
TemporaryNameServerFailure,
NameServerFailure,
AddressFamilyNotSupported,
@ -871,12 +873,13 @@ const GetAddressListError = std.mem.Allocator.Error || std.fs.File.OpenError ||
InterfaceNotFound,
FileSystem,
ResolveConfParseFailed,
};
/// Call `AddressList.deinit` on the result.
pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) GetAddressListError!*AddressList {
pub fn getAddressList(gpa: Allocator, name: []const u8, port: u16) GetAddressListError!*AddressList {
const result = blk: {
var arena = std.heap.ArenaAllocator.init(allocator);
var arena = std.heap.ArenaAllocator.init(gpa);
errdefer arena.deinit();
const result = try arena.allocator().create(AddressList);
@ -891,11 +894,11 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
errdefer result.deinit();
if (native_os == .windows) {
const name_c = try allocator.dupeZ(u8, name);
defer allocator.free(name_c);
const name_c = try gpa.dupeZ(u8, name);
defer gpa.free(name_c);
const port_c = try std.fmt.allocPrintZ(allocator, "{}", .{port});
defer allocator.free(port_c);
const port_c = try std.fmt.allocPrintZ(gpa, "{}", .{port});
defer gpa.free(port_c);
const ws2_32 = windows.ws2_32;
const hints: posix.addrinfo = .{
@ -963,11 +966,11 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
}
if (builtin.link_libc) {
const name_c = try allocator.dupeZ(u8, name);
defer allocator.free(name_c);
const name_c = try gpa.dupeZ(u8, name);
defer gpa.free(name_c);
const port_c = try std.fmt.allocPrintZ(allocator, "{}", .{port});
defer allocator.free(port_c);
const port_c = try std.fmt.allocPrintZ(gpa, "{}", .{port});
defer gpa.free(port_c);
const hints: posix.addrinfo = .{
.flags = .{ .NUMERICSERV = true },
@ -1030,17 +1033,17 @@ pub fn getAddressList(allocator: mem.Allocator, name: []const u8, port: u16) Get
if (native_os == .linux) {
const family = posix.AF.UNSPEC;
var lookup_addrs = std.ArrayList(LookupAddr).init(allocator);
defer lookup_addrs.deinit();
var lookup_addrs: ArrayList(LookupAddr) = .empty;
defer lookup_addrs.deinit(gpa);
var canon = std.ArrayList(u8).init(arena);
defer canon.deinit();
var canon: ArrayList(u8) = .empty;
defer canon.deinit(gpa);
try linuxLookupName(&lookup_addrs, &canon, name, family, .{ .NUMERICSERV = true }, port);
try linuxLookupName(gpa, &lookup_addrs, &canon, name, family, .{ .NUMERICSERV = true }, port);
result.addrs = try arena.alloc(Address, lookup_addrs.items.len);
if (canon.items.len != 0) {
result.canon_name = try canon.toOwnedSlice();
result.canon_name = try arena.dupe(u8, canon.items);
}
for (lookup_addrs.items, 0..) |lookup_addr, i| {
@ -1067,8 +1070,9 @@ const DAS_PREFIX_SHIFT = 8;
const DAS_ORDER_SHIFT = 0;
fn linuxLookupName(
addrs: *std.ArrayList(LookupAddr),
canon: *std.ArrayList(u8),
gpa: Allocator,
addrs: *ArrayList(LookupAddr),
canon: *ArrayList(u8),
opt_name: ?[]const u8,
family: posix.sa_family_t,
flags: posix.AI,
@ -1077,13 +1081,13 @@ fn linuxLookupName(
if (opt_name) |name| {
// reject empty name and check len so it fits into temp bufs
canon.items.len = 0;
try canon.appendSlice(name);
try canon.appendSlice(gpa, name);
if (Address.parseExpectingFamily(name, family, port)) |addr| {
try addrs.append(LookupAddr{ .addr = addr });
try addrs.append(gpa, .{ .addr = addr });
} else |name_err| if (flags.NUMERICHOST) {
return name_err;
} else {
try linuxLookupNameFromHosts(addrs, canon, name, family, port);
try linuxLookupNameFromHosts(gpa, addrs, canon, name, family, port);
if (addrs.items.len == 0) {
// RFC 6761 Section 6.3.3
// Name resolution APIs and libraries SHOULD recognize localhost
@ -1094,17 +1098,18 @@ fn linuxLookupName(
// Check for equal to "localhost(.)" or ends in ".localhost(.)"
const localhost = if (name[name.len - 1] == '.') "localhost." else "localhost";
if (mem.endsWith(u8, name, localhost) and (name.len == localhost.len or name[name.len - localhost.len] == '.')) {
try addrs.append(LookupAddr{ .addr = .{ .in = Ip4Address.parse("127.0.0.1", port) catch unreachable } });
try addrs.append(LookupAddr{ .addr = .{ .in6 = Ip6Address.parse("::1", port) catch unreachable } });
try addrs.append(gpa, .{ .addr = .{ .in = Ip4Address.parse("127.0.0.1", port) catch unreachable } });
try addrs.append(gpa, .{ .addr = .{ .in6 = Ip6Address.parse("::1", port) catch unreachable } });
return;
}
try linuxLookupNameFromDnsSearch(addrs, canon, name, family, port);
try linuxLookupNameFromDnsSearch(gpa, addrs, canon, name, family, port);
}
}
} else {
try canon.resize(0);
try linuxLookupNameFromNull(addrs, family, flags, port);
try canon.resize(gpa, 0);
try addrs.ensureUnusedCapacity(gpa, 1);
linuxLookupNameFromNull(addrs, family, flags, port);
}
if (addrs.items.len == 0) return error.UnknownHostName;
@ -1310,39 +1315,40 @@ fn addrCmpLessThan(context: void, b: LookupAddr, a: LookupAddr) bool {
}
fn linuxLookupNameFromNull(
addrs: *std.ArrayList(LookupAddr),
addrs: *ArrayList(LookupAddr),
family: posix.sa_family_t,
flags: posix.AI,
port: u16,
) !void {
) void {
if (flags.PASSIVE) {
if (family != posix.AF.INET6) {
(try addrs.addOne()).* = LookupAddr{
addrs.appendAssumeCapacity(.{
.addr = Address.initIp4([1]u8{0} ** 4, port),
};
});
}
if (family != posix.AF.INET) {
(try addrs.addOne()).* = LookupAddr{
addrs.appendAssumeCapacity(.{
.addr = Address.initIp6([1]u8{0} ** 16, port, 0, 0),
};
});
}
} else {
if (family != posix.AF.INET6) {
(try addrs.addOne()).* = LookupAddr{
addrs.appendAssumeCapacity(.{
.addr = Address.initIp4([4]u8{ 127, 0, 0, 1 }, port),
};
});
}
if (family != posix.AF.INET) {
(try addrs.addOne()).* = LookupAddr{
addrs.appendAssumeCapacity(.{
.addr = Address.initIp6(([1]u8{0} ** 15) ++ [1]u8{1}, port, 0, 0),
};
});
}
}
}
fn linuxLookupNameFromHosts(
addrs: *std.ArrayList(LookupAddr),
canon: *std.ArrayList(u8),
gpa: Allocator,
addrs: *ArrayList(LookupAddr),
canon: *ArrayList(u8),
name: []const u8,
family: posix.sa_family_t,
port: u16,
@ -1359,7 +1365,34 @@ fn linuxLookupNameFromHosts(
var line_buf: [512]u8 = undefined;
var file_reader = file.reader();
var br = file_reader.interface().buffered(&line_buf);
while (br.takeSentinel('\n')) |line| {
return parseHosts(gpa, addrs, canon, name, family, port, &br) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.ReadFailed => return file_reader.err.?,
};
}
fn parseHosts(
gpa: Allocator,
addrs: *ArrayList(LookupAddr),
canon: *ArrayList(u8),
name: []const u8,
family: posix.sa_family_t,
port: u16,
br: *std.io.BufferedReader,
) error{ OutOfMemory, ReadFailed }!void {
while (true) {
const line = br.takeDelimiterExclusive('\n') catch |err| switch (err) {
error.StreamTooLong => {
// Skip lines that are too long.
br.discardDelimiterInclusive('\n') catch |e| switch (e) {
error.EndOfStream => break,
error.ReadFailed => return error.ReadFailed,
};
continue;
},
error.ReadFailed => return error.ReadFailed,
error.EndOfStream => break,
};
var split_it = mem.splitScalar(u8, line, '#');
const no_comment_line = split_it.first();
@ -1383,15 +1416,15 @@ fn linuxLookupNameFromHosts(
error.NonCanonical,
=> continue,
};
try addrs.append(LookupAddr{ .addr = addr });
try addrs.append(gpa, .{ .addr = addr });
// first name is canonical name
const name_text = first_name_text.?;
if (isValidHostName(name_text)) {
canon.items.len = 0;
try canon.appendSlice(name_text);
try canon.appendSlice(gpa, name_text);
}
} else |err| return err;
}
}
pub fn isValidHostName(hostname: []const u8) bool {
@ -1407,14 +1440,15 @@ pub fn isValidHostName(hostname: []const u8) bool {
}
fn linuxLookupNameFromDnsSearch(
addrs: *std.ArrayList(LookupAddr),
canon: *std.ArrayList(u8),
gpa: Allocator,
addrs: *ArrayList(LookupAddr),
canon: *ArrayList(u8),
name: []const u8,
family: posix.sa_family_t,
port: u16,
) !void {
var rc: ResolvConf = undefined;
try getResolvConf(addrs.allocator, &rc);
rc.init(gpa) catch return error.ResolveConfParseFailed;
defer rc.deinit();
// Count dots, suppress search when >=ndots or name ends in
@ -1439,37 +1473,40 @@ fn linuxLookupNameFromDnsSearch(
// provides the desired default canonical name (if the requested
// name is not a CNAME record) and serves as a buffer for passing
// the full requested name to name_from_dns.
try canon.resize(canon_name.len);
try canon.resize(gpa, canon_name.len);
@memcpy(canon.items, canon_name);
try canon.append('.');
try canon.append(gpa, '.');
var tok_it = mem.tokenizeAny(u8, search, " \t");
while (tok_it.next()) |tok| {
canon.shrinkRetainingCapacity(canon_name.len + 1);
try canon.appendSlice(tok);
try linuxLookupNameFromDns(addrs, canon, canon.items, family, rc, port);
try canon.appendSlice(gpa, tok);
try linuxLookupNameFromDns(gpa, addrs, canon, canon.items, family, rc, port);
if (addrs.items.len != 0) return;
}
canon.shrinkRetainingCapacity(canon_name.len);
return linuxLookupNameFromDns(addrs, canon, name, family, rc, port);
return linuxLookupNameFromDns(gpa, addrs, canon, name, family, rc, port);
}
const dpc_ctx = struct {
addrs: *std.ArrayList(LookupAddr),
canon: *std.ArrayList(u8),
gpa: Allocator,
addrs: *ArrayList(LookupAddr),
canon: *ArrayList(u8),
port: u16,
};
fn linuxLookupNameFromDns(
addrs: *std.ArrayList(LookupAddr),
canon: *std.ArrayList(u8),
gpa: Allocator,
addrs: *ArrayList(LookupAddr),
canon: *ArrayList(u8),
name: []const u8,
family: posix.sa_family_t,
rc: ResolvConf,
port: u16,
) !void {
const ctx = dpc_ctx{
const ctx: dpc_ctx = .{
.gpa = gpa,
.addrs = addrs,
.canon = canon,
.port = port,
@ -1479,8 +1516,8 @@ fn linuxLookupNameFromDns(
rr: u8,
};
const afrrs = [_]AfRr{
AfRr{ .af = posix.AF.INET6, .rr = posix.RR.A },
AfRr{ .af = posix.AF.INET, .rr = posix.RR.AAAA },
.{ .af = posix.AF.INET6, .rr = posix.RR.A },
.{ .af = posix.AF.INET, .rr = posix.RR.AAAA },
};
var qbuf: [2][280]u8 = undefined;
var abuf: [2][512]u8 = undefined;
@ -1500,7 +1537,7 @@ fn linuxLookupNameFromDns(
ap[0].len = 0;
ap[1].len = 0;
try resMSendRc(qp[0..nq], ap[0..nq], apbuf[0..nq], rc);
try rc.resMSendRc(qp[0..nq], ap[0..nq], apbuf[0..nq]);
var i: usize = 0;
while (i < nq) : (i += 1) {
@ -1515,240 +1552,252 @@ fn linuxLookupNameFromDns(
}
const ResolvConf = struct {
gpa: Allocator,
attempts: u32,
ndots: u32,
timeout: u32,
search: std.ArrayList(u8),
ns: std.ArrayList(LookupAddr),
search: ArrayList(u8),
/// TODO there are actually only allowed to be a maximum of 3 nameservers; no
/// need for an array list.
ns: ArrayList(LookupAddr),
/// Returns `error.StreamTooLong` if a line is longer than 512 bytes.
/// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761
fn init(rc: *ResolvConf, gpa: Allocator) !void {
rc.* = .{
.gpa = gpa,
.ns = .empty,
.search = .empty,
.ndots = 1,
.timeout = 5,
.attempts = 2,
};
errdefer rc.deinit();
const file = fs.openFileAbsoluteZ("/etc/resolv.conf", .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.AccessDenied,
=> return linuxLookupNameFromNumericUnspec(gpa, &rc.ns, "127.0.0.1", 53),
else => |e| return e,
};
defer file.close();
var line_buf: [512]u8 = undefined;
var file_reader = file.reader();
var br = file_reader.interface().buffered(&line_buf);
return parse(rc, &br) catch |err| switch (err) {
error.ReadFailed => return file_reader.err.?,
else => |e| return e,
};
}
fn parse(rc: *ResolvConf, br: *std.io.BufferedReader) !void {
const gpa = rc.gpa;
while (br.takeSentinel('\n')) |line_with_comment| {
const line = line: {
var split = mem.splitScalar(u8, line_with_comment, '#');
break :line split.first();
};
var line_it = mem.tokenizeAny(u8, line, " \t");
const token = line_it.next() orelse continue;
if (mem.eql(u8, token, "options")) {
while (line_it.next()) |sub_tok| {
var colon_it = mem.splitScalar(u8, sub_tok, ':');
const name = colon_it.first();
const value_txt = colon_it.next() orelse continue;
const value = std.fmt.parseInt(u8, value_txt, 10) catch |err| switch (err) {
error.Overflow => 255,
error.InvalidCharacter => continue,
};
if (mem.eql(u8, name, "ndots")) {
rc.ndots = @min(value, 15);
} else if (mem.eql(u8, name, "attempts")) {
rc.attempts = @min(value, 10);
} else if (mem.eql(u8, name, "timeout")) {
rc.timeout = @min(value, 60);
}
}
} else if (mem.eql(u8, token, "nameserver")) {
const ip_txt = line_it.next() orelse continue;
try linuxLookupNameFromNumericUnspec(gpa, &rc.ns, ip_txt, 53);
} else if (mem.eql(u8, token, "domain") or mem.eql(u8, token, "search")) {
rc.search.items.len = 0;
try rc.search.appendSlice(gpa, line_it.rest());
}
} else |err| return err;
if (rc.ns.items.len == 0) {
return linuxLookupNameFromNumericUnspec(gpa, &rc.ns, "127.0.0.1", 53);
}
}
fn resMSendRc(
rc: ResolvConf,
queries: []const []const u8,
answers: [][]u8,
answer_bufs: []const []u8,
) !void {
const gpa = rc.gpa;
const timeout = 1000 * rc.timeout;
const attempts = rc.attempts;
var sl: posix.socklen_t = @sizeOf(posix.sockaddr.in);
var family: posix.sa_family_t = posix.AF.INET;
var ns_list: ArrayList(Address) = .empty;
defer ns_list.deinit(gpa);
try ns_list.resize(gpa, rc.ns.items.len);
for (ns_list.items, rc.ns.items) |*ns, iplit| {
ns.* = iplit.addr;
assert(ns.getPort() == 53);
if (iplit.addr.any.family != posix.AF.INET) {
family = posix.AF.INET6;
}
}
const flags = posix.SOCK.DGRAM | posix.SOCK.CLOEXEC | posix.SOCK.NONBLOCK;
const fd = posix.socket(family, flags, 0) catch |err| switch (err) {
error.AddressFamilyNotSupported => blk: {
// Handle case where system lacks IPv6 support
if (family == posix.AF.INET6) {
family = posix.AF.INET;
break :blk try posix.socket(posix.AF.INET, flags, 0);
}
return err;
},
else => |e| return e,
};
defer Stream.close(.{ .handle = fd });
// Past this point, there are no errors. Each individual query will
// yield either no reply (indicated by zero length) or an answer
// packet which is up to the caller to interpret.
// Convert any IPv4 addresses in a mixed environment to v4-mapped
if (family == posix.AF.INET6) {
try posix.setsockopt(
fd,
posix.SOL.IPV6,
std.os.linux.IPV6.V6ONLY,
&mem.toBytes(@as(c_int, 0)),
);
for (ns_list.items) |*ns| {
if (ns.any.family != posix.AF.INET) continue;
mem.writeInt(u32, ns.in6.sa.addr[12..], ns.in.sa.addr, native_endian);
ns.in6.sa.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*;
ns.any.family = posix.AF.INET6;
ns.in6.sa.flowinfo = 0;
ns.in6.sa.scope_id = 0;
}
sl = @sizeOf(posix.sockaddr.in6);
}
// Get local address and open/bind a socket
var sa: Address = undefined;
@memset(@as([*]u8, @ptrCast(&sa))[0..@sizeOf(Address)], 0);
sa.any.family = family;
try posix.bind(fd, &sa.any, sl);
var pfd = [1]posix.pollfd{posix.pollfd{
.fd = fd,
.events = posix.POLL.IN,
.revents = undefined,
}};
const retry_interval = timeout / attempts;
var next: u32 = 0;
var t2: u64 = @bitCast(std.time.milliTimestamp());
const t0 = t2;
var t1 = t2 - retry_interval;
var servfail_retry: usize = undefined;
outer: while (t2 - t0 < timeout) : (t2 = @as(u64, @bitCast(std.time.milliTimestamp()))) {
if (t2 - t1 >= retry_interval) {
// Query all configured nameservers in parallel
var i: usize = 0;
while (i < queries.len) : (i += 1) {
if (answers[i].len == 0) {
for (ns_list.items) |*ns| {
_ = posix.sendto(fd, queries[i], posix.MSG.NOSIGNAL, &ns.any, sl) catch undefined;
}
}
}
t1 = t2;
servfail_retry = 2 * queries.len;
}
// Wait for a response, or until time to retry
const clamped_timeout = @min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2);
const nevents = posix.poll(&pfd, clamped_timeout) catch 0;
if (nevents == 0) continue;
while (true) {
var sl_copy = sl;
const rlen = posix.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break;
// Ignore non-identifiable packets
if (rlen < 4) continue;
// Ignore replies from addresses we didn't send to
const ns = for (ns_list.items) |*ns| {
if (ns.eql(sa)) break ns;
} else continue;
// Find which query this answer goes with, if any
var i: usize = next;
while (i < queries.len and (answer_bufs[next][0] != queries[i][0] or
answer_bufs[next][1] != queries[i][1])) : (i += 1)
{}
if (i == queries.len) continue;
if (answers[i].len != 0) continue;
// Only accept positive or negative responses;
// retry immediately on server failure, and ignore
// all other codes such as refusal.
switch (answer_bufs[next][3] & 15) {
0, 3 => {},
2 => if (servfail_retry != 0) {
servfail_retry -= 1;
_ = posix.sendto(fd, queries[i], posix.MSG.NOSIGNAL, &ns.any, sl) catch undefined;
},
else => continue,
}
// Store answer in the right slot, or update next
// available temp slot if it's already in place.
answers[i].len = rlen;
if (i == next) {
while (next < queries.len and answers[next].len != 0) : (next += 1) {}
} else {
@memcpy(answer_bufs[i][0..rlen], answer_bufs[next][0..rlen]);
}
if (next == queries.len) break :outer;
}
}
}
fn deinit(rc: *ResolvConf) void {
rc.ns.deinit();
rc.search.deinit();
const gpa = rc.gpa;
rc.ns.deinit(gpa);
rc.search.deinit(gpa);
rc.* = undefined;
}
};
/// Returns `error.StreamTooLong` if a line is longer than 512 bytes.
/// TODO: https://github.com/ziglang/zig/issues/2765 and https://github.com/ziglang/zig/issues/2761
fn getResolvConf(allocator: mem.Allocator, rc: *ResolvConf) !void {
rc.* = .{
.ns = std.ArrayList(LookupAddr).init(allocator),
.search = std.ArrayList(u8).init(allocator),
.ndots = 1,
.timeout = 5,
.attempts = 2,
};
errdefer rc.deinit();
const file = fs.openFileAbsoluteZ("/etc/resolv.conf", .{}) catch |err| switch (err) {
error.FileNotFound,
error.NotDir,
error.AccessDenied,
=> return linuxLookupNameFromNumericUnspec(&rc.ns, "127.0.0.1", 53),
else => |e| return e,
};
defer file.close();
var line_buf: [512]u8 = undefined;
var file_reader = file.reader();
var br = file_reader.interface().buffered(&line_buf);
while (br.takeSentinel('\n')) |line_with_comment| {
const line = line: {
var split = mem.splitScalar(u8, line_with_comment, '#');
break :line split.first();
};
var line_it = mem.tokenizeAny(u8, line, " \t");
const token = line_it.next() orelse continue;
if (mem.eql(u8, token, "options")) {
while (line_it.next()) |sub_tok| {
var colon_it = mem.splitScalar(u8, sub_tok, ':');
const name = colon_it.first();
const value_txt = colon_it.next() orelse continue;
const value = std.fmt.parseInt(u8, value_txt, 10) catch |err| switch (err) {
// TODO https://github.com/ziglang/zig/issues/11812
error.Overflow => @as(u8, 255),
error.InvalidCharacter => continue,
};
if (mem.eql(u8, name, "ndots")) {
rc.ndots = @min(value, 15);
} else if (mem.eql(u8, name, "attempts")) {
rc.attempts = @min(value, 10);
} else if (mem.eql(u8, name, "timeout")) {
rc.timeout = @min(value, 60);
}
}
} else if (mem.eql(u8, token, "nameserver")) {
const ip_txt = line_it.next() orelse continue;
try linuxLookupNameFromNumericUnspec(&rc.ns, ip_txt, 53);
} else if (mem.eql(u8, token, "domain") or mem.eql(u8, token, "search")) {
rc.search.items.len = 0;
try rc.search.appendSlice(line_it.rest());
}
} else |err| return err;
if (rc.ns.items.len == 0) {
return linuxLookupNameFromNumericUnspec(&rc.ns, "127.0.0.1", 53);
}
}
fn linuxLookupNameFromNumericUnspec(
addrs: *std.ArrayList(LookupAddr),
gpa: Allocator,
addrs: *ArrayList(LookupAddr),
name: []const u8,
port: u16,
) !void {
const addr = try Address.resolveIp(name, port);
(try addrs.addOne()).* = LookupAddr{ .addr = addr };
}
fn resMSendRc(
queries: []const []const u8,
answers: [][]u8,
answer_bufs: []const []u8,
rc: ResolvConf,
) !void {
const timeout = 1000 * rc.timeout;
const attempts = rc.attempts;
var sl: posix.socklen_t = @sizeOf(posix.sockaddr.in);
var family: posix.sa_family_t = posix.AF.INET;
var ns_list = std.ArrayList(Address).init(rc.ns.allocator);
defer ns_list.deinit();
try ns_list.resize(rc.ns.items.len);
const ns = ns_list.items;
for (rc.ns.items, 0..) |iplit, i| {
ns[i] = iplit.addr;
assert(ns[i].getPort() == 53);
if (iplit.addr.any.family != posix.AF.INET) {
family = posix.AF.INET6;
}
}
const flags = posix.SOCK.DGRAM | posix.SOCK.CLOEXEC | posix.SOCK.NONBLOCK;
const fd = posix.socket(family, flags, 0) catch |err| switch (err) {
error.AddressFamilyNotSupported => blk: {
// Handle case where system lacks IPv6 support
if (family == posix.AF.INET6) {
family = posix.AF.INET;
break :blk try posix.socket(posix.AF.INET, flags, 0);
}
return err;
},
else => |e| return e,
};
defer Stream.close(.{ .handle = fd });
// Past this point, there are no errors. Each individual query will
// yield either no reply (indicated by zero length) or an answer
// packet which is up to the caller to interpret.
// Convert any IPv4 addresses in a mixed environment to v4-mapped
if (family == posix.AF.INET6) {
try posix.setsockopt(
fd,
posix.SOL.IPV6,
std.os.linux.IPV6.V6ONLY,
&mem.toBytes(@as(c_int, 0)),
);
for (0..ns.len) |i| {
if (ns[i].any.family != posix.AF.INET) continue;
mem.writeInt(u32, ns[i].in6.sa.addr[12..], ns[i].in.sa.addr, native_endian);
ns[i].in6.sa.addr[0..12].* = "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff".*;
ns[i].any.family = posix.AF.INET6;
ns[i].in6.sa.flowinfo = 0;
ns[i].in6.sa.scope_id = 0;
}
sl = @sizeOf(posix.sockaddr.in6);
}
// Get local address and open/bind a socket
var sa: Address = undefined;
@memset(@as([*]u8, @ptrCast(&sa))[0..@sizeOf(Address)], 0);
sa.any.family = family;
try posix.bind(fd, &sa.any, sl);
var pfd = [1]posix.pollfd{posix.pollfd{
.fd = fd,
.events = posix.POLL.IN,
.revents = undefined,
}};
const retry_interval = timeout / attempts;
var next: u32 = 0;
var t2: u64 = @bitCast(std.time.milliTimestamp());
const t0 = t2;
var t1 = t2 - retry_interval;
var servfail_retry: usize = undefined;
outer: while (t2 - t0 < timeout) : (t2 = @as(u64, @bitCast(std.time.milliTimestamp()))) {
if (t2 - t1 >= retry_interval) {
// Query all configured nameservers in parallel
var i: usize = 0;
while (i < queries.len) : (i += 1) {
if (answers[i].len == 0) {
var j: usize = 0;
while (j < ns.len) : (j += 1) {
_ = posix.sendto(fd, queries[i], posix.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined;
}
}
}
t1 = t2;
servfail_retry = 2 * queries.len;
}
// Wait for a response, or until time to retry
const clamped_timeout = @min(@as(u31, std.math.maxInt(u31)), t1 + retry_interval - t2);
const nevents = posix.poll(&pfd, clamped_timeout) catch 0;
if (nevents == 0) continue;
while (true) {
var sl_copy = sl;
const rlen = posix.recvfrom(fd, answer_bufs[next], 0, &sa.any, &sl_copy) catch break;
// Ignore non-identifiable packets
if (rlen < 4) continue;
// Ignore replies from addresses we didn't send to
var j: usize = 0;
while (j < ns.len and !ns[j].eql(sa)) : (j += 1) {}
if (j == ns.len) continue;
// Find which query this answer goes with, if any
var i: usize = next;
while (i < queries.len and (answer_bufs[next][0] != queries[i][0] or
answer_bufs[next][1] != queries[i][1])) : (i += 1)
{}
if (i == queries.len) continue;
if (answers[i].len != 0) continue;
// Only accept positive or negative responses;
// retry immediately on server failure, and ignore
// all other codes such as refusal.
switch (answer_bufs[next][3] & 15) {
0, 3 => {},
2 => if (servfail_retry != 0) {
servfail_retry -= 1;
_ = posix.sendto(fd, queries[i], posix.MSG.NOSIGNAL, &ns[j].any, sl) catch undefined;
},
else => continue,
}
// Store answer in the right slot, or update next
// available temp slot if it's already in place.
answers[i].len = rlen;
if (i == next) {
while (next < queries.len and answers[next].len != 0) : (next += 1) {}
} else {
@memcpy(answer_bufs[i][0..rlen], answer_bufs[next][0..rlen]);
}
if (next == queries.len) break :outer;
}
}
try addrs.append(gpa, .{ .addr = addr });
}
fn dnsParse(
@ -1785,20 +1834,19 @@ fn dnsParse(
}
fn dnsParseCallback(ctx: dpc_ctx, rr: u8, data: []const u8, packet: []const u8) !void {
const gpa = ctx.gpa;
switch (rr) {
posix.RR.A => {
if (data.len != 4) return error.InvalidDnsARecord;
const new_addr = try ctx.addrs.addOne();
new_addr.* = LookupAddr{
try ctx.addrs.append(gpa, .{
.addr = Address.initIp4(data[0..4].*, ctx.port),
};
});
},
posix.RR.AAAA => {
if (data.len != 16) return error.InvalidDnsAAAARecord;
const new_addr = try ctx.addrs.addOne();
new_addr.* = LookupAddr{
try ctx.addrs.append(gpa, .{
.addr = Address.initIp6(data[0..16].*, ctx.port, 0, 0),
};
});
},
posix.RR.CNAME => {
var tmp: [256]u8 = undefined;
@ -1807,7 +1855,7 @@ fn dnsParseCallback(ctx: dpc_ctx, rr: u8, data: []const u8, packet: []const u8)
const canon_name = mem.sliceTo(&tmp, 0);
if (isValidHostName(canon_name)) {
ctx.canon.items.len = 0;
try ctx.canon.appendSlice(canon_name);
try ctx.canon.appendSlice(gpa, canon_name);
}
},
else => return,