mirror of https://github.com/ziglang/zig.git
std.crypto.Certificate.Bundle.macos: rework
- use ArrayList strategically to reduce allocations
- use a BufferedReader to avoid unnecessary memcpy of the certs
- use for loops
- skip certs with invalid magic instead of asserting
commit d603121dc3 (parent 01b580e75d)
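The first bullet describes a pattern worth calling out: keep one growable list outside the loop and resize it per iteration instead of allocating and freeing a fresh slice each time. A minimal sketch of that half, separate from the certificate logic (the function, `tables` parameter, and loop body are hypothetical; `std.ArrayListUnmanaged`, `.empty`, `resize`, and `deinit` are the std APIs used in the diff below):

    const std = @import("std");

    fn sumRecordCounts(gpa: std.mem.Allocator, tables: []const u32) !u64 {
        // One allocation, reused (and grown at most a few times) across
        // iterations, instead of a gpa.alloc/gpa.free pair per table.
        var records: std.ArrayListUnmanaged(u32) = .empty;
        defer records.deinit(gpa);

        var total: u64 = 0;
        for (tables) |count| {
            try records.resize(gpa, count);
            for (records.items) |*r| r.* = 1; // fill from the real data source here
            for (records.items) |r| total += r;
        }
        return total;
    }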
@@ -16,64 +16,64 @@ pub fn rescanMac(cb: *Bundle, gpa: Allocator) RescanMacError!void {
         "/Library/Keychains/System.keychain",
     };
 
+    const now_sec = std.time.timestamp();
+
+    var records: std.ArrayListUnmanaged(u32) = .empty;
+    defer records.deinit(gpa);
+
+    var tables: std.ArrayListUnmanaged(u32) = .empty;
+    defer tables.deinit(gpa);
+
     for (keychainPaths) |keychainPath| {
         const file = try fs.openFileAbsolute(keychainPath, .{});
         defer file.close();
 
-        const bytes = try file.readToEndAlloc(gpa, std.math.maxInt(u32));
-        defer gpa.free(bytes);
+        var in_buffer: [256]u8 = undefined;
+        comptime assert(in_buffer.len > @sizeOf(ApplDbHeader));
+        comptime assert(in_buffer.len > @sizeOf(ApplDbSchema));
+        comptime assert(in_buffer.len > @sizeOf(TableHeader));
+        comptime assert(in_buffer.len > @sizeOf(X509CertHeader));
+        var file_reader = file.reader();
+        var br = file_reader.interface().buffered(&in_buffer);
 
-        var stream = std.io.fixedBufferStream(bytes);
-        const reader = stream.reader();
-
-        const db_header = try reader.readStructEndian(ApplDbHeader, .big);
-        assert(mem.eql(u8, &db_header.signature, "kych"));
+        const db_header = try br.takeStructEndian(ApplDbHeader, .big);
+        if (!mem.eql(u8, &db_header.signature, "kych")) continue;
 
-        try stream.seekTo(db_header.schema_offset);
+        try file_reader.seekTo(db_header.schema_offset);
+        br = file_reader.interface().buffered(&in_buffer);
 
-        const db_schema = try reader.readStructEndian(ApplDbSchema, .big);
+        const db_schema = try br.takeStructEndian(ApplDbSchema, .big);
 
-        var table_list = try gpa.alloc(u32, db_schema.table_count);
-        defer gpa.free(table_list);
-
-        var table_idx: u32 = 0;
-        while (table_idx < table_list.len) : (table_idx += 1) {
-            table_list[table_idx] = try reader.readInt(u32, .big);
-        }
-
-        const now_sec = std.time.timestamp();
-
-        for (table_list) |table_offset| {
-            try stream.seekTo(db_header.schema_offset + table_offset);
-
-            const table_header = try reader.readStructEndian(TableHeader, .big);
+        try tables.resize(gpa, db_schema.table_count);
+        for (tables.items) |*offset| offset.* = try br.takeInt(u32, .big);
+
+        for (tables.items) |table_offset| {
+            try file_reader.seekTo(db_header.schema_offset + table_offset);
+            br = file_reader.interface().buffered(&in_buffer);
+
+            const table_header = try br.takeStructEndian(TableHeader, .big);
 
             if (@as(std.c.DB_RECORDTYPE, @enumFromInt(table_header.table_id)) != .X509_CERTIFICATE) {
                 continue;
             }
 
-            var record_list = try gpa.alloc(u32, table_header.record_count);
-            defer gpa.free(record_list);
-
-            var record_idx: u32 = 0;
-            while (record_idx < record_list.len) : (record_idx += 1) {
-                record_list[record_idx] = try reader.readInt(u32, .big);
-            }
-
-            for (record_list) |record_offset| {
+            try records.resize(gpa, table_header.record_count);
+            for (records.items) |*offset| offset.* = try br.takeInt(u32, .big);
+
+            for (records.items) |record_offset| {
                 // An offset of zero means that the record is not present.
                 // An offset that is not 4-byte-aligned is invalid.
                 if (record_offset == 0 or record_offset % 4 != 0) continue;
 
-                try stream.seekTo(db_header.schema_offset + table_offset + record_offset);
-
-                const cert_header = try reader.readStructEndian(X509CertHeader, .big);
+                try file_reader.seekTo(db_header.schema_offset + table_offset + record_offset);
+                br = file_reader.interface().buffered(&in_buffer);
 
+                const cert_header = try br.takeStructEndian(X509CertHeader, .big);
                 if (cert_header.cert_size == 0) continue;
 
-                const cert_start = @as(u32, @intCast(cb.bytes.items.len));
+                const cert_start: u32 = @intCast(cb.bytes.items.len);
                 const dest_buf = try cb.bytes.addManyAsSlice(gpa, cert_header.cert_size);
-                try reader.readNoEof(dest_buf);
+                try br.readSlice(dest_buf);
 
                 try cb.parseCert(gpa, cert_start, now_sec);
             }
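Distilled, the reading pattern the hunk above moves to looks roughly like this (a sketch against the in-flight std.io rework used by this branch; `file.reader()`, `interface().buffered(...)`, and `takeStructEndian` are mirrored from the diff, while the `Header` type and `readHeader` function are hypothetical):

    const std = @import("std");

    const Header = extern struct {
        signature: [4]u8,
        schema_offset: u32,
    };

    fn readHeader(file: std.fs.File) !Header {
        var in_buffer: [256]u8 = undefined;
        // Reads go through in_buffer and structs are decoded from it in
        // place, so the file is never slurped into a heap allocation first.
        var file_reader = file.reader();
        var br = file_reader.interface().buffered(&in_buffer);
        const header = try br.takeStructEndian(Header, .big);
        if (!std.mem.eql(u8, &header.signature, "kych")) return error.BadMagic;
        return header;
    }

Note also the re-buffering after every seek in the hunk: once `file_reader.seekTo(...)` moves the file position, any bytes still sitting in `in_buffer` are stale, so the buffered interface is recreated.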
@@ -188,9 +188,10 @@ pub fn deserialize(comptime HashResult: type, str: []const u8) Error!HashResult
 ///
 /// `params` can also include any additional parameters.
 pub fn serialize(params: anytype, str: []u8) Error![]const u8 {
-    var buf = io.fixedBufferStream(str);
-    try serializeTo(params, buf.writer());
-    return buf.getWritten();
+    var bw: std.io.BufferedWriter = undefined;
+    bw.initFixed(str);
+    try serializeTo(params, &bw);
+    return bw.getWritten();
 }
 
 /// Compute the number of bytes required to serialize `params`
@@ -200,7 +201,7 @@ pub fn calcSize(params: anytype) usize {
     return @as(usize, @intCast(buf.bytes_written));
 }
 
-fn serializeTo(params: anytype, out: anytype) !void {
+fn serializeTo(params: anytype, out: *std.io.BufferedWriter) !void {
     const HashResult = @TypeOf(params);
 
     if (@hasField(HashResult, version_param_name)) {
@@ -304,30 +304,35 @@ const crypt_format = struct {
 
     /// Serialize parameters into a string in modular crypt format.
    pub fn serialize(params: anytype, str: []u8) EncodingError![]const u8 {
-        var buf = io.fixedBufferStream(str);
-        try serializeTo(params, buf.writer());
-        return buf.getWritten();
+        var bw: std.io.BufferedWriter = undefined;
+        bw.initFixed(str);
+        try serializeTo(params, &bw);
+        return bw.getWritten();
     }
 
     /// Compute the number of bytes required to serialize `params`
     pub fn calcSize(params: anytype) usize {
-        var buf = io.countingWriter(io.null_writer);
-        serializeTo(params, buf.writer()) catch unreachable;
-        return @as(usize, @intCast(buf.bytes_written));
+        var null_writer: std.io.Writer.Null = .{};
+        var trash: [64]u8 = undefined;
+        var bw = null_writer.writer().buffered(&trash);
+        serializeTo(params, &bw) catch |err| switch (err) {
+            error.WriteFailed => unreachable,
+        };
+        return bw.count;
     }
 
-    fn serializeTo(params: anytype, out: anytype) !void {
+    fn serializeTo(params: anytype, out: *std.io.BufferedWriter) !void {
         var header: [14]u8 = undefined;
         header[0..3].* = prefix.*;
         Codec.intEncode(header[3..4], params.ln);
         Codec.intEncode(header[4..9], params.r);
         Codec.intEncode(header[9..14], params.p);
-        try out.writeAll(&header);
-        try out.writeAll(params.salt);
-        try out.writeAll("$");
 
         var buf: [@TypeOf(params.hash).max_encoded_length]u8 = undefined;
         const hash_str = try params.hash.toB64(&buf);
-        try out.writeAll(hash_str);
+
+        var vecs: [4][]const u8 = .{ &header, params.salt, "$", hash_str };
+        try out.writeVecAll(&vecs);
     }
 
     /// Custom codec that maps 6 bits into 8 like regular Base64, but uses its own alphabet,
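Both serialize functions above switch from a fixedBufferStream to a BufferedWriter initialized over the caller's slice, and the crypt-format serializeTo gathers its four pieces into one vectored write. A rough usage sketch of the same shape (`initFixed`, `writeVecAll`, and `getWritten` are the calls shown in the hunks, again from the in-flight std.io rework; the `joinWithDollar` helper is hypothetical):

    const std = @import("std");

    fn joinWithDollar(dst: []u8, a: []const u8, b: []const u8) ![]const u8 {
        var bw: std.io.BufferedWriter = undefined;
        bw.initFixed(dst); // writes land directly in dst; overflow surfaces as a write error
        var vecs: [3][]const u8 = .{ a, "$", b };
        try bw.writeVecAll(&vecs); // one gathered write instead of three writeAll calls
        return bw.getWritten();
    }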
@@ -947,6 +947,22 @@ pub const Reader = struct {
         };
     }
 
+    pub fn seekTo(r: *Reader, offset: u64) SeekError!void {
+        // TODO if the offset is after the current offset, seek by discarding.
+        if (r.seek_err) |err| return err;
+        switch (r.mode) {
+            .positional, .positional_reading => {
+                r.pos = offset;
+            },
+            .streaming, .streaming_reading => {
+                posix.lseek_SET(r.file.handle, offset) catch |err| {
+                    r.seek_err = err;
+                    return err;
+                };
+            },
+        }
+    }
+
     /// Number of slices to store on the stack, when trying to send as many byte
     /// vectors through the underlying read calls as possible.
     const max_buffers_len = 16;
@@ -975,7 +991,7 @@ pub const Reader = struct {
             error.WriteFailed => return error.WriteFailed,
             error.Unseekable => {
                 r.mode = .streaming;
-                assert(pos == 0);
+                if (pos != 0) @panic("TODO need to seek here");
                 return 0;
             },
             error.Unimplemented => {
@@ -1011,7 +1027,7 @@ pub const Reader = struct {
         const n = file.pread(dest, pos) catch |err| switch (err) {
             error.Unseekable => {
                 r.mode = .streaming_reading;
-                assert(pos == 0);
+                if (pos != 0) @panic("TODO need to seek here");
                 return 0;
             },
             else => |e| {
@@ -579,6 +579,10 @@ pub fn addAny(comptime Result: type, a: anytype, b: anytype) ?Result {
     return cast(Result, @as(O, a) + @as(O, b));
 }
 
+test addAny {
+    return error.Unimplemented;
+}
+
 /// Returns a - b, or an error on overflow.
 pub fn sub(comptime T: type, a: T, b: T) (error{Overflow}!T) {
     if (T == comptime_int) return a - b;
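The `test addAny { return error.Unimplemented; }` above is a placeholder for a test still to be written. Given the visible body (widen both operands, add, then `cast` to the result type, where `std.math.cast` yields null when the value does not fit), a plausible eventual test might look like this sketch (hypothetical values, not the author's test):

    test addAny {
        try std.testing.expectEqual(@as(?u8, 255), addAny(u8, @as(u8, 200), @as(u8, 55)));
        // 300 does not fit in u8, so the cast yields null.
        try std.testing.expectEqual(@as(?u8, null), addAny(u8, @as(u8, 200), @as(u8, 100)));
    }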
@@ -955,13 +955,14 @@ test Iterator {
     // example/empty/
 
     const data = @embedFile("tar/testdata/example.tar");
-    var fbs = std.io.fixedBufferStream(data);
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(data);
 
     // User provided buffers to the iterator
     var file_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
     var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
     // Create iterator
-    var iter: Iterator = .init(fbs.reader(), .{
+    var iter: Iterator = .init(&br, .{
         .file_name_buffer = &file_name_buffer,
         .link_name_buffer = &link_name_buffer,
     });
@@ -1014,15 +1015,15 @@ test pipeToFileSystem {
     // example/empty/
 
     const data = @embedFile("tar/testdata/example.tar");
-    var fbs = std.io.fixedBufferStream(data);
-    const reader = fbs.reader();
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(data);
 
     var tmp = testing.tmpDir(.{ .no_follow = true });
     defer tmp.cleanup();
     const dir = tmp.dir;
 
-    // Save tar from `reader` to the file system `dir`
-    pipeToFileSystem(dir, reader, .{
+    // Save tar from reader to the file system `dir`
+    pipeToFileSystem(dir, &br, .{
         .mode_mode = .ignore,
         .strip_components = 1,
         .exclude_empty_directories = true,
@@ -1046,8 +1047,8 @@ test pipeToFileSystem {
 
 test "pipeToFileSystem root_dir" {
     const data = @embedFile("tar/testdata/example.tar");
-    var fbs = std.io.fixedBufferStream(data);
-    const reader = fbs.reader();
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(data);
 
     // with strip_components = 1
     {
@@ -1056,7 +1057,7 @@ test "pipeToFileSystem root_dir" {
         var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
         defer diagnostics.deinit();
 
-        pipeToFileSystem(tmp.dir, reader, .{
+        pipeToFileSystem(tmp.dir, &br, .{
             .strip_components = 1,
             .diagnostics = &diagnostics,
         }) catch |err| {
@@ -1072,13 +1073,13 @@ test "pipeToFileSystem root_dir" {
 
     // with strip_components = 0
     {
-        fbs.reset();
+        br.initFixed(data);
         var tmp = testing.tmpDir(.{ .no_follow = true });
         defer tmp.cleanup();
         var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
         defer diagnostics.deinit();
 
-        pipeToFileSystem(tmp.dir, reader, .{
+        pipeToFileSystem(tmp.dir, &br, .{
             .strip_components = 0,
             .diagnostics = &diagnostics,
         }) catch |err| {
@@ -1095,45 +1096,45 @@ test "pipeToFileSystem root_dir" {
 
 test "findRoot with single file archive" {
     const data = @embedFile("tar/testdata/22752.tar");
-    var fbs = std.io.fixedBufferStream(data);
-    const reader = fbs.reader();
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(data);
 
     var tmp = testing.tmpDir(.{});
     defer tmp.cleanup();
 
     var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
     defer diagnostics.deinit();
-    try pipeToFileSystem(tmp.dir, reader, .{ .diagnostics = &diagnostics });
+    try pipeToFileSystem(tmp.dir, &br, .{ .diagnostics = &diagnostics });
 
     try testing.expectEqualStrings("", diagnostics.root_dir);
 }
 
 test "findRoot without explicit root dir" {
     const data = @embedFile("tar/testdata/19820.tar");
-    var fbs = std.io.fixedBufferStream(data);
-    const reader = fbs.reader();
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(data);
 
     var tmp = testing.tmpDir(.{});
     defer tmp.cleanup();
 
     var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
     defer diagnostics.deinit();
-    try pipeToFileSystem(tmp.dir, reader, .{ .diagnostics = &diagnostics });
+    try pipeToFileSystem(tmp.dir, &br, .{ .diagnostics = &diagnostics });
 
     try testing.expectEqualStrings("root", diagnostics.root_dir);
 }
 
 test "pipeToFileSystem strip_components" {
     const data = @embedFile("tar/testdata/example.tar");
-    var fbs = std.io.fixedBufferStream(data);
-    const reader = fbs.reader();
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(data);
 
     var tmp = testing.tmpDir(.{ .no_follow = true });
     defer tmp.cleanup();
     var diagnostics: Diagnostics = .{ .allocator = testing.allocator };
     defer diagnostics.deinit();
 
-    pipeToFileSystem(tmp.dir, reader, .{
+    pipeToFileSystem(tmp.dir, &br, .{
         .strip_components = 3,
         .diagnostics = &diagnostics,
     }) catch |err| {
@@ -1187,13 +1188,13 @@ test "executable bit" {
     const data = @embedFile("tar/testdata/example.tar");
 
     for ([_]PipeOptions.ModeMode{ .ignore, .executable_bit_only }) |opt| {
-        var fbs = std.io.fixedBufferStream(data);
-        const reader = fbs.reader();
+        var br: std.io.BufferedReader = undefined;
+        br.initFixed(data);
 
         var tmp = testing.tmpDir(.{ .no_follow = true });
         //defer tmp.cleanup();
 
-        pipeToFileSystem(tmp.dir, reader, .{
+        pipeToFileSystem(tmp.dir, &br, .{
             .strip_components = 1,
             .exclude_empty_directories = true,
             .mode_mode = opt,
@@ -441,11 +441,12 @@ test "write files" {
     for (files) |file|
         try wrt.writeFileBytes(file.path, file.content, .{});
 
-    var input: std.io.FixedBufferStream = .{ .buffer = output.getWritten() };
-    var iter = std.tar.iterator(
-        input.reader(),
-        .{ .file_name_buffer = &file_name_buffer, .link_name_buffer = &link_name_buffer },
-    );
+    var input: std.io.BufferedReader = undefined;
+    input.initFixed(output.getWritten());
+    var iter = std.tar.iterator(&input, .{
+        .file_name_buffer = &file_name_buffer,
+        .link_name_buffer = &link_name_buffer,
+    });
 
     // first entry is directory with prefix
     {
@@ -475,15 +476,17 @@ test "write files" {
     var wrt: Writer = .{ .underlying_writer = &output.buffered_writer };
     defer output.deinit();
     for (files) |file| {
-        var content = std.io.fixedBufferStream(file.content);
-        try wrt.writeFileStream(file.path, file.content.len, content.reader(), .{});
+        var content: std.io.BufferedReader = undefined;
+        content.initFixed(file.content);
+        try wrt.writeFileStream(file.path, file.content.len, &content, .{});
     }
 
-    var input: std.io.FixedBufferStream = .{ .buffer = output.getWritten() };
-    var iter = std.tar.iterator(
-        input.reader(),
-        .{ .file_name_buffer = &file_name_buffer, .link_name_buffer = &link_name_buffer },
-    );
+    var input: std.io.BufferedReader = undefined;
+    input.initFixed(output.getWritten());
+    var iter = std.tar.iterator(&input, .{
+        .file_name_buffer = &file_name_buffer,
+        .link_name_buffer = &link_name_buffer,
+    });
 
     var i: usize = 0;
     while (try iter.next()) |actual| {
@@ -346,8 +346,9 @@ test "run test cases" {
     var link_name_buffer: [std.fs.max_path_bytes]u8 = undefined;
 
     for (cases) |case| {
-        var fsb = std.io.fixedBufferStream(case.data);
-        var iter = tar.iterator(fsb.reader(), .{
+        var br: std.io.BufferedReader = undefined;
+        br.initFixed(case.data);
+        var iter = tar.iterator(&br, .{
            .file_name_buffer = &file_name_buffer,
            .link_name_buffer = &link_name_buffer,
        });
@@ -390,8 +391,9 @@ test "pax/gnu long names with small buffer" {
     const long_name_cases = [_]Case{ cases[11], cases[25], cases[28] };
 
     for (long_name_cases) |case| {
-        var fsb = std.io.fixedBufferStream(case.data);
-        var iter = tar.iterator(fsb.reader(), .{
+        var br: std.io.BufferedReader = undefined;
+        br.initFixed(case.data);
+        var iter = tar.iterator(&br, .{
            .file_name_buffer = &min_file_name_buffer,
            .link_name_buffer = &min_link_name_buffer,
        });
@@ -411,8 +413,9 @@ test "insufficient buffer in Header name filed" {
     var min_file_name_buffer: [9]u8 = undefined;
     var min_link_name_buffer: [100]u8 = undefined;
 
-    var fsb = std.io.fixedBufferStream(cases[0].data);
-    var iter = tar.iterator(fsb.reader(), .{
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(cases[0].data);
+    var iter = tar.iterator(&br, .{
         .file_name_buffer = &min_file_name_buffer,
         .link_name_buffer = &min_link_name_buffer,
     });
@@ -466,21 +469,22 @@ test "should not overwrite existing file" {
     // This ensures that file is not overwritten.
     //
     const data = @embedFile("testdata/overwrite_file.tar");
-    var fsb = std.io.fixedBufferStream(data);
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(data);
 
     // Unpack with strip_components = 1 should fail
     var root = std.testing.tmpDir(.{});
     defer root.cleanup();
     try testing.expectError(
         error.PathAlreadyExists,
-        tar.pipeToFileSystem(root.dir, fsb.reader(), .{ .mode_mode = .ignore, .strip_components = 1 }),
+        tar.pipeToFileSystem(root.dir, &br, .{ .mode_mode = .ignore, .strip_components = 1 }),
     );
 
     // Unpack with strip_components = 0 should pass
-    fsb.reset();
+    br.initFixed(data);
     var root2 = std.testing.tmpDir(.{});
     defer root2.cleanup();
-    try tar.pipeToFileSystem(root2.dir, fsb.reader(), .{ .mode_mode = .ignore, .strip_components = 0 });
+    try tar.pipeToFileSystem(root2.dir, &br, .{ .mode_mode = .ignore, .strip_components = 0 });
 }
 
 test "case sensitivity" {
@@ -494,12 +498,13 @@ test "case sensitivity" {
     // 18089/alacritty/Darkermatrix.yml
     //
     const data = @embedFile("testdata/18089.tar");
-    var fsb = std.io.fixedBufferStream(data);
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(data);
 
     var root = std.testing.tmpDir(.{});
     defer root.cleanup();
 
-    tar.pipeToFileSystem(root.dir, fsb.reader(), .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
+    tar.pipeToFileSystem(root.dir, &br, .{ .mode_mode = .ignore, .strip_components = 1 }) catch |err| {
         // on case insensitive fs we fail on overwrite existing file
         try testing.expectEqual(error.PathAlreadyExists, err);
         return;
@@ -759,14 +759,14 @@ const MsvcLibDir = struct {
        while (instances_dir_it.next() catch return error.PathNotFound) |entry| {
            if (entry.kind != .directory) continue;
 
-            var fbs = std.io.fixedBufferStream(&state_subpath_buf);
-            const writer = fbs.writer();
+            var bw: std.io.BufferedWriter = undefined;
+            bw.initFixed(&state_subpath_buf);
 
-            writer.writeAll(entry.name) catch unreachable;
-            writer.writeByte(std.fs.path.sep) catch unreachable;
-            writer.writeAll("state.json") catch unreachable;
+            bw.writeAll(entry.name) catch unreachable;
+            bw.writeByte(std.fs.path.sep) catch unreachable;
+            bw.writeAll("state.json") catch unreachable;
 
-            const json_contents = instances_dir.readFileAlloc(allocator, fbs.getWritten(), std.math.maxInt(usize)) catch continue;
+            const json_contents = instances_dir.readFileAlloc(allocator, bw.getWritten(), std.math.maxInt(usize)) catch continue;
             defer allocator.free(json_contents);
 
             var parsed = std.json.parseFromSlice(std.json.Value, allocator, json_contents, .{}) catch continue;
@@ -342,8 +342,9 @@ fn testParser(
    expected_model: *const Target.Cpu.Model,
    input: []const u8,
 ) !void {
-    var fbs = io.fixedBufferStream(input);
-    const result = try parser.parse(arch, fbs.reader());
+    var br: std.io.BufferedReader = undefined;
+    br.initFixed(@constCast(input));
+    const result = try parser.parse(arch, &br);
     try testing.expectEqual(expected_model, result.?.model);
     try testing.expect(expected_model.features.eql(result.?.features));
 }