Merge pull request #18076 from ziglang/revert-iterable-dir
std.fs: Absorb `IterableDir` into `Dir`
Commit f4e426a06c
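As background for reviewers (illustrative, not part of the diff itself): the change folds `IterableDir` back into `Dir`, so callers opt into iteration with an open flag instead of a separate wrapper type. A minimal before/after sketch, using a placeholder path:

const std = @import("std");

pub fn main() !void {
    // Before: iteration required the separate IterableDir type.
    // var it_dir = try std.fs.cwd().openIterableDir("some/dir", .{});
    // defer it_dir.close();
    // var it = it_dir.iterate();

    // After: Dir iterates directly when opened with `.iterate = true`.
    var dir = try std.fs.cwd().openDir("some/dir", .{ .iterate = true });
    defer dir.close();

    var it = dir.iterate();
    while (try it.next()) |entry| {
        std.debug.print("{s} ({s})\n", .{ entry.name, @tagName(entry.kind) });
    }
}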
@@ -247,7 +247,9 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/lib/std/fmt/errol/lookup.zig"
"${CMAKE_SOURCE_DIR}/lib/std/fmt/parse_float.zig"
"${CMAKE_SOURCE_DIR}/lib/std/fs.zig"
- "${CMAKE_SOURCE_DIR}/lib/std/fs/file.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/fs/AtomicFile.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/fs/Dir.zig"
+ "${CMAKE_SOURCE_DIR}/lib/std/fs/File.zig"
"${CMAKE_SOURCE_DIR}/lib/std/fs/get_app_data_dir.zig"
"${CMAKE_SOURCE_DIR}/lib/std/fs/path.zig"
"${CMAKE_SOURCE_DIR}/lib/std/hash.zig"
@@ -69,7 +69,7 @@ fn make(step: *Step, prog_node: *std.Progress.Node) !void {
const dest_prefix = dest_builder.getInstallPath(self.options.install_dir, self.options.install_subdir);
const src_builder = self.step.owner;
const src_dir_path = self.options.source_dir.getPath2(src_builder, step);
- var src_dir = src_builder.build_root.handle.openIterableDir(src_dir_path, .{}) catch |err| {
+ var src_dir = src_builder.build_root.handle.openDir(src_dir_path, .{ .iterate = true }) catch |err| {
return step.fail("unable to open source directory '{}{s}': {s}", .{
src_builder.build_root, src_dir_path, @errorName(err),
});
@@ -976,7 +976,8 @@ fn windowsCreateProcessPathExt(
defer dir_buf.shrinkRetainingCapacity(dir_path_len);
const dir_path_z = dir_buf.items[0 .. dir_buf.items.len - 1 :0];
const prefixed_path = try windows.wToPrefixedFileW(null, dir_path_z);
- break :dir fs.cwd().openDirW(prefixed_path.span().ptr, .{}, true) catch return error.FileNotFound;
+ break :dir fs.cwd().openDirW(prefixed_path.span().ptr, .{ .iterate = true }) catch
+ return error.FileNotFound;
};
defer dir.close();
@@ -160,7 +160,7 @@ pub fn addCertsFromDirPath(
dir: fs.Dir,
sub_dir_path: []const u8,
) AddCertsFromDirPathError!void {
- var iterable_dir = try dir.openIterableDir(sub_dir_path, .{});
+ var iterable_dir = try dir.openDir(sub_dir_path, .{ .iterate = true });
defer iterable_dir.close();
return addCertsFromDir(cb, gpa, iterable_dir);
}
@@ -171,14 +171,14 @@ pub fn addCertsFromDirPathAbsolute(
abs_dir_path: []const u8,
) AddCertsFromDirPathError!void {
assert(fs.path.isAbsolute(abs_dir_path));
- var iterable_dir = try fs.openIterableDirAbsolute(abs_dir_path, .{});
+ var iterable_dir = try fs.openDirAbsolute(abs_dir_path, .{ .iterate = true });
defer iterable_dir.close();
return addCertsFromDir(cb, gpa, iterable_dir);
}

pub const AddCertsFromDirError = AddCertsFromFilePathError;

- pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.IterableDir) AddCertsFromDirError!void {
+ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.Dir) AddCertsFromDirError!void {
var it = iterable_dir.iterate();
while (try it.next()) |entry| {
switch (entry.kind) {
@@ -186,7 +186,7 @@ pub fn addCertsFromDir(cb: *Bundle, gpa: Allocator, iterable_dir: fs.IterableDir
else => continue,
}

- try addCertsFromFilePath(cb, gpa, iterable_dir.dir, entry.name);
+ try addCertsFromFilePath(cb, gpa, iterable_dir, entry.name);
}
}
lib/std/fs.zig: 2709 changed lines (file diff suppressed because it is too large)

lib/std/fs/AtomicFile.zig: new file, 85 lines
@@ -0,0 +1,85 @@
file: File,
// TODO either replace this with rand_buf or use []u16 on Windows
tmp_path_buf: [tmp_path_len:0]u8,
dest_basename: []const u8,
file_open: bool,
file_exists: bool,
close_dir_on_deinit: bool,
dir: Dir,

pub const InitError = File.OpenError;

pub const random_bytes_len = 12;
const tmp_path_len = fs.base64_encoder.calcSize(random_bytes_len);

/// Note that the `Dir.atomicFile` API may be more handy than this lower-level function.
pub fn init(
    dest_basename: []const u8,
    mode: File.Mode,
    dir: Dir,
    close_dir_on_deinit: bool,
) InitError!AtomicFile {
    var rand_buf: [random_bytes_len]u8 = undefined;
    var tmp_path_buf: [tmp_path_len:0]u8 = undefined;

    while (true) {
        std.crypto.random.bytes(rand_buf[0..]);
        const tmp_path = fs.base64_encoder.encode(&tmp_path_buf, &rand_buf);
        tmp_path_buf[tmp_path.len] = 0;

        const file = dir.createFile(
            tmp_path,
            .{ .mode = mode, .exclusive = true },
        ) catch |err| switch (err) {
            error.PathAlreadyExists => continue,
            else => |e| return e,
        };

        return AtomicFile{
            .file = file,
            .tmp_path_buf = tmp_path_buf,
            .dest_basename = dest_basename,
            .file_open = true,
            .file_exists = true,
            .close_dir_on_deinit = close_dir_on_deinit,
            .dir = dir,
        };
    }
}

/// Always call deinit, even after a successful finish().
pub fn deinit(self: *AtomicFile) void {
    if (self.file_open) {
        self.file.close();
        self.file_open = false;
    }
    if (self.file_exists) {
        self.dir.deleteFile(&self.tmp_path_buf) catch {};
        self.file_exists = false;
    }
    if (self.close_dir_on_deinit) {
        self.dir.close();
    }
    self.* = undefined;
}

pub const FinishError = posix.RenameError;

pub fn finish(self: *AtomicFile) FinishError!void {
    assert(self.file_exists);
    if (self.file_open) {
        self.file.close();
        self.file_open = false;
    }
    try posix.renameat(self.dir.fd, self.tmp_path_buf[0..], self.dir.fd, self.dest_basename);
    self.file_exists = false;
}

const AtomicFile = @This();
const std = @import("../std.zig");
const File = std.fs.File;
const Dir = std.fs.Dir;
const fs = std.fs;
const assert = std.debug.assert;
// https://github.com/ziglang/zig/issues/5019
const posix = std.os;
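As a usage note (illustrative, not part of the diff): the higher-level `Dir.atomicFile` wrapper mentioned in the doc comment above constructs an `AtomicFile` for you. A minimal sketch, with a hypothetical destination file name:

const std = @import("std");

pub fn main() !void {
    var dir = try std.fs.cwd().openDir(".", .{});
    defer dir.close();

    // Writes go to a randomly named temp file in the same directory;
    // finish() atomically renames it over the destination.
    var af = try dir.atomicFile("example-output.txt", .{});
    defer af.deinit(); // removes the temp file if finish() was never reached

    try af.file.writeAll("replaced atomically\n");
    try af.finish();
}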
lib/std/fs/Dir.zig: new file, 2534 lines (file diff suppressed because it is too large)

lib/std/fs/File.zig: new file, 1624 lines (file diff suppressed because it is too large)

lib/std/fs/file.zig: 1622 changed lines (file diff suppressed because it is too large)
@@ -8,10 +8,8 @@ const wasi = std.os.wasi;

const ArenaAllocator = std.heap.ArenaAllocator;
const Dir = std.fs.Dir;
- const IterableDir = std.fs.IterableDir;
const File = std.fs.File;
const tmpDir = testing.tmpDir;
- const tmpIterableDir = testing.tmpIterableDir;

const PathType = enum {
relative,
@@ -74,19 +72,17 @@ const PathType = enum {
const TestContext = struct {
path_type: PathType,
arena: ArenaAllocator,
- tmp: testing.TmpIterableDir,
+ tmp: testing.TmpDir,
dir: std.fs.Dir,
- iterable_dir: std.fs.IterableDir,
transform_fn: *const PathType.TransformFn,

pub fn init(path_type: PathType, allocator: mem.Allocator, transform_fn: *const PathType.TransformFn) TestContext {
- const tmp = tmpIterableDir(.{});
+ const tmp = tmpDir(.{ .iterate = true });
return .{
.path_type = path_type,
.arena = ArenaAllocator.init(allocator),
.tmp = tmp,
- .dir = tmp.iterable_dir.dir,
- .iterable_dir = tmp.iterable_dir,
+ .dir = tmp.dir,
.transform_fn = transform_fn,
};
}
@@ -323,28 +319,28 @@ fn testReadLinkAbsolute(target_path: []const u8, symlink_path: []const u8) !void
}

test "Dir.Iterator" {
- var tmp_dir = tmpIterableDir(.{});
+ var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();

// First, create a couple of entries to iterate over.
- const file = try tmp_dir.iterable_dir.dir.createFile("some_file", .{});
+ const file = try tmp_dir.dir.createFile("some_file", .{});
file.close();

- try tmp_dir.iterable_dir.dir.makeDir("some_dir");
+ try tmp_dir.dir.makeDir("some_dir");

var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
const allocator = arena.allocator();

- var entries = std.ArrayList(IterableDir.Entry).init(allocator);
+ var entries = std.ArrayList(Dir.Entry).init(allocator);

// Create iterator.
- var iter = tmp_dir.iterable_dir.iterate();
+ var iter = tmp_dir.dir.iterate();
while (try iter.next()) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
const name = try allocator.dupe(u8, entry.name);
- try entries.append(.{ .name = name, .kind = entry.kind });
+ try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
}

try testing.expectEqual(@as(usize, 2), entries.items.len); // note that the Iterator skips '.' and '..'
@@ -353,7 +349,7 @@ test "Dir.Iterator" {
}

test "Dir.Iterator many entries" {
- var tmp_dir = tmpIterableDir(.{});
+ var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();

const num = 1024;
@@ -361,7 +357,7 @@ test "Dir.Iterator many entries" {
var buf: [4]u8 = undefined; // Enough to store "1024".
while (i < num) : (i += 1) {
const name = try std.fmt.bufPrint(&buf, "{}", .{i});
- const file = try tmp_dir.iterable_dir.dir.createFile(name, .{});
+ const file = try tmp_dir.dir.createFile(name, .{});
file.close();
}

@@ -369,10 +365,10 @@ test "Dir.Iterator many entries" {
defer arena.deinit();
const allocator = arena.allocator();

- var entries = std.ArrayList(IterableDir.Entry).init(allocator);
+ var entries = std.ArrayList(Dir.Entry).init(allocator);

// Create iterator.
- var iter = tmp_dir.iterable_dir.iterate();
+ var iter = tmp_dir.dir.iterate();
while (try iter.next()) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
@@ -388,14 +384,14 @@ test "Dir.Iterator many entries" {
}

test "Dir.Iterator twice" {
- var tmp_dir = tmpIterableDir(.{});
+ var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();

// First, create a couple of entries to iterate over.
- const file = try tmp_dir.iterable_dir.dir.createFile("some_file", .{});
+ const file = try tmp_dir.dir.createFile("some_file", .{});
file.close();

- try tmp_dir.iterable_dir.dir.makeDir("some_dir");
+ try tmp_dir.dir.makeDir("some_dir");

var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
@@ -403,15 +399,15 @@ test "Dir.Iterator twice" {

var i: u8 = 0;
while (i < 2) : (i += 1) {
- var entries = std.ArrayList(IterableDir.Entry).init(allocator);
+ var entries = std.ArrayList(Dir.Entry).init(allocator);

// Create iterator.
- var iter = tmp_dir.iterable_dir.iterate();
+ var iter = tmp_dir.dir.iterate();
while (try iter.next()) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
// which means we'll actually share the `name` pointer between entries!
const name = try allocator.dupe(u8, entry.name);
- try entries.append(.{ .name = name, .kind = entry.kind });
+ try entries.append(Dir.Entry{ .name = name, .kind = entry.kind });
}

try testing.expectEqual(@as(usize, 2), entries.items.len); // note that the Iterator skips '.' and '..'
@@ -421,25 +417,25 @@ test "Dir.Iterator twice" {
}

test "Dir.Iterator reset" {
- var tmp_dir = tmpIterableDir(.{});
+ var tmp_dir = tmpDir(.{ .iterate = true });
defer tmp_dir.cleanup();

// First, create a couple of entries to iterate over.
- const file = try tmp_dir.iterable_dir.dir.createFile("some_file", .{});
+ const file = try tmp_dir.dir.createFile("some_file", .{});
file.close();

- try tmp_dir.iterable_dir.dir.makeDir("some_dir");
+ try tmp_dir.dir.makeDir("some_dir");

var arena = ArenaAllocator.init(testing.allocator);
defer arena.deinit();
const allocator = arena.allocator();

// Create iterator.
- var iter = tmp_dir.iterable_dir.iterate();
+ var iter = tmp_dir.dir.iterate();

var i: u8 = 0;
while (i < 2) : (i += 1) {
- var entries = std.ArrayList(IterableDir.Entry).init(allocator);
+ var entries = std.ArrayList(Dir.Entry).init(allocator);

while (try iter.next()) |entry| {
// We cannot just store `entry` as on Windows, we're re-using the name buffer
@@ -461,10 +457,10 @@ test "Dir.Iterator but dir is deleted during iteration" {
defer tmp.cleanup();

// Create directory and setup an iterator for it
- var iterable_subdir = try tmp.dir.makeOpenPathIterable("subdir", .{});
- defer iterable_subdir.close();
+ var subdir = try tmp.dir.makeOpenPath("subdir", .{ .iterate = true });
+ defer subdir.close();

- var iterator = iterable_subdir.iterate();
+ var iterator = subdir.iterate();

// Create something to iterate over within the subdir
try tmp.dir.makePath("subdir/b");
@@ -485,11 +481,11 @@ test "Dir.Iterator but dir is deleted during iteration" {
}
}

- fn entryEql(lhs: IterableDir.Entry, rhs: IterableDir.Entry) bool {
+ fn entryEql(lhs: Dir.Entry, rhs: Dir.Entry) bool {
return mem.eql(u8, lhs.name, rhs.name) and lhs.kind == rhs.kind;
}

- fn contains(entries: *const std.ArrayList(IterableDir.Entry), el: IterableDir.Entry) bool {
+ fn contains(entries: *const std.ArrayList(Dir.Entry), el: Dir.Entry) bool {
for (entries.items) |entry| {
if (entryEql(entry, el)) return true;
}
@@ -963,10 +959,10 @@ test "makePath in a directory that no longer exists" {
try testing.expectError(error.FileNotFound, tmp.dir.makePath("sub-path"));
}

- fn testFilenameLimits(iterable_dir: IterableDir, maxed_filename: []const u8) !void {
+ fn testFilenameLimits(iterable_dir: Dir, maxed_filename: []const u8) !void {
// setup, create a dir and a nested file both with maxed filenames, and walk the dir
{
- var maxed_dir = try iterable_dir.dir.makeOpenPath(maxed_filename, .{});
+ var maxed_dir = try iterable_dir.makeOpenPath(maxed_filename, .{});
defer maxed_dir.close();

try maxed_dir.writeFile(maxed_filename, "");
@@ -983,27 +979,27 @@ fn testFilenameLimits(iterable_dir: IterableDir, maxed_filename: []const u8) !vo
}

// ensure that we can delete the tree
- try iterable_dir.dir.deleteTree(maxed_filename);
+ try iterable_dir.deleteTree(maxed_filename);
}

test "max file name component lengths" {
- var tmp = tmpIterableDir(.{});
+ var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();

if (builtin.os.tag == .windows) {
// U+FFFF is the character with the largest code point that is encoded as a single
// UTF-16 code unit, so Windows allows for NAME_MAX of them.
const maxed_windows_filename = ("\u{FFFF}".*) ** std.os.windows.NAME_MAX;
- try testFilenameLimits(tmp.iterable_dir, &maxed_windows_filename);
+ try testFilenameLimits(tmp.dir, &maxed_windows_filename);
} else if (builtin.os.tag == .wasi) {
// On WASI, the maxed filename depends on the host OS, so in order for this test to
// work on any host, we need to use a length that will work for all platforms
// (i.e. the minimum MAX_NAME_BYTES of all supported platforms).
const maxed_wasi_filename = [_]u8{'1'} ** 255;
- try testFilenameLimits(tmp.iterable_dir, &maxed_wasi_filename);
+ try testFilenameLimits(tmp.dir, &maxed_wasi_filename);
} else {
const maxed_ascii_filename = [_]u8{'1'} ** std.fs.MAX_NAME_BYTES;
- try testFilenameLimits(tmp.iterable_dir, &maxed_ascii_filename);
+ try testFilenameLimits(tmp.dir, &maxed_ascii_filename);
}
}
@@ -1384,7 +1380,7 @@ test "open file with exclusive nonblocking lock twice (absolute paths)" {
test "walker" {
if (builtin.os.tag == .wasi and builtin.link_libc) return error.SkipZigTest;

- var tmp = tmpIterableDir(.{});
+ var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();

// iteration order of walker is undefined, so need lookup maps to check against
@@ -1410,10 +1406,10 @@ test "walker" {
});

for (expected_paths.kvs) |kv| {
- try tmp.iterable_dir.dir.makePath(kv.key);
+ try tmp.dir.makePath(kv.key);
}

- var walker = try tmp.iterable_dir.walk(testing.allocator);
+ var walker = try tmp.dir.walk(testing.allocator);
defer walker.deinit();

var num_walked: usize = 0;
@@ -1437,17 +1433,17 @@ test "walker" {
test "walker without fully iterating" {
if (builtin.os.tag == .wasi and builtin.link_libc) return error.SkipZigTest;

- var tmp = tmpIterableDir(.{});
+ var tmp = tmpDir(.{ .iterate = true });
defer tmp.cleanup();

- var walker = try tmp.iterable_dir.walk(testing.allocator);
+ var walker = try tmp.dir.walk(testing.allocator);
defer walker.deinit();

// Create 2 directories inside the tmp directory, but then only iterate once before breaking.
// This ensures that walker doesn't try to close the initial directory when not fully iterating.

- try tmp.iterable_dir.dir.makePath("a");
- try tmp.iterable_dir.dir.makePath("b");
+ try tmp.dir.makePath("a");
+ try tmp.dir.makePath("b");

var num_walked: usize = 0;
while (try walker.next()) |_| {
@@ -1490,7 +1486,7 @@ test ". and .. in fs.Dir functions" {

try ctx.dir.writeFile(update_path, "something");
const prev_status = try ctx.dir.updateFile(file_path, ctx.dir, update_path, .{});
- try testing.expectEqual(fs.PrevStatus.stale, prev_status);
+ try testing.expectEqual(fs.Dir.PrevStatus.stale, prev_status);

try ctx.dir.deleteDir(subdir_path);
}
@@ -1536,7 +1532,7 @@ test ". and .. in absolute functions" {
try update_file.writeAll("something");
update_file.close();
const prev_status = try fs.updateFileAbsolute(created_file_path, update_file_path, .{});
- try testing.expectEqual(fs.PrevStatus.stale, prev_status);
+ try testing.expectEqual(fs.Dir.PrevStatus.stale, prev_status);

try fs.deleteDirAbsolute(subdir_path);
}
@@ -1556,11 +1552,11 @@ test "chmod" {
try testing.expectEqual(@as(File.Mode, 0o644), (try file.stat()).mode & 0o7777);

try tmp.dir.makeDir("test_dir");
- var iterable_dir = try tmp.dir.openIterableDir("test_dir", .{});
- defer iterable_dir.close();
+ var dir = try tmp.dir.openDir("test_dir", .{ .iterate = true });
+ defer dir.close();

- try iterable_dir.chmod(0o700);
- try testing.expectEqual(@as(File.Mode, 0o700), (try iterable_dir.dir.stat()).mode & 0o7777);
+ try dir.chmod(0o700);
+ try testing.expectEqual(@as(File.Mode, 0o700), (try dir.stat()).mode & 0o7777);
}

test "chown" {
@@ -1576,9 +1572,9 @@ test "chown" {

try tmp.dir.makeDir("test_dir");

- var iterable_dir = try tmp.dir.openIterableDir("test_dir", .{});
- defer iterable_dir.close();
- try iterable_dir.chown(null, null);
+ var dir = try tmp.dir.openDir("test_dir", .{ .iterate = true });
+ defer dir.close();
+ try dir.chown(null, null);
}

test "File.Metadata" {
@@ -402,7 +402,7 @@ pub fn fchown(fd: fd_t, owner: ?uid_t, group: ?gid_t) FChownError!void {
switch (system.getErrno(res)) {
.SUCCESS => return,
.INTR => continue,
- .BADF => unreachable, // Can be reached if the fd refers to a non-iterable directory.
+ .BADF => unreachable, // Can be reached if the fd refers to a directory opened without `OpenDirOptions{ .iterate = true }`

.FAULT => unreachable,
.INVAL => unreachable,
@@ -543,22 +543,6 @@ pub const TmpDir = struct {
}
};

- pub const TmpIterableDir = struct {
- iterable_dir: std.fs.IterableDir,
- parent_dir: std.fs.Dir,
- sub_path: [sub_path_len]u8,
-
- const random_bytes_count = 12;
- const sub_path_len = std.fs.base64_encoder.calcSize(random_bytes_count);
-
- pub fn cleanup(self: *TmpIterableDir) void {
- self.iterable_dir.close();
- self.parent_dir.deleteTree(&self.sub_path) catch {};
- self.parent_dir.close();
- self.* = undefined;
- }
- };
-
pub fn tmpDir(opts: std.fs.Dir.OpenDirOptions) TmpDir {
var random_bytes: [TmpDir.random_bytes_count]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
@@ -581,28 +565,6 @@ pub fn tmpDir(opts: std.fs.Dir.OpenDirOptions) TmpDir {
};
}

- pub fn tmpIterableDir(opts: std.fs.Dir.OpenDirOptions) TmpIterableDir {
- var random_bytes: [TmpIterableDir.random_bytes_count]u8 = undefined;
- std.crypto.random.bytes(&random_bytes);
- var sub_path: [TmpIterableDir.sub_path_len]u8 = undefined;
- _ = std.fs.base64_encoder.encode(&sub_path, &random_bytes);
-
- const cwd = std.fs.cwd();
- var cache_dir = cwd.makeOpenPath("zig-cache", .{}) catch
- @panic("unable to make tmp dir for testing: unable to make and open zig-cache dir");
- defer cache_dir.close();
- const parent_dir = cache_dir.makeOpenPath("tmp", .{}) catch
- @panic("unable to make tmp dir for testing: unable to make and open zig-cache/tmp dir");
- const dir = parent_dir.makeOpenPathIterable(&sub_path, opts) catch
- @panic("unable to make tmp dir for testing: unable to make and open the tmp dir");
-
- return .{
- .iterable_dir = dir,
- .parent_dir = parent_dir,
- .sub_path = sub_path,
- };
- }

test "expectEqual nested array" {
const a = [2][2]f32{
[_]f32{ 1.0, 0.0 },
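Illustrative only (not part of the diff): with `TmpIterableDir` and `tmpIterableDir` removed, tests that need to iterate a temporary directory pass the flag to `tmpDir` instead. A minimal sketch:

const std = @import("std");

test "iterate a temporary directory" {
    var tmp = std.testing.tmpDir(.{ .iterate = true });
    defer tmp.cleanup();

    const f = try tmp.dir.createFile("example.txt", .{});
    f.close();

    var it = tmp.dir.iterate();
    var count: usize = 0;
    while (try it.next()) |_| count += 1;
    try std.testing.expectEqual(@as(usize, 1), count);
}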
@@ -280,7 +280,7 @@ pub fn run(f: *Fetch) RunError!void {
},
.remote => |remote| remote,
.path_or_url => |path_or_url| {
- if (fs.cwd().openIterableDir(path_or_url, .{})) |dir| {
+ if (fs.cwd().openDir(path_or_url, .{ .iterate = true })) |dir| {
var resource: Resource = .{ .dir = dir };
return runResource(f, path_or_url, &resource, null);
} else |dir_err| {
@@ -363,7 +363,9 @@ fn runResource(
var tmp_directory: Cache.Directory = .{
.path = tmp_directory_path,
.handle = handle: {
- const dir = cache_root.handle.makeOpenPathIterable(tmp_dir_sub_path, .{}) catch |err| {
+ const dir = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
+ .iterate = true,
+ }) catch |err| {
try eb.addRootErrorMessage(.{
.msg = try eb.printString("unable to create temporary directory '{s}': {s}", .{
tmp_directory_path, @errorName(err),
@@ -371,7 +373,7 @@ fn runResource(
});
return error.FetchFailed;
};
- break :handle dir.dir;
+ break :handle dir;
},
};
defer tmp_directory.handle.close();
@@ -400,9 +402,9 @@ fn runResource(
if (builtin.os.tag == .linux and f.job_queue.work_around_btrfs_bug) {
// https://github.com/ziglang/zig/issues/17095
tmp_directory.handle.close();
- const iterable_dir = cache_root.handle.makeOpenPathIterable(tmp_dir_sub_path, .{}) catch
- @panic("btrfs workaround failed");
- tmp_directory.handle = iterable_dir.dir;
+ tmp_directory.handle = cache_root.handle.makeOpenPath(tmp_dir_sub_path, .{
+ .iterate = true,
+ }) catch @panic("btrfs workaround failed");
}

f.actual_hash = try computeHash(f, tmp_directory, filter);
@@ -717,7 +719,7 @@ const Resource = union(enum) {
file: fs.File,
http_request: std.http.Client.Request,
git: Git,
- dir: fs.IterableDir,
+ dir: fs.Dir,

const Git = struct {
fetch_stream: git.Session.FetchStream,
@@ -1198,7 +1200,7 @@ fn unpackGitPack(f: *Fetch, out_dir: fs.Dir, resource: *Resource) anyerror!void
try out_dir.deleteTree(".git");
}

- fn recursiveDirectoryCopy(f: *Fetch, dir: fs.IterableDir, tmp_dir: fs.Dir) anyerror!void {
+ fn recursiveDirectoryCopy(f: *Fetch, dir: fs.Dir, tmp_dir: fs.Dir) anyerror!void {
const gpa = f.arena.child_allocator;
// Recursive directory copy.
var it = try dir.walk(gpa);
@@ -1207,7 +1209,7 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: fs.IterableDir, tmp_dir: fs.Dir) anyer
switch (entry.kind) {
.directory => {}, // omit empty directories
.file => {
- dir.dir.copyFile(
+ dir.copyFile(
entry.path,
tmp_dir,
entry.path,
@@ -1215,14 +1217,14 @@ fn recursiveDirectoryCopy(f: *Fetch, dir: fs.IterableDir, tmp_dir: fs.Dir) anyer
) catch |err| switch (err) {
error.FileNotFound => {
if (fs.path.dirname(entry.path)) |dirname| try tmp_dir.makePath(dirname);
- try dir.dir.copyFile(entry.path, tmp_dir, entry.path, .{});
+ try dir.copyFile(entry.path, tmp_dir, entry.path, .{});
},
else => |e| return e,
};
},
.sym_link => {
var buf: [fs.MAX_PATH_BYTES]u8 = undefined;
- const link_name = try dir.dir.readLink(entry.path, &buf);
+ const link_name = try dir.readLink(entry.path, &buf);
// TODO: if this would create a symlink to outside
// the destination directory, fail with an error instead.
tmp_dir.symLink(link_name, entry.path, .{}) catch |err| switch (err) {
@@ -1296,7 +1298,7 @@ fn computeHash(
var sus_dirs: std.StringArrayHashMapUnmanaged(void) = .{};
defer sus_dirs.deinit(gpa);

- var walker = try @as(fs.IterableDir, .{ .dir = tmp_directory.handle }).walk(gpa);
+ var walker = try tmp_directory.handle.walk(gpa);
defer walker.deinit();

{
@@ -1384,11 +1384,11 @@ test "packfile indexing and checkout" {
var repository = try Repository.init(testing.allocator, pack_file, index_file);
defer repository.deinit();

- var worktree = testing.tmpIterableDir(.{});
+ var worktree = testing.tmpDir(.{ .iterate = true });
defer worktree.cleanup();

const commit_id = try parseOid("dd582c0720819ab7130b103635bd7271b9fd4feb");
- try repository.checkout(worktree.iterable_dir.dir, commit_id);
+ try repository.checkout(worktree.dir, commit_id);

const expected_files: []const []const u8 = &.{
"dir/file",
@@ -1410,7 +1410,7 @@ test "packfile indexing and checkout" {
var actual_files: std.ArrayListUnmanaged([]u8) = .{};
defer actual_files.deinit(testing.allocator);
defer for (actual_files.items) |file| testing.allocator.free(file);
- var walker = try worktree.iterable_dir.walk(testing.allocator);
+ var walker = try worktree.dir.walk(testing.allocator);
defer walker.deinit();
while (try walker.next()) |entry| {
if (entry.kind != .file) continue;
@@ -1442,7 +1442,7 @@ test "packfile indexing and checkout" {
\\revision 19
\\
;
- const actual_file_contents = try worktree.iterable_dir.dir.readFileAlloc(testing.allocator, "file", max_file_size);
+ const actual_file_contents = try worktree.dir.readFileAlloc(testing.allocator, "file", max_file_size);
defer testing.allocator.free(actual_file_contents);
try testing.expectEqualStrings(expected_file_contents, actual_file_contents);
}
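For reference (illustrative, not part of the diff): after this change, recursive traversal goes directly through `Dir.walk` with no wrapper type. A minimal sketch that lists regular files under a hypothetical "src" directory:

const std = @import("std");

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    // walk() requires the directory to have been opened with `.iterate = true`.
    var dir = try std.fs.cwd().openDir("src", .{ .iterate = true });
    defer dir.close();

    var walker = try dir.walk(gpa);
    defer walker.deinit();

    while (try walker.next()) |entry| {
        if (entry.kind != .file) continue;
        std.debug.print("{s}\n", .{entry.path});
    }
}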
src/main.zig: 12 changed lines
@@ -5698,13 +5698,13 @@ fn fmtPathDir(
parent_dir: fs.Dir,
parent_sub_path: []const u8,
) FmtError!void {
- var iterable_dir = try parent_dir.openIterableDir(parent_sub_path, .{});
- defer iterable_dir.close();
+ var dir = try parent_dir.openDir(parent_sub_path, .{ .iterate = true });
+ defer dir.close();

- const stat = try iterable_dir.dir.stat();
+ const stat = try dir.stat();
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;

- var dir_it = iterable_dir.iterate();
+ var dir_it = dir.iterate();
while (try dir_it.next()) |entry| {
const is_dir = entry.kind == .directory;

@@ -5715,9 +5715,9 @@ fn fmtPathDir(
defer fmt.gpa.free(full_path);

if (is_dir) {
- try fmtPathDir(fmt, full_path, check_mode, iterable_dir.dir, entry.name);
+ try fmtPathDir(fmt, full_path, check_mode, dir, entry.name);
} else {
- fmtPathFile(fmt, full_path, check_mode, iterable_dir.dir, entry.name) catch |err| {
+ fmtPathFile(fmt, full_path, check_mode, dir, entry.name) catch |err| {
warn("unable to format '{s}': {s}", .{ full_path, @errorName(err) });
fmt.any_error = true;
return;
@@ -14,7 +14,11 @@ const product_version_max_length = version_major_minor_max_length + ".65535".len
/// Iterates via `iterator` and collects all folders with names starting with `optional_prefix`
/// and similar to SemVer. Returns slice of folder names sorted in descending order.
/// Caller owns result.
- fn iterateAndFilterBySemVer(iterator: *std.fs.IterableDir.Iterator, allocator: std.mem.Allocator, comptime optional_prefix: ?[]const u8) error{ OutOfMemory, VersionNotFound }![][]const u8 {
+ fn iterateAndFilterBySemVer(
+ iterator: *std.fs.Dir.Iterator,
+ allocator: std.mem.Allocator,
+ comptime optional_prefix: ?[]const u8,
+ ) error{ OutOfMemory, VersionNotFound }![][]const u8 {
var dirs_filtered_list = std.ArrayList([]const u8).init(allocator);
errdefer {
for (dirs_filtered_list.items) |filtered_dir| allocator.free(filtered_dir);
@@ -476,7 +480,9 @@ pub const Windows81Sdk = struct {
if (!std.fs.path.isAbsolute(sdk_lib_dir_path)) return error.Windows81SdkNotFound;

// enumerate files in sdk path looking for latest version
- var sdk_lib_dir = std.fs.openIterableDirAbsolute(sdk_lib_dir_path, .{}) catch |err| switch (err) {
+ var sdk_lib_dir = std.fs.openDirAbsolute(sdk_lib_dir_path, .{
+ .iterate = true,
+ }) catch |err| switch (err) {
error.NameTooLong => return error.PathTooLong,
else => return error.Windows81SdkNotFound,
};
@@ -727,7 +733,9 @@ const MsvcLibDir = struct {
if (!std.fs.path.isAbsolute(visualstudio_folder_path)) return error.PathNotFound;
// enumerate folders that contain `privateregistry.bin`, looking for all versions
// f.i. %localappdata%\Microsoft\VisualStudio\17.0_9e9cbb98\
- var visualstudio_folder = std.fs.openIterableDirAbsolute(visualstudio_folder_path, .{}) catch return error.PathNotFound;
+ var visualstudio_folder = std.fs.openDirAbsolute(visualstudio_folder_path, .{
+ .iterate = true,
+ }) catch return error.PathNotFound;
defer visualstudio_folder.close();

var iterator = visualstudio_folder.iterate();
@@ -368,7 +368,7 @@ pub fn addCompile(
/// Each file should include a test manifest as a contiguous block of comments at
/// the end of the file. The first line should be the test type, followed by a set of
/// key-value config values, followed by a blank line, then the expected output.
- pub fn addFromDir(ctx: *Cases, dir: std.fs.IterableDir) void {
+ pub fn addFromDir(ctx: *Cases, dir: std.fs.Dir) void {
var current_file: []const u8 = "none";
ctx.addFromDirInner(dir, &current_file) catch |err| {
std.debug.panicExtra(
@@ -382,7 +382,7 @@ pub fn addFromDir(ctx: *Cases, dir: std.fs.IterableDir) void {

fn addFromDirInner(
ctx: *Cases,
- iterable_dir: std.fs.IterableDir,
+ iterable_dir: std.fs.Dir,
/// This is kept up to date with the currently being processed file so
/// that if any errors occur the caller knows it happened during this file.
current_file: *[]const u8,
@@ -416,7 +416,7 @@ fn addFromDirInner(
}

const max_file_size = 10 * 1024 * 1024;
- const src = try iterable_dir.dir.readFileAllocOptions(ctx.arena, filename, max_file_size, null, 1, 0);
+ const src = try iterable_dir.readFileAllocOptions(ctx.arena, filename, max_file_size, null, 1, 0);

// Parse the manifest
var manifest = try TestManifest.parse(ctx.arena, src);
@@ -1246,7 +1246,7 @@ pub fn main() !void {
var filenames = std.ArrayList([]const u8).init(arena);

const case_dirname = std.fs.path.dirname(case_file_path).?;
- var iterable_dir = try std.fs.cwd().openIterableDir(case_dirname, .{});
+ var iterable_dir = try std.fs.cwd().openDir(case_dirname, .{ .iterate = true });
defer iterable_dir.close();

if (std.mem.endsWith(u8, case_file_path, ".0.zig")) {
@@ -1280,7 +1280,7 @@ pub fn main() !void {

for (batch) |filename| {
const max_file_size = 10 * 1024 * 1024;
- const src = try iterable_dir.dir.readFileAllocOptions(arena, filename, max_file_size, null, 1, 0);
+ const src = try iterable_dir.readFileAllocOptions(arena, filename, max_file_size, null, 1, 0);

// Parse the manifest
var manifest = try TestManifest.parse(arena, src);

@@ -1288,7 +1288,7 @@ pub fn addCases(

var cases = @import("src/Cases.zig").init(gpa, arena);

- var dir = try b.build_root.handle.openIterableDir("test/cases", .{});
+ var dir = try b.build_root.handle.openDir("test/cases", .{ .iterate = true });
defer dir.close();

cases.addFromDir(dir);
@@ -18,7 +18,7 @@ pub fn main() !void {
);

var names = std.ArrayList([]const u8).init(allocator);
- var cwd = try std.fs.cwd().openIterableDir(".", .{});
+ var cwd = try std.fs.cwd().openDir(".", .{ .iterate = true });
var it = cwd.iterate();
while (try it.next()) |entry| {
try names.append(try allocator.dupe(u8, entry.name));
@@ -382,14 +382,14 @@ pub fn main() !void {
try dir_stack.append(target_include_dir);

while (dir_stack.popOrNull()) |full_dir_name| {
- var iterable_dir = std.fs.cwd().openIterableDir(full_dir_name, .{}) catch |err| switch (err) {
+ var dir = std.fs.cwd().openDir(full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
error.FileNotFound => continue :search,
error.AccessDenied => continue :search,
else => return err,
};
- defer iterable_dir.close();
+ defer dir.close();

- var dir_it = iterable_dir.iterate();
+ var dir_it = dir.iterate();

while (try dir_it.next()) |entry| {
const full_path = try std.fs.path.join(allocator, &[_][]const u8{ full_dir_name, entry.name });
@@ -14,9 +14,9 @@ pub fn main() !void {

const args = try std.process.argsAlloc(arena);
const path_to_walk = args[1];
- const iterable_dir = try std.fs.cwd().openIterableDir(path_to_walk, .{});
+ const dir = try std.fs.cwd().openDir(path_to_walk, .{ .iterate = true });

- var walker = try iterable_dir.walk(arena);
+ var walker = try dir.walk(arena);
defer walker.deinit();

var buffer: [500]u8 = undefined;
@@ -30,7 +30,7 @@ pub fn main() !void {
node.activate();
defer node.end();

- const source = try iterable_dir.dir.readFileAlloc(arena, entry.path, 20 * 1024 * 1024);
+ const source = try dir.readFileAlloc(arena, entry.path, 20 * 1024 * 1024);
if (!std.mem.startsWith(u8, source, expected_header)) {
std.debug.print("no match: {s}\n", .{entry.path});
continue;
@@ -42,6 +42,6 @@ pub fn main() !void {
std.mem.copy(u8, new_source, new_header);
std.mem.copy(u8, new_source[new_header.len..], truncated_source);

- try iterable_dir.dir.writeFile(entry.path, new_source);
+ try dir.writeFile(entry.path, new_source);
}
}
@@ -190,14 +190,14 @@ pub fn main() !void {
try dir_stack.append(target_include_dir);

while (dir_stack.popOrNull()) |full_dir_name| {
- var iterable_dir = std.fs.cwd().openIterableDir(full_dir_name, .{}) catch |err| switch (err) {
+ var dir = std.fs.cwd().openDir(full_dir_name, .{ .iterate = true }) catch |err| switch (err) {
error.FileNotFound => continue :search,
error.AccessDenied => continue :search,
else => return err,
};
- defer iterable_dir.close();
+ defer dir.close();

- var dir_it = iterable_dir.iterate();
+ var dir_it = dir.iterate();

while (try dir_it.next()) |entry| {
const full_path = try std.fs.path.join(arena, &[_][]const u8{ full_dir_name, entry.name });
@@ -47,7 +47,7 @@ pub fn main() !void {

const dest_dir_path = try std.fmt.allocPrint(arena, "{s}/lib/libc/glibc", .{zig_src_path});

- var dest_dir = fs.cwd().openIterableDir(dest_dir_path, .{}) catch |err| {
+ var dest_dir = fs.cwd().openDir(dest_dir_path, .{ .iterate = true }) catch |err| {
fatal("unable to open destination directory '{s}': {s}", .{
dest_dir_path, @errorName(err),
});
@@ -72,14 +72,14 @@ pub fn main() !void {
if (mem.endsWith(u8, entry.path, ext)) continue :walk;
}

- glibc_src_dir.copyFile(entry.path, dest_dir.dir, entry.path, .{}) catch |err| {
+ glibc_src_dir.copyFile(entry.path, dest_dir, entry.path, .{}) catch |err| {
log.warn("unable to copy '{s}/{s}' to '{s}/{s}': {s}", .{
glibc_src_path, entry.path,
dest_dir_path, entry.path,
@errorName(err),
});
if (err == error.FileNotFound) {
- try dest_dir.dir.deleteFile(entry.path);
+ try dest_dir.deleteFile(entry.path);
}
};
}
@@ -88,7 +88,7 @@ pub fn main() !void {
// Warn about duplicated files inside glibc/include/* that can be omitted
// because they are already in generic-glibc/*.

- var include_dir = dest_dir.dir.openIterableDir("include", .{}) catch |err| {
+ var include_dir = dest_dir.openDir("include", .{ .iterate = true }) catch |err| {
fatal("unable to open directory '{s}/include': {s}", .{
dest_dir_path, @errorName(err),
});
@@ -125,7 +125,7 @@ pub fn main() !void {
generic_glibc_path, entry.path, @errorName(e),
}),
};
- const glibc_include_contents = include_dir.dir.readFileAlloc(
+ const glibc_include_contents = include_dir.readFileAlloc(
arena,
entry.path,
max_file_size,
@@ -226,7 +226,7 @@ pub fn main() !void {
/// TODO: Unfortunately, neither repository contains a machine-readable list of extension dependencies.
fn gather_extensions(allocator: Allocator, spirv_registry_root: []const u8) ![]const []const u8 {
const extensions_path = try fs.path.join(allocator, &.{ spirv_registry_root, "extensions" });
- var extensions_dir = try fs.cwd().openIterableDir(extensions_path, .{});
+ var extensions_dir = try fs.cwd().openDir(extensions_path, .{ .iterate = true });
defer extensions_dir.close();

var extensions = std.ArrayList([]const u8).init(allocator);
@@ -235,7 +235,7 @@ fn gather_extensions(allocator: Allocator, spirv_registry_root: []const u8) ![]c
while (try vendor_it.next()) |vendor_entry| {
std.debug.assert(vendor_entry.kind == .directory); // If this fails, the structure of SPIRV-Registry has changed.

- const vendor_dir = try extensions_dir.dir.openIterableDir(vendor_entry.name, .{});
+ const vendor_dir = try extensions_dir.openDir(vendor_entry.name, .{ .iterate = true });
var ext_it = vendor_dir.iterate();
while (try ext_it.next()) |ext_entry| {
// There is both a HTML and asciidoc version of every spec (as well as some other directories),
@@ -258,7 +258,7 @@ fn gather_extensions(allocator: Allocator, spirv_registry_root: []const u8) ![]c
// SPV_EXT_name
// ```

- const ext_spec = try vendor_dir.dir.readFileAlloc(allocator, ext_entry.name, std.math.maxInt(usize));
+ const ext_spec = try vendor_dir.readFileAlloc(allocator, ext_entry.name, std.math.maxInt(usize));
const name_strings = "Name Strings";

const name_strings_offset = std.mem.indexOf(u8, ext_spec, name_strings) orelse return error.InvalidRegistry;