Merge pull request #13596 from ziglang/cache-path-prefixes
Cache: introduce prefixes to manifests
Commit 81d2135ca6
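This change makes cache manifests portable: instead of storing an absolute resolved file path, each manifest entry now records an index into a small per-Cache prefix table plus a prefix-relative sub path. Based on the new `writer.print("{d} {d} {d} {s} {d} {s}\n", ...)` call in src/Cache.zig below, a manifest line now carries six fields — size, inode, mtime, hex digest, prefix index, sub path. For illustration only (all values here are made up, not taken from a real manifest), such a line would look roughly like:

4096 1234567 1668000000000000000 0123456789abcdef0123456789abcdef 1 std/std.zig

where the fifth column is the prefix index: 0 is the null (cwd) prefix, and 1 and 2 follow the order of the addPrefix calls made by the caller (zig lib directory, then cache directory, per the Compilation.zig hunk further down).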
@@ -63,7 +63,4 @@ stage3-release/bin/zig build test docs \
tidy --drop-empty-elements no -qe ../zig-cache/langref.html

# Produce the experimental std lib documentation.
stage3-release/bin/zig test ../lib/std/std.zig \
-femit-docs \
-fno-emit-bin \
--zig-lib-dir "$(pwd)/../lib"
stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib

@@ -67,7 +67,4 @@ stage3-debug/bin/zig build test \
#tidy --drop-empty-elements no -qe ../zig-cache/langref.html

# Produce the experimental std lib documentation.
stage3-debug/bin/zig test ../lib/std/std.zig \
-femit-docs \
-fno-emit-bin \
--zig-lib-dir "$(pwd)/../lib"
stage3-debug/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib

@@ -63,10 +63,7 @@ stage3-release/bin/zig build test docs \
tidy --drop-empty-elements no -qe ../zig-cache/langref.html

# Produce the experimental std lib documentation.
stage3-release/bin/zig test ../lib/std/std.zig \
-femit-docs \
-fno-emit-bin \
--zig-lib-dir "$(pwd)/../lib"
stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib

stage3-release/bin/zig build \
--prefix stage4-release \

@@ -44,8 +44,4 @@ stage3-release/bin/zig build test docs \
--search-prefix "$PREFIX"

# Produce the experimental std lib documentation.
mkdir -p "stage3-release/doc/std"
stage3-release/bin/zig test "$(pwd)/../lib/std/std.zig" \
--zig-lib-dir "$(pwd)/../lib" \
-femit-docs="$(pwd)/stage3-release/doc/std" \
-fno-emit-bin
stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib

@@ -51,8 +51,4 @@ stage3-release/bin/zig build test docs \
--search-prefix "$PREFIX"

# Produce the experimental std lib documentation.
mkdir -p "stage3-release/doc/std"
stage3-release/bin/zig test "$(pwd)/../lib/std/std.zig" \
--zig-lib-dir "$(pwd)/../lib" \
-femit-docs="$(pwd)/stage3-release/doc/std" \
-fno-emit-bin
stage3-release/bin/zig test ../lib/std/std.zig -femit-docs -fno-emit-bin --zig-lib-dir ../lib

@@ -53,11 +53,9 @@ Write-Output " zig build test docs..."
CheckLastExitCode

# Produce the experimental std lib documentation.
mkdir "$ZIGINSTALLDIR\doc\std" -force

Write-Output "zig test std/std.zig..."

& "$ZIGINSTALLDIR\bin\zig.exe" test "$ZIGLIBDIR\std\std.zig" `
--zig-lib-dir "$ZIGLIBDIR" `
-femit-docs="$ZIGINSTALLDIR\doc\std" `
-femit-docs `
-fno-emit-bin
@@ -467,55 +467,49 @@ pub fn resolve(allocator: Allocator, paths: []const []const u8) ![]u8 {
/// Path separators are canonicalized to '\\' and drives are canonicalized to capital letters.
/// Note: all usage of this function should be audited due to the existence of symlinks.
/// Without performing actual syscalls, resolving `..` could be incorrect.
/// This API may break in the future: https://github.com/ziglang/zig/issues/13613
pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd
return process.getCwdAlloc(allocator);
}
assert(paths.len > 0);

// determine which disk designator we will result with, if any
var result_drive_buf = "_:".*;
var result_disk_designator: []const u8 = "";
var have_drive_kind = WindowsPath.Kind.None;
var disk_designator: []const u8 = "";
var drive_kind = WindowsPath.Kind.None;
var have_abs_path = false;
var first_index: usize = 0;
var max_size: usize = 0;
for (paths) |p, i| {
const parsed = windowsParsePath(p);
if (parsed.is_abs) {
have_abs_path = true;
first_index = i;
max_size = result_disk_designator.len;
}
switch (parsed.kind) {
WindowsPath.Kind.Drive => {
.Drive => {
result_drive_buf[0] = ascii.toUpper(parsed.disk_designator[0]);
result_disk_designator = result_drive_buf[0..];
have_drive_kind = WindowsPath.Kind.Drive;
disk_designator = result_drive_buf[0..];
drive_kind = WindowsPath.Kind.Drive;
},
WindowsPath.Kind.NetworkShare => {
result_disk_designator = parsed.disk_designator;
have_drive_kind = WindowsPath.Kind.NetworkShare;
.NetworkShare => {
disk_designator = parsed.disk_designator;
drive_kind = WindowsPath.Kind.NetworkShare;
},
WindowsPath.Kind.None => {},
.None => {},
}
max_size += p.len + 1;
}

// if we will result with a disk designator, loop again to determine
// which is the last time the disk designator is absolutely specified, if any
// and count up the max bytes for paths related to this disk designator
if (have_drive_kind != WindowsPath.Kind.None) {
if (drive_kind != WindowsPath.Kind.None) {
have_abs_path = false;
first_index = 0;
max_size = result_disk_designator.len;
var correct_disk_designator = false;

for (paths) |p, i| {
const parsed = windowsParsePath(p);
if (parsed.kind != WindowsPath.Kind.None) {
if (parsed.kind == have_drive_kind) {
correct_disk_designator = compareDiskDesignators(have_drive_kind, result_disk_designator, parsed.disk_designator);
if (parsed.kind == drive_kind) {
correct_disk_designator = compareDiskDesignators(drive_kind, disk_designator, parsed.disk_designator);
} else {
continue;
}
@@ -525,92 +519,51 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
}
if (parsed.is_abs) {
first_index = i;
max_size = result_disk_designator.len;
have_abs_path = true;
}
max_size += p.len + 1;
}
}

// Allocate result and fill in the disk designator, calling getCwd if we have to.
var result: []u8 = undefined;
var result_index: usize = 0;
// Allocate result and fill in the disk designator.
var result = std.ArrayList(u8).init(allocator);
defer result.deinit();

if (have_abs_path) {
switch (have_drive_kind) {
WindowsPath.Kind.Drive => {
result = try allocator.alloc(u8, max_size);

mem.copy(u8, result, result_disk_designator);
result_index += result_disk_designator.len;
const disk_designator_len: usize = l: {
if (!have_abs_path) break :l 0;
switch (drive_kind) {
.Drive => {
try result.appendSlice(disk_designator);
break :l disk_designator.len;
},
WindowsPath.Kind.NetworkShare => {
result = try allocator.alloc(u8, max_size);
.NetworkShare => {
var it = mem.tokenize(u8, paths[first_index], "/\\");
const server_name = it.next().?;
const other_name = it.next().?;

result[result_index] = '\\';
result_index += 1;
result[result_index] = '\\';
result_index += 1;
mem.copy(u8, result[result_index..], server_name);
result_index += server_name.len;
result[result_index] = '\\';
result_index += 1;
mem.copy(u8, result[result_index..], other_name);
result_index += other_name.len;
try result.ensureUnusedCapacity(2 + 1 + server_name.len + other_name.len);
result.appendSliceAssumeCapacity("\\\\");
result.appendSliceAssumeCapacity(server_name);
result.appendAssumeCapacity('\\');
result.appendSliceAssumeCapacity(other_name);

result_disk_designator = result[0..result_index];
break :l result.items.len;
},
WindowsPath.Kind.None => {
assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd
const cwd = try process.getCwdAlloc(allocator);
defer allocator.free(cwd);
const parsed_cwd = windowsParsePath(cwd);
result = try allocator.alloc(u8, max_size + parsed_cwd.disk_designator.len + 1);
mem.copy(u8, result, parsed_cwd.disk_designator);
result_index += parsed_cwd.disk_designator.len;
result_disk_designator = result[0..parsed_cwd.disk_designator.len];
if (parsed_cwd.kind == WindowsPath.Kind.Drive) {
result[0] = ascii.toUpper(result[0]);
}
have_drive_kind = parsed_cwd.kind;
.None => {
break :l 1;
},
}
} else {
assert(native_os == .windows); // resolveWindows called on non windows can't use getCwd
// TODO call get cwd for the result_disk_designator instead of the global one
const cwd = try process.getCwdAlloc(allocator);
defer allocator.free(cwd);
};

result = try allocator.alloc(u8, max_size + cwd.len + 1);

mem.copy(u8, result, cwd);
result_index += cwd.len;
const parsed_cwd = windowsParsePath(result[0..result_index]);
result_disk_designator = parsed_cwd.disk_designator;
if (parsed_cwd.kind == WindowsPath.Kind.Drive) {
result[0] = ascii.toUpper(result[0]);
// Remove the trailing slash if present, eg. if the cwd is a root
// directory.
if (cwd.len > 0 and cwd[cwd.len - 1] == sep_windows) {
result_index -= 1;
}
}
have_drive_kind = parsed_cwd.kind;
}
errdefer allocator.free(result);

// Now we know the disk designator to use, if any, and what kind it is. And our result
// is big enough to append all the paths to.
var correct_disk_designator = true;
var negative_count: usize = 0;

for (paths[first_index..]) |p| {
const parsed = windowsParsePath(p);

if (parsed.kind != WindowsPath.Kind.None) {
if (parsed.kind == have_drive_kind) {
correct_disk_designator = compareDiskDesignators(have_drive_kind, result_disk_designator, parsed.disk_designator);
if (parsed.kind != .None) {
if (parsed.kind == drive_kind) {
const dd = result.items[0..disk_designator_len];
correct_disk_designator = compareDiskDesignators(drive_kind, dd, parsed.disk_designator);
} else {
continue;
}
@@ -619,154 +572,167 @@ pub fn resolveWindows(allocator: Allocator, paths: []const []const u8) ![]u8 {
continue;
}
var it = mem.tokenize(u8, p[parsed.disk_designator.len..], "/\\");
while (it.next()) |component| {
component: while (it.next()) |component| {
if (mem.eql(u8, component, ".")) {
continue;
} else if (mem.eql(u8, component, "..")) {
while (true) {
if (result_index == 0 or result_index == result_disk_designator.len)
break;
result_index -= 1;
if (result[result_index] == '\\' or result[result_index] == '/')
if (result.items.len == 0) {
negative_count += 1;
continue :component;
}
if (result.items.len == disk_designator_len) {
break;
}
const end_with_sep = switch (result.items[result.items.len - 1]) {
'\\', '/' => true,
else => false,
};
result.items.len -= 1;
if (end_with_sep) break;
}
} else if (!have_abs_path and result.items.len == 0) {
try result.appendSlice(component);
} else {
result[result_index] = sep_windows;
result_index += 1;
mem.copy(u8, result[result_index..], component);
result_index += component.len;
try result.ensureUnusedCapacity(1 + component.len);
result.appendAssumeCapacity('\\');
result.appendSliceAssumeCapacity(component);
}
}
}

if (result_index == result_disk_designator.len) {
result[result_index] = '\\';
result_index += 1;
if (disk_designator_len != 0 and result.items.len == disk_designator_len) {
try result.append('\\');
return result.toOwnedSlice();
}

return allocator.shrink(result, result_index);
if (result.items.len == 0) {
if (negative_count == 0) {
return allocator.dupe(u8, ".");
} else {
const real_result = try allocator.alloc(u8, 3 * negative_count - 1);
var count = negative_count - 1;
var i: usize = 0;
while (count > 0) : (count -= 1) {
real_result[i..][0..3].* = "..\\".*;
i += 3;
}
real_result[i..][0..2].* = "..".*;
return real_result;
}
}

if (negative_count == 0) {
return result.toOwnedSlice();
} else {
const real_result = try allocator.alloc(u8, 3 * negative_count + result.items.len);
var count = negative_count;
var i: usize = 0;
while (count > 0) : (count -= 1) {
real_result[i..][0..3].* = "..\\".*;
i += 3;
}
mem.copy(u8, real_result[i..], result.items);
return real_result;
}
}

/// This function is like a series of `cd` statements executed one after another.
/// It resolves "." and "..".
/// The result does not have a trailing path separator.
/// If all paths are relative it uses the current working directory as a starting point.
/// Note: all usage of this function should be audited due to the existence of symlinks.
/// Without performing actual syscalls, resolving `..` could be incorrect.
pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) ![]u8 {
if (paths.len == 0) {
assert(native_os != .windows); // resolvePosix called on windows can't use getCwd
return process.getCwdAlloc(allocator);
}
/// This function does not perform any syscalls. Executing this series of path
/// lookups on the actual filesystem may produce different results due to
/// symlinks.
pub fn resolvePosix(allocator: Allocator, paths: []const []const u8) Allocator.Error![]u8 {
assert(paths.len > 0);

var first_index: usize = 0;
var have_abs = false;
var max_size: usize = 0;
for (paths) |p, i| {
var result = std.ArrayList(u8).init(allocator);
defer result.deinit();

var negative_count: usize = 0;
var is_abs = false;

for (paths) |p| {
if (isAbsolutePosix(p)) {
first_index = i;
have_abs = true;
max_size = 0;
is_abs = true;
negative_count = 0;
result.clearRetainingCapacity();
}
max_size += p.len + 1;
}

var result: []u8 = undefined;
var result_index: usize = 0;

if (have_abs) {
result = try allocator.alloc(u8, max_size);
} else {
assert(native_os != .windows); // resolvePosix called on windows can't use getCwd
const cwd = try process.getCwdAlloc(allocator);
defer allocator.free(cwd);
result = try allocator.alloc(u8, max_size + cwd.len + 1);
mem.copy(u8, result, cwd);
result_index += cwd.len;
}
errdefer allocator.free(result);

for (paths[first_index..]) |p| {
var it = mem.tokenize(u8, p, "/");
while (it.next()) |component| {
component: while (it.next()) |component| {
if (mem.eql(u8, component, ".")) {
continue;
} else if (mem.eql(u8, component, "..")) {
while (true) {
if (result_index == 0)
break;
result_index -= 1;
if (result[result_index] == '/')
break;
if (result.items.len == 0) {
negative_count += @boolToInt(!is_abs);
continue :component;
}
const ends_with_slash = result.items[result.items.len - 1] == '/';
result.items.len -= 1;
if (ends_with_slash) break;
}
} else if (result.items.len > 0 or is_abs) {
try result.ensureUnusedCapacity(1 + component.len);
result.appendAssumeCapacity('/');
result.appendSliceAssumeCapacity(component);
} else {
result[result_index] = '/';
result_index += 1;
mem.copy(u8, result[result_index..], component);
result_index += component.len;
try result.appendSlice(component);
}
}
}

if (result_index == 0) {
result[0] = '/';
result_index += 1;
if (result.items.len == 0) {
if (is_abs) {
return allocator.dupe(u8, "/");
}
if (negative_count == 0) {
return allocator.dupe(u8, ".");
} else {
const real_result = try allocator.alloc(u8, 3 * negative_count - 1);
var count = negative_count - 1;
var i: usize = 0;
while (count > 0) : (count -= 1) {
real_result[i..][0..3].* = "../".*;
i += 3;
}
real_result[i..][0..2].* = "..".*;
return real_result;
}
}

return allocator.shrink(result, result_index);
if (negative_count == 0) {
return result.toOwnedSlice();
} else {
const real_result = try allocator.alloc(u8, 3 * negative_count + result.items.len);
var count = negative_count;
var i: usize = 0;
while (count > 0) : (count -= 1) {
real_result[i..][0..3].* = "../".*;
i += 3;
}
mem.copy(u8, real_result[i..], result.items);
return real_result;
}
}

test "resolve" {
if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
if (native_os == .wasi and !builtin.link_libc) try os.initPreopensWasi(std.heap.page_allocator, "/");
try testResolveWindows(&[_][]const u8{ "a\\b\\c\\", "..\\..\\.." }, "..");
try testResolveWindows(&[_][]const u8{"."}, ".");

const cwd = try process.getCwdAlloc(testing.allocator);
defer testing.allocator.free(cwd);
if (native_os == .windows) {
if (windowsParsePath(cwd).kind == WindowsPath.Kind.Drive) {
cwd[0] = ascii.toUpper(cwd[0]);
}
try testResolveWindows(&[_][]const u8{"."}, cwd);
} else {
try testResolvePosix(&[_][]const u8{ "a/b/c/", "../../.." }, cwd);
try testResolvePosix(&[_][]const u8{"."}, cwd);
}
try testResolvePosix(&[_][]const u8{ "a/b/c/", "../../.." }, "..");
try testResolvePosix(&[_][]const u8{"."}, ".");
}

test "resolveWindows" {
if (builtin.target.cpu.arch == .aarch64) {
// TODO https://github.com/ziglang/zig/issues/3288
return error.SkipZigTest;
}
if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
if (native_os == .wasi and !builtin.link_libc) try os.initPreopensWasi(std.heap.page_allocator, "/");
if (native_os == .windows) {
const cwd = try process.getCwdAlloc(testing.allocator);
defer testing.allocator.free(cwd);
const parsed_cwd = windowsParsePath(cwd);
{
const expected = try join(testing.allocator, &[_][]const u8{
parsed_cwd.disk_designator,
"usr\\local\\lib\\zig\\std\\array_list.zig",
});
defer testing.allocator.free(expected);
if (parsed_cwd.kind == WindowsPath.Kind.Drive) {
expected[0] = ascii.toUpper(parsed_cwd.disk_designator[0]);
}
try testResolveWindows(&[_][]const u8{ "/usr/local", "lib\\zig\\std\\array_list.zig" }, expected);
}
{
const expected = try join(testing.allocator, &[_][]const u8{
cwd,
"usr\\local\\lib\\zig",
});
defer testing.allocator.free(expected);
if (parsed_cwd.kind == WindowsPath.Kind.Drive) {
expected[0] = ascii.toUpper(parsed_cwd.disk_designator[0]);
}
try testResolveWindows(&[_][]const u8{ "usr/local", "lib\\zig" }, expected);
}
}
try testResolveWindows(
&[_][]const u8{ "Z:\\", "/usr/local", "lib\\zig\\std\\array_list.zig" },
"Z:\\usr\\local\\lib\\zig\\std\\array_list.zig",
);
try testResolveWindows(
&[_][]const u8{ "z:\\", "usr/local", "lib\\zig" },
"Z:\\usr\\local\\lib\\zig",
);

try testResolveWindows(&[_][]const u8{ "c:\\a\\b\\c", "/hi", "ok" }, "C:\\hi\\ok");
try testResolveWindows(&[_][]const u8{ "c:/blah\\blah", "d:/games", "c:../a" }, "C:\\blah\\a");
@@ -781,12 +747,12 @@ test "resolveWindows" {
try testResolveWindows(&[_][]const u8{ "c:/", "//server//share" }, "\\\\server\\share\\");
try testResolveWindows(&[_][]const u8{ "c:/", "///some//dir" }, "C:\\some\\dir");
try testResolveWindows(&[_][]const u8{ "C:\\foo\\tmp.3\\", "..\\tmp.3\\cycles\\root.js" }, "C:\\foo\\tmp.3\\cycles\\root.js");

// Keep relative paths relative.
try testResolveWindows(&[_][]const u8{"a/b"}, "a\\b");
}

test "resolvePosix" {
if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
if (native_os == .wasi and !builtin.link_libc) try os.initPreopensWasi(std.heap.page_allocator, "/");

try testResolvePosix(&[_][]const u8{ "/a/b", "c" }, "/a/b/c");
try testResolvePosix(&[_][]const u8{ "/a/b", "c", "//d", "e///" }, "/d/e");
try testResolvePosix(&[_][]const u8{ "/a/b/c", "..", "../" }, "/a");
@@ -797,18 +763,21 @@ test "resolvePosix" {
try testResolvePosix(&[_][]const u8{ "/var/lib", "/../", "file/" }, "/file");
try testResolvePosix(&[_][]const u8{ "/some/dir", ".", "/absolute/" }, "/absolute");
try testResolvePosix(&[_][]const u8{ "/foo/tmp.3/", "../tmp.3/cycles/root.js" }, "/foo/tmp.3/cycles/root.js");

// Keep relative paths relative.
try testResolvePosix(&[_][]const u8{"a/b"}, "a/b");
}

fn testResolveWindows(paths: []const []const u8, expected: []const u8) !void {
const actual = try resolveWindows(testing.allocator, paths);
defer testing.allocator.free(actual);
try testing.expect(mem.eql(u8, actual, expected));
try testing.expectEqualStrings(expected, actual);
}

fn testResolvePosix(paths: []const []const u8, expected: []const u8) !void {
const actual = try resolvePosix(testing.allocator, paths);
defer testing.allocator.free(actual);
try testing.expect(mem.eql(u8, actual, expected));
try testing.expectEqualStrings(expected, actual);
}

/// Strip the last component from a file path.
@@ -1089,13 +1058,15 @@ pub fn relativeWindows(allocator: Allocator, from: []const u8, to: []const u8) !
if (parsed_from.kind != parsed_to.kind) {
break :x true;
} else switch (parsed_from.kind) {
WindowsPath.Kind.NetworkShare => {
.NetworkShare => {
break :x !networkShareServersEql(parsed_to.disk_designator, parsed_from.disk_designator);
},
WindowsPath.Kind.Drive => {
.Drive => {
break :x ascii.toUpper(parsed_from.disk_designator[0]) != ascii.toUpper(parsed_to.disk_designator[0]);
},
else => unreachable,
.None => {
break :x false;
},
}
};

@@ -1194,13 +1165,6 @@ pub fn relativePosix(allocator: Allocator, from: []const u8, to: []const u8) ![]
}

test "relative" {
if (builtin.target.cpu.arch == .aarch64) {
// TODO https://github.com/ziglang/zig/issues/3288
return error.SkipZigTest;
}
if (native_os == .wasi and builtin.link_libc) return error.SkipZigTest;
if (native_os == .wasi and !builtin.link_libc) try os.initPreopensWasi(std.heap.page_allocator, "/");

try testRelativeWindows("c:/blah\\blah", "d:/games", "D:\\games");
try testRelativeWindows("c:/aaaa/bbbb", "c:/aaaa", "..");
try testRelativeWindows("c:/aaaa/bbbb", "c:/cccc", "..\\..\\cccc");
@@ -1226,6 +1190,10 @@ test "relative" {
try testRelativeWindows("C:\\baz", "\\\\foo\\bar\\baz", "\\\\foo\\bar\\baz");
try testRelativeWindows("\\\\foo\\bar\\baz", "C:\\baz", "C:\\baz");

try testRelativeWindows("a/b/c", "a\\b", "..");
try testRelativeWindows("a/b/c", "a", "..\\..");
try testRelativeWindows("a/b/c", "a\\b\\c\\d", "d");

try testRelativePosix("/var/lib", "/var", "..");
try testRelativePosix("/var/lib", "/bin", "../../bin");
try testRelativePosix("/var/lib", "/var/lib", "");
@@ -1243,13 +1211,13 @@ test "relative" {
fn testRelativePosix(from: []const u8, to: []const u8, expected_output: []const u8) !void {
const result = try relativePosix(testing.allocator, from, to);
defer testing.allocator.free(result);
try testing.expectEqualSlices(u8, expected_output, result);
try testing.expectEqualStrings(expected_output, result);
}

fn testRelativeWindows(from: []const u8, to: []const u8, expected_output: []const u8) !void {
const result = try relativeWindows(testing.allocator, from, to);
defer testing.allocator.free(result);
try testing.expectEqualSlices(u8, expected_output, result);
try testing.expectEqualStrings(expected_output, result);
}

/// Returns the extension of the file name (if any).
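Not part of the diff: a minimal sketch of the behavior the new "Keep relative paths relative." test cases above assert, namely that `resolvePosix` no longer prepends the current working directory when every input path is relative (it also now requires at least one path and returns `Allocator.Error`). The expected strings mirror the added tests; nothing here goes beyond them.

const std = @import("std");

test "resolve keeps relative paths relative" {
    const gpa = std.testing.allocator;

    // "a/b/c/" followed by "../../.." collapses to ".." instead of being
    // joined onto the current working directory.
    const up = try std.fs.path.resolvePosix(gpa, &.{ "a/b/c/", "../../.." });
    defer gpa.free(up);
    try std.testing.expectEqualStrings("..", up);

    // A single relative component stays relative.
    const rel = try std.fs.path.resolvePosix(gpa, &.{"a/b"});
    defer gpa.free(rel);
    try std.testing.expectEqualStrings("a/b", rel);
}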
@@ -1095,7 +1095,9 @@ test "open file with exclusive nonblocking lock twice (absolute paths)" {

const allocator = testing.allocator;

const file_paths: [1][]const u8 = .{"zig-test-absolute-paths.txt"};
const cwd = try std.process.getCwdAlloc(allocator);
defer allocator.free(cwd);
const file_paths: [2][]const u8 = .{ cwd, "zig-test-absolute-paths.txt" };
const filename = try fs.path.resolve(allocator, &file_paths);
defer allocator.free(filename);

@@ -202,10 +202,7 @@ pub const PreopenList = struct {
// POSIX paths, relative to "/" or `cwd_root` depending on whether they start with "."
const path = if (cwd_root) |cwd| blk: {
const resolve_paths: []const []const u8 = if (raw_path[0] == '.') &.{ cwd, raw_path } else &.{ "/", raw_path };
break :blk fs.path.resolve(self.buffer.allocator, resolve_paths) catch |err| switch (err) {
error.CurrentWorkingDirectoryUnlinked => unreachable, // root is absolute, so CWD not queried
else => |e| return e,
};
break :blk try fs.path.resolve(self.buffer.allocator, resolve_paths);
} else blk: {
// If we were provided no CWD root, we preserve the preopen dir without resolving
break :blk try self.buffer.allocator.dupe(u8, raw_path);
src/Cache.zig
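Not part of the diff: a rough sketch of how the new prefix table is populated and used, pieced together from the addPrefix calls in src/Compilation.zig and the findPrefixResolved function below (the example path is a placeholder):

// Index 0 is reserved for the null (cwd) prefix, so findPrefixResolved
// starts scanning at index 1 and falls back to prefix 0 when nothing matches.
cache.addPrefix(.{ .path = null, .handle = fs.cwd() }); // prefix 0
cache.addPrefix(options.zig_lib_directory); // prefix 1
cache.addPrefix(options.local_cache_directory); // prefix 2

// A later Manifest.addFile("<zig lib dir>/std/std.zig", null) call would then
// be stored as PrefixedPath{ .prefix = 1, .sub_path = "std/std.zig" } in the
// manifest rather than as an absolute path.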
@@ -1,3 +1,7 @@
//! Manages `zig-cache` directories.
//! This is not a general-purpose cache. It is designed to be fast and simple,
//! not to withstand attacks using specially-crafted input.

gpa: Allocator,
manifest_dir: fs.Dir,
hash: HashHelper = .{},
@@ -5,6 +9,14 @@ hash: HashHelper = .{},
recent_problematic_timestamp: i128 = 0,
mutex: std.Thread.Mutex = .{},

/// A set of strings such as the zig library directory or project source root, which
/// are stripped from the file paths before putting into the cache. They
/// are replaced with single-character indicators. This is not to save
/// space but to eliminate absolute file paths. This improves portability
/// and usefulness of the cache for advanced use cases.
prefixes_buffer: [3]Compilation.Directory = undefined,
prefixes_len: usize = 0,

const Cache = @This();
const std = @import("std");
const builtin = @import("builtin");
@@ -18,6 +30,14 @@ const Allocator = std.mem.Allocator;
const Compilation = @import("Compilation.zig");
const log = std.log.scoped(.cache);

pub fn addPrefix(cache: *Cache, directory: Compilation.Directory) void {
if (directory.path) |p| {
log.debug("Cache.addPrefix {d} {s}", .{ cache.prefixes_len, p });
}
cache.prefixes_buffer[cache.prefixes_len] = directory;
cache.prefixes_len += 1;
}

/// Be sure to call `Manifest.deinit` after successful initialization.
pub fn obtain(cache: *Cache) Manifest {
return Manifest{
@@ -29,6 +49,48 @@ pub fn obtain(cache: *Cache) Manifest {
};
}

pub fn prefixes(cache: *const Cache) []const Compilation.Directory {
return cache.prefixes_buffer[0..cache.prefixes_len];
}

const PrefixedPath = struct {
prefix: u8,
sub_path: []u8,
};

fn findPrefix(cache: *const Cache, file_path: []const u8) !PrefixedPath {
const gpa = cache.gpa;
const resolved_path = try fs.path.resolve(gpa, &[_][]const u8{file_path});
errdefer gpa.free(resolved_path);
return findPrefixResolved(cache, resolved_path);
}

/// Takes ownership of `resolved_path` on success.
fn findPrefixResolved(cache: *const Cache, resolved_path: []u8) !PrefixedPath {
const gpa = cache.gpa;
const prefixes_slice = cache.prefixes();
var i: u8 = 1; // Start at 1 to skip over checking the null prefix.
while (i < prefixes_slice.len) : (i += 1) {
const p = prefixes_slice[i].path.?;
if (mem.startsWith(u8, resolved_path, p)) {
// +1 to skip over the path separator here
const sub_path = try gpa.dupe(u8, resolved_path[p.len + 1 ..]);
gpa.free(resolved_path);
return PrefixedPath{
.prefix = @intCast(u8, i),
.sub_path = sub_path,
};
} else {
log.debug("'{s}' does not start with '{s}'", .{ resolved_path, p });
}
}

return PrefixedPath{
.prefix = 0,
.sub_path = resolved_path,
};
}

/// This is 128 bits - Even with 2^54 cache entries, the probably of a collision would be under 10^-6
pub const bin_digest_len = 16;
pub const hex_digest_len = bin_digest_len * 2;
@@ -45,7 +107,7 @@ pub const Hasher = crypto.auth.siphash.SipHash128(1, 3);
pub const hasher_init: Hasher = Hasher.init(&[_]u8{0} ** Hasher.key_length);

pub const File = struct {
path: ?[]const u8,
prefixed_path: ?PrefixedPath,
max_file_size: ?usize,
stat: Stat,
bin_digest: BinDigest,
@@ -57,13 +119,13 @@ pub const File = struct {
mtime: i128,
};

pub fn deinit(self: *File, allocator: Allocator) void {
if (self.path) |owned_slice| {
allocator.free(owned_slice);
self.path = null;
pub fn deinit(self: *File, gpa: Allocator) void {
if (self.prefixed_path) |pp| {
gpa.free(pp.sub_path);
self.prefixed_path = null;
}
if (self.contents) |contents| {
allocator.free(contents);
gpa.free(contents);
self.contents = null;
}
self.* = undefined;
@@ -175,9 +237,6 @@ pub const Lock = struct {
}
};

/// Manifest manages project-local `zig-cache` directories.
/// This is not a general-purpose cache.
/// It is designed to be fast and simple, not to withstand attacks using specially-crafted input.
pub const Manifest = struct {
cache: *Cache,
/// Current state for incremental hashing.
@@ -220,21 +279,27 @@ pub const Manifest = struct {
pub fn addFile(self: *Manifest, file_path: []const u8, max_file_size: ?usize) !usize {
assert(self.manifest_file == null);

try self.files.ensureUnusedCapacity(self.cache.gpa, 1);
const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
const gpa = self.cache.gpa;
try self.files.ensureUnusedCapacity(gpa, 1);
const prefixed_path = try self.cache.findPrefix(file_path);
errdefer gpa.free(prefixed_path.sub_path);

log.debug("Manifest.addFile {s} -> {d} {s}", .{
file_path, prefixed_path.prefix, prefixed_path.sub_path,
});

const idx = self.files.items.len;
self.files.addOneAssumeCapacity().* = .{
.path = resolved_path,
.prefixed_path = prefixed_path,
.contents = null,
.max_file_size = max_file_size,
.stat = undefined,
.bin_digest = undefined,
};

self.hash.addBytes(resolved_path);
self.hash.add(prefixed_path.prefix);
self.hash.addBytes(prefixed_path.sub_path);

return idx;
return self.files.items.len - 1;
}

pub fn hashCSource(self: *Manifest, c_source: Compilation.CSourceFile) !void {
@@ -281,6 +346,7 @@ pub const Manifest = struct {
/// option, one may call `toOwnedLock` to obtain a smaller object which can represent
/// the lock. `deinit` is safe to call whether or not `toOwnedLock` has been called.
pub fn hit(self: *Manifest) !bool {
const gpa = self.cache.gpa;
assert(self.manifest_file == null);

self.failed_file_index = null;
@@ -362,8 +428,8 @@ pub const Manifest = struct {

self.want_refresh_timestamp = true;

const file_contents = try self.manifest_file.?.reader().readAllAlloc(self.cache.gpa, manifest_file_size_max);
defer self.cache.gpa.free(file_contents);
const file_contents = try self.manifest_file.?.reader().readAllAlloc(gpa, manifest_file_size_max);
defer gpa.free(file_contents);

const input_file_count = self.files.items.len;
var any_file_changed = false;
@@ -373,9 +439,9 @@ pub const Manifest = struct {
defer idx += 1;

const cache_hash_file = if (idx < input_file_count) &self.files.items[idx] else blk: {
const new = try self.files.addOne(self.cache.gpa);
const new = try self.files.addOne(gpa);
new.* = .{
.path = null,
.prefixed_path = null,
.contents = null,
.max_file_size = null,
.stat = undefined,
@@ -389,27 +455,35 @@ pub const Manifest = struct {
const inode = iter.next() orelse return error.InvalidFormat;
const mtime_nsec_str = iter.next() orelse return error.InvalidFormat;
const digest_str = iter.next() orelse return error.InvalidFormat;
const prefix_str = iter.next() orelse return error.InvalidFormat;
const file_path = iter.rest();

cache_hash_file.stat.size = fmt.parseInt(u64, size, 10) catch return error.InvalidFormat;
cache_hash_file.stat.inode = fmt.parseInt(fs.File.INode, inode, 10) catch return error.InvalidFormat;
cache_hash_file.stat.mtime = fmt.parseInt(i64, mtime_nsec_str, 10) catch return error.InvalidFormat;
_ = std.fmt.hexToBytes(&cache_hash_file.bin_digest, digest_str) catch return error.InvalidFormat;
const prefix = fmt.parseInt(u8, prefix_str, 10) catch return error.InvalidFormat;
if (prefix >= self.cache.prefixes_len) return error.InvalidFormat;

if (file_path.len == 0) {
return error.InvalidFormat;
}
if (cache_hash_file.path) |p| {
if (!mem.eql(u8, file_path, p)) {
if (cache_hash_file.prefixed_path) |pp| {
if (pp.prefix != prefix or !mem.eql(u8, file_path, pp.sub_path)) {
return error.InvalidFormat;
}
}

if (cache_hash_file.path == null) {
cache_hash_file.path = try self.cache.gpa.dupe(u8, file_path);
if (cache_hash_file.prefixed_path == null) {
cache_hash_file.prefixed_path = .{
.prefix = prefix,
.sub_path = try gpa.dupe(u8, file_path),
};
}

const this_file = fs.cwd().openFile(cache_hash_file.path.?, .{ .mode = .read_only }) catch |err| switch (err) {
const pp = cache_hash_file.prefixed_path.?;
const dir = self.cache.prefixes()[pp.prefix].handle;
const this_file = dir.openFile(pp.sub_path, .{ .mode = .read_only }) catch |err| switch (err) {
error.FileNotFound => {
try self.upgradeToExclusiveLock();
return false;
@@ -535,8 +609,9 @@ pub const Manifest = struct {
}

fn populateFileHash(self: *Manifest, ch_file: *File) !void {
log.debug("populateFileHash {s}", .{ch_file.path.?});
const file = try fs.cwd().openFile(ch_file.path.?, .{});
const pp = ch_file.prefixed_path.?;
const dir = self.cache.prefixes()[pp.prefix].handle;
const file = try dir.openFile(pp.sub_path, .{});
defer file.close();

const actual_stat = try file.stat();
@@ -588,12 +663,17 @@ pub const Manifest = struct {
pub fn addFilePostFetch(self: *Manifest, file_path: []const u8, max_file_size: usize) ![]const u8 {
assert(self.manifest_file != null);

const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
errdefer self.cache.gpa.free(resolved_path);
const gpa = self.cache.gpa;
const prefixed_path = try self.cache.findPrefix(file_path);
errdefer gpa.free(prefixed_path.sub_path);

const new_ch_file = try self.files.addOne(self.cache.gpa);
log.debug("Manifest.addFilePostFetch {s} -> {d} {s}", .{
file_path, prefixed_path.prefix, prefixed_path.sub_path,
});

const new_ch_file = try self.files.addOne(gpa);
new_ch_file.* = .{
.path = resolved_path,
.prefixed_path = prefixed_path,
.max_file_size = max_file_size,
.stat = undefined,
.bin_digest = undefined,
@@ -613,12 +693,17 @@ pub const Manifest = struct {
pub fn addFilePost(self: *Manifest, file_path: []const u8) !void {
assert(self.manifest_file != null);

const resolved_path = try fs.path.resolve(self.cache.gpa, &[_][]const u8{file_path});
errdefer self.cache.gpa.free(resolved_path);
const gpa = self.cache.gpa;
const prefixed_path = try self.cache.findPrefix(file_path);
errdefer gpa.free(prefixed_path.sub_path);

const new_ch_file = try self.files.addOne(self.cache.gpa);
log.debug("Manifest.addFilePost {s} -> {d} {s}", .{
file_path, prefixed_path.prefix, prefixed_path.sub_path,
});

const new_ch_file = try self.files.addOne(gpa);
new_ch_file.* = .{
.path = resolved_path,
.prefixed_path = prefixed_path,
.max_file_size = null,
.stat = undefined,
.bin_digest = undefined,
@@ -633,17 +718,27 @@ pub const Manifest = struct {
/// On success, cache takes ownership of `resolved_path`.
pub fn addFilePostContents(
self: *Manifest,
resolved_path: []const u8,
resolved_path: []u8,
bytes: []const u8,
stat: File.Stat,
) error{OutOfMemory}!void {
assert(self.manifest_file != null);
const gpa = self.cache.gpa;

const ch_file = try self.files.addOne(self.cache.gpa);
const ch_file = try self.files.addOne(gpa);
errdefer self.files.shrinkRetainingCapacity(self.files.items.len - 1);

log.debug("Manifest.addFilePostContents resolved_path={s}", .{resolved_path});

const prefixed_path = try self.cache.findPrefixResolved(resolved_path);
errdefer gpa.free(prefixed_path.sub_path);

log.debug("Manifest.addFilePostContents -> {d} {s}", .{
prefixed_path.prefix, prefixed_path.sub_path,
});

ch_file.* = .{
.path = resolved_path,
.prefixed_path = prefixed_path,
.max_file_size = null,
.stat = stat,
.bin_digest = undefined,
@@ -742,12 +837,13 @@ pub const Manifest = struct {
"{s}",
.{std.fmt.fmtSliceHexLower(&file.bin_digest)},
) catch unreachable;
try writer.print("{d} {d} {d} {s} {s}\n", .{
try writer.print("{d} {d} {d} {s} {d} {s}\n", .{
file.stat.size,
file.stat.inode,
file.stat.mtime,
&encoded_digest,
file.path.?,
file.prefixed_path.?.prefix,
file.prefixed_path.?.sub_path,
});
}

@@ -889,6 +985,7 @@ test "cache file and then recall it" {
.gpa = testing.allocator,
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
defer cache.manifest_dir.close();

{
@@ -960,6 +1057,7 @@ test "check that changing a file makes cache fail" {
.gpa = testing.allocator,
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
defer cache.manifest_dir.close();

{
@@ -1022,6 +1120,7 @@ test "no file inputs" {
.gpa = testing.allocator,
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
defer cache.manifest_dir.close();

{
@@ -1080,6 +1179,7 @@ test "Manifest with files added after initial hash work" {
.gpa = testing.allocator,
.manifest_dir = try cwd.makeOpenPath(temp_manifest_dir, .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
defer cache.manifest_dir.close();

{
@@ -201,7 +201,9 @@ pub const CRTFile = struct {
/// For passing to a C compiler.
pub const CSourceFile = struct {
src_path: []const u8,
extra_flags: []const []const u8 = &[0][]const u8{},
extra_flags: []const []const u8 = &.{},
/// Same as extra_flags except they are not added to the Cache hash.
cache_exempt_flags: []const []const u8 = &.{},
};

const Job = union(enum) {
@@ -1456,23 +1458,27 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
else => @as(u8, 3),
};

// We put everything into the cache hash that *cannot be modified during an incremental update*.
// For example, one cannot change the target between updates, but one can change source files,
// so the target goes into the cache hash, but source files do not. This is so that we can
// find the same binary and incrementally update it even if there are modified source files.
// We do this even if outputting to the current directory because we need somewhere to store
// incremental compilation metadata.
// We put everything into the cache hash that *cannot be modified
// during an incremental update*. For example, one cannot change the
// target between updates, but one can change source files, so the
// target goes into the cache hash, but source files do not. This is so
// that we can find the same binary and incrementally update it even if
// there are modified source files. We do this even if outputting to
// the current directory because we need somewhere to store incremental
// compilation metadata.
const cache = try arena.create(Cache);
cache.* = .{
.gpa = gpa,
.manifest_dir = try options.local_cache_directory.handle.makeOpenPath("h", .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
cache.addPrefix(options.zig_lib_directory);
cache.addPrefix(options.local_cache_directory);
errdefer cache.manifest_dir.close();

// This is shared hasher state common to zig source and all C source files.
cache.hash.addBytes(build_options.version);
cache.hash.add(builtin.zig_backend);
cache.hash.addBytes(options.zig_lib_directory.path orelse ".");
cache.hash.add(options.optimize_mode);
cache.hash.add(options.target.cpu.arch);
cache.hash.addBytes(options.target.cpu.model.name);
@@ -2265,8 +2271,9 @@ pub fn update(comp: *Compilation) !void {
const is_hit = man.hit() catch |err| {
// TODO properly bubble these up instead of emitting a warning
const i = man.failed_file_index orelse return err;
const file_path = man.files.items[i].path orelse return err;
std.log.warn("{s}: {s}", .{ @errorName(err), file_path });
const pp = man.files.items[i].prefixed_path orelse return err;
const prefix = man.cache.prefixes()[pp.prefix].path orelse "";
std.log.warn("{s}: {s}{s}", .{ @errorName(err), prefix, pp.sub_path });
return err;
};
if (is_hit) {
@@ -3246,13 +3253,6 @@ fn processOneJob(comp: *Compilation, job: Job) !void {

const module = comp.bin_file.options.module.?;
module.semaPkg(pkg) catch |err| switch (err) {
error.CurrentWorkingDirectoryUnlinked,
error.Unexpected,
=> comp.lockAndSetMiscFailure(
.analyze_pkg,
"unexpected problem analyzing package '{s}'",
.{pkg.root_src_path},
),
error.OutOfMemory => return error.OutOfMemory,
error.AnalysisFail => return,
};
@@ -3557,7 +3557,14 @@ pub fn obtainCObjectCacheManifest(comp: *const Compilation) Cache.Manifest {
man.hash.add(comp.sanitize_c);
man.hash.addListOfBytes(comp.clang_argv);
man.hash.add(comp.bin_file.options.link_libcpp);
man.hash.addListOfBytes(comp.libc_include_dir_list);

// When libc_installation is null it means that Zig generated this dir list
// based on the zig library directory alone. The zig lib directory file
// path is purposefully either in the cache or not in the cache. The
// decision should not be overridden here.
if (comp.bin_file.options.libc_installation != null) {
man.hash.addListOfBytes(comp.libc_include_dir_list);
}

return man;
}
@@ -3944,6 +3951,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
{
try comp.addCCArgs(arena, &argv, ext, null);
try argv.appendSlice(c_object.src.extra_flags);
try argv.appendSlice(c_object.src.cache_exempt_flags);

const out_obj_path = if (comp.bin_file.options.emit) |emit|
try emit.directory.join(arena, &.{emit.sub_path})
@@ -3985,6 +3993,7 @@ fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.P
try std.fmt.allocPrint(arena, "{s}.d", .{out_obj_path});
try comp.addCCArgs(arena, &argv, ext, out_dep_path);
try argv.appendSlice(c_object.src.extra_flags);
try argv.appendSlice(c_object.src.cache_exempt_flags);

try argv.ensureUnusedCapacity(5);
switch (comp.clang_preprocessor_mode) {
@@ -653,6 +653,9 @@ pub fn buildSharedObjects(comp: *Compilation) !void {
.gpa = comp.gpa,
.manifest_dir = try comp.global_cache_directory.handle.makeOpenPath("h", .{}),
};
cache.addPrefix(.{ .path = null, .handle = fs.cwd() });
cache.addPrefix(comp.zig_lib_directory);
cache.addPrefix(comp.global_cache_directory);
defer cache.manifest_dir.close();

var man = cache.obtain();

@@ -187,15 +187,6 @@ pub fn buildLibCXX(comp: *Compilation) !void {
try cflags.append("-faligned-allocation");
}

try cflags.append("-I");
try cflags.append(cxx_include_path);

try cflags.append("-I");
try cflags.append(cxxabi_include_path);

try cflags.append("-I");
try cflags.append(cxx_src_include_path);

if (target_util.supports_fpic(target)) {
try cflags.append("-fPIC");
}
@@ -203,9 +194,24 @@ pub fn buildLibCXX(comp: *Compilation) !void {
try cflags.append("-std=c++20");
try cflags.append("-Wno-user-defined-literals");

// These depend on only the zig lib directory file path, which is
// purposefully either in the cache or not in the cache. The decision
// should not be overridden here.
var cache_exempt_flags = std.ArrayList([]const u8).init(arena);

try cache_exempt_flags.append("-I");
try cache_exempt_flags.append(cxx_include_path);

try cache_exempt_flags.append("-I");
try cache_exempt_flags.append(cxxabi_include_path);

try cache_exempt_flags.append("-I");
try cache_exempt_flags.append(cxx_src_include_path);

c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxx", cxx_src }),
.extra_flags = cflags.items,
.cache_exempt_flags = cache_exempt_flags.items,
});
}

@@ -340,15 +346,6 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
try cflags.append("-D_LIBCPP_HAS_MUSL_LIBC");
}

try cflags.append("-I");
try cflags.append(cxxabi_include_path);

try cflags.append("-I");
try cflags.append(cxx_include_path);

try cflags.append("-I");
try cflags.append(cxx_src_include_path);

if (target_util.supports_fpic(target)) {
try cflags.append("-fPIC");
}
@@ -357,9 +354,24 @@ pub fn buildLibCXXABI(comp: *Compilation) !void {
try cflags.append("-funwind-tables");
try cflags.append("-std=c++20");

// These depend on only the zig lib directory file path, which is
// purposefully either in the cache or not in the cache. The decision
// should not be overridden here.
var cache_exempt_flags = std.ArrayList([]const u8).init(arena);

try cache_exempt_flags.append("-I");
try cache_exempt_flags.append(cxxabi_include_path);

try cache_exempt_flags.append("-I");
try cache_exempt_flags.append(cxx_include_path);

try cache_exempt_flags.append("-I");
try cache_exempt_flags.append(cxx_src_include_path);

c_source_files.appendAssumeCapacity(.{
.src_path = try comp.zig_lib_directory.join(arena, &[_][]const u8{ "libcxxabi", cxxabi_src }),
.extra_flags = cflags.items,
.cache_exempt_flags = cache_exempt_flags.items,
});
}
src/main.zig
@@ -2744,11 +2744,14 @@ fn buildOutputType(
}

const self_exe_path = try introspect.findZigExePath(arena);
var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |lib_dir| .{
.path = lib_dir,
.handle = fs.cwd().openDir(lib_dir, .{}) catch |err| {
fatal("unable to open zig lib directory '{s}': {s}", .{ lib_dir, @errorName(err) });
},
var zig_lib_directory: Compilation.Directory = if (override_lib_dir) |unresolved_lib_dir| l: {
const lib_dir = try fs.path.resolve(arena, &.{unresolved_lib_dir});
break :l .{
.path = lib_dir,
.handle = fs.cwd().openDir(lib_dir, .{}) catch |err| {
fatal("unable to open zig lib directory '{s}': {s}", .{ lib_dir, @errorName(err) });
},
};
} else introspect.findZigLibDirFromSelfExe(arena, self_exe_path) catch |err| {
fatal("unable to find zig installation directory: {s}\n", .{@errorName(err)});
};

@@ -302,6 +302,10 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
.gpa = comp.gpa,
.manifest_dir = comp.cache_parent.manifest_dir,
};
for (comp.cache_parent.prefixes()) |prefix| {
cache.addPrefix(prefix);
}

cache.hash.addBytes(build_options.version);
cache.hash.addOptionalBytes(comp.zig_lib_directory.path);
cache.hash.add(target.cpu.arch);