Mirror of https://github.com/ziglang/zig.git (synced 2026-01-21 06:45:24 +00:00)

commit 71dfce31bb

    Merge pull request #16548 from mlugg/feat/package-dl-progress

    Display package download progress
lib/std/Progress.zig

@@ -68,6 +68,7 @@ pub const Node = struct {
     context: *Progress,
     parent: ?*Node,
     name: []const u8,
+    unit: []const u8 = "",
     /// Must be handled atomically to be thread-safe.
     recently_updated_child: ?*Node = null,
     /// Must be handled atomically to be thread-safe. 0 means null.
@@ -141,6 +142,21 @@ pub const Node = struct {
         }
     }

+    /// Thread-safe.
+    pub fn setUnit(self: *Node, unit: []const u8) void {
+        const progress = self.context;
+        progress.update_mutex.lock();
+        defer progress.update_mutex.unlock();
+        self.unit = unit;
+        if (self.parent) |parent| {
+            @atomicStore(?*Node, &parent.recently_updated_child, self, .Release);
+            if (parent.parent) |grand_parent| {
+                @atomicStore(?*Node, &grand_parent.recently_updated_child, parent, .Release);
+            }
+            if (progress.timer) |*timer| progress.maybeRefreshWithHeldLock(timer);
+        }
+    }
+
     /// Thread-safe. 0 means unknown.
     pub fn setEstimatedTotalItems(self: *Node, count: usize) void {
         @atomicStore(usize, &self.unprotected_estimated_total_items, count, .Monotonic);
@@ -307,11 +323,11 @@ fn refreshWithHeldLock(self: *Progress) void {
             }
             if (eti > 0) {
                 if (need_ellipse) self.bufWrite(&end, " ", .{});
-                self.bufWrite(&end, "[{d}/{d}] ", .{ current_item, eti });
+                self.bufWrite(&end, "[{d}/{d}{s}] ", .{ current_item, eti, node.unit });
                 need_ellipse = false;
             } else if (completed_items != 0) {
                 if (need_ellipse) self.bufWrite(&end, " ", .{});
-                self.bufWrite(&end, "[{d}] ", .{current_item});
+                self.bufWrite(&end, "[{d}{s}] ", .{ current_item, node.unit });
                 need_ellipse = false;
             }
         }
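The three hunks above are the entire std.Progress side of the feature: a new `unit` string on `Node`, a thread-safe `setUnit` setter, and the draw code interpolating the unit into the `[current/total]` counter. A minimal sketch of how a caller might drive the new API, assuming only the functions visible in this diff (the node name and byte counts are invented for illustration):

const std = @import("std");

pub fn main() void {
    var progress: std.Progress = .{};
    const root = progress.start("Fetch Packages", 1);
    defer root.end();

    var node = root.start("example-dep", 0);
    defer node.end();
    node.activate();

    // With a unit set, the refresh code above renders e.g. "[512/2048KiB]"
    // instead of the old "[512/2048]".
    node.setUnit("KiB");
    node.setEstimatedTotalItems(2048);
    var done: usize = 0;
    while (done < 2048) : (done += 512) {
        node.setCompletedItems(done);
        progress.maybeRefresh();
    }
}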
src/Package.zig (114 lines changed)
@@ -228,6 +228,7 @@ pub fn fetchAndAddDependencies(
     name_prefix: []const u8,
     error_bundle: *std.zig.ErrorBundle.Wip,
     all_modules: *AllModules,
+    root_prog_node: *std.Progress.Node,
 ) !void {
     const max_bytes = 10 * 1024 * 1024;
     const gpa = thread_pool.allocator;
@@ -272,6 +273,17 @@ pub fn fetchAndAddDependencies(
         .error_bundle = error_bundle,
     };

+    for (manifest.dependencies.values()) |dep| {
+        // If the hash is invalid, let errors happen later.
+        // We only want to add these for progress reporting.
+        const hash = dep.hash orelse continue;
+        if (hash.len != hex_multihash_len) continue;
+        const gop = try all_modules.getOrPut(gpa, hash[0..hex_multihash_len].*);
+        if (!gop.found_existing) gop.value_ptr.* = null;
+    }
+
+    root_prog_node.setEstimatedTotalItems(all_modules.count());
+
     const deps_list = manifest.dependencies.values();
     for (manifest.dependencies.keys(), 0..) |name, i| {
         const dep = deps_list[i];
@@ -288,6 +300,7 @@ pub fn fetchAndAddDependencies(
             build_roots_source,
             fqn,
             all_modules,
+            root_prog_node,
         );

         if (!sub.found_existing) {
@@ -304,6 +317,7 @@ pub fn fetchAndAddDependencies(
                 sub_prefix,
                 error_bundle,
                 all_modules,
+                root_prog_node,
             );
         }

@@ -404,7 +418,51 @@ const Report = struct {
 const hex_multihash_len = 2 * Manifest.multihash_len;
 const MultiHashHexDigest = [hex_multihash_len]u8;
 /// This is to avoid creating multiple modules for the same build.zig file.
-pub const AllModules = std.AutoHashMapUnmanaged(MultiHashHexDigest, *Package);
+/// If the value is `null`, the package is a known dependency, but has not yet
+/// been fetched.
+pub const AllModules = std.AutoHashMapUnmanaged(MultiHashHexDigest, ?*Package);
+
+fn ProgressReader(comptime ReaderType: type) type {
+    return struct {
+        child_reader: ReaderType,
+        bytes_read: u64 = 0,
+        prog_node: *std.Progress.Node,
+        unit: enum {
+            kib,
+            mib,
+            any,
+        },
+
+        pub const Error = ReaderType.Error;
+        pub const Reader = std.io.Reader(*@This(), Error, read);
+
+        pub fn read(self: *@This(), buf: []u8) Error!usize {
+            const amt = try self.child_reader.read(buf);
+            self.bytes_read += amt;
+            const kib = self.bytes_read / 1024;
+            const mib = kib / 1024;
+            switch (self.unit) {
+                .kib => self.prog_node.setCompletedItems(@intCast(kib)),
+                .mib => self.prog_node.setCompletedItems(@intCast(mib)),
+                .any => {
+                    if (mib > 0) {
+                        self.prog_node.setUnit("MiB");
+                        self.prog_node.setCompletedItems(@intCast(mib));
+                    } else {
+                        self.prog_node.setUnit("KiB");
+                        self.prog_node.setCompletedItems(@intCast(kib));
+                    }
+                },
+            }
+            self.prog_node.context.maybeRefresh();
+            return amt;
+        }
+
+        pub fn reader(self: *@This()) Reader {
+            return .{ .context = self };
+        }
+    };
+}
+
 fn fetchAndUnpack(
     thread_pool: *ThreadPool,
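`ProgressReader` is the heart of the change: a generic adapter that counts bytes as they pass through `read` and mirrors the count into a progress node, switching between KiB and MiB on the fly when the total size is unknown. The same wrapping pattern works for any std.io reader. A stripped-down, self-contained variant (a sketch, not code from this commit) that reports into a plain field instead of a `std.Progress.Node`:

const std = @import("std");

fn CountingReader(comptime ReaderType: type) type {
    return struct {
        child_reader: ReaderType,
        bytes_read: u64 = 0,

        pub const Error = ReaderType.Error;
        pub const Reader = std.io.Reader(*@This(), Error, read);

        // Forward to the wrapped reader, recording how much was read.
        pub fn read(self: *@This(), buf: []u8) Error!usize {
            const amt = try self.child_reader.read(buf);
            self.bytes_read += amt;
            return amt;
        }

        pub fn reader(self: *@This()) Reader {
            return .{ .context = self };
        }
    };
}

test "counting reader observes bytes flowing through" {
    var fbs = std.io.fixedBufferStream("hello, zig");
    var counting: CountingReader(@TypeOf(fbs.reader())) = .{ .child_reader = fbs.reader() };
    var buf: [4]u8 = undefined;
    _ = try counting.reader().read(&buf);
    try std.testing.expectEqual(@as(u64, 4), counting.bytes_read);
}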
@@ -415,6 +473,7 @@ fn fetchAndUnpack(
     build_roots_source: *std.ArrayList(u8),
     fqn: []const u8,
     all_modules: *AllModules,
+    root_prog_node: *std.Progress.Node,
 ) !struct { mod: *Package, found_existing: bool } {
     const gpa = http_client.allocator;
     const s = fs.path.sep_str;
@@ -442,13 +501,17 @@ fn fetchAndUnpack(
     // so we must detect if a module has been created for this package and reuse it.
     const gop = try all_modules.getOrPut(gpa, hex_digest.*);
     if (gop.found_existing) {
-        gpa.free(build_root);
-        return .{
-            .mod = gop.value_ptr.*,
-            .found_existing = true,
-        };
+        if (gop.value_ptr.*) |mod| {
+            gpa.free(build_root);
+            return .{
+                .mod = mod,
+                .found_existing = true,
+            };
+        }
     }

+    root_prog_node.completeOne();
+
     const ptr = try gpa.create(Package);
     errdefer gpa.destroy(ptr);

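The switch to `?*Package` pays off here: a dependency pre-registered as `null` by the scanning loop in `fetchAndAddDependencies` is "known but not yet fetched", so `found_existing` alone no longer proves a module exists and the optional must be unwrapped. A tiny self-contained model of that two-phase `getOrPut` pattern (hypothetical, with `u32` standing in for `*Package`):

const std = @import("std");

test "two-phase registration with optional values" {
    const gpa = std.testing.allocator;
    var map = std.AutoHashMapUnmanaged([4]u8, ?u32){};
    defer map.deinit(gpa);

    // Phase 1: register the key with no value, purely for progress accounting.
    const pre = try map.getOrPut(gpa, "abcd".*);
    if (!pre.found_existing) pre.value_ptr.* = null;

    // Phase 2: the fetch step finds the entry but must still do the work.
    const gop = try map.getOrPut(gpa, "abcd".*);
    try std.testing.expect(gop.found_existing);
    try std.testing.expect(gop.value_ptr.* == null); // known, not yet fetched
    gop.value_ptr.* = 42; // now fetched
}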
@@ -471,6 +534,11 @@ fn fetchAndUnpack(
         };
     }

+    var pkg_prog_node = root_prog_node.start(fqn, 0);
+    defer pkg_prog_node.end();
+    pkg_prog_node.activate();
+    pkg_prog_node.context.refresh();
+
     const uri = try std.Uri.parse(dep.url);

     const rand_int = std.crypto.random.int(u64);
@@ -510,29 +578,53 @@ fn fetchAndUnpack(
     const content_type = req.response.headers.getFirstValue("Content-Type") orelse
         return report.fail(dep.url_tok, "Missing 'Content-Type' header", .{});

+    var prog_reader: ProgressReader(std.http.Client.Request.Reader) = .{
+        .child_reader = req.reader(),
+        .prog_node = &pkg_prog_node,
+        .unit = if (req.response.content_length) |content_length| unit: {
+            const kib = content_length / 1024;
+            const mib = kib / 1024;
+            if (mib > 0) {
+                pkg_prog_node.setEstimatedTotalItems(@intCast(mib));
+                pkg_prog_node.setUnit("MiB");
+                break :unit .mib;
+            } else {
+                pkg_prog_node.setEstimatedTotalItems(@intCast(@max(1, kib)));
+                pkg_prog_node.setUnit("KiB");
+                break :unit .kib;
+            }
+        } else .any,
+    };
+    pkg_prog_node.context.refresh();
+
     if (ascii.eqlIgnoreCase(content_type, "application/gzip") or
         ascii.eqlIgnoreCase(content_type, "application/x-gzip") or
         ascii.eqlIgnoreCase(content_type, "application/tar+gzip"))
     {
         // I observed the gzip stream to read 1 byte at a time, so I am using a
         // buffered reader on the front of it.
-        try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.gzip);
+        try unpackTarball(gpa, prog_reader.reader(), tmp_directory.handle, std.compress.gzip);
     } else if (ascii.eqlIgnoreCase(content_type, "application/x-xz")) {
         // I have not checked what buffer sizes the xz decompression implementation uses
         // by default, so the same logic applies for buffering the reader as for gzip.
-        try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.xz);
+        try unpackTarball(gpa, prog_reader.reader(), tmp_directory.handle, std.compress.xz);
     } else if (ascii.eqlIgnoreCase(content_type, "application/octet-stream")) {
         // support gitlab tarball urls such as https://gitlab.com/<namespace>/<project>/-/archive/<sha>/<project>-<sha>.tar.gz
         // whose content-disposition header is: 'attachment; filename="<project>-<sha>.tar.gz"'
         const content_disposition = req.response.headers.getFirstValue("Content-Disposition") orelse
             return report.fail(dep.url_tok, "Missing 'Content-Disposition' header for Content-Type=application/octet-stream", .{});
         if (isTarAttachment(content_disposition)) {
-            try unpackTarball(gpa, &req, tmp_directory.handle, std.compress.gzip);
+            try unpackTarball(gpa, prog_reader.reader(), tmp_directory.handle, std.compress.gzip);
         } else return report.fail(dep.url_tok, "Unsupported 'Content-Disposition' header value: '{s}' for Content-Type=application/octet-stream", .{content_disposition});
     } else {
         return report.fail(dep.url_tok, "Unsupported 'Content-Type' header value: '{s}'", .{content_type});
     }

+    // Download completed - stop showing downloaded amount as progress
+    pkg_prog_node.setEstimatedTotalItems(0);
+    pkg_prog_node.setCompletedItems(0);
+    pkg_prog_node.context.refresh();
+
     // TODO: delete files not included in the package prior to computing the package hash.
     // for example, if the ini file has directives to include/not include certain files,
     // apply those rules directly to the filesystem right here. This ensures that files
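The `.unit` selection above picks MiB when the advertised Content-Length is at least one mebibyte, otherwise KiB with the estimate clamped to at least 1; with no Content-Length it defers to `.any`, letting `ProgressReader.read` choose a unit as bytes arrive. The same decision, extracted into a pure helper as a sanity check (hypothetical, not part of the commit):

const std = @import("std");

const Unit = enum { kib, mib, any };

// Mirrors the labeled-block logic in the hunk above.
fn chooseUnit(content_length: ?u64) Unit {
    const len = content_length orelse return .any;
    const mib = len / 1024 / 1024;
    return if (mib > 0) .mib else .kib;
}

test "chooseUnit picks the coarsest sensible unit" {
    try std.testing.expectEqual(Unit.mib, chooseUnit(5 * 1024 * 1024)); // 5 MiB
    try std.testing.expectEqual(Unit.kib, chooseUnit(300 * 1024)); // 300 KiB
    try std.testing.expectEqual(Unit.any, chooseUnit(null)); // length unknown
}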
@@ -591,11 +683,11 @@ fn fetchAndUnpack(

 fn unpackTarball(
     gpa: Allocator,
-    req: *std.http.Client.Request,
+    req_reader: anytype,
     out_dir: fs.Dir,
     comptime compression: type,
 ) !void {
-    var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, req.reader());
+    var br = std.io.bufferedReaderSize(std.crypto.tls.max_ciphertext_record_len, req_reader);

     var decompress = try compression.decompress(gpa, br.reader());
     defer decompress.deinit();
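Changing `req: *std.http.Client.Request` to `req_reader: anytype` is what lets `unpackTarball` accept the wrapped `ProgressReader` as easily as the raw request reader: Zig instantiates the function per reader type at compile time, so no interface object is needed. A toy illustration of the same design choice (a hypothetical example, unrelated to the commit's code):

const std = @import("std");

// Any value with a compatible reader API works; mismatches fail at compile time.
fn countLines(reader: anytype) !usize {
    var n: usize = 0;
    var buf: [256]u8 = undefined;
    while (try reader.readUntilDelimiterOrEof(&buf, '\n')) |_| n += 1;
    return n;
}

test "countLines accepts any reader" {
    var fbs = std.io.fixedBufferStream("a\nb\nc\n");
    try std.testing.expectEqual(@as(usize, 3), try countLines(fbs.reader()));
}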
src/main.zig

@@ -4433,6 +4433,10 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     try wip_errors.init(gpa);
     defer wip_errors.deinit();

+    var progress: std.Progress = .{};
+    const root_prog_node = progress.start("Fetch Packages", 0);
+    defer root_prog_node.end();
+
     // Here we borrow main package's table and will replace it with a fresh
     // one after this process completes.
     const fetch_result = build_pkg.fetchAndAddDependencies(
@@ -4448,6 +4452,7 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
         "",
         &wip_errors,
         &all_modules,
+        root_prog_node,
     );
     if (wip_errors.root_list.items.len > 0) {
         var errors = try wip_errors.toOwnedBundle("");