Mirror of https://github.com/ziglang/zig.git (synced 2026-01-20 22:35:24 +00:00)
InternPool: implement and use thread-safe list for extra and limbs
commit bdae01ab04
parent 49b25475ad
@@ -21,7 +21,7 @@ const Runnable = struct {
     runFn: RunProto,
 };
 
-const RunProto = *const fn (*Runnable, id: ?usize) void;
+const RunProto = *const fn (*Runnable, id: ?u32) void;
 
 pub const Options = struct {
     allocator: std.mem.Allocator,
@@ -109,7 +109,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
         run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
         wait_group: *WaitGroup,
 
-        fn runFn(runnable: *Runnable, _: ?usize) void {
+        fn runFn(runnable: *Runnable, _: ?u32) void {
             const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
             const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
             @call(.auto, func, closure.arguments);
@@ -150,7 +150,7 @@ pub fn spawnWg(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, args
 /// Runs `func` in the thread pool, calling `WaitGroup.start` beforehand, and
 /// `WaitGroup.finish` after it returns.
 ///
-/// The first argument passed to `func` is a dense `usize` thread id, the rest
+/// The first argument passed to `func` is a dense `u32` thread id, the rest
 /// of the arguments are passed from `args`. Requires the pool to have been
 /// initialized with `.track_ids = true`.
 ///
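A minimal usage sketch of the contract documented above, assuming a pool created with `.track_ids = true`. The `tally` callback and the bucket-counting program are made up for illustration; only the `std.Thread.Pool` calls come from this diff.

```zig
const std = @import("std");

// Made-up callback: per the doc comment above, the first parameter is the
// dense u32 thread id and the remaining arguments come from `args`. Giving
// every id its own bucket avoids atomics entirely.
fn tally(id: u32, buckets: []u64) void {
    buckets[id] += 1;
}

pub fn main() !void {
    var gpa_state = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa_state.deinit();
    const gpa = gpa_state.allocator();

    var pool: std.Thread.Pool = undefined;
    try pool.init(.{ .allocator = gpa, .track_ids = true });
    defer pool.deinit();

    // One bucket per possible id: the main thread plus every worker.
    const buckets = try gpa.alloc(u64, pool.getIdCount());
    defer gpa.free(buckets);
    @memset(buckets, 0);

    var wait_group: std.Thread.WaitGroup = .{};
    for (0..1000) |_| pool.spawnWgId(&wait_group, tally, .{buckets});
    pool.waitAndWork(&wait_group);

    var total: u64 = 0;
    for (buckets) |n| total += n;
    std.debug.print("ran {d} tasks across {d} ids\n", .{ total, pool.getIdCount() });
}
```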
@@ -172,7 +172,7 @@ pub fn spawnWgId(pool: *Pool, wait_group: *WaitGroup, comptime func: anytype, ar
         run_node: RunQueue.Node = .{ .data = .{ .runFn = runFn } },
         wait_group: *WaitGroup,
 
-        fn runFn(runnable: *Runnable, id: ?usize) void {
+        fn runFn(runnable: *Runnable, id: ?u32) void {
             const run_node: *RunQueue.Node = @fieldParentPtr("data", runnable);
             const closure: *@This() = @alignCast(@fieldParentPtr("run_node", run_node));
             @call(.auto, func, .{id.?} ++ closure.arguments);
@@ -258,7 +258,7 @@ fn worker(pool: *Pool) void {
     pool.mutex.lock();
     defer pool.mutex.unlock();
 
-    const id = if (pool.ids.count() > 0) pool.ids.count() else null;
+    const id: ?u32 = if (pool.ids.count() > 0) @intCast(pool.ids.count()) else null;
     if (id) |_| pool.ids.putAssumeCapacityNoClobber(std.Thread.getCurrentId(), {});
 
     while (true) {
@@ -280,12 +280,15 @@ fn worker(pool: *Pool) void {
 }
 
 pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void {
-    var id: ?usize = null;
+    var id: ?u32 = null;
 
     while (!wait_group.isDone()) {
         pool.mutex.lock();
         if (pool.run_queue.popFirst()) |run_node| {
-            id = id orelse pool.ids.getIndex(std.Thread.getCurrentId());
+            id = id orelse if (pool.ids.getIndex(std.Thread.getCurrentId())) |index|
+                @intCast(index)
+            else
+                null;
             pool.mutex.unlock();
             run_node.data.runFn(&run_node.data, id);
             continue;
@@ -297,6 +300,6 @@ pub fn waitAndWork(pool: *Pool, wait_group: *WaitGroup) void {
     }
 }
 
-pub fn getIdCount(pool: *Pool) usize {
-    return 1 + pool.threads.len;
+pub fn getIdCount(pool: *Pool) u32 {
+    return @intCast(1 + pool.threads.len);
 }
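For context on the `worker` and `waitAndWork` hunks above: ids come from the insertion order of `pool.ids`. The main thread registers during `init` and owns id 0; a worker registering under the mutex sees `count()` equal to the number of threads registered before it, which is exactly its own dense id, and the main thread later recovers its id with `getIndex`. A toy reproduction of that bookkeeping (the literal thread-id values are invented):

```zig
const std = @import("std");

test "dense ids follow registration order (sketch)" {
    const gpa = std.testing.allocator;

    // The pool keeps an insertion-ordered map keyed by the OS thread id; the
    // value is void and an entry's index doubles as the dense id.
    var ids: std.AutoArrayHashMapUnmanaged(std.Thread.Id, void) = .{};
    defer ids.deinit(gpa);

    // init(): the main thread registers first, so it owns dense id 0.
    try ids.put(gpa, 1111, {});

    // worker(): under the mutex, count() is the number of threads registered
    // so far, i.e. exactly this worker's dense id.
    const worker_id: u32 = @intCast(ids.count());
    try ids.put(gpa, 2222, {});
    try std.testing.expectEqual(@as(u32, 1), worker_id);

    // waitAndWork(): the main thread recovers its own id by lookup.
    try std.testing.expectEqual(@as(?usize, 0), ids.getIndex(1111));
}
```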
@@ -2746,8 +2746,8 @@ pub fn makeBinFileWritable(comp: *Compilation) !void {
 const Header = extern struct {
     intern_pool: extern struct {
         //items_len: u32,
-        extra_len: u32,
-        limbs_len: u32,
+        //extra_len: u32,
+        //limbs_len: u32,
         //string_bytes_len: u32,
         tracked_insts_len: u32,
         src_hash_deps_len: u32,
@@ -2775,8 +2775,8 @@ pub fn saveState(comp: *Compilation) !void {
     const header: Header = .{
         .intern_pool = .{
             //.items_len = @intCast(ip.items.len),
-            .extra_len = @intCast(ip.extra.items.len),
-            .limbs_len = @intCast(ip.limbs.items.len),
+            //.extra_len = @intCast(ip.extra.items.len),
+            //.limbs_len = @intCast(ip.limbs.items.len),
             //.string_bytes_len = @intCast(ip.string_bytes.items.len),
             .tracked_insts_len = @intCast(ip.tracked_insts.count()),
             .src_hash_deps_len = @intCast(ip.src_hash_deps.count()),
@@ -2790,8 +2790,8 @@ pub fn saveState(comp: *Compilation) !void {
         },
     };
     addBuf(&bufs_list, &bufs_len, mem.asBytes(&header));
-    addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items));
-    addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items));
+    //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.limbs.items));
+    //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.extra.items));
     //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.data)));
     //addBuf(&bufs_list, &bufs_len, mem.sliceAsBytes(ip.items.items(.tag)));
     //addBuf(&bufs_list, &bufs_len, ip.string_bytes.items);
src/InternPool.zig (1930 lines changed)
File diff suppressed because it is too large.
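The InternPool.zig diff that actually introduces the thread-safe lists is suppressed above, but the call sites elsewhere in this commit (e.g. `ip.getLocal(pt.tid).getMutableStrings(gpa)`, `strings.ensureUnusedCapacity`, `strings.appendAssumeCapacity`, and `strings.mutate.len` in the Value.zig hunk further down) suggest its shape: one append-only list per thread, mutated only by its owner. A rough sketch of that idea; apart from the names just listed, every type and field below is an assumption, not the real implementation:

```zig
const std = @import("std");

/// Sketch only: one shard per thread id, each mutated exclusively by its
/// owning thread, so the append path needs no mutex.
pub fn PerThreadList(comptime T: type) type {
    return struct {
        const List = @This();

        locals: []Local,

        pub const Local = struct {
            items: std.ArrayListUnmanaged(T) = .{},
            /// Published length: bumped by the owning thread only after the
            /// element has been fully written, so readers on other threads
            /// never observe a partially initialized item.
            mutate: struct { len: u32 } = .{ .len = 0 },

            pub fn ensureUnusedCapacity(local: *Local, gpa: std.mem.Allocator, n: usize) !void {
                try local.items.ensureUnusedCapacity(gpa, n);
            }

            pub fn appendAssumeCapacity(local: *Local, item: T) u32 {
                const index = local.mutate.len;
                local.items.appendAssumeCapacity(item);
                local.mutate.len = index + 1;
                return index;
            }
        };

        /// Append-side entry point: no lock, because `tid` is unique to the
        /// calling thread (cf. `ip.getLocal(pt.tid)` in Value.zig below).
        pub fn getLocal(list: List, tid: usize) *Local {
            return &list.locals[tid];
        }
    };
}

test "append into this thread's shard (sketch)" {
    const gpa = std.testing.allocator;
    var shard: PerThreadList(u8).Local = .{};
    defer shard.items.deinit(gpa);

    try shard.ensureUnusedCapacity(gpa, 3);
    _ = shard.appendAssumeCapacity('a');
    const i = shard.appendAssumeCapacity('b');
    try std.testing.expectEqual(@as(u32, 1), i);
    try std.testing.expectEqual(@as(u32, 2), shard.mutate.len);
}
```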
@@ -36925,7 +36925,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
         .none,
         => unreachable,
 
-        _ => switch (ty.toIntern().getTag(ip)) {
+        _ => switch (ty.toIntern().unwrap(ip).getTag(ip)) {
             .removed => unreachable,
 
             .type_int_signed, // i0 handled above
@@ -3686,7 +3686,7 @@ pub fn resolveFields(ty: Type, pt: Zcu.PerThread) SemaError!void {
         .empty_struct => unreachable,
         .generic_poison => unreachable,
 
-        else => switch (ty_ip.getTag(ip)) {
+        else => switch (ty_ip.unwrap(ip).getTag(ip)) {
             .type_struct,
             .type_struct_packed,
             .type_struct_packed_inits,
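Both this hunk and the `typeHasOnePossibleValue` hunk above insert `.unwrap(ip)` before `.getTag(ip)`: an interned index is no longer read as a direct position in one global items array, but must first be resolved, presumably into an owning thread plus a local offset, before per-item data can be fetched. A self-contained toy model of that pattern; the bit packing, field names, and types are guesses, not the real InternPool definitions:

```zig
const std = @import("std");

// Toy model of the `index.unwrap(ip).getTag(ip)` pattern seen above.
const Tag = enum { type_int_signed, type_struct, removed };

const Ip = struct {
    /// Per-thread item storage; indexed first by thread id, then locally.
    locals: []const Local,

    const Local = struct { tags: []const Tag };

    const Index = enum(u32) {
        _,

        /// Resolve a global index into (owning thread, local offset).
        fn unwrap(index: Index, ip: *const Ip) Unwrapped {
            _ = ip; // the real resolution presumably consults the pool
            const raw = @intFromEnum(index);
            return .{ .tid = @intCast(raw >> 24), .local = @truncate(raw) };
        }
    };

    const Unwrapped = struct {
        tid: u8,
        local: u24,

        fn getTag(u: Unwrapped, ip: *const Ip) Tag {
            return ip.locals[u.tid].tags[u.local];
        }
    };
};

test "unwrap, then read per-thread data (toy)" {
    const ip: Ip = .{ .locals = &.{.{ .tags = &.{ .type_int_signed, .type_struct } }} };
    const index: Ip.Index = @enumFromInt((0 << 24) | 1); // tid 0, local index 1
    try std.testing.expectEqual(Tag.type_struct, index.unwrap(&ip).getTag(&ip));
}
```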
@@ -110,14 +110,13 @@ fn arrayToIpString(val: Value, len_u64: u64, pt: Zcu.PerThread) !InternPool.Null
     const ip = &mod.intern_pool;
     const len: u32 = @intCast(len_u64);
     const strings = ip.getLocal(pt.tid).getMutableStrings(gpa);
-    const strings_len = strings.lenPtr();
     try strings.ensureUnusedCapacity(len);
     for (0..len) |i| {
         // I don't think elemValue has the possibility to affect ip.string_bytes. Let's
         // assert just to be sure.
-        const prev_len = strings_len.*;
+        const prev_len = strings.mutate.len;
         const elem_val = try val.elemValue(pt, i);
-        assert(strings_len.* == prev_len);
+        assert(strings.mutate.len == prev_len);
         const byte: u8 = @intCast(elem_val.toUnsignedInt(pt));
         strings.appendAssumeCapacity(.{byte});
     }
@@ -3,7 +3,7 @@ zcu: *Zcu,
 /// Dense, per-thread unique index.
 tid: Id,
 
-pub const Id = if (InternPool.single_threaded) enum { main } else enum(usize) { main, _ };
+pub const Id = if (InternPool.single_threaded) enum { main } else enum(u8) { main, _ };
 
 pub fn astGenFile(
     pt: Zcu.PerThread,
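Narrowing `Id` from `enum(usize)` to `enum(u8)` caps the number of per-thread shards at 256, which lines up with the `std.math.maxInt(u8)` clamp applied to the job count in the main.zig hunks below. A small illustration; the `idFromIndex` helper is invented:

```zig
const std = @import("std");

// Illustration only: a dense per-thread id that fits in one byte, as in the
// enum(u8) form of `Id` above.
const Id = enum(u8) { main, _ };

fn idFromIndex(index: usize) Id {
    // In the compiler this cast is safe because the worker count is clamped
    // to std.math.maxInt(u8) when the thread pool is created (see main.zig
    // below), so at most 256 ids (main + 255 workers) ever exist.
    return @enumFromInt(@as(u8, @intCast(index)));
}

test "per-thread ids stay within one byte" {
    try std.testing.expectEqual(Id.main, idFromIndex(0));
    try std.testing.expectEqual(@as(u8, 255), @intFromEnum(idFromIndex(255)));
}
```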
src/main.zig (44 lines changed)
@@ -403,6 +403,7 @@ const usage_build_generic =
     \\General Options:
     \\  -h, --help                Print this help and exit
     \\  --color [auto|off|on]     Enable or disable colored error messages
+    \\  -j<N>                     Limit concurrent jobs (default is to use all CPU cores)
     \\  -femit-bin[=path]         (default) Output machine code
     \\  -fno-emit-bin             Do not output machine code
     \\  -femit-asm[=path]         Output .s (assembly code)
@@ -1004,6 +1005,7 @@ fn buildOutputType(
         .on
     else
         .auto;
+    var n_jobs: ?u32 = null;
 
     switch (arg_mode) {
         .build, .translate_c, .zig_test, .run => {
@@ -1141,6 +1143,17 @@ fn buildOutputType(
                         color = std.meta.stringToEnum(Color, next_arg) orelse {
                             fatal("expected [auto|on|off] after --color, found '{s}'", .{next_arg});
                         };
+                    } else if (mem.startsWith(u8, arg, "-j")) {
+                        const str = arg["-j".len..];
+                        const num = std.fmt.parseUnsigned(u32, str, 10) catch |err| {
+                            fatal("unable to parse jobs count '{s}': {s}", .{
+                                str, @errorName(err),
+                            });
+                        };
+                        if (num < 1) {
+                            fatal("number of jobs must be at least 1\n", .{});
+                        }
+                        n_jobs = num;
                     } else if (mem.eql(u8, arg, "--subsystem")) {
                         subsystem = try parseSubSystem(args_iter.nextOrFatal());
                     } else if (mem.eql(u8, arg, "-O")) {
@@ -3092,7 +3105,11 @@ fn buildOutputType(
     defer emit_implib_resolved.deinit();
 
     var thread_pool: ThreadPool = undefined;
-    try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
+    try thread_pool.init(.{
+        .allocator = gpa,
+        .n_jobs = @min(@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)),
+        .track_ids = true,
+    });
     defer thread_pool.deinit();
 
     var cleanup_local_cache_dir: ?fs.Dir = null;
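The `.n_jobs` expression above clamps the effective job count into `[1, maxInt(u8)]`, falling back to the detected CPU count (or 1 if detection fails) when no `-j` flag was given. The same arithmetic, factored into a hypothetical helper for a quick check:

```zig
const std = @import("std");

// The clamping expression from the hunk above, extracted so the edge cases
// are easy to verify; `effectiveJobs` itself is not part of this commit.
fn effectiveJobs(n_jobs: ?u32, cpu_count: u32) u32 {
    return @min(@max(n_jobs orelse cpu_count, 1), std.math.maxInt(u8));
}

test "job count clamping" {
    try std.testing.expectEqual(@as(u32, 8), effectiveJobs(null, 8)); // default: detected CPU count
    try std.testing.expectEqual(@as(u32, 4), effectiveJobs(4, 16)); // explicit -j4 wins
    try std.testing.expectEqual(@as(u32, 255), effectiveJobs(1024, 16)); // capped at maxInt(u8)
    try std.testing.expectEqual(@as(u32, 1), effectiveJobs(null, 0)); // floor of 1 if detection fails
}
```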
@@ -4644,6 +4661,7 @@ const usage_build =
     \\    all                        Print the build summary in its entirety
     \\    failures                   (Default) Only print failed steps
     \\    none                       Do not print the build summary
+    \\  -j<N>                        Limit concurrent jobs (default is to use all CPU cores)
     \\  --build-file [file]          Override path to build.zig
     \\  --cache-dir [path]           Override path to local Zig cache directory
     \\  --global-cache-dir [path]    Override path to global Zig cache directory
@@ -4718,6 +4736,7 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     try child_argv.append("-Z" ++ results_tmp_file_nonce);
 
     var color: Color = .auto;
+    var n_jobs: ?u32 = null;
 
     {
         var i: usize = 0;
@@ -4811,6 +4830,17 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
                 };
                 try child_argv.appendSlice(&.{ arg, args[i] });
                 continue;
+            } else if (mem.startsWith(u8, arg, "-j")) {
+                const str = arg["-j".len..];
+                const num = std.fmt.parseUnsigned(u32, str, 10) catch |err| {
+                    fatal("unable to parse jobs count '{s}': {s}", .{
+                        str, @errorName(err),
+                    });
+                };
+                if (num < 1) {
+                    fatal("number of jobs must be at least 1\n", .{});
+                }
+                n_jobs = num;
             } else if (mem.eql(u8, arg, "--seed")) {
                 if (i + 1 >= args.len) fatal("expected argument after '{s}'", .{arg});
                 i += 1;
@@ -4895,7 +4925,11 @@ fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
     child_argv.items[argv_index_cache_dir] = local_cache_directory.path orelse cwd_path;
 
     var thread_pool: ThreadPool = undefined;
-    try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
+    try thread_pool.init(.{
+        .allocator = gpa,
+        .n_jobs = @min(@max(n_jobs orelse std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)),
+        .track_ids = true,
+    });
     defer thread_pool.deinit();
 
     // Dummy http client that is not actually used when only_core_functionality is enabled.
@@ -5329,7 +5363,11 @@ fn jitCmd(
     defer global_cache_directory.handle.close();
 
     var thread_pool: ThreadPool = undefined;
-    try thread_pool.init(.{ .allocator = gpa, .track_ids = true });
+    try thread_pool.init(.{
+        .allocator = gpa,
+        .n_jobs = @min(@max(std.Thread.getCpuCount() catch 1, 1), std.math.maxInt(u8)),
+        .track_ids = true,
+    });
     defer thread_pool.deinit();
 
     var child_argv: std.ArrayListUnmanaged([]const u8) = .{};