InternPool: make global_error_set thread-safe

Jacob Young 2024-07-10 21:39:11 -04:00
parent 98f3a262a7
commit c2316c5228
15 changed files with 252 additions and 96 deletions
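The new InternPool.GlobalErrorSet introduced in this commit combines lock-free reads with mutex-serialized inserts. The standalone Zig sketch below is an editorial illustration, not code from the diff: it uses simplified stand-in types (a fixed array of u32 "name ids" instead of the real Local.List and open-addressed Shard.Map, ordering names as in recent Zig) to show the shape of the double-checked pattern — readers take an acquire snapshot and never lock; writers lock, re-check for a racing insert, append, then publish with a release store.

const std = @import("std");

// Minimal stand-in for the pattern applied to global_error_set (not the
// actual InternPool types).
const ErrorSetSketch = struct {
    mutex: std.Thread.Mutex = .{},
    names: [capacity]u32 = undefined, // interned name ids, in error-value order
    len: usize = 0, // number of published entries

    const capacity = 1024;

    /// Lock-free lookup; returns the 1-based error value if `name` is present.
    fn find(set: *const ErrorSetSketch, name: u32) ?u32 {
        // Acquire pairs with the release store in getOrPut, making every
        // entry below `len` visible before we read it.
        const len = @atomicLoad(usize, &set.len, .acquire);
        for (set.names[0..len], 1..) |existing, value| {
            if (existing == name) return @as(u32, @intCast(value));
        }
        return null;
    }

    /// Insert-or-lookup; only this path takes the mutex.
    fn getOrPut(set: *ErrorSetSketch, name: u32) u32 {
        if (set.find(name)) |value| return value;
        set.mutex.lock();
        defer set.mutex.unlock();
        // Double-check: another writer may have inserted while we waited.
        if (set.find(name)) |value| return value;
        const len = set.len;
        std.debug.assert(len < capacity); // the sketch omits growth/rehashing
        set.names[len] = name;
        // Publish the append; pairs with the acquire load in `find`.
        @atomicStore(usize, &set.len, len + 1, .release);
        return @as(u32, @intCast(len + 1));
    }
};

test "lookup and insert (single-threaded check of the API shape)" {
    var set: ErrorSetSketch = .{};
    try std.testing.expectEqual(@as(u32, 1), set.getOrPut(42));
    try std.testing.expectEqual(@as(u32, 2), set.getOrPut(7));
    try std.testing.expectEqual(@as(u32, 1), set.getOrPut(42));
    try std.testing.expectEqual(@as(?u32, null), set.find(99));
}

The real implementation in the InternPool.zig hunk below additionally grows and rehashes its open-addressed map in an arena once the load factor passes 3/5, then republishes the new map with a release store.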

View File

@@ -2943,7 +2943,7 @@ pub fn totalErrorCount(comp: *Compilation) u32 {
}
}
if (zcu.global_error_set.entries.len - 1 > zcu.error_limit) {
if (zcu.intern_pool.global_error_set.mutate.list.len > zcu.error_limit) {
total += 1;
}
}
@@ -3072,7 +3072,7 @@ pub fn getAllErrorsAlloc(comp: *Compilation) !ErrorBundle {
try addModuleErrorMsg(zcu, &bundle, value.*, &all_references);
}
const actual_error_count = zcu.global_error_set.entries.len - 1;
const actual_error_count = zcu.intern_pool.global_error_set.mutate.list.len;
if (actual_error_count > zcu.error_limit) {
try bundle.addRootErrorMessage(.{
.msg = try bundle.printString("ZCU used more errors than possible: used {d}, max {d}", .{

View File

@@ -6,6 +6,8 @@ locals: []Local = &.{},
/// Length must be a power of two and represents the number of simultaneous
/// writers that can mutate any single sharded data structure.
shards: []Shard = &.{},
/// Key is the error name, index is the error tag value. Index 0 has a length-0 string.
global_error_set: GlobalErrorSet = GlobalErrorSet.empty,
/// Cached number of active bits in a `tid`.
tid_width: if (single_threaded) u0 else std.math.Log2Int(u32) = 0,
/// Cached shift amount to put a `tid` in the top bits of a 31-bit value.
@@ -10129,10 +10131,10 @@ pub fn getOrPutTrailingString(
defer shard.mutate.string_map.len += 1;
const map_header = map.header().*;
if (shard.mutate.string_map.len < map_header.capacity * 3 / 5) {
strings.appendAssumeCapacity(.{0});
const entry = &map.entries[map_index];
entry.hash = hash;
entry.release(@enumFromInt(@intFromEnum(value)));
strings.appendAssumeCapacity(.{0});
return value;
}
const arena_state = &ip.getLocal(tid).mutate.arena;
@@ -10171,12 +10173,12 @@ pub fn getOrPutTrailingString(
map_index &= new_map_mask;
if (map.entries[map_index].value == .none) break;
}
strings.appendAssumeCapacity(.{0});
map.entries[map_index] = .{
.value = @enumFromInt(@intFromEnum(value)),
.hash = hash,
};
shard.shared.string_map.release(new_map);
strings.appendAssumeCapacity(.{0});
return value;
}
@@ -10942,3 +10944,159 @@ fn ptrsHaveSameAlignment(ip: *InternPool, a_ty: Index, a_info: Key.PtrType, b_ty
return a_info.flags.alignment == b_info.flags.alignment and
(a_info.child == b_info.child or a_info.flags.alignment != .none);
}
const GlobalErrorSet = struct {
shared: struct {
names: Names,
map: Shard.Map(GlobalErrorSet.Index),
} align(std.atomic.cache_line),
mutate: Local.MutexListMutate align(std.atomic.cache_line),
const Names = Local.List(struct { NullTerminatedString });
const empty: GlobalErrorSet = .{
.shared = .{
.names = Names.empty,
.map = Shard.Map(GlobalErrorSet.Index).empty,
},
.mutate = Local.MutexListMutate.empty,
};
const Index = enum(Zcu.ErrorInt) {
none = 0,
_,
};
/// Not thread-safe, may only be called from the main thread.
pub fn getNamesFromMainThread(ges: *const GlobalErrorSet) []const NullTerminatedString {
return ges.shared.names.view().items(.@"0")[0..ges.mutate.list.len];
}
fn getErrorValue(
ges: *GlobalErrorSet,
gpa: Allocator,
arena_state: *std.heap.ArenaAllocator.State,
name: NullTerminatedString,
) Allocator.Error!GlobalErrorSet.Index {
if (name == .empty) return .none;
const hash = std.hash.uint32(@intFromEnum(name));
var map = ges.shared.map.acquire();
const Map = @TypeOf(map);
var map_mask = map.header().mask();
const names = ges.shared.names.acquire();
var map_index = hash;
while (true) : (map_index += 1) {
map_index &= map_mask;
const entry = &map.entries[map_index];
const index = entry.acquire();
if (index == .none) break;
if (entry.hash != hash) continue;
if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index;
}
ges.mutate.mutex.lock();
defer ges.mutate.mutex.unlock();
if (map.entries != ges.shared.map.entries) {
map = ges.shared.map;
map_mask = map.header().mask();
map_index = hash;
}
while (true) : (map_index += 1) {
map_index &= map_mask;
const entry = &map.entries[map_index];
const index = entry.value;
if (index == .none) break;
if (entry.hash != hash) continue;
if (names.view().items(.@"0")[@intFromEnum(index) - 1] == name) return index;
}
const mutable_names: Names.Mutable = .{
.gpa = gpa,
.arena = arena_state,
.mutate = &ges.mutate.list,
.list = &ges.shared.names,
};
try mutable_names.ensureUnusedCapacity(1);
const map_header = map.header().*;
if (ges.mutate.list.len < map_header.capacity * 3 / 5) {
mutable_names.appendAssumeCapacity(.{name});
const index: GlobalErrorSet.Index = @enumFromInt(mutable_names.mutate.len);
const entry = &map.entries[map_index];
entry.hash = hash;
entry.release(index);
return index;
}
var arena = arena_state.promote(gpa);
defer arena_state.* = arena.state;
const new_map_capacity = map_header.capacity * 2;
const new_map_buf = try arena.allocator().alignedAlloc(
u8,
Map.alignment,
Map.entries_offset + new_map_capacity * @sizeOf(Map.Entry),
);
const new_map: Map = .{ .entries = @ptrCast(new_map_buf[Map.entries_offset..].ptr) };
new_map.header().* = .{ .capacity = new_map_capacity };
@memset(new_map.entries[0..new_map_capacity], .{ .value = .none, .hash = undefined });
const new_map_mask = new_map.header().mask();
map_index = 0;
while (map_index < map_header.capacity) : (map_index += 1) {
const entry = &map.entries[map_index];
const index = entry.value;
if (index == .none) continue;
const item_hash = entry.hash;
var new_map_index = item_hash;
while (true) : (new_map_index += 1) {
new_map_index &= new_map_mask;
const new_entry = &new_map.entries[new_map_index];
if (new_entry.value != .none) continue;
new_entry.* = .{
.value = index,
.hash = item_hash,
};
break;
}
}
map = new_map;
map_index = hash;
while (true) : (map_index += 1) {
map_index &= new_map_mask;
if (map.entries[map_index].value == .none) break;
}
mutable_names.appendAssumeCapacity(.{name});
const index: GlobalErrorSet.Index = @enumFromInt(mutable_names.mutate.len);
map.entries[map_index] = .{ .value = index, .hash = hash };
ges.shared.map.release(new_map);
return index;
}
fn getErrorValueIfExists(
ges: *const GlobalErrorSet,
name: NullTerminatedString,
) ?GlobalErrorSet.Index {
if (name == .empty) return .none;
const hash = std.hash.uint32(@intFromEnum(name));
const map = ges.shared.map.acquire();
const map_mask = map.header().mask();
const names_items = ges.shared.names.acquire().view().items(.@"0");
var map_index = hash;
while (true) : (map_index += 1) {
map_index &= map_mask;
const entry = &map.entries[map_index];
const index = entry.acquire();
if (index == .none) return null;
if (entry.hash != hash) continue;
if (names_items[@intFromEnum(index) - 1] == name) return index;
}
}
};
pub fn getErrorValue(
ip: *InternPool,
gpa: Allocator,
tid: Zcu.PerThread.Id,
name: NullTerminatedString,
) Allocator.Error!Zcu.ErrorInt {
return @intFromEnum(try ip.global_error_set.getErrorValue(gpa, &ip.getLocal(tid).mutate.arena, name));
}
pub fn getErrorValueIfExists(ip: *const InternPool, name: NullTerminatedString) ?Zcu.ErrorInt {
return @intFromEnum(ip.global_error_set.getErrorValueIfExists(name) orelse return null);
}

View File

@@ -3473,7 +3473,7 @@ fn zirErrorSetDecl(
const name_index: Zir.NullTerminatedString = @enumFromInt(sema.code.extra[extra_index]);
const name = sema.code.nullTerminatedString(name_index);
const name_ip = try mod.intern_pool.getOrPutString(gpa, pt.tid, name, .no_embedded_nulls);
_ = try mod.getErrorValue(name_ip);
_ = try pt.getErrorValue(name_ip);
const result = names.getOrPutAssumeCapacity(name_ip);
assert(!result.found_existing); // verified in AstGen
}
@@ -8705,7 +8705,7 @@ fn zirErrorValue(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!
inst_data.get(sema.code),
.no_embedded_nulls,
);
_ = try pt.zcu.getErrorValue(name);
_ = try pt.getErrorValue(name);
// Create an error set type with only this error value, and return the value.
const error_set_type = try pt.singleErrorSetType(name);
return Air.internedToRef((try pt.intern(.{ .err = .{
@@ -8735,7 +8735,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const err_name = ip.indexToKey(val.toIntern()).err.name;
return Air.internedToRef((try pt.intValue(
err_int_ty,
try mod.getErrorValue(err_name),
try pt.getErrorValue(err_name),
)).toIntern());
}
@@ -8746,10 +8746,7 @@ fn zirIntFromError(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const names = ip.indexToKey(err_set_ty_index).error_set_type.names;
switch (names.len) {
0 => return Air.internedToRef((try pt.intValue(err_int_ty, 0)).toIntern()),
1 => {
const int: Module.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[0]).?);
return pt.intRef(err_int_ty, int);
},
1 => return pt.intRef(err_int_ty, ip.getErrorValueIfExists(names.get(ip)[0]).?),
else => {},
}
},
@@ -8765,6 +8762,7 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
const pt = sema.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = block.nodeOffset(extra.node);
const operand_src = block.builtinCallArgSrc(extra.node, 0);
@@ -8774,11 +8772,16 @@ fn zirErrorFromInt(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstD
if (try sema.resolveDefinedValue(block, operand_src, operand)) |value| {
const int = try sema.usizeCast(block, operand_src, try value.toUnsignedIntSema(pt));
if (int > mod.global_error_set.count() or int == 0)
if (int > len: {
const mutate = &ip.global_error_set.mutate;
mutate.mutex.lock();
defer mutate.mutex.unlock();
break :len mutate.list.len;
} or int == 0)
return sema.fail(block, operand_src, "integer value '{d}' represents no error", .{int});
return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = .anyerror_type,
.name = mod.global_error_set.keys()[int],
.name = ip.global_error_set.shared.names.acquire().view().items(.@"0")[int - 1],
} })));
}
try sema.requireRuntimeBlock(block, src, operand_src);
@@ -14005,7 +14008,7 @@ fn zirRetErrValueCode(sema: *Sema, inst: Zir.Inst.Index) CompileError!Air.Inst.R
inst_data.get(sema.code),
.no_embedded_nulls,
);
_ = try mod.getErrorValue(name);
_ = try pt.getErrorValue(name);
const error_set_type = try pt.singleErrorSetType(name);
return Air.internedToRef((try pt.intern(.{ .err = .{
.ty = error_set_type.toIntern(),
@@ -19564,7 +19567,7 @@ fn zirRetErrValue(
inst_data.get(sema.code),
.no_embedded_nulls,
);
_ = try mod.getErrorValue(err_name);
_ = try pt.getErrorValue(err_name);
// Return the error code from the function.
const error_set_type = try pt.singleErrorSetType(err_name);
const result_inst = Air.internedToRef((try pt.intern(.{ .err = .{
@@ -21607,7 +21610,7 @@ fn zirReify(
const name = try sema.sliceToIpString(block, src, name_val, .{
.needed_comptime_reason = "error set contents must be comptime-known",
});
_ = try mod.getErrorValue(name);
_ = try pt.getErrorValue(name);
const gop = names.getOrPutAssumeCapacity(name);
if (gop.found_existing) {
return sema.fail(block, src, "duplicate error '{}'", .{
@@ -27485,7 +27488,7 @@ fn fieldVal(
},
.simple_type => |t| {
assert(t == .anyerror);
_ = try mod.getErrorValue(field_name);
_ = try pt.getErrorValue(field_name);
},
else => unreachable,
}
@@ -27725,7 +27728,7 @@ fn fieldPtr(
},
.simple_type => |t| {
assert(t == .anyerror);
_ = try mod.getErrorValue(field_name);
_ = try pt.getErrorValue(field_name);
},
else => unreachable,
}

View File

@@ -417,7 +417,7 @@ pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) erro
var bigint_buffer: BigIntSpace = undefined;
const bigint = BigIntMutable.init(
&bigint_buffer.limbs,
mod.global_error_set.getIndex(name).?,
ip.getErrorValueIfExists(name).?,
).toConst();
bigint.writeTwosComplement(buffer[0..byte_count], endian);
},
@@ -427,7 +427,7 @@ pub fn writeToMemory(val: Value, ty: Type, pt: Zcu.PerThread, buffer: []u8) erro
if (val.unionTag(mod)) |union_tag| {
const union_obj = mod.typeToUnion(ty).?;
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
const field_type = Type.fromInterned(union_obj.field_types.get(&mod.intern_pool)[field_index]);
const field_type = Type.fromInterned(union_obj.field_types.get(ip)[field_index]);
const field_val = try val.fieldValue(pt, field_index);
const byte_count: usize = @intCast(field_type.abiSize(pt));
return writeToMemory(field_val, field_type, pt, buffer[0..byte_count]);
@@ -1455,9 +1455,9 @@ pub fn getErrorName(val: Value, mod: *const Module) InternPool.OptionalNullTermi
};
}
pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt {
return if (getErrorName(val, mod).unwrap()) |err_name|
@intCast(mod.global_error_set.getIndex(err_name).?)
pub fn getErrorInt(val: Value, zcu: *Zcu) Module.ErrorInt {
return if (getErrorName(val, zcu).unwrap()) |err_name|
zcu.intern_pool.getErrorValueIfExists(err_name).?
else
0;
}

View File

@@ -141,9 +141,6 @@ failed_exports: std.AutoArrayHashMapUnmanaged(u32, *ErrorMsg) = .{},
/// are stored here.
cimport_errors: std.AutoArrayHashMapUnmanaged(AnalUnit, std.zig.ErrorBundle) = .{},
/// Key is the error name, index is the error tag value. Index 0 has a length-0 string.
global_error_set: GlobalErrorSet = .{},
/// Maximum amount of distinct error values, set by --error-limit
error_limit: ErrorInt,
@@ -2399,7 +2396,6 @@ pub const CompileError = error{
pub fn init(mod: *Module, thread_count: usize) !void {
const gpa = mod.gpa;
try mod.intern_pool.init(gpa, thread_count);
try mod.global_error_set.put(gpa, .empty, {});
}
pub fn deinit(zcu: *Zcu) void {
@@ -2471,8 +2467,6 @@ pub fn deinit(zcu: *Zcu) void {
zcu.single_exports.deinit(gpa);
zcu.multi_exports.deinit(gpa);
zcu.global_error_set.deinit(gpa);
zcu.potentially_outdated.deinit(gpa);
zcu.outdated.deinit(gpa);
zcu.outdated_ready.deinit(gpa);
@@ -3108,22 +3102,6 @@ pub fn addUnitReference(zcu: *Zcu, src_unit: AnalUnit, referenced_unit: AnalUnit
gop.value_ptr.* = @intCast(ref_idx);
}
pub fn getErrorValue(
mod: *Module,
name: InternPool.NullTerminatedString,
) Allocator.Error!ErrorInt {
const gop = try mod.global_error_set.getOrPut(mod.gpa, name);
return @as(ErrorInt, @intCast(gop.index));
}
pub fn getErrorValueFromSlice(
mod: *Module,
name: []const u8,
) Allocator.Error!ErrorInt {
const interned_name = try mod.intern_pool.getOrPutString(mod.gpa, name);
return getErrorValue(mod, interned_name);
}
pub fn errorSetBits(mod: *Module) u16 {
if (mod.error_limit == 0) return 0;
return std.math.log2_int_ceil(ErrorInt, mod.error_limit + 1); // +1 for no error

View File

@@ -2287,6 +2287,17 @@ pub fn allocateNewDecl(pt: Zcu.PerThread, namespace: Zcu.Namespace.Index) !Zcu.D
return decl_index;
}
pub fn getErrorValue(
pt: Zcu.PerThread,
name: InternPool.NullTerminatedString,
) Allocator.Error!Zcu.ErrorInt {
return pt.zcu.intern_pool.getErrorValue(pt.zcu.gpa, pt.tid, name);
}
pub fn getErrorValueFromSlice(pt: Zcu.PerThread, name: []const u8) Allocator.Error!Zcu.ErrorInt {
return pt.getErrorValue(try pt.zcu.intern_pool.getOrPutString(pt.zcu.gpa, name));
}
pub fn initNewAnonDecl(
pt: Zcu.PerThread,
new_decl_index: Zcu.Decl.Index,

View File

@@ -3304,7 +3304,7 @@ fn lowerConstant(func: *CodeGen, val: Value, ty: Type) InnerError!WValue {
}
},
.err => |err| {
const int = try mod.getErrorValue(err.name);
const int = try pt.getErrorValue(err.name);
return WValue{ .imm32 = int };
},
.error_union => |error_union| {
@@ -3452,30 +3452,25 @@ fn emitUndefined(func: *CodeGen, ty: Type) InnerError!WValue {
/// Returns a `Value` as a signed 32 bit value.
/// It's illegal to provide a value with a type that cannot be represented
/// as an integer value.
fn valueAsI32(func: *const CodeGen, val: Value, ty: Type) i32 {
fn valueAsI32(func: *const CodeGen, val: Value) i32 {
const pt = func.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
switch (val.ip_index) {
.none => {},
switch (val.toIntern()) {
.bool_true => return 1,
.bool_false => return 0,
else => return switch (mod.intern_pool.indexToKey(val.ip_index)) {
.enum_tag => |enum_tag| intIndexAsI32(&mod.intern_pool, enum_tag.int, pt),
else => return switch (ip.indexToKey(val.ip_index)) {
.enum_tag => |enum_tag| intIndexAsI32(ip, enum_tag.int, pt),
.int => |int| intStorageAsI32(int.storage, pt),
.ptr => |ptr| {
assert(ptr.base_addr == .int);
return @intCast(ptr.byte_offset);
},
.err => |err| @as(i32, @bitCast(@as(Zcu.ErrorInt, @intCast(mod.global_error_set.getIndex(err.name).?)))),
.err => |err| @bitCast(ip.getErrorValueIfExists(err.name).?),
else => unreachable,
},
}
return switch (ty.zigTypeTag(mod)) {
.ErrorSet => @as(i32, @bitCast(val.getErrorInt(mod))),
else => unreachable, // Programmer called this function for an illegal type
};
}
fn intIndexAsI32(ip: *const InternPool, int: InternPool.Index, pt: Zcu.PerThread) i32 {
@@ -4098,7 +4093,7 @@ fn airSwitchBr(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
for (items, 0..) |ref, i| {
const item_val = (try func.air.value(ref, pt)).?;
const int_val = func.valueAsI32(item_val, target_ty);
const int_val = func.valueAsI32(item_val);
if (lowest_maybe == null or int_val < lowest_maybe.?) {
lowest_maybe = int_val;
}
@@ -7454,7 +7449,7 @@ fn airErrorSetHasValue(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
var lowest: ?u32 = null;
var highest: ?u32 = null;
for (0..names.len) |name_index| {
const err_int: Zcu.ErrorInt = @intCast(mod.global_error_set.getIndex(names.get(ip)[name_index]).?);
const err_int = ip.getErrorValueIfExists(names.get(ip)[name_index]).?;
if (lowest) |*l| {
if (err_int < l.*) {
l.* = err_int;

View File

@@ -16435,7 +16435,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
.size = .dword,
.index = err_reg.to64(),
.scale = .@"4",
.disp = 4,
.disp = (1 - 1) * 4,
} },
},
);
@@ -16448,7 +16448,7 @@ fn airErrorName(self: *Self, inst: Air.Inst.Index) !void {
.size = .dword,
.index = err_reg.to64(),
.scale = .@"4",
.disp = 8,
.disp = (2 - 1) * 4,
} },
},
);

View File

@@ -137,10 +137,10 @@ pub fn generateLazySymbol(
if (lazy_sym.ty.isAnyError(pt.zcu)) {
alignment.* = .@"4";
const err_names = pt.zcu.global_error_set.keys();
const err_names = ip.global_error_set.getNamesFromMainThread();
mem.writeInt(u32, try code.addManyAsArray(4), @intCast(err_names.len), endian);
var offset = code.items.len;
try code.resize((1 + err_names.len + 1) * 4);
try code.resize((err_names.len + 1) * 4);
for (err_names) |err_name_nts| {
const err_name = err_name_nts.toSlice(ip);
mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian);
@@ -243,13 +243,13 @@ pub fn generateSymbol(
int_val.writeTwosComplement(try code.addManyAsSlice(abi_size), endian);
},
.err => |err| {
const int = try mod.getErrorValue(err.name);
const int = try pt.getErrorValue(err.name);
try code.writer().writeInt(u16, @intCast(int), endian);
},
.error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(mod);
const err_val: u16 = switch (error_union.val) {
.err_name => |err_name| @intCast(try mod.getErrorValue(err_name)),
.err_name => |err_name| @intCast(try pt.getErrorValue(err_name)),
.payload => 0,
};
@@ -1058,7 +1058,7 @@ pub fn genTypedValue(
},
.ErrorSet => {
const err_name = ip.indexToKey(val.toIntern()).err.name;
const error_index = zcu.global_error_set.getIndex(err_name).?;
const error_index = try pt.getErrorValue(err_name);
return GenResult.mcv(.{ .immediate = error_index });
},
.ErrorUnion => {

View File

@@ -2622,10 +2622,11 @@ pub fn genErrDecls(o: *Object) !void {
var max_name_len: usize = 0;
// do not generate an invalid empty enum when the global error set is empty
if (zcu.global_error_set.keys().len > 1) {
const names = ip.global_error_set.getNamesFromMainThread();
if (names.len > 0) {
try writer.writeAll("enum {\n");
o.indent_writer.pushIndent();
for (zcu.global_error_set.keys()[1..], 1..) |name_nts, value| {
for (names, 1..) |name_nts, value| {
const name = name_nts.toSlice(ip);
max_name_len = @max(name.len, max_name_len);
const err_val = try pt.intern(.{ .err = .{
@@ -2644,7 +2645,7 @@ pub fn genErrDecls(o: *Object) !void {
defer o.dg.gpa.free(name_buf);
@memcpy(name_buf[0..name_prefix.len], name_prefix);
for (zcu.global_error_set.keys()) |name| {
for (names) |name| {
const name_slice = name.toSlice(ip);
@memcpy(name_buf[name_prefix.len..][0..name_slice.len], name_slice);
const identifier = name_buf[0 .. name_prefix.len + name_slice.len];
@@ -2674,7 +2675,7 @@ pub fn genErrDecls(o: *Object) !void {
}
const name_array_ty = try pt.arrayType(.{
.len = zcu.global_error_set.count(),
.len = 1 + names.len,
.child = .slice_const_u8_sentinel_0_type,
});
@@ -2688,9 +2689,9 @@ pub fn genErrDecls(o: *Object) !void {
.complete,
);
try writer.writeAll(" = {");
for (zcu.global_error_set.keys(), 0..) |name_nts, value| {
for (names, 1..) |name_nts, val| {
const name = name_nts.toSlice(ip);
if (value != 0) try writer.writeByte(',');
if (val > 1) try writer.writeAll(", ");
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
fmtIdent(name),
try o.dg.fmtIntLiteral(try pt.intValue(Type.usize, name.len), .StaticInitializer),
@@ -6873,7 +6874,7 @@ fn airErrorName(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.writeAll(" = zig_errorName[");
try f.writeCValue(writer, operand, .Other);
try writer.writeAll("];\n");
try writer.writeAll(" - 1];\n");
return local;
}
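Several hunks above and below shift from the old layout, where the error-name table carried a dummy empty entry at slot 0, to 1-based error values over a list of real names: the C backend now reads zig_errorName[value - 1], the x86_64 backend biases its displacements by (value - 1) * 4, and Sema indexes the shared names list with int - 1. The helper below is an editorial illustration of that convention, not code from this commit:

const std = @import("std");

// Error value 0 means "no error"; value N maps to names[N - 1].
fn errorNameFromValue(names: []const []const u8, value: u32) ?[]const u8 {
    if (value == 0) return null; // 0 is reserved for "no error"
    return names[value - 1];
}

test "value-to-name indexing" {
    const names = [_][]const u8{ "OutOfMemory", "FileNotFound" };
    try std.testing.expectEqualStrings("OutOfMemory", errorNameFromValue(&names, 1).?);
    try std.testing.expectEqualStrings("FileNotFound", errorNameFromValue(&names, 2).?);
    try std.testing.expect(errorNameFromValue(&names, 0) == null);
}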

View File

@@ -1036,20 +1036,21 @@ pub const Object = struct {
const pt = o.pt;
const mod = pt.zcu;
const ip = &mod.intern_pool;
const error_name_list = mod.global_error_set.keys();
const llvm_errors = try mod.gpa.alloc(Builder.Constant, error_name_list.len);
const error_name_list = ip.global_error_set.getNamesFromMainThread();
const llvm_errors = try mod.gpa.alloc(Builder.Constant, 1 + error_name_list.len);
defer mod.gpa.free(llvm_errors);
// TODO: Address space
const slice_ty = Type.slice_const_u8_sentinel_0;
const llvm_usize_ty = try o.lowerType(Type.usize);
const llvm_slice_ty = try o.lowerType(slice_ty);
const llvm_table_ty = try o.builder.arrayType(error_name_list.len, llvm_slice_ty);
const llvm_table_ty = try o.builder.arrayType(1 + error_name_list.len, llvm_slice_ty);
llvm_errors[0] = try o.builder.undefConst(llvm_slice_ty);
for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name| {
const name_string = try o.builder.stringNull(name.toSlice(&mod.intern_pool));
for (llvm_errors[1..], error_name_list) |*llvm_error, name| {
const name_string = try o.builder.stringNull(name.toSlice(ip));
const name_init = try o.builder.stringConst(name_string);
const name_variable_index =
try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default);
@@ -1085,7 +1086,7 @@ pub const Object = struct {
// If there is no such function in the module, it means the source code does not need it.
const name = o.builder.strtabStringIfExists(lt_errors_fn_name) orelse return;
const llvm_fn = o.builder.getGlobal(name) orelse return;
const errors_len = o.pt.zcu.global_error_set.count();
const errors_len = o.pt.zcu.intern_pool.global_error_set.mutate.list.len;
var wip = try Builder.WipFunction.init(&o.builder, .{
.function = llvm_fn.ptrConst(&o.builder).kind.function,
@@ -1096,12 +1097,12 @@ pub const Object = struct {
// Example source of the following LLVM IR:
// fn __zig_lt_errors_len(index: u16) bool {
// return index < total_errors_len;
// return index <= total_errors_len;
// }
const lhs = wip.arg(0);
const rhs = try o.builder.intValue(try o.errorIntType(), errors_len);
const is_lt = try wip.icmp(.ult, lhs, rhs, "");
const is_lt = try wip.icmp(.ule, lhs, rhs, "");
_ = try wip.ret(is_lt);
try wip.finish();
}
@@ -3820,7 +3821,7 @@ pub const Object = struct {
return lowerBigInt(o, ty, bigint);
},
.err => |err| {
const int = try mod.getErrorValue(err.name);
const int = try pt.getErrorValue(err.name);
const llvm_int = try o.builder.intConst(try o.errorIntType(), int);
return llvm_int;
},
@@ -9658,7 +9659,7 @@ pub const FuncGen = struct {
defer wip_switch.finish(&self.wip);
for (0..names.len) |name_index| {
const err_int = mod.global_error_set.getIndex(names.get(ip)[name_index]).?;
const err_int = ip.getErrorValueIfExists(names.get(ip)[name_index]).?;
const this_tag_int_value = try o.builder.intConst(try o.errorIntType(), err_int);
try wip_switch.addCase(this_tag_int_value, valid_block, &self.wip);
}
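The __zig_lt_errors_len change earlier in this file follows from the same renumbering: errors_len no longer counts the reserved zero entry, so the largest valid error value equals errors_len and the emitted comparison switches from ult to ule. A standalone sketch of that bounds check (illustrative, not the generated LLVM IR):

const std = @import("std");

// Mirrors the generated __zig_lt_errors_len helper under the new counting.
fn ltErrorsLen(index: u16, errors_len: u16) bool {
    return index <= errors_len;
}

test "upper bound under the new counting" {
    // With 3 distinct errors, values 1..3 are in range; 4 is not.
    try std.testing.expect(ltErrorsLen(3, 3));
    try std.testing.expect(!ltErrorsLen(4, 3));
}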

View File

@@ -963,7 +963,7 @@ const DeclGen = struct {
break :cache result_id;
},
.err => |err| {
const value = try mod.getErrorValue(err.name);
const value = try pt.getErrorValue(err.name);
break :cache try self.constInt(ty, value, repr);
},
.error_union => |error_union| {

View File

@@ -2698,7 +2698,7 @@ pub fn flushModule(self: *Dwarf, pt: Zcu.PerThread) !void {
try addDbgInfoErrorSetNames(
pt,
Type.anyerror,
pt.zcu.global_error_set.keys(),
pt.zcu.intern_pool.global_error_set.getNamesFromMainThread(),
target,
&dbg_info_buffer,
);
@@ -2867,7 +2867,7 @@ fn addDbgInfoErrorSetNames(
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);
for (error_names) |error_name| {
const int = try pt.zcu.getErrorValue(error_name);
const int = try pt.getErrorValue(error_name);
const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool);
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(error_name_slice.len + 2 + @sizeOf(u64));

View File

@@ -227,9 +227,9 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
var error_info = std.ArrayList(u8).init(self.object.gpa);
defer error_info.deinit();
try error_info.appendSlice("zig_errors");
const mod = self.base.comp.module.?;
for (mod.global_error_set.keys()) |name| {
try error_info.appendSlice("zig_errors:");
const ip = &self.base.comp.module.?.intern_pool;
for (ip.global_error_set.getNamesFromMainThread()) |name| {
// Errors can contain pretty much any character - to encode them in a string we must escape
them somehow. Easiest here is to use some established scheme, one which also preserves the
// name if it contains no strange characters is nice for debugging. URI encoding fits the bill.
@@ -238,7 +238,7 @@ pub fn flushModule(self: *SpirV, arena: Allocator, tid: Zcu.PerThread.Id, prog_n
try error_info.append(':');
try std.Uri.Component.percentEncode(
error_info.writer(),
name.toSlice(&mod.intern_pool),
name.toSlice(ip),
struct {
fn isValidChar(c: u8) bool {
return switch (c) {

View File

@@ -652,13 +652,22 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
// Addend for each relocation to the table
var addend: u32 = 0;
const pt: Zcu.PerThread = .{ .zcu = wasm_file.base.comp.module.?, .tid = tid };
for (pt.zcu.global_error_set.keys()) |error_name| {
const atom = wasm_file.getAtomPtr(atom_index);
const slice_ty = Type.slice_const_u8_sentinel_0;
const atom = wasm_file.getAtomPtr(atom_index);
{
// TODO: remove this unreachable entry
try atom.code.appendNTimes(gpa, 0, 4);
try atom.code.writer(gpa).writeInt(u32, 0, .little);
atom.size += @intCast(slice_ty.abiSize(pt));
addend += 1;
const error_name_slice = error_name.toSlice(&pt.zcu.intern_pool);
try names_atom.code.append(gpa, 0);
}
const ip = &pt.zcu.intern_pool;
for (ip.global_error_set.getNamesFromMainThread()) |error_name| {
const error_name_slice = error_name.toSlice(ip);
const len: u32 = @intCast(error_name_slice.len + 1); // names are 0-terminated
const slice_ty = Type.slice_const_u8_sentinel_0;
const offset = @as(u32, @intCast(atom.code.items.len));
// first we create the data for the slice of the name
try atom.code.appendNTimes(gpa, 0, 4); // ptr to name, will be relocated
@@ -677,7 +686,7 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm, tid: Zcu.Per
try names_atom.code.ensureUnusedCapacity(gpa, len);
names_atom.code.appendSliceAssumeCapacity(error_name_slice[0..len]);
log.debug("Populated error name: '{}'", .{error_name.fmt(&pt.zcu.intern_pool)});
log.debug("Populated error name: '{}'", .{error_name.fmt(ip)});
}
names_atom.size = addend;
zig_object.error_names_atom = names_atom_index;
@@ -1042,7 +1051,7 @@ fn setupErrorsLen(zig_object: *ZigObject, wasm_file: *Wasm) !void {
const gpa = wasm_file.base.comp.gpa;
const sym_index = zig_object.findGlobalSymbol("__zig_errors_len") orelse return;
const errors_len = wasm_file.base.comp.module.?.global_error_set.count();
const errors_len = 1 + wasm_file.base.comp.module.?.intern_pool.global_error_set.mutate.list.len;
// overwrite existing atom if it already exists (maybe the error set has increased)
// if not, allocate a new atom.
const atom_index = if (wasm_file.symbol_atom.get(.{ .file = zig_object.index, .index = sym_index })) |index| blk: {