InternPool: remove slice from byte aggregate keys

This removes a large number of redundant lookups and avoids many use-after-free bugs: byte aggregate keys now hold a stable `String` index into `string_bytes` instead of a slice, which was invalidated whenever `string_bytes` reallocated.

Closes #19485
Jacob Young 2024-04-08 12:44:42 -04:00
parent 4cd92567e7
commit 7611d90ba0
24 changed files with 1038 additions and 952 deletions
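The core of the change: byte aggregates in `Key.Aggregate.Storage` switch from carrying a raw `[]const u8` into `string_bytes` to carrying a `String` handle, an integer index into that same array. A minimal standalone sketch (illustrative names, not the compiler's exact API) of why the index survives where the slice does not:

const std = @import("std");

// A stable handle into a growable byte buffer, mirroring the `String`
// type this commit threads through byte aggregate keys.
const String = enum(u32) {
    empty = 0,
    _,

    fn toSlice(string: String, len: usize, bytes: []const u8) []const u8 {
        return bytes[@intFromEnum(string)..][0..len];
    }
};

test "index stays valid across reallocation" {
    var string_bytes = std.ArrayList(u8).init(std.testing.allocator);
    defer string_bytes.deinit();

    try string_bytes.appendSlice("hello");
    const handle: String = @enumFromInt(0);
    // A saved `[]const u8` into the buffer may dangle after this append
    // reallocates; the integer handle cannot.
    try string_bytes.appendSlice(" world");
    try std.testing.expectEqualStrings("hello", handle.toSlice(5, string_bytes.items));
}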


@@ -106,12 +106,8 @@ pub const NullTerminatedString = enum(u32) {
/// Given an index into `string_bytes` returns the null-terminated string found there.
pub fn nullTerminatedString(code: Zir, index: NullTerminatedString) [:0]const u8 {
const start = @intFromEnum(index);
var end: u32 = start;
while (code.string_bytes[end] != 0) {
end += 1;
}
return code.string_bytes[start..end :0];
const slice = code.string_bytes[@intFromEnum(index)..];
return slice[0..std.mem.indexOfScalar(u8, slice, 0).? :0];
}
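The rewritten body finds the terminator with `std.mem.indexOfScalar` and then slices with `:0`, which asserts via a runtime safety check that the byte at the end index really is 0. A small self-contained check of the idiom (test name is illustrative):

const std = @import("std");

test "sentinel slice via indexOfScalar" {
    const buf: []const u8 = "abc\x00def";
    // Same shape as the new nullTerminatedString: scan for the first 0
    // byte, then produce a null-terminated sub-slice ending there.
    const name = buf[0..std.mem.indexOfScalar(u8, buf, 0).? :0];
    try std.testing.expectEqualStrings("abc", name);
}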
pub fn refSlice(code: Zir, start: usize, len: usize) []Inst.Ref {


@@ -3159,7 +3159,7 @@ pub fn addModuleErrorMsg(mod: *Module, eb: *ErrorBundle.Wip, module_err_msg: Mod
const rt_file_path = try module_reference.src_loc.file_scope.fullPath(gpa);
defer gpa.free(rt_file_path);
ref_traces.appendAssumeCapacity(.{
.decl_name = try eb.addString(ip.stringToSlice(module_reference.decl)),
.decl_name = try eb.addString(module_reference.decl.toSlice(ip)),
.src_loc = try eb.addSourceLocation(.{
.src_path = try eb.addString(rt_file_path),
.span_start = span.start,
@@ -4074,8 +4074,7 @@ fn workerCheckEmbedFile(
fn detectEmbedFileUpdate(comp: *Compilation, embed_file: *Module.EmbedFile) !void {
const mod = comp.module.?;
const ip = &mod.intern_pool;
const sub_file_path = ip.stringToSlice(embed_file.sub_file_path);
var file = try embed_file.owner.root.openFile(sub_file_path, .{});
var file = try embed_file.owner.root.openFile(embed_file.sub_file_path.toSlice(ip), .{});
defer file.close();
const stat = try file.stat();
@@ -4444,7 +4443,7 @@ fn reportRetryableEmbedFileError(
const ip = &mod.intern_pool;
const err_msg = try Module.ErrorMsg.create(gpa, src_loc, "unable to load '{}{s}': {s}", .{
embed_file.owner.root,
ip.stringToSlice(embed_file.sub_file_path),
embed_file.sub_file_path.toSlice(ip),
@errorName(err),
});


@@ -351,7 +351,7 @@ const KeyAdapter = struct {
pub fn eql(ctx: @This(), a: Key, b_void: void, b_map_index: usize) bool {
_ = b_void;
if (ctx.intern_pool.items.items(.tag)[b_map_index] == .removed) return false;
return ctx.intern_pool.indexToKey(@as(Index, @enumFromInt(b_map_index))).eql(a, ctx.intern_pool);
return ctx.intern_pool.indexToKey(@enumFromInt(b_map_index)).eql(a, ctx.intern_pool);
}
pub fn hash(ctx: @This(), a: Key) u32 {
@@ -385,7 +385,7 @@ pub const RuntimeIndex = enum(u32) {
_,
pub fn increment(ri: *RuntimeIndex) void {
ri.* = @as(RuntimeIndex, @enumFromInt(@intFromEnum(ri.*) + 1));
ri.* = @enumFromInt(@intFromEnum(ri.*) + 1);
}
};
@@ -418,12 +418,44 @@ pub const OptionalNamespaceIndex = enum(u32) {
/// An index into `string_bytes`.
pub const String = enum(u32) {
/// An empty string.
empty = 0,
_,
pub fn toSlice(string: String, len: u64, ip: *const InternPool) []const u8 {
return ip.string_bytes.items[@intFromEnum(string)..][0..@intCast(len)];
}
pub fn at(string: String, index: u64, ip: *const InternPool) u8 {
return ip.string_bytes.items[@intCast(@intFromEnum(string) + index)];
}
pub fn toNullTerminatedString(string: String, len: u64, ip: *const InternPool) NullTerminatedString {
assert(std.mem.indexOfScalar(u8, string.toSlice(len, ip), 0) == null);
assert(string.at(len, ip) == 0);
return @enumFromInt(@intFromEnum(string));
}
};
/// An index into `string_bytes` which might be `none`.
pub const OptionalString = enum(u32) {
/// This is distinct from `none` - it is a valid index that represents empty string.
empty = 0,
none = std.math.maxInt(u32),
_,
pub fn unwrap(string: OptionalString) ?String {
return if (string != .none) @enumFromInt(@intFromEnum(string)) else null;
}
pub fn toSlice(string: OptionalString, len: u64, ip: *const InternPool) ?[]const u8 {
return (string.unwrap() orelse return null).toSlice(len, ip);
}
};
/// An index into `string_bytes`.
pub const NullTerminatedString = enum(u32) {
/// This is distinct from `none` - it is a valid index that represents empty string.
/// An empty string.
empty = 0,
_,
@@ -447,6 +479,19 @@ pub const NullTerminatedString = enum(u32) {
return @enumFromInt(@intFromEnum(self));
}
pub fn toSlice(string: NullTerminatedString, ip: *const InternPool) [:0]const u8 {
const slice = ip.string_bytes.items[@intFromEnum(string)..];
return slice[0..std.mem.indexOfScalar(u8, slice, 0).? :0];
}
pub fn length(string: NullTerminatedString, ip: *const InternPool) u32 {
return @intCast(string.toSlice(ip).len);
}
pub fn eqlSlice(string: NullTerminatedString, slice: []const u8, ip: *const InternPool) bool {
return std.mem.eql(u8, string.toSlice(ip), slice);
}
const Adapter = struct {
strings: []const NullTerminatedString,
@@ -467,11 +512,11 @@ pub const NullTerminatedString = enum(u32) {
return @intFromEnum(a) < @intFromEnum(b);
}
pub fn toUnsigned(self: NullTerminatedString, ip: *const InternPool) ?u32 {
const s = ip.stringToSlice(self);
if (s.len > 1 and s[0] == '0') return null;
if (std.mem.indexOfScalar(u8, s, '_')) |_| return null;
return std.fmt.parseUnsigned(u32, s, 10) catch null;
pub fn toUnsigned(string: NullTerminatedString, ip: *const InternPool) ?u32 {
const slice = string.toSlice(ip);
if (slice.len > 1 and slice[0] == '0') return null;
if (std.mem.indexOfScalar(u8, slice, '_')) |_| return null;
return std.fmt.parseUnsigned(u32, slice, 10) catch null;
}
const FormatData = struct {
@@ -484,11 +529,11 @@ pub const NullTerminatedString = enum(u32) {
_: std.fmt.FormatOptions,
writer: anytype,
) @TypeOf(writer).Error!void {
const s = data.ip.stringToSlice(data.string);
const slice = data.string.toSlice(data.ip);
if (comptime std.mem.eql(u8, specifier, "")) {
try writer.writeAll(s);
try writer.writeAll(slice);
} else if (comptime std.mem.eql(u8, specifier, "i")) {
try writer.print("{p}", .{std.zig.fmtId(s)});
try writer.print("{p}", .{std.zig.fmtId(slice)});
} else @compileError("invalid format string '" ++ specifier ++ "' for '" ++ @typeName(NullTerminatedString) ++ "'");
}
@@ -504,9 +549,12 @@ pub const OptionalNullTerminatedString = enum(u32) {
none = std.math.maxInt(u32),
_,
pub fn unwrap(oi: OptionalNullTerminatedString) ?NullTerminatedString {
if (oi == .none) return null;
return @enumFromInt(@intFromEnum(oi));
pub fn unwrap(string: OptionalNullTerminatedString) ?NullTerminatedString {
return if (string != .none) @enumFromInt(@intFromEnum(string)) else null;
}
pub fn toSlice(string: OptionalNullTerminatedString, ip: *const InternPool) ?[:0]const u8 {
return (string.unwrap() orelse return null).toSlice(ip);
}
};
@@ -690,6 +738,10 @@ pub const Key = union(enum) {
len: u64,
child: Index,
sentinel: Index = .none,
pub fn lenIncludingSentinel(array_type: ArrayType) u64 {
return array_type.len + @intFromBool(array_type.sentinel != .none);
}
};
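The new helper centralizes arithmetic that call sites such as `aggregateTypeLenIncludingSentinel` previously inlined; a sentinel contributes exactly one extra element. A quick check of the same computation, with a plain bool standing in for the `sentinel != .none` comparison:

const std = @import("std");

test "sentinel adds exactly one element" {
    const len: u64 = 3;
    try std.testing.expectEqual(@as(u64, 4), len + @intFromBool(true));
    try std.testing.expectEqual(@as(u64, 3), len + @intFromBool(false));
}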
/// Extern so that hashing can be done via memory reinterpreting.
@@ -1043,7 +1095,7 @@ pub const Key = union(enum) {
storage: Storage,
pub const Storage = union(enum) {
bytes: []const u8,
bytes: String,
elems: []const Index,
repeated_elem: Index,
@@ -1203,7 +1255,7 @@ pub const Key = union(enum) {
if (child == .u8_type) {
switch (aggregate.storage) {
.bytes => |bytes| for (bytes[0..@intCast(len)]) |byte| {
.bytes => |bytes| for (bytes.toSlice(len, ip)) |byte| {
std.hash.autoHash(&hasher, KeyTag.int);
std.hash.autoHash(&hasher, byte);
},
@@ -1240,7 +1292,7 @@ pub const Key = union(enum) {
switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| for (elems[0..@as(usize, @intCast(len))]) |elem|
.elems => |elems| for (elems[0..@intCast(len)]) |elem|
std.hash.autoHash(&hasher, elem),
.repeated_elem => |elem| {
var remaining = len;
@@ -1505,11 +1557,11 @@ pub const Key = union(enum) {
if (a_info.ty == .c_longdouble_type and a_info.storage != .f80) {
// These are strange: we'll sometimes represent them as f128, even if the
// underlying type is smaller. f80 is an exception: see float_c_longdouble_f80.
const a_val = switch (a_info.storage) {
inline else => |val| @as(u128, @bitCast(@as(f128, @floatCast(val)))),
const a_val: u128 = switch (a_info.storage) {
inline else => |val| @bitCast(@as(f128, @floatCast(val))),
};
const b_val = switch (b_info.storage) {
inline else => |val| @as(u128, @bitCast(@as(f128, @floatCast(val)))),
const b_val: u128 = switch (b_info.storage) {
inline else => |val| @bitCast(@as(f128, @floatCast(val))),
};
return a_val == b_val;
}
@@ -1560,11 +1612,11 @@ pub const Key = union(enum) {
const len = ip.aggregateTypeLen(a_info.ty);
const StorageTag = @typeInfo(Key.Aggregate.Storage).Union.tag_type.?;
if (@as(StorageTag, a_info.storage) != @as(StorageTag, b_info.storage)) {
for (0..@as(usize, @intCast(len))) |elem_index| {
for (0..@intCast(len)) |elem_index| {
const a_elem = switch (a_info.storage) {
.bytes => |bytes| ip.getIfExists(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes[elem_index] },
.storage = .{ .u64 = bytes.at(elem_index, ip) },
} }) orelse return false,
.elems => |elems| elems[elem_index],
.repeated_elem => |elem| elem,
@@ -1572,7 +1624,7 @@ pub const Key = union(enum) {
const b_elem = switch (b_info.storage) {
.bytes => |bytes| ip.getIfExists(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes[elem_index] },
.storage = .{ .u64 = bytes.at(elem_index, ip) },
} }) orelse return false,
.elems => |elems| elems[elem_index],
.repeated_elem => |elem| elem,
@@ -1585,18 +1637,15 @@ pub const Key = union(enum) {
switch (a_info.storage) {
.bytes => |a_bytes| {
const b_bytes = b_info.storage.bytes;
return std.mem.eql(
u8,
a_bytes[0..@as(usize, @intCast(len))],
b_bytes[0..@as(usize, @intCast(len))],
);
return a_bytes == b_bytes or
std.mem.eql(u8, a_bytes.toSlice(len, ip), b_bytes.toSlice(len, ip));
},
.elems => |a_elems| {
const b_elems = b_info.storage.elems;
return std.mem.eql(
Index,
a_elems[0..@as(usize, @intCast(len))],
b_elems[0..@as(usize, @intCast(len))],
a_elems[0..@intCast(len)],
b_elems[0..@intCast(len)],
);
},
.repeated_elem => |a_elem| {
@@ -4175,10 +4224,10 @@ pub const Float64 = struct {
}
fn pack(val: f64) Float64 {
const bits = @as(u64, @bitCast(val));
const bits: u64 = @bitCast(val);
return .{
.piece0 = @as(u32, @truncate(bits)),
.piece1 = @as(u32, @truncate(bits >> 32)),
.piece0 = @truncate(bits),
.piece1 = @truncate(bits >> 32),
};
}
};
@@ -4197,11 +4246,11 @@ pub const Float80 = struct {
}
fn pack(val: f80) Float80 {
const bits = @as(u80, @bitCast(val));
const bits: u80 = @bitCast(val);
return .{
.piece0 = @as(u32, @truncate(bits)),
.piece1 = @as(u32, @truncate(bits >> 32)),
.piece2 = @as(u16, @truncate(bits >> 64)),
.piece0 = @truncate(bits),
.piece1 = @truncate(bits >> 32),
.piece2 = @truncate(bits >> 64),
};
}
};
@@ -4222,12 +4271,12 @@ pub const Float128 = struct {
}
fn pack(val: f128) Float128 {
const bits = @as(u128, @bitCast(val));
const bits: u128 = @bitCast(val);
return .{
.piece0 = @as(u32, @truncate(bits)),
.piece1 = @as(u32, @truncate(bits >> 32)),
.piece2 = @as(u32, @truncate(bits >> 64)),
.piece3 = @as(u32, @truncate(bits >> 96)),
.piece0 = @truncate(bits),
.piece1 = @truncate(bits >> 32),
.piece2 = @truncate(bits >> 64),
.piece3 = @truncate(bits >> 96),
};
}
};
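These `pack` helpers split a bit-cast float into extern-compatible `u32` pieces, a lossless and reversible encoding. A round-trip check in the style of `Float64.pack` (names are illustrative):

const std = @import("std");

test "f64 splits into u32 pieces and back" {
    const val: f64 = 3.5;
    const bits: u64 = @bitCast(val);
    const piece0: u32 = @truncate(bits);
    const piece1: u32 = @truncate(bits >> 32);
    const rebuilt = (@as(u64, piece1) << 32) | piece0;
    try std.testing.expectEqual(val, @as(f64, @bitCast(rebuilt)));
}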
@@ -4244,7 +4293,7 @@ pub fn init(ip: *InternPool, gpa: Allocator) !void {
assert(ip.items.len == 0);
// Reserve string index 0 for an empty string.
assert((try ip.getOrPutString(gpa, "")) == .empty);
assert((try ip.getOrPutString(gpa, "", .no_embedded_nulls)) == .empty);
// So that we can use `catch unreachable` below.
try ip.items.ensureUnusedCapacity(gpa, static_keys.len);
@@ -4329,13 +4378,13 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.type_int_signed => .{
.int_type = .{
.signedness = .signed,
.bits = @as(u16, @intCast(data)),
.bits = @intCast(data),
},
},
.type_int_unsigned => .{
.int_type = .{
.signedness = .unsigned,
.bits = @as(u16, @intCast(data)),
.bits = @intCast(data),
},
},
.type_array_big => {
@@ -4354,8 +4403,8 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.sentinel = .none,
} };
},
.simple_type => .{ .simple_type = @as(SimpleType, @enumFromInt(data)) },
.simple_value => .{ .simple_value = @as(SimpleValue, @enumFromInt(data)) },
.simple_type => .{ .simple_type = @enumFromInt(data) },
.simple_value => .{ .simple_value = @enumFromInt(data) },
.type_vector => {
const vector_info = ip.extraData(Vector, data);
@@ -4506,9 +4555,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
} },
.type_function => .{ .func_type = ip.extraFuncType(data) },
.undef => .{ .undef = @as(Index, @enumFromInt(data)) },
.undef => .{ .undef = @enumFromInt(data) },
.opt_null => .{ .opt = .{
.ty = @as(Index, @enumFromInt(data)),
.ty = @enumFromInt(data),
.val = .none,
} },
.opt_payload => {
@@ -4670,11 +4719,11 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.float_f16 => .{ .float = .{
.ty = .f16_type,
.storage = .{ .f16 = @as(f16, @bitCast(@as(u16, @intCast(data)))) },
.storage = .{ .f16 = @bitCast(@as(u16, @intCast(data))) },
} },
.float_f32 => .{ .float = .{
.ty = .f32_type,
.storage = .{ .f32 = @as(f32, @bitCast(data)) },
.storage = .{ .f32 = @bitCast(data) },
} },
.float_f64 => .{ .float = .{
.ty = .f64_type,
@@ -4771,10 +4820,9 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
},
.bytes => {
const extra = ip.extraData(Bytes, data);
const len: u32 = @intCast(ip.aggregateTypeLenIncludingSentinel(extra.ty));
return .{ .aggregate = .{
.ty = extra.ty,
.storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(extra.bytes)..][0..len] },
.storage = .{ .bytes = extra.bytes },
} };
},
.aggregate => {
@@ -4809,14 +4857,14 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
.val = .{ .payload = extra.val },
} };
},
.enum_literal => .{ .enum_literal = @as(NullTerminatedString, @enumFromInt(data)) },
.enum_literal => .{ .enum_literal = @enumFromInt(data) },
.enum_tag => .{ .enum_tag = ip.extraData(Tag.EnumTag, data) },
.memoized_call => {
const extra = ip.extraDataTrail(MemoizedCall, data);
return .{ .memoized_call = .{
.func = extra.data.func,
.arg_values = @as([]const Index, @ptrCast(ip.extra.items[extra.end..][0..extra.data.args_len])),
.arg_values = @ptrCast(ip.extra.items[extra.end..][0..extra.data.args_len]),
.result = extra.data.result,
} };
},
@@ -5596,9 +5644,8 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
switch (aggregate.storage) {
.bytes => |bytes| {
assert(child == .u8_type);
if (bytes.len != len) {
assert(bytes.len == len_including_sentinel);
assert(bytes[@intCast(len)] == ip.indexToKey(sentinel).int.storage.u64);
if (sentinel != .none) {
assert(bytes.at(@intCast(len), ip) == ip.indexToKey(sentinel).int.storage.u64);
}
},
.elems => |elems| {
@@ -5641,11 +5688,16 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
switch (ty_key) {
.anon_struct_type => |anon_struct_type| opv: {
switch (aggregate.storage) {
.bytes => |bytes| for (anon_struct_type.values.get(ip), bytes) |value, byte| {
if (value != ip.getIfExists(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = byte },
} })) break :opv;
.bytes => |bytes| for (anon_struct_type.values.get(ip), bytes.at(0, ip)..) |value, byte| {
if (value == .none) break :opv;
switch (ip.indexToKey(value)) {
.undef => break :opv,
.int => |int| switch (int.storage) {
.u64 => |x| if (x != byte) break :opv,
else => break :opv,
},
else => unreachable,
}
},
.elems => |elems| if (!std.mem.eql(
Index,
@@ -5670,9 +5722,9 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
repeated: {
switch (aggregate.storage) {
.bytes => |bytes| for (bytes[1..@as(usize, @intCast(len))]) |byte|
if (byte != bytes[0]) break :repeated,
.elems => |elems| for (elems[1..@as(usize, @intCast(len))]) |elem|
.bytes => |bytes| for (bytes.toSlice(len, ip)[1..]) |byte|
if (byte != bytes.at(0, ip)) break :repeated,
.elems => |elems| for (elems[1..@intCast(len)]) |elem|
if (elem != elems[0]) break :repeated,
.repeated_elem => {},
}
@@ -5681,7 +5733,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
_ = ip.map.pop();
const elem = try ip.get(gpa, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes[0] },
.storage = .{ .u64 = bytes.at(0, ip) },
} });
assert(!(try ip.map.getOrPutAdapted(gpa, key, adapter)).found_existing);
try ip.items.ensureUnusedCapacity(gpa, 1);
@@ -5710,7 +5762,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
try ip.string_bytes.ensureUnusedCapacity(gpa, @intCast(len_including_sentinel + 1));
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
switch (aggregate.storage) {
.bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes[0..@intCast(len)]),
.bytes => |bytes| ip.string_bytes.appendSliceAssumeCapacity(bytes.toSlice(len, ip)),
.elems => |elems| for (elems[0..@intCast(len)]) |elem| switch (ip.indexToKey(elem)) {
.undef => {
ip.string_bytes.shrinkRetainingCapacity(string_bytes_index);
@@ -5730,15 +5782,14 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
else => unreachable,
},
}
const has_internal_null =
std.mem.indexOfScalar(u8, ip.string_bytes.items[string_bytes_index..], 0) != null;
if (sentinel != .none) ip.string_bytes.appendAssumeCapacity(
@intCast(ip.indexToKey(sentinel).int.storage.u64),
);
const string: String = if (has_internal_null)
@enumFromInt(string_bytes_index)
else
(try ip.getOrPutTrailingString(gpa, @intCast(len_including_sentinel))).toString();
const string = try ip.getOrPutTrailingString(
gpa,
@intCast(len_including_sentinel),
.maybe_embedded_nulls,
);
ip.items.appendAssumeCapacity(.{
.tag = .bytes,
.data = ip.addExtraAssumeCapacity(Bytes{
@@ -5780,7 +5831,7 @@ pub fn get(ip: *InternPool, gpa: Allocator, key: Key) Allocator.Error!Index {
.tag = .memoized_call,
.data = ip.addExtraAssumeCapacity(MemoizedCall{
.func = memoized_call.func,
.args_len = @as(u32, @intCast(memoized_call.arg_values.len)),
.args_len = @intCast(memoized_call.arg_values.len),
.result = memoized_call.result,
}),
});
@@ -6753,7 +6804,7 @@ fn finishFuncInstance(
const decl = ip.declPtr(decl_index);
decl.name = try ip.getOrPutStringFmt(gpa, "{}__anon_{d}", .{
fn_owner_decl.name.fmt(ip), @intFromEnum(decl_index),
});
}, .no_embedded_nulls);
return func_index;
}
@@ -7216,7 +7267,7 @@ pub fn remove(ip: *InternPool, index: Index) void {
}
fn addInt(ip: *InternPool, gpa: Allocator, ty: Index, tag: Tag, limbs: []const Limb) !void {
const limbs_len = @as(u32, @intCast(limbs.len));
const limbs_len: u32 = @intCast(limbs.len);
try ip.reserveLimbs(gpa, @typeInfo(Int).Struct.fields.len + limbs_len);
ip.items.appendAssumeCapacity(.{
.tag = tag,
@@ -7235,7 +7286,7 @@ fn addExtra(ip: *InternPool, gpa: Allocator, extra: anytype) Allocator.Error!u32
}
fn addExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
const result = @as(u32, @intCast(ip.extra.items.len));
const result: u32 = @intCast(ip.extra.items.len);
inline for (@typeInfo(@TypeOf(extra)).Struct.fields) |field| {
ip.extra.appendAssumeCapacity(switch (field.type) {
Index,
@@ -7286,7 +7337,7 @@ fn addLimbsExtraAssumeCapacity(ip: *InternPool, extra: anytype) u32 {
@sizeOf(u64) => {},
else => @compileError("unsupported host"),
}
const result = @as(u32, @intCast(ip.limbs.items.len));
const result: u32 = @intCast(ip.limbs.items.len);
inline for (@typeInfo(@TypeOf(extra)).Struct.fields, 0..) |field, i| {
const new: u32 = switch (field.type) {
u32 => @field(extra, field.name),
@@ -7374,7 +7425,7 @@ fn limbData(ip: *const InternPool, comptime T: type, index: usize) T {
@field(result, field.name) = switch (field.type) {
u32 => int32,
Index => @as(Index, @enumFromInt(int32)),
Index => @enumFromInt(int32),
else => @compileError("bad field type: " ++ @typeName(field.type)),
};
}
@@ -7410,8 +7461,8 @@ fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes
};
// TODO: https://github.com/ziglang/zig/issues/1738
return .{
.start = @as(u32, @intCast(@divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb)))),
.len = @as(u32, @intCast(limbs.len)),
.start = @intCast(@divExact(@intFromPtr(limbs.ptr) - @intFromPtr(host_slice.ptr), @sizeOf(Limb))),
.len = @intCast(limbs.len),
};
}
@@ -7683,7 +7734,7 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
.val = error_union.val,
} }),
.aggregate => |aggregate| {
const new_len = @as(usize, @intCast(ip.aggregateTypeLen(new_ty)));
const new_len: usize = @intCast(ip.aggregateTypeLen(new_ty));
direct: {
const old_ty_child = switch (ip.indexToKey(old_ty)) {
inline .array_type, .vector_type => |seq_type| seq_type.child,
@@ -7696,16 +7747,11 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
else => unreachable,
};
if (old_ty_child != new_ty_child) break :direct;
// TODO: write something like getCoercedInts to avoid needing to dupe here
switch (aggregate.storage) {
.bytes => |bytes| {
const bytes_copy = try gpa.dupe(u8, bytes[0..new_len]);
defer gpa.free(bytes_copy);
return ip.get(gpa, .{ .aggregate = .{
.ty = new_ty,
.storage = .{ .bytes = bytes_copy },
} });
},
.bytes => |bytes| return ip.get(gpa, .{ .aggregate = .{
.ty = new_ty,
.storage = .{ .bytes = bytes },
} }),
.elems => |elems| {
const elems_copy = try gpa.dupe(Index, elems[0..new_len]);
defer gpa.free(elems_copy);
@@ -7729,14 +7775,13 @@ pub fn getCoerced(ip: *InternPool, gpa: Allocator, val: Index, new_ty: Index) Al
// lifetime issues, since it'll allow us to avoid referencing `aggregate` after we
// begin interning elems.
switch (aggregate.storage) {
.bytes => {
.bytes => |bytes| {
// We have to intern each value here, so unfortunately we can't easily avoid
// the repeated indexToKey calls.
for (agg_elems, 0..) |*elem, i| {
const x = ip.indexToKey(val).aggregate.storage.bytes[i];
for (agg_elems, 0..) |*elem, index| {
elem.* = try ip.get(gpa, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = x },
.storage = .{ .u64 = bytes.at(index, ip) },
} });
}
},
@@ -8169,9 +8214,8 @@ fn dumpStatsFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
.bytes => b: {
const info = ip.extraData(Bytes, data);
const len = @as(u32, @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty)));
break :b @sizeOf(Bytes) + len +
@intFromBool(ip.string_bytes.items[@intFromEnum(info.bytes) + len - 1] != 0);
const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(info.ty));
break :b @sizeOf(Bytes) + len + @intFromBool(info.bytes.at(len - 1, ip) != 0);
},
.aggregate => b: {
const info = ip.extraData(Tag.Aggregate, data);
@@ -8434,15 +8478,35 @@ pub fn destroyNamespace(ip: *InternPool, gpa: Allocator, index: NamespaceIndex)
};
}
const EmbeddedNulls = enum {
no_embedded_nulls,
maybe_embedded_nulls,
fn StringType(comptime embedded_nulls: EmbeddedNulls) type {
return switch (embedded_nulls) {
.no_embedded_nulls => NullTerminatedString,
.maybe_embedded_nulls => String,
};
}
fn OptionalStringType(comptime embedded_nulls: EmbeddedNulls) type {
return switch (embedded_nulls) {
.no_embedded_nulls => OptionalNullTerminatedString,
.maybe_embedded_nulls => OptionalString,
};
}
};
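`EmbeddedNulls` lets one interning function serve both string flavors: the comptime parameter statically selects the return type, so `.no_embedded_nulls` callers receive a `NullTerminatedString` and `.maybe_embedded_nulls` callers receive a `String`. A reduced sketch of the pattern, with stand-in slice types rather than the pool's handles:

const std = @import("std");

const Mode = enum {
    no_embedded_nulls,
    maybe_embedded_nulls,

    fn StringType(comptime mode: Mode) type {
        return switch (mode) {
            .no_embedded_nulls => [:0]const u8,
            .maybe_embedded_nulls => []const u8,
        };
    }
};

// The comptime enum value picks the return type at each call site.
fn intern(comptime mode: Mode, bytes: [:0]const u8) mode.StringType() {
    return bytes;
}

test "comptime-selected return type" {
    const a: [:0]const u8 = intern(.no_embedded_nulls, "builtin");
    const b: []const u8 = intern(.maybe_embedded_nulls, "builtin");
    try std.testing.expectEqualStrings(a, b);
}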
pub fn getOrPutString(
ip: *InternPool,
gpa: Allocator,
s: []const u8,
) Allocator.Error!NullTerminatedString {
try ip.string_bytes.ensureUnusedCapacity(gpa, s.len + 1);
ip.string_bytes.appendSliceAssumeCapacity(s);
slice: []const u8,
comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.StringType() {
try ip.string_bytes.ensureUnusedCapacity(gpa, slice.len + 1);
ip.string_bytes.appendSliceAssumeCapacity(slice);
ip.string_bytes.appendAssumeCapacity(0);
return ip.getOrPutTrailingString(gpa, s.len + 1);
return ip.getOrPutTrailingString(gpa, slice.len + 1, embedded_nulls);
}
pub fn getOrPutStringFmt(
@@ -8450,23 +8514,24 @@ pub fn getOrPutStringFmt(
gpa: Allocator,
comptime format: []const u8,
args: anytype,
) Allocator.Error!NullTerminatedString {
comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.StringType() {
// ensure that references to string_bytes in args do not get invalidated
const len: usize = @intCast(std.fmt.count(format, args) + 1);
try ip.string_bytes.ensureUnusedCapacity(gpa, len);
ip.string_bytes.writer(undefined).print(format, args) catch unreachable;
ip.string_bytes.appendAssumeCapacity(0);
return ip.getOrPutTrailingString(gpa, len);
return ip.getOrPutTrailingString(gpa, len, embedded_nulls);
}
pub fn getOrPutStringOpt(
ip: *InternPool,
gpa: Allocator,
optional_string: ?[]const u8,
) Allocator.Error!OptionalNullTerminatedString {
const s = optional_string orelse return .none;
const interned = try getOrPutString(ip, gpa, s);
return interned.toOptional();
slice: ?[]const u8,
comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.OptionalStringType() {
const string = try getOrPutString(ip, gpa, slice orelse return .none, embedded_nulls);
return string.toOptional();
}
/// Uses the last len bytes of ip.string_bytes as the key.
@@ -8474,7 +8539,8 @@ pub fn getOrPutTrailingString(
ip: *InternPool,
gpa: Allocator,
len: usize,
) Allocator.Error!NullTerminatedString {
comptime embedded_nulls: EmbeddedNulls,
) Allocator.Error!embedded_nulls.StringType() {
const string_bytes = &ip.string_bytes;
const str_index: u32 = @intCast(string_bytes.items.len - len);
if (len > 0 and string_bytes.getLast() == 0) {
@@ -8483,6 +8549,14 @@
try string_bytes.ensureUnusedCapacity(gpa, 1);
}
const key: []const u8 = string_bytes.items[str_index..];
const has_embedded_null = std.mem.indexOfScalar(u8, key, 0) != null;
switch (embedded_nulls) {
.no_embedded_nulls => assert(!has_embedded_null),
.maybe_embedded_nulls => if (has_embedded_null) {
string_bytes.appendAssumeCapacity(0);
return @enumFromInt(str_index);
},
}
const gop = try ip.string_table.getOrPutContextAdapted(gpa, key, std.hash_map.StringIndexAdapter{
.bytes = string_bytes,
}, std.hash_map.StringIndexContext{
@@ -8498,58 +8572,10 @@
}
}
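The `has_embedded_null` branch above is the crux of the dedup logic: `string_table` is keyed on null-terminated contents, so a key containing an interior 0 can never round-trip through it and instead receives a fresh, non-deduplicated `String` index. The decision reduces to a scan (illustrative stand-in function):

const std = @import("std");

// Keys with an interior 0 must bypass the null-terminated string table.
fn canUseStringTable(key: []const u8) bool {
    return std.mem.indexOfScalar(u8, key, 0) == null;
}

test "embedded nulls skip deduplication" {
    try std.testing.expect(canUseStringTable("test_functions"));
    try std.testing.expect(!canUseStringTable("a\x00b"));
}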
/// Uses the last len bytes of ip.string_bytes as the key.
pub fn getTrailingAggregate(
ip: *InternPool,
gpa: Allocator,
ty: Index,
len: usize,
) Allocator.Error!Index {
try ip.items.ensureUnusedCapacity(gpa, 1);
try ip.extra.ensureUnusedCapacity(gpa, @typeInfo(Bytes).Struct.fields.len);
const str: String = @enumFromInt(ip.string_bytes.items.len - len);
const adapter: KeyAdapter = .{ .intern_pool = ip };
const gop = try ip.map.getOrPutAdapted(gpa, Key{ .aggregate = .{
.ty = ty,
.storage = .{ .bytes = ip.string_bytes.items[@intFromEnum(str)..] },
} }, adapter);
if (gop.found_existing) return @enumFromInt(gop.index);
ip.items.appendAssumeCapacity(.{
.tag = .bytes,
.data = ip.addExtraAssumeCapacity(Bytes{
.ty = ty,
.bytes = str,
}),
});
return @enumFromInt(ip.items.len - 1);
}
pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString {
if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{
return if (ip.string_table.getKeyAdapted(s, std.hash_map.StringIndexAdapter{
.bytes = &ip.string_bytes,
})) |index| {
return @as(NullTerminatedString, @enumFromInt(index)).toOptional();
} else {
return .none;
}
}
pub fn stringToSlice(ip: *const InternPool, s: NullTerminatedString) [:0]const u8 {
const string_bytes = ip.string_bytes.items;
const start = @intFromEnum(s);
var end: usize = start;
while (string_bytes[end] != 0) end += 1;
return string_bytes[start..end :0];
}
pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 {
return ip.stringToSlice(s.unwrap() orelse return null);
}
pub fn stringEqlSlice(ip: *const InternPool, a: NullTerminatedString, b: []const u8) bool {
return std.mem.eql(u8, stringToSlice(ip, a), b);
})) |index| @enumFromInt(index) else .none;
}
pub fn typeOf(ip: *const InternPool, index: Index) Index {
@@ -8767,7 +8793,7 @@ pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
return switch (ip.indexToKey(ty)) {
.struct_type => ip.loadStructType(ty).field_types.len,
.anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
.array_type => |array_type| array_type.len + @intFromBool(array_type.sentinel != .none),
.array_type => |array_type| array_type.lenIncludingSentinel(),
.vector_type => |vector_type| vector_type.len,
else => unreachable,
};


@@ -763,11 +763,11 @@ pub const Namespace = struct {
) !InternPool.NullTerminatedString {
const ip = &zcu.intern_pool;
const count = count: {
var count: usize = ip.stringToSlice(name).len + 1;
var count: usize = name.length(ip) + 1;
var cur_ns = &ns;
while (true) {
const decl = zcu.declPtr(cur_ns.decl_index);
count += ip.stringToSlice(decl.name).len + 1;
count += decl.name.length(ip) + 1;
cur_ns = zcu.namespacePtr(cur_ns.parent.unwrap() orelse {
count += ns.file_scope.sub_file_path.len;
break :count count;
@@ -793,7 +793,7 @@
};
}
return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start);
return ip.getOrPutTrailingString(gpa, ip.string_bytes.items.len - start, .no_embedded_nulls);
}
pub fn getType(ns: Namespace, zcu: *Zcu) Type {
@@ -980,17 +980,13 @@ pub const File = struct {
const ip = &mod.intern_pool;
const start = ip.string_bytes.items.len;
try file.renderFullyQualifiedName(ip.string_bytes.writer(mod.gpa));
return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start);
return ip.getOrPutTrailingString(mod.gpa, ip.string_bytes.items.len - start, .no_embedded_nulls);
}
pub fn fullPath(file: File, ally: Allocator) ![]u8 {
return file.mod.root.joinString(ally, file.sub_file_path);
}
pub fn fullPathZ(file: File, ally: Allocator) ![:0]u8 {
return file.mod.root.joinStringZ(ally, file.sub_file_path);
}
pub fn dumpSrc(file: *File, src: LazySrcLoc) void {
const loc = std.zig.findLineColumn(file.source.bytes, src);
std.debug.print("{s}:{d}:{d}\n", .{ file.sub_file_path, loc.line + 1, loc.column + 1 });
@@ -2534,6 +2530,7 @@ fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void {
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
old_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
try old_names.put(zcu.gpa, name_ip, {});
}
@@ -2551,6 +2548,7 @@ fn updateZirRefs(zcu: *Module, file: *File, old_zir: Zir) !void {
const name_ip = try zcu.intern_pool.getOrPutString(
zcu.gpa,
old_zir.nullTerminatedString(name_zir),
.no_embedded_nulls,
);
if (!old_names.swapRemove(name_ip)) continue;
// Name added
@@ -3555,37 +3553,46 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
const gpa = mod.gpa;
const zir = decl.getFileScope(mod).zir;
const builtin_type_target_index: InternPool.Index = blk: {
const builtin_type_target_index: InternPool.Index = ip_index: {
const std_mod = mod.std_mod;
if (decl.getFileScope(mod).mod != std_mod) break :blk .none;
if (decl.getFileScope(mod).mod != std_mod) break :ip_index .none;
// We're in the std module.
const std_file = (try mod.importPkg(std_mod)).file;
const std_decl = mod.declPtr(std_file.root_decl.unwrap().?);
const std_namespace = std_decl.getInnerNamespace(mod).?;
const builtin_str = try ip.getOrPutString(gpa, "builtin");
const builtin_decl = mod.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, DeclAdapter{ .zcu = mod }) orelse break :blk .none);
const builtin_namespace = builtin_decl.getInnerNamespaceIndex(mod).unwrap() orelse break :blk .none;
if (decl.src_namespace != builtin_namespace) break :blk .none;
const builtin_str = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
const builtin_decl = mod.declPtr(std_namespace.decls.getKeyAdapted(builtin_str, DeclAdapter{ .zcu = mod }) orelse break :ip_index .none);
const builtin_namespace = builtin_decl.getInnerNamespaceIndex(mod).unwrap() orelse break :ip_index .none;
if (decl.src_namespace != builtin_namespace) break :ip_index .none;
// We're in builtin.zig. This could be a builtin we need to add to a specific InternPool index.
for ([_]struct { []const u8, InternPool.Index }{
.{ "AtomicOrder", .atomic_order_type },
.{ "AtomicRmwOp", .atomic_rmw_op_type },
.{ "CallingConvention", .calling_convention_type },
.{ "AddressSpace", .address_space_type },
.{ "FloatMode", .float_mode_type },
.{ "ReduceOp", .reduce_op_type },
.{ "CallModifier", .call_modifier_type },
.{ "PrefetchOptions", .prefetch_options_type },
.{ "ExportOptions", .export_options_type },
.{ "ExternOptions", .extern_options_type },
.{ "Type", .type_info_type },
}) |pair| {
const decl_name = ip.stringToSlice(decl.name);
if (std.mem.eql(u8, decl_name, pair[0])) {
break :blk pair[1];
}
for ([_][]const u8{
"AtomicOrder",
"AtomicRmwOp",
"CallingConvention",
"AddressSpace",
"FloatMode",
"ReduceOp",
"CallModifier",
"PrefetchOptions",
"ExportOptions",
"ExternOptions",
"Type",
}, [_]InternPool.Index{
.atomic_order_type,
.atomic_rmw_op_type,
.calling_convention_type,
.address_space_type,
.float_mode_type,
.reduce_op_type,
.call_modifier_type,
.prefetch_options_type,
.export_options_type,
.extern_options_type,
.type_info_type,
}) |type_name, type_ip| {
if (decl.name.eqlSlice(type_name, ip)) break :ip_index type_ip;
}
break :blk .none;
break :ip_index .none;
};
mod.intern_pool.removeDependenciesForDepender(gpa, InternPool.Depender.wrap(.{ .decl = decl_index }));
@@ -3725,8 +3732,7 @@ fn semaDecl(mod: *Module, decl_index: Decl.Index) !SemaDeclResult {
} else if (bytes.len == 0) {
return sema.fail(&block_scope, section_src, "linksection cannot be empty", .{});
}
const section = try ip.getOrPutString(gpa, bytes);
break :blk section.toOptional();
break :blk try ip.getOrPutStringOpt(gpa, bytes, .no_embedded_nulls);
};
decl.@"addrspace" = blk: {
const addrspace_ctx: Sema.AddressSpaceContext = switch (ip.indexToKey(decl_val.toIntern())) {
@@ -4101,7 +4107,10 @@ fn newEmbedFile(
.sentinel = .zero_u8,
.child = .u8_type,
} });
const array_val = try ip.getTrailingAggregate(gpa, array_ty, bytes.len);
const array_val = try ip.get(gpa, .{ .aggregate = .{
.ty = array_ty,
.storage = .{ .bytes = try ip.getOrPutTrailingString(gpa, bytes.len, .maybe_embedded_nulls) },
} });
const ptr_ty = (try mod.ptrType(.{
.child = array_ty,
@@ -4111,7 +4120,6 @@ fn newEmbedFile(
.address_space = .generic,
},
})).toIntern();
const ptr_val = try ip.get(gpa, .{ .ptr = .{
.ty = ptr_ty,
.addr = .{ .anon_decl = .{
@@ -4122,7 +4130,7 @@ fn newEmbedFile(
result.* = new_file;
new_file.* = .{
.sub_file_path = try ip.getOrPutString(gpa, sub_file_path),
.sub_file_path = try ip.getOrPutString(gpa, sub_file_path, .no_embedded_nulls),
.owner = pkg,
.stat = stat,
.val = ptr_val,
@@ -4214,11 +4222,11 @@ const ScanDeclIter = struct {
const zcu = iter.zcu;
const gpa = zcu.gpa;
const ip = &zcu.intern_pool;
var name = try ip.getOrPutStringFmt(gpa, fmt, args);
var name = try ip.getOrPutStringFmt(gpa, fmt, args, .no_embedded_nulls);
var gop = try iter.seen_decls.getOrPut(gpa, name);
var next_suffix: u32 = 0;
while (gop.found_existing) {
name = try ip.getOrPutStringFmt(gpa, fmt ++ "_{d}", args ++ .{next_suffix});
name = try ip.getOrPutStringFmt(gpa, "{}_{d}", .{ name.fmt(ip), next_suffix }, .no_embedded_nulls);
gop = try iter.seen_decls.getOrPut(gpa, name);
next_suffix += 1;
}
@@ -4300,7 +4308,11 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
};
} else info: {
if (iter.pass != .named) return;
const name = try ip.getOrPutString(gpa, zir.nullTerminatedString(declaration.name.toString(zir).?));
const name = try ip.getOrPutString(
gpa,
zir.nullTerminatedString(declaration.name.toString(zir).?),
.no_embedded_nulls,
);
try iter.seen_decls.putNoClobber(gpa, name, {});
break :info .{
name,
@@ -4362,9 +4374,10 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
if (!comp.config.is_test) break :a false;
if (decl_mod != zcu.main_mod) break :a false;
if (is_named_test and comp.test_filters.len > 0) {
const decl_fqn = ip.stringToSlice(try namespace.fullyQualifiedName(zcu, decl_name));
const decl_fqn = try namespace.fullyQualifiedName(zcu, decl_name);
const decl_fqn_slice = decl_fqn.toSlice(ip);
for (comp.test_filters) |test_filter| {
if (mem.indexOf(u8, decl_fqn, test_filter)) |_| break;
if (mem.indexOf(u8, decl_fqn_slice, test_filter)) |_| break;
} else break :a false;
}
zcu.test_functions.putAssumeCapacity(decl_index, {}); // may clobber on incremental update
@@ -4377,8 +4390,8 @@ fn scanDecl(iter: *ScanDeclIter, decl_inst: Zir.Inst.Index) Allocator.Error!void
// `is_export` is unchanged. In this case, the incremental update mechanism will handle
// re-analysis for us if necessary.
if (prev_exported != declaration.flags.is_export or decl.analysis == .unreferenced) {
log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{s}' decl_index={d}", .{
namespace.file_scope.sub_file_path, ip.stringToSlice(decl_name), decl_index,
log.debug("scanDecl queue analyze_decl file='{s}' decl_name='{}' decl_index={d}", .{
namespace.file_scope.sub_file_path, decl_name.fmt(ip), decl_index,
});
comp.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = decl_index });
}
@@ -5300,7 +5313,7 @@ pub fn populateTestFunctions(
const builtin_file = (mod.importPkg(builtin_mod) catch unreachable).file;
const root_decl = mod.declPtr(builtin_file.root_decl.unwrap().?);
const builtin_namespace = mod.namespacePtr(root_decl.src_namespace);
const test_functions_str = try ip.getOrPutString(gpa, "test_functions");
const test_functions_str = try ip.getOrPutString(gpa, "test_functions", .no_embedded_nulls);
const decl_index = builtin_namespace.decls.getKeyAdapted(
test_functions_str,
DeclAdapter{ .zcu = mod },
@@ -5327,16 +5340,16 @@ pub fn populateTestFunctions(
for (test_fn_vals, mod.test_functions.keys()) |*test_fn_val, test_decl_index| {
const test_decl = mod.declPtr(test_decl_index);
const test_decl_name = try gpa.dupe(u8, ip.stringToSlice(try test_decl.fullyQualifiedName(mod)));
defer gpa.free(test_decl_name);
const test_decl_name = try test_decl.fullyQualifiedName(mod);
const test_decl_name_len = test_decl_name.length(ip);
const test_name_anon_decl: InternPool.Key.Ptr.Addr.AnonDecl = n: {
const test_name_ty = try mod.arrayType(.{
.len = test_decl_name.len,
.len = test_decl_name_len,
.child = .u8_type,
});
const test_name_val = try mod.intern(.{ .aggregate = .{
.ty = test_name_ty.toIntern(),
.storage = .{ .bytes = test_decl_name },
.storage = .{ .bytes = test_decl_name.toString() },
} });
break :n .{
.orig_ty = (try mod.singleConstPtrType(test_name_ty)).toIntern(),
@@ -5354,7 +5367,7 @@ pub fn populateTestFunctions(
} }),
.len = try mod.intern(.{ .int = .{
.ty = .usize_type,
.storage = .{ .u64 = test_decl_name.len },
.storage = .{ .u64 = test_decl_name_len },
} }),
} }),
// func

File diff suppressed because it is too large.


@@ -52,30 +52,31 @@ pub fn toIpString(val: Value, ty: Type, mod: *Module) !InternPool.NullTerminated
assert(ty.zigTypeTag(mod) == .Array);
assert(ty.childType(mod).toIntern() == .u8_type);
const ip = &mod.intern_pool;
return switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.getOrPutString(mod.gpa, bytes),
.elems => try arrayToIpString(val, ty.arrayLen(mod), mod),
switch (mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| return bytes.toNullTerminatedString(ty.arrayLen(mod), ip),
.elems => return arrayToIpString(val, ty.arrayLen(mod), mod),
.repeated_elem => |elem| {
const byte = @as(u8, @intCast(Value.fromInterned(elem).toUnsignedInt(mod)));
const len = @as(usize, @intCast(ty.arrayLen(mod)));
const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(mod));
const len: usize = @intCast(ty.arrayLen(mod));
try ip.string_bytes.appendNTimes(mod.gpa, byte, len);
return ip.getOrPutTrailingString(mod.gpa, len);
return ip.getOrPutTrailingString(mod.gpa, len, .no_embedded_nulls);
},
};
}
}
/// Asserts that the value is representable as an array of bytes.
/// Copies the value into a freshly allocated slice of memory, which is owned by the caller.
pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module) ![]u8 {
return switch (mod.intern_pool.indexToKey(val.toIntern())) {
.enum_literal => |enum_literal| allocator.dupe(u8, mod.intern_pool.stringToSlice(enum_literal)),
const ip = &mod.intern_pool;
return switch (ip.indexToKey(val.toIntern())) {
.enum_literal => |enum_literal| allocator.dupe(u8, enum_literal.toSlice(ip)),
.slice => |slice| try arrayToAllocatedBytes(val, Value.fromInterned(slice.len).toUnsignedInt(mod), allocator, mod),
.aggregate => |aggregate| switch (aggregate.storage) {
.bytes => |bytes| try allocator.dupe(u8, bytes),
.bytes => |bytes| try allocator.dupe(u8, bytes.toSlice(ty.arrayLenIncludingSentinel(mod), ip)),
.elems => try arrayToAllocatedBytes(val, ty.arrayLen(mod), allocator, mod),
.repeated_elem => |elem| {
const byte = @as(u8, @intCast(Value.fromInterned(elem).toUnsignedInt(mod)));
const result = try allocator.alloc(u8, @as(usize, @intCast(ty.arrayLen(mod))));
const byte: u8 = @intCast(Value.fromInterned(elem).toUnsignedInt(mod));
const result = try allocator.alloc(u8, @intCast(ty.arrayLen(mod)));
@memset(result, byte);
return result;
},
@@ -85,10 +86,10 @@ pub fn toAllocatedBytes(val: Value, ty: Type, allocator: Allocator, mod: *Module
}
fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Module) ![]u8 {
const result = try allocator.alloc(u8, @as(usize, @intCast(len)));
const result = try allocator.alloc(u8, @intCast(len));
for (result, 0..) |*elem, i| {
const elem_val = try val.elemValue(mod, i);
elem.* = @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
elem.* = @intCast(elem_val.toUnsignedInt(mod));
}
return result;
}
@@ -96,7 +97,7 @@ fn arrayToAllocatedBytes(val: Value, len: u64, allocator: Allocator, mod: *Modul
fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTerminatedString {
const gpa = mod.gpa;
const ip = &mod.intern_pool;
const len = @as(usize, @intCast(len_u64));
const len: usize = @intCast(len_u64);
try ip.string_bytes.ensureUnusedCapacity(gpa, len);
for (0..len) |i| {
// I don't think elemValue has the possibility to affect ip.string_bytes. Let's
@@ -104,10 +105,10 @@ fn arrayToIpString(val: Value, len_u64: u64, mod: *Module) !InternPool.NullTermi
const prev = ip.string_bytes.items.len;
const elem_val = try val.elemValue(mod, i);
assert(ip.string_bytes.items.len == prev);
const byte = @as(u8, @intCast(elem_val.toUnsignedInt(mod)));
const byte: u8 = @intCast(elem_val.toUnsignedInt(mod));
ip.string_bytes.appendAssumeCapacity(byte);
}
return ip.getOrPutTrailingString(gpa, len);
return ip.getOrPutTrailingString(gpa, len, .no_embedded_nulls);
}
pub fn fromInterned(i: InternPool.Index) Value {
@@ -256,7 +257,7 @@ pub fn getUnsignedIntAdvanced(val: Value, mod: *Module, opt_sema: ?*Sema) !?u64
const base_addr = (try Value.fromInterned(field.base).getUnsignedIntAdvanced(mod, opt_sema)) orelse return null;
const struct_ty = Value.fromInterned(field.base).typeOf(mod).childType(mod);
if (opt_sema) |sema| try sema.resolveTypeLayout(struct_ty);
return base_addr + struct_ty.structFieldOffset(@as(usize, @intCast(field.index)), mod);
return base_addr + struct_ty.structFieldOffset(@intCast(field.index), mod);
},
else => null,
},
@@ -351,17 +352,17 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
bigint.writeTwosComplement(buffer[0..byte_count], endian);
},
.Float => switch (ty.floatBits(target)) {
16 => std.mem.writeInt(u16, buffer[0..2], @as(u16, @bitCast(val.toFloat(f16, mod))), endian),
32 => std.mem.writeInt(u32, buffer[0..4], @as(u32, @bitCast(val.toFloat(f32, mod))), endian),
64 => std.mem.writeInt(u64, buffer[0..8], @as(u64, @bitCast(val.toFloat(f64, mod))), endian),
80 => std.mem.writeInt(u80, buffer[0..10], @as(u80, @bitCast(val.toFloat(f80, mod))), endian),
128 => std.mem.writeInt(u128, buffer[0..16], @as(u128, @bitCast(val.toFloat(f128, mod))), endian),
16 => std.mem.writeInt(u16, buffer[0..2], @bitCast(val.toFloat(f16, mod)), endian),
32 => std.mem.writeInt(u32, buffer[0..4], @bitCast(val.toFloat(f32, mod)), endian),
64 => std.mem.writeInt(u64, buffer[0..8], @bitCast(val.toFloat(f64, mod)), endian),
80 => std.mem.writeInt(u80, buffer[0..10], @bitCast(val.toFloat(f80, mod)), endian),
128 => std.mem.writeInt(u128, buffer[0..16], @bitCast(val.toFloat(f128, mod)), endian),
else => unreachable,
},
.Array => {
const len = ty.arrayLen(mod);
const elem_ty = ty.childType(mod);
const elem_size = @as(usize, @intCast(elem_ty.abiSize(mod)));
const elem_size: usize = @intCast(elem_ty.abiSize(mod));
var elem_i: usize = 0;
var buf_off: usize = 0;
while (elem_i < len) : (elem_i += 1) {
@@ -380,17 +381,17 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
const struct_type = mod.typeToStruct(ty) orelse return error.IllDefinedMemoryLayout;
switch (struct_type.layout) {
.auto => return error.IllDefinedMemoryLayout,
.@"extern" => for (0..struct_type.field_types.len) |i| {
const off: usize = @intCast(ty.structFieldOffset(i, mod));
.@"extern" => for (0..struct_type.field_types.len) |field_index| {
const off: usize = @intCast(ty.structFieldOffset(field_index, mod));
const field_val = Value.fromInterned(switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| {
buffer[off] = bytes[i];
buffer[off] = bytes.at(field_index, ip);
continue;
},
.elems => |elems| elems[i],
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
});
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[i]);
const field_ty = Type.fromInterned(struct_type.field_types.get(ip)[field_index]);
try writeToMemory(field_val, field_ty, mod, buffer[off..]);
},
.@"packed" => {
@@ -423,7 +424,7 @@ pub fn writeToMemory(val: Value, ty: Type, mod: *Module, buffer: []u8) error{
const field_index = mod.unionTagFieldIndex(union_obj, union_tag).?;
const field_type = Type.fromInterned(union_obj.field_types.get(&mod.intern_pool)[field_index]);
const field_val = try val.fieldValue(mod, field_index);
const byte_count = @as(usize, @intCast(field_type.abiSize(mod)));
const byte_count: usize = @intCast(field_type.abiSize(mod));
return writeToMemory(field_val, field_type, mod, buffer[0..byte_count]);
} else {
const backing_ty = try ty.unionBackingType(mod);
@@ -471,7 +472,7 @@ pub fn writeToPackedMemory(
const target = mod.getTarget();
const endian = target.cpu.arch.endian();
if (val.isUndef(mod)) {
const bit_size = @as(usize, @intCast(ty.bitSize(mod)));
const bit_size: usize = @intCast(ty.bitSize(mod));
std.mem.writeVarPackedInt(buffer, bit_offset, bit_size, @as(u1, 0), endian);
return;
}
@@ -507,17 +508,17 @@ pub fn writeToPackedMemory(
}
},
.Float => switch (ty.floatBits(target)) {
16 => std.mem.writePackedInt(u16, buffer, bit_offset, @as(u16, @bitCast(val.toFloat(f16, mod))), endian),
32 => std.mem.writePackedInt(u32, buffer, bit_offset, @as(u32, @bitCast(val.toFloat(f32, mod))), endian),
64 => std.mem.writePackedInt(u64, buffer, bit_offset, @as(u64, @bitCast(val.toFloat(f64, mod))), endian),
80 => std.mem.writePackedInt(u80, buffer, bit_offset, @as(u80, @bitCast(val.toFloat(f80, mod))), endian),
128 => std.mem.writePackedInt(u128, buffer, bit_offset, @as(u128, @bitCast(val.toFloat(f128, mod))), endian),
16 => std.mem.writePackedInt(u16, buffer, bit_offset, @bitCast(val.toFloat(f16, mod)), endian),
32 => std.mem.writePackedInt(u32, buffer, bit_offset, @bitCast(val.toFloat(f32, mod)), endian),
64 => std.mem.writePackedInt(u64, buffer, bit_offset, @bitCast(val.toFloat(f64, mod)), endian),
80 => std.mem.writePackedInt(u80, buffer, bit_offset, @bitCast(val.toFloat(f80, mod)), endian),
128 => std.mem.writePackedInt(u128, buffer, bit_offset, @bitCast(val.toFloat(f128, mod)), endian),
else => unreachable,
},
.Vector => {
const elem_ty = ty.childType(mod);
const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod)));
const len = @as(usize, @intCast(ty.arrayLen(mod)));
const elem_bit_size: u16 = @intCast(elem_ty.bitSize(mod));
const len: usize = @intCast(ty.arrayLen(mod));
var bits: u16 = 0;
var elem_i: usize = 0;
@@ -644,22 +645,22 @@ pub fn readFromMemory(
.Float => return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @as(f16, @bitCast(std.mem.readInt(u16, buffer[0..2], endian))) },
32 => .{ .f32 = @as(f32, @bitCast(std.mem.readInt(u32, buffer[0..4], endian))) },
64 => .{ .f64 = @as(f64, @bitCast(std.mem.readInt(u64, buffer[0..8], endian))) },
80 => .{ .f80 = @as(f80, @bitCast(std.mem.readInt(u80, buffer[0..10], endian))) },
128 => .{ .f128 = @as(f128, @bitCast(std.mem.readInt(u128, buffer[0..16], endian))) },
16 => .{ .f16 = @bitCast(std.mem.readInt(u16, buffer[0..2], endian)) },
32 => .{ .f32 = @bitCast(std.mem.readInt(u32, buffer[0..4], endian)) },
64 => .{ .f64 = @bitCast(std.mem.readInt(u64, buffer[0..8], endian)) },
80 => .{ .f80 = @bitCast(std.mem.readInt(u80, buffer[0..10], endian)) },
128 => .{ .f128 = @bitCast(std.mem.readInt(u128, buffer[0..16], endian)) },
else => unreachable,
},
} }))),
.Array => {
const elem_ty = ty.childType(mod);
const elem_size = elem_ty.abiSize(mod);
const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod))));
const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(mod)));
var offset: usize = 0;
for (elems) |*elem| {
elem.* = (try readFromMemory(elem_ty, mod, buffer[offset..], arena)).toIntern();
offset += @as(usize, @intCast(elem_size));
offset += @intCast(elem_size);
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
@@ -795,7 +796,7 @@ pub fn readFromPackedMemory(
};
// Slow path, we have to construct a big-int
const abi_size = @as(usize, @intCast(ty.abiSize(mod)));
const abi_size: usize = @intCast(ty.abiSize(mod));
const Limb = std.math.big.Limb;
const limb_count = (abi_size + @sizeOf(Limb) - 1) / @sizeOf(Limb);
const limbs_buffer = try arena.alloc(Limb, limb_count);
@@ -812,20 +813,20 @@ pub fn readFromPackedMemory(
.Float => return Value.fromInterned((try mod.intern(.{ .float = .{
.ty = ty.toIntern(),
.storage = switch (ty.floatBits(target)) {
16 => .{ .f16 = @as(f16, @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian))) },
32 => .{ .f32 = @as(f32, @bitCast(std.mem.readPackedInt(u32, buffer, bit_offset, endian))) },
64 => .{ .f64 = @as(f64, @bitCast(std.mem.readPackedInt(u64, buffer, bit_offset, endian))) },
80 => .{ .f80 = @as(f80, @bitCast(std.mem.readPackedInt(u80, buffer, bit_offset, endian))) },
128 => .{ .f128 = @as(f128, @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian))) },
16 => .{ .f16 = @bitCast(std.mem.readPackedInt(u16, buffer, bit_offset, endian)) },
32 => .{ .f32 = @bitCast(std.mem.readPackedInt(u32, buffer, bit_offset, endian)) },
64 => .{ .f64 = @bitCast(std.mem.readPackedInt(u64, buffer, bit_offset, endian)) },
80 => .{ .f80 = @bitCast(std.mem.readPackedInt(u80, buffer, bit_offset, endian)) },
128 => .{ .f128 = @bitCast(std.mem.readPackedInt(u128, buffer, bit_offset, endian)) },
else => unreachable,
},
} }))),
.Vector => {
const elem_ty = ty.childType(mod);
const elems = try arena.alloc(InternPool.Index, @as(usize, @intCast(ty.arrayLen(mod))));
const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(mod)));
var bits: u16 = 0;
const elem_bit_size = @as(u16, @intCast(elem_ty.bitSize(mod)));
const elem_bit_size: u16 = @intCast(elem_ty.bitSize(mod));
for (elems, 0..) |_, i| {
// On big-endian systems, LLVM reverses the element order of vectors by default
const tgt_elem_i = if (endian == .big) elems.len - i - 1 else i;
@@ -909,7 +910,7 @@ fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 {
var i: usize = limbs.len;
while (i != 0) {
i -= 1;
const limb: f128 = @as(f128, @floatFromInt(limbs[i]));
const limb: f128 = @floatFromInt(limbs[i]);
result = @mulAdd(f128, base, result, limb);
}
if (positive) {
@@ -934,7 +935,7 @@ pub fn ctz(val: Value, ty: Type, mod: *Module) u64 {
pub fn popCount(val: Value, ty: Type, mod: *Module) u64 {
var bigint_buf: BigIntSpace = undefined;
const bigint = val.toBigInt(&bigint_buf, mod);
return @as(u64, @intCast(bigint.popCount(ty.intInfo(mod).bits)));
return @intCast(bigint.popCount(ty.intInfo(mod).bits));
}
pub fn bitReverse(val: Value, ty: Type, mod: *Module, arena: Allocator) !Value {
@@ -1191,7 +1192,7 @@ pub fn compareAllWithZeroAdvancedExtra(
inline else => |x| if (std.math.isNan(x)) return op == .neq,
},
.aggregate => |aggregate| return switch (aggregate.storage) {
.bytes => |bytes| for (bytes) |byte| {
.bytes => |bytes| for (bytes.toSlice(lhs.typeOf(mod).arrayLenIncludingSentinel(mod), &mod.intern_pool)) |byte| {
if (!std.math.order(byte, 0).compare(op)) break false;
} else true,
.elems => |elems| for (elems) |elem| {
@@ -1279,7 +1280,7 @@ pub fn elemValue(val: Value, zcu: *Zcu, index: usize) Allocator.Error!Value {
if (index < len) return Value.fromInterned(switch (aggregate.storage) {
.bytes => |bytes| try zcu.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes[index] },
.storage = .{ .u64 = bytes.at(index, ip) },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
@@ -1318,28 +1319,37 @@ pub fn sliceArray(
start: usize,
end: usize,
) error{OutOfMemory}!Value {
// TODO: write something like getCoercedInts to avoid needing to dupe
const mod = sema.mod;
const aggregate = mod.intern_pool.indexToKey(val.toIntern()).aggregate;
return Value.fromInterned(try mod.intern(.{ .aggregate = .{
.ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
.array_type => |array_type| try mod.arrayType(.{
.len = @as(u32, @intCast(end - start)),
.child = array_type.child,
.sentinel = if (end == array_type.len) array_type.sentinel else .none,
}),
.vector_type => |vector_type| try mod.vectorType(.{
.len = @as(u32, @intCast(end - start)),
.child = vector_type.child,
}),
else => unreachable,
}.toIntern(),
.storage = switch (aggregate.storage) {
.bytes => .{ .bytes = try sema.arena.dupe(u8, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.bytes[start..end]) },
.elems => .{ .elems = try sema.arena.dupe(InternPool.Index, mod.intern_pool.indexToKey(val.toIntern()).aggregate.storage.elems[start..end]) },
.repeated_elem => |elem| .{ .repeated_elem = elem },
const ip = &mod.intern_pool;
return Value.fromInterned(try mod.intern(.{
.aggregate = .{
.ty = switch (mod.intern_pool.indexToKey(mod.intern_pool.typeOf(val.toIntern()))) {
.array_type => |array_type| try mod.arrayType(.{
.len = @intCast(end - start),
.child = array_type.child,
.sentinel = if (end == array_type.len) array_type.sentinel else .none,
}),
.vector_type => |vector_type| try mod.vectorType(.{
.len = @intCast(end - start),
.child = vector_type.child,
}),
else => unreachable,
}.toIntern(),
.storage = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| storage: {
try ip.string_bytes.ensureUnusedCapacity(sema.gpa, end - start + 1);
break :storage .{ .bytes = try ip.getOrPutString(
sema.gpa,
bytes.toSlice(end, ip)[start..],
.maybe_embedded_nulls,
) };
},
// TODO: write something like getCoercedInts to avoid needing to dupe
.elems => |elems| .{ .elems = try sema.arena.dupe(InternPool.Index, elems[start..end]) },
.repeated_elem => |elem| .{ .repeated_elem = elem },
},
},
} }));
}));
}
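In the new bytes path of `sliceArray`, the order of operations matters: `bytes.toSlice` aliases `string_bytes` itself, so `ensureUnusedCapacity` reserves space up front to guarantee that the append inside `getOrPutString` cannot reallocate out from under its own source slice. A minimal demonstration of the aliasing hazard being avoided:

const std = @import("std");

test "reserve before appending a buffer to itself" {
    var list = std.ArrayList(u8).init(std.testing.allocator);
    defer list.deinit();
    try list.appendSlice("abcdef");
    // The source slice points into `list`; reserving first means the copy
    // below cannot trigger a reallocation that would invalidate it.
    try list.ensureUnusedCapacity(3);
    list.appendSliceAssumeCapacity(list.items[1..4]);
    try std.testing.expectEqualStrings("abcdefbcd", list.items);
}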
pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value {
@@ -1350,7 +1360,7 @@ pub fn fieldValue(val: Value, mod: *Module, index: usize) !Value {
.aggregate => |aggregate| Value.fromInterned(switch (aggregate.storage) {
.bytes => |bytes| try mod.intern(.{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = bytes[index] },
.storage = .{ .u64 = bytes.at(index, &mod.intern_pool) },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
@@ -1461,7 +1471,7 @@ pub fn getErrorName(val: Value, mod: *const Module) InternPool.OptionalNullTermi
pub fn getErrorInt(val: Value, mod: *const Module) Module.ErrorInt {
return if (getErrorName(val, mod).unwrap()) |err_name|
@as(Module.ErrorInt, @intCast(mod.global_error_set.getIndex(err_name).?))
@intCast(mod.global_error_set.getIndex(err_name).?)
else
0;
}
@@ -2413,14 +2423,14 @@ pub fn intTruncBitsAsValue(
for (result_data, 0..) |*scalar, i| {
const elem_val = try val.elemValue(mod, i);
const bits_elem = try bits.elemValue(mod, i);
scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @as(u16, @intCast(bits_elem.toUnsignedInt(mod))), mod)).toIntern();
scalar.* = (try intTruncScalar(elem_val, scalar_ty, allocator, signedness, @intCast(bits_elem.toUnsignedInt(mod)), mod)).toIntern();
}
return Value.fromInterned((try mod.intern(.{ .aggregate = .{
.ty = ty.toIntern(),
.storage = .{ .elems = result_data },
} })));
}
return intTruncScalar(val, ty, allocator, signedness, @as(u16, @intCast(bits.toUnsignedInt(mod))), mod);
return intTruncScalar(val, ty, allocator, signedness, @intCast(bits.toUnsignedInt(mod)), mod);
}
pub fn intTruncScalar(
@@ -2468,7 +2478,7 @@ pub fn shlScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *M
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod)));
const shift: usize = @intCast(rhs.toUnsignedInt(mod));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@ -2530,7 +2540,7 @@ pub fn shlWithOverflowScalar(
const info = ty.intInfo(mod);
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod)));
const shift: usize = @intCast(rhs.toUnsignedInt(mod));
const limbs = try allocator.alloc(
std.math.big.Limb,
lhs_bigint.limbs.len + (shift / (@sizeOf(std.math.big.Limb) * 8)) + 1,
@ -2587,7 +2597,7 @@ pub fn shlSatScalar(
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod)));
const shift: usize = @intCast(rhs.toUnsignedInt(mod));
const limbs = try arena.alloc(
std.math.big.Limb,
std.math.big.int.calcTwosCompLimbCount(info.bits) + 1,
@ -2659,7 +2669,7 @@ pub fn shrScalar(lhs: Value, rhs: Value, ty: Type, allocator: Allocator, mod: *M
// resorting to BigInt first.
var lhs_space: Value.BigIntSpace = undefined;
const lhs_bigint = lhs.toBigInt(&lhs_space, mod);
const shift = @as(usize, @intCast(rhs.toUnsignedInt(mod)));
const shift: usize = @intCast(rhs.toUnsignedInt(mod));
const result_limbs = lhs_bigint.limbs.len -| (shift / (@sizeOf(std.math.big.Limb) * 8));
if (result_limbs == 0) {

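The hunks above all reduce to two access patterns for the new `String`-backed byte storage: a whole-slice view whose length the caller derives from the aggregate's type, and a single-byte view that never materializes a slice. A minimal sketch using only the `toSlice` and `at` accessors exercised above; the wrapper function and its parameter names are illustrative, not part of the commit:

fn byteStorageViews(ip: *const InternPool, bytes: InternPool.String, len: u64, i: u64) void {
    // Whole-slice view: the length comes from the type, e.g.
    // array_type.lenIncludingSentinel() or vector_type.len.
    const slice: []const u8 = bytes.toSlice(len, ip);
    // Single-byte view: what the per-field integer loads use.
    const byte: u8 = bytes.at(i, ip);
    _ = slice;
    _ = byte;
}
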
View File

@ -4345,8 +4345,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallModifier
.data = .{ .reg = .x30 },
});
} else if (func_value.getExternFunc(mod)) |extern_func| {
const decl_name = mod.intern_pool.stringToSlice(mod.declPtr(extern_func.decl).name);
const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
const decl_name = mod.declPtr(extern_func.decl).name.toSlice(&mod.intern_pool);
const lib_name = extern_func.lib_name.toSlice(&mod.intern_pool);
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
_ = macho_file;
@panic("TODO airCall");

View File

@ -2199,9 +2199,9 @@ fn airCall(func: *CodeGen, inst: Air.Inst.Index, modifier: std.builtin.CallModif
const atom = func.bin_file.getAtomPtr(atom_index);
const type_index = try func.bin_file.storeDeclType(extern_func.decl, func_type);
try func.bin_file.addOrUpdateImport(
mod.intern_pool.stringToSlice(ext_decl.name),
ext_decl.name.toSlice(&mod.intern_pool),
atom.sym_index,
mod.intern_pool.stringToSliceUnwrap(ext_decl.getOwnedExternFunc(mod).?.lib_name),
ext_decl.getOwnedExternFunc(mod).?.lib_name.toSlice(&mod.intern_pool),
type_index,
);
break :blk extern_func.decl;
@ -7236,8 +7236,8 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const fqn = ip.stringToSlice(try mod.declPtr(enum_decl_index).fullyQualifiedName(mod));
const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{fqn});
const fqn = try mod.declPtr(enum_decl_index).fullyQualifiedName(mod);
const func_name = try std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{fqn.fmt(ip)});
// check if we already generated code for this.
if (func.bin_file.findGlobalSymbol(func_name)) |loc| {
@ -7268,17 +7268,18 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// generate an if-else chain for each tag value as well as constant.
const tag_names = enum_ty.enumFields(mod);
for (0..tag_names.len) |tag_index| {
const tag_name = ip.stringToSlice(tag_names.get(ip)[tag_index]);
const tag_name = tag_names.get(ip)[tag_index];
const tag_name_len = tag_name.length(ip);
// for each tag name, create an unnamed const,
// and then get a pointer to its value.
const name_ty = try mod.arrayType(.{
.len = tag_name.len,
.len = tag_name_len,
.child = .u8_type,
.sentinel = .zero_u8,
});
const name_val = try mod.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = tag_name },
.storage = .{ .bytes = tag_name.toString() },
} });
const tag_sym_index = try func.bin_file.lowerUnnamedConst(
Value.fromInterned(name_val),
@ -7338,7 +7339,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// store length
try writer.writeByte(std.wasm.opcode(.i32_const));
try leb.writeULEB128(writer, @as(u32, @intCast(tag_name.len)));
try leb.writeULEB128(writer, @as(u32, @intCast(tag_name_len)));
try writer.writeByte(std.wasm.opcode(.i32_store));
try leb.writeULEB128(writer, encoded_alignment);
try leb.writeULEB128(writer, @as(u32, 4));
@ -7359,7 +7360,7 @@ fn getTagNameFunction(func: *CodeGen, enum_ty: Type) InnerError!u32 {
// store length
try writer.writeByte(std.wasm.opcode(.i64_const));
try leb.writeULEB128(writer, @as(u64, @intCast(tag_name.len)));
try leb.writeULEB128(writer, @as(u64, @intCast(tag_name_len)));
try writer.writeByte(std.wasm.opcode(.i64_store));
try leb.writeULEB128(writer, encoded_alignment);
try leb.writeULEB128(writer, @as(u32, 8));

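The `__zig_tag_name_{}` change above shows another shape that recurs through the commit: where a name was previously copied out as a slice just to feed a format string, the interned string is now formatted in place via its `fmt` adapter. A hedged before/after sketch; `arena`, `ip`, and `name` stand in for the locals at each call site:

fn tagNameSymbol(arena: std.mem.Allocator, ip: *const InternPool, name: InternPool.NullTerminatedString) ![:0]u8 {
    // Old style, removed by this commit: materialize the slice, then format:
    //   const s = ip.stringToSlice(name);
    //   return std.fmt.allocPrintZ(arena, "__zig_tag_name_{s}", .{s});
    // New style: the `{}` specifier invokes the interned string's formatter lazily.
    return std.fmt.allocPrintZ(arena, "__zig_tag_name_{}", .{name.fmt(ip)});
}
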
View File

@ -2247,7 +2247,7 @@ fn genLazy(self: *Self, lazy_sym: link.File.LazySymbol) InnerError!void {
var data_off: i32 = 0;
const tag_names = enum_ty.enumFields(mod);
for (exitlude_jump_relocs, 0..) |*exitlude_jump_reloc, tag_index| {
const tag_name_len = ip.stringToSlice(tag_names.get(ip)[tag_index]).len;
const tag_name_len = tag_names.get(ip)[tag_index].length(ip);
const tag_val = try mod.enumValueFieldIndex(enum_ty, @intCast(tag_index));
const tag_mcv = try self.genTypedValue(tag_val);
try self.genBinOpMir(.{ ._, .cmp }, enum_ty, enum_mcv, tag_mcv);
@ -12314,8 +12314,8 @@ fn genCall(self: *Self, info: union(enum) {
},
.extern_func => |extern_func| {
const owner_decl = mod.declPtr(extern_func.decl);
const lib_name = mod.intern_pool.stringToSliceUnwrap(extern_func.lib_name);
const decl_name = mod.intern_pool.stringToSlice(owner_decl.name);
const lib_name = extern_func.lib_name.toSlice(&mod.intern_pool);
const decl_name = owner_decl.name.toSlice(&mod.intern_pool);
try self.genExternSymbolRef(.call, lib_name, decl_name);
},
else => return self.fail("TODO implement calling bitcasted functions", .{}),

View File

@ -97,7 +97,7 @@ fn writeFloat(comptime F: type, f: F, target: Target, endian: std.builtin.Endian
_ = target;
const bits = @typeInfo(F).Float.bits;
const Int = @Type(.{ .Int = .{ .signedness = .unsigned, .bits = bits } });
const int = @as(Int, @bitCast(f));
const int: Int = @bitCast(f);
mem.writeInt(Int, code[0..@divExact(bits, 8)], int, endian);
}
@ -136,24 +136,24 @@ pub fn generateLazySymbol(
if (lazy_sym.ty.isAnyError(zcu)) {
alignment.* = .@"4";
const err_names = zcu.global_error_set.keys();
mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(err_names.len)), endian);
mem.writeInt(u32, try code.addManyAsArray(4), @intCast(err_names.len), endian);
var offset = code.items.len;
try code.resize((1 + err_names.len + 1) * 4);
for (err_names) |err_name_nts| {
const err_name = zcu.intern_pool.stringToSlice(err_name_nts);
mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian);
const err_name = err_name_nts.toSlice(ip);
mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian);
offset += 4;
try code.ensureUnusedCapacity(err_name.len + 1);
code.appendSliceAssumeCapacity(err_name);
code.appendAssumeCapacity(0);
}
mem.writeInt(u32, code.items[offset..][0..4], @as(u32, @intCast(code.items.len)), endian);
mem.writeInt(u32, code.items[offset..][0..4], @intCast(code.items.len), endian);
return Result.ok;
} else if (lazy_sym.ty.zigTypeTag(zcu) == .Enum) {
alignment.* = .@"1";
const tag_names = lazy_sym.ty.enumFields(zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = zcu.intern_pool.stringToSlice(tag_names.get(ip)[tag_index]);
const tag_name = tag_names.get(ip)[tag_index].toSlice(ip);
try code.ensureUnusedCapacity(tag_name.len + 1);
code.appendSliceAssumeCapacity(tag_name);
code.appendAssumeCapacity(0);
@ -241,13 +241,13 @@ pub fn generateSymbol(
},
.err => |err| {
const int = try mod.getErrorValue(err.name);
try code.writer().writeInt(u16, @as(u16, @intCast(int)), endian);
try code.writer().writeInt(u16, @intCast(int), endian);
},
.error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(mod);
const err_val = switch (error_union.val) {
.err_name => |err_name| @as(u16, @intCast(try mod.getErrorValue(err_name))),
.payload => @as(u16, 0),
const err_val: u16 = switch (error_union.val) {
.err_name => |err_name| @intCast(try mod.getErrorValue(err_name)),
.payload => 0,
};
if (!payload_ty.hasRuntimeBitsIgnoreComptime(mod)) {
@ -357,15 +357,13 @@ pub fn generateSymbol(
},
.aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) {
.array_type => |array_type| switch (aggregate.storage) {
.bytes => |bytes| try code.appendSlice(bytes),
.bytes => |bytes| try code.appendSlice(bytes.toSlice(array_type.lenIncludingSentinel(), ip)),
.elems, .repeated_elem => {
var index: u64 = 0;
const len_including_sentinel =
array_type.len + @intFromBool(array_type.sentinel != .none);
while (index < len_including_sentinel) : (index += 1) {
while (index < array_type.lenIncludingSentinel()) : (index += 1) {
switch (try generateSymbol(bin_file, src_loc, Value.fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[@as(usize, @intCast(index))],
.elems => |elems| elems[@intCast(index)],
.repeated_elem => |elem| if (index < array_type.len)
elem
else
@ -399,7 +397,7 @@ pub fn generateSymbol(
}) {
.bool_true => true,
.bool_false => false,
else => |elem| switch (mod.intern_pool.indexToKey(elem)) {
else => |elem| switch (ip.indexToKey(elem)) {
.undef => continue,
.int => |int| switch (int.storage) {
.u64 => |x| switch (x) {
@ -420,7 +418,7 @@ pub fn generateSymbol(
}
} else {
switch (aggregate.storage) {
.bytes => |bytes| try code.appendSlice(bytes),
.bytes => |bytes| try code.appendSlice(bytes.toSlice(vector_type.len, ip)),
.elems, .repeated_elem => {
var index: u64 = 0;
while (index < vector_type.len) : (index += 1) {
@ -457,7 +455,7 @@ pub fn generateSymbol(
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[index] },
.storage = .{ .u64 = bytes.at(index, ip) },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
@ -493,7 +491,7 @@ pub fn generateSymbol(
const field_val = switch (aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[index] },
.storage = .{ .u64 = bytes.at(index, ip) },
} }),
.elems => |elems| elems[index],
.repeated_elem => |elem| elem,
@ -513,7 +511,7 @@ pub fn generateSymbol(
} else {
Value.fromInterned(field_val).writeToPackedMemory(Type.fromInterned(field_ty), mod, code.items[current_pos..], bits) catch unreachable;
}
bits += @as(u16, @intCast(Type.fromInterned(field_ty).bitSize(mod)));
bits += @intCast(Type.fromInterned(field_ty).bitSize(mod));
}
},
.auto, .@"extern" => {
@ -529,7 +527,7 @@ pub fn generateSymbol(
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(mod.gpa, .{ .int = .{
.ty = field_ty,
.storage = .{ .u64 = bytes[field_index] },
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
@ -625,7 +623,8 @@ fn lowerParentPtr(
reloc_info: RelocInfo,
) CodeGenError!Result {
const mod = bin_file.comp.module.?;
const ptr = mod.intern_pool.indexToKey(parent_ptr).ptr;
const ip = &mod.intern_pool;
const ptr = ip.indexToKey(parent_ptr).ptr;
return switch (ptr.addr) {
.decl => |decl| try lowerDeclRef(bin_file, src_loc, decl, code, debug_output, reloc_info),
.anon_decl => |ad| try lowerAnonDeclRef(bin_file, src_loc, ad, code, debug_output, reloc_info),
@ -636,10 +635,10 @@ fn lowerParentPtr(
eu_payload,
code,
debug_output,
reloc_info.offset(@as(u32, @intCast(errUnionPayloadOffset(
Type.fromInterned(mod.intern_pool.typeOf(eu_payload)),
reloc_info.offset(@intCast(errUnionPayloadOffset(
Type.fromInterned(ip.typeOf(eu_payload)),
mod,
)))),
))),
),
.opt_payload => |opt_payload| try lowerParentPtr(
bin_file,
@ -655,19 +654,19 @@ fn lowerParentPtr(
elem.base,
code,
debug_output,
reloc_info.offset(@as(u32, @intCast(elem.index *
Type.fromInterned(mod.intern_pool.typeOf(elem.base)).elemType2(mod).abiSize(mod)))),
reloc_info.offset(@intCast(elem.index *
Type.fromInterned(ip.typeOf(elem.base)).elemType2(mod).abiSize(mod))),
),
.field => |field| {
const base_ptr_ty = mod.intern_pool.typeOf(field.base);
const base_ty = mod.intern_pool.indexToKey(base_ptr_ty).ptr_type.child;
const base_ptr_ty = ip.typeOf(field.base);
const base_ty = ip.indexToKey(base_ptr_ty).ptr_type.child;
return lowerParentPtr(
bin_file,
src_loc,
field.base,
code,
debug_output,
reloc_info.offset(switch (mod.intern_pool.indexToKey(base_ty)) {
reloc_info.offset(switch (ip.indexToKey(base_ty)) {
.ptr_type => |ptr_type| switch (ptr_type.flags.size) {
.One, .Many, .C => unreachable,
.Slice => switch (field.index) {
@ -723,11 +722,12 @@ fn lowerAnonDeclRef(
) CodeGenError!Result {
_ = debug_output;
const zcu = lf.comp.module.?;
const ip = &zcu.intern_pool;
const target = lf.comp.root_mod.resolved_target.result;
const ptr_width_bytes = @divExact(target.ptrBitWidth(), 8);
const decl_val = anon_decl.val;
const decl_ty = Type.fromInterned(zcu.intern_pool.typeOf(decl_val));
const decl_ty = Type.fromInterned(ip.typeOf(decl_val));
log.debug("lowerAnonDecl: ty = {}", .{decl_ty.fmt(zcu)});
const is_fn_body = decl_ty.zigTypeTag(zcu) == .Fn;
if (!is_fn_body and !decl_ty.hasRuntimeBits(zcu)) {
@ -735,7 +735,7 @@ fn lowerAnonDeclRef(
return Result.ok;
}
const decl_align = zcu.intern_pool.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment;
const decl_align = ip.indexToKey(anon_decl.orig_ty).ptr_type.flags.alignment;
const res = try lf.lowerAnonDecl(decl_val, decl_align, src_loc);
switch (res) {
.ok => {},
@ -787,8 +787,8 @@ fn lowerDeclRef(
});
const endian = target.cpu.arch.endian();
switch (ptr_width) {
16 => mem.writeInt(u16, try code.addManyAsArray(2), @as(u16, @intCast(vaddr)), endian),
32 => mem.writeInt(u32, try code.addManyAsArray(4), @as(u32, @intCast(vaddr)), endian),
16 => mem.writeInt(u16, try code.addManyAsArray(2), @intCast(vaddr), endian),
32 => mem.writeInt(u32, try code.addManyAsArray(4), @intCast(vaddr), endian),
64 => mem.writeInt(u64, try code.addManyAsArray(8), vaddr, endian),
else => unreachable,
}
@ -859,6 +859,7 @@ fn genDeclRef(
ptr_decl_index: InternPool.DeclIndex,
) CodeGenError!GenResult {
const zcu = lf.comp.module.?;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
log.debug("genDeclRef: val = {}", .{val.fmtValue(zcu)});
@ -869,7 +870,7 @@ fn genDeclRef(
const ptr_bits = target.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const decl_index = switch (zcu.intern_pool.indexToKey(ptr_decl.val.toIntern())) {
const decl_index = switch (ip.indexToKey(ptr_decl.val.toIntern())) {
.func => |func| func.owner_decl,
.extern_func => |extern_func| extern_func.decl,
else => ptr_decl_index,
@ -909,12 +910,9 @@ fn genDeclRef(
if (lf.cast(link.File.Elf)) |elf_file| {
if (is_extern) {
const name = zcu.intern_pool.stringToSlice(decl.name);
const name = decl.name.toSlice(ip);
// TODO audit this
const lib_name = if (decl.getOwnedVariable(zcu)) |ov|
zcu.intern_pool.stringToSliceUnwrap(ov.lib_name)
else
null;
const lib_name = if (decl.getOwnedVariable(zcu)) |ov| ov.lib_name.toSlice(ip) else null;
const sym_index = try elf_file.getGlobalSymbol(name, lib_name);
elf_file.symbol(elf_file.zigObjectPtr().?.symbol(sym_index)).flags.needs_got = true;
return GenResult.mcv(.{ .load_symbol = sym_index });
@ -927,11 +925,8 @@ fn genDeclRef(
return GenResult.mcv(.{ .load_symbol = sym.esym_index });
} else if (lf.cast(link.File.MachO)) |macho_file| {
if (is_extern) {
const name = zcu.intern_pool.stringToSlice(decl.name);
const lib_name = if (decl.getOwnedVariable(zcu)) |ov|
zcu.intern_pool.stringToSliceUnwrap(ov.lib_name)
else
null;
const name = decl.name.toSlice(ip);
const lib_name = if (decl.getOwnedVariable(zcu)) |ov| ov.lib_name.toSlice(ip) else null;
const sym_index = try macho_file.getGlobalSymbol(name, lib_name);
macho_file.getSymbol(macho_file.getZigObject().?.symbols.items[sym_index]).flags.needs_got = true;
return GenResult.mcv(.{ .load_symbol = sym_index });
@ -944,12 +939,9 @@ fn genDeclRef(
return GenResult.mcv(.{ .load_symbol = sym.nlist_idx });
} else if (lf.cast(link.File.Coff)) |coff_file| {
if (is_extern) {
const name = zcu.intern_pool.stringToSlice(decl.name);
const name = decl.name.toSlice(ip);
// TODO audit this
const lib_name = if (decl.getOwnedVariable(zcu)) |ov|
zcu.intern_pool.stringToSliceUnwrap(ov.lib_name)
else
null;
const lib_name = if (decl.getOwnedVariable(zcu)) |ov| ov.lib_name.toSlice(ip) else null;
const global_index = try coff_file.getGlobalSymbol(name, lib_name);
try coff_file.need_got_table.put(gpa, global_index, {}); // needs GOT
return GenResult.mcv(.{ .load_got = link.File.Coff.global_symbol_bit | global_index });
@ -1012,6 +1004,7 @@ pub fn genTypedValue(
owner_decl_index: InternPool.DeclIndex,
) CodeGenError!GenResult {
const zcu = lf.comp.module.?;
const ip = &zcu.intern_pool;
const ty = val.typeOf(zcu);
log.debug("genTypedValue: val = {}", .{val.fmtValue(zcu)});
@ -1024,7 +1017,7 @@ pub fn genTypedValue(
const target = namespace.file_scope.mod.resolved_target.result;
const ptr_bits = target.ptrBitWidth();
if (!ty.isSlice(zcu)) switch (zcu.intern_pool.indexToKey(val.toIntern())) {
if (!ty.isSlice(zcu)) switch (ip.indexToKey(val.toIntern())) {
.ptr => |ptr| switch (ptr.addr) {
.decl => |decl| return genDeclRef(lf, src_loc, val, decl),
else => {},
@ -1041,7 +1034,7 @@ pub fn genTypedValue(
return GenResult.mcv(.{ .immediate = 0 });
},
.none => {},
else => switch (zcu.intern_pool.indexToKey(val.toIntern())) {
else => switch (ip.indexToKey(val.toIntern())) {
.int => {
return GenResult.mcv(.{ .immediate = val.toUnsignedInt(zcu) });
},
@ -1052,8 +1045,8 @@ pub fn genTypedValue(
.Int => {
const info = ty.intInfo(zcu);
if (info.bits <= ptr_bits) {
const unsigned = switch (info.signedness) {
.signed => @as(u64, @bitCast(val.toSignedInt(zcu))),
const unsigned: u64 = switch (info.signedness) {
.signed => @bitCast(val.toSignedInt(zcu)),
.unsigned => val.toUnsignedInt(zcu),
};
return GenResult.mcv(.{ .immediate = unsigned });
@ -1075,7 +1068,7 @@ pub fn genTypedValue(
}
},
.Enum => {
const enum_tag = zcu.intern_pool.indexToKey(val.toIntern()).enum_tag;
const enum_tag = ip.indexToKey(val.toIntern()).enum_tag;
return genTypedValue(
lf,
src_loc,
@ -1084,7 +1077,7 @@ pub fn genTypedValue(
);
},
.ErrorSet => {
const err_name = zcu.intern_pool.indexToKey(val.toIntern()).err.name;
const err_name = ip.indexToKey(val.toIntern()).err.name;
const error_index = zcu.global_error_set.getIndex(err_name).?;
return GenResult.mcv(.{ .immediate = error_index });
},
@ -1094,7 +1087,7 @@ pub fn genTypedValue(
if (!payload_type.hasRuntimeBitsIgnoreComptime(zcu)) {
// We use the error type directly as the type.
const err_int_ty = try zcu.errorIntType();
switch (zcu.intern_pool.indexToKey(val.toIntern()).error_union.val) {
switch (ip.indexToKey(val.toIntern()).error_union.val) {
.err_name => |err_name| return genTypedValue(
lf,
src_loc,

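Several hunks in this commit swap the open-coded sentinel arithmetic for `array_type.lenIncludingSentinel()`. Judging by the expressions it replaces, the helper is equivalent to the following one-line sketch (standalone here only for illustration):

fn lenIncludingSentinelSketch(len: u64, sentinel: InternPool.Index) u64 {
    // Matches the deleted expression:
    //   array_type.len + @intFromBool(array_type.sentinel != .none)
    return len + @intFromBool(sentinel != .none);
}
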
View File

@ -505,7 +505,7 @@ pub const Function = struct {
.never_inline,
=> |owner_decl| try ctype_pool.fmt(gpa, "zig_{s}_{}__{d}", .{
@tagName(key),
fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(owner_decl).name)),
fmtIdent(zcu.declPtr(owner_decl).name.toSlice(&zcu.intern_pool)),
@intFromEnum(owner_decl),
}),
},
@ -898,7 +898,7 @@ pub const DeclGen = struct {
},
},
.err => |err| try writer.print("zig_error_{}", .{
fmtIdent(ip.stringToSlice(err.name)),
fmtIdent(err.name.toSlice(ip)),
}),
.error_union => |error_union| {
const payload_ty = ty.errorUnionPayload(zcu);
@ -1178,7 +1178,7 @@ pub const DeclGen = struct {
switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
@ -1212,7 +1212,7 @@ pub const DeclGen = struct {
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
@ -1258,7 +1258,7 @@ pub const DeclGen = struct {
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
@ -1299,7 +1299,7 @@ pub const DeclGen = struct {
const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
.bytes => |bytes| try ip.get(zcu.gpa, .{ .int = .{
.ty = field_ty.toIntern(),
.storage = .{ .u64 = bytes[field_index] },
.storage = .{ .u64 = bytes.at(field_index, ip) },
} }),
.elems => |elems| elems[field_index],
.repeated_elem => |elem| elem,
@ -1392,7 +1392,7 @@ pub const DeclGen = struct {
try writer.writeAll(" .payload = {");
}
if (field_ty.hasRuntimeBits(zcu)) {
try writer.print(" .{ } = ", .{fmtIdent(ip.stringToSlice(field_name))});
try writer.print(" .{ } = ", .{fmtIdent(field_name.toSlice(ip))});
try dg.renderValue(writer, Value.fromInterned(un.val), initializer_type);
try writer.writeByte(' ');
} else for (0..loaded_union.field_types.len) |this_field_index| {
@ -1741,14 +1741,12 @@ pub const DeclGen = struct {
switch (name) {
.export_index => |export_index| mangled: {
const maybe_exports = zcu.decl_exports.get(fn_decl_index);
const external_name = ip.stringToSlice(
if (maybe_exports) |exports|
exports.items[export_index].opts.name
else if (fn_decl.isExtern(zcu))
fn_decl.name
else
break :mangled,
);
const external_name = (if (maybe_exports) |exports|
exports.items[export_index].opts.name
else if (fn_decl.isExtern(zcu))
fn_decl.name
else
break :mangled).toSlice(ip);
const is_mangled = isMangledIdent(external_name, true);
const is_export = export_index > 0;
if (is_mangled and is_export) {
@ -1756,7 +1754,7 @@ pub const DeclGen = struct {
fmtIdent(external_name),
fmtStringLiteral(external_name, null),
fmtStringLiteral(
ip.stringToSlice(maybe_exports.?.items[0].opts.name),
maybe_exports.?.items[0].opts.name.toSlice(ip),
null,
),
});
@ -1767,7 +1765,7 @@ pub const DeclGen = struct {
} else if (is_export) {
try w.print(" zig_export({s}, {s})", .{
fmtStringLiteral(
ip.stringToSlice(maybe_exports.?.items[0].opts.name),
maybe_exports.?.items[0].opts.name.toSlice(ip),
null,
),
fmtStringLiteral(external_name, null),
@ -2075,12 +2073,12 @@ pub const DeclGen = struct {
.complete,
);
mangled: {
const external_name = zcu.intern_pool.stringToSlice(if (maybe_exports) |exports|
const external_name = (if (maybe_exports) |exports|
exports.items[0].opts.name
else if (variable.is_extern)
decl.name
else
break :mangled);
break :mangled).toSlice(&zcu.intern_pool);
if (isMangledIdent(external_name, true)) {
try fwd.print(" zig_mangled_{s}({ }, {s})", .{
@tagName(fwd_kind),
@ -2094,15 +2092,16 @@ pub const DeclGen = struct {
fn renderDeclName(dg: *DeclGen, writer: anytype, decl_index: InternPool.DeclIndex, export_index: u32) !void {
const zcu = dg.zcu;
const ip = &zcu.intern_pool;
const decl = zcu.declPtr(decl_index);
if (zcu.decl_exports.get(decl_index)) |exports| {
try writer.print("{ }", .{
fmtIdent(zcu.intern_pool.stringToSlice(exports.items[export_index].opts.name)),
fmtIdent(exports.items[export_index].opts.name.toSlice(ip)),
});
} else if (decl.getExternDecl(zcu).unwrap()) |extern_decl_index| {
try writer.print("{ }", .{
fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(extern_decl_index).name)),
fmtIdent(zcu.declPtr(extern_decl_index).name.toSlice(ip)),
});
} else {
// MSVC has a 4095 character token length limit, and fmtIdent can (worst case),
@ -2226,7 +2225,7 @@ fn renderFwdDeclTypeName(
switch (fwd_decl.name) {
.anon => try w.print("anon__lazy_{d}", .{@intFromEnum(ctype.index)}),
.owner_decl => |owner_decl| try w.print("{}__{d}", .{
fmtIdent(zcu.intern_pool.stringToSlice(zcu.declPtr(owner_decl).name)),
fmtIdent(zcu.declPtr(owner_decl).name.toSlice(&zcu.intern_pool)),
@intFromEnum(owner_decl),
}),
}
@ -2548,7 +2547,7 @@ pub fn genErrDecls(o: *Object) !void {
try writer.writeAll("enum {\n");
o.indent_writer.pushIndent();
for (zcu.global_error_set.keys()[1..], 1..) |name_nts, value| {
const name = ip.stringToSlice(name_nts);
const name = name_nts.toSlice(ip);
max_name_len = @max(name.len, max_name_len);
const err_val = try zcu.intern(.{ .err = .{
.ty = .anyerror_type,
@ -2566,19 +2565,19 @@ pub fn genErrDecls(o: *Object) !void {
defer o.dg.gpa.free(name_buf);
@memcpy(name_buf[0..name_prefix.len], name_prefix);
for (zcu.global_error_set.keys()) |name_ip| {
const name = ip.stringToSlice(name_ip);
@memcpy(name_buf[name_prefix.len..][0..name.len], name);
const identifier = name_buf[0 .. name_prefix.len + name.len];
for (zcu.global_error_set.keys()) |name| {
const name_slice = name.toSlice(ip);
@memcpy(name_buf[name_prefix.len..][0..name_slice.len], name_slice);
const identifier = name_buf[0 .. name_prefix.len + name_slice.len];
const name_ty = try zcu.arrayType(.{
.len = name.len,
.len = name_slice.len,
.child = .u8_type,
.sentinel = .zero_u8,
});
const name_val = try zcu.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = name },
.storage = .{ .bytes = name.toString() },
} });
try writer.writeAll("static ");
@ -2611,7 +2610,7 @@ pub fn genErrDecls(o: *Object) !void {
);
try writer.writeAll(" = {");
for (zcu.global_error_set.keys(), 0..) |name_nts, value| {
const name = ip.stringToSlice(name_nts);
const name = name_nts.toSlice(ip);
if (value != 0) try writer.writeByte(',');
try writer.print("{{" ++ name_prefix ++ "{}, {}}}", .{
fmtIdent(name),
@ -2659,7 +2658,7 @@ fn genExports(o: *Object) !void {
for (exports.items[1..]) |@"export"| {
try fwd.writeAll("zig_extern ");
if (@"export".opts.linkage == .weak) try fwd.writeAll("zig_weak_linkage ");
const export_name = ip.stringToSlice(@"export".opts.name);
const export_name = @"export".opts.name.toSlice(ip);
try o.dg.renderTypeAndName(
fwd,
decl.typeOf(zcu),
@ -2672,11 +2671,11 @@ fn genExports(o: *Object) !void {
try fwd.print(" zig_mangled_export({ }, {s}, {s})", .{
fmtIdent(export_name),
fmtStringLiteral(export_name, null),
fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null),
fmtStringLiteral(exports.items[0].opts.name.toSlice(ip), null),
});
} else {
try fwd.print(" zig_export({s}, {s})", .{
fmtStringLiteral(ip.stringToSlice(exports.items[0].opts.name), null),
fmtStringLiteral(exports.items[0].opts.name.toSlice(ip), null),
fmtStringLiteral(export_name, null),
});
}
@ -2706,17 +2705,18 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
try w.writeAll(") {\n switch (tag) {\n");
const tag_names = enum_ty.enumFields(zcu);
for (0..tag_names.len) |tag_index| {
const tag_name = ip.stringToSlice(tag_names.get(ip)[tag_index]);
const tag_name = tag_names.get(ip)[tag_index];
const tag_name_len = tag_name.length(ip);
const tag_val = try zcu.enumValueFieldIndex(enum_ty, @intCast(tag_index));
const name_ty = try zcu.arrayType(.{
.len = tag_name.len,
.len = tag_name_len,
.child = .u8_type,
.sentinel = .zero_u8,
});
const name_val = try zcu.intern(.{ .aggregate = .{
.ty = name_ty.toIntern(),
.storage = .{ .bytes = tag_name },
.storage = .{ .bytes = tag_name.toString() },
} });
try w.print(" case {}: {{\n static ", .{
@ -2729,7 +2729,7 @@ pub fn genLazyFn(o: *Object, lazy_ctype_pool: *const CType.Pool, lazy_fn: LazyFn
try o.dg.renderType(w, name_slice_ty);
try w.print("){{{}, {}}};\n", .{
fmtIdent("name"),
try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, tag_name.len), .Other),
try o.dg.fmtIntLiteral(try zcu.intValue(Type.usize, tag_name_len), .Other),
});
try w.writeAll(" }\n");
@ -2797,7 +2797,7 @@ pub fn genFunc(f: *Function) !void {
try o.indent_writer.insertNewline();
if (!is_global) try o.writer().writeAll("static ");
if (zcu.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s|
try o.writer().print("zig_linksection_fn({s}) ", .{fmtStringLiteral(s, null)});
try o.dg.renderFunctionSignature(o.writer(), decl_index, .complete, .{ .export_index = 0 });
try o.writer().writeByte(' ');
@ -2887,7 +2887,7 @@ pub fn genDecl(o: *Object) !void {
if (!is_global) try w.writeAll("static ");
if (variable.is_weak_linkage) try w.writeAll("zig_weak_linkage ");
if (variable.is_threadlocal and !o.dg.mod.single_threaded) try w.writeAll("zig_threadlocal ");
if (zcu.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |s|
if (decl.@"linksection".toSlice(&zcu.intern_pool)) |s|
try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)});
const decl_c_value = .{ .decl = decl_index };
try o.dg.renderTypeAndName(w, decl_ty, decl_c_value, .{}, decl.alignment, .complete);
@ -2920,7 +2920,7 @@ pub fn genDeclValue(
switch (o.dg.pass) {
.decl => |decl_index| {
if (zcu.decl_exports.get(decl_index)) |exports| {
const export_name = zcu.intern_pool.stringToSlice(exports.items[0].opts.name);
const export_name = exports.items[0].opts.name.toSlice(&zcu.intern_pool);
if (isMangledIdent(export_name, true)) {
try fwd_decl_writer.print(" zig_mangled_final({ }, {s})", .{
fmtIdent(export_name), fmtStringLiteral(export_name, null),
@ -2936,7 +2936,7 @@ pub fn genDeclValue(
const w = o.writer();
if (!is_global) try w.writeAll("static ");
if (zcu.intern_pool.stringToSliceUnwrap(@"linksection")) |s|
if (@"linksection".toSlice(&zcu.intern_pool)) |s|
try w.print("zig_linksection({s}) ", .{fmtStringLiteral(s, null)});
try o.dg.renderTypeAndName(w, ty, decl_c_value, Const, alignment, .complete);
try w.writeAll(" = ");
@ -5454,7 +5454,7 @@ fn fieldLocation(
.{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] }
else
.{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = ip.stringToSlice(field_name) }
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = field_index } },
.@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0)
@ -5470,7 +5470,7 @@ fn fieldLocation(
.{ .byte_offset = container_ty.structFieldOffset(field_index, zcu) }
else
.{ .field = if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = ip.stringToSlice(field_name) }
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = field_index } },
.union_type => {
@ -5485,9 +5485,9 @@ fn fieldLocation(
.begin;
const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
return .{ .field = if (loaded_union.hasTag(ip))
.{ .payload_identifier = ip.stringToSlice(field_name) }
.{ .payload_identifier = field_name.toSlice(ip) }
else
.{ .identifier = ip.stringToSlice(field_name) } };
.{ .identifier = field_name.toSlice(ip) } };
},
.@"packed" => return .begin,
}
@ -5643,7 +5643,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
const loaded_struct = ip.loadStructType(struct_ty.toIntern());
switch (loaded_struct.layout) {
.auto, .@"extern" => break :field_name if (loaded_struct.fieldName(ip, extra.field_index).unwrap()) |field_name|
.{ .identifier = ip.stringToSlice(field_name) }
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = extra.field_index },
.@"packed" => {
@ -5701,7 +5701,7 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
}
},
.anon_struct_type => |anon_struct_info| if (anon_struct_info.fieldName(ip, extra.field_index).unwrap()) |field_name|
.{ .identifier = ip.stringToSlice(field_name) }
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = extra.field_index },
.union_type => field_name: {
@ -5710,9 +5710,9 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
.auto, .@"extern" => {
const name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
break :field_name if (loaded_union.hasTag(ip))
.{ .payload_identifier = ip.stringToSlice(name) }
.{ .payload_identifier = name.toSlice(ip) }
else
.{ .identifier = ip.stringToSlice(name) };
.{ .identifier = name.toSlice(ip) };
},
.@"packed" => {
const operand_lval = if (struct_byval == .constant) blk: {
@ -7062,7 +7062,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, field_ty);
try f.writeCValueMember(writer, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = ip.stringToSlice(field_name) }
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = field_index });
try a.assign(f, writer);
@ -7142,7 +7142,7 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
const a = try Assignment.start(f, writer, field_ty);
try f.writeCValueMember(writer, local, if (anon_struct_info.fieldName(ip, field_index).unwrap()) |field_name|
.{ .identifier = ip.stringToSlice(field_name) }
.{ .identifier = field_name.toSlice(ip) }
else
.{ .field = field_index });
try a.assign(f, writer);
@ -7190,8 +7190,8 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
try writer.print("{}", .{try f.fmtIntLiteral(try tag_val.intFromEnum(tag_ty, zcu))});
try a.end(f, writer);
}
break :field .{ .payload_identifier = ip.stringToSlice(field_name) };
} else .{ .identifier = ip.stringToSlice(field_name) };
break :field .{ .payload_identifier = field_name.toSlice(ip) };
} else .{ .identifier = field_name.toSlice(ip) };
const a = try Assignment.start(f, writer, payload_ty);
try f.writeCValueMember(writer, local, field);

View File

@ -1465,7 +1465,7 @@ pub const Pool = struct {
},
},
.array_type => |array_info| {
const len = array_info.len + @intFromBool(array_info.sentinel != .none);
const len = array_info.lenIncludingSentinel();
if (len == 0) return .{ .index = .void };
const elem_type = Type.fromInterned(array_info.child);
const elem_ctype = try pool.fromType(
@ -1479,7 +1479,7 @@ pub const Pool = struct {
if (elem_ctype.index == .void) return .{ .index = .void };
const array_ctype = try pool.getArray(allocator, .{
.elem_ctype = elem_ctype,
.len = array_info.len + @intFromBool(array_info.sentinel != .none),
.len = len,
});
if (!kind.isParameter()) return array_ctype;
var fields = [_]Info.Field{
@ -1625,7 +1625,7 @@ pub const Pool = struct {
if (field_ctype.index == .void) continue;
const field_name = if (loaded_struct.fieldName(ip, field_index)
.unwrap()) |field_name|
try pool.string(allocator, ip.stringToSlice(field_name))
try pool.string(allocator, field_name.toSlice(ip))
else
try pool.fmt(allocator, "f{d}", .{field_index});
const field_alignas = AlignAs.fromAlignment(.{
@ -1685,7 +1685,7 @@ pub const Pool = struct {
if (field_ctype.index == .void) continue;
const field_name = if (anon_struct_info.fieldName(ip, @intCast(field_index))
.unwrap()) |field_name|
try pool.string(allocator, ip.stringToSlice(field_name))
try pool.string(allocator, field_name.toSlice(ip))
else
try pool.fmt(allocator, "f{d}", .{field_index});
pool.addHashedExtraAssumeCapacityTo(scratch, &hasher, Field, .{
@ -1766,7 +1766,7 @@ pub const Pool = struct {
if (field_ctype.index == .void) continue;
const field_name = try pool.string(
allocator,
ip.stringToSlice(loaded_tag.names.get(ip)[field_index]),
loaded_tag.names.get(ip)[field_index].toSlice(ip),
);
const field_alignas = AlignAs.fromAlignment(.{
.@"align" = loaded_union.fieldAlign(ip, @intCast(field_index)),

View File

@ -1011,7 +1011,7 @@ pub const Object = struct {
llvm_errors[0] = try o.builder.undefConst(llvm_slice_ty);
for (llvm_errors[1..], error_name_list[1..]) |*llvm_error, name| {
const name_string = try o.builder.stringNull(mod.intern_pool.stringToSlice(name));
const name_string = try o.builder.stringNull(name.toSlice(&mod.intern_pool));
const name_init = try o.builder.stringConst(name_string);
const name_variable_index =
try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default);
@ -1086,7 +1086,7 @@ pub const Object = struct {
for (object.extern_collisions.keys()) |decl_index| {
const global = object.decl_map.get(decl_index) orelse continue;
// Same logic as below but for externs instead of exports.
const decl_name = object.builder.strtabStringIfExists(mod.intern_pool.stringToSlice(mod.declPtr(decl_index).name)) orelse continue;
const decl_name = object.builder.strtabStringIfExists(mod.declPtr(decl_index).name.toSlice(&mod.intern_pool)) orelse continue;
const other_global = object.builder.getGlobal(decl_name) orelse continue;
if (other_global.toConst().getBase(&object.builder) ==
global.toConst().getBase(&object.builder)) continue;
@ -1116,7 +1116,7 @@ pub const Object = struct {
for (export_list) |exp| {
// Detect if the LLVM global has already been created as an extern. In such
// case, we need to replace all uses of it with this exported global.
const exp_name = object.builder.strtabStringIfExists(mod.intern_pool.stringToSlice(exp.opts.name)) orelse continue;
const exp_name = object.builder.strtabStringIfExists(exp.opts.name.toSlice(&mod.intern_pool)) orelse continue;
const other_global = object.builder.getGlobal(exp_name) orelse continue;
if (other_global.toConst().getBase(&object.builder) == global_base) continue;
@ -1442,7 +1442,7 @@ pub const Object = struct {
} }, &o.builder);
}
if (ip.stringToSliceUnwrap(decl.@"linksection")) |section|
if (decl.@"linksection".toSlice(ip)) |section|
function_index.setSection(try o.builder.string(section), &o.builder);
var deinit_wip = true;
@ -1662,7 +1662,7 @@ pub const Object = struct {
const subprogram = try o.builder.debugSubprogram(
file,
try o.builder.metadataString(ip.stringToSlice(decl.name)),
try o.builder.metadataString(decl.name.toSlice(ip)),
try o.builder.metadataStringFromStrtabString(function_index.name(&o.builder)),
line_number,
line_number + func.lbrace_line,
@ -1752,6 +1752,7 @@ pub const Object = struct {
.value => |val| return updateExportedValue(self, mod, val, exports),
};
const gpa = mod.gpa;
const ip = &mod.intern_pool;
// If the module does not already have the function, we ignore this function call
// because we call `updateExports` at the end of `updateFunc` and `updateDecl`.
const global_index = self.decl_map.get(decl_index) orelse return;
@ -1759,17 +1760,14 @@ pub const Object = struct {
const comp = mod.comp;
if (decl.isExtern(mod)) {
const decl_name = decl_name: {
const decl_name = mod.intern_pool.stringToSlice(decl.name);
if (mod.getTarget().isWasm() and decl.val.typeOf(mod).zigTypeTag(mod) == .Fn) {
if (mod.intern_pool.stringToSliceUnwrap(decl.getOwnedExternFunc(mod).?.lib_name)) |lib_name| {
if (decl.getOwnedExternFunc(mod).?.lib_name.toSlice(ip)) |lib_name| {
if (!std.mem.eql(u8, lib_name, "c")) {
break :decl_name try self.builder.strtabStringFmt("{s}|{s}", .{ decl_name, lib_name });
break :decl_name try self.builder.strtabStringFmt("{}|{s}", .{ decl.name.fmt(ip), lib_name });
}
}
}
break :decl_name try self.builder.strtabString(decl_name);
break :decl_name try self.builder.strtabString(decl.name.toSlice(ip));
};
if (self.builder.getGlobal(decl_name)) |other_global| {
@ -1792,9 +1790,7 @@ pub const Object = struct {
if (decl_var.is_weak_linkage) global_index.setLinkage(.extern_weak, &self.builder);
}
} else if (exports.len != 0) {
const main_exp_name = try self.builder.strtabString(
mod.intern_pool.stringToSlice(exports[0].opts.name),
);
const main_exp_name = try self.builder.strtabString(exports[0].opts.name.toSlice(ip));
try global_index.rename(main_exp_name, &self.builder);
if (decl.val.getVariable(mod)) |decl_var| if (decl_var.is_threadlocal)
@ -1803,9 +1799,7 @@ pub const Object = struct {
return updateExportedGlobal(self, mod, global_index, exports);
} else {
const fqn = try self.builder.strtabString(
mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod)),
);
const fqn = try self.builder.strtabString((try decl.fullyQualifiedName(mod)).toSlice(ip));
try global_index.rename(fqn, &self.builder);
global_index.setLinkage(.internal, &self.builder);
if (comp.config.dll_export_fns)
@ -1832,9 +1826,8 @@ pub const Object = struct {
exports: []const *Module.Export,
) link.File.UpdateExportsError!void {
const gpa = mod.gpa;
const main_exp_name = try o.builder.strtabString(
mod.intern_pool.stringToSlice(exports[0].opts.name),
);
const ip = &mod.intern_pool;
const main_exp_name = try o.builder.strtabString(exports[0].opts.name.toSlice(ip));
const global_index = i: {
const gop = try o.anon_decl_map.getOrPut(gpa, exported_value);
if (gop.found_existing) {
@ -1845,7 +1838,7 @@ pub const Object = struct {
const llvm_addr_space = toLlvmAddressSpace(.generic, o.target);
const variable_index = try o.builder.addVariable(
main_exp_name,
try o.lowerType(Type.fromInterned(mod.intern_pool.typeOf(exported_value))),
try o.lowerType(Type.fromInterned(ip.typeOf(exported_value))),
llvm_addr_space,
);
const global_index = variable_index.ptrConst(&o.builder).global;
@ -1867,8 +1860,9 @@ pub const Object = struct {
global_index: Builder.Global.Index,
exports: []const *Module.Export,
) link.File.UpdateExportsError!void {
global_index.setUnnamedAddr(.default, &o.builder);
const comp = mod.comp;
const ip = &mod.intern_pool;
global_index.setUnnamedAddr(.default, &o.builder);
if (comp.config.dll_export_fns)
global_index.setDllStorageClass(.dllexport, &o.builder);
global_index.setLinkage(switch (exports[0].opts.linkage) {
@ -1882,7 +1876,7 @@ pub const Object = struct {
.hidden => .hidden,
.protected => .protected,
}, &o.builder);
if (mod.intern_pool.stringToSliceUnwrap(exports[0].opts.section)) |section|
if (exports[0].opts.section.toSlice(ip)) |section|
switch (global_index.ptrConst(&o.builder).kind) {
.variable => |impl_index| impl_index.setSection(
try o.builder.string(section),
@ -1900,7 +1894,7 @@ pub const Object = struct {
// Until then we iterate over existing aliases and make them point
// to the correct decl, or otherwise add a new alias. Old aliases are leaked.
for (exports[1..]) |exp| {
const exp_name = try o.builder.strtabString(mod.intern_pool.stringToSlice(exp.opts.name));
const exp_name = try o.builder.strtabString(exp.opts.name.toSlice(ip));
if (o.builder.getGlobal(exp_name)) |global| {
switch (global.ptrConst(&o.builder).kind) {
.alias => |alias| {
@ -2013,7 +2007,7 @@ pub const Object = struct {
std.math.big.int.Mutable.init(&bigint_space.limbs, i).toConst();
enumerators[i] = try o.builder.debugEnumerator(
try o.builder.metadataString(ip.stringToSlice(field_name_ip)),
try o.builder.metadataString(field_name_ip.toSlice(ip)),
int_info.signedness == .unsigned,
int_info.bits,
bigint,
@ -2473,7 +2467,7 @@ pub const Object = struct {
offset = field_offset + field_size;
const field_name = if (tuple.names.len != 0)
ip.stringToSlice(tuple.names.get(ip)[i])
tuple.names.get(ip)[i].toSlice(ip)
else
try std.fmt.allocPrintZ(gpa, "{d}", .{i});
defer if (tuple.names.len == 0) gpa.free(field_name);
@ -2557,10 +2551,10 @@ pub const Object = struct {
const field_offset = ty.structFieldOffset(field_index, mod);
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(gpa, "{d}", .{field_index});
try ip.getOrPutStringFmt(gpa, "{d}", .{field_index}, .no_embedded_nulls);
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(ip.stringToSlice(field_name)),
try o.builder.metadataString(field_name.toSlice(ip)),
.none, // File
debug_fwd_ref,
0, // Line
@ -2655,7 +2649,7 @@ pub const Object = struct {
const field_name = tag_type.names.get(ip)[field_index];
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(ip.stringToSlice(field_name)),
try o.builder.metadataString(field_name.toSlice(ip)),
.none, // File
debug_union_fwd_ref,
0, // Line
@ -2827,7 +2821,7 @@ pub const Object = struct {
const mod = o.module;
const decl = mod.declPtr(decl_index);
return o.builder.debugStructType(
try o.builder.metadataString(mod.intern_pool.stringToSlice(decl.name)), // TODO use fully qualified name
try o.builder.metadataString(decl.name.toSlice(&mod.intern_pool)), // TODO use fully qualified name
try o.getDebugFile(mod.namespacePtr(decl.src_namespace).file_scope),
try o.namespaceToDebugScope(decl.src_namespace),
decl.src_line + 1,
@ -2844,11 +2838,11 @@ pub const Object = struct {
const std_mod = mod.std_mod;
const std_file = (mod.importPkg(std_mod) catch unreachable).file;
const builtin_str = try mod.intern_pool.getOrPutString(mod.gpa, "builtin");
const builtin_str = try mod.intern_pool.getOrPutString(mod.gpa, "builtin", .no_embedded_nulls);
const std_namespace = mod.namespacePtr(mod.declPtr(std_file.root_decl.unwrap().?).src_namespace);
const builtin_decl = std_namespace.decls.getKeyAdapted(builtin_str, Module.DeclAdapter{ .zcu = mod }).?;
const stack_trace_str = try mod.intern_pool.getOrPutString(mod.gpa, "StackTrace");
const stack_trace_str = try mod.intern_pool.getOrPutString(mod.gpa, "StackTrace", .no_embedded_nulls);
// buffer is only used for int_type, `builtin` is a struct.
const builtin_ty = mod.declPtr(builtin_decl).val.toType();
const builtin_namespace = mod.namespacePtrUnwrap(builtin_ty.getNamespaceIndex(mod)).?;
@ -2892,10 +2886,10 @@ pub const Object = struct {
const is_extern = decl.isExtern(zcu);
const function_index = try o.builder.addFunction(
try o.lowerType(zig_fn_type),
try o.builder.strtabString(ip.stringToSlice(if (is_extern)
try o.builder.strtabString((if (is_extern)
decl.name
else
try decl.fullyQualifiedName(zcu))),
try decl.fullyQualifiedName(zcu)).toSlice(ip)),
toLlvmAddressSpace(decl.@"addrspace", target),
);
gop.value_ptr.* = function_index.ptrConst(&o.builder).global;
@ -2910,9 +2904,9 @@ pub const Object = struct {
if (target.isWasm()) {
try attributes.addFnAttr(.{ .string = .{
.kind = try o.builder.string("wasm-import-name"),
.value = try o.builder.string(ip.stringToSlice(decl.name)),
.value = try o.builder.string(decl.name.toSlice(ip)),
} }, &o.builder);
if (ip.stringToSliceUnwrap(decl.getOwnedExternFunc(zcu).?.lib_name)) |lib_name| {
if (decl.getOwnedExternFunc(zcu).?.lib_name.toSlice(ip)) |lib_name| {
if (!std.mem.eql(u8, lib_name, "c")) try attributes.addFnAttr(.{ .string = .{
.kind = try o.builder.string("wasm-import-module"),
.value = try o.builder.string(lib_name),
@ -3108,9 +3102,10 @@ pub const Object = struct {
const is_extern = decl.isExtern(mod);
const variable_index = try o.builder.addVariable(
try o.builder.strtabString(mod.intern_pool.stringToSlice(
if (is_extern) decl.name else try decl.fullyQualifiedName(mod),
)),
try o.builder.strtabString((if (is_extern)
decl.name
else
try decl.fullyQualifiedName(mod)).toSlice(&mod.intern_pool)),
try o.lowerType(decl.typeOf(mod)),
toLlvmGlobalAddressSpace(decl.@"addrspace", mod.getTarget()),
);
@ -3258,7 +3253,7 @@ pub const Object = struct {
};
},
.array_type => |array_type| o.builder.arrayType(
array_type.len + @intFromBool(array_type.sentinel != .none),
array_type.lenIncludingSentinel(),
try o.lowerType(Type.fromInterned(array_type.child)),
),
.vector_type => |vector_type| o.builder.vectorType(
@ -3335,9 +3330,7 @@ pub const Object = struct {
return int_ty;
}
const name = try o.builder.string(ip.stringToSlice(
try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(mod),
));
const fqn = try mod.declPtr(struct_type.decl.unwrap().?).fullyQualifiedName(mod);
var llvm_field_types = std.ArrayListUnmanaged(Builder.Type){};
defer llvm_field_types.deinit(o.gpa);
@ -3402,7 +3395,7 @@ pub const Object = struct {
);
}
const ty = try o.builder.opaqueType(name);
const ty = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip)));
try o.type_map.put(o.gpa, t.toIntern(), ty);
o.builder.namedTypeSetBody(
@ -3491,9 +3484,7 @@ pub const Object = struct {
return enum_tag_ty;
}
const name = try o.builder.string(ip.stringToSlice(
try mod.declPtr(union_obj.decl).fullyQualifiedName(mod),
));
const fqn = try mod.declPtr(union_obj.decl).fullyQualifiedName(mod);
const aligned_field_ty = Type.fromInterned(union_obj.field_types.get(ip)[layout.most_aligned_field]);
const aligned_field_llvm_ty = try o.lowerType(aligned_field_ty);
@ -3513,7 +3504,7 @@ pub const Object = struct {
};
if (layout.tag_size == 0) {
const ty = try o.builder.opaqueType(name);
const ty = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip)));
try o.type_map.put(o.gpa, t.toIntern(), ty);
o.builder.namedTypeSetBody(
@ -3541,7 +3532,7 @@ pub const Object = struct {
llvm_fields_len += 1;
}
const ty = try o.builder.opaqueType(name);
const ty = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip)));
try o.type_map.put(o.gpa, t.toIntern(), ty);
o.builder.namedTypeSetBody(
@ -3554,8 +3545,8 @@ pub const Object = struct {
const gop = try o.type_map.getOrPut(o.gpa, t.toIntern());
if (!gop.found_existing) {
const decl = mod.declPtr(ip.loadOpaqueType(t.toIntern()).decl);
const name = try o.builder.string(ip.stringToSlice(try decl.fullyQualifiedName(mod)));
gop.value_ptr.* = try o.builder.opaqueType(name);
const fqn = try decl.fullyQualifiedName(mod);
gop.value_ptr.* = try o.builder.opaqueType(try o.builder.string(fqn.toSlice(ip)));
}
return gop.value_ptr.*;
},
@ -3859,7 +3850,9 @@ pub const Object = struct {
},
.aggregate => |aggregate| switch (ip.indexToKey(ty.toIntern())) {
.array_type => |array_type| switch (aggregate.storage) {
.bytes => |bytes| try o.builder.stringConst(try o.builder.string(bytes)),
.bytes => |bytes| try o.builder.stringConst(try o.builder.string(
bytes.toSlice(array_type.lenIncludingSentinel(), ip),
)),
.elems => |elems| {
const array_ty = try o.lowerType(ty);
const elem_ty = array_ty.childType(&o.builder);
@ -3892,8 +3885,7 @@ pub const Object = struct {
},
.repeated_elem => |elem| {
const len: usize = @intCast(array_type.len);
const len_including_sentinel: usize =
@intCast(len + @intFromBool(array_type.sentinel != .none));
const len_including_sentinel: usize = @intCast(array_type.lenIncludingSentinel());
const array_ty = try o.lowerType(ty);
const elem_ty = array_ty.childType(&o.builder);
@ -3942,7 +3934,7 @@ pub const Object = struct {
defer allocator.free(vals);
switch (aggregate.storage) {
.bytes => |bytes| for (vals, bytes) |*result_val, byte| {
.bytes => |bytes| for (vals, bytes.toSlice(vector_type.len, ip)) |*result_val, byte| {
result_val.* = try o.builder.intConst(.i8, byte);
},
.elems => |elems| for (vals, elems) |*result_val, elem| {
@ -4633,7 +4625,7 @@ pub const Object = struct {
defer wip_switch.finish(&wip);
for (0..enum_type.names.len) |field_index| {
const name = try o.builder.stringNull(ip.stringToSlice(enum_type.names.get(ip)[field_index]));
const name = try o.builder.stringNull(enum_type.names.get(ip)[field_index].toSlice(ip));
const name_init = try o.builder.stringConst(name);
const name_variable_index =
try o.builder.addVariable(.empty, name_init.typeOf(&o.builder), .default);
@ -4693,6 +4685,7 @@ pub const DeclGen = struct {
fn genDecl(dg: *DeclGen) !void {
const o = dg.object;
const zcu = o.module;
const ip = &zcu.intern_pool;
const decl = dg.decl;
const decl_index = dg.decl_index;
assert(decl.has_tv);
@ -4705,7 +4698,7 @@ pub const DeclGen = struct {
decl.getAlignment(zcu).toLlvm(),
&o.builder,
);
if (zcu.intern_pool.stringToSliceUnwrap(decl.@"linksection")) |section|
if (decl.@"linksection".toSlice(ip)) |section|
variable_index.setSection(try o.builder.string(section), &o.builder);
assert(decl.has_tv);
const init_val = if (decl.val.getVariable(zcu)) |decl_var| decl_var.init else init_val: {
@ -4728,7 +4721,7 @@ pub const DeclGen = struct {
const debug_file = try o.getDebugFile(namespace.file_scope);
const debug_global_var = try o.builder.debugGlobalVar(
try o.builder.metadataString(zcu.intern_pool.stringToSlice(decl.name)), // Name
try o.builder.metadataString(decl.name.toSlice(ip)), // Name
try o.builder.metadataStringFromStrtabString(variable_index.name(&o.builder)), // Linkage name
debug_file, // File
debug_file, // Scope
@ -5156,8 +5149,8 @@ pub const FuncGen = struct {
self.scope = try o.builder.debugSubprogram(
self.file,
try o.builder.metadataString(zcu.intern_pool.stringToSlice(decl.name)),
try o.builder.metadataString(zcu.intern_pool.stringToSlice(fqn)),
try o.builder.metadataString(decl.name.toSlice(&zcu.intern_pool)),
try o.builder.metadataString(fqn.toSlice(&zcu.intern_pool)),
line_number,
line_number + func.lbrace_line,
try o.lowerDebugType(fn_ty),

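The `getOrPutString` and `getOrPutStringFmt` calls above now take an explicit embedded-nulls policy, since a plain `String` key may contain zero bytes while a `NullTerminatedString` key must not. A sketch of the two call shapes seen in this commit, wrapped in an illustrative function; `data` stands for arbitrary aggregate bytes:

fn internExamples(ip: *InternPool, gpa: std.mem.Allocator, data: []const u8) !void {
    // Identifier-like names can never contain NUL, so they request a
    // null-terminated key.
    const name = try ip.getOrPutString(gpa, "builtin", .no_embedded_nulls);
    // Raw aggregate bytes may contain NULs; reserve capacity first, as the
    // Sema hunk near the top of this diff does, then intern permissively.
    try ip.string_bytes.ensureUnusedCapacity(gpa, data.len + 1);
    const bytes = try ip.getOrPutString(gpa, data, .maybe_embedded_nulls);
    _ = name;
    _ = bytes;
}
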
View File

@ -1028,39 +1028,30 @@ const DeclGen = struct {
inline .array_type, .vector_type => |array_type, tag| {
const elem_ty = Type.fromInterned(array_type.child);
const constituents = try self.gpa.alloc(IdRef, @as(u32, @intCast(ty.arrayLenIncludingSentinel(mod))));
const constituents = try self.gpa.alloc(IdRef, @intCast(ty.arrayLenIncludingSentinel(mod)));
defer self.gpa.free(constituents);
switch (aggregate.storage) {
.bytes => |bytes| {
// TODO: This is really space inefficient, perhaps there is a better
// way to do it?
for (bytes, 0..) |byte, i| {
constituents[i] = try self.constInt(elem_ty, byte, .indirect);
for (constituents, bytes.toSlice(constituents.len, ip)) |*constituent, byte| {
constituent.* = try self.constInt(elem_ty, byte, .indirect);
}
},
.elems => |elems| {
for (0..@as(usize, @intCast(array_type.len))) |i| {
constituents[i] = try self.constant(elem_ty, Value.fromInterned(elems[i]), .indirect);
for (constituents, elems) |*constituent, elem| {
constituent.* = try self.constant(elem_ty, Value.fromInterned(elem), .indirect);
}
},
.repeated_elem => |elem| {
const val_id = try self.constant(elem_ty, Value.fromInterned(elem), .indirect);
for (0..@as(usize, @intCast(array_type.len))) |i| {
constituents[i] = val_id;
}
@memset(constituents, try self.constant(elem_ty, Value.fromInterned(elem), .indirect));
},
}
switch (tag) {
inline .array_type => {
if (array_type.sentinel != .none) {
const sentinel = Value.fromInterned(array_type.sentinel);
constituents[constituents.len - 1] = try self.constant(elem_ty, sentinel, .indirect);
}
return self.constructArray(ty, constituents);
},
inline .vector_type => return self.constructVector(ty, constituents),
.array_type => return self.constructArray(ty, constituents),
.vector_type => return self.constructVector(ty, constituents),
else => unreachable,
}
},
@ -1683,9 +1674,9 @@ const DeclGen = struct {
}
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index});
try ip.getOrPutStringFmt(mod.gpa, "{d}", .{field_index}, .no_embedded_nulls);
try member_types.append(try self.resolveType(field_ty, .indirect));
try member_names.append(ip.stringToSlice(field_name));
try member_names.append(field_name.toSlice(ip));
}
const result_id = try self.spv.structType(member_types.items, member_names.items);
@ -2123,12 +2114,12 @@ const DeclGen = struct {
// Append the actual code into the functions section.
try self.spv.addFunction(spv_decl_index, self.func);
const fqn = ip.stringToSlice(try decl.fullyQualifiedName(self.module));
try self.spv.debugName(result_id, fqn);
const fqn = try decl.fullyQualifiedName(self.module);
try self.spv.debugName(result_id, fqn.toSlice(ip));
// Temporarily generate a test kernel declaration if this is a test function.
if (self.module.test_functions.contains(self.decl_index)) {
try self.generateTestEntryPoint(fqn, spv_decl_index);
try self.generateTestEntryPoint(fqn.toSlice(ip), spv_decl_index);
}
},
.global => {
@ -2152,8 +2143,8 @@ const DeclGen = struct {
.storage_class = final_storage_class,
});
const fqn = ip.stringToSlice(try decl.fullyQualifiedName(self.module));
try self.spv.debugName(result_id, fqn);
const fqn = try decl.fullyQualifiedName(self.module);
try self.spv.debugName(result_id, fqn.toSlice(ip));
try self.spv.declareDeclDeps(spv_decl_index, &.{});
},
.invocation_global => {
@ -2197,8 +2188,8 @@ const DeclGen = struct {
try self.func.body.emit(self.spv.gpa, .OpFunctionEnd, {});
try self.spv.addFunction(spv_decl_index, self.func);
const fqn = ip.stringToSlice(try decl.fullyQualifiedName(self.module));
try self.spv.debugNameFmt(initializer_id, "initializer of {s}", .{fqn});
const fqn = try decl.fullyQualifiedName(self.module);
try self.spv.debugNameFmt(initializer_id, "initializer of {}", .{fqn.fmt(ip)});
try self.spv.sections.types_globals_constants.emit(self.spv.gpa, .OpExtInst, .{
.id_result_type = ptr_ty_id,

View File

@ -1176,9 +1176,9 @@ pub fn lowerUnnamedConst(self: *Coff, val: Value, decl_index: InternPool.DeclInd
gop.value_ptr.* = .{};
}
const unnamed_consts = gop.value_ptr;
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
const index = unnamed_consts.items.len;
const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
const sym_name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index });
defer gpa.free(sym_name);
const ty = val.typeOf(mod);
const atom_index = switch (try self.lowerConst(sym_name, val, ty.abiAlignment(mod), self.rdata_section_index.?, decl.srcLoc(mod))) {
@ -1257,8 +1257,8 @@ pub fn updateDecl(
if (decl.isExtern(mod)) {
// TODO make this part of getGlobalSymbol
const variable = decl.getOwnedVariable(mod).?;
const name = mod.intern_pool.stringToSlice(decl.name);
const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name);
const name = decl.name.toSlice(&mod.intern_pool);
const lib_name = variable.lib_name.toSlice(&mod.intern_pool);
const global_index = try self.getGlobalSymbol(name, lib_name);
try self.need_got_table.put(gpa, global_index, {});
return;
@ -1425,9 +1425,9 @@ fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, com
const mod = self.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
const required_alignment: u32 = @intCast(decl.getAlignment(mod).toByteUnits() orelse 0);
const decl_metadata = self.decls.get(decl_index).?;
@ -1439,7 +1439,7 @@ fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, com
if (atom.size != 0) {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
try self.setSymbolName(sym, decl_name.toSlice(&mod.intern_pool));
sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1));
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
@ -1447,7 +1447,7 @@ fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, com
const need_realloc = code.len > capacity or !mem.isAlignedGeneric(u64, sym.value, required_alignment);
if (need_realloc) {
const vaddr = try self.growAtom(atom_index, code_len, required_alignment);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, sym.value, vaddr });
log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), sym.value, vaddr });
log.debug(" (required alignment 0x{x}", .{required_alignment});
if (vaddr != sym.value) {
@ -1463,13 +1463,13 @@ fn updateDeclCode(self: *Coff, decl_index: InternPool.DeclIndex, code: []u8, com
self.getAtomPtr(atom_index).size = code_len;
} else {
const sym = atom.getSymbolPtr(self);
try self.setSymbolName(sym, decl_name);
try self.setSymbolName(sym, decl_name.toSlice(&mod.intern_pool));
sym.section_number = @as(coff.SectionNumber, @enumFromInt(sect_index + 1));
sym.type = .{ .complex_type = complex_type, .base_type = .NULL };
const vaddr = try self.allocateAtom(atom_index, code_len, required_alignment);
errdefer self.freeAtom(atom_index);
log.debug("allocated atom for {s} at 0x{x}", .{ decl_name, vaddr });
log.debug("allocated atom for {} at 0x{x}", .{ decl_name.fmt(&mod.intern_pool), vaddr });
self.getAtomPtr(atom_index).size = code_len;
sym.value = vaddr;
@ -1534,20 +1534,18 @@ pub fn updateExports(
else => std.builtin.CallingConvention.C,
};
const decl_cc = exported_decl.typeOf(mod).fnCallingConvention(mod);
if (decl_cc == .C and ip.stringEqlSlice(exp.opts.name, "main") and
comp.config.link_libc)
{
if (decl_cc == .C and exp.opts.name.eqlSlice("main", ip) and comp.config.link_libc) {
mod.stage1_flags.have_c_main = true;
} else if (decl_cc == winapi_cc and target.os.tag == .windows) {
if (ip.stringEqlSlice(exp.opts.name, "WinMain")) {
if (exp.opts.name.eqlSlice("WinMain", ip)) {
mod.stage1_flags.have_winmain = true;
} else if (ip.stringEqlSlice(exp.opts.name, "wWinMain")) {
} else if (exp.opts.name.eqlSlice("wWinMain", ip)) {
mod.stage1_flags.have_wwinmain = true;
} else if (ip.stringEqlSlice(exp.opts.name, "WinMainCRTStartup")) {
} else if (exp.opts.name.eqlSlice("WinMainCRTStartup", ip)) {
mod.stage1_flags.have_winmain_crt_startup = true;
} else if (ip.stringEqlSlice(exp.opts.name, "wWinMainCRTStartup")) {
} else if (exp.opts.name.eqlSlice("wWinMainCRTStartup", ip)) {
mod.stage1_flags.have_wwinmain_crt_startup = true;
} else if (ip.stringEqlSlice(exp.opts.name, "DllMainCRTStartup")) {
} else if (exp.opts.name.eqlSlice("DllMainCRTStartup", ip)) {
mod.stage1_flags.have_dllmain_crt_startup = true;
}
}
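
The calling-convention checks now compare the interned export name in place via `eqlSlice` instead of first materializing a slice with `stringToSlice`. A hedged standalone sketch of what such a comparison amounts to (`Pool` and `Name` are illustrative stand-ins):

    const std = @import("std");

    const Pool = struct {
        bytes: []const u8, // all interned strings, each 0-terminated

        const Name = enum(u32) {
            _,

            fn eqlSlice(name: Name, s: []const u8, pool: *const Pool) bool {
                const stored = std.mem.sliceTo(pool.bytes[@intFromEnum(name)..], 0);
                return std.mem.eql(u8, stored, s);
            }
        };
    };

    test "compare an interned name against a literal" {
        const pool: Pool = .{ .bytes = "main\x00WinMain\x00" };
        const name: Pool.Name = @enumFromInt(5);
        try std.testing.expect(name.eqlSlice("WinMain", &pool));
        try std.testing.expect(!name.eqlSlice("main", &pool));
    }
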
@ -1585,7 +1583,7 @@ pub fn updateExports(
for (exports) |exp| {
log.debug("adding new export '{}'", .{exp.opts.name.fmt(&mod.intern_pool)});
if (mod.intern_pool.stringToSliceUnwrap(exp.opts.section)) |section_name| {
if (exp.opts.section.toSlice(&mod.intern_pool)) |section_name| {
if (!mem.eql(u8, section_name, ".text")) {
try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
gpa,
@ -1607,7 +1605,7 @@ pub fn updateExports(
continue;
}
const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);
const exp_name = exp.opts.name.toSlice(&mod.intern_pool);
const sym_index = metadata.getExport(self, exp_name) orelse blk: {
const sym_index = if (self.getGlobalIndex(exp_name)) |global_index| ind: {
const global = self.globals.items[global_index];
@ -1646,18 +1644,18 @@ pub fn updateExports(
pub fn deleteDeclExport(
self: *Coff,
decl_index: InternPool.DeclIndex,
name_ip: InternPool.NullTerminatedString,
name: InternPool.NullTerminatedString,
) void {
if (self.llvm_object) |_| return;
const metadata = self.decls.getPtr(decl_index) orelse return;
const mod = self.base.comp.module.?;
const name = mod.intern_pool.stringToSlice(name_ip);
const sym_index = metadata.getExportPtr(self, name) orelse return;
const name_slice = name.toSlice(&mod.intern_pool);
const sym_index = metadata.getExportPtr(self, name_slice) orelse return;
const gpa = self.base.comp.gpa;
const sym_loc = SymbolWithLoc{ .sym_index = sym_index.*, .file = null };
const sym = self.getSymbolPtr(sym_loc);
log.debug("deleting export '{s}'", .{name});
log.debug("deleting export '{}'", .{name.fmt(&mod.intern_pool)});
assert(sym.storage_class == .EXTERNAL and sym.section_number != .UNDEFINED);
sym.* = .{
.name = [_]u8{0} ** 8,
@ -1669,7 +1667,7 @@ pub fn deleteDeclExport(
};
self.locals_free_list.append(gpa, sym_index.*) catch {};
if (self.resolver.fetchRemove(name)) |entry| {
if (self.resolver.fetchRemove(name_slice)) |entry| {
defer gpa.free(entry.key);
self.globals_free_list.append(gpa, entry.value) catch {};
self.globals.items[entry.value] = .{

@ -339,15 +339,14 @@ pub const DeclState = struct {
struct_type.field_names.get(ip),
struct_type.field_types.get(ip),
struct_type.offsets.get(ip),
) |field_name_ip, field_ty, field_off| {
) |field_name, field_ty, field_off| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_name = ip.stringToSlice(field_name_ip);
const field_name_slice = field_name.toSlice(ip);
// DW.AT.member
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2);
try dbg_info_buffer.ensureUnusedCapacity(field_name_slice.len + 2);
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.struct_member));
// DW.AT.name, DW.FORM.string
dbg_info_buffer.appendSliceAssumeCapacity(field_name);
dbg_info_buffer.appendAssumeCapacity(0);
dbg_info_buffer.appendSliceAssumeCapacity(field_name_slice[0 .. field_name_slice.len + 1]);
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.appendNTimes(0, 4);
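
The single `appendSliceAssumeCapacity(field_name_slice[0 .. field_name_slice.len + 1])` works because `toSlice` yields a sentinel-terminated `[:0]const u8`, where indexing one past `len` is defined and reads the 0 terminator; one append therefore replaces the old append-then-append(0) pair. A standalone illustration:

    const std = @import("std");

    test "one append writes the name plus its terminator" {
        const name: [:0]const u8 = "field";
        var buf = std.ArrayList(u8).init(std.testing.allocator);
        defer buf.deinit();
        try buf.ensureUnusedCapacity(name.len + 2);
        buf.appendAssumeCapacity(0x0d); // stand-in for the abbrev code byte
        buf.appendSliceAssumeCapacity(name[0 .. name.len + 1]); // includes the 0
        try std.testing.expectEqual(@as(u8, 0), buf.items[buf.items.len - 1]);
    }
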
@ -374,14 +373,13 @@ pub const DeclState = struct {
try dbg_info_buffer.append(0);
const enum_type = ip.loadEnumType(ty.ip_index);
for (enum_type.names.get(ip), 0..) |field_name_index, field_i| {
const field_name = ip.stringToSlice(field_name_index);
for (enum_type.names.get(ip), 0..) |field_name, field_i| {
const field_name_slice = field_name.toSlice(ip);
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(field_name.len + 2 + @sizeOf(u64));
try dbg_info_buffer.ensureUnusedCapacity(field_name_slice.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.enum_variant));
// DW.AT.name, DW.FORM.string
dbg_info_buffer.appendSliceAssumeCapacity(field_name);
dbg_info_buffer.appendAssumeCapacity(0);
dbg_info_buffer.appendSliceAssumeCapacity(field_name_slice[0 .. field_name_slice.len + 1]);
// DW.AT.const_value, DW.FORM.data8
const value: u64 = value: {
if (enum_type.values.len == 0) break :value field_i; // auto-numbered
@ -443,11 +441,11 @@ pub const DeclState = struct {
for (union_obj.field_types.get(ip), union_obj.loadTagType(ip).names.get(ip)) |field_ty, field_name| {
if (!Type.fromInterned(field_ty).hasRuntimeBits(mod)) continue;
const field_name_slice = field_name.toSlice(ip);
// DW.AT.member
try dbg_info_buffer.append(@intFromEnum(AbbrevCode.struct_member));
// DW.AT.name, DW.FORM.string
try dbg_info_buffer.appendSlice(ip.stringToSlice(field_name));
try dbg_info_buffer.append(0);
try dbg_info_buffer.appendSlice(field_name_slice[0 .. field_name_slice.len + 1]);
// DW.AT.type, DW.FORM.ref4
const index = dbg_info_buffer.items.len;
try dbg_info_buffer.appendNTimes(0, 4);
@ -1155,8 +1153,8 @@ pub fn initDeclState(self: *Dwarf, mod: *Module, decl_index: InternPool.DeclInde
dbg_line_buffer.appendAssumeCapacity(DW.LNS.copy);
// .debug_info subprogram
const decl_name_slice = mod.intern_pool.stringToSlice(decl.name);
const decl_linkage_name_slice = mod.intern_pool.stringToSlice(decl_linkage_name);
const decl_name_slice = decl.name.toSlice(&mod.intern_pool);
const decl_linkage_name_slice = decl_linkage_name.toSlice(&mod.intern_pool);
try dbg_info_buffer.ensureUnusedCapacity(1 + ptr_width_bytes + 4 + 4 +
(decl_name_slice.len + 1) + (decl_linkage_name_slice.len + 1));
@ -2866,15 +2864,14 @@ fn addDbgInfoErrorSetNames(
// DW.AT.const_value, DW.FORM.data8
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), 0, target_endian);
for (error_names) |error_name_ip| {
const int = try mod.getErrorValue(error_name_ip);
const error_name = mod.intern_pool.stringToSlice(error_name_ip);
for (error_names) |error_name| {
const int = try mod.getErrorValue(error_name);
const error_name_slice = error_name.toSlice(&mod.intern_pool);
// DW.AT.enumerator
try dbg_info_buffer.ensureUnusedCapacity(error_name.len + 2 + @sizeOf(u64));
try dbg_info_buffer.ensureUnusedCapacity(error_name_slice.len + 2 + @sizeOf(u64));
dbg_info_buffer.appendAssumeCapacity(@intFromEnum(AbbrevCode.enum_variant));
// DW.AT.name, DW.FORM.string
dbg_info_buffer.appendSliceAssumeCapacity(error_name);
dbg_info_buffer.appendAssumeCapacity(0);
dbg_info_buffer.appendSliceAssumeCapacity(error_name_slice[0 .. error_name_slice.len + 1]);
// DW.AT.const_value, DW.FORM.data8
mem.writeInt(u64, dbg_info_buffer.addManyAsArrayAssumeCapacity(8), int, target_endian);
}

@ -902,9 +902,9 @@ fn updateDeclCode(
const gpa = elf_file.base.comp.gpa;
const mod = elf_file.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
const required_alignment = decl.getAlignment(mod);
@ -915,7 +915,7 @@ fn updateDeclCode(
sym.output_section_index = shdr_index;
atom_ptr.output_section_index = shdr_index;
sym.name_offset = try self.strtab.insert(gpa, decl_name);
sym.name_offset = try self.strtab.insert(gpa, decl_name.toSlice(&mod.intern_pool));
atom_ptr.flags.alive = true;
atom_ptr.name_offset = sym.name_offset;
esym.st_name = sym.name_offset;
@ -932,7 +932,7 @@ fn updateDeclCode(
const need_realloc = code.len > capacity or !required_alignment.check(atom_ptr.value);
if (need_realloc) {
try atom_ptr.grow(elf_file);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, old_vaddr, atom_ptr.value });
log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), old_vaddr, atom_ptr.value });
if (old_vaddr != atom_ptr.value) {
sym.value = 0;
esym.st_value = 0;
@ -1000,9 +1000,9 @@ fn updateTlv(
const gpa = elf_file.base.comp.gpa;
const mod = elf_file.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateTlv {s} ({*})", .{ decl_name, decl });
log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl });
const required_alignment = decl.getAlignment(mod);
@ -1014,7 +1014,7 @@ fn updateTlv(
sym.output_section_index = shndx;
atom_ptr.output_section_index = shndx;
sym.name_offset = try self.strtab.insert(gpa, decl_name);
sym.name_offset = try self.strtab.insert(gpa, decl_name.toSlice(&mod.intern_pool));
atom_ptr.flags.alive = true;
atom_ptr.name_offset = sym.name_offset;
esym.st_value = 0;
@ -1136,8 +1136,8 @@ pub fn updateDecl(
if (decl.isExtern(mod)) {
// Extern variable gets a .got entry only.
const variable = decl.getOwnedVariable(mod).?;
const name = mod.intern_pool.stringToSlice(decl.name);
const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name);
const name = decl.name.toSlice(&mod.intern_pool);
const lib_name = variable.lib_name.toSlice(&mod.intern_pool);
const esym_index = try self.getGlobalSymbol(elf_file, name, lib_name);
elf_file.symbol(self.symbol(esym_index)).flags.needs_got = true;
return;
@ -1293,9 +1293,9 @@ pub fn lowerUnnamedConst(
}
const unnamed_consts = gop.value_ptr;
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
const index = unnamed_consts.items.len;
const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index });
defer gpa.free(name);
const ty = val.typeOf(mod);
const sym_index = switch (try self.lowerConst(
@ -1418,7 +1418,7 @@ pub fn updateExports(
for (exports) |exp| {
if (exp.opts.section.unwrap()) |section_name| {
if (!mod.intern_pool.stringEqlSlice(section_name, ".text")) {
if (!section_name.eqlSlice(".text", &mod.intern_pool)) {
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create(
gpa,
@ -1445,7 +1445,7 @@ pub fn updateExports(
},
};
const stt_bits: u8 = @as(u4, @truncate(esym.st_info));
const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);
const exp_name = exp.opts.name.toSlice(&mod.intern_pool);
const name_off = try self.strtab.insert(gpa, exp_name);
const global_esym_index = if (metadata.@"export"(self, exp_name)) |exp_index|
exp_index.*
@ -1476,9 +1476,9 @@ pub fn updateDeclLineNumber(
defer tracy.end();
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
if (self.dwarf) |*dw| {
try dw.updateDeclLineNumber(mod, decl_index);
@ -1493,7 +1493,7 @@ pub fn deleteDeclExport(
) void {
const metadata = self.decls.getPtr(decl_index) orelse return;
const mod = elf_file.base.comp.module.?;
const exp_name = mod.intern_pool.stringToSlice(name);
const exp_name = name.toSlice(&mod.intern_pool);
const esym_index = metadata.@"export"(self, exp_name) orelse return;
log.debug("deleting export '{s}'", .{exp_name});
const esym = &self.global_esyms.items(.elf_sym)[esym_index.*];

@ -716,8 +716,8 @@ pub fn updateDecl(
if (decl.isExtern(mod)) {
// Extern variable gets a __got entry only
const variable = decl.getOwnedVariable(mod).?;
const name = mod.intern_pool.stringToSlice(decl.name);
const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name);
const name = decl.name.toSlice(&mod.intern_pool);
const lib_name = variable.lib_name.toSlice(&mod.intern_pool);
const index = try self.getGlobalSymbol(macho_file, name, lib_name);
const actual_index = self.symbols.items[index];
macho_file.getSymbol(actual_index).flags.needs_got = true;
@ -786,9 +786,9 @@ fn updateDeclCode(
const gpa = macho_file.base.comp.gpa;
const mod = macho_file.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateDeclCode {s}{*}", .{ decl_name, decl });
log.debug("updateDeclCode {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
const required_alignment = decl.getAlignment(mod);
@ -800,7 +800,7 @@ fn updateDeclCode(
sym.out_n_sect = sect_index;
atom.out_n_sect = sect_index;
sym.name = try self.strtab.insert(gpa, decl_name);
sym.name = try self.strtab.insert(gpa, decl_name.toSlice(&mod.intern_pool));
atom.flags.alive = true;
atom.name = sym.name;
nlist.n_strx = sym.name;
@ -819,7 +819,7 @@ fn updateDeclCode(
if (need_realloc) {
try atom.grow(macho_file);
log.debug("growing {s} from 0x{x} to 0x{x}", .{ decl_name, old_vaddr, atom.value });
log.debug("growing {} from 0x{x} to 0x{x}", .{ decl_name.fmt(&mod.intern_pool), old_vaddr, atom.value });
if (old_vaddr != atom.value) {
sym.value = 0;
nlist.n_value = 0;
@ -870,23 +870,24 @@ fn updateTlv(
) !void {
const mod = macho_file.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateTlv {s} ({*})", .{ decl_name, decl });
log.debug("updateTlv {} ({*})", .{ decl_name.fmt(&mod.intern_pool), decl });
const decl_name_slice = decl_name.toSlice(&mod.intern_pool);
const required_alignment = decl.getAlignment(mod);
// 1. Lower TLV initializer
const init_sym_index = try self.createTlvInitializer(
macho_file,
decl_name,
decl_name_slice,
required_alignment,
sect_index,
code,
);
// 2. Create TLV descriptor
try self.createTlvDescriptor(macho_file, sym_index, init_sym_index, decl_name);
try self.createTlvDescriptor(macho_file, sym_index, init_sym_index, decl_name_slice);
}
fn createTlvInitializer(
@ -1073,9 +1074,9 @@ pub fn lowerUnnamedConst(
}
const unnamed_consts = gop.value_ptr;
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
const index = unnamed_consts.items.len;
const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index });
defer gpa.free(name);
const sym_index = switch (try self.lowerConst(
macho_file,
@ -1199,7 +1200,7 @@ pub fn updateExports(
for (exports) |exp| {
if (exp.opts.section.unwrap()) |section_name| {
if (!mod.intern_pool.stringEqlSlice(section_name, "__text")) {
if (!section_name.eqlSlice("__text", &mod.intern_pool)) {
try mod.failed_exports.ensureUnusedCapacity(mod.gpa, 1);
mod.failed_exports.putAssumeCapacityNoClobber(exp, try Module.ErrorMsg.create(
gpa,
@ -1220,7 +1221,7 @@ pub fn updateExports(
continue;
}
const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);
const exp_name = exp.opts.name.toSlice(&mod.intern_pool);
const global_nlist_index = if (metadata.@"export"(self, exp_name)) |exp_index|
exp_index.*
else blk: {
@ -1349,13 +1350,12 @@ pub fn deleteDeclExport(
decl_index: InternPool.DeclIndex,
name: InternPool.NullTerminatedString,
) void {
const metadata = self.decls.getPtr(decl_index) orelse return;
const mod = macho_file.base.comp.module.?;
const exp_name = mod.intern_pool.stringToSlice(name);
const nlist_index = metadata.@"export"(self, exp_name) orelse return;
log.debug("deleting export '{s}'", .{exp_name});
const metadata = self.decls.getPtr(decl_index) orelse return;
const nlist_index = metadata.@"export"(self, name.toSlice(&mod.intern_pool)) orelse return;
log.debug("deleting export '{}'", .{name.fmt(&mod.intern_pool)});
const nlist = &self.symtab.items(.nlist)[nlist_index.*];
self.symtab.items(.size)[nlist_index.*] = 0;

@ -477,11 +477,11 @@ pub fn lowerUnnamedConst(self: *Plan9, val: Value, decl_index: InternPool.DeclIn
}
const unnamed_consts = gop.value_ptr;
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
const index = unnamed_consts.items.len;
// name is freed when the unnamed const is freed
const name = try std.fmt.allocPrint(gpa, "__unnamed_{s}_{d}", .{ decl_name, index });
const name = try std.fmt.allocPrint(gpa, "__unnamed_{}_{d}", .{ decl_name.fmt(&mod.intern_pool), index });
const sym_index = try self.allocateSymbolIndex();
const new_atom_idx = try self.createAtom();
@ -529,7 +529,7 @@ pub fn updateDecl(self: *Plan9, mod: *Module, decl_index: InternPool.DeclIndex)
const decl = mod.declPtr(decl_index);
if (decl.isExtern(mod)) {
log.debug("found extern decl: {s}", .{mod.intern_pool.stringToSlice(decl.name)});
log.debug("found extern decl: {}", .{decl.name.fmt(&mod.intern_pool)});
return;
}
const atom_idx = try self.seeDecl(decl_index);
@ -573,7 +573,7 @@ fn updateFinish(self: *Plan9, decl_index: InternPool.DeclIndex) !void {
const sym: aout.Sym = .{
.value = undefined, // the value of stuff gets filled in in flushModule
.type = atom.type,
.name = try gpa.dupe(u8, mod.intern_pool.stringToSlice(decl.name)),
.name = try gpa.dupe(u8, decl.name.toSlice(&mod.intern_pool)),
};
if (atom.sym_index) |s| {
@ -1013,10 +1013,12 @@ fn addDeclExports(
const atom = self.getAtom(metadata.index);
for (exports) |exp| {
const exp_name = mod.intern_pool.stringToSlice(exp.opts.name);
const exp_name = exp.opts.name.toSlice(&mod.intern_pool);
// plan9 does not support custom sections
if (exp.opts.section.unwrap()) |section_name| {
if (!mod.intern_pool.stringEqlSlice(section_name, ".text") and !mod.intern_pool.stringEqlSlice(section_name, ".data")) {
if (!section_name.eqlSlice(".text", &mod.intern_pool) and
!section_name.eqlSlice(".data", &mod.intern_pool))
{
try mod.failed_exports.put(mod.gpa, exp, try Module.ErrorMsg.create(
gpa,
mod.declPtr(decl_index).srcLoc(mod),
@ -1129,19 +1131,21 @@ pub fn seeDecl(self: *Plan9, decl_index: InternPool.DeclIndex) !Atom.Index {
// handle externs here because they might not get updateDecl called on them
const mod = self.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const name = mod.intern_pool.stringToSlice(decl.name);
if (decl.isExtern(mod)) {
// this is a "phantom atom" - it is never actually written to disk, just convenient for us to store stuff about externs
if (std.mem.eql(u8, name, "etext")) {
if (decl.name.eqlSlice("etext", &mod.intern_pool)) {
self.etext_edata_end_atom_indices[0] = atom_idx;
} else if (std.mem.eql(u8, name, "edata")) {
} else if (decl.name.eqlSlice("edata", &mod.intern_pool)) {
self.etext_edata_end_atom_indices[1] = atom_idx;
} else if (std.mem.eql(u8, name, "end")) {
} else if (decl.name.eqlSlice("end", &mod.intern_pool)) {
self.etext_edata_end_atom_indices[2] = atom_idx;
}
try self.updateFinish(decl_index);
log.debug("seeDecl(extern) for {s} (got_addr=0x{x})", .{ name, self.getAtom(atom_idx).getOffsetTableAddress(self) });
} else log.debug("seeDecl for {s}", .{name});
log.debug("seeDecl(extern) for {} (got_addr=0x{x})", .{
decl.name.fmt(&mod.intern_pool),
self.getAtom(atom_idx).getOffsetTableAddress(self),
});
} else log.debug("seeDecl for {}", .{decl.name.fmt(&mod.intern_pool)});
return atom_idx;
}
@ -1393,7 +1397,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const sym = self.syms.items[atom.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.comp.module.?.decl_exports.get(decl_index)) |exports| {
for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| {
for (exports.items) |e| if (decl_metadata.getExport(self, e.opts.name.toSlice(ip))) |exp_i| {
try self.writeSym(writer, self.syms.items[exp_i]);
};
}
@ -1440,7 +1444,7 @@ pub fn writeSyms(self: *Plan9, buf: *std.ArrayList(u8)) !void {
const sym = self.syms.items[atom.sym_index.?];
try self.writeSym(writer, sym);
if (self.base.comp.module.?.decl_exports.get(decl_index)) |exports| {
for (exports.items) |e| if (decl_metadata.getExport(self, ip.stringToSlice(e.opts.name))) |exp_i| {
for (exports.items) |e| if (decl_metadata.getExport(self, e.opts.name.toSlice(ip))) |exp_i| {
const s = self.syms.items[exp_i];
if (mem.eql(u8, s.name, "_start"))
self.entry_val = s.value;
@ -1483,25 +1487,25 @@ pub fn getDeclVAddr(
reloc_info: link.File.RelocInfo,
) !u64 {
const mod = self.base.comp.module.?;
const ip = &mod.intern_pool;
const decl = mod.declPtr(decl_index);
log.debug("getDeclVAddr for {s}", .{mod.intern_pool.stringToSlice(decl.name)});
log.debug("getDeclVAddr for {}", .{decl.name.fmt(ip)});
if (decl.isExtern(mod)) {
const extern_name = mod.intern_pool.stringToSlice(decl.name);
if (std.mem.eql(u8, extern_name, "etext")) {
if (decl.name.eqlSlice("etext", ip)) {
try self.addReloc(reloc_info.parent_atom_index, .{
.target = undefined,
.offset = reloc_info.offset,
.addend = reloc_info.addend,
.type = .special_etext,
});
} else if (std.mem.eql(u8, extern_name, "edata")) {
} else if (decl.name.eqlSlice("edata", ip)) {
try self.addReloc(reloc_info.parent_atom_index, .{
.target = undefined,
.offset = reloc_info.offset,
.addend = reloc_info.addend,
.type = .special_edata,
});
} else if (std.mem.eql(u8, extern_name, "end")) {
} else if (decl.name.eqlSlice("end", ip)) {
try self.addReloc(reloc_info.parent_atom_index, .{
.target = undefined,
.offset = reloc_info.offset,

@ -130,7 +130,7 @@ pub fn updateFunc(self: *SpirV, module: *Module, func_index: InternPool.Index, a
const func = module.funcInfo(func_index);
const decl = module.declPtr(func.owner_decl);
log.debug("lowering function {s}", .{module.intern_pool.stringToSlice(decl.name)});
log.debug("lowering function {}", .{decl.name.fmt(&module.intern_pool)});
try self.object.updateFunc(module, func_index, air, liveness);
}
@ -141,7 +141,7 @@ pub fn updateDecl(self: *SpirV, module: *Module, decl_index: InternPool.DeclInde
}
const decl = module.declPtr(decl_index);
log.debug("lowering declaration {s}", .{module.intern_pool.stringToSlice(decl.name)});
log.debug("lowering declaration {}", .{decl.name.fmt(&module.intern_pool)});
try self.object.updateDecl(module, decl_index);
}
@ -178,7 +178,7 @@ pub fn updateExports(
for (exports) |exp| {
try self.object.spv.declareEntryPoint(
spv_decl_index,
mod.intern_pool.stringToSlice(exp.opts.name),
exp.opts.name.toSlice(&mod.intern_pool),
execution_model,
);
}
@ -227,14 +227,13 @@ pub fn flushModule(self: *SpirV, arena: Allocator, prog_node: *std.Progress.Node
try error_info.appendSlice("zig_errors");
const mod = self.base.comp.module.?;
for (mod.global_error_set.keys()) |name_nts| {
const name = mod.intern_pool.stringToSlice(name_nts);
for (mod.global_error_set.keys()) |name| {
// Errors can contain pretty much any character - to encode them in a string we must escape
// them somehow. Easiest here is to use some established scheme, one which also preserves the
// name if it contains no strange characters is nice for debugging. URI encoding fits the bill.
// We're using : as separator, which is a reserved character.
const escaped_name = try std.Uri.escapeString(gpa, name);
const escaped_name = try std.Uri.escapeString(gpa, name.toSlice(&mod.intern_pool));
defer gpa.free(escaped_name);
try error_info.writer().print(":{s}", .{escaped_name});
}
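
As a concrete (hypothetical) example: an error named `Not:Found` would be emitted as `Not%3AFound`, so a literal `:` inside a name can never collide with the `:` separator, while a plain name like `OutOfMemory` passes through unchanged.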

@ -258,8 +258,8 @@ pub fn updateDecl(
if (decl.isExtern(mod)) {
const variable = decl.getOwnedVariable(mod).?;
const name = mod.intern_pool.stringToSlice(decl.name);
const lib_name = mod.intern_pool.stringToSliceUnwrap(variable.lib_name);
const name = decl.name.toSlice(&mod.intern_pool);
const lib_name = variable.lib_name.toSlice(&mod.intern_pool);
return zig_object.addOrUpdateImport(wasm_file, name, atom.sym_index, lib_name, null);
}
const val = if (decl.val.getVariable(mod)) |variable| Value.fromInterned(variable.init) else decl.val;
@ -341,8 +341,8 @@ fn finishUpdateDecl(
const atom_index = decl_info.atom;
const atom = wasm_file.getAtomPtr(atom_index);
const sym = zig_object.symbol(atom.sym_index);
const full_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
sym.name = try zig_object.string_table.insert(gpa, full_name);
const full_name = try decl.fullyQualifiedName(mod);
sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&mod.intern_pool));
try atom.code.appendSlice(gpa, code);
atom.size = @intCast(code.len);
@ -382,7 +382,7 @@ fn finishUpdateDecl(
// Will be freed upon freeing of decl or after cleanup of Wasm binary.
const full_segment_name = try std.mem.concat(gpa, u8, &.{
segment_name,
full_name,
full_name.toSlice(&mod.intern_pool),
});
errdefer gpa.free(full_segment_name);
sym.tag = .data;
@ -427,9 +427,9 @@ pub fn getOrCreateAtomForDecl(zig_object: *ZigObject, wasm_file: *Wasm, decl_ind
gop.value_ptr.* = .{ .atom = try wasm_file.createAtom(sym_index, zig_object.index) };
const mod = wasm_file.base.comp.module.?;
const decl = mod.declPtr(decl_index);
const full_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const full_name = try decl.fullyQualifiedName(mod);
const sym = zig_object.symbol(sym_index);
sym.name = try zig_object.string_table.insert(gpa, full_name);
sym.name = try zig_object.string_table.insert(gpa, full_name.toSlice(&mod.intern_pool));
}
return gop.value_ptr.atom;
}
@ -478,9 +478,9 @@ pub fn lowerUnnamedConst(zig_object: *ZigObject, wasm_file: *Wasm, val: Value, d
const parent_atom_index = try zig_object.getOrCreateAtomForDecl(wasm_file, decl_index);
const parent_atom = wasm_file.getAtom(parent_atom_index);
const local_index = parent_atom.locals.items.len;
const fqn = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{s}_{d}", .{
fqn, local_index,
const fqn = try decl.fullyQualifiedName(mod);
const name = try std.fmt.allocPrintZ(gpa, "__unnamed_{}_{d}", .{
fqn.fmt(&mod.intern_pool), local_index,
});
defer gpa.free(name);
@ -623,11 +623,11 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void {
// Addend for each relocation to the table
var addend: u32 = 0;
const mod = wasm_file.base.comp.module.?;
for (mod.global_error_set.keys()) |error_name_nts| {
for (mod.global_error_set.keys()) |error_name| {
const atom = wasm_file.getAtomPtr(atom_index);
const error_name = mod.intern_pool.stringToSlice(error_name_nts);
const len: u32 = @intCast(error_name.len + 1); // names are 0-terminated
const error_name_slice = error_name.toSlice(&mod.intern_pool);
const len: u32 = @intCast(error_name_slice.len + 1); // names are 0-terminated
const slice_ty = Type.slice_const_u8_sentinel_0;
const offset = @as(u32, @intCast(atom.code.items.len));
@ -646,10 +646,9 @@ fn populateErrorNameTable(zig_object: *ZigObject, wasm_file: *Wasm) !void {
// as we updated the error name table, we now store the actual name within the names atom
try names_atom.code.ensureUnusedCapacity(gpa, len);
names_atom.code.appendSliceAssumeCapacity(error_name);
names_atom.code.appendAssumeCapacity(0);
names_atom.code.appendSliceAssumeCapacity(error_name_slice[0..len]);
log.debug("Populated error name: '{s}'", .{error_name});
log.debug("Populated error name: '{}'", .{error_name.fmt(&mod.intern_pool)});
}
names_atom.size = addend;
zig_object.error_names_atom = names_atom_index;
@ -833,8 +832,7 @@ pub fn deleteDeclExport(
) void {
const mod = wasm_file.base.comp.module.?;
const decl_info = zig_object.decls_map.getPtr(decl_index) orelse return;
const export_name = mod.intern_pool.stringToSlice(name);
if (decl_info.@"export"(zig_object, export_name)) |sym_index| {
if (decl_info.@"export"(zig_object, name.toSlice(&mod.intern_pool))) |sym_index| {
const sym = zig_object.symbol(sym_index);
decl_info.deleteExport(sym_index);
std.debug.assert(zig_object.global_syms.remove(sym.name));
@ -864,10 +862,10 @@ pub fn updateExports(
const atom = wasm_file.getAtom(atom_index);
const atom_sym = atom.symbolLoc().getSymbol(wasm_file).*;
const gpa = mod.gpa;
log.debug("Updating exports for decl '{s}'", .{mod.intern_pool.stringToSlice(decl.name)});
log.debug("Updating exports for decl '{}'", .{decl.name.fmt(&mod.intern_pool)});
for (exports) |exp| {
if (mod.intern_pool.stringToSliceUnwrap(exp.opts.section)) |section| {
if (exp.opts.section.toSlice(&mod.intern_pool)) |section| {
try mod.failed_exports.putNoClobber(gpa, exp, try Module.ErrorMsg.create(
gpa,
decl.srcLoc(mod),
@ -877,10 +875,8 @@ pub fn updateExports(
continue;
}
const export_string = mod.intern_pool.stringToSlice(exp.opts.name);
const sym_index = if (decl_info.@"export"(zig_object, export_string)) |idx|
idx
else index: {
const export_string = exp.opts.name.toSlice(&mod.intern_pool);
const sym_index = if (decl_info.@"export"(zig_object, export_string)) |idx| idx else index: {
const sym_index = try zig_object.allocateSymbol(gpa);
try decl_info.appendExport(gpa, sym_index);
break :index sym_index;
@ -1089,9 +1085,9 @@ pub fn createDebugSectionForIndex(zig_object: *ZigObject, wasm_file: *Wasm, inde
pub fn updateDeclLineNumber(zig_object: *ZigObject, mod: *Module, decl_index: InternPool.DeclIndex) !void {
if (zig_object.dwarf) |*dw| {
const decl = mod.declPtr(decl_index);
const decl_name = mod.intern_pool.stringToSlice(try decl.fullyQualifiedName(mod));
const decl_name = try decl.fullyQualifiedName(mod);
log.debug("updateDeclLineNumber {s}{*}", .{ decl_name, decl });
log.debug("updateDeclLineNumber {}{*}", .{ decl_name.fmt(&mod.intern_pool), decl });
try dw.updateDeclLineNumber(mod, decl_index);
}
}

@ -73,7 +73,7 @@ pub const MutableValue = union(enum) {
} }),
.bytes => |b| try ip.get(gpa, .{ .aggregate = .{
.ty = b.ty,
.storage = .{ .bytes = b.data },
.storage = .{ .bytes = try ip.getOrPutString(gpa, b.data, .maybe_embedded_nulls) },
} }),
.aggregate => |a| {
const elems = try arena.alloc(InternPool.Index, a.elems.len);
@ -158,18 +158,18 @@ pub const MutableValue = union(enum) {
},
.aggregate => |agg| switch (agg.storage) {
.bytes => |bytes| {
assert(bytes.len == ip.aggregateTypeLenIncludingSentinel(agg.ty));
const len: usize = @intCast(ip.aggregateTypeLenIncludingSentinel(agg.ty));
assert(ip.childType(agg.ty) == .u8_type);
if (allow_bytes) {
const arena_bytes = try arena.alloc(u8, bytes.len);
@memcpy(arena_bytes, bytes);
const arena_bytes = try arena.alloc(u8, len);
@memcpy(arena_bytes, bytes.toSlice(len, ip));
mv.* = .{ .bytes = .{
.ty = agg.ty,
.data = arena_bytes,
} };
} else {
const mut_elems = try arena.alloc(MutableValue, bytes.len);
for (bytes, mut_elems) |b, *mut_elem| {
const mut_elems = try arena.alloc(MutableValue, len);
for (bytes.toSlice(len, ip), mut_elems) |b, *mut_elem| {
mut_elem.* = .{ .interned = try ip.get(gpa, .{ .int = .{
.ty = .u8_type,
.storage = .{ .u64 = b },
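
Since a `bytes` aggregate key now carries only a string index, the byte count has to be recovered out-of-band, here from the aggregate type via `aggregateTypeLenIncludingSentinel`. A minimal sketch of the shape of that lookup (stand-in types, not the real pool):

    const std = @import("std");

    const Strings = struct {
        bytes: []const u8, // one backing buffer for every interned string

        const Index = enum(u32) {
            _,

            fn toSlice(i: Index, len: usize, strings: *const Strings) []const u8 {
                return strings.bytes[@intFromEnum(i)..][0..len];
            }
        };
    };

    test "the length comes from the type, not the key" {
        const strings: Strings = .{ .bytes = "abcxyz" };
        const key: Strings.Index = @enumFromInt(3);
        const len_from_type: usize = 3; // e.g. the element count of a [3]u8
        try std.testing.expectEqualStrings("xyz", key.toSlice(len_from_type, &strings));
    }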

@ -204,26 +204,35 @@ fn printAggregate(
try writer.writeAll(" }");
return;
},
.Array => if (aggregate.storage == .bytes and aggregate.storage.bytes.len > 0) {
const skip_terminator = aggregate.storage.bytes[aggregate.storage.bytes.len - 1] == 0;
const bytes = if (skip_terminator) b: {
break :b aggregate.storage.bytes[0 .. aggregate.storage.bytes.len - 1];
} else aggregate.storage.bytes;
try writer.print("\"{}\"", .{std.zig.fmtEscapes(bytes)});
if (!is_ref) try writer.writeAll(".*");
return;
} else if (ty.arrayLen(zcu) == 0) {
if (is_ref) try writer.writeByte('&');
return writer.writeAll(".{}");
} else if (ty.arrayLen(zcu) == 1) one_byte_str: {
// The repr isn't `bytes`, but we might still be able to print this as a string
if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str;
const elem_val = Value.fromInterned(aggregate.storage.values()[0]);
if (elem_val.isUndef(zcu)) break :one_byte_str;
const byte = elem_val.toUnsignedInt(zcu);
try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})});
if (!is_ref) try writer.writeAll(".*");
return;
.Array => {
switch (aggregate.storage) {
.bytes => |bytes| string: {
const len = ty.arrayLenIncludingSentinel(zcu);
if (len == 0) break :string;
const slice = bytes.toSlice(if (bytes.at(len - 1, ip) == 0) len - 1 else len, ip);
try writer.print("\"{}\"", .{std.zig.fmtEscapes(slice)});
if (!is_ref) try writer.writeAll(".*");
return;
},
.elems, .repeated_elem => {},
}
switch (ty.arrayLen(zcu)) {
0 => {
if (is_ref) try writer.writeByte('&');
return writer.writeAll(".{}");
},
1 => one_byte_str: {
// The repr isn't `bytes`, but we might still be able to print this as a string
if (ty.childType(zcu).toIntern() != .u8_type) break :one_byte_str;
const elem_val = Value.fromInterned(aggregate.storage.values()[0]);
if (elem_val.isUndef(zcu)) break :one_byte_str;
const byte = elem_val.toUnsignedInt(zcu);
try writer.print("\"{}\"", .{std.zig.fmtEscapes(&.{@intCast(byte)})});
if (!is_ref) try writer.writeAll(".*");
return;
},
else => {},
}
},
.Vector => if (ty.arrayLen(zcu) == 0) {
if (is_ref) try writer.writeByte('&');
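
The reworked `.Array` branch trims one trailing 0 byte before printing, so a sentinel-terminated string aggregate renders as "abc" rather than "abc\x00". A standalone sketch of that trimming rule:

    const std = @import("std");

    test "trim a trailing sentinel before printing" {
        const stored: []const u8 = "abc\x00"; // stored bytes, sentinel included
        const trimmed = if (stored[stored.len - 1] == 0)
            stored[0 .. stored.len - 1]
        else
            stored;
        var buf: [16]u8 = undefined;
        const out = try std.fmt.bufPrint(&buf, "\"{}\"", .{std.zig.fmtEscapes(trimmed)});
        try std.testing.expectEqualStrings("\"abc\"", out);
    }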

@ -490,18 +490,10 @@ pub const Type = struct {
};
},
.anyframe_type => true,
.array_type => |array_type| {
if (array_type.sentinel != .none) {
return Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
} else {
return array_type.len > 0 and
try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
}
},
.vector_type => |vector_type| {
return vector_type.len > 0 and
try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat);
},
.array_type => |array_type| return array_type.lenIncludingSentinel() > 0 and
try Type.fromInterned(array_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
.vector_type => |vector_type| return vector_type.len > 0 and
try Type.fromInterned(vector_type.child).hasRuntimeBitsAdvanced(mod, ignore_comptime_only, strat),
.opt_type => |child| {
const child_ty = Type.fromInterned(child);
if (child_ty.isNoReturn(mod)) {
@ -1240,7 +1232,7 @@ pub const Type = struct {
.anyframe_type => return AbiSizeAdvanced{ .scalar = @divExact(target.ptrBitWidth(), 8) },
.array_type => |array_type| {
const len = array_type.len + @intFromBool(array_type.sentinel != .none);
const len = array_type.lenIncludingSentinel();
if (len == 0) return .{ .scalar = 0 };
switch (try Type.fromInterned(array_type.child).abiSizeAdvanced(mod, strat)) {
.scalar => |elem_size| return .{ .scalar = len * elem_size },
@ -1577,7 +1569,7 @@ pub const Type = struct {
.anyframe_type => return target.ptrBitWidth(),
.array_type => |array_type| {
const len = array_type.len + @intFromBool(array_type.sentinel != .none);
const len = array_type.lenIncludingSentinel();
if (len == 0) return 0;
const elem_ty = Type.fromInterned(array_type.child);
const elem_size = @max(
@ -1731,7 +1723,7 @@ pub const Type = struct {
.struct_type => ip.loadStructType(ty.toIntern()).haveLayout(ip),
.union_type => ip.loadUnionType(ty.toIntern()).haveLayout(ip),
.array_type => |array_type| {
if ((array_type.len + @intFromBool(array_type.sentinel != .none)) == 0) return true;
if (array_type.lenIncludingSentinel() == 0) return true;
return Type.fromInterned(array_type.child).layoutIsResolved(mod);
},
.opt_type => |child| Type.fromInterned(child).layoutIsResolved(mod),
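
The repeated `array_type.len + @intFromBool(array_type.sentinel != .none)` computation is folded into a `lenIncludingSentinel()` helper throughout this file. A hedged sketch of what that helper presumably does (stand-in struct; the real field is an InternPool sentinel index rather than a bool):

    const std = @import("std");

    const ArrayType = struct {
        len: u64,
        has_sentinel: bool, // stands in for `sentinel != .none`

        fn lenIncludingSentinel(a: ArrayType) u64 {
            return a.len + @intFromBool(a.has_sentinel);
        }
    };

    test "a sentinel adds one element to the stored length" {
        try std.testing.expectEqual(@as(u64, 4), (ArrayType{ .len = 3, .has_sentinel = true }).lenIncludingSentinel());
        try std.testing.expectEqual(@as(u64, 3), (ArrayType{ .len = 3, .has_sentinel = false }).lenIncludingSentinel());
    }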