diff --git a/src/Air.zig b/src/Air.zig
index 56f7d4cf01..b179a3c024 100644
--- a/src/Air.zig
+++ b/src/Air.zig
@@ -1182,7 +1182,7 @@ pub fn getMainBody(air: Air) []const Air.Inst.Index {
     return air.extra[extra.end..][0..extra.data.body_len];
 }
 
-pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type {
+pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: *const InternPool) Type {
     const ref_int = @enumToInt(inst);
     if (ref_int < InternPool.static_keys.len) {
         return InternPool.static_keys[ref_int].typeOf().toType();
@@ -1190,7 +1190,7 @@ pub fn typeOf(air: Air, inst: Air.Inst.Ref, ip: InternPool) Type {
     return air.typeOfIndex(ref_int - ref_start_index, ip);
 }
 
-pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: InternPool) Type {
+pub fn typeOfIndex(air: Air, inst: Air.Inst.Index, ip: *const InternPool) Type {
     const datas = air.instructions.items(.data);
     switch (air.instructions.items(.tag)[inst]) {
         .add,
@@ -1520,7 +1520,7 @@ pub fn value(air: Air, inst: Inst.Ref, mod: *Module) !?Value {
     const air_datas = air.instructions.items(.data);
     switch (air.instructions.items(.tag)[inst_index]) {
         .interned => return air_datas[inst_index].interned.toValue(),
-        else => return air.typeOfIndex(inst_index, mod.intern_pool).onePossibleValue(mod),
+        else => return air.typeOfIndex(inst_index, &mod.intern_pool).onePossibleValue(mod),
     }
 }
 
@@ -1537,7 +1537,7 @@ pub fn nullTerminatedString(air: Air, index: usize) [:0]const u8 {
 /// because it can cause side effects. If an instruction does not need to be
 /// lowered, and Liveness determines its result is unused, backends should
 /// avoid lowering it.
-pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: InternPool) bool {
+pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
     const data = air.instructions.items(.data)[inst];
     return switch (air.instructions.items(.tag)[inst]) {
         .arg,
diff --git a/src/InternPool.zig b/src/InternPool.zig
index 7debd2c2a3..ffd72245d5 100644
--- a/src/InternPool.zig
+++ b/src/InternPool.zig
@@ -2992,7 +2992,7 @@ pub fn indexToKey(ip: *const InternPool, index: Index) Key {
     };
 }
 
-fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType {
+fn indexToKeyFuncType(ip: *const InternPool, data: u32) Key.FuncType {
     const type_function = ip.extraDataTrail(TypeFunction, data);
     const param_types = @ptrCast(
         []Index,
@@ -3015,7 +3015,7 @@ fn indexToKeyFuncType(ip: InternPool, data: u32) Key.FuncType {
     };
 }
 
-fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key {
+fn indexToKeyEnum(ip: *const InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key {
     const enum_explicit = ip.extraDataTrail(EnumExplicit, data);
     const names = @ptrCast(
         []const NullTerminatedString,
@@ -3038,7 +3038,7 @@ fn indexToKeyEnum(ip: InternPool, data: u32, tag_mode: Key.EnumType.TagMode) Key
     } };
 }
 
-fn indexToKeyBigInt(ip: InternPool, limb_index: u32, positive: bool) Key {
+fn indexToKeyBigInt(ip: *const InternPool, limb_index: u32, positive: bool) Key {
     const int_info = ip.limbData(Int, limb_index);
     return .{ .int = .{
         .ty = int_info.ty,
@@ -4351,7 +4351,7 @@ fn addLimbsAssumeCapacity(ip: *InternPool, limbs: []const Limb) void {
     }
 }
 
-fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data: T, end: usize } {
+fn extraDataTrail(ip: *const InternPool, comptime T: type, index: usize) struct { data: T, end: usize } {
     var result: T = undefined;
     const fields = @typeInfo(T).Struct.fields;
     inline for (fields, 0..) |field, i| {
@@ -4384,12 +4384,12 @@ fn extraDataTrail(ip: InternPool, comptime T: type, index: usize) struct { data:
     };
 }
 
-fn extraData(ip: InternPool, comptime T: type, index: usize) T {
+fn extraData(ip: *const InternPool, comptime T: type, index: usize) T {
     return extraDataTrail(ip, T, index).data;
 }
 
 /// Asserts the struct has 32-bit fields and the number of fields is evenly divisible by 2.
-fn limbData(ip: InternPool, comptime T: type, index: usize) T {
+fn limbData(ip: *const InternPool, comptime T: type, index: usize) T {
     switch (@sizeOf(Limb)) {
         @sizeOf(u32) => return extraData(ip, T, index),
         @sizeOf(u64) => {},
@@ -4413,7 +4413,7 @@ fn limbData(ip: InternPool, comptime T: type, index: usize) T {
 }
 
 /// This function returns the Limb slice that is trailing data after a payload.
-fn limbSlice(ip: InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb {
+fn limbSlice(ip: *const InternPool, comptime S: type, limb_index: u32, len: u32) []const Limb {
     const field_count = @typeInfo(S).Struct.fields.len;
     switch (@sizeOf(Limb)) {
         @sizeOf(u32) => {
@@ -4433,7 +4433,7 @@ const LimbsAsIndexes = struct {
     len: u32,
 };
 
-fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes {
+fn limbsSliceToIndex(ip: *const InternPool, limbs: []const Limb) LimbsAsIndexes {
     const host_slice = switch (@sizeOf(Limb)) {
         @sizeOf(u32) => ip.extra.items,
         @sizeOf(u64) => ip.limbs.items,
@@ -4447,7 +4447,7 @@ fn limbsSliceToIndex(ip: InternPool, limbs: []const Limb) LimbsAsIndexes {
 }
 
 /// This function converts Limb array indexes to a primitive slice type.
-fn limbsIndexToSlice(ip: InternPool, limbs: LimbsAsIndexes) []const Limb {
+fn limbsIndexToSlice(ip: *const InternPool, limbs: LimbsAsIndexes) []const Limb {
     return switch (@sizeOf(Limb)) {
         @sizeOf(u32) => ip.extra.items[limbs.start..][0..limbs.len],
         @sizeOf(u64) => ip.limbs.items[limbs.start..][0..limbs.len],
@@ -4485,7 +4485,7 @@ test "basic usage" {
     try std.testing.expect(another_array_i32 == array_i32);
 }
 
-pub fn childType(ip: InternPool, i: Index) Index {
+pub fn childType(ip: *const InternPool, i: Index) Index {
     return switch (ip.indexToKey(i)) {
         .ptr_type => |ptr_type| ptr_type.elem_type,
         .vector_type => |vector_type| vector_type.child,
@@ -4496,7 +4496,7 @@ pub fn childType(ip: InternPool, i: Index) Index {
 }
 
 /// Given a slice type, returns the type of the ptr field.
-pub fn slicePtrType(ip: InternPool, i: Index) Index {
+pub fn slicePtrType(ip: *const InternPool, i: Index) Index {
     switch (i) {
         .slice_const_u8_type => return .manyptr_const_u8_type,
         .slice_const_u8_sentinel_0_type => return .manyptr_const_u8_sentinel_0_type,
@@ -4510,7 +4510,7 @@ pub fn slicePtrType(ip: InternPool, i: Index) Index {
 }
 
 /// Given a slice value, returns the value of the ptr field.
-pub fn slicePtr(ip: InternPool, i: Index) Index {
+pub fn slicePtr(ip: *const InternPool, i: Index) Index {
     const item = ip.items.get(@enumToInt(i));
     switch (item.tag) {
         .ptr_slice => return ip.extraData(PtrSlice, item.data).ptr,
@@ -4519,7 +4519,7 @@ pub fn slicePtr(ip: InternPool, i: Index) Index {
 }
 
 /// Given a slice value, returns the value of the len field.
-pub fn sliceLen(ip: InternPool, i: Index) Index {
+pub fn sliceLen(ip: *const InternPool, i: Index) Index {
     const item = ip.items.get(@enumToInt(i));
     switch (item.tag) {
         .ptr_slice => return ip.extraData(PtrSlice, item.data).len,
@@ -4702,7 +4702,7 @@ pub fn getCoercedInts(ip: *InternPool, gpa: Allocator, int: Key.Int, new_ty: Ind
     } });
 }
 
-pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex {
+pub fn indexToStructType(ip: *const InternPool, val: Index) Module.Struct.OptionalIndex {
     assert(val != .none);
     const tags = ip.items.items(.tag);
     if (tags[@enumToInt(val)] != .type_struct) return .none;
@@ -4710,7 +4710,7 @@ pub fn indexToStructType(ip: InternPool, val: Index) Module.Struct.OptionalIndex
     return @intToEnum(Module.Struct.Index, datas[@enumToInt(val)]).toOptional();
 }
 
-pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex {
+pub fn indexToUnionType(ip: *const InternPool, val: Index) Module.Union.OptionalIndex {
     assert(val != .none);
     const tags = ip.items.items(.tag);
     switch (tags[@enumToInt(val)]) {
@@ -4721,7 +4721,7 @@ pub fn indexToUnionType(ip: InternPool, val: Index) Module.Union.OptionalIndex {
     return @intToEnum(Module.Union.Index, datas[@enumToInt(val)]).toOptional();
 }
 
-pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType {
+pub fn indexToFuncType(ip: *const InternPool, val: Index) ?Key.FuncType {
     assert(val != .none);
     const tags = ip.items.items(.tag);
     const datas = ip.items.items(.data);
@@ -4731,7 +4731,7 @@ pub fn indexToFuncType(ip: InternPool, val: Index) ?Key.FuncType {
     }
 }
 
-pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex {
+pub fn indexToFunc(ip: *const InternPool, val: Index) Module.Fn.OptionalIndex {
     assert(val != .none);
     const tags = ip.items.items(.tag);
     if (tags[@enumToInt(val)] != .func) return .none;
@@ -4739,7 +4739,7 @@ pub fn indexToFunc(ip: InternPool, val: Index) Module.Fn.OptionalIndex {
     return ip.extraData(Key.Func, datas[@enumToInt(val)]).index.toOptional();
 }
 
-pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex {
+pub fn indexToInferredErrorSetType(ip: *const InternPool, val: Index) Module.Fn.InferredErrorSet.OptionalIndex {
     assert(val != .none);
     const tags = ip.items.items(.tag);
     if (tags[@enumToInt(val)] != .type_inferred_error_set) return .none;
@@ -4748,7 +4748,7 @@ pub fn indexToInferredErrorSetType(ip: InternPool, val: Index) Module.Fn.Inferre
 }
 
 /// includes .comptime_int_type
-pub fn isIntegerType(ip: InternPool, ty: Index) bool {
+pub fn isIntegerType(ip: *const InternPool, ty: Index) bool {
     return switch (ty) {
         .usize_type,
         .isize_type,
@@ -4769,7 +4769,7 @@ pub fn isIntegerType(ip: InternPool, ty: Index) bool {
 }
 
 /// does not include .enum_literal_type
-pub fn isEnumType(ip: InternPool, ty: Index) bool {
+pub fn isEnumType(ip: *const InternPool, ty: Index) bool {
     return switch (ty) {
         .atomic_order_type,
         .atomic_rmw_op_type,
@@ -4783,35 +4783,35 @@ pub fn isEnumType(ip: InternPool, ty: Index) bool {
     };
 }
 
-pub fn isFunctionType(ip: InternPool, ty: Index) bool {
+pub fn isFunctionType(ip: *const InternPool, ty: Index) bool {
     return ip.indexToKey(ty) == .func_type;
 }
 
-pub fn isPointerType(ip: InternPool, ty: Index) bool {
+pub fn isPointerType(ip: *const InternPool, ty: Index) bool {
     return ip.indexToKey(ty) == .ptr_type;
 }
 
-pub fn isOptionalType(ip: InternPool, ty: Index) bool {
+pub fn isOptionalType(ip: *const InternPool, ty: Index) bool {
     return ip.indexToKey(ty) == .opt_type;
 }
 
 /// includes .inferred_error_set_type
-pub fn isErrorSetType(ip: InternPool, ty: Index) bool {
+pub fn isErrorSetType(ip: *const InternPool, ty: Index) bool {
     return ty == .anyerror_type or switch (ip.indexToKey(ty)) {
         .error_set_type, .inferred_error_set_type => true,
         else => false,
     };
 }
 
-pub fn isInferredErrorSetType(ip: InternPool, ty: Index) bool {
+pub fn isInferredErrorSetType(ip: *const InternPool, ty: Index) bool {
     return ip.indexToKey(ty) == .inferred_error_set_type;
 }
 
-pub fn isErrorUnionType(ip: InternPool, ty: Index) bool {
+pub fn isErrorUnionType(ip: *const InternPool, ty: Index) bool {
     return ip.indexToKey(ty) == .error_union_type;
 }
 
-pub fn isAggregateType(ip: InternPool, ty: Index) bool {
+pub fn isAggregateType(ip: *const InternPool, ty: Index) bool {
     return switch (ip.indexToKey(ty)) {
         .array_type, .vector_type, .anon_struct_type, .struct_type => true,
         else => false,
@@ -4827,11 +4827,11 @@ pub fn mutateVarInit(ip: *InternPool, index: Index, init_index: Index) void {
     ip.extra.items[ip.items.items(.data)[@enumToInt(index)] + field_index] = @enumToInt(init_index);
 }
 
-pub fn dump(ip: InternPool) void {
+pub fn dump(ip: *const InternPool) void {
     dumpFallible(ip, std.heap.page_allocator) catch return;
 }
 
-fn dumpFallible(ip: InternPool, arena: Allocator) anyerror!void {
+fn dumpFallible(ip: *const InternPool, arena: Allocator) anyerror!void {
     const items_size = (1 + 4) * ip.items.len;
     const extra_size = 4 * ip.extra.items.len;
     const limbs_size = 8 * ip.limbs.items.len;
@@ -5023,11 +5023,11 @@ pub fn structPtr(ip: *InternPool, index: Module.Struct.Index) *Module.Struct {
     return ip.allocated_structs.at(@enumToInt(index));
 }
 
-pub fn structPtrConst(ip: InternPool, index: Module.Struct.Index) *const Module.Struct {
+pub fn structPtrConst(ip: *const InternPool, index: Module.Struct.Index) *const Module.Struct {
     return ip.allocated_structs.at(@enumToInt(index));
 }
 
-pub fn structPtrUnwrapConst(ip: InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct {
+pub fn structPtrUnwrapConst(ip: *const InternPool, index: Module.Struct.OptionalIndex) ?*const Module.Struct {
     return structPtrConst(ip, index.unwrap() orelse return null);
 }
 
@@ -5035,7 +5035,7 @@ pub fn unionPtr(ip: *InternPool, index: Module.Union.Index) *Module.Union {
     return ip.allocated_unions.at(@enumToInt(index));
 }
 
-pub fn unionPtrConst(ip: InternPool, index: Module.Union.Index) *const Module.Union {
+pub fn unionPtrConst(ip: *const InternPool, index: Module.Union.Index) *const Module.Union {
     return ip.allocated_unions.at(@enumToInt(index));
 }
 
@@ -5043,7 +5043,7 @@ pub fn funcPtr(ip: *InternPool, index: Module.Fn.Index) *Module.Fn {
     return ip.allocated_funcs.at(@enumToInt(index));
 }
 
-pub fn funcPtrConst(ip: InternPool, index: Module.Fn.Index) *const Module.Fn {
+pub fn funcPtrConst(ip: *const InternPool, index: Module.Fn.Index) *const Module.Fn {
     return ip.allocated_funcs.at(@enumToInt(index));
 }
 
@@ -5051,7 +5051,7 @@ pub fn inferredErrorSetPtr(ip: *InternPool, index: Module.Fn.InferredErrorSet.In
     return ip.allocated_inferred_error_sets.at(@enumToInt(index));
 }
 
-pub fn inferredErrorSetPtrConst(ip: InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet {
+pub fn inferredErrorSetPtrConst(ip: *const InternPool, index: Module.Fn.InferredErrorSet.Index) *const Module.Fn.InferredErrorSet {
     return ip.allocated_inferred_error_sets.at(@enumToInt(index));
 }
 
@@ -5182,7 +5182,7 @@ pub fn getString(ip: *InternPool, s: []const u8) OptionalNullTerminatedString {
     }
 }
 
-pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 {
+pub fn stringToSlice(ip: *const InternPool, s: NullTerminatedString) [:0]const u8 {
     const string_bytes = ip.string_bytes.items;
     const start = @enumToInt(s);
     var end: usize = start;
@@ -5190,11 +5190,11 @@ pub fn stringToSlice(ip: InternPool, s: NullTerminatedString) [:0]const u8 {
     return string_bytes[start..end :0];
 }
 
-pub fn stringToSliceUnwrap(ip: InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 {
+pub fn stringToSliceUnwrap(ip: *const InternPool, s: OptionalNullTerminatedString) ?[:0]const u8 {
     return ip.stringToSlice(s.unwrap() orelse return null);
 }
 
-pub fn typeOf(ip: InternPool, index: Index) Index {
+pub fn typeOf(ip: *const InternPool, index: Index) Index {
     // This optimization of static keys is required so that typeOf can be called
     // on static keys that haven't been added yet during static key initialization.
     // An alternative would be to topological sort the static keys, but this would
@@ -5382,12 +5382,12 @@ pub fn typeOf(ip: InternPool, index: Index) Index {
 }
 
 /// Assumes that the enum's field indexes equal its value tags.
-pub fn toEnum(ip: InternPool, comptime E: type, i: Index) E {
+pub fn toEnum(ip: *const InternPool, comptime E: type, i: Index) E {
     const int = ip.indexToKey(i).enum_tag.int;
     return @intToEnum(E, ip.indexToKey(int).int.storage.u64);
 }
 
-pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 {
+pub fn aggregateTypeLen(ip: *const InternPool, ty: Index) u64 {
     return switch (ip.indexToKey(ty)) {
         .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
         .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
@@ -5397,7 +5397,7 @@ pub fn aggregateTypeLen(ip: InternPool, ty: Index) u64 {
     };
 }
 
-pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 {
+pub fn aggregateTypeLenIncludingSentinel(ip: *const InternPool, ty: Index) u64 {
     return switch (ip.indexToKey(ty)) {
         .struct_type => |struct_type| ip.structPtrConst(struct_type.index.unwrap() orelse return 0).fields.count(),
         .anon_struct_type => |anon_struct_type| anon_struct_type.types.len,
@@ -5407,7 +5407,7 @@ pub fn aggregateTypeLenIncludingSentinel(ip: InternPool, ty: Index) u64 {
     };
 }
 
-pub fn isNoReturn(ip: InternPool, ty: Index) bool {
+pub fn isNoReturn(ip: *const InternPool, ty: Index) bool {
     return switch (ty) {
         .noreturn_type => true,
         else => switch (ip.indexToKey(ty)) {
@@ -5420,7 +5420,7 @@ pub fn isNoReturn(ip: InternPool, ty: Index) bool {
 
 /// This is a particularly hot function, so we operate directly on encodings
 /// rather than the more straightforward implementation of calling `indexToKey`.
-pub fn zigTypeTagOrPoison(ip: InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId {
+pub fn zigTypeTagOrPoison(ip: *const InternPool, index: Index) error{GenericPoison}!std.builtin.TypeId {
     return switch (index) {
         .u1_type,
         .u8_type,
diff --git a/src/Liveness.zig b/src/Liveness.zig
index 4f3d87d3c2..b12b638208 100644
--- a/src/Liveness.zig
+++ b/src/Liveness.zig
@@ -225,7 +225,7 @@ pub fn categorizeOperand(
     air: Air,
     inst: Air.Inst.Index,
     operand: Air.Inst.Index,
-    ip: InternPool,
+    ip: *const InternPool,
 ) OperandCategory {
     const air_tags = air.instructions.items(.tag);
     const air_datas = air.instructions.items(.data);
@@ -1139,7 +1139,7 @@ fn analyzeInst(
         .aggregate_init => {
             const ty_pl = inst_datas[inst].ty_pl;
             const aggregate_ty = a.air.getRefType(ty_pl.ty);
-            const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*));
+            const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
             const elements = @ptrCast([]const Air.Inst.Ref, a.air.extra[ty_pl.payload..][0..len]);
 
             if (elements.len <= bpi - 1) {
@@ -1291,7 +1291,7 @@ fn analyzeOperands(
     // If our result is unused and the instruction doesn't need to be lowered, backends will
     // skip the lowering of this instruction, so we don't want to record uses of operands.
     // That way, we can mark as many instructions as possible unused.
-    if (!immediate_death or a.air.mustLower(inst, ip.*)) {
+    if (!immediate_death or a.air.mustLower(inst, ip)) {
         // Note that it's important we iterate over the operands backwards, so that if a dying
         // operand is used multiple times we mark its last use as its death.
         var i = operands.len;
@@ -1837,7 +1837,7 @@ fn AnalyzeBigOperands(comptime pass: LivenessPass) type {
             // If our result is unused and the instruction doesn't need to be lowered, backends will
             // skip the lowering of this instruction, so we don't want to record uses of operands.
             // That way, we can mark as many instructions as possible unused.
-            if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip.*)) return;
+            if (big.will_die_immediately and !big.a.air.mustLower(big.inst, ip)) return;
 
             const extra_byte = (big.operands_remaining - (bpi - 1)) / 31;
             const extra_bit = @intCast(u5, big.operands_remaining - (bpi - 1) - extra_byte * 31);
diff --git a/src/Liveness/Verify.zig b/src/Liveness/Verify.zig
index e8b024eb6f..a5fc592894 100644
--- a/src/Liveness/Verify.zig
+++ b/src/Liveness/Verify.zig
@@ -32,7 +32,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
     const tag = self.air.instructions.items(.tag);
     const data = self.air.instructions.items(.data);
     for (body) |inst| {
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) {
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) {
            // This instruction will not be lowered and should be ignored.
            continue;
        }
@@ -325,7 +325,7 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
             .aggregate_init => {
                 const ty_pl = data[inst].ty_pl;
                 const aggregate_ty = self.air.getRefType(ty_pl.ty);
-                const len = @intCast(usize, aggregate_ty.arrayLenIp(ip.*));
+                const len = @intCast(usize, aggregate_ty.arrayLenIp(ip));
                 const elements = @ptrCast([]const Air.Inst.Ref, self.air.extra[ty_pl.payload..][0..len]);
 
                 var bt = self.liveness.iterateBigTomb(inst);
diff --git a/src/Module.zig b/src/Module.zig
index ffc6a95fe1..b1a74932d3 100644
--- a/src/Module.zig
+++ b/src/Module.zig
@@ -6726,7 +6726,7 @@ pub fn manyConstPtrType(mod: *Module, child_type: Type) Allocator.Error!Type {
 }
 
 pub fn adjustPtrTypeChild(mod: *Module, ptr_ty: Type, new_child: Type) Allocator.Error!Type {
-    const info = Type.ptrInfoIp(mod.intern_pool, ptr_ty.toIntern());
+    const info = Type.ptrInfoIp(&mod.intern_pool, ptr_ty.toIntern());
 
     return mod.ptrType(.{
         .elem_type = new_child.toIntern(),
diff --git a/src/Sema.zig b/src/Sema.zig
index 8836e89528..b4e07d749e 100644
--- a/src/Sema.zig
+++ b/src/Sema.zig
@@ -33624,7 +33624,7 @@ pub fn typeHasOnePossibleValue(sema: *Sema, ty: Type) CompileError!?Value {
 
 /// Returns the type of the AIR instruction.
 fn typeOf(sema: *Sema, inst: Air.Inst.Ref) Type {
-    return sema.getTmpAir().typeOf(inst, sema.mod.intern_pool);
+    return sema.getTmpAir().typeOf(inst, &sema.mod.intern_pool);
 }
 
 pub fn getTmpAir(sema: Sema) Air {
diff --git a/src/arch/aarch64/CodeGen.zig b/src/arch/aarch64/CodeGen.zig
index 5874440e50..d01a93dd0d 100644
--- a/src/arch/aarch64/CodeGen.zig
+++ b/src/arch/aarch64/CodeGen.zig
@@ -660,7 +660,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             continue;
 
         const old_air_bookkeeping = self.air_bookkeeping;
@@ -6412,10 +6412,10 @@ fn registerAlias(self: *Self, reg: Register, ty: Type) Register {
 
 fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOf(inst, mod.intern_pool);
+    return self.air.typeOf(inst, &mod.intern_pool);
 }
 
 fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOfIndex(inst, mod.intern_pool);
+    return self.air.typeOfIndex(inst, &mod.intern_pool);
 }
diff --git a/src/arch/arm/CodeGen.zig b/src/arch/arm/CodeGen.zig
index 360f52cb30..69a156999b 100644
--- a/src/arch/arm/CodeGen.zig
+++ b/src/arch/arm/CodeGen.zig
@@ -644,7 +644,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             continue;
 
         const old_air_bookkeeping = self.air_bookkeeping;
@@ -6317,10 +6317,10 @@ fn parseRegName(name: []const u8) ?Register {
 
 fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOf(inst, mod.intern_pool);
+    return self.air.typeOf(inst, &mod.intern_pool);
 }
 
 fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOfIndex(inst, mod.intern_pool);
+    return self.air.typeOfIndex(inst, &mod.intern_pool);
 }
diff --git a/src/arch/riscv64/CodeGen.zig b/src/arch/riscv64/CodeGen.zig
index 5417650dd5..809c388532 100644
--- a/src/arch/riscv64/CodeGen.zig
+++ b/src/arch/riscv64/CodeGen.zig
@@ -478,7 +478,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             continue;
 
         const old_air_bookkeeping = self.air_bookkeeping;
@@ -2737,10 +2737,10 @@ fn parseRegName(name: []const u8) ?Register {
 
 fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOf(inst, mod.intern_pool);
+    return self.air.typeOf(inst, &mod.intern_pool);
 }
 
 fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOfIndex(inst, mod.intern_pool);
+    return self.air.typeOfIndex(inst, &mod.intern_pool);
 }
diff --git a/src/arch/sparc64/CodeGen.zig b/src/arch/sparc64/CodeGen.zig
index 3bcdd5ad25..fde5424ddc 100644
--- a/src/arch/sparc64/CodeGen.zig
+++ b/src/arch/sparc64/CodeGen.zig
@@ -498,7 +498,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
 
     for (body) |inst| {
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             continue;
 
         const old_air_bookkeeping = self.air_bookkeeping;
@@ -4883,10 +4883,10 @@ fn wantSafety(self: *Self) bool {
 
 fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOf(inst, mod.intern_pool);
+    return self.air.typeOf(inst, &mod.intern_pool);
 }
 
 fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOfIndex(inst, mod.intern_pool);
+    return self.air.typeOfIndex(inst, &mod.intern_pool);
 }
diff --git a/src/arch/wasm/CodeGen.zig b/src/arch/wasm/CodeGen.zig
index af2b37312d..e397cf29f8 100644
--- a/src/arch/wasm/CodeGen.zig
+++ b/src/arch/wasm/CodeGen.zig
@@ -2076,7 +2076,7 @@ fn genBody(func: *CodeGen, body: []const Air.Inst.Index) InnerError!void {
     const ip = &mod.intern_pool;
 
     for (body) |inst| {
-        if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip.*)) {
+        if (func.liveness.isUnused(inst) and !func.air.mustLower(inst, ip)) {
             continue;
         }
         const old_bookkeeping_value = func.air_bookkeeping;
@@ -7436,10 +7436,10 @@ fn airFrameAddress(func: *CodeGen, inst: Air.Inst.Index) InnerError!void {
 
 fn typeOf(func: *CodeGen, inst: Air.Inst.Ref) Type {
     const mod = func.bin_file.base.options.module.?;
-    return func.air.typeOf(inst, mod.intern_pool);
+    return func.air.typeOf(inst, &mod.intern_pool);
 }
 
 fn typeOfIndex(func: *CodeGen, inst: Air.Inst.Index) Type {
     const mod = func.bin_file.base.options.module.?;
-    return func.air.typeOfIndex(inst, mod.intern_pool);
+    return func.air.typeOfIndex(inst, &mod.intern_pool);
 }
diff --git a/src/arch/x86_64/CodeGen.zig b/src/arch/x86_64/CodeGen.zig
index dbb3d977b8..b9cc3f7052 100644
--- a/src/arch/x86_64/CodeGen.zig
+++ b/src/arch/x86_64/CodeGen.zig
@@ -1738,7 +1738,7 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
             try self.mir_to_air_map.put(self.gpa, mir_inst, inst);
         }
 
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*)) continue;
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip)) continue;
 
         wip_mir_log.debug("{}", .{self.fmtAir(inst)});
         verbose_tracking_log.debug("{}", .{self.fmtTracking()});
@@ -11992,10 +11992,10 @@ fn hasAllFeatures(self: *Self, features: anytype) bool {
 
 fn typeOf(self: *Self, inst: Air.Inst.Ref) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOf(inst, mod.intern_pool);
+    return self.air.typeOf(inst, &mod.intern_pool);
 }
 
 fn typeOfIndex(self: *Self, inst: Air.Inst.Index) Type {
     const mod = self.bin_file.options.module.?;
-    return self.air.typeOfIndex(inst, mod.intern_pool);
+    return self.air.typeOfIndex(inst, &mod.intern_pool);
 }
diff --git a/src/codegen/c.zig b/src/codegen/c.zig
index d705d6143e..0db223c6b6 100644
--- a/src/codegen/c.zig
+++ b/src/codegen/c.zig
@@ -489,12 +489,12 @@ pub const Function = struct {
 
     fn typeOf(f: *Function, inst: Air.Inst.Ref) Type {
         const mod = f.object.dg.module;
-        return f.air.typeOf(inst, mod.intern_pool);
+        return f.air.typeOf(inst, &mod.intern_pool);
     }
 
     fn typeOfIndex(f: *Function, inst: Air.Inst.Index) Type {
         const mod = f.object.dg.module;
-        return f.air.typeOfIndex(inst, mod.intern_pool);
+        return f.air.typeOfIndex(inst, &mod.intern_pool);
     }
 };
 
@@ -2808,7 +2808,7 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) error{ AnalysisFail,
     const air_tags = f.air.instructions.items(.tag);
 
     for (body) |inst| {
-        if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip.*))
+        if (f.liveness.isUnused(inst) and !f.air.mustLower(inst, ip))
             continue;
 
         const result_value = switch (air_tags[inst]) {
diff --git a/src/codegen/llvm.zig b/src/codegen/llvm.zig
index 606c57b187..8cf6a51ba1 100644
--- a/src/codegen/llvm.zig
+++ b/src/codegen/llvm.zig
@@ -1574,7 +1574,7 @@ pub const Object = struct {
             },
             .Pointer => {
                 // Normalize everything that the debug info does not represent.
-                const ptr_info = Type.ptrInfoIp(mod.intern_pool, ty.toIntern());
+                const ptr_info = Type.ptrInfoIp(&mod.intern_pool, ty.toIntern());
 
                 if (ptr_info.sentinel != .none or
                     ptr_info.address_space != .generic or
@@ -4330,7 +4330,7 @@ pub const FuncGen = struct {
         const ip = &mod.intern_pool;
         const air_tags = self.air.instructions.items(.tag);
         for (body, 0..) |inst, i| {
-            if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
+            if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
                 continue;
 
             const opt_value: ?*llvm.Value = switch (air_tags[inst]) {
@@ -8055,7 +8055,7 @@ pub const FuncGen = struct {
         const mod = fg.dg.module;
         const ip = &mod.intern_pool;
         for (body_tail[1..]) |body_inst| {
-            switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip.*)) {
+            switch (fg.liveness.categorizeOperand(fg.air, body_inst, body_tail[0], ip)) {
                 .none => continue,
                 .write, .noret, .complex => return false,
                 .tomb => return true,
@@ -9920,12 +9920,12 @@ pub const FuncGen = struct {
 
     fn typeOf(fg: *FuncGen, inst: Air.Inst.Ref) Type {
         const mod = fg.dg.module;
-        return fg.air.typeOf(inst, mod.intern_pool);
+        return fg.air.typeOf(inst, &mod.intern_pool);
     }
 
     fn typeOfIndex(fg: *FuncGen, inst: Air.Inst.Index) Type {
         const mod = fg.dg.module;
-        return fg.air.typeOfIndex(inst, mod.intern_pool);
+        return fg.air.typeOfIndex(inst, &mod.intern_pool);
     }
 };
 
diff --git a/src/codegen/spirv.zig b/src/codegen/spirv.zig
index 0fbcb47f71..ddd7f36435 100644
--- a/src/codegen/spirv.zig
+++ b/src/codegen/spirv.zig
@@ -1688,7 +1688,7 @@ pub const DeclGen = struct {
         const mod = self.module;
         const ip = &mod.intern_pool;
         // TODO: remove now-redundant isUnused calls from AIR handler functions
-        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip.*))
+        if (self.liveness.isUnused(inst) and !self.air.mustLower(inst, ip))
             return;
 
         const air_tags = self.air.instructions.items(.tag);
@@ -3339,11 +3339,11 @@ pub const DeclGen = struct {
 
     fn typeOf(self: *DeclGen, inst: Air.Inst.Ref) Type {
        const mod = self.module;
-        return self.air.typeOf(inst, mod.intern_pool);
+        return self.air.typeOf(inst, &mod.intern_pool);
     }
 
     fn typeOfIndex(self: *DeclGen, inst: Air.Inst.Index) Type {
         const mod = self.module;
-        return self.air.typeOfIndex(inst, mod.intern_pool);
+        return self.air.typeOfIndex(inst, &mod.intern_pool);
     }
 };
diff --git a/src/print_air.zig b/src/print_air.zig
index be7bc9610d..8da80e1360 100644
--- a/src/print_air.zig
+++ b/src/print_air.zig
@@ -978,6 +978,6 @@ const Writer = struct {
 
     fn typeOfIndex(w: *Writer, inst: Air.Inst.Index) Type {
         const mod = w.module;
-        return w.air.typeOfIndex(inst, mod.intern_pool);
+        return w.air.typeOfIndex(inst, &mod.intern_pool);
     }
 };
diff --git a/src/type.zig b/src/type.zig
index f285caff95..fc7821b50b 100644
--- a/src/type.zig
+++ b/src/type.zig
@@ -102,7 +102,7 @@ pub const Type = struct {
         };
     }
 
-    pub fn ptrInfoIp(ip: InternPool, ty: InternPool.Index) InternPool.Key.PtrType {
+    pub fn ptrInfoIp(ip: *const InternPool, ty: InternPool.Index) InternPool.Key.PtrType {
         return switch (ip.indexToKey(ty)) {
             .ptr_type => |p| p,
             .opt_type => |child| switch (ip.indexToKey(child)) {
@@ -114,7 +114,7 @@ pub const Type = struct {
     }
 
     pub fn ptrInfo(ty: Type, mod: *const Module) Payload.Pointer.Data {
-        return Payload.Pointer.Data.fromKey(ptrInfoIp(mod.intern_pool, ty.toIntern()));
+        return Payload.Pointer.Data.fromKey(ptrInfoIp(&mod.intern_pool, ty.toIntern()));
     }
 
     pub fn eql(a: Type, b: Type, mod: *const Module) bool {
@@ -1832,10 +1832,10 @@ pub const Type = struct {
     }
 
     pub fn isVolatilePtr(ty: Type, mod: *const Module) bool {
-        return isVolatilePtrIp(ty, mod.intern_pool);
+        return isVolatilePtrIp(ty, &mod.intern_pool);
     }
 
-    pub fn isVolatilePtrIp(ty: Type, ip: InternPool) bool {
+    pub fn isVolatilePtrIp(ty: Type, ip: *const InternPool) bool {
         return switch (ip.indexToKey(ty.toIntern())) {
             .ptr_type => |ptr_type| ptr_type.is_volatile,
             else => false,
@@ -1920,10 +1920,10 @@ pub const Type = struct {
     /// For *T, returns T.
     /// For [*]T, returns T.
     pub fn childType(ty: Type, mod: *const Module) Type {
-        return childTypeIp(ty, mod.intern_pool);
+        return childTypeIp(ty, &mod.intern_pool);
     }
 
-    pub fn childTypeIp(ty: Type, ip: InternPool) Type {
+    pub fn childTypeIp(ty: Type, ip: *const InternPool) Type {
         return ip.childType(ty.toIntern()).toType();
     }
 
@@ -2164,10 +2164,10 @@ pub const Type = struct {
 
     /// Asserts the type is an array or vector or struct.
     pub fn arrayLen(ty: Type, mod: *const Module) u64 {
-        return arrayLenIp(ty, mod.intern_pool);
+        return arrayLenIp(ty, &mod.intern_pool);
     }
 
-    pub fn arrayLenIp(ty: Type, ip: InternPool) u64 {
+    pub fn arrayLenIp(ty: Type, ip: *const InternPool) u64 {
         return switch (ip.indexToKey(ty.toIntern())) {
             .vector_type => |vector_type| vector_type.len,
             .array_type => |array_type| array_type.len,
@@ -2385,10 +2385,10 @@ pub const Type = struct {
 
     /// Asserts the type is a function or a function pointer.
     pub fn fnReturnType(ty: Type, mod: *Module) Type {
-        return fnReturnTypeIp(ty, mod.intern_pool);
+        return fnReturnTypeIp(ty, &mod.intern_pool);
     }
 
-    pub fn fnReturnTypeIp(ty: Type, ip: InternPool) Type {
+    pub fn fnReturnTypeIp(ty: Type, ip: *const InternPool) Type {
         return switch (ip.indexToKey(ty.toIntern())) {
             .ptr_type => |ptr_type| ip.indexToKey(ptr_type.elem_type).func_type.return_type,
             .func_type => |func_type| func_type.return_type,