compiler: spring cleaning

I started this diff trying to remove a little dead code from the C
backend, but ended up finding a bunch of dead code sprinkled all over
the place:

* `packed` handling in the C backend which was made dead by `Legalize`
* Representation of pointers to runtime-known vector indices (see the sketch below)
* Handling for the `vector_store_elem` AIR instruction (now removed)
* Old tuple handling from when they used the InternPool repr of structs
* Straightforward unused functions
* TODOs in the LLVM backend for features which Zig just does not support
Matthew Lugg 2025-10-30 09:20:04 +00:00
parent 9a7d28fe58
commit c091e27aac
21 changed files with 212 additions and 2444 deletions
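
A minimal sketch of the "runtime-known vector indices" bullet from user code (illustrative only, not part of the commit; the error message is quoted verbatim from the Sema hunk below). With `vector_store_elem` gone, a pointer into a vector requires a comptime-known index, and a runtime-index store can round-trip through an array copy instead:

    const std = @import("std");

    test "vector element pointers need a comptime-known index" {
        var v: @Vector(4, u32) = .{ 1, 2, 3, 4 };
        var i: usize = 0; // runtime-known index
        i += 2;

        // Rejected in Sema, per the elemPtrArray hunk below:
        //     _ = &v[i]; // error: vector index not comptime known

        // A store through a runtime index can go through an array copy.
        var tmp: [4]u32 = v;
        tmp[i] = 42;
        v = tmp;
        try std.testing.expectEqual(42, v[2]);
    }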


@@ -874,10 +874,6 @@ pub const Inst = struct {
         /// Uses the `ty_pl` field.
         save_err_return_trace_index,
-        /// Store an element to a vector pointer at an index.
-        /// Uses the `vector_store_elem` field.
-        vector_store_elem,
         /// Compute a pointer to a `Nav` at runtime, always one of:
         ///
         /// * `threadlocal var`
@@ -1220,11 +1216,6 @@ pub const Inst = struct {
             operand: Ref,
             operation: std.builtin.ReduceOp,
         },
-        vector_store_elem: struct {
-            vector_ptr: Ref,
-            // Index into a different array.
-            payload: u32,
-        },
         ty_nav: struct {
             ty: InternPool.Index,
             nav: InternPool.Nav.Index,
@@ -1689,7 +1680,6 @@ pub fn typeOfIndex(air: *const Air, inst: Air.Inst.Index, ip: *const InternPool)
         .set_union_tag,
         .prefetch,
         .set_err_return_trace,
-        .vector_store_elem,
         .c_va_end,
         => return .void,
@@ -1857,7 +1847,6 @@ pub fn mustLower(air: Air, inst: Air.Inst.Index, ip: *const InternPool) bool {
         .prefetch,
         .wasm_memory_grow,
         .set_err_return_trace,
-        .vector_store_elem,
         .c_va_arg,
         .c_va_copy,
         .c_va_end,


@@ -463,12 +463,6 @@ fn analyzeInst(
             return analyzeOperands(a, pass, data, inst, .{ o.lhs, o.rhs, .none });
         },
-        .vector_store_elem => {
-            const o = inst_datas[@intFromEnum(inst)].vector_store_elem;
-            const extra = a.air.extraData(Air.Bin, o.payload).data;
-            return analyzeOperands(a, pass, data, inst, .{ o.vector_ptr, extra.lhs, extra.rhs });
-        },
         .arg,
         .alloc,
         .ret_ptr,


@@ -322,11 +322,6 @@ fn verifyBody(self: *Verify, body: []const Air.Inst.Index) Error!void {
                 const extra = self.air.extraData(Air.Bin, pl_op.payload).data;
                 try self.verifyInstOperands(inst, .{ extra.lhs, extra.rhs, pl_op.operand });
             },
-            .vector_store_elem => {
-                const vector_store_elem = data[@intFromEnum(inst)].vector_store_elem;
-                const extra = self.air.extraData(Air.Bin, vector_store_elem.payload).data;
-                try self.verifyInstOperands(inst, .{ vector_store_elem.vector_ptr, extra.lhs, extra.rhs });
-            },
             .cmpxchg_strong,
             .cmpxchg_weak,
             => {


@@ -330,7 +330,6 @@ const Writer = struct {
             .shuffle_two => try w.writeShuffleTwo(s, inst),
             .reduce, .reduce_optimized => try w.writeReduce(s, inst),
             .cmp_vector, .cmp_vector_optimized => try w.writeCmpVector(s, inst),
-            .vector_store_elem => try w.writeVectorStoreElem(s, inst),
             .runtime_nav_ptr => try w.writeRuntimeNavPtr(s, inst),
 
             .work_item_id,
@@ -576,17 +575,6 @@
         try w.writeOperand(s, inst, 1, extra.rhs);
     }
 
-    fn writeVectorStoreElem(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
-        const data = w.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
-        const extra = w.air.extraData(Air.VectorCmp, data.payload).data;
-        try w.writeOperand(s, inst, 0, data.vector_ptr);
-        try s.writeAll(", ");
-        try w.writeOperand(s, inst, 1, extra.lhs);
-        try s.writeAll(", ");
-        try w.writeOperand(s, inst, 2, extra.rhs);
-    }
-
     fn writeRuntimeNavPtr(w: *Writer, s: *std.Io.Writer, inst: Air.Inst.Index) Error!void {
         const ip = &w.pt.zcu.intern_pool;
         const ty_nav = w.air.instructions.items(.data)[@intFromEnum(inst)].ty_nav;


@@ -316,13 +316,6 @@ fn checkBody(air: Air, body: []const Air.Inst.Index, zcu: *Zcu) bool {
                 if (!checkRef(data.prefetch.ptr, zcu)) return false;
             },
-            .vector_store_elem => {
-                const bin = air.extraData(Air.Bin, data.vector_store_elem.payload).data;
-                if (!checkRef(data.vector_store_elem.vector_ptr, zcu)) return false;
-                if (!checkRef(bin.lhs, zcu)) return false;
-                if (!checkRef(bin.rhs, zcu)) return false;
-            },
             .runtime_nav_ptr => {
                 if (!checkType(.fromInterned(data.ty_nav.ty), zcu)) return false;
             },


@@ -2104,7 +2104,6 @@ pub const Key = union(enum) {
     pub const VectorIndex = enum(u16) {
         none = std.math.maxInt(u16),
-        runtime = std.math.maxInt(u16) - 1,
         _,
     };
@@ -3739,10 +3738,8 @@ pub const LoadedStructType = struct {
         return s.field_inits.get(ip)[i];
     }
 
-    /// Returns `none` in the case the struct is a tuple.
-    pub fn fieldName(s: LoadedStructType, ip: *const InternPool, i: usize) OptionalNullTerminatedString {
-        if (s.field_names.len == 0) return .none;
-        return s.field_names.get(ip)[i].toOptional();
+    pub fn fieldName(s: LoadedStructType, ip: *const InternPool, i: usize) NullTerminatedString {
+        return s.field_names.get(ip)[i];
     }
 
     pub fn fieldIsComptime(s: LoadedStructType, ip: *const InternPool, i: usize) bool {


@@ -15919,26 +15919,27 @@ fn zirOverflowArithmetic(
             },
             .mul_with_overflow => {
                 // If either of the arguments is zero, the result is zero and no overflow occured.
-                // If either of the arguments is one, the result is the other and no overflow occured.
-                // Otherwise, if either of the arguments is undefined, both results are undefined.
-                const scalar_one = try pt.intValue(dest_ty.scalarType(zcu), 1);
                 if (maybe_lhs_val) |lhs_val| {
-                    if (!lhs_val.isUndef(zcu)) {
-                        if (try lhs_val.compareAllWithZeroSema(.eq, pt)) {
-                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
-                        } else if (try sema.compareAll(lhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
-                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
-                        }
+                    if (!lhs_val.isUndef(zcu) and try lhs_val.compareAllWithZeroSema(.eq, pt)) {
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
                     }
                 }
                 if (maybe_rhs_val) |rhs_val| {
-                    if (!rhs_val.isUndef(zcu)) {
-                        if (try rhs_val.compareAllWithZeroSema(.eq, pt)) {
-                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
-                        } else if (try sema.compareAll(rhs_val, .eq, try sema.splat(dest_ty, scalar_one), dest_ty)) {
-                            break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
-                        }
+                    if (!rhs_val.isUndef(zcu) and try rhs_val.compareAllWithZeroSema(.eq, pt)) {
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
                     }
                 }
+                // If either of the arguments is one, the result is the other and no overflow occured.
+                const scalar_one = try pt.intValue(dest_ty.scalarType(zcu), 1);
+                const vec_one = try sema.splat(dest_ty, scalar_one);
+                if (maybe_lhs_val) |lhs_val| {
+                    if (!lhs_val.isUndef(zcu) and try sema.compareAll(lhs_val, .eq, vec_one, dest_ty)) {
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = rhs };
+                    }
+                }
+                if (maybe_rhs_val) |rhs_val| {
+                    if (!rhs_val.isUndef(zcu) and try sema.compareAll(rhs_val, .eq, vec_one, dest_ty)) {
+                        break :result .{ .overflow_bit = try sema.splat(overflow_ty, .zero_u1), .inst = lhs };
+                    }
+                }
@@ -15947,7 +15948,6 @@
                 if (lhs_val.isUndef(zcu) or rhs_val.isUndef(zcu)) {
                     break :result .{ .overflow_bit = .undef, .wrapped = .undef };
                 }
-
                 const result = try arith.mulWithOverflow(sema, dest_ty, lhs_val, rhs_val);
                 break :result .{ .overflow_bit = result.overflow_bit, .wrapped = result.wrapped_result };
             }
@@ -17751,10 +17751,7 @@ fn zirTypeInfo(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Ai
             try ty.resolveStructFieldInits(pt);
             for (struct_field_vals, 0..) |*field_val, field_index| {
-                const field_name = if (struct_type.fieldName(ip, field_index).unwrap()) |field_name|
-                    field_name
-                else
-                    try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
+                const field_name = struct_type.fieldName(ip, field_index);
                 const field_name_len = field_name.length(ip);
                 const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
                 const field_init = struct_type.fieldInit(ip, field_index);
@@ -28347,6 +28344,10 @@ fn elemPtrArray(
         break :o index;
     } else null;
 
+    if (offset == null and array_ty.zigTypeTag(zcu) == .vector) {
+        return sema.fail(block, elem_index_src, "vector index not comptime known", .{});
+    }
+
     const elem_ptr_ty = try array_ptr_ty.elemPtrType(offset, pt);
 
     if (maybe_undef_array_ptr_val) |array_ptr_val| {
@@ -28364,10 +28365,6 @@
         try sema.validateRuntimeValue(block, array_ptr_src, array_ptr);
     }
 
-    if (offset == null and array_ty.zigTypeTag(zcu) == .vector) {
-        return sema.fail(block, elem_index_src, "vector index not comptime known", .{});
-    }
-
     // Runtime check is only needed if unable to comptime check.
     if (oob_safety and block.wantSafety() and offset == null) {
         const len_inst = try pt.intRef(.usize, array_len);
@@ -30399,22 +30396,6 @@ fn storePtr2(
     const is_ret = air_tag == .ret_ptr;
 
-    // Detect if we are storing an array operand to a bitcasted vector pointer.
-    // If so, we instead reach through the bitcasted pointer to the vector pointer,
-    // bitcast the array operand to a vector, and then lower this as a store of
-    // a vector value to a vector pointer. This generally results in better code,
-    // as well as working around an LLVM bug:
-    // https://github.com/ziglang/zig/issues/11154
-    if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
-        const vector_ty = sema.typeOf(vector_ptr).childType(zcu);
-        const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
-            error.NotCoercible => unreachable,
-            else => |e| return e,
-        };
-        try sema.storePtr2(block, src, vector_ptr, ptr_src, vector, operand_src, .store);
-        return;
-    }
-
     const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
         error.NotCoercible => unreachable,
         else => |e| return e,
@@ -30447,29 +30428,6 @@
     try sema.requireRuntimeBlock(block, src, runtime_src);
 
-    if (ptr_ty.ptrInfo(zcu).flags.vector_index == .runtime) {
-        const ptr_inst = ptr.toIndex().?;
-        const air_tags = sema.air_instructions.items(.tag);
-        if (air_tags[@intFromEnum(ptr_inst)] == .ptr_elem_ptr) {
-            const ty_pl = sema.air_instructions.items(.data)[@intFromEnum(ptr_inst)].ty_pl;
-            const bin_op = sema.getTmpAir().extraData(Air.Bin, ty_pl.payload).data;
-            _ = try block.addInst(.{
-                .tag = .vector_store_elem,
-                .data = .{ .vector_store_elem = .{
-                    .vector_ptr = bin_op.lhs,
-                    .payload = try block.sema.addExtra(Air.Bin{
-                        .lhs = bin_op.rhs,
-                        .rhs = operand,
-                    }),
-                } },
-            });
-            return;
-        }
-        return sema.fail(block, ptr_src, "unable to determine vector element index of type '{f}'", .{
-            ptr_ty.fmt(pt),
-        });
-    }
-
     const store_inst = if (is_ret)
         try block.addBinOp(.store, ptr, operand)
     else
@@ -30569,37 +30527,6 @@ fn markMaybeComptimeAllocRuntime(sema: *Sema, block: *Block, alloc_inst: Air.Ins
         }
     }
 
-/// Traverse an arbitrary number of bitcasted pointers and return the underyling vector
-/// pointer. Only if the final element type matches the vector element type, and the
-/// lengths match.
-fn obtainBitCastedVectorPtr(sema: *Sema, ptr: Air.Inst.Ref) ?Air.Inst.Ref {
-    const pt = sema.pt;
-    const zcu = pt.zcu;
-    const array_ty = sema.typeOf(ptr).childType(zcu);
-    if (array_ty.zigTypeTag(zcu) != .array) return null;
-
-    var ptr_ref = ptr;
-    var ptr_inst = ptr_ref.toIndex() orelse return null;
-    const air_datas = sema.air_instructions.items(.data);
-    const air_tags = sema.air_instructions.items(.tag);
-    const vector_ty = while (air_tags[@intFromEnum(ptr_inst)] == .bitcast) {
-        ptr_ref = air_datas[@intFromEnum(ptr_inst)].ty_op.operand;
-        if (!sema.isKnownZigType(ptr_ref, .pointer)) return null;
-        const child_ty = sema.typeOf(ptr_ref).childType(zcu);
-        if (child_ty.zigTypeTag(zcu) == .vector) break child_ty;
-        ptr_inst = ptr_ref.toIndex() orelse return null;
-    } else return null;
-
-    // We have a pointer-to-array and a pointer-to-vector. If the elements and
-    // lengths match, return the result.
-    if (array_ty.childType(zcu).eql(vector_ty.childType(zcu), zcu) and
-        array_ty.arrayLen(zcu) == vector_ty.vectorLen(zcu))
-    {
-        return ptr_ref;
-    } else {
-        return null;
-    }
-}
-
 /// Call when you have Value objects rather than Air instructions, and you want to
 /// assert the store must be done at comptime.
 fn storePtrVal(
@@ -35579,8 +35506,13 @@ fn structFieldInits(
             const default_val = try sema.resolveConstValue(&block_scope, init_src, coerced, null);
 
             if (default_val.canMutateComptimeVarState(zcu)) {
-                const field_name = struct_type.fieldName(ip, field_i).unwrap().?;
-                return sema.failWithContainsReferenceToComptimeVar(&block_scope, init_src, field_name, "field default value", default_val);
+                return sema.failWithContainsReferenceToComptimeVar(
+                    &block_scope,
+                    init_src,
+                    struct_type.fieldName(ip, field_i),
+                    "field default value",
+                    default_val,
+                );
             }
             struct_type.field_inits.get(ip)[field_i] = default_val.toIntern();
         }
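
An aside on the `mul_with_overflow` hunk above: the zero/one fast paths are the standard `@mulWithOverflow` identities, which the following sketch (illustrative, not part of the commit) checks directly.

    const std = @import("std");

    test "@mulWithOverflow zero and one identities" {
        const x: u8 = 200;

        // x * 0 == 0, with the overflow bit clear.
        const by_zero = @mulWithOverflow(x, @as(u8, 0));
        try std.testing.expectEqual(@as(u8, 0), by_zero[0]);
        try std.testing.expectEqual(@as(u1, 0), by_zero[1]);

        // x * 1 == x, with the overflow bit clear.
        const by_one = @mulWithOverflow(x, @as(u8, 1));
        try std.testing.expectEqual(x, by_one[0]);
        try std.testing.expectEqual(@as(u1, 0), by_one[1]);
    }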


@@ -24,7 +24,6 @@ pub fn loadComptimePtr(sema: *Sema, block: *Block, src: LazySrcLoc, ptr: Value)
     const child_bits = Type.fromInterned(ptr_info.child).bitSize(zcu);
     const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
-        .runtime => return .runtime_load,
         else => |idx| switch (pt.zcu.getTarget().cpu.arch.endian()) {
             .little => child_bits * @intFromEnum(idx),
             .big => host_bits - child_bits * (@intFromEnum(idx) + 1), // element order reversed on big endian
@@ -81,7 +80,6 @@ pub fn storeComptimePtr(
     };
     const bit_offset = ptr_info.packed_offset.bit_offset + switch (ptr_info.flags.vector_index) {
         .none => 0,
-        .runtime => return .runtime_store,
         else => |idx| switch (zcu.getTarget().cpu.arch.endian()) {
             .little => Type.fromInterned(ptr_info.child).bitSize(zcu) * @intFromEnum(idx),
             .big => host_bits - Type.fromInterned(ptr_info.child).bitSize(zcu) * (@intFromEnum(idx) + 1), // element order reversed on big endian


@@ -198,9 +198,7 @@ pub fn print(ty: Type, writer: *std.Io.Writer, pt: Zcu.PerThread) std.Io.Writer.
             info.packed_offset.bit_offset, info.packed_offset.host_size,
         });
     }
-    if (info.flags.vector_index == .runtime) {
-        try writer.writeAll(":?");
-    } else if (info.flags.vector_index != .none) {
+    if (info.flags.vector_index != .none) {
         try writer.print(":{d}", .{@intFromEnum(info.flags.vector_index)});
     }
     try writer.writeAll(") ");
@@ -3113,7 +3111,7 @@ pub fn enumTagFieldIndex(ty: Type, enum_tag: Value, zcu: *const Zcu) ?u32 {
 pub fn structFieldName(ty: Type, index: usize, zcu: *const Zcu) InternPool.OptionalNullTerminatedString {
     const ip = &zcu.intern_pool;
     return switch (ip.indexToKey(ty.toIntern())) {
-        .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index),
+        .struct_type => ip.loadStructType(ty.toIntern()).fieldName(ip, index).toOptional(),
         .tuple_type => .none,
         else => unreachable,
     };
@@ -3985,7 +3983,7 @@ pub fn elemPtrType(ptr_ty: Type, offset: ?usize, pt: Zcu.PerThread) !Type {
         break :blk .{
             .host_size = @intCast(parent_ty.arrayLen(zcu)),
             .alignment = parent_ty.abiAlignment(zcu),
-            .vector_index = if (offset) |some| @enumFromInt(some) else .runtime,
+            .vector_index = @enumFromInt(offset.?),
         };
     } else .{};


@@ -574,166 +574,37 @@ pub fn writeToPackedMemory(
         }
     }
 
-/// Load a Value from the contents of `buffer`.
+/// Load a Value from the contents of `buffer`, where `ty` is an unsigned integer type.
 ///
 /// Asserts that buffer.len >= ty.abiSize(). The buffer is allowed to extend past
 /// the end of the value in memory.
-pub fn readFromMemory(
+pub fn readUintFromMemory(
     ty: Type,
     pt: Zcu.PerThread,
     buffer: []const u8,
     arena: Allocator,
-) error{
-    IllDefinedMemoryLayout,
-    Unimplemented,
-    OutOfMemory,
-}!Value {
+) Allocator.Error!Value {
     const zcu = pt.zcu;
-    const ip = &zcu.intern_pool;
-    const target = zcu.getTarget();
-    const endian = target.cpu.arch.endian();
-    switch (ty.zigTypeTag(zcu)) {
-        .void => return Value.void,
-        .bool => {
-            if (buffer[0] == 0) {
-                return Value.false;
-            } else {
-                return Value.true;
-            }
-        },
-        .int, .@"enum" => |ty_tag| {
-            const int_ty = switch (ty_tag) {
-                .int => ty,
-                .@"enum" => ty.intTagType(zcu),
-                else => unreachable,
-            };
-            const int_info = int_ty.intInfo(zcu);
-            const bits = int_info.bits;
-            const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
-            if (bits == 0 or buffer.len == 0) return zcu.getCoerced(try zcu.intValue(int_ty, 0), ty);
-            if (bits <= 64) switch (int_info.signedness) { // Fast path for integers <= u64
-                .signed => {
-                    const val = std.mem.readVarInt(i64, buffer[0..byte_count], endian);
-                    const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
-                    return zcu.getCoerced(try zcu.intValue(int_ty, result), ty);
-                },
-                .unsigned => {
-                    const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
-                    const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
-                    return zcu.getCoerced(try zcu.intValue(int_ty, result), ty);
-                },
-            } else { // Slow path, we have to construct a big-int
-                const Limb = std.math.big.Limb;
-                const limb_count = (byte_count + @sizeOf(Limb) - 1) / @sizeOf(Limb);
-                const limbs_buffer = try arena.alloc(Limb, limb_count);
-                var bigint = BigIntMutable.init(limbs_buffer, 0);
-                bigint.readTwosComplement(buffer[0..byte_count], bits, endian, int_info.signedness);
-                return zcu.getCoerced(try zcu.intValue_big(int_ty, bigint.toConst()), ty);
-            }
-        },
-        .float => return Value.fromInterned(try pt.intern(.{ .float = .{
-            .ty = ty.toIntern(),
-            .storage = switch (ty.floatBits(target)) {
-                16 => .{ .f16 = @bitCast(std.mem.readInt(u16, buffer[0..2], endian)) },
-                32 => .{ .f32 = @bitCast(std.mem.readInt(u32, buffer[0..4], endian)) },
-                64 => .{ .f64 = @bitCast(std.mem.readInt(u64, buffer[0..8], endian)) },
-                80 => .{ .f80 = @bitCast(std.mem.readInt(u80, buffer[0..10], endian)) },
-                128 => .{ .f128 = @bitCast(std.mem.readInt(u128, buffer[0..16], endian)) },
-                else => unreachable,
-            },
-        } })),
-        .array => {
-            const elem_ty = ty.childType(zcu);
-            const elem_size = elem_ty.abiSize(zcu);
-            const elems = try arena.alloc(InternPool.Index, @intCast(ty.arrayLen(zcu)));
-            var offset: usize = 0;
-            for (elems) |*elem| {
-                elem.* = (try readFromMemory(elem_ty, zcu, buffer[offset..], arena)).toIntern();
-                offset += @intCast(elem_size);
-            }
-            return pt.aggregateValue(ty, elems);
-        },
-        .vector => {
-            // We use byte_count instead of abi_size here, so that any padding bytes
-            // follow the data bytes, on both big- and little-endian systems.
-            const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
-            return readFromPackedMemory(ty, zcu, buffer[0..byte_count], 0, arena);
-        },
-        .@"struct" => {
-            const struct_type = zcu.typeToStruct(ty).?;
-            switch (struct_type.layout) {
-                .auto => unreachable, // Sema is supposed to have emitted a compile error already
-                .@"extern" => {
-                    const field_types = struct_type.field_types;
-                    const field_vals = try arena.alloc(InternPool.Index, field_types.len);
-                    for (field_vals, 0..) |*field_val, i| {
-                        const field_ty = Type.fromInterned(field_types.get(ip)[i]);
-                        const off: usize = @intCast(ty.structFieldOffset(i, zcu));
-                        const sz: usize = @intCast(field_ty.abiSize(zcu));
-                        field_val.* = (try readFromMemory(field_ty, zcu, buffer[off..(off + sz)], arena)).toIntern();
-                    }
-                    return pt.aggregateValue(ty, field_vals);
-                },
-                .@"packed" => {
-                    const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
-                    return readFromPackedMemory(ty, zcu, buffer[0..byte_count], 0, arena);
-                },
-            }
-        },
-        .error_set => {
-            const bits = zcu.errorSetBits();
-            const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
-            const int = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
-            const index = (int << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
-            const name = zcu.global_error_set.keys()[@intCast(index)];
-            return Value.fromInterned(try pt.intern(.{ .err = .{
-                .ty = ty.toIntern(),
-                .name = name,
-            } }));
-        },
-        .@"union" => switch (ty.containerLayout(zcu)) {
-            .auto => return error.IllDefinedMemoryLayout,
-            .@"extern" => {
-                const union_size = ty.abiSize(zcu);
-                const array_ty = try zcu.arrayType(.{ .len = union_size, .child = .u8_type });
-                const val = (try readFromMemory(array_ty, zcu, buffer, arena)).toIntern();
-                return Value.fromInterned(try pt.internUnion(.{
-                    .ty = ty.toIntern(),
-                    .tag = .none,
-                    .val = val,
-                }));
-            },
-            .@"packed" => {
-                const byte_count = (@as(usize, @intCast(ty.bitSize(zcu))) + 7) / 8;
-                return readFromPackedMemory(ty, zcu, buffer[0..byte_count], 0, arena);
-            },
-        },
-        .pointer => {
-            assert(!ty.isSlice(zcu)); // No well defined layout.
-            const int_val = try readFromMemory(Type.usize, zcu, buffer, arena);
-            return Value.fromInterned(try pt.intern(.{ .ptr = .{
-                .ty = ty.toIntern(),
-                .base_addr = .int,
-                .byte_offset = int_val.toUnsignedInt(zcu),
-            } }));
-        },
-        .optional => {
-            assert(ty.isPtrLikeOptional(zcu));
-            const child_ty = ty.optionalChild(zcu);
-            const child_val = try readFromMemory(child_ty, zcu, buffer, arena);
-            return Value.fromInterned(try pt.intern(.{ .opt = .{
-                .ty = ty.toIntern(),
-                .val = switch (child_val.orderAgainstZero(pt)) {
-                    .lt => unreachable,
-                    .eq => .none,
-                    .gt => child_val.toIntern(),
-                },
-            } }));
-        },
-        else => return error.Unimplemented,
+    const endian = zcu.getTarget().cpu.arch.endian();
+
+    assert(ty.isUnsignedInt(zcu));
+    const bits = ty.intInfo(zcu).bits;
+    const byte_count: u16 = @intCast((@as(u17, bits) + 7) / 8);
+
+    assert(buffer.len >= byte_count);
+
+    if (bits <= 64) {
+        const val = std.mem.readVarInt(u64, buffer[0..byte_count], endian);
+        const result = (val << @as(u6, @intCast(64 - bits))) >> @as(u6, @intCast(64 - bits));
+        return pt.intValue(ty, result);
+    } else {
+        const Limb = std.math.big.Limb;
+        const limb_count = (byte_count + @sizeOf(Limb) - 1) / @sizeOf(Limb);
+        const limbs_buffer = try arena.alloc(Limb, limb_count);
+
+        var bigint: BigIntMutable = .init(limbs_buffer, 0);
+        bigint.readTwosComplement(buffer[0..byte_count], bits, endian, .unsigned);
+        return pt.intValue_big(ty, bigint.toConst());
     }
 }


@@ -3512,7 +3512,6 @@ pub fn ptrType(pt: Zcu.PerThread, info: InternPool.Key.PtrType) Allocator.Error!
                 canon_info.packed_offset.host_size = 0;
             }
         },
-        .runtime => {},
         _ => assert(@intFromEnum(info.flags.vector_index) < info.packed_offset.host_size),
     }
@@ -3663,21 +3662,40 @@ pub fn intRef(pt: Zcu.PerThread, ty: Type, x: anytype) Allocator.Error!Air.Inst.
 }
 
 pub fn intValue_big(pt: Zcu.PerThread, ty: Type, x: BigIntConst) Allocator.Error!Value {
-    return Value.fromInterned(try pt.intern(.{ .int = .{
+    if (ty.toIntern() != .comptime_int_type) {
+        const int_info = ty.intInfo(pt.zcu);
+        assert(x.fitsInTwosComp(int_info.signedness, int_info.bits));
+    }
+    return .fromInterned(try pt.intern(.{ .int = .{
         .ty = ty.toIntern(),
         .storage = .{ .big_int = x },
     } }));
 }
 
 pub fn intValue_u64(pt: Zcu.PerThread, ty: Type, x: u64) Allocator.Error!Value {
-    return Value.fromInterned(try pt.intern(.{ .int = .{
+    if (ty.toIntern() != .comptime_int_type and x != 0) {
+        const int_info = ty.intInfo(pt.zcu);
+        const unsigned_bits = int_info.bits - @intFromBool(int_info.signedness == .signed);
+        assert(unsigned_bits >= std.math.log2(x) + 1);
+    }
+    return .fromInterned(try pt.intern(.{ .int = .{
         .ty = ty.toIntern(),
         .storage = .{ .u64 = x },
     } }));
 }
 
 pub fn intValue_i64(pt: Zcu.PerThread, ty: Type, x: i64) Allocator.Error!Value {
-    return Value.fromInterned(try pt.intern(.{ .int = .{
+    if (ty.toIntern() != .comptime_int_type and x != 0) {
+        const int_info = ty.intInfo(pt.zcu);
+        const unsigned_bits = int_info.bits - @intFromBool(int_info.signedness == .signed);
+        if (x > 0) {
+            assert(unsigned_bits >= std.math.log2(x) + 1);
+        } else {
+            assert(int_info.signedness == .signed);
+            assert(unsigned_bits >= std.math.log2_int_ceil(u64, @abs(x)));
+        }
+    }
+    return .fromInterned(try pt.intern(.{ .int = .{
         .ty = ty.toIntern(),
        .storage = .{ .i64 = x },
     } }));


@@ -826,18 +826,6 @@ pub fn analyze(isel: *Select, air_body: []const Air.Inst.Index) !void {
                 try isel.analyzeUse(un_op);
-                air_body_index += 1;
-                air_inst_index = air_body[air_body_index];
-                continue :air_tag air_tags[@intFromEnum(air_inst_index)];
-            },
-            .vector_store_elem => {
-                const vector_store_elem = air_data[@intFromEnum(air_inst_index)].vector_store_elem;
-                const bin_op = isel.air.extraData(Air.Bin, vector_store_elem.payload).data;
-                try isel.analyzeUse(vector_store_elem.vector_ptr);
-                try isel.analyzeUse(bin_op.lhs);
-                try isel.analyzeUse(bin_op.rhs);
                 air_body_index += 1;
                 air_inst_index = air_body[air_body_index];
                 continue :air_tag air_tags[@intFromEnum(air_inst_index)];


@@ -37,6 +37,7 @@ pub fn legalizeFeatures(_: *const std.Target) ?*const Air.Legalize.Features {
             .expand_packed_load = true,
             .expand_packed_store = true,
             .expand_packed_struct_field_val = true,
+            .expand_packed_aggregate_init = true,
         }),
     };
 }
@@ -1392,114 +1393,21 @@ pub const DeclGen = struct {
                     try w.writeByte('}');
                 },
                 .@"packed" => {
-                    const int_info = ty.intInfo(zcu);
-
-                    const bits = Type.smallestUnsignedBits(int_info.bits - 1);
-                    const bit_offset_ty = try pt.intType(.unsigned, bits);
-
-                    var bit_offset: u64 = 0;
-                    var eff_num_fields: usize = 0;
-
-                    for (0..loaded_struct.field_types.len) |field_index| {
-                        const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-                        eff_num_fields += 1;
-                    }
-
-                    if (eff_num_fields == 0) {
-                        try w.writeByte('(');
-                        try dg.renderUndefValue(w, ty, location);
-                        try w.writeByte(')');
-                    } else if (ty.bitSize(zcu) > 64) {
-                        // zig_or_u128(zig_or_u128(zig_shl_u128(a, a_off), zig_shl_u128(b, b_off)), zig_shl_u128(c, c_off))
-                        var num_or = eff_num_fields - 1;
-                        while (num_or > 0) : (num_or -= 1) {
-                            try w.writeAll("zig_or_");
-                            try dg.renderTypeForBuiltinFnName(w, ty);
-                            try w.writeByte('(');
-                        }
-
-                        var eff_index: usize = 0;
-                        var needs_closing_paren = false;
-                        for (0..loaded_struct.field_types.len) |field_index| {
-                            const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                            if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-
-                            const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
-                                .bytes => |bytes| try pt.intern(.{ .int = .{
-                                    .ty = field_ty.toIntern(),
-                                    .storage = .{ .u64 = bytes.at(field_index, ip) },
-                                } }),
-                                .elems => |elems| elems[field_index],
-                                .repeated_elem => |elem| elem,
-                            };
-                            const cast_context = IntCastContext{ .value = .{ .value = Value.fromInterned(field_val) } };
-                            if (bit_offset != 0) {
-                                try w.writeAll("zig_shl_");
-                                try dg.renderTypeForBuiltinFnName(w, ty);
-                                try w.writeByte('(');
-                                try dg.renderIntCast(w, ty, cast_context, field_ty, .FunctionArgument);
-                                try w.writeAll(", ");
-                                try dg.renderValue(w, try pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
-                                try w.writeByte(')');
-                            } else {
-                                try dg.renderIntCast(w, ty, cast_context, field_ty, .FunctionArgument);
-                            }
-
-                            if (needs_closing_paren) try w.writeByte(')');
-                            if (eff_index != eff_num_fields - 1) try w.writeAll(", ");
-
-                            bit_offset += field_ty.bitSize(zcu);
-                            needs_closing_paren = true;
-                            eff_index += 1;
-                        }
-                    } else {
-                        try w.writeByte('(');
-                        // a << a_off | b << b_off | c << c_off
-                        var empty = true;
-                        for (0..loaded_struct.field_types.len) |field_index| {
-                            const field_ty: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
-                            if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-
-                            if (!empty) try w.writeAll(" | ");
-                            try w.writeByte('(');
-                            try dg.renderCType(w, ctype);
-                            try w.writeByte(')');
-
-                            const field_val = switch (ip.indexToKey(val.toIntern()).aggregate.storage) {
-                                .bytes => |bytes| try pt.intern(.{ .int = .{
-                                    .ty = field_ty.toIntern(),
-                                    .storage = .{ .u64 = bytes.at(field_index, ip) },
-                                } }),
-                                .elems => |elems| elems[field_index],
-                                .repeated_elem => |elem| elem,
-                            };
-
-                            const field_int_info: std.builtin.Type.Int = if (field_ty.isAbiInt(zcu))
-                                field_ty.intInfo(zcu)
-                            else
-                                .{ .signedness = .unsigned, .bits = undefined };
-                            switch (field_int_info.signedness) {
-                                .signed => {
-                                    try w.writeByte('(');
-                                    try dg.renderValue(w, Value.fromInterned(field_val), .Other);
-                                    try w.writeAll(" & ");
-                                    const field_uint_ty = try pt.intType(.unsigned, field_int_info.bits);
-                                    try dg.renderValue(w, try field_uint_ty.maxIntScalar(pt, field_uint_ty), .Other);
-                                    try w.writeByte(')');
-                                },
-                                .unsigned => try dg.renderValue(w, Value.fromInterned(field_val), .Other),
-                            }
-                            if (bit_offset != 0) {
-                                try w.writeAll(" << ");
-                                try dg.renderValue(w, try pt.intValue(bit_offset_ty, bit_offset), .FunctionArgument);
-                            }
-
-                            bit_offset += field_ty.bitSize(zcu);
-                            empty = false;
-                        }
-                        try w.writeByte(')');
-                    }
+                    // https://github.com/ziglang/zig/issues/24657 will eliminate most of the
+                    // following logic, leaving only the recursive `renderValue` call. Once
+                    // that proposal is implemented, a `packed struct` will literally be
+                    // represented in the InternPool by its comptime-known backing integer.
+                    var arena: std.heap.ArenaAllocator = .init(zcu.gpa);
+                    defer arena.deinit();
+                    const backing_ty: Type = .fromInterned(loaded_struct.backingIntTypeUnordered(ip));
+                    const buf = try arena.allocator().alloc(u8, @intCast(ty.abiSize(zcu)));
+                    val.writeToMemory(pt, buf) catch |err| switch (err) {
+                        error.IllDefinedMemoryLayout => unreachable,
+                        error.OutOfMemory => |e| return e,
+                        error.ReinterpretDeclRef, error.Unimplemented => return dg.fail("TODO: C backend: lower packed struct value", .{}),
+                    };
+                    const backing_val: Value = try .readUintFromMemory(backing_ty, pt, buf, arena.allocator());
+                    return dg.renderValue(w, backing_val, location);
                 },
             }
         },
@@ -1507,33 +1415,38 @@
             },
             .un => |un| {
                 const loaded_union = ip.loadUnionType(ty.toIntern());
+                if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
+                    // https://github.com/ziglang/zig/issues/24657 will eliminate most of the
+                    // following logic, leaving only the recursive `renderValue` call. Once
+                    // that proposal is implemented, a `packed union` will literally be
+                    // represented in the InternPool by its comptime-known backing integer.
+                    var arena: std.heap.ArenaAllocator = .init(zcu.gpa);
+                    defer arena.deinit();
+                    const backing_ty = try ty.unionBackingType(pt);
+                    const buf = try arena.allocator().alloc(u8, @intCast(ty.abiSize(zcu)));
+                    val.writeToMemory(pt, buf) catch |err| switch (err) {
+                        error.IllDefinedMemoryLayout => unreachable,
+                        error.OutOfMemory => |e| return e,
+                        error.ReinterpretDeclRef, error.Unimplemented => return dg.fail("TODO: C backend: lower packed union value", .{}),
+                    };
+                    const backing_val: Value = try .readUintFromMemory(backing_ty, pt, buf, arena.allocator());
+                    return dg.renderValue(w, backing_val, location);
+                }
                 if (un.tag == .none) {
                     const backing_ty = try ty.unionBackingType(pt);
-                    switch (loaded_union.flagsUnordered(ip).layout) {
-                        .@"packed" => {
-                            if (!location.isInitializer()) {
-                                try w.writeByte('(');
-                                try dg.renderType(w, backing_ty);
-                                try w.writeByte(')');
-                            }
-                            try dg.renderValue(w, Value.fromInterned(un.val), location);
-                        },
-                        .@"extern" => {
-                            if (location == .StaticInitializer) {
-                                return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{});
-                            }
-                            const ptr_ty = try pt.singleConstPtrType(ty);
-                            try w.writeAll("*((");
-                            try dg.renderType(w, ptr_ty);
-                            try w.writeAll(")(");
-                            try dg.renderType(w, backing_ty);
-                            try w.writeAll("){");
-                            try dg.renderValue(w, Value.fromInterned(un.val), location);
-                            try w.writeAll("})");
-                        },
-                        else => unreachable,
+                    assert(loaded_union.flagsUnordered(ip).layout == .@"extern");
+                    if (location == .StaticInitializer) {
+                        return dg.fail("TODO: C backend: implement extern union backing type rendering in static initializers", .{});
                     }
+                    const ptr_ty = try pt.singleConstPtrType(ty);
+                    try w.writeAll("*((");
+                    try dg.renderType(w, ptr_ty);
+                    try w.writeAll(")(");
+                    try dg.renderType(w, backing_ty);
+                    try w.writeAll("){");
+                    try dg.renderValue(w, Value.fromInterned(un.val), location);
+                    try w.writeAll("})");
                 } else {
                     if (!location.isInitializer()) {
                         try w.writeByte('(');
@@ -1544,21 +1457,6 @@
                     const field_index = zcu.unionTagFieldIndex(loaded_union, Value.fromInterned(un.tag)).?;
                     const field_ty: Type = .fromInterned(loaded_union.field_types.get(ip)[field_index]);
                     const field_name = loaded_union.loadTagType(ip).names.get(ip)[field_index];
-                    if (loaded_union.flagsUnordered(ip).layout == .@"packed") {
-                        if (field_ty.hasRuntimeBits(zcu)) {
-                            if (field_ty.isPtrAtRuntime(zcu)) {
-                                try w.writeByte('(');
-                                try dg.renderCType(w, ctype);
-                                try w.writeByte(')');
-                            } else if (field_ty.zigTypeTag(zcu) == .float) {
-                                try w.writeByte('(');
-                                try dg.renderCType(w, ctype);
-                                try w.writeByte(')');
-                            }
-                            try dg.renderValue(w, Value.fromInterned(un.val), location);
-                        } else try w.writeByte('0');
-                        return;
-                    }
 
                     const has_tag = loaded_union.hasTag(ip);
                     if (has_tag) try w.writeByte('{');
@@ -1745,9 +1643,11 @@
                     }
                     return w.writeByte('}');
                 },
-                .@"packed" => return w.print("{f}", .{
-                    try dg.fmtIntLiteralHex(try pt.undefValue(ty), .Other),
-                }),
+                .@"packed" => return dg.renderUndefValue(
+                    w,
+                    .fromInterned(loaded_struct.backingIntTypeUnordered(ip)),
+                    location,
+                ),
             }
         },
         .tuple_type => |tuple_info| {
@@ -1815,9 +1715,11 @@
                     }
                     if (has_tag) try w.writeByte('}');
                 },
-                .@"packed" => return w.print("{f}", .{
-                    try dg.fmtIntLiteralHex(try pt.undefValue(ty), .Other),
-                }),
+                .@"packed" => return dg.renderUndefValue(
+                    w,
+                    try ty.unionBackingType(pt),
+                    location,
+                ),
             }
         },
         .error_union_type => |error_union_type| switch (ctype.info(ctype_pool)) {
@@ -2445,10 +2347,7 @@
         const ty = val.typeOf(zcu);
         return .{ .data = .{
             .dg = dg,
-            .int_info = if (ty.zigTypeTag(zcu) == .@"union" and ty.containerLayout(zcu) == .@"packed")
-                .{ .signedness = .unsigned, .bits = @intCast(ty.bitSize(zcu)) }
-            else
-                ty.intInfo(zcu),
+            .int_info = ty.intInfo(zcu),
             .kind = kind,
             .ctype = try dg.ctypeFromType(ty, kind),
             .val = val,
@@ -3656,7 +3555,6 @@ fn genBodyInner(f: *Function, body: []const Air.Inst.Index) Error!void {
             .is_named_enum_value => return f.fail("TODO: C backend: implement is_named_enum_value", .{}),
             .error_set_has_value => return f.fail("TODO: C backend: implement error_set_has_value", .{}),
-            .vector_store_elem => return f.fail("TODO: C backend: implement vector_store_elem", .{}),
 
             .runtime_nav_ptr => try airRuntimeNavPtr(f, inst),
@@ -3956,6 +3854,10 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
     const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
     const src_ty: Type = .fromInterned(ptr_info.child);
 
+    // `Air.Legalize.Feature.expand_packed_load` should ensure that the only
+    // bit-pointers we see here are vector element pointers.
+    assert(ptr_info.packed_offset.host_size == 0 or ptr_info.flags.vector_index != .none);
+
     if (!src_ty.hasRuntimeBitsIgnoreComptime(zcu)) {
         try reap(f, inst, &.{ty_op.operand});
         return .none;
@@ -3987,40 +3889,6 @@ fn airLoad(f: *Function, inst: Air.Inst.Index) !CValue {
         try w.writeAll(", sizeof(");
         try f.renderType(w, src_ty);
         try w.writeAll("))");
-    } else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
-        const host_bits: u16 = ptr_info.packed_offset.host_size * 8;
-        const host_ty = try pt.intType(.unsigned, host_bits);
-
-        const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
-        const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
-
-        const field_ty = try pt.intType(.unsigned, @as(u16, @intCast(src_ty.bitSize(zcu))));
-
-        try f.writeCValue(w, local, .Other);
-        try v.elem(f, w);
-        try w.writeAll(" = (");
-        try f.renderType(w, src_ty);
-        try w.writeAll(")zig_wrap_");
-        try f.object.dg.renderTypeForBuiltinFnName(w, field_ty);
-        try w.writeAll("((");
-        try f.renderType(w, field_ty);
-        try w.writeByte(')');
-        const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
-        if (cant_cast) {
-            if (field_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
-            try w.writeAll("zig_lo_");
-            try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
-            try w.writeByte('(');
-        }
-        try w.writeAll("zig_shr_");
-        try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
-        try w.writeByte('(');
-        try f.writeCValueDeref(w, operand);
-        try v.elem(f, w);
-        try w.print(", {f})", .{try f.fmtIntLiteralDec(bit_offset_val)});
-        if (cant_cast) try w.writeByte(')');
-        try f.object.dg.renderBuiltinInfo(w, field_ty, .bits);
-        try w.writeByte(')');
     } else {
         try f.writeCValue(w, local, .Other);
         try v.elem(f, w);
@@ -4213,6 +4081,10 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
     const ptr_scalar_ty = ptr_ty.scalarType(zcu);
     const ptr_info = ptr_scalar_ty.ptrInfo(zcu);
 
+    // `Air.Legalize.Feature.expand_packed_store` should ensure that the only
+    // bit-pointers we see here are vector element pointers.
+    assert(ptr_info.packed_offset.host_size == 0 or ptr_info.flags.vector_index != .none);
+
     const ptr_val = try f.resolveInst(bin_op.lhs);
     const src_ty = f.typeOf(bin_op.rhs);
@@ -4277,66 +4149,6 @@ fn airStore(f: *Function, inst: Air.Inst.Index, safety: bool) !CValue {
         try w.writeByte(';');
         try f.object.newline();
         try v.end(f, inst, w);
-    } else if (ptr_info.packed_offset.host_size > 0 and ptr_info.flags.vector_index == .none) {
-        const host_bits = ptr_info.packed_offset.host_size * 8;
-        const host_ty = try pt.intType(.unsigned, host_bits);
-
-        const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(host_bits - 1));
-        const bit_offset_val = try pt.intValue(bit_offset_ty, ptr_info.packed_offset.bit_offset);
-
-        const src_bits = src_ty.bitSize(zcu);
-
-        const ExpectedContents = [BigInt.Managed.default_capacity]BigIntLimb;
-        var stack align(@alignOf(ExpectedContents)) =
-            std.heap.stackFallback(@sizeOf(ExpectedContents), f.object.dg.gpa);
-
-        var mask = try BigInt.Managed.initCapacity(stack.get(), BigInt.calcTwosCompLimbCount(host_bits));
-        defer mask.deinit();
-
-        try mask.setTwosCompIntLimit(.max, .unsigned, @intCast(src_bits));
-        try mask.shiftLeft(&mask, ptr_info.packed_offset.bit_offset);
-        try mask.bitNotWrap(&mask, .unsigned, host_bits);
-
-        const mask_val = try pt.intValue_big(host_ty, mask.toConst());
-
-        const v = try Vectorize.start(f, inst, w, ptr_ty);
-        const a = try Assignment.start(f, w, src_scalar_ctype);
-        try f.writeCValueDeref(w, ptr_val);
-        try v.elem(f, w);
-        try a.assign(f, w);
-        try w.writeAll("zig_or_");
-        try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
-        try w.writeAll("(zig_and_");
-        try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
-        try w.writeByte('(');
-        try f.writeCValueDeref(w, ptr_val);
-        try v.elem(f, w);
-        try w.print(", {f}), zig_shl_", .{try f.fmtIntLiteralHex(mask_val)});
-        try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
-        try w.writeByte('(');
-        const cant_cast = host_ty.isInt(zcu) and host_ty.bitSize(zcu) > 64;
-        if (cant_cast) {
-            if (src_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
-            try w.writeAll("zig_make_");
-            try f.object.dg.renderTypeForBuiltinFnName(w, host_ty);
-            try w.writeAll("(0, ");
-        } else {
-            try w.writeByte('(');
-            try f.renderType(w, host_ty);
-            try w.writeByte(')');
-        }
-        if (src_ty.isPtrAtRuntime(zcu)) {
-            try w.writeByte('(');
-            try f.renderType(w, .usize);
-            try w.writeByte(')');
-        }
-        try f.writeCValue(w, src_val, .Other);
-        try v.elem(f, w);
-        if (cant_cast) try w.writeByte(')');
-        try w.print(", {f}))", .{try f.fmtIntLiteralDec(bit_offset_val)});
-        try a.end(f, w);
-        try v.end(f, inst, w);
     } else {
         switch (ptr_val) {
             .local_ref => |ptr_local_index| switch (src_val) {
@@ -6015,10 +5827,7 @@ fn fieldLocation(
             else if (!field_ptr_ty.childType(zcu).hasRuntimeBitsIgnoreComptime(zcu))
                 .{ .byte_offset = loaded_struct.offsets.get(ip)[field_index] }
             else
-                .{ .field = if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
-                    .{ .identifier = field_name.toSlice(ip) }
-                else
-                    .{ .field = field_index } },
+                .{ .field = .{ .identifier = loaded_struct.fieldName(ip, field_index).toSlice(ip) } },
             .@"packed" => if (field_ptr_ty.ptrInfo(zcu).packed_offset.host_size == 0)
                 .{ .byte_offset = @divExact(zcu.structPackedFieldBitOffset(loaded_struct, field_index) +
                     container_ptr_ty.ptrInfo(zcu).packed_offset.bit_offset, 8) }
@@ -6202,115 +6011,20 @@ fn airStructFieldVal(f: *Function, inst: Air.Inst.Index) !CValue {
     // Ensure complete type definition is visible before accessing fields.
     _ = try f.ctypeFromType(struct_ty, .complete);
 
+    assert(struct_ty.containerLayout(zcu) != .@"packed"); // `Air.Legalize.Feature.expand_packed_struct_field_val` handles this case
+
     const field_name: CValue = switch (ip.indexToKey(struct_ty.toIntern())) {
-        .struct_type => field_name: {
-            const loaded_struct = ip.loadStructType(struct_ty.toIntern());
-            switch (loaded_struct.layout) {
-                .auto, .@"extern" => break :field_name if (loaded_struct.fieldName(ip, extra.field_index).unwrap()) |field_name|
-                    .{ .identifier = field_name.toSlice(ip) }
-                else
-                    .{ .field = extra.field_index },
-                .@"packed" => {
-                    const int_info = struct_ty.intInfo(zcu);
-
-                    const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
-
-                    const bit_offset = zcu.structPackedFieldBitOffset(loaded_struct, extra.field_index);
-
-                    const field_int_signedness = if (inst_ty.isAbiInt(zcu))
-                        inst_ty.intInfo(zcu).signedness
-                    else
-                        .unsigned;
-                    const field_int_ty = try pt.intType(field_int_signedness, @as(u16, @intCast(inst_ty.bitSize(zcu))));
-
-                    const temp_local = try f.allocLocal(inst, field_int_ty);
-                    try f.writeCValue(w, temp_local, .Other);
-                    try w.writeAll(" = zig_wrap_");
-                    try f.object.dg.renderTypeForBuiltinFnName(w, field_int_ty);
-                    try w.writeAll("((");
-                    try f.renderType(w, field_int_ty);
-                    try w.writeByte(')');
-                    const cant_cast = int_info.bits > 64;
-                    if (cant_cast) {
-                        if (field_int_ty.bitSize(zcu) > 64) return f.fail("TODO: C backend: implement casting between types > 64 bits", .{});
-                        try w.writeAll("zig_lo_");
-                        try f.object.dg.renderTypeForBuiltinFnName(w, struct_ty);
-                        try w.writeByte('(');
-                    }
-                    if (bit_offset > 0) {
-                        try w.writeAll("zig_shr_");
-                        try f.object.dg.renderTypeForBuiltinFnName(w, struct_ty);
-                        try w.writeByte('(');
-                    }
-                    try f.writeCValue(w, struct_byval, .Other);
-                    if (bit_offset > 0) try w.print(", {f})", .{
-                        try f.fmtIntLiteralDec(try pt.intValue(bit_offset_ty, bit_offset)),
-                    });
-                    if (cant_cast) try w.writeByte(')');
-                    try f.object.dg.renderBuiltinInfo(w, field_int_ty, .bits);
-                    try w.writeAll(");");
-                    try f.object.newline();
-                    if (inst_ty.eql(field_int_ty, zcu)) return temp_local;
-
-                    const local = try f.allocLocal(inst, inst_ty);
-                    if (local.new_local != temp_local.new_local) {
-                        try w.writeAll("memcpy(");
-                        try f.writeCValue(w, .{ .local_ref = local.new_local }, .FunctionArgument);
-                        try w.writeAll(", ");
-                        try f.writeCValue(w, .{ .local_ref = temp_local.new_local }, .FunctionArgument);
-                        try w.writeAll(", sizeof(");
-                        try f.renderType(w, inst_ty);
-                        try w.writeAll("));");
-                        try f.object.newline();
-                    }
-                    try freeLocal(f, inst, temp_local.new_local, null);
-                    return local;
-                },
+        .struct_type => .{ .identifier = struct_ty.structFieldName(extra.field_index, zcu).unwrap().?.toSlice(ip) },
+        .union_type => name: {
+            const union_type = ip.loadUnionType(struct_ty.toIntern());
+            const enum_tag_ty: Type = .fromInterned(union_type.enum_tag_ty);
+            const field_name_str = enum_tag_ty.enumFieldName(extra.field_index, zcu).toSlice(ip);
+            if (union_type.hasTag(ip)) {
+                break :name .{ .payload_identifier = field_name_str };
+            } else {
+                break :name .{ .identifier = field_name_str };
             }
         },
         .tuple_type => .{ .field = extra.field_index },
-        .union_type => field_name: {
-            const loaded_union = ip.loadUnionType(struct_ty.toIntern());
-            switch (loaded_union.flagsUnordered(ip).layout) {
-                .auto, .@"extern" => {
-                    const name = loaded_union.loadTagType(ip).names.get(ip)[extra.field_index];
-                    break :field_name if (loaded_union.hasTag(ip))
-                        .{ .payload_identifier = name.toSlice(ip) }
-                    else
-                        .{ .identifier = name.toSlice(ip) };
-                },
-                .@"packed" => {
-                    const operand_lval = if (struct_byval == .constant) blk: {
-                        const operand_local = try f.allocLocal(inst, struct_ty);
-                        try f.writeCValue(w, operand_local, .Other);
-                        try w.writeAll(" = ");
-                        try f.writeCValue(w, struct_byval, .Other);
-                        try w.writeByte(';');
-                        try f.object.newline();
-                        break :blk operand_local;
-                    } else struct_byval;
-                    const local = try f.allocLocal(inst, inst_ty);
-                    if (switch (local) {
-                        .new_local, .local => |local_index| switch (operand_lval) {
-                            .new_local, .local => |operand_local_index| local_index != operand_local_index,
-                            else => true,
-                        },
-                        else => true,
-                    }) {
-                        try w.writeAll("memcpy(&");
-                        try f.writeCValue(w, local, .Other);
-                        try w.writeAll(", &");
-                        try f.writeCValue(w, operand_lval, .Other);
-                        try w.writeAll(", sizeof(");
-                        try f.renderType(w, inst_ty);
-                        try w.writeAll("));");
-                        try f.object.newline();
-                    }
-                    try f.freeCValue(inst, operand_lval);
-                    return local;
-                },
-            }
-        },
         else => unreachable,
     };
@@ -7702,98 +7416,13 @@ fn airAggregateInit(f: *Function, inst: Air.Inst.Index) !CValue {
                         if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
 
                         const a = try Assignment.start(f, w, try f.ctypeFromType(field_ty, .complete));
-                        try f.writeCValueMember(w, local, if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name|
-                            .{ .identifier = field_name.toSlice(ip) }
-                        else
-                            .{ .field = field_index });
+                        try f.writeCValueMember(w, local, .{ .identifier = loaded_struct.fieldName(ip, field_index).toSlice(ip) });
                         try a.assign(f, w);
                         try f.writeCValue(w, resolved_elements[field_index], .Other);
                         try a.end(f, w);
                     }
                 },
-                .@"packed" => {
-                    try f.writeCValue(w, local, .Other);
-                    try w.writeAll(" = ");
-                    const backing_int_ty: Type = .fromInterned(loaded_struct.backingIntTypeUnordered(ip));
-                    const int_info = backing_int_ty.intInfo(zcu);
-
-                    const bit_offset_ty = try pt.intType(.unsigned, Type.smallestUnsignedBits(int_info.bits - 1));
-
-                    var bit_offset: u64 = 0;
-
-                    var empty = true;
-                    for (0..elements.len) |field_index| {
-                        if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-                        const field_ty = inst_ty.fieldType(field_index, zcu);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-
-                        if (!empty) {
-                            try w.writeAll("zig_or_");
-                            try f.object.dg.renderTypeForBuiltinFnName(w, inst_ty);
-                            try w.writeByte('(');
-                        }
-                        empty = false;
-                    }
-                    empty = true;
-                    for (resolved_elements, 0..) |element, field_index| {
-                        if (inst_ty.structFieldIsComptime(field_index, zcu)) continue;
-                        const field_ty = inst_ty.fieldType(field_index, zcu);
-                        if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
-
-                        if (!empty) try w.writeAll(", ");
-                        // TODO: Skip this entire shift if val is 0?
-                        try w.writeAll("zig_shlw_");
-                        try f.object.dg.renderTypeForBuiltinFnName(w, inst_ty);
-                        try w.writeByte('(');
-
-                        if (field_ty.isAbiInt(zcu)) {
-                            try w.writeAll("zig_and_");
-                            try f.object.dg.renderTypeForBuiltinFnName(w, inst_ty);
-                            try w.writeByte('(');
-                        }
-
-                        if (inst_ty.isAbiInt(zcu) and (field_ty.isAbiInt(zcu) or field_ty.isPtrAtRuntime(zcu))) {
-                            try f.renderIntCast(w, inst_ty, element, .{}, field_ty, .FunctionArgument);
-                        } else {
-                            try w.writeByte('(');
-                            try f.renderType(w, inst_ty);
-                            try w.writeByte(')');
-                            if (field_ty.isPtrAtRuntime(zcu)) {
-                                try w.writeByte('(');
-                                try f.renderType(w, switch (int_info.signedness) {
-                                    .unsigned => .usize,
-                                    .signed => .isize,
-                                });
-                                try w.writeByte(')');
-                            }
-                            try f.writeCValue(w, element, .Other);
-                        }
-
-                        if (field_ty.isAbiInt(zcu)) {
-                            try w.writeAll(", ");
-                            const field_int_info = field_ty.intInfo(zcu);
-                            const field_mask = if (int_info.signedness == .signed and int_info.bits == field_int_info.bits)
-                                try pt.intValue(backing_int_ty, -1)
-                            else
-                                try (try pt.intType(.unsigned, field_int_info.bits)).maxIntScalar(pt, backing_int_ty);
-                            try f.object.dg.renderValue(w, field_mask, .FunctionArgument);
-                            try w.writeByte(')');
-                        }
-
-                        try w.print(", {f}", .{
-                            try f.fmtIntLiteralDec(try pt.intValue(bit_offset_ty, bit_offset)),
-                        });
-                        try f.object.dg.renderBuiltinInfo(w, inst_ty, .bits);
-                        try w.writeByte(')');
-                        if (!empty) try w.writeByte(')');
-
-                        bit_offset += field_ty.bitSize(zcu);
-                        empty = false;
-                    }
-                    try w.writeByte(';');
-                    try f.object.newline();
-                },
+                .@"packed" => unreachable, // `Air.Legalize.Feature.expand_packed_struct_init` handles this case
             }
         },
         .tuple_type => |tuple_info| for (0..tuple_info.types.len) |field_index| {
@@ -7828,9 +7457,10 @@ fn airUnionInit(f: *Function, inst: Air.Inst.Index) !CValue {
     try reap(f, inst, &.{extra.init});
 
     const w = &f.object.code.writer;
-    const local = try f.allocLocal(inst, union_ty);
     if (loaded_union.flagsUnordered(ip).layout == .@"packed") return f.moveCValue(inst, union_ty, payload);
 
+    const local = try f.allocLocal(inst, union_ty);
     const field: CValue = if (union_ty.unionTagTypeSafety(zcu)) |tag_ty| field: {
         const layout = union_ty.unionGetLayout(zcu);
         if (layout.tag_size != 0) {


@@ -2514,11 +2514,7 @@
                     kind.noParameter(),
                 );
                 if (field_ctype.index == .void) continue;
-                const field_name = if (loaded_struct.fieldName(ip, field_index)
-                    .unwrap()) |field_name|
-                    try pool.string(allocator, field_name.toSlice(ip))
-                else
-                    String.fromUnnamed(@intCast(field_index));
+                const field_name = try pool.string(allocator, loaded_struct.fieldName(ip, field_index).toSlice(ip));
                 const field_alignas = AlignAs.fromAlignment(.{
                     .@"align" = loaded_struct.fieldAlign(ip, field_index),
                     .abi = field_type.abiAlignment(zcu),

View File

@@ -2411,8 +2411,7 @@ pub const Object = struct {
const field_size = field_ty.abiSize(zcu);
const field_align = ty.fieldAlignment(field_index, zcu);
const field_offset = ty.structFieldOffset(field_index, zcu);
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
    try ip.getOrPutStringFmt(gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
const field_name = struct_type.fieldName(ip, field_index);
fields.appendAssumeCapacity(try o.builder.debugMemberType(
try o.builder.metadataString(field_name.toSlice(ip)),
null, // File
@@ -5093,8 +5092,6 @@ pub const FuncGen = struct {
.wasm_memory_size => try self.airWasmMemorySize(inst),
.wasm_memory_grow => try self.airWasmMemoryGrow(inst),
.vector_store_elem => try self.airVectorStoreElem(inst),
.runtime_nav_ptr => try self.airRuntimeNavPtr(inst),
.inferred_alloc, .inferred_alloc_comptime => unreachable,
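
For context, the removed instruction handled stores to a vector element at a runtime-known index. A hedged sketch of the kind of source that used to produce it (the syntax still works; it just no longer needs a dedicated AIR instruction):

```zig
const std = @import("std");

test "store to a vector element at a runtime index" {
    var v: @Vector(4, u32) = .{ 0, 0, 0, 0 };
    var i: usize = 2;
    _ = &i; // keep the index runtime-known
    v[i] = 7; // the pattern that used to lower to vector_store_elem
    try std.testing.expectEqual(@as(u32, 7), v[2]);
}
```
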
@@ -6873,16 +6870,14 @@ pub const FuncGen = struct {
const array_llvm_ty = try o.lowerType(pt, array_ty);
const elem_ty = array_ty.childType(zcu);
if (isByRef(array_ty, zcu)) {
const indices: [2]Builder.Value = .{
    try o.builder.intValue(try o.lowerType(pt, Type.usize), 0), rhs,
};
const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &.{
    try o.builder.intValue(try o.lowerType(pt, Type.usize), 0), rhs,
}, "");
if (isByRef(elem_ty, zcu)) {
const elem_ptr = try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
const elem_alignment = elem_ty.abiAlignment(zcu).toLlvm();
return self.loadByRef(elem_ptr, elem_ty, elem_alignment, .normal);
} else {
const elem_ptr =
    try self.wip.gep(.inbounds, array_llvm_ty, array_llvm_val, &indices, "");
return self.loadTruncate(.normal, elem_ty, elem_ptr, .default);
}
}
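
The restructure computes the element pointer once, ahead of the by-ref check, instead of repeating the GEP in both branches. In Zig terms, the `[0, rhs]` GEP is the address computation behind taking a pointer to one element of a by-reference array (illustrative):

```zig
const std = @import("std");

// gep(.inbounds, ..., &.{0, i}) is the address computation behind an
// ordinary &array[i] on an array that is passed around by reference
fn elemPtr(array: *[8]u32, i: usize) *u32 {
    return &array[i];
}

test "element pointer into a by-ref array" {
    var a = [_]u32{0} ** 8;
    elemPtr(&a, 3).* = 9;
    try std.testing.expectEqual(@as(u32, 9), a[3]);
}
```
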
@@ -8140,33 +8135,6 @@ pub const FuncGen = struct {
}, "");
}
fn airVectorStoreElem(self: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = self.ng.object;
const pt = self.ng.pt;
const zcu = pt.zcu;
const data = self.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = self.air.extraData(Air.Bin, data.payload).data;
const vector_ptr = try self.resolveInst(data.vector_ptr);
const vector_ptr_ty = self.typeOf(data.vector_ptr);
const index = try self.resolveInst(extra.lhs);
const operand = try self.resolveInst(extra.rhs);
self.maybeMarkAllowZeroAccess(vector_ptr_ty.ptrInfo(zcu));
// TODO: Emitting a load here is a violation of volatile semantics. Not fixable in general.
// https://github.com/ziglang/zig/issues/18652#issuecomment-2452844908
const access_kind: Builder.MemoryAccessKind =
if (vector_ptr_ty.isVolatilePtr(zcu)) .@"volatile" else .normal;
const elem_llvm_ty = try o.lowerType(pt, vector_ptr_ty.childType(zcu));
const alignment = vector_ptr_ty.ptrAlignment(zcu).toLlvm();
const loaded = try self.wip.load(access_kind, elem_llvm_ty, vector_ptr, alignment, "");
const new_vector = try self.wip.insertElement(loaded, operand, index, "");
_ = try self.store(vector_ptr, vector_ptr_ty, new_vector, .none);
return .none;
}
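
The deleted function shows why the instruction could be dropped wholesale: the lowering was an open-coded read-modify-write, which is also what made the volatile caveat unfixable in general. The same sequence, sketched in Zig with an assumed helper signature:

```zig
// what the removed lowering did: a whole-vector read-modify-write
// (the helper name and shape here are assumptions, not compiler API)
fn storeElem(ptr: *@Vector(4, u32), index: usize, value: u32) void {
    var v = ptr.*; // load — already wrong for a volatile pointer
    v[index] = value; // insertelement
    ptr.* = v; // store back
}

test "read-modify-write vector element store" {
    var v: @Vector(4, u32) = .{ 1, 2, 3, 4 };
    storeElem(&v, 1, 42);
    try @import("std").testing.expectEqual(@as(u32, 42), v[1]);
}
```
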
fn airRuntimeNavPtr(fg: *FuncGen, inst: Air.Inst.Index) !Builder.Value {
const o = fg.ng.object;
const pt = fg.ng.pt;
@@ -8303,8 +8271,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
const scalar_ty = inst_ty.scalarType(zcu);
assert(scalar_ty.zigTypeTag(zcu) == .int);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float add", .{});
return self.wip.callIntrinsic(
.normal,
.none,
@@ -8344,8 +8311,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
const scalar_ty = inst_ty.scalarType(zcu);
assert(scalar_ty.zigTypeTag(zcu) == .int);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float sub", .{});
return self.wip.callIntrinsic(
.normal,
.none,
@@ -8385,8 +8351,7 @@ pub const FuncGen = struct {
const rhs = try self.resolveInst(bin_op.rhs);
const inst_ty = self.typeOfIndex(inst);
const scalar_ty = inst_ty.scalarType(zcu);
assert(scalar_ty.zigTypeTag(zcu) == .int);
if (scalar_ty.isAnyFloat()) return self.todo("saturating float mul", .{});
return self.wip.callIntrinsic(
.normal,
.none,
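
These three deletions are the same cleanup: `+|`, `-|`, and `*|` are defined only for integers, so the saturating-float TODO was unreachable and the type assert redundant with what Sema already guarantees. For reference:

```zig
const std = @import("std");

test "saturating arithmetic clamps instead of wrapping" {
    try std.testing.expectEqual(@as(u8, 255), @as(u8, 250) +| 10);
    try std.testing.expectEqual(@as(u8, 0), @as(u8, 3) -| 10);
    try std.testing.expectEqual(@as(i8, 127), @as(i8, 64) *| 2);
}
```
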
@@ -11454,7 +11419,6 @@ pub const FuncGen = struct {
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
assert(info.flags.vector_index != .runtime);
if (info.flags.vector_index != .none) {
const index_u32 = try o.builder.intValue(.i32, info.flags.vector_index);
const vec_elem_ty = try o.lowerType(pt, elem_ty);
@@ -11524,7 +11488,6 @@ pub const FuncGen = struct {
const access_kind: Builder.MemoryAccessKind =
if (info.flags.is_volatile) .@"volatile" else .normal;
assert(info.flags.vector_index != .runtime);
if (info.flags.vector_index != .none) {
const index_u32 = try o.builder.intValue(.i32, info.flags.vector_index);
const vec_elem_ty = try o.lowerType(pt, elem_ty);
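
With the runtime representation gone, a pointer's `vector_index` flag is now either `.none` or a comptime-known index, so these asserts had nothing left to guard. Element pointers with comptime-known indices remain expressible (illustrative):

```zig
const std = @import("std");

test "pointer to a vector element, comptime-known index" {
    var v: @Vector(4, u32) = .{ 1, 2, 3, 4 };
    const p = &v[1]; // fine: the index is comptime-known
    p.* = 9;
    try std.testing.expectEqual(@as(u32, 9), v[1]);
}
```
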

View File

@@ -1633,7 +1633,6 @@ fn genBody(func: *Func, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => return func.fail("TODO implement is_named_enum_value", .{}),
.error_set_has_value => return func.fail("TODO implement error_set_has_value", .{}),
.vector_store_elem => return func.fail("TODO implement vector_store_elem", .{}),
.c_va_arg => return func.fail("TODO implement c_va_arg", .{}),
.c_va_copy => return func.fail("TODO implement c_va_copy", .{}),

View File

@@ -702,7 +702,6 @@ fn genBody(self: *Self, body: []const Air.Inst.Index) InnerError!void {
.is_named_enum_value => @panic("TODO implement is_named_enum_value"),
.error_set_has_value => @panic("TODO implement error_set_has_value"),
.vector_store_elem => @panic("TODO implement vector_store_elem"),
.runtime_nav_ptr => @panic("TODO implement runtime_nav_ptr"),
.c_va_arg => return self.fail("TODO implement c_va_arg", .{}),

View File

@@ -1520,8 +1520,7 @@ fn resolveType(cg: *CodeGen, ty: Type, repr: Repr) Error!Id {
const field_ty: Type = .fromInterned(struct_type.field_types.get(ip)[field_index]);
if (!field_ty.hasRuntimeBitsIgnoreComptime(zcu)) continue;
const field_name = struct_type.fieldName(ip, field_index).unwrap() orelse
    try ip.getOrPutStringFmt(zcu.gpa, pt.tid, "{d}", .{field_index}, .no_embedded_nulls);
const field_name = struct_type.fieldName(ip, field_index);
try member_types.append(try cg.resolveType(field_ty, .indirect));
try member_names.append(field_name.toSlice(ip));
try member_offsets.append(@intCast(ty.structFieldOffset(field_index, zcu)));
@@ -2726,8 +2725,6 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) Error!void {
.ptr_elem_val => try cg.airPtrElemVal(inst),
.array_elem_val => try cg.airArrayElemVal(inst),
.vector_store_elem => return cg.airVectorStoreElem(inst),
.set_union_tag => return cg.airSetUnionTag(inst),
.get_union_tag => try cg.airGetUnionTag(inst),
.union_init => try cg.airUnionInit(inst),
@@ -4446,29 +4443,6 @@ fn airPtrElemVal(cg: *CodeGen, inst: Air.Inst.Index) !?Id {
return try cg.load(elem_ty, elem_ptr_id, .{ .is_volatile = ptr_ty.isVolatilePtr(zcu) });
}
fn airVectorStoreElem(cg: *CodeGen, inst: Air.Inst.Index) !void {
const zcu = cg.module.zcu;
const data = cg.air.instructions.items(.data)[@intFromEnum(inst)].vector_store_elem;
const extra = cg.air.extraData(Air.Bin, data.payload).data;
const vector_ptr_ty = cg.typeOf(data.vector_ptr);
const vector_ty = vector_ptr_ty.childType(zcu);
const scalar_ty = vector_ty.scalarType(zcu);
const scalar_ty_id = try cg.resolveType(scalar_ty, .indirect);
const storage_class = cg.module.storageClass(vector_ptr_ty.ptrAddressSpace(zcu));
const scalar_ptr_ty_id = try cg.module.ptrType(scalar_ty_id, storage_class);
const vector_ptr = try cg.resolve(data.vector_ptr);
const index = try cg.resolve(extra.lhs);
const operand = try cg.resolve(extra.rhs);
const elem_ptr_id = try cg.accessChainId(scalar_ptr_ty_id, vector_ptr, &.{index});
try cg.store(scalar_ty, elem_ptr_id, operand, .{
.is_volatile = vector_ptr_ty.isVolatilePtr(zcu),
});
}
fn airSetUnionTag(cg: *CodeGen, inst: Air.Inst.Index) !void {
const zcu = cg.module.zcu;
const bin_op = cg.air.instructions.items(.data)[@intFromEnum(inst)].bin_op;

View File

@@ -1978,7 +1978,6 @@ fn genInst(cg: *CodeGen, inst: Air.Inst.Index) InnerError!void {
.save_err_return_trace_index,
.is_named_enum_value,
.addrspace_cast,
.vector_store_elem,
.c_va_arg,
.c_va_copy,
.c_va_end,

File diff suppressed because it is too large

View File

@@ -3158,11 +3158,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
.struct_field
else
.struct_field);
if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
    var field_name_buf: [std.fmt.count("{d}", .{std.math.maxInt(u32)})]u8 = undefined;
    const field_name = std.fmt.bufPrint(&field_name_buf, "{d}", .{field_index}) catch unreachable;
    try wip_nav.strp(field_name);
}
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
try wip_nav.refType(field_type);
if (!is_comptime) {
try diw.writeUleb128(loaded_struct.offsets.get(ip)[field_index]);
@@ -3187,7 +3183,7 @@ fn updateComptimeNavInner(dwarf: *Dwarf, pt: Zcu.PerThread, nav_index: InternPoo
var field_bit_offset: u16 = 0;
for (0..loaded_struct.field_types.len) |field_index| {
try wip_nav.abbrevCode(.packed_struct_field);
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).unwrap().?.toSlice(ip));
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
const field_type: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try diw.writeUleb128(field_bit_offset);
@@ -4269,11 +4265,7 @@ fn updateLazyValue(
.comptime_value_field_runtime_bits
else
continue);
if (loaded_struct_type.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
    var field_name_buf: [std.fmt.count("{d}", .{std.math.maxInt(u32)})]u8 = undefined;
    const field_name = std.fmt.bufPrint(&field_name_buf, "{d}", .{field_index}) catch unreachable;
    try wip_nav.strp(field_name);
}
try wip_nav.strp(loaded_struct_type.fieldName(ip, field_index).toSlice(ip));
const field_value: Value = .fromInterned(switch (aggregate.storage) {
.bytes => unreachable,
.elems => |elems| elems[field_index],
@@ -4467,11 +4459,7 @@ fn updateContainerTypeWriterError(
.struct_field
else
.struct_field);
if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
    var field_name_buf: [std.fmt.count("{d}", .{std.math.maxInt(u32)})]u8 = undefined;
    const field_name = std.fmt.bufPrint(&field_name_buf, "{d}", .{field_index}) catch unreachable;
    try wip_nav.strp(field_name);
}
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
try wip_nav.refType(field_type);
if (!is_comptime) {
try diw.writeUleb128(loaded_struct.offsets.get(ip)[field_index]);
@@ -4573,11 +4561,7 @@ fn updateContainerTypeWriterError(
.struct_field
else
.struct_field);
if (loaded_struct.fieldName(ip, field_index).unwrap()) |field_name| try wip_nav.strp(field_name.toSlice(ip)) else {
    var field_name_buf: [std.fmt.count("{d}", .{std.math.maxInt(u32)})]u8 = undefined;
    const field_name = std.fmt.bufPrint(&field_name_buf, "{d}", .{field_index}) catch unreachable;
    try wip_nav.strp(field_name);
}
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
try wip_nav.refType(field_type);
if (!is_comptime) {
try diw.writeUleb128(loaded_struct.offsets.get(ip)[field_index]);
@@ -4600,7 +4584,7 @@ fn updateContainerTypeWriterError(
var field_bit_offset: u16 = 0;
for (0..loaded_struct.field_types.len) |field_index| {
try wip_nav.abbrevCode(.packed_struct_field);
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).unwrap().?.toSlice(ip));
try wip_nav.strp(loaded_struct.fieldName(ip, field_index).toSlice(ip));
const field_type: Type = .fromInterned(loaded_struct.field_types.get(ip)[field_index]);
try wip_nav.refType(field_type);
try diw.writeUleb128(field_bit_offset);
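
All of these DWARF sites collapse the same way as the earlier C and SPIR-V changes: `fieldName` can no longer come back empty, so the `{d}` fallback buffer is dead code. At the language level, every struct field carries a source name, packed or not (illustrative):

```zig
const std = @import("std");

test "every struct field carries a source-level name" {
    const S = packed struct { flag: bool, count: u7 };
    inline for (@typeInfo(S).@"struct".fields) |field| {
        try std.testing.expect(field.name.len > 0);
    }
}
```
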