Mirror of https://github.com/ziglang/zig.git (synced 2025-12-06 06:13:07 +00:00)
replace even more aggregate interns

parent 7756fa6641
commit 79e5c138c6
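
Every hunk below applies the same refactor: call sites that interned an `.aggregate` key by hand are switched to the `Zcu.PerThread` helpers `aggregateValue` (explicit element list) and `aggregateSplatValue` (one repeated element). A minimal before/after sketch of the pattern, assuming `aggregateValue` returns `Allocator.Error!Value` (its exact return type is cut off in the last hunk); `ty`, `elem_vals`, `vec_ty`, and `scalar_val` are illustrative names:

    // Before: build the aggregate intern key inline and wrap the index in a Value.
    const before: Value = .fromInterned(try pt.intern(.{ .aggregate = .{
        .ty = ty.toIntern(),
        .storage = .{ .elems = elem_vals },
    } }));

    // After: the helper interns the same aggregate and returns the Value directly.
    const after: Value = try pt.aggregateValue(ty, elem_vals);

    // Splat case: every element of the array/vector is the same value.
    const splat: Value = try pt.aggregateSplatValue(vec_ty, scalar_val);

Note that `aggregateSplatValue` (its body is visible in the last hunk) returns `pt.undefValue(ty)` when the repeated element is undef, rather than interning an aggregate of undef elements.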
@@ -941,10 +941,7 @@ fn scalarizeBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, comptime form:
         .lhs = Air.internedToRef(try pt.intern(.{ .ptr = .{
             .ty = (try pt.manyConstPtrType(mask_elem_ty)).toIntern(),
             .base_addr = .{ .uav = .{
-                .val = try pt.intern(.{ .aggregate = .{
-                    .ty = mask_ty.toIntern(),
-                    .storage = .{ .elems = mask_elems },
-                } }),
+                .val = (try pt.aggregateValue(mask_ty, mask_elems)).toIntern(),
                 .orig_ty = (try pt.singleConstPtrType(mask_ty)).toIntern(),
             } },
             .byte_offset = 0,
@@ -1023,10 +1020,7 @@ fn scalarizeBlockPayload(l: *Legalize, orig_inst: Air.Inst.Index, comptime form:
         break :operand_b Air.internedToRef(try pt.intern(.{ .ptr = .{
             .ty = (try pt.manyConstPtrType(elem_ty)).toIntern(),
             .base_addr = .{ .uav = .{
-                .val = try pt.intern(.{ .aggregate = .{
-                    .ty = ct_elems_ty.toIntern(),
-                    .storage = .{ .elems = ct_elems.keys() },
-                } }),
+                .val = (try pt.aggregateValue(ct_elems_ty, ct_elems.keys())).toIntern(),
                 .orig_ty = (try pt.singleConstPtrType(ct_elems_ty)).toIntern(),
             } },
             .byte_offset = 0,
@@ -2550,10 +2544,7 @@ fn floatFromBigIntVal(
         else => unreachable,
     };
     if (is_vector) {
-        return .fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_ty.toIntern(),
-            .storage = .{ .repeated_elem = scalar_val.toIntern() },
-        } }));
+        return pt.aggregateSplatValue(float_ty, scalar_val);
     } else {
         return scalar_val;
     }
@@ -14838,8 +14838,7 @@ fn zirDiv(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Ins
 
     if (maybe_lhs_val) |lhs_val| {
         if (maybe_rhs_val) |rhs_val| {
-            const result = try arith.div(sema, block, resolved_type, lhs_val, rhs_val, src, lhs_src, rhs_src, .div);
-            return Air.internedToRef(result.toIntern());
+            return .fromValue(try arith.div(sema, block, resolved_type, lhs_val, rhs_val, src, lhs_src, rhs_src, .div));
         }
         if (allow_div_zero) {
             if (lhs_val.isUndef(zcu)) return pt.undefRef(resolved_type);
@@ -120,10 +120,7 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
                     .values = values,
                 },
             );
-            return pt.intern(.{ .aggregate = .{
-                .ty = ty,
-                .storage = .{ .elems = values },
-            } });
+            return (try pt.aggregateValue(.fromInterned(ty), values)).toIntern();
         },
         .struct_literal => |init| {
            const elems = try self.sema.arena.alloc(InternPool.Index, init.names.len);
@@ -205,10 +202,7 @@ fn lowerExprAnonResTy(self: *LowerZon, node: Zoir.Node.Index) CompileError!Inter
             try self.sema.declareDependency(.{ .interned = struct_ty });
             try self.sema.addTypeReferenceEntry(self.nodeSrc(node), struct_ty);
 
-            return try pt.intern(.{ .aggregate = .{
-                .ty = struct_ty,
-                .storage = .{ .elems = elems },
-            } });
+            return (try pt.aggregateValue(.fromInterned(struct_ty), elems)).toIntern();
         },
     }
 }
@@ -638,10 +632,7 @@ fn lowerArray(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
         elems[elems.len - 1] = sentinel.toIntern();
     }
 
-    return self.sema.pt.intern(.{ .aggregate = .{
-        .ty = res_ty.toIntern(),
-        .storage = .{ .elems = elems },
-    } });
+    return (try self.sema.pt.aggregateValue(res_ty, elems)).toIntern();
 }
 
 fn lowerEnum(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
@@ -752,10 +743,7 @@ fn lowerTuple(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
         }
     }
 
-    return self.sema.pt.intern(.{ .aggregate = .{
-        .ty = res_ty.toIntern(),
-        .storage = .{ .elems = elems },
-    } });
+    return (try self.sema.pt.aggregateValue(res_ty, elems)).toIntern();
 }
 
 fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
@@ -815,12 +803,7 @@ fn lowerStruct(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
         if (value.* == .none) return self.fail(node, "missing field '{f}'", .{name.fmt(ip)});
     }
 
-    return self.sema.pt.intern(.{ .aggregate = .{
-        .ty = res_ty.toIntern(),
-        .storage = .{
-            .elems = field_values,
-        },
-    } });
+    return (try self.sema.pt.aggregateValue(res_ty, field_values)).toIntern();
 }
 
 fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.Index {
@@ -867,16 +850,13 @@ fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
         elems[elems.len - 1] = ptr_info.sentinel;
     }
 
-    const array_ty = try self.sema.pt.intern(.{ .array_type = .{
+    const array_ty = try self.sema.pt.arrayType(.{
         .len = elems.len,
         .sentinel = ptr_info.sentinel,
         .child = ptr_info.child,
-    } });
+    });
 
-    const array = try self.sema.pt.intern(.{ .aggregate = .{
-        .ty = array_ty,
-        .storage = .{ .elems = elems },
-    } });
+    const array_val = try self.sema.pt.aggregateValue(array_ty, elems);
 
     const many_item_ptr_type = try self.sema.pt.intern(.{ .ptr_type = .{
         .child = ptr_info.child,
@@ -894,8 +874,8 @@ fn lowerSlice(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool.
         .ty = many_item_ptr_type,
         .base_addr = .{
             .uav = .{
-                .orig_ty = (try self.sema.pt.singleConstPtrType(.fromInterned(array_ty))).toIntern(),
-                .val = array,
+                .orig_ty = (try self.sema.pt.singleConstPtrType(array_ty)).toIntern(),
+                .val = array_val.toIntern(),
             },
         },
         .byte_offset = 0,
@@ -994,8 +974,5 @@ fn lowerVector(self: *LowerZon, node: Zoir.Node.Index, res_ty: Type) !InternPool
         elem.* = try self.lowerExprKnownResTy(elem_nodes.at(@intCast(i)), .fromInterned(vector_info.child));
     }
 
-    return self.sema.pt.intern(.{ .aggregate = .{
-        .ty = res_ty.toIntern(),
-        .storage = .{ .elems = elems },
-    } });
+    return (try self.sema.pt.aggregateValue(res_ty, elems)).toIntern();
 }
@@ -906,12 +906,7 @@ pub fn modRem(
                 const rhs_elem = try rhs_val.elemValue(pt, elem_idx);
                 result_elem.* = (try modRemScalar(sema, block, elem_ty, lhs_elem, rhs_elem, lhs_src, rhs_src, op, elem_idx)).toIntern();
             }
 
-            const result_val = try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = elem_vals },
-            } });
-            return .fromInterned(result_val);
+            return pt.aggregateValue(ty, elem_vals);
         },
         else => return modRemScalar(sema, block, ty, lhs_val, rhs_val, lhs_src, rhs_src, op, null),
     }
@@ -1022,14 +1017,11 @@ pub fn shlWithOverflow(
                 wr.* = elem_result.wrapped_result.toIntern();
             }
             return .{
-                .overflow_bit = .fromInterned(try pt.intern(.{ .aggregate = .{
-                    .ty = (try pt.vectorType(.{ .len = @intCast(overflow_bits.len), .child = .u1_type })).toIntern(),
-                    .storage = .{ .elems = overflow_bits },
-                } })),
-                .wrapped_result = .fromInterned(try pt.intern(.{ .aggregate = .{
-                    .ty = lhs_ty.toIntern(),
-                    .storage = .{ .elems = wrapped_results },
-                } })),
+                .overflow_bit = try pt.aggregateValue(try pt.vectorType(.{
+                    .len = @intCast(overflow_bits.len),
+                    .child = .u1_type,
+                }), overflow_bits),
+                .wrapped_result = try pt.aggregateValue(lhs_ty, wrapped_results),
             };
         },
         else => unreachable,
@@ -1222,10 +1214,7 @@ pub fn truncate(
                     dest_bits,
                 )).toIntern();
             }
-            return .fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = dest_ty.toIntern(),
-                .storage = .{ .elems = elem_vals },
-            } }));
+            return pt.aggregateValue(dest_ty, elem_vals);
         },
         else => unreachable,
     }
@@ -1256,10 +1245,7 @@ pub fn bitwiseNot(sema: *Sema, ty: Type, val: Value) CompileError!Value {
                 else
                     (try intBitwiseNot(sema, elem_val, elem_ty)).toIntern();
             }
-            return .fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = elem_vals },
-            } }));
+            return pt.aggregateValue(ty, elem_vals);
         },
         else => unreachable,
     }
@@ -1357,10 +1343,7 @@ pub fn bitReverse(sema: *Sema, val: Value, ty: Type) CompileError!Value {
                 else
                     (try intBitReverse(sema, elem_val, elem_ty)).toIntern();
             }
-            return .fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = elem_vals },
-            } }));
+            return pt.aggregateValue(ty, elem_vals);
         },
         else => unreachable,
     }
@@ -1389,10 +1372,7 @@ pub fn byteSwap(sema: *Sema, val: Value, ty: Type) CompileError!Value {
                 else
                     (try intByteSwap(sema, elem_val, elem_ty)).toIntern();
             }
-            return .fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = elem_vals },
-            } }));
+            return pt.aggregateValue(ty, elem_vals);
         },
         else => unreachable,
     }
@@ -980,13 +980,14 @@ fn unflattenArray(
     elems: []const InternPool.Index,
     next_idx: *u64,
 ) Allocator.Error!Value {
-    const zcu = sema.pt.zcu;
+    const pt = sema.pt;
+    const zcu = pt.zcu;
     const arena = sema.arena;
 
     if (ty.zigTypeTag(zcu) != .array) {
         const val = Value.fromInterned(elems[@intCast(next_idx.*)]);
         next_idx.* += 1;
-        return sema.pt.getCoerced(val, ty);
+        return pt.getCoerced(val, ty);
     }
 
     const elem_ty = ty.childType(zcu);
@@ -998,10 +999,7 @@ fn unflattenArray(
         // TODO: validate sentinel
         _ = try unflattenArray(sema, elem_ty, elems, next_idx);
     }
-    return Value.fromInterned(try sema.pt.intern(.{ .aggregate = .{
-        .ty = ty.toIntern(),
-        .storage = .{ .elems = buf },
-    } }));
+    return pt.aggregateValue(ty, buf);
 }
 
 /// Given a `MutableValue` representing a potentially-nested array, treats `index` as an index into
src/Type.zig (32 changed lines)
@@ -2490,15 +2490,11 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
 
         inline .array_type, .vector_type => |seq_type, seq_tag| {
             const has_sentinel = seq_tag == .array_type and seq_type.sentinel != .none;
-            if (seq_type.len + @intFromBool(has_sentinel) == 0) return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = &.{} },
-            } }));
+            if (seq_type.len + @intFromBool(has_sentinel) == 0) {
+                return try pt.aggregateValue(ty, &.{});
+            }
             if (try Type.fromInterned(seq_type.child).onePossibleValue(pt)) |opv| {
-                return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                    .ty = ty.toIntern(),
-                    .storage = .{ .repeated_elem = opv.toIntern() },
-                } }));
+                return try pt.aggregateSplatValue(ty, opv);
             }
             return null;
         },
@@ -2567,10 +2563,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
 
             // In this case the struct has no runtime-known fields and
             // therefore has one possible value.
-            return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = field_vals },
-            } }));
+            return try pt.aggregateValue(ty, field_vals);
         },
 
         .tuple_type => |tuple| {
@@ -2582,10 +2575,7 @@ pub fn onePossibleValue(starting_type: Type, pt: Zcu.PerThread) !?Value {
             // TODO: write something like getCoercedInts to avoid needing to dupe
             const duped_values = try zcu.gpa.dupe(InternPool.Index, tuple.values.get(ip));
             defer zcu.gpa.free(duped_values);
-            return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = duped_values },
-            } }));
+            return try pt.aggregateValue(ty, duped_values);
         },
 
         .union_type => {
@@ -2957,10 +2947,7 @@ pub fn getParentNamespace(ty: Type, zcu: *Zcu) InternPool.OptionalNamespaceIndex
 pub fn minInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
     const zcu = pt.zcu;
     const scalar = try minIntScalar(ty.scalarType(zcu), pt, dest_ty.scalarType(zcu));
-    return if (ty.zigTypeTag(zcu) == .vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
-        .ty = dest_ty.toIntern(),
-        .storage = .{ .repeated_elem = scalar.toIntern() },
-    } })) else scalar;
+    return if (ty.zigTypeTag(zcu) == .vector) pt.aggregateSplatValue(dest_ty, scalar) else scalar;
 }
 
 /// Asserts that the type is an integer.
@@ -2987,10 +2974,7 @@ pub fn minIntScalar(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
 pub fn maxInt(ty: Type, pt: Zcu.PerThread, dest_ty: Type) !Value {
     const zcu = pt.zcu;
     const scalar = try maxIntScalar(ty.scalarType(zcu), pt, dest_ty.scalarType(zcu));
-    return if (ty.zigTypeTag(zcu) == .vector) Value.fromInterned(try pt.intern(.{ .aggregate = .{
-        .ty = dest_ty.toIntern(),
-        .storage = .{ .repeated_elem = scalar.toIntern() },
-    } })) else scalar;
+    return if (ty.zigTypeTag(zcu) == .vector) pt.aggregateSplatValue(dest_ty, scalar) else scalar;
 }
 
 /// The returned Value will have type dest_ty.
src/Value.zig (103 changed lines)
@@ -1586,10 +1586,7 @@ pub fn sqrt(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try sqrtScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return sqrtScalar(val, float_type, pt);
 }
@@ -1620,10 +1617,7 @@ pub fn sin(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try sinScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return sinScalar(val, float_type, pt);
 }
@@ -1654,10 +1648,7 @@ pub fn cos(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try cosScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return cosScalar(val, float_type, pt);
 }
@@ -1688,10 +1679,7 @@ pub fn tan(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try tanScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return tanScalar(val, float_type, pt);
 }
@@ -1722,10 +1710,7 @@ pub fn exp(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try expScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return expScalar(val, float_type, pt);
 }
@@ -1756,10 +1741,7 @@ pub fn exp2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try exp2Scalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return exp2Scalar(val, float_type, pt);
 }
@@ -1790,10 +1772,7 @@ pub fn log(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !V
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try logScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return logScalar(val, float_type, pt);
 }
@@ -1824,10 +1803,7 @@ pub fn log2(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try log2Scalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return log2Scalar(val, float_type, pt);
 }
@@ -1858,10 +1834,7 @@ pub fn log10(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try log10Scalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return log10Scalar(val, float_type, pt);
 }
@@ -1892,10 +1865,7 @@ pub fn abs(val: Value, ty: Type, arena: Allocator, pt: Zcu.PerThread) !Value {
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try absScalar(elem_val, scalar_ty, pt, arena)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = ty.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(ty, result_data);
     }
     return absScalar(val, ty, pt, arena);
 }
@@ -1945,10 +1915,7 @@ pub fn floor(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try floorScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return floorScalar(val, float_type, pt);
 }
@@ -1979,10 +1946,7 @@ pub fn ceil(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread) !
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try ceilScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return ceilScalar(val, float_type, pt);
 }
@@ -2013,10 +1977,7 @@ pub fn round(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try roundScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return roundScalar(val, float_type, pt);
 }
@@ -2047,10 +2008,7 @@ pub fn trunc(val: Value, float_type: Type, arena: Allocator, pt: Zcu.PerThread)
             const elem_val = try val.elemValue(pt, i);
             scalar.* = (try truncScalar(elem_val, scalar_ty, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return truncScalar(val, float_type, pt);
 }
@@ -2090,10 +2048,7 @@ pub fn mulAdd(
             const addend_elem = try addend.elemValue(pt, i);
             scalar.* = (try mulAddScalar(scalar_ty, mulend1_elem, mulend2_elem, addend_elem, pt)).toIntern();
         }
-        return Value.fromInterned(try pt.intern(.{ .aggregate = .{
-            .ty = float_type.toIntern(),
-            .storage = .{ .elems = result_data },
-        } }));
+        return pt.aggregateValue(float_type, result_data);
     }
     return mulAddScalar(float_type, mulend1, mulend2, addend, pt);
 }
@@ -2978,17 +2933,17 @@ pub fn resolveLazy(
                     }
                     if (resolved_elems.len > 0) resolved_elems[i] = resolved_elem;
                 }
-                return if (resolved_elems.len == 0) val else Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                    .ty = aggregate.ty,
-                    .storage = .{ .elems = resolved_elems },
-                } }));
+                return if (resolved_elems.len == 0)
+                    val
+                else
+                    pt.aggregateValue(.fromInterned(aggregate.ty), resolved_elems);
             },
             .repeated_elem => |elem| {
-                const resolved_elem = (try Value.fromInterned(elem).resolveLazy(arena, pt)).toIntern();
-                return if (resolved_elem == elem) val else Value.fromInterned(try pt.intern(.{ .aggregate = .{
-                    .ty = aggregate.ty,
-                    .storage = .{ .repeated_elem = resolved_elem },
-                } }));
+                const resolved_elem = try Value.fromInterned(elem).resolveLazy(arena, pt);
+                return if (resolved_elem.toIntern() == elem)
+                    val
+                else
+                    pt.aggregateSplatValue(.fromInterned(aggregate.ty), resolved_elem);
             },
         },
         .un => |un| {
@@ -3205,10 +3160,7 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
                 const field_ty = ty.fieldType(field_idx, zcu);
                 field_val.* = (try uninterpret(@field(val, field.name), field_ty, pt)).toIntern();
             }
-            return .fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = &field_vals },
-            } }));
+            return pt.aggregateValue(ty, &field_vals);
         },
         .by_name => {
             const struct_obj = zcu.typeToStruct(ty) orelse return error.TypeMismatch;
@@ -3230,10 +3182,7 @@ pub fn uninterpret(val: anytype, ty: Type, pt: Zcu.PerThread) error{ OutOfMemory
                     field_val.* = default_init;
                 }
             }
-            return .fromInterned(try pt.intern(.{ .aggregate = .{
-                .ty = ty.toIntern(),
-                .storage = .{ .elems = field_vals },
-            } }));
+            return pt.aggregateValue(ty, &field_vals);
         },
     },
 };
@@ -3327,10 +3327,7 @@ pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
                 .byte_offset = 0,
             } }),
         };
-        test_fn_val.* = try pt.intern(.{ .aggregate = .{
-            .ty = test_fn_ty.toIntern(),
-            .storage = .{ .elems = &test_fn_fields },
-        } });
+        test_fn_val.* = (try pt.aggregateValue(test_fn_ty, &test_fn_fields)).toIntern();
     }
 
     const array_ty = try pt.arrayType(.{
@@ -3338,13 +3335,9 @@ pub fn populateTestFunctions(pt: Zcu.PerThread) Allocator.Error!void {
             .child = test_fn_ty.toIntern(),
             .sentinel = .none,
         });
-        const array_val = try pt.intern(.{ .aggregate = .{
-            .ty = array_ty.toIntern(),
-            .storage = .{ .elems = test_fn_vals },
-        } });
         break :array .{
             .orig_ty = (try pt.singleConstPtrType(array_ty)).toIntern(),
-            .val = array_val,
+            .val = (try pt.aggregateValue(array_ty, test_fn_vals)).toIntern(),
         };
     };
@@ -3685,15 +3678,15 @@ pub fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Ind
 }
 
 /// Asserts that `ty` is either an array or a vector.
-pub fn aggregateSplatValue(pt: Zcu.PerThread, ty: Type, repeated: Value) Allocator.Error!Value {
+pub fn aggregateSplatValue(pt: Zcu.PerThread, ty: Type, repeated_elem: Value) Allocator.Error!Value {
     switch (ty.zigTypeTag(pt.zcu)) {
         .array, .vector => {},
         else => unreachable,
     }
-    if (repeated.isUndef(pt.zcu)) return pt.undefValue(ty);
+    if (repeated_elem.isUndef(pt.zcu)) return pt.undefValue(ty);
     return .fromInterned(try pt.intern(.{ .aggregate = .{
         .ty = ty.toIntern(),
-        .storage = .{ .repeated_elem = repeated.toIntern() },
+        .storage = .{ .repeated_elem = repeated_elem.toIntern() },
     } }));
 }
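
The body of `aggregateValue` itself is outside this diff; only its signature and closing brace appear in the hunk above. Based on the call sites it replaces, a plausible sketch of the helper, an assumption rather than the actual implementation, is:

    pub fn aggregateValue(pt: Zcu.PerThread, ty: Type, elems: []const InternPool.Index) Allocator.Error!Value {
        // Same aggregate key the call sites above used to intern by hand.
        return .fromInterned(try pt.intern(.{ .aggregate = .{
            .ty = ty.toIntern(),
            .storage = .{ .elems = elems },
        } }));
    }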