stage2: fix hashing and comparison design flaw with Value

* `Value.toType` accepts a buffer parameter instead of an allocator
   parameter and can no longer fail.
 * Module: remove the unused `mod: *Module` parameter from various
   functions.
 * `Value.compare` now accepts a `Type` parameter which indicates the
   type of both operands. There is also a `Value.compareHetero` which
   accepts only Value parameters and supports comparing mixed types.
   Likewise, `Value.eql` requires a `Type` parameter.
 * `Value.hash` is removed; instead the hash map context structs now
   have a `ty: Type` field, and the hash function lives there, where it
   has access to a Value's Type when it computes a hash.
   - This allowed the hash function to be greatly simplified and made
     sound, in the sense that equal Values — even ones with different
     representations — always hash to the same thing.
 * Sema: Fix source location of zirCmp when an operand is runtime known
   but needs to be comptime known.
 * Remove unused target parameter from `Value.floatCast`.
This commit is contained in:
Andrew Kelley 2021-07-30 16:05:46 -07:00
parent 84039a57e4
commit 507dc1f2e7
6 changed files with 230 additions and 443 deletions

View File

@ -503,7 +503,8 @@ pub fn typeOfIndex(air: Air, inst: Air.Inst.Index) Type {
pub fn getRefType(air: Air, ref: Air.Inst.Ref) Type {
const ref_int = @enumToInt(ref);
if (ref_int < Air.Inst.Ref.typed_value_map.len) {
return Air.Inst.Ref.typed_value_map[ref_int].val.toType(undefined) catch unreachable;
var buffer: Value.ToTypeBuffer = undefined;
return Air.Inst.Ref.typed_value_map[ref_int].val.toType(&buffer);
}
const inst_index = ref_int - Air.Inst.Ref.typed_value_map.len;
const air_tags = air.instructions.items(.tag);

View File

@ -4299,7 +4299,6 @@ pub fn simplePtrType(
}
pub fn ptrType(
mod: *Module,
arena: *Allocator,
elem_ty: Type,
sentinel: ?Value,
@ -4311,7 +4310,6 @@ pub fn ptrType(
@"volatile": bool,
size: std.builtin.TypeInfo.Pointer.Size,
) Allocator.Error!Type {
_ = mod;
assert(host_size == 0 or bit_offset < host_size * 8);
// TODO check if type can be represented by simplePtrType
@ -4328,8 +4326,7 @@ pub fn ptrType(
});
}
pub fn optionalType(mod: *Module, arena: *Allocator, child_type: Type) Allocator.Error!Type {
_ = mod;
pub fn optionalType(arena: *Allocator, child_type: Type) Allocator.Error!Type {
switch (child_type.tag()) {
.single_const_pointer => return Type.Tag.optional_single_const_pointer.create(
arena,
@ -4344,16 +4341,14 @@ pub fn optionalType(mod: *Module, arena: *Allocator, child_type: Type) Allocator
}
pub fn arrayType(
mod: *Module,
arena: *Allocator,
len: u64,
sentinel: ?Value,
elem_type: Type,
) Allocator.Error!Type {
_ = mod;
if (elem_type.eql(Type.initTag(.u8))) {
if (sentinel) |some| {
if (some.eql(Value.initTag(.zero))) {
if (some.eql(Value.initTag(.zero), elem_type)) {
return Type.Tag.array_u8_sentinel_0.create(arena, len);
}
} else {
@ -4376,12 +4371,10 @@ pub fn arrayType(
}
pub fn errorUnionType(
mod: *Module,
arena: *Allocator,
error_set: Type,
payload: Type,
) Allocator.Error!Type {
_ = mod;
assert(error_set.zigTypeTag() == .ErrorSet);
if (error_set.eql(Type.initTag(.anyerror)) and payload.eql(Type.initTag(.void))) {
return Type.initTag(.anyerror_void_error_union);

View File

@ -1,5 +1,6 @@
const std = @import("std");
const Order = std.math.Order;
const Type = @import("type.zig").Type;
const Value = @import("value.zig").Value;
const RangeSet = @This();
const SwitchProngSrc = @import("Module.zig").SwitchProngSrc;
@ -22,9 +23,15 @@ pub fn deinit(self: *RangeSet) void {
self.ranges.deinit();
}
pub fn add(self: *RangeSet, first: Value, last: Value, src: SwitchProngSrc) !?SwitchProngSrc {
pub fn add(
self: *RangeSet,
first: Value,
last: Value,
ty: Type,
src: SwitchProngSrc,
) !?SwitchProngSrc {
for (self.ranges.items) |range| {
if (last.compare(.gte, range.first) and first.compare(.lte, range.last)) {
if (last.compare(.gte, range.first, ty) and first.compare(.lte, range.last, ty)) {
return range.src; // They overlap.
}
}
@ -37,18 +44,18 @@ pub fn add(self: *RangeSet, first: Value, last: Value, src: SwitchProngSrc) !?Sw
}
/// Assumes a and b do not overlap
fn lessThan(_: void, a: Range, b: Range) bool {
return a.first.compare(.lt, b.first);
// Comparator for std.sort.sort; `ty` is the sort context and gives the
// type of every range bound so the typed `Value.compare` can be used.
fn lessThan(ty: Type, a: Range, b: Range) bool {
return a.first.compare(.lt, b.first, ty);
}
pub fn spans(self: *RangeSet, first: Value, last: Value) !bool {
pub fn spans(self: *RangeSet, first: Value, last: Value, ty: Type) !bool {
if (self.ranges.items.len == 0)
return false;
std.sort.sort(Range, self.ranges.items, {}, lessThan);
std.sort.sort(Range, self.ranges.items, ty, lessThan);
if (!self.ranges.items[0].first.eql(first) or
!self.ranges.items[self.ranges.items.len - 1].last.eql(last))
if (!self.ranges.items[0].first.eql(first, ty) or
!self.ranges.items[self.ranges.items.len - 1].last.eql(last, ty))
{
return false;
}

View File

@ -634,7 +634,9 @@ fn analyzeAsType(
const wanted_type = Type.initTag(.@"type");
const coerced_inst = try sema.coerce(block, wanted_type, air_inst, src);
const val = try sema.resolveConstValue(block, src, coerced_inst);
return val.toType(sema.arena);
var buffer: Value.ToTypeBuffer = undefined;
const ty = val.toType(&buffer);
return ty.copy(sema.arena);
}
/// May return Value Tags: `variable`, `undef`.
@ -1022,7 +1024,9 @@ fn zirEnumDecl(
if (bag != 0) break true;
} else false;
if (any_values) {
try enum_obj.values.ensureCapacity(&new_decl_arena.allocator, fields_len);
try enum_obj.values.ensureTotalCapacityContext(&new_decl_arena.allocator, fields_len, .{
.ty = tag_ty,
});
}
{
@ -1100,10 +1104,10 @@ fn zirEnumDecl(
// that points to this default value expression rather than the struct.
// But only resolve the source location if we need to emit a compile error.
const tag_val = (try sema.resolveInstConst(block, src, tag_val_ref)).val;
enum_obj.values.putAssumeCapacityNoClobber(tag_val, {});
enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = tag_ty });
} else if (any_values) {
const tag_val = try Value.Tag.int_u64.create(&new_decl_arena.allocator, field_i);
enum_obj.values.putAssumeCapacityNoClobber(tag_val, {});
enum_obj.values.putAssumeCapacityNoClobberContext(tag_val, {}, .{ .ty = tag_ty });
}
}
@ -2516,7 +2520,7 @@ fn zirOptionalType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Compi
const inst_data = sema.code.instructions.items(.data)[inst].un_node;
const src = inst_data.src();
const child_type = try sema.resolveType(block, src, inst_data.operand);
const opt_type = try sema.mod.optionalType(sema.arena, child_type);
const opt_type = try Module.optionalType(sema.arena, child_type);
return sema.addType(opt_type);
}
@ -2547,11 +2551,10 @@ fn zirArrayType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileE
const tracy = trace(@src());
defer tracy.end();
// TODO these should be lazily evaluated
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
const len = try sema.resolveInstConst(block, .unneeded, bin_inst.lhs);
const elem_type = try sema.resolveType(block, .unneeded, bin_inst.rhs);
const array_ty = try sema.mod.arrayType(sema.arena, len.val.toUnsignedInt(), null, elem_type);
const array_ty = try Module.arrayType(sema.arena, len.val.toUnsignedInt(), null, elem_type);
return sema.addType(array_ty);
}
@ -2560,13 +2563,12 @@ fn zirArrayTypeSentinel(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index)
const tracy = trace(@src());
defer tracy.end();
// TODO these should be lazily evaluated
const inst_data = sema.code.instructions.items(.data)[inst].array_type_sentinel;
const len = try sema.resolveInstConst(block, .unneeded, inst_data.len);
const extra = sema.code.extraData(Zir.Inst.ArrayTypeSentinel, inst_data.payload_index).data;
const sentinel = try sema.resolveInstConst(block, .unneeded, extra.sentinel);
const elem_type = try sema.resolveType(block, .unneeded, extra.elem_type);
const array_ty = try sema.mod.arrayType(sema.arena, len.val.toUnsignedInt(), sentinel.val, elem_type);
const array_ty = try Module.arrayType(sema.arena, len.val.toUnsignedInt(), sentinel.val, elem_type);
return sema.addType(array_ty);
}
@ -2599,7 +2601,7 @@ fn zirErrorUnionType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Com
error_union.elemType(),
});
}
const err_union_ty = try sema.mod.errorUnionType(sema.arena, error_union, payload);
const err_union_ty = try Module.errorUnionType(sema.arena, error_union, payload);
return sema.addType(err_union_ty);
}
@ -3890,6 +3892,7 @@ fn analyzeSwitch(
block,
&range_set,
item_ref,
operand_ty,
src_node_offset,
.{ .scalar = scalar_i },
);
@ -3912,6 +3915,7 @@ fn analyzeSwitch(
block,
&range_set,
item_ref,
operand_ty,
src_node_offset,
.{ .multi = .{ .prong = multi_i, .item = @intCast(u32, item_i) } },
);
@ -3929,6 +3933,7 @@ fn analyzeSwitch(
&range_set,
item_first,
item_last,
operand_ty,
src_node_offset,
.{ .range = .{ .prong = multi_i, .item = range_i } },
);
@ -3945,7 +3950,7 @@ fn analyzeSwitch(
const min_int = try operand_ty.minInt(&arena, mod.getTarget());
const max_int = try operand_ty.maxInt(&arena, mod.getTarget());
if (try range_set.spans(min_int, max_int)) {
if (try range_set.spans(min_int, max_int, operand_ty)) {
if (special_prong == .@"else") {
return mod.fail(
&block.base,
@ -4050,7 +4055,7 @@ fn analyzeSwitch(
);
}
var seen_values = ValueSrcMap.init(gpa);
var seen_values = ValueSrcMap.initContext(gpa, .{ .ty = operand_ty });
defer seen_values.deinit();
var extra_index: usize = special.end;
@ -4161,7 +4166,7 @@ fn analyzeSwitch(
const item = sema.resolveInst(item_ref);
// Validation above ensured these will succeed.
const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable;
if (operand_val.eql(item_val)) {
if (operand_val.eql(item_val, operand_ty)) {
return sema.resolveBlockBody(block, src, &child_block, body, merges);
}
}
@ -4183,7 +4188,7 @@ fn analyzeSwitch(
const item = sema.resolveInst(item_ref);
// Validation above ensured these will succeed.
const item_val = sema.resolveConstValue(&child_block, .unneeded, item) catch unreachable;
if (operand_val.eql(item_val)) {
if (operand_val.eql(item_val, operand_ty)) {
return sema.resolveBlockBody(block, src, &child_block, body, merges);
}
}
@ -4198,8 +4203,8 @@ fn analyzeSwitch(
// Validation above ensured these will succeed.
const first_tv = sema.resolveInstConst(&child_block, .unneeded, item_first) catch unreachable;
const last_tv = sema.resolveInstConst(&child_block, .unneeded, item_last) catch unreachable;
if (Value.compare(operand_val, .gte, first_tv.val) and
Value.compare(operand_val, .lte, last_tv.val))
if (Value.compare(operand_val, .gte, first_tv.val, operand_ty) and
Value.compare(operand_val, .lte, last_tv.val, operand_ty))
{
return sema.resolveBlockBody(block, src, &child_block, body, merges);
}
@ -4450,12 +4455,13 @@ fn validateSwitchRange(
range_set: *RangeSet,
first_ref: Zir.Inst.Ref,
last_ref: Zir.Inst.Ref,
operand_ty: Type,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const first_val = (try sema.resolveSwitchItemVal(block, first_ref, src_node_offset, switch_prong_src, .first)).val;
const last_val = (try sema.resolveSwitchItemVal(block, last_ref, src_node_offset, switch_prong_src, .last)).val;
const maybe_prev_src = try range_set.add(first_val, last_val, switch_prong_src);
const maybe_prev_src = try range_set.add(first_val, last_val, operand_ty, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
@ -4464,11 +4470,12 @@ fn validateSwitchItem(
block: *Scope.Block,
range_set: *RangeSet,
item_ref: Zir.Inst.Ref,
operand_ty: Type,
src_node_offset: i32,
switch_prong_src: Module.SwitchProngSrc,
) CompileError!void {
const item_val = (try sema.resolveSwitchItemVal(block, item_ref, src_node_offset, switch_prong_src, .none)).val;
const maybe_prev_src = try range_set.add(item_val, item_val, switch_prong_src);
const maybe_prev_src = try range_set.add(item_val, item_val, operand_ty, switch_prong_src);
return sema.validateSwitchDupe(block, maybe_prev_src, switch_prong_src, src_node_offset);
}
@ -5137,20 +5144,26 @@ fn zirCmp(
const casted_lhs = try sema.coerce(block, resolved_type, lhs, lhs_src);
const casted_rhs = try sema.coerce(block, resolved_type, rhs, rhs_src);
if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
if (lhs_val.compare(op, rhs_val)) {
return Air.Inst.Ref.bool_true;
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(block, lhs_src, casted_lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, casted_rhs)) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(resolved_type);
}
if (lhs_val.compare(op, rhs_val, resolved_type)) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
} else {
return Air.Inst.Ref.bool_false;
break :src rhs_src;
}
} else {
break :src lhs_src;
}
}
};
try sema.requireRuntimeBlock(block, runtime_src);
try sema.requireRuntimeBlock(block, src);
const tag: Air.Inst.Tag = switch (op) {
.lt => .cmp_lt,
.lte => .cmp_lte,
@ -5626,7 +5639,7 @@ fn zirPtrTypeSimple(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) Comp
const inst_data = sema.code.instructions.items(.data)[inst].ptr_type_simple;
const elem_type = try sema.resolveType(block, .unneeded, inst_data.elem_type);
const ty = try sema.mod.ptrType(
const ty = try Module.ptrType(
sema.arena,
elem_type,
null,
@ -5680,7 +5693,7 @@ fn zirPtrType(sema: *Sema, block: *Scope.Block, inst: Zir.Inst.Index) CompileErr
const elem_type = try sema.resolveType(block, .unneeded, extra.data.elem_type);
const ty = try sema.mod.ptrType(
const ty = try Module.ptrType(
sema.arena,
elem_type,
sentinel,
@ -6569,7 +6582,7 @@ fn panicWithMsg(
const stack_trace_ty = try sema.resolveTypeFields(block, src, unresolved_stack_trace_ty);
const ptr_stack_trace_ty = try Module.simplePtrType(arena, stack_trace_ty, true, .One);
const null_stack_trace = try sema.addConstant(
try mod.optionalType(arena, ptr_stack_trace_ty),
try Module.optionalType(arena, ptr_stack_trace_ty),
Value.initTag(.null_value),
);
const args = try arena.create([2]Air.Inst.Ref);
@ -6713,7 +6726,8 @@ fn fieldVal(
},
.Type => {
const val = (try sema.resolveDefinedValue(block, object_src, object)).?;
const child_type = try val.toType(arena);
var to_type_buffer: Value.ToTypeBuffer = undefined;
const child_type = val.toType(&to_type_buffer);
switch (child_type.zigTypeTag()) {
.ErrorSet => {
// TODO resolve inferred error sets
@ -6733,7 +6747,7 @@ fn fieldVal(
} else (try mod.getErrorValue(field_name)).key;
return sema.addConstant(
child_type,
try child_type.copy(arena),
try Value.Tag.@"error".create(arena, .{ .name = name }),
);
},
@ -6781,7 +6795,7 @@ fn fieldVal(
};
const field_index_u32 = @intCast(u32, field_index);
const enum_val = try Value.Tag.enum_field_index.create(arena, field_index_u32);
return sema.addConstant(child_type, enum_val);
return sema.addConstant(try child_type.copy(arena), enum_val);
},
else => return mod.fail(&block.base, src, "type '{}' has no members", .{child_type}),
}
@ -6805,7 +6819,6 @@ fn fieldPtr(
// in `fieldVal`. This function takes a pointer and returns a pointer.
const mod = sema.mod;
const arena = sema.arena;
const object_ptr_src = src; // TODO better source location
const object_ptr_ty = sema.typeOf(object_ptr);
const object_ty = switch (object_ptr_ty.zigTypeTag()) {
@ -6887,7 +6900,8 @@ fn fieldPtr(
_ = try sema.resolveConstValue(block, object_ptr_src, object_ptr);
const result = try sema.analyzeLoad(block, src, object_ptr, object_ptr_src);
const val = (sema.resolveDefinedValue(block, src, result) catch unreachable).?;
const child_type = try val.toType(arena);
var to_type_buffer: Value.ToTypeBuffer = undefined;
const child_type = val.toType(&to_type_buffer);
switch (child_type.zigTypeTag()) {
.ErrorSet => {
// TODO resolve inferred error sets
@ -6902,15 +6916,14 @@ fn fieldPtr(
}
}
return mod.fail(&block.base, src, "no error named '{s}' in '{}'", .{
field_name,
child_type,
field_name, child_type,
});
} else (try mod.getErrorValue(field_name)).key;
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
child_type,
try child_type.copy(anon_decl.arena()),
try Value.Tag.@"error".create(anon_decl.arena(), .{ .name = name }),
));
},
@ -6960,7 +6973,7 @@ fn fieldPtr(
var anon_decl = try block.startAnonDecl();
defer anon_decl.deinit();
return sema.analyzeDeclRef(try anon_decl.finish(
child_type,
try child_type.copy(anon_decl.arena()),
try Value.Tag.enum_field_index.create(anon_decl.arena(), field_index_u32),
));
},
@ -7352,7 +7365,7 @@ fn coerce(
if (src_sentinel) |src_s| {
if (dst_sentinel) |dst_s| {
if (src_s.eql(dst_s)) {
if (src_s.eql(dst_s, dst_elem_type)) {
return sema.coerceArrayPtrToMany(block, dest_type, inst, inst_src);
}
}
@ -7474,7 +7487,7 @@ fn coerceNum(
}
} else if (dst_zig_tag == .ComptimeFloat or dst_zig_tag == .Float) {
if (src_zig_tag == .Float or src_zig_tag == .ComptimeFloat) {
const res = val.floatCast(sema.arena, dest_type, target) catch |err| switch (err) {
const res = val.floatCast(sema.arena, dest_type) catch |err| switch (err) {
error.Overflow => return sema.mod.fail(
&block.base,
inst_src,
@ -7813,12 +7826,12 @@ fn analyzeSlice(
array_type.sentinel()
else
slice_sentinel;
return_elem_type = try sema.mod.arrayType(sema.arena, len, array_sentinel, elem_type);
return_elem_type = try Module.arrayType(sema.arena, len, array_sentinel, elem_type);
return_ptr_size = .One;
}
}
}
const return_type = try sema.mod.ptrType(
const return_type = try Module.ptrType(
sema.arena,
return_elem_type,
if (end_opt == .none) slice_sentinel else null,
@ -7858,39 +7871,42 @@ fn cmpNumeric(
if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) {
if (lhs_ty.arrayLen() != rhs_ty.arrayLen()) {
return sema.mod.fail(&block.base, src, "vector length mismatch: {d} and {d}", .{
lhs_ty.arrayLen(),
rhs_ty.arrayLen(),
lhs_ty.arrayLen(), rhs_ty.arrayLen(),
});
}
return sema.mod.fail(&block.base, src, "TODO implement support for vectors in cmpNumeric", .{});
} else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) {
return sema.mod.fail(&block.base, src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{
lhs_ty,
rhs_ty,
lhs_ty, rhs_ty,
});
}
if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(Type.initTag(.bool));
}
if (Value.compare(lhs_val, op, rhs_val)) {
return Air.Inst.Ref.bool_true;
const runtime_src: LazySrcLoc = src: {
if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| {
if (lhs_val.isUndef() or rhs_val.isUndef()) {
return sema.addConstUndef(Type.initTag(.bool));
}
if (Value.compareHetero(lhs_val, op, rhs_val)) {
return Air.Inst.Ref.bool_true;
} else {
return Air.Inst.Ref.bool_false;
}
} else {
return Air.Inst.Ref.bool_false;
break :src rhs_src;
}
} else {
break :src lhs_src;
}
}
};
// TODO handle comparisons against lazy zero values
// Some values can be compared against zero without being runtime known or without forcing
// a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to
// always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout
// of this function if we don't need to.
try sema.requireRuntimeBlock(block, runtime_src);
// It must be a runtime comparison.
try sema.requireRuntimeBlock(block, src);
// For floats, emit a float comparison instruction.
const lhs_is_float = switch (lhs_ty_tag) {
.Float, .ComptimeFloat => true,

View File

@ -426,7 +426,7 @@ pub const Type = extern union {
const sentinel_b = info_b.sentinel;
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
if (!sa.eql(sb))
if (!sa.eql(sb, info_a.pointee_type))
return false;
} else {
return false;
@ -455,13 +455,14 @@ pub const Type = extern union {
.Array, .Vector => {
if (a.arrayLen() != b.arrayLen())
return false;
if (!a.elemType().eql(b.elemType()))
const elem_ty = a.elemType();
if (!elem_ty.eql(b.elemType()))
return false;
const sentinel_a = a.sentinel();
const sentinel_b = b.sentinel();
if (sentinel_a) |sa| {
if (sentinel_b) |sb| {
return sa.eql(sb);
return sa.eql(sb, elem_ty);
} else {
return false;
}
@ -2744,29 +2745,37 @@ pub const Type = extern union {
return @as(usize, payload.data);
}
const S = struct {
fn fieldWithRange(int_val: Value, end: usize) ?usize {
fn fieldWithRange(int_ty: Type, int_val: Value, end: usize) ?usize {
if (int_val.compareWithZero(.lt)) return null;
var end_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = end,
};
const end_val = Value.initPayload(&end_payload.base);
if (int_val.compare(.gte, end_val)) return null;
if (int_val.compare(.gte, end_val, int_ty)) return null;
return @intCast(usize, int_val.toUnsignedInt());
}
};
switch (ty.tag()) {
.enum_full, .enum_nonexhaustive => {
const enum_full = ty.cast(Payload.EnumFull).?.data;
const tag_ty = enum_full.tag_ty;
if (enum_full.values.count() == 0) {
return S.fieldWithRange(enum_tag, enum_full.fields.count());
return S.fieldWithRange(tag_ty, enum_tag, enum_full.fields.count());
} else {
return enum_full.values.getIndex(enum_tag);
return enum_full.values.getIndexContext(enum_tag, .{ .ty = tag_ty });
}
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return S.fieldWithRange(enum_tag, enum_simple.fields.count());
const fields_len = enum_simple.fields.count();
const bits = std.math.log2_int_ceil(usize, fields_len);
var buffer: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = bits,
};
const tag_ty = Type.initPayload(&buffer.base);
return S.fieldWithRange(tag_ty, enum_tag, fields_len);
},
.atomic_ordering,
.atomic_rmw_op,
@ -2875,14 +2884,14 @@ pub const Type = extern union {
/// Asserts the type is an enum.
pub fn enumHasInt(ty: Type, int: Value, target: Target) bool {
const S = struct {
fn intInRange(int_val: Value, end: usize) bool {
fn intInRange(tag_ty: Type, int_val: Value, end: usize) bool {
if (int_val.compareWithZero(.lt)) return false;
var end_payload: Value.Payload.U64 = .{
.base = .{ .tag = .int_u64 },
.data = end,
};
const end_val = Value.initPayload(&end_payload.base);
if (int_val.compare(.gte, end_val)) return false;
if (int_val.compare(.gte, end_val, tag_ty)) return false;
return true;
}
};
@ -2890,15 +2899,23 @@ pub const Type = extern union {
.enum_nonexhaustive => return int.intFitsInType(ty, target),
.enum_full => {
const enum_full = ty.castTag(.enum_full).?.data;
const tag_ty = enum_full.tag_ty;
if (enum_full.values.count() == 0) {
return S.intInRange(int, enum_full.fields.count());
return S.intInRange(tag_ty, int, enum_full.fields.count());
} else {
return enum_full.values.contains(int);
return enum_full.values.containsContext(int, .{ .ty = tag_ty });
}
},
.enum_simple => {
const enum_simple = ty.castTag(.enum_simple).?.data;
return S.intInRange(int, enum_simple.fields.count());
const fields_len = enum_simple.fields.count();
const bits = std.math.log2_int_ceil(usize, fields_len);
var buffer: Payload.Bits = .{
.base = .{ .tag = .int_unsigned },
.data = bits,
};
const tag_ty = Type.initPayload(&buffer.base);
return S.intInRange(tag_ty, int, fields_len);
},
.atomic_ordering,
.atomic_rmw_op,

View File

@ -653,8 +653,10 @@ pub const Value = extern union {
unreachable;
}
pub const ToTypeBuffer = Type.Payload.Bits;
/// Asserts that the value is representable as a type.
pub fn toType(self: Value, allocator: *Allocator) !Type {
pub fn toType(self: Value, buffer: *ToTypeBuffer) Type {
return switch (self.tag()) {
.ty => self.castTag(.ty).?.data,
.u1_type => Type.initTag(.u1),
@ -714,14 +716,13 @@ pub const Value = extern union {
.int_type => {
const payload = self.castTag(.int_type).?.data;
const new = try allocator.create(Type.Payload.Bits);
new.* = .{
buffer.* = .{
.base = .{
.tag = if (payload.signed) .int_signed else .int_unsigned,
},
.data = payload.bits,
};
return Type.initPayload(&new.base);
return Type.initPayload(&buffer.base);
},
.undef,
@ -958,9 +959,8 @@ pub const Value = extern union {
/// Converts an integer or a float to a float.
/// Returns `error.Overflow` if the value does not fit in the new type.
pub fn floatCast(self: Value, allocator: *Allocator, ty: Type, target: Target) !Value {
_ = target;
switch (ty.tag()) {
pub fn floatCast(self: Value, allocator: *Allocator, dest_ty: Type) !Value {
switch (dest_ty.tag()) {
.f16 => {
@panic("TODO add __trunctfhf2 to compiler-rt");
//const res = try Value.Tag.float_16.create(allocator, self.toFloat(f16));
@ -970,13 +970,13 @@ pub const Value = extern union {
},
.f32 => {
const res = try Value.Tag.float_32.create(allocator, self.toFloat(f32));
if (!self.eql(res))
if (!self.eql(res, dest_ty))
return error.Overflow;
return res;
},
.f64 => {
const res = try Value.Tag.float_64.create(allocator, self.toFloat(f64));
if (!self.eql(res))
if (!self.eql(res, dest_ty))
return error.Overflow;
return res;
},
@ -1083,12 +1083,18 @@ pub const Value = extern union {
return lhs_bigint.order(rhs_bigint);
}
/// Asserts the value is comparable.
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool {
/// Asserts the value is comparable. Takes no type parameter because it
/// supports comparing operands of heterogeneous types via their ordering.
pub fn compareHetero(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool {
    const ordering = order(lhs, rhs);
    return ordering.compare(op);
}
/// Asserts the value is comparable. Both operands have type `ty`.
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, ty: Type) bool {
return switch (op) {
.eq => lhs.eql(rhs),
.neq => !lhs.eql(rhs),
else => order(lhs, rhs).compare(op),
.eq => lhs.eql(rhs, ty),
.neq => !lhs.eql(rhs, ty),
else => compareHetero(lhs, op, rhs),
};
}
@ -1097,11 +1103,11 @@ pub const Value = extern union {
return orderAgainstZero(lhs).compare(op);
}
/// TODO we can't compare value equality without also knowing the type to treat
/// the values as
pub fn eql(a: Value, b: Value) bool {
pub fn eql(a: Value, b: Value, ty: Type) bool {
const a_tag = a.tag();
const b_tag = b.tag();
assert(a_tag != .undef);
assert(b_tag != .undef);
if (a_tag == b_tag) {
switch (a_tag) {
.void_value, .null_value => return true,
@ -1118,230 +1124,106 @@ pub const Value = extern union {
else => {},
}
}
if (a.isType() and b.isType()) {
// 128 bytes should be enough to hold both types
var buf: [128]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&buf);
const a_type = a.toType(&fib.allocator) catch unreachable;
const b_type = b.toType(&fib.allocator) catch unreachable;
if (ty.zigTypeTag() == .Type) {
var buf_a: ToTypeBuffer = undefined;
var buf_b: ToTypeBuffer = undefined;
const a_type = a.toType(&buf_a);
const b_type = b.toType(&buf_b);
return a_type.eql(b_type);
}
return order(a, b).compare(.eq);
}
pub fn hash_u32(self: Value) u32 {
return @truncate(u32, self.hash());
}
pub const ArrayHashContext = struct {
ty: Type,
/// TODO we can't hash without also knowing the type of the value.
/// we have to hash as if there were a canonical value memory layout.
pub fn hash(self: Value) u64 {
var hasher = std.hash.Wyhash.init(0);
pub fn hash(self: @This(), v: Value) u32 {
const other_context: HashContext = .{ .ty = self.ty };
return @truncate(u32, other_context.hash(v));
}
pub fn eql(self: @This(), a: Value, b: Value) bool {
return a.eql(b, self.ty);
}
};
switch (self.tag()) {
.u1_type,
.u8_type,
.i8_type,
.u16_type,
.i16_type,
.u32_type,
.i32_type,
.u64_type,
.i64_type,
.u128_type,
.i128_type,
.usize_type,
.isize_type,
.c_short_type,
.c_ushort_type,
.c_int_type,
.c_uint_type,
.c_long_type,
.c_ulong_type,
.c_longlong_type,
.c_ulonglong_type,
.c_longdouble_type,
.f16_type,
.f32_type,
.f64_type,
.f128_type,
.c_void_type,
.bool_type,
.void_type,
.type_type,
.anyerror_type,
.comptime_int_type,
.comptime_float_type,
.noreturn_type,
.null_type,
.undefined_type,
.fn_noreturn_no_args_type,
.fn_void_no_args_type,
.fn_naked_noreturn_no_args_type,
.fn_ccc_void_no_args_type,
.single_const_pointer_to_comptime_int_type,
.anyframe_type,
.const_slice_u8_type,
.enum_literal_type,
.ty,
.abi_align_default,
=> {
// Directly return Type.hash, toType can only fail for .int_type.
var allocator = std.heap.FixedBufferAllocator.init(&[_]u8{});
return (self.toType(&allocator.allocator) catch unreachable).hash();
},
.int_type => {
const payload = self.castTag(.int_type).?.data;
var int_payload = Type.Payload.Bits{
.base = .{
.tag = if (payload.signed) .int_signed else .int_unsigned,
},
.data = payload.bits,
};
return Type.initPayload(&int_payload.base).hash();
},
pub const HashContext = struct {
ty: Type,
.empty_struct_value,
.empty_array,
=> {},
pub fn hash(self: @This(), v: Value) u64 {
var hasher = std.hash.Wyhash.init(0);
.undef,
.null_value,
.void_value,
.unreachable_value,
=> std.hash.autoHash(&hasher, self.tag()),
switch (self.ty.zigTypeTag()) {
.BoundFn => unreachable, // TODO remove this from the language
.zero, .bool_false => std.hash.autoHash(&hasher, @as(u64, 0)),
.one, .bool_true => std.hash.autoHash(&hasher, @as(u64, 1)),
.Void,
.NoReturn,
.Undefined,
.Null,
=> {},
.float_16, .float_32, .float_64, .float_128 => {
@panic("TODO implement Value.hash for floats");
},
.enum_literal => {
const payload = self.castTag(.enum_literal).?;
hasher.update(payload.data);
},
.enum_field_index => {
const payload = self.castTag(.enum_field_index).?;
std.hash.autoHash(&hasher, payload.data);
},
.bytes => {
const payload = self.castTag(.bytes).?;
hasher.update(payload.data);
},
.repeated => {
@panic("TODO Value.hash for repeated");
},
.array => {
@panic("TODO Value.hash for array");
},
.slice => {
@panic("TODO Value.hash for slice");
},
.eu_payload_ptr => {
@panic("TODO Value.hash for eu_payload_ptr");
},
.int_u64 => {
const payload = self.castTag(.int_u64).?;
std.hash.autoHash(&hasher, payload.data);
},
.int_i64 => {
const payload = self.castTag(.int_i64).?;
std.hash.autoHash(&hasher, payload.data);
},
.comptime_alloc => {
const payload = self.castTag(.comptime_alloc).?;
std.hash.autoHash(&hasher, payload.data.val.hash());
},
.int_big_positive, .int_big_negative => {
var space: BigIntSpace = undefined;
const big = self.toBigInt(&space);
if (big.limbs.len == 1) {
// handle like {u,i}64 to ensure same hash as with Int{i,u}64
if (big.positive) {
std.hash.autoHash(&hasher, @as(u64, big.limbs[0]));
} else {
std.hash.autoHash(&hasher, @as(u64, @bitCast(usize, -@bitCast(isize, big.limbs[0]))));
}
} else {
.Type => {
var buf: ToTypeBuffer = undefined;
return v.toType(&buf).hash();
},
.Bool => {
std.hash.autoHash(&hasher, v.toBool());
},
.Int, .ComptimeInt => {
var space: BigIntSpace = undefined;
const big = v.toBigInt(&space);
std.hash.autoHash(&hasher, big.positive);
for (big.limbs) |limb| {
std.hash.autoHash(&hasher, limb);
}
}
},
.elem_ptr => {
const payload = self.castTag(.elem_ptr).?.data;
std.hash.autoHash(&hasher, payload.array_ptr.hash());
std.hash.autoHash(&hasher, payload.index);
},
.field_ptr => {
const payload = self.castTag(.field_ptr).?.data;
std.hash.autoHash(&hasher, payload.container_ptr.hash());
std.hash.autoHash(&hasher, payload.field_index);
},
.decl_ref => {
const decl = self.castTag(.decl_ref).?.data;
std.hash.autoHash(&hasher, decl);
},
.function => {
const func = self.castTag(.function).?.data;
std.hash.autoHash(&hasher, func);
},
.extern_fn => {
const decl = self.castTag(.extern_fn).?.data;
std.hash.autoHash(&hasher, decl);
},
.variable => {
const variable = self.castTag(.variable).?.data;
std.hash.autoHash(&hasher, variable);
},
.@"error" => {
const payload = self.castTag(.@"error").?.data;
hasher.update(payload.name);
},
.error_union => {
const payload = self.castTag(.error_union).?.data;
std.hash.autoHash(&hasher, payload.hash());
},
.inferred_alloc => unreachable,
},
.Float, .ComptimeFloat => {
@panic("TODO implement hashing float values");
},
.Pointer => {
@panic("TODO implement hashing pointer values");
},
.Array, .Vector => {
@panic("TODO implement hashing array/vector values");
},
.Struct => {
@panic("TODO implement hashing struct values");
},
.Optional => {
@panic("TODO implement hashing optional values");
},
.ErrorUnion => {
@panic("TODO implement hashing error union values");
},
.ErrorSet => {
@panic("TODO implement hashing error set values");
},
.Enum => {
@panic("TODO implement hashing enum values");
},
.Union => {
@panic("TODO implement hashing union values");
},
.Fn => {
@panic("TODO implement hashing function values");
},
.Opaque => {
@panic("TODO implement hashing opaque values");
},
.Frame => {
@panic("TODO implement hashing frame values");
},
.AnyFrame => {
@panic("TODO implement hashing anyframe values");
},
.EnumLiteral => {
@panic("TODO implement hashing enum literal values");
},
}
return hasher.final();
}
.manyptr_u8_type,
.manyptr_const_u8_type,
.atomic_ordering_type,
.atomic_rmw_op_type,
.calling_convention_type,
.float_mode_type,
.reduce_op_type,
.call_options_type,
.export_options_type,
.extern_options_type,
.@"struct",
.@"union",
=> @panic("TODO this hash function looks pretty broken. audit it"),
}
return hasher.final();
}
pub const ArrayHashContext = struct {
    /// The type of all Values hashed/compared through this context.
    /// Required because `Value.eql` needs the operands' type; a Value's
    /// meaning is not self-contained (see commit design: context structs
    /// carry a `ty: Type` field).
    ty: Type,

    pub fn hash(self: @This(), v: Value) u32 {
        // The type does not participate in the hash; it only matters that
        // equal values (as decided by `eql` below) hash identically.
        _ = self;
        return v.hash_u32();
    }

    pub fn eql(self: @This(), a: Value, b: Value) bool {
        // Both operands share `self.ty`; `Value.eql` requires it.
        return a.eql(b, self.ty);
    }
};
pub const HashContext = struct {
    /// The type of all Values hashed/compared through this context.
    /// `Value.eql` requires the operands' type, so the context must carry it;
    /// the previous field-less version could not (the old `eql` contained an
    /// unreachable duplicate `return` referencing a nonexistent `self.ty`).
    ty: Type,

    pub fn hash(self: @This(), v: Value) u64 {
        // The type does not participate in the hash; it only matters that
        // equal values (as decided by `eql` below) hash identically.
        _ = self;
        return v.hash();
    }

    pub fn eql(self: @This(), a: Value, b: Value) bool {
        // Both operands share `self.ty`; `Value.eql` requires it.
        return a.eql(b, self.ty);
    }
};
@ -1508,111 +1390,6 @@ pub const Value = extern union {
};
}
/// Returns whether this Value's tag represents a `type` (either one of the
/// pre-interned well-known type tags or the generic `.ty` / `.int_type`
/// payloads). Valid for all types. Asserts the value is not undefined.
/// TODO this function is a code smell and should be deleted
fn isType(val: Value) bool {
    return switch (val.tag()) {
        // Plain value tags: integers, floats, aggregates, pointers,
        // functions-as-values, errors, etc. None of these denote a type.
        .zero,
        .one,
        .empty_array,
        .bool_true,
        .bool_false,
        .function,
        .extern_fn,
        .variable,
        .int_u64,
        .int_i64,
        .int_big_positive,
        .int_big_negative,
        .comptime_alloc,
        .decl_ref,
        .elem_ptr,
        .field_ptr,
        .bytes,
        .repeated,
        .array,
        .slice,
        .float_16,
        .float_32,
        .float_64,
        .float_128,
        .void_value,
        .enum_literal,
        .enum_field_index,
        .@"error",
        .error_union,
        .empty_struct_value,
        .@"struct",
        .@"union",
        .null_value,
        .abi_align_default,
        .eu_payload_ptr,
        => false,

        // Tags that encode a Type, either via a payload (.ty, .int_type) or
        // as one of the pre-interned well-known types.
        .ty,
        .int_type,
        .u1_type,
        .u8_type,
        .i8_type,
        .u16_type,
        .i16_type,
        .u32_type,
        .i32_type,
        .u64_type,
        .i64_type,
        .u128_type,
        .i128_type,
        .usize_type,
        .isize_type,
        .c_short_type,
        .c_ushort_type,
        .c_int_type,
        .c_uint_type,
        .c_long_type,
        .c_ulong_type,
        .c_longlong_type,
        .c_ulonglong_type,
        .c_longdouble_type,
        .f16_type,
        .f32_type,
        .f64_type,
        .f128_type,
        .c_void_type,
        .bool_type,
        .void_type,
        .type_type,
        .anyerror_type,
        .comptime_int_type,
        .comptime_float_type,
        .noreturn_type,
        .null_type,
        .undefined_type,
        .fn_noreturn_no_args_type,
        .fn_void_no_args_type,
        .fn_naked_noreturn_no_args_type,
        .fn_ccc_void_no_args_type,
        .single_const_pointer_to_comptime_int_type,
        .anyframe_type,
        .const_slice_u8_type,
        .enum_literal_type,
        .manyptr_u8_type,
        .manyptr_const_u8_type,
        .atomic_ordering_type,
        .atomic_rmw_op_type,
        .calling_convention_type,
        .float_mode_type,
        .reduce_op_type,
        .call_options_type,
        .export_options_type,
        .extern_options_type,
        => true,

        // Asserted-away states: callers must not pass these.
        .undef,
        .unreachable_value,
        .inferred_alloc,
        => unreachable,
    };
}
/// This type is not copyable since it may contain pointers to its inner data.
pub const Payload = struct {
tag: Tag,
@ -1806,27 +1583,3 @@ pub const Value = extern union {
limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
};
};
test "hash same value different representation" {
    // The integer zero can be encoded four different ways; all of them must
    // produce the same hash so that hash-map lookups are representation-blind.
    const tag_zero = Value.initTag(.zero);

    var u64_zero_payload = Value.Payload.U64{
        .base = .{ .tag = .int_u64 },
        .data = 0,
    };
    const u64_zero = Value.initPayload(&u64_zero_payload.base);
    try std.testing.expectEqual(tag_zero.hash(), u64_zero.hash());

    var i64_zero_payload = Value.Payload.I64{
        .base = .{ .tag = .int_i64 },
        .data = 0,
    };
    const i64_zero = Value.initPayload(&i64_zero_payload.base);
    try std.testing.expectEqual(u64_zero.hash(), i64_zero.hash());

    // Negative-tagged big int with a zero limb is still zero.
    var big_zero_payload = Value.Payload.BigInt{
        .base = .{ .tag = .int_big_negative },
        .data = &[_]std.math.big.Limb{0},
    };
    const big_zero = Value.initPayload(&big_zero_payload.base);
    try std.testing.expectEqual(i64_zero.hash(), big_zero.hash());
}
}