Merge pull request #9310 from ziglang/stage1-better-hashing

Speed up stage 1 by improving hash functions
Andrew Kelley 2021-07-06 13:20:15 -04:00 committed by GitHub
commit 6ba843ee0f
11 changed files with 539 additions and 313 deletions
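
The change replaces the previous ad-hoc per-field constants (hash_ptr, hash_size and hand-picked multipliers) with a single FNV-1a based hash_combine helper that folds the raw bytes of each key field into a running 32-bit state. For orientation, here is a minimal standalone sketch of that combining pattern; HASH_INIT and hash_combine mirror the definitions in the diff below, while DemoKey, demo_key_hash, and main are invented for illustration and are not part of the Zig tree:

// Minimal sketch of the FNV-1a combining scheme this PR introduces.
// HASH_INIT and hash_combine mirror the diff; DemoKey and main() are
// illustrative only.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static const uint32_t HASH_INIT = 0x811c9dc5U;  // FNV-1a 32-bit offset basis

template<typename T>
static uint32_t hash_combine(uint32_t hash, const T *value, size_t count = 1) {
    // Fold the raw bytes of `count` values of type T into the running hash.
    size_t len = sizeof(T) * count;
    const unsigned char *bytes = (const unsigned char *)value;
    for (size_t c = 0; c < len; ++c) {
        hash ^= bytes[c];
        hash *= 0x01000193U;  // FNV-1a 32-bit prime
    }
    return hash;
}

struct DemoKey {
    uint32_t kind;
    bool is_const;
    const void *type_ptr;
};

static uint32_t demo_key_hash(DemoKey const *k) {
    // Chain every field through the same helper instead of summing
    // per-field magic constants.
    uint32_t h = HASH_INIT;
    h = hash_combine(h, &k->kind);
    h = hash_combine(h, &k->is_const);
    h = hash_combine(h, &k->type_ptr);  // hashes the pointer value itself
    return h;
}

int main(void) {
    DemoKey k = {1, true, nullptr};
    printf("hash = 0x%08x\n", (unsigned)demo_key_hash(&k));
    return 0;
}

The same pattern appears throughout the diff in fn_type_id_hash, type_id_hash, and the hash_combine_const_val family of functions.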


@ -1879,8 +1879,8 @@ struct TypeId {
} data;
};
uint32_t type_id_hash(TypeId);
bool type_id_eql(TypeId a, TypeId b);
uint32_t type_id_hash(TypeId const *);
bool type_id_eql(TypeId const *a, TypeId const *b);
enum ZigLLVMFnId {
ZigLLVMFnIdCtz,
@ -1935,8 +1935,8 @@ struct ZigLLVMFnKey {
} data;
};
uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey);
bool zig_llvm_fn_key_eql(ZigLLVMFnKey a, ZigLLVMFnKey b);
uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey const *);
bool zig_llvm_fn_key_eql(ZigLLVMFnKey const *a, ZigLLVMFnKey const *b);
struct TimeEvent {
double time;


@ -5507,34 +5507,48 @@ bool handle_is_ptr(CodeGen *g, ZigType *type_entry) {
zig_unreachable();
}
static uint32_t hash_ptr(void *ptr) {
return (uint32_t)(((uintptr_t)ptr) % UINT32_MAX);
static const uint32_t HASH_INIT = 0x811c9dc5U;
template<typename T>
static uint32_t hash_combine(uint32_t hash, const T *value, size_t count = 1) {
// Simple FNV-1a 32-bit hash
size_t len = sizeof(T) * count;
const unsigned char *char_bytes = (const unsigned char*)value;
for (size_t c = 0; c < len; ++c) {
hash ^= char_bytes[c];
hash *= 0x01000193U;
}
return hash;
}
static uint32_t hash_size(size_t x) {
return (uint32_t)(x % UINT32_MAX);
static uint32_t hash_combine_bigint(uint32_t hash, const BigInt *value) {
return hash_combine(hash, bigint_ptr(value), value->digit_count);
}
static uint32_t hash_combine_buf(uint32_t hash, const Buf *buf) {
return hash_combine(hash, buf_ptr(buf), buf_len(buf));
}
uint32_t fn_table_entry_hash(ZigFn* value) {
return ptr_hash(value);
return hash_combine(HASH_INIT, &value);
}
bool fn_table_entry_eql(ZigFn *a, ZigFn *b) {
return ptr_eq(a, b);
return a == b;
}
uint32_t fn_type_id_hash(FnTypeId *id) {
uint32_t result = 0;
result += ((uint32_t)(id->cc)) * (uint32_t)3349388391;
result += id->is_var_args ? (uint32_t)1931444534 : 0;
result += hash_ptr(id->return_type);
result += id->alignment * 0xd3b3f3e2;
uint32_t hash = HASH_INIT;
hash = hash_combine(hash, &id->cc);
hash = hash_combine(hash, &id->is_var_args);
hash = hash_combine(hash, &id->return_type);
hash = hash_combine(hash, &id->alignment);
for (size_t i = 0; i < id->param_count; i += 1) {
FnTypeParamInfo *info = &id->param_info[i];
result += info->is_noalias ? (uint32_t)892356923 : 0;
result += hash_ptr(info->type);
hash = hash_combine(hash, &info->is_noalias);
hash = hash_combine(hash, &info->type);
}
return result;
return hash;
}
bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
@ -5559,194 +5573,200 @@ bool fn_type_id_eql(FnTypeId *a, FnTypeId *b) {
return true;
}
static uint32_t hash_const_val_error_set(ZigValue *const_val) {
static uint32_t hash_combine_const_val_error_set(uint32_t hash_val, ZigValue *const_val) {
assert(const_val->data.x_err_set != nullptr);
return const_val->data.x_err_set->value ^ 2630160122;
return hash_combine(hash_val, &const_val->data.x_err_set->value);
}
static uint32_t hash_const_val_ptr(ZigValue *const_val) {
uint32_t hash_val = 0;
switch (const_val->data.x_ptr.mut) {
case ConstPtrMutRuntimeVar:
hash_val += (uint32_t)3500721036;
break;
case ConstPtrMutComptimeConst:
hash_val += (uint32_t)4214318515;
break;
case ConstPtrMutInfer:
case ConstPtrMutComptimeVar:
hash_val += (uint32_t)1103195694;
break;
}
static uint32_t hash_combine_const_val_ptr(uint32_t hash_val, ZigValue *const_val) {
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.special);
switch (const_val->data.x_ptr.special) {
case ConstPtrSpecialInvalid:
zig_unreachable();
case ConstPtrSpecialRef:
hash_val += (uint32_t)2478261866;
hash_val += hash_ptr(const_val->data.x_ptr.data.ref.pointee);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.ref.pointee);
return hash_val;
case ConstPtrSpecialBaseArray:
hash_val += (uint32_t)1764906839;
hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_array.array_val);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_array.elem_index);
return hash_val;
case ConstPtrSpecialSubArray:
hash_val += (uint32_t)2643358777;
hash_val += hash_ptr(const_val->data.x_ptr.data.base_array.array_val);
hash_val += hash_size(const_val->data.x_ptr.data.base_array.elem_index);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_array.array_val);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_array.elem_index);
return hash_val;
case ConstPtrSpecialBaseStruct:
hash_val += (uint32_t)3518317043;
hash_val += hash_ptr(const_val->data.x_ptr.data.base_struct.struct_val);
hash_val += hash_size(const_val->data.x_ptr.data.base_struct.field_index);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_struct.struct_val);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_struct.field_index);
return hash_val;
case ConstPtrSpecialBaseErrorUnionCode:
hash_val += (uint32_t)2994743799;
hash_val += hash_ptr(const_val->data.x_ptr.data.base_err_union_code.err_union_val);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_err_union_code.err_union_val);
return hash_val;
case ConstPtrSpecialBaseErrorUnionPayload:
hash_val += (uint32_t)3456080131;
hash_val += hash_ptr(const_val->data.x_ptr.data.base_err_union_payload.err_union_val);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_err_union_payload.err_union_val);
return hash_val;
case ConstPtrSpecialBaseOptionalPayload:
hash_val += (uint32_t)3163140517;
hash_val += hash_ptr(const_val->data.x_ptr.data.base_optional_payload.optional_val);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.base_optional_payload.optional_val);
return hash_val;
case ConstPtrSpecialHardCodedAddr:
hash_val += (uint32_t)4048518294;
hash_val += hash_size(const_val->data.x_ptr.data.hard_coded_addr.addr);
return hash_val;
case ConstPtrSpecialDiscard:
hash_val += 2010123162;
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.hard_coded_addr.addr);
return hash_val;
case ConstPtrSpecialFunction:
hash_val += (uint32_t)2590901619;
hash_val += hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
hash_val = hash_combine(hash_val, &const_val->data.x_ptr.data.fn.fn_entry);
return hash_val;
case ConstPtrSpecialDiscard:
case ConstPtrSpecialNull:
hash_val += (uint32_t)1486246455;
// No fields to hash
return hash_val;
}
zig_unreachable();
}
static uint32_t hash_const_val(ZigValue *const_val) {
static uint32_t hash_combine_const_val(uint32_t hash_val, ZigValue *const_val);
static uint32_t hash_combine_const_val_array(uint32_t hash_val, ZigValue *array, size_t len) {
if (array->data.x_array.special == ConstArraySpecialUndef) {
char undef_tag = 56;
return hash_combine(hash_val, &undef_tag);
} else if (array->data.x_array.special == ConstArraySpecialBuf) {
// Hash in a way that is compatible with standard byte arrays
// If any of these asserts fails, the if after this needs to be modified
// to handle the new type in SpecialBuf.
assert(array->type->data.array.child_type->id == ZigTypeIdInt);
assert(array->type->data.array.child_type->data.integral.bit_count == 8);
assert(array->type->data.array.child_type->data.integral.is_signed == false);
const char *buf_pos = buf_ptr(array->data.x_array.data.s_buf);
const char *buf_end = buf_pos + buf_len(array->data.x_array.data.s_buf);
while (buf_pos < buf_end) {
hash_val = hash_combine(hash_val, buf_pos);
buf_pos++;
}
return hash_val;
} else if (array->type->data.array.child_type->id == ZigTypeIdInt &&
array->type->data.array.child_type->data.integral.bit_count == 8 &&
array->type->data.array.child_type->data.integral.is_signed == false) {
// If the type is u8, we hash it as if it's a ConstArraySpecialBuf,
// to maintain compatibility.
ZigValue *elems = array->data.x_array.data.s_none.elements;
for (size_t i = 0; i < len; i += 1) {
ZigValue *value = &elems[i];
assert(value->type == array->type->data.array.child_type);
// N.B. Using char here instead of uint8_t to match the const char*
// returned by buf_ptr.
const char byte_value = (char) bigint_as_u8(&value->data.x_bigint);
hash_val = hash_combine(hash_val, &byte_value);
}
return hash_val;
} else {
ZigValue *elems = array->data.x_array.data.s_none.elements;
for (size_t i = 0; i < len; i += 1) {
hash_val = hash_combine_const_val(hash_val, &elems[i]);
}
return hash_val;
}
}
static uint32_t hash_combine_const_val(uint32_t hash_val, ZigValue *const_val) {
hash_val = hash_combine(hash_val, &const_val->special);
if (const_val->special == ConstValSpecialUndef) {
return hash_val;
}
assert(const_val->special == ConstValSpecialStatic);
hash_val = hash_combine(hash_val, &const_val->type->id);
switch (const_val->type->id) {
case ZigTypeIdOpaque:
zig_unreachable();
case ZigTypeIdBool:
return const_val->data.x_bool ? (uint32_t)127863866 : (uint32_t)215080464;
return hash_combine(hash_val, &const_val->data.x_bool);
case ZigTypeIdMetaType:
return hash_ptr(const_val->data.x_type);
case ZigTypeIdVoid:
return (uint32_t)4149439618;
return hash_combine(hash_val, &const_val->data.x_type);
case ZigTypeIdInt:
case ZigTypeIdComptimeInt:
{
uint32_t result = 1331471175;
for (size_t i = 0; i < const_val->data.x_bigint.digit_count; i += 1) {
uint64_t digit = bigint_ptr(&const_val->data.x_bigint)[i];
result ^= ((uint32_t)(digit >> 32)) ^ (uint32_t)(result);
}
return result;
}
return hash_combine_bigint(hash_val, &const_val->data.x_bigint);
case ZigTypeIdEnumLiteral:
return buf_hash(const_val->data.x_enum_literal) * (uint32_t)2691276464;
return hash_combine_buf(hash_val, const_val->data.x_enum_literal);
case ZigTypeIdEnum:
{
uint32_t result = 31643936;
for (size_t i = 0; i < const_val->data.x_enum_tag.digit_count; i += 1) {
uint64_t digit = bigint_ptr(&const_val->data.x_enum_tag)[i];
result ^= ((uint32_t)(digit >> 32)) ^ (uint32_t)(result);
}
return result;
}
return hash_combine_bigint(hash_val, &const_val->data.x_enum_tag);
case ZigTypeIdFloat:
hash_val = hash_combine(hash_val, &const_val->type->data.floating.bit_count);
switch (const_val->type->data.floating.bit_count) {
case 16:
{
uint16_t result;
static_assert(sizeof(result) == sizeof(const_val->data.x_f16), "");
memcpy(&result, &const_val->data.x_f16, sizeof(result));
return result * 65537u;
}
case 32:
{
uint32_t result;
memcpy(&result, &const_val->data.x_f32, 4);
return result ^ 4084870010;
}
case 64:
{
uint32_t ints[2];
memcpy(&ints[0], &const_val->data.x_f64, 8);
return ints[0] ^ ints[1] ^ 0x22ed43c6;
}
case 128:
{
uint32_t ints[4];
memcpy(&ints[0], &const_val->data.x_f128, 16);
return ints[0] ^ ints[1] ^ ints[2] ^ ints[3] ^ 0xb5ffef27;
}
default:
zig_unreachable();
case 16: return hash_combine(hash_val, &const_val->data.x_f16);
case 32: return hash_combine(hash_val, &const_val->data.x_f32);
case 64: return hash_combine(hash_val, &const_val->data.x_f64);
case 128: return hash_combine(hash_val, &const_val->data.x_f128);
default: zig_unreachable();
}
case ZigTypeIdComptimeFloat:
{
float128_t f128 = bigfloat_to_f128(&const_val->data.x_bigfloat);
uint32_t ints[4];
memcpy(&ints[0], &f128, 16);
return ints[0] ^ ints[1] ^ ints[2] ^ ints[3] ^ 0xed8b3dfb;
}
return hash_combine(hash_val, &const_val->data.x_bigfloat.value);
case ZigTypeIdFn:
assert(const_val->data.x_ptr.mut == ConstPtrMutComptimeConst);
assert(const_val->data.x_ptr.special == ConstPtrSpecialFunction);
return 3677364617 ^ hash_ptr(const_val->data.x_ptr.data.fn.fn_entry);
return hash_combine(hash_val, &const_val->data.x_ptr.data.fn.fn_entry);
case ZigTypeIdPointer:
return hash_const_val_ptr(const_val);
return hash_combine_const_val_ptr(hash_val, const_val);
case ZigTypeIdVoid:
case ZigTypeIdUndefined:
return 162837799;
case ZigTypeIdNull:
return 844854567;
return hash_val;
case ZigTypeIdArray:
// TODO better hashing algorithm
return 1166190605;
case ZigTypeIdStruct:
// TODO better hashing algorithm
return 1532530855;
case ZigTypeIdUnion:
// TODO better hashing algorithm
return 2709806591;
return hash_combine_const_val_array(hash_val, const_val, const_val->type->data.array.len);
case ZigTypeIdStruct: {
size_t field_count = const_val->type->data.structure.src_field_count;
for (size_t i = 0; i < field_count; i += 1) {
if (const_val->type->data.structure.fields[i]->is_comptime) {
// The values of comptime struct fields are part of the
// type, not the value, so they do not participate in equality
// or hash of comptime values.
continue;
}
ZigValue *field = const_val->data.x_struct.fields[i];
hash_val = hash_combine_const_val(hash_val, field);
}
return hash_val;
}
case ZigTypeIdUnion: {
ConstUnionValue *union_value = &const_val->data.x_union;
hash_val = hash_combine_bigint(hash_val, &union_value->tag);
return hash_combine_const_val(hash_val, union_value->payload);
}
case ZigTypeIdOptional:
if (get_src_ptr_type(const_val->type) != nullptr) {
return hash_const_val_ptr(const_val) * (uint32_t)1992916303;
char tag = 1;
hash_val = hash_combine(hash_val, &tag);
return hash_combine_const_val_ptr(hash_val, const_val);
} else if (const_val->type->data.maybe.child_type->id == ZigTypeIdErrorSet) {
return hash_const_val_error_set(const_val) * (uint32_t)3147031929;
char tag = 2;
hash_val = hash_combine(hash_val, &tag);
return hash_combine_const_val_error_set(hash_val, const_val);
} else if (const_val->data.x_optional) {
char tag = 3;
hash_val = hash_combine(hash_val, &tag);
return hash_combine_const_val(hash_val, const_val->data.x_optional);
} else {
if (const_val->data.x_optional) {
return hash_const_val(const_val->data.x_optional) * (uint32_t)1992916303;
} else {
return 4016830364;
}
char tag = 4;
hash_val = hash_combine(hash_val, &tag);
return hash_val;
}
case ZigTypeIdErrorUnion:
// TODO better hashing algorithm
return 3415065496;
case ZigTypeIdErrorUnion: {
bool is_err = const_val->data.x_err_union.error_set->data.x_err_set != nullptr;
hash_val = hash_combine(hash_val, &is_err);
if (is_err) {
hash_val = hash_combine_const_val(hash_val, const_val->data.x_err_union.error_set);
} else {
hash_val = hash_combine_const_val(hash_val, const_val->data.x_err_union.payload);
}
return hash_val;
}
case ZigTypeIdErrorSet:
return hash_const_val_error_set(const_val);
return hash_combine_const_val_error_set(hash_val, const_val);
case ZigTypeIdVector:
// TODO better hashing algorithm
return 3647867726;
return hash_combine_const_val_array(hash_val, const_val, const_val->type->data.vector.len);
case ZigTypeIdFnFrame:
// TODO better hashing algorithm
return 675741936;
return hash_val;
case ZigTypeIdAnyFrame:
// TODO better hashing algorithm
return 3747294894;
return hash_val;
case ZigTypeIdBoundFn: {
assert(const_val->data.x_bound_fn.fn != nullptr);
return 3677364617 ^ hash_ptr(const_val->data.x_bound_fn.fn);
return hash_combine(hash_val, &const_val->data.x_bound_fn.fn);
}
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
@ -5756,13 +5776,13 @@ static uint32_t hash_const_val(ZigValue *const_val) {
}
uint32_t generic_fn_type_id_hash(GenericFnTypeId *id) {
uint32_t result = 0;
result += hash_ptr(id->fn_entry);
uint32_t result = HASH_INIT;
result = hash_combine(result, &id->fn_entry);
for (size_t i = 0; i < id->param_count; i += 1) {
ZigValue *generic_param = &id->params[i];
if (generic_param->special != ConstValSpecialRuntime) {
result += hash_const_val(generic_param);
result += hash_ptr(generic_param->type);
result = hash_combine_const_val(result, generic_param);
result = hash_combine(result, &generic_param->type);
}
}
return result;
@ -5957,15 +5977,15 @@ bool fn_eval_cacheable(Scope *scope, ZigType *return_type) {
}
uint32_t fn_eval_hash(Scope* scope) {
uint32_t result = 0;
uint32_t hash = HASH_INIT;
while (scope) {
if (scope->id == ScopeIdVarDecl) {
ScopeVarDecl *var_scope = (ScopeVarDecl *)scope;
result += hash_const_val(var_scope->var->const_value);
hash = hash_combine_const_val(hash, var_scope->var->const_value);
} else if (scope->id == ScopeIdFnDef) {
ScopeFnDef *fn_scope = (ScopeFnDef *)scope;
result += hash_ptr(fn_scope->fn_entry);
return result;
hash = hash_combine(hash, &fn_scope->fn_entry);
return hash;
} else {
zig_unreachable();
}
@ -6151,7 +6171,8 @@ ZigValue *get_the_one_possible_value(CodeGen *g, ZigType *type_entry) {
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = struct_type->data.structure.fields[i];
if (field->is_comptime) {
copy_const_val(g, result->data.x_struct.fields[i], field->init_val);
// Comptime fields are part of the type, and do not need to
// be initialized.
continue;
}
ZigType *field_type = resolve_struct_field_type(g, field);
@ -7260,6 +7281,8 @@ bool const_values_equal(CodeGen *g, ZigValue *a, ZigValue *b) {
case ZigTypeIdMetaType:
return a->data.x_type == b->data.x_type;
case ZigTypeIdVoid:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
return true;
case ZigTypeIdErrorSet:
return a->data.x_err_set->value == b->data.x_err_set->value;
@ -7292,12 +7315,17 @@ bool const_values_equal(CodeGen *g, ZigValue *a, ZigValue *b) {
case ZigTypeIdVector:
assert(a->type->data.vector.len == b->type->data.vector.len);
return const_values_equal_array(g, a, b, a->type->data.vector.len);
case ZigTypeIdArray: {
case ZigTypeIdArray:
assert(a->type->data.array.len == b->type->data.array.len);
return const_values_equal_array(g, a, b, a->type->data.array.len);
}
case ZigTypeIdStruct:
for (size_t i = 0; i < a->type->data.structure.src_field_count; i += 1) {
if (a->type->data.structure.fields[i]->is_comptime) {
// The values of comptime struct fields are part of the
// type, not the value, so they do not participate in equality
// or hash of comptime values.
continue;
}
ZigValue *field_a = a->data.x_struct.fields[i];
ZigValue *field_b = b->data.x_struct.fields[i];
if (!const_values_equal(g, field_a, field_b))
@ -7308,10 +7336,6 @@ bool const_values_equal(CodeGen *g, ZigValue *a, ZigValue *b) {
zig_panic("TODO: const_values_equal ZigTypeIdFnFrame");
case ZigTypeIdAnyFrame:
zig_panic("TODO: const_values_equal ZigTypeIdAnyFrame");
case ZigTypeIdUndefined:
zig_panic("TODO: const_values_equal ZigTypeIdUndefined");
case ZigTypeIdNull:
zig_panic("TODO: const_values_equal ZigTypeIdNull");
case ZigTypeIdOptional:
if (get_src_ptr_type(a->type) != nullptr)
return const_values_equal_ptr(a, b);
@ -7718,8 +7742,9 @@ ZigType *make_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits) {
return entry;
}
uint32_t type_id_hash(TypeId x) {
switch (x.id) {
uint32_t type_id_hash(TypeId const *x) {
uint32_t hash = hash_combine(HASH_INIT, &x->id);
switch (x->id) {
case ZigTypeIdInvalid:
case ZigTypeIdOpaque:
case ZigTypeIdMetaType:
@ -7743,35 +7768,50 @@ uint32_t type_id_hash(TypeId x) {
case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return hash_ptr(x.data.error_union.err_set_type) ^ hash_ptr(x.data.error_union.payload_type);
hash = hash_combine(hash, &x->data.error_union.err_set_type);
hash = hash_combine(hash, &x->data.error_union.payload_type);
return hash;
case ZigTypeIdPointer:
return hash_ptr(x.data.pointer.child_type) +
(uint32_t)x.data.pointer.ptr_len * 1120226602u +
(x.data.pointer.is_const ? (uint32_t)2749109194 : (uint32_t)4047371087) +
(x.data.pointer.is_volatile ? (uint32_t)536730450 : (uint32_t)1685612214) +
(x.data.pointer.allow_zero ? (uint32_t)3324284834 : (uint32_t)3584904923) +
(((uint32_t)x.data.pointer.alignment) ^ (uint32_t)0x777fbe0e) +
(((uint32_t)x.data.pointer.bit_offset_in_host) ^ (uint32_t)2639019452) +
(((uint32_t)x.data.pointer.vector_index) ^ (uint32_t)0x19199716) +
(((uint32_t)x.data.pointer.host_int_bytes) ^ (uint32_t)529908881) *
(x.data.pointer.sentinel ? hash_const_val(x.data.pointer.sentinel) : (uint32_t)2955491856);
hash = hash_combine(hash, &x->data.pointer.child_type);
hash = hash_combine(hash, &x->data.pointer.ptr_len);
hash = hash_combine(hash, &x->data.pointer.is_const);
hash = hash_combine(hash, &x->data.pointer.is_volatile);
hash = hash_combine(hash, &x->data.pointer.allow_zero);
hash = hash_combine(hash, &x->data.pointer.alignment);
hash = hash_combine(hash, &x->data.pointer.bit_offset_in_host);
hash = hash_combine(hash, &x->data.pointer.vector_index);
hash = hash_combine(hash, &x->data.pointer.host_int_bytes);
if (x->data.pointer.sentinel != nullptr) {
hash = hash_combine_const_val(hash, x->data.pointer.sentinel);
}
if (x->data.pointer.inferred_struct_field) {
hash = hash_combine(hash, &x->data.pointer.inferred_struct_field->inferred_struct_type);
hash = hash_combine_buf(hash, x->data.pointer.inferred_struct_field->field_name);
}
return hash;
case ZigTypeIdArray:
return hash_ptr(x.data.array.child_type) *
((uint32_t)x.data.array.size ^ (uint32_t)2122979968) *
(x.data.array.sentinel ? hash_const_val(x.data.array.sentinel) : (uint32_t)1927201585);
hash = hash_combine(hash, &x->data.array.child_type);
hash = hash_combine(hash, &x->data.array.size);
if (x->data.array.sentinel != nullptr) {
hash = hash_combine_const_val(hash, x->data.array.sentinel);
}
return hash;
case ZigTypeIdInt:
return (x.data.integer.is_signed ? (uint32_t)2652528194 : (uint32_t)163929201) +
(((uint32_t)x.data.integer.bit_count) ^ (uint32_t)2998081557);
hash = hash_combine(hash, &x->data.integer.is_signed);
hash = hash_combine(hash, &x->data.integer.bit_count);
return hash;
case ZigTypeIdVector:
return hash_ptr(x.data.vector.elem_type) * (x.data.vector.len * 526582681);
hash = hash_combine(hash, &x->data.vector.elem_type);
hash = hash_combine(hash, &x->data.vector.len);
return hash;
}
zig_unreachable();
}
bool type_id_eql(TypeId a, TypeId b) {
if (a.id != b.id)
bool type_id_eql(TypeId const *a, TypeId const *b) {
if (a->id != b->id)
return false;
switch (a.id) {
switch (a->id) {
case ZigTypeIdInvalid:
case ZigTypeIdMetaType:
case ZigTypeIdVoid:
@ -7795,107 +7835,107 @@ bool type_id_eql(TypeId a, TypeId b) {
case ZigTypeIdAnyFrame:
zig_unreachable();
case ZigTypeIdErrorUnion:
return a.data.error_union.err_set_type == b.data.error_union.err_set_type &&
a.data.error_union.payload_type == b.data.error_union.payload_type;
return a->data.error_union.err_set_type == b->data.error_union.err_set_type &&
a->data.error_union.payload_type == b->data.error_union.payload_type;
case ZigTypeIdPointer:
return a.data.pointer.child_type == b.data.pointer.child_type &&
a.data.pointer.ptr_len == b.data.pointer.ptr_len &&
a.data.pointer.is_const == b.data.pointer.is_const &&
a.data.pointer.is_volatile == b.data.pointer.is_volatile &&
a.data.pointer.allow_zero == b.data.pointer.allow_zero &&
a.data.pointer.alignment == b.data.pointer.alignment &&
a.data.pointer.bit_offset_in_host == b.data.pointer.bit_offset_in_host &&
a.data.pointer.vector_index == b.data.pointer.vector_index &&
a.data.pointer.host_int_bytes == b.data.pointer.host_int_bytes &&
return a->data.pointer.child_type == b->data.pointer.child_type &&
a->data.pointer.ptr_len == b->data.pointer.ptr_len &&
a->data.pointer.is_const == b->data.pointer.is_const &&
a->data.pointer.is_volatile == b->data.pointer.is_volatile &&
a->data.pointer.allow_zero == b->data.pointer.allow_zero &&
a->data.pointer.alignment == b->data.pointer.alignment &&
a->data.pointer.bit_offset_in_host == b->data.pointer.bit_offset_in_host &&
a->data.pointer.vector_index == b->data.pointer.vector_index &&
a->data.pointer.host_int_bytes == b->data.pointer.host_int_bytes &&
(
a.data.pointer.sentinel == b.data.pointer.sentinel ||
(a.data.pointer.sentinel != nullptr && b.data.pointer.sentinel != nullptr &&
const_values_equal(a.data.pointer.codegen, a.data.pointer.sentinel, b.data.pointer.sentinel))
a->data.pointer.sentinel == b->data.pointer.sentinel ||
(a->data.pointer.sentinel != nullptr && b->data.pointer.sentinel != nullptr &&
const_values_equal(a->data.pointer.codegen, a->data.pointer.sentinel, b->data.pointer.sentinel))
) &&
(
a.data.pointer.inferred_struct_field == b.data.pointer.inferred_struct_field ||
(a.data.pointer.inferred_struct_field != nullptr &&
b.data.pointer.inferred_struct_field != nullptr &&
a.data.pointer.inferred_struct_field->inferred_struct_type ==
b.data.pointer.inferred_struct_field->inferred_struct_type &&
buf_eql_buf(a.data.pointer.inferred_struct_field->field_name,
b.data.pointer.inferred_struct_field->field_name))
a->data.pointer.inferred_struct_field == b->data.pointer.inferred_struct_field ||
(a->data.pointer.inferred_struct_field != nullptr &&
b->data.pointer.inferred_struct_field != nullptr &&
a->data.pointer.inferred_struct_field->inferred_struct_type ==
b->data.pointer.inferred_struct_field->inferred_struct_type &&
buf_eql_buf(a->data.pointer.inferred_struct_field->field_name,
b->data.pointer.inferred_struct_field->field_name))
);
case ZigTypeIdArray:
return a.data.array.child_type == b.data.array.child_type &&
a.data.array.size == b.data.array.size &&
return a->data.array.child_type == b->data.array.child_type &&
a->data.array.size == b->data.array.size &&
(
a.data.array.sentinel == b.data.array.sentinel ||
(a.data.array.sentinel != nullptr && b.data.array.sentinel != nullptr &&
const_values_equal(a.data.array.codegen, a.data.array.sentinel, b.data.array.sentinel))
a->data.array.sentinel == b->data.array.sentinel ||
(a->data.array.sentinel != nullptr && b->data.array.sentinel != nullptr &&
const_values_equal(a->data.array.codegen, a->data.array.sentinel, b->data.array.sentinel))
);
case ZigTypeIdInt:
return a.data.integer.is_signed == b.data.integer.is_signed &&
a.data.integer.bit_count == b.data.integer.bit_count;
return a->data.integer.is_signed == b->data.integer.is_signed &&
a->data.integer.bit_count == b->data.integer.bit_count;
case ZigTypeIdVector:
return a.data.vector.elem_type == b.data.vector.elem_type &&
a.data.vector.len == b.data.vector.len;
return a->data.vector.elem_type == b->data.vector.elem_type &&
a->data.vector.len == b->data.vector.len;
}
zig_unreachable();
}
uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey x) {
switch (x.id) {
uint32_t zig_llvm_fn_key_hash(ZigLLVMFnKey const *x) {
switch (x->id) {
case ZigLLVMFnIdCtz:
return (uint32_t)(x.data.ctz.bit_count) * (uint32_t)810453934;
return (uint32_t)(x->data.ctz.bit_count) * (uint32_t)810453934;
case ZigLLVMFnIdClz:
return (uint32_t)(x.data.clz.bit_count) * (uint32_t)2428952817;
return (uint32_t)(x->data.clz.bit_count) * (uint32_t)2428952817;
case ZigLLVMFnIdPopCount:
return (uint32_t)(x.data.clz.bit_count) * (uint32_t)101195049;
return (uint32_t)(x->data.clz.bit_count) * (uint32_t)101195049;
case ZigLLVMFnIdFloatOp:
return (uint32_t)(x.data.floating.bit_count) * ((uint32_t)x.id + 1025) +
(uint32_t)(x.data.floating.vector_len) * (((uint32_t)x.id << 5) + 1025) +
(uint32_t)(x.data.floating.op) * (uint32_t)43789879;
return (uint32_t)(x->data.floating.bit_count) * ((uint32_t)x->id + 1025) +
(uint32_t)(x->data.floating.vector_len) * (((uint32_t)x->id << 5) + 1025) +
(uint32_t)(x->data.floating.op) * (uint32_t)43789879;
case ZigLLVMFnIdFMA:
return (uint32_t)(x.data.floating.bit_count) * ((uint32_t)x.id + 1025) +
(uint32_t)(x.data.floating.vector_len) * (((uint32_t)x.id << 5) + 1025);
return (uint32_t)(x->data.floating.bit_count) * ((uint32_t)x->id + 1025) +
(uint32_t)(x->data.floating.vector_len) * (((uint32_t)x->id << 5) + 1025);
case ZigLLVMFnIdBswap:
return (uint32_t)(x.data.bswap.bit_count) * ((uint32_t)3661994335) +
(uint32_t)(x.data.bswap.vector_len) * (((uint32_t)x.id << 5) + 1025);
return (uint32_t)(x->data.bswap.bit_count) * ((uint32_t)3661994335) +
(uint32_t)(x->data.bswap.vector_len) * (((uint32_t)x->id << 5) + 1025);
case ZigLLVMFnIdBitReverse:
return (uint32_t)(x.data.bit_reverse.bit_count) * (uint32_t)2621398431;
return (uint32_t)(x->data.bit_reverse.bit_count) * (uint32_t)2621398431;
case ZigLLVMFnIdOverflowArithmetic:
return ((uint32_t)(x.data.overflow_arithmetic.bit_count) * 87135777) +
((uint32_t)(x.data.overflow_arithmetic.add_sub_mul) * 31640542) +
((uint32_t)(x.data.overflow_arithmetic.is_signed) ? 1062315172 : 314955820) +
x.data.overflow_arithmetic.vector_len * 1435156945;
return ((uint32_t)(x->data.overflow_arithmetic.bit_count) * 87135777) +
((uint32_t)(x->data.overflow_arithmetic.add_sub_mul) * 31640542) +
((uint32_t)(x->data.overflow_arithmetic.is_signed) ? 1062315172 : 314955820) +
x->data.overflow_arithmetic.vector_len * 1435156945;
}
zig_unreachable();
}
bool zig_llvm_fn_key_eql(ZigLLVMFnKey a, ZigLLVMFnKey b) {
if (a.id != b.id)
bool zig_llvm_fn_key_eql(ZigLLVMFnKey const *a, ZigLLVMFnKey const *b) {
if (a->id != b->id)
return false;
switch (a.id) {
switch (a->id) {
case ZigLLVMFnIdCtz:
return a.data.ctz.bit_count == b.data.ctz.bit_count;
return a->data.ctz.bit_count == b->data.ctz.bit_count;
case ZigLLVMFnIdClz:
return a.data.clz.bit_count == b.data.clz.bit_count;
return a->data.clz.bit_count == b->data.clz.bit_count;
case ZigLLVMFnIdPopCount:
return a.data.pop_count.bit_count == b.data.pop_count.bit_count;
return a->data.pop_count.bit_count == b->data.pop_count.bit_count;
case ZigLLVMFnIdBswap:
return a.data.bswap.bit_count == b.data.bswap.bit_count &&
a.data.bswap.vector_len == b.data.bswap.vector_len;
return a->data.bswap.bit_count == b->data.bswap.bit_count &&
a->data.bswap.vector_len == b->data.bswap.vector_len;
case ZigLLVMFnIdBitReverse:
return a.data.bit_reverse.bit_count == b.data.bit_reverse.bit_count;
return a->data.bit_reverse.bit_count == b->data.bit_reverse.bit_count;
case ZigLLVMFnIdFloatOp:
return a.data.floating.bit_count == b.data.floating.bit_count &&
a.data.floating.vector_len == b.data.floating.vector_len &&
a.data.floating.op == b.data.floating.op;
return a->data.floating.bit_count == b->data.floating.bit_count &&
a->data.floating.vector_len == b->data.floating.vector_len &&
a->data.floating.op == b->data.floating.op;
case ZigLLVMFnIdFMA:
return a.data.floating.bit_count == b.data.floating.bit_count &&
a.data.floating.vector_len == b.data.floating.vector_len;
return a->data.floating.bit_count == b->data.floating.bit_count &&
a->data.floating.vector_len == b->data.floating.vector_len;
case ZigLLVMFnIdOverflowArithmetic:
return (a.data.overflow_arithmetic.bit_count == b.data.overflow_arithmetic.bit_count) &&
(a.data.overflow_arithmetic.add_sub_mul == b.data.overflow_arithmetic.add_sub_mul) &&
(a.data.overflow_arithmetic.is_signed == b.data.overflow_arithmetic.is_signed) &&
(a.data.overflow_arithmetic.vector_len == b.data.overflow_arithmetic.vector_len);
return (a->data.overflow_arithmetic.bit_count == b->data.overflow_arithmetic.bit_count) &&
(a->data.overflow_arithmetic.add_sub_mul == b->data.overflow_arithmetic.add_sub_mul) &&
(a->data.overflow_arithmetic.is_signed == b->data.overflow_arithmetic.is_signed) &&
(a->data.overflow_arithmetic.vector_len == b->data.overflow_arithmetic.vector_len);
}
zig_unreachable();
}
@ -7915,6 +7955,13 @@ static void init_const_undefined(CodeGen *g, ZigValue *const_val) {
size_t field_count = wanted_type->data.structure.src_field_count;
const_val->data.x_struct.fields = alloc_const_vals_ptrs(g, field_count);
for (size_t i = 0; i < field_count; i += 1) {
TypeStructField *field = wanted_type->data.structure.fields[i];
if (field->is_comptime) {
// Comptime fields are part of the type, and do not need to
// be initialized.
continue;
}
ZigValue *field_val = const_val->data.x_struct.fields[i];
field_val->type = resolve_struct_field_type(g, wanted_type->data.structure.fields[i]);
assert(field_val->type);
@ -8155,7 +8202,7 @@ ZigType *get_align_amt_type(CodeGen *g) {
}
uint32_t type_ptr_hash(const ZigType *ptr) {
return hash_ptr((void*)ptr);
return hash_combine(HASH_INIT, &ptr);
}
bool type_ptr_eql(const ZigType *a, const ZigType *b) {
@ -8163,7 +8210,7 @@ bool type_ptr_eql(const ZigType *a, const ZigType *b) {
}
uint32_t pkg_ptr_hash(const ZigPackage *ptr) {
return hash_ptr((void*)ptr);
return hash_combine(HASH_INIT, &ptr);
}
bool pkg_ptr_eql(const ZigPackage *a, const ZigPackage *b) {
@ -8171,7 +8218,7 @@ bool pkg_ptr_eql(const ZigPackage *a, const ZigPackage *b) {
}
uint32_t tld_ptr_hash(const Tld *ptr) {
return hash_ptr((void*)ptr);
return hash_combine(HASH_INIT, &ptr);
}
bool tld_ptr_eql(const Tld *a, const Tld *b) {
@ -8179,7 +8226,7 @@ bool tld_ptr_eql(const Tld *a, const Tld *b) {
}
uint32_t node_ptr_hash(const AstNode *ptr) {
return hash_ptr((void*)ptr);
return hash_combine(HASH_INIT, &ptr);
}
bool node_ptr_eql(const AstNode *a, const AstNode *b) {
@ -8187,7 +8234,7 @@ bool node_ptr_eql(const AstNode *a, const AstNode *b) {
}
uint32_t fn_ptr_hash(const ZigFn *ptr) {
return hash_ptr((void*)ptr);
return hash_combine(HASH_INIT, &ptr);
}
bool fn_ptr_eql(const ZigFn *a, const ZigFn *b) {
@ -8195,7 +8242,7 @@ bool fn_ptr_eql(const ZigFn *a, const ZigFn *b) {
}
uint32_t err_ptr_hash(const ErrorTableEntry *ptr) {
return hash_ptr((void*)ptr);
return hash_combine(HASH_INIT, &ptr);
}
bool err_ptr_eql(const ErrorTableEntry *a, const ErrorTableEntry *b) {
@ -9914,10 +9961,13 @@ void copy_const_val(CodeGen *g, ZigValue *dest, ZigValue *src) {
dest->data.x_struct.fields = alloc_const_vals_ptrs(g, dest->type->data.structure.src_field_count);
for (size_t i = 0; i < dest->type->data.structure.src_field_count; i += 1) {
TypeStructField *type_struct_field = dest->type->data.structure.fields[i];
// comptime-known values are stored in the field init_val inside
// the struct type.
if (type_struct_field->is_comptime)
if (type_struct_field->is_comptime) {
// comptime-known values are stored in the field init_val inside
// the struct type. The data stored here is not supposed to be read
// at all; the code should consult the type system, notice that the field
// is comptime, and read its value from the type (the field's init_val).
continue;
}
copy_const_val(g, dest->data.x_struct.fields[i], src->data.x_struct.fields[i]);
dest->data.x_struct.fields[i]->parent.id = ConstParentIdStruct;
dest->data.x_struct.fields[i]->parent.data.p_struct.struct_val = dest;
@ -10128,8 +10178,11 @@ static void dump_value_indent(ZigValue *val, int indent) {
for (int j = 0; j < indent; j += 1) {
fprintf(stderr, " ");
}
fprintf(stderr, "%s: ", buf_ptr(val->type->data.structure.fields[i]->name));
if (val->data.x_struct.fields == nullptr) {
TypeStructField *field = val->type->data.structure.fields[i];
fprintf(stderr, "%s: ", buf_ptr(field->name));
if (field->is_comptime) {
fprintf(stderr, "<comptime field>");
} else if (val->data.x_struct.fields == nullptr) {
fprintf(stderr, "<null>\n");
} else {
dump_value_indent(val->data.x_struct.fields[i], 1);
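
One property worth noting about the scheme above: the old code summed per-field contributions, and addition is commutative, so keys that differ only in the order of their components (for example two parameter lists with swapped types) could collide systematically. Chained FNV-1a mixes each byte into the evolving state, so order changes the result. A small self-contained comparison follows; the field values are made up and the old-style constant is reused purely for illustration:

// Why chained FNV-1a (new) separates keys that additive mixing (old)
// collapsed. The values and the additive constant are illustrative only.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static uint32_t fnv1a_u32(uint32_t hash, uint32_t v) {
    const unsigned char *b = (const unsigned char *)&v;
    for (size_t i = 0; i < sizeof(v); ++i) {
        hash ^= b[i];
        hash *= 0x01000193U;
    }
    return hash;
}

int main(void) {
    // Two "parameter lists" that differ only in order.
    uint32_t params_ab[2] = {0x1111u, 0x2222u};
    uint32_t params_ba[2] = {0x2222u, 0x1111u};

    // Old style: sum each parameter's contribution. The sum is the same for
    // both orders, so the keys collide.
    uint32_t add_ab = 0, add_ba = 0;
    for (int i = 0; i < 2; ++i) add_ab += params_ab[i] * 892356923u;
    for (int i = 0; i < 2; ++i) add_ba += params_ba[i] * 892356923u;

    // New style: chain each parameter through FNV-1a; order now matters.
    uint32_t fnv_ab = 0x811c9dc5U, fnv_ba = 0x811c9dc5U;
    for (int i = 0; i < 2; ++i) fnv_ab = fnv1a_u32(fnv_ab, params_ab[i]);
    for (int i = 0; i < 2; ++i) fnv_ba = fnv1a_u32(fnv_ba, params_ba[i]);

    printf("additive: %08x vs %08x (%s)\n", (unsigned)add_ab, (unsigned)add_ba,
           add_ab == add_ba ? "collide" : "distinct");
    printf("fnv-1a:   %08x vs %08x (%s)\n", (unsigned)fnv_ab, (unsigned)fnv_ba,
           fnv_ab == fnv_ba ? "collide" : "distinct");
    return 0;
}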


@ -1694,6 +1694,13 @@ uint32_t bigint_as_u32(const BigInt *bigint) {
return value32;
}
uint8_t bigint_as_u8(const BigInt *bigint) {
uint64_t value64 = bigint_as_unsigned(bigint);
uint8_t value8 = (uint8_t)value64;
assert (value64 == value8);
return value8;
}
size_t bigint_as_usize(const BigInt *bigint) {
uint64_t value64 = bigint_as_unsigned(bigint);
size_t valueUsize = (size_t)value64;
@ -1726,16 +1733,16 @@ Cmp bigint_cmp_zero(const BigInt *op) {
return op->is_negative ? CmpLT : CmpGT;
}
uint32_t bigint_hash(BigInt x) {
if (x.digit_count == 0) {
uint32_t bigint_hash(BigInt const *x) {
if (x->digit_count == 0) {
return 0;
} else {
return bigint_ptr(&x)[0];
return bigint_ptr(x)[0];
}
}
bool bigint_eql(BigInt a, BigInt b) {
return bigint_cmp(&a, &b) == CmpEQ;
bool bigint_eql(BigInt const *a, BigInt const *b) {
return bigint_cmp(a, b) == CmpEQ;
}
void bigint_incr(BigInt *x) {


@ -39,6 +39,7 @@ void bigint_deinit(BigInt *bi);
// panics if number won't fit
uint64_t bigint_as_u64(const BigInt *bigint);
uint32_t bigint_as_u32(const BigInt *bigint);
uint8_t bigint_as_u8(const BigInt *bigint);
size_t bigint_as_usize(const BigInt *bigint);
int64_t bigint_as_signed(const BigInt *bigint);
@ -99,7 +100,7 @@ void bigint_decr(BigInt *value);
bool mul_u64_overflow(uint64_t op1, uint64_t op2, uint64_t *result);
uint32_t bigint_hash(BigInt x);
bool bigint_eql(BigInt a, BigInt b);
uint32_t bigint_hash(BigInt const *x);
bool bigint_eql(BigInt const *a, BigInt const *b);
#endif


@ -26,7 +26,7 @@ Buf *buf_sprintf(const char *format, ...)
ATTRIBUTE_PRINTF(1, 2);
Buf *buf_vprintf(const char *format, va_list ap);
static inline size_t buf_len(Buf *buf) {
static inline size_t buf_len(const Buf *buf) {
assert(buf);
assert(buf->list.length);
return buf->list.length - 1;


@ -3778,6 +3778,12 @@ static bool value_is_all_undef(CodeGen *g, ZigValue *const_val) {
case ConstValSpecialStatic:
if (const_val->type->id == ZigTypeIdStruct) {
for (size_t i = 0; i < const_val->type->data.structure.src_field_count; i += 1) {
TypeStructField *field = const_val->type->data.structure.fields[i];
if (field->is_comptime) {
// Comptime fields are part of the type, may be uninitialized,
// and should not be inspected.
continue;
}
if (!value_is_all_undef(g, const_val->data.x_struct.fields[i]))
return false;
}
@ -7285,7 +7291,7 @@ static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, Zig
size_t used_bits = 0;
for (size_t i = 0; i < type_entry->data.structure.src_field_count; i += 1) {
TypeStructField *field = type_entry->data.structure.fields[i];
if (field->gen_index == SIZE_MAX) {
if (field->gen_index == SIZE_MAX || field->is_comptime) {
continue;
}
LLVMValueRef child_val = pack_const_int(g, big_int_type_ref, const_val->data.x_struct.fields[i]);
@ -7573,7 +7579,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n
size_t src_field_index = 0;
while (src_field_index < src_field_count) {
TypeStructField *type_struct_field = type_entry->data.structure.fields[src_field_index];
if (type_struct_field->gen_index == SIZE_MAX) {
if (type_struct_field->gen_index == SIZE_MAX || type_struct_field->is_comptime) {
src_field_index += 1;
continue;
}
@ -7642,7 +7648,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ZigValue *const_val, const char *n
} else {
for (uint32_t i = 0; i < src_field_count; i += 1) {
TypeStructField *type_struct_field = type_entry->data.structure.fields[i];
if (type_struct_field->gen_index == SIZE_MAX) {
if (type_struct_field->gen_index == SIZE_MAX || type_struct_field->is_comptime) {
continue;
}
ZigValue *field_val = const_val->data.x_struct.fields[i];


@ -12,7 +12,33 @@
#include <stdint.h>
template<typename K, typename V, uint32_t (*HashFunction)(K key), bool (*EqualFn)(K a, K b)>
template<typename K>
struct MakePointer {
typedef K const *Type;
static Type convert(K const &val) {
return &val;
}
};
template<typename K>
struct MakePointer<K*> {
typedef K *Type;
static Type convert(K * const &val) {
return val;
}
};
template<typename K>
struct MakePointer<K const *> {
typedef K const *Type;
static Type convert(K const * const &val) {
return val;
}
};
template<typename K, typename V,
uint32_t (*HashFunction)(typename MakePointer<K>::Type key),
bool (*EqualFn)(typename MakePointer<K>::Type a, typename MakePointer<K>::Type b)>
class HashMap {
public:
void init(int capacity) {
@ -51,7 +77,7 @@ public:
if (_index_bytes == nullptr) {
if (_entries.length < 16) {
_entries.append({HashFunction(key), 0, key, value});
_entries.append({HashFunction(MakePointer<K>::convert(key)), 0, key, value});
return;
} else {
_indexes_len = 32;
@ -131,9 +157,9 @@ public:
bool maybe_remove(const K &key) {
_modification_count += 1;
if (_index_bytes == nullptr) {
uint32_t hash = HashFunction(key);
uint32_t hash = HashFunction(MakePointer<K>::convert(key));
for (size_t i = 0; i < _entries.length; i += 1) {
if (_entries.items[i].hash == hash && EqualFn(_entries.items[i].key, key)) {
if (_entries.items[i].hash == hash && EqualFn(MakePointer<K>::convert(_entries.items[i].key), MakePointer<K>::convert(key))) {
_entries.swap_remove(i);
return true;
}
@ -223,7 +249,7 @@ private:
template <typename I>
void internal_put(const K &key, const V &value, I *indexes) {
uint32_t hash = HashFunction(key);
uint32_t hash = HashFunction(MakePointer<K>::convert(key));
uint32_t distance_from_start_index = 0;
size_t start_index = hash_to_index(hash);
for (size_t roll_over = 0; roll_over < _indexes_len;
@ -241,7 +267,7 @@ private:
// This pointer survives the following append because we call
// _entries.ensure_capacity before internal_put.
Entry *entry = &_entries.items[index_data - 1];
if (entry->hash == hash && EqualFn(entry->key, key)) {
if (entry->hash == hash && EqualFn(MakePointer<K>::convert(entry->key), MakePointer<K>::convert(key))) {
*entry = {hash, distance_from_start_index, key, value};
if (distance_from_start_index > _max_distance_from_start_index)
_max_distance_from_start_index = distance_from_start_index;
@ -322,9 +348,9 @@ private:
Entry *internal_get(const K &key) const {
if (_index_bytes == nullptr) {
uint32_t hash = HashFunction(key);
uint32_t hash = HashFunction(MakePointer<K>::convert(key));
for (size_t i = 0; i < _entries.length; i += 1) {
if (_entries.items[i].hash == hash && EqualFn(_entries.items[i].key, key)) {
if (_entries.items[i].hash == hash && EqualFn(MakePointer<K>::convert(_entries.items[i].key), MakePointer<K>::convert(key))) {
return &_entries.items[i];
}
}
@ -340,7 +366,7 @@ private:
template <typename I>
Entry *internal_get2(const K &key, I *indexes) const {
uint32_t hash = HashFunction(key);
uint32_t hash = HashFunction(MakePointer<K>::convert(key));
size_t start_index = hash_to_index(hash);
for (size_t roll_over = 0; roll_over <= _max_distance_from_start_index; roll_over += 1) {
size_t index_index = (start_index + roll_over) % _indexes_len;
@ -349,7 +375,7 @@ private:
return nullptr;
Entry *entry = &_entries.items[index_data - 1];
if (entry->hash == hash && EqualFn(entry->key, key))
if (entry->hash == hash && EqualFn(MakePointer<K>::convert(entry->key), MakePointer<K>::convert(key)))
return entry;
}
return nullptr;
@ -361,7 +387,7 @@ private:
template <typename I>
bool internal_remove(const K &key, I *indexes) {
uint32_t hash = HashFunction(key);
uint32_t hash = HashFunction(MakePointer<K>::convert(key));
size_t start_index = hash_to_index(hash);
for (size_t roll_over = 0; roll_over <= _max_distance_from_start_index; roll_over += 1) {
size_t index_index = (start_index + roll_over) % _indexes_len;
@ -371,7 +397,7 @@ private:
size_t index = index_data - 1;
Entry *entry = &_entries.items[index];
if (entry->hash != hash || !EqualFn(entry->key, key))
if (entry->hash != hash || !EqualFn(MakePointer<K>::convert(entry->key), MakePointer<K>::convert(key)))
continue;
size_t prev_index = index_index;
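
The hash map change above exists so that the hash and equality callbacks receive keys by const pointer instead of by value: large keys such as TypeId and FnTypeId are no longer copied on every probe, and keys that are already pointers are passed through unchanged. A standalone sketch of the adapter follows; BigKey, big_key_hash, and the demo calls are invented for illustration:

// Standalone copy of the MakePointer adapter plus an invented BigKey to show
// both cases: value keys are handed to the callbacks by const pointer, keys
// that are already pointers pass through unchanged.
#include <cstdint>
#include <cstdio>

template<typename K>
struct MakePointer {
    typedef K const *Type;
    static Type convert(K const &val) { return &val; }
};

template<typename K>
struct MakePointer<K*> {
    typedef K *Type;
    static Type convert(K *const &val) { return val; }
};

struct BigKey { uint64_t a, b, c, d; };

static uint32_t big_key_hash(BigKey const *k) {
    // The key arrives by pointer, so no 32-byte copy per hash-map probe.
    return (uint32_t)(k->a ^ k->b ^ k->c ^ k->d);
}

int main(void) {
    BigKey k = {1, 2, 3, 4};
    BigKey *pk = &k;

    // Value key: the adapter takes its address.
    printf("%08x\n", (unsigned)big_key_hash(MakePointer<BigKey>::convert(k)));

    // Pointer key: the adapter passes the pointer through.
    printf("%p\n", (void *)MakePointer<BigKey*>::convert(pk));
    return 0;
}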


@ -272,6 +272,10 @@ static bool value_cmp_numeric_val_all(ZigValue *left, Cmp predicate, ZigValue *r
static void memoize_field_init_val(CodeGen *codegen, ZigType *container_type, TypeStructField *field);
static void value_to_bigfloat(BigFloat *out, ZigValue *val);
static Error ir_resolve_lazy_recurse(AstNode *source_node, ZigValue *val);
static Error ir_resolve_lazy_recurse_array(AstNode *source_node, ZigValue *val, size_t len);
static void ir_assert_impl(bool ok, IrInstGen *source_instruction, char const *file, unsigned int line) {
if (ok) return;
src_assert_impl(ok, source_instruction->source_node, file, line);
@ -554,7 +558,10 @@ static ZigValue *const_ptr_pointee_unchecked_no_isf(CodeGen *g, ZigValue *const_
case ConstPtrSpecialBaseStruct: {
ZigValue *struct_val = const_val->data.x_ptr.data.base_struct.struct_val;
expand_undef_struct(g, struct_val);
result = struct_val->data.x_struct.fields[const_val->data.x_ptr.data.base_struct.field_index];
size_t field_index = const_val->data.x_ptr.data.base_struct.field_index;
assert(struct_val->type->id == ZigTypeIdStruct);
assert(!struct_val->type->data.structure.fields[field_index]->is_comptime);
result = struct_val->data.x_struct.fields[field_index];
break;
}
case ConstPtrSpecialBaseErrorUnionCode:
@ -7018,6 +7025,17 @@ static IrInstGen *ir_analyze_struct_literal_to_struct(IrAnalyze *ira, Scope *sco
buf_sprintf("field '%s' declared here", buf_ptr(src_field->name)));
return ira->codegen->invalid_inst_gen;
}
if (dst_field->is_comptime) {
ErrorMsg *msg = ir_add_error_node(ira, source_node, buf_sprintf("field '%s' in struct '%s' is comptime, it cannot be assigned",
buf_ptr(src_field->name), buf_ptr(&wanted_type->name)));
if (wanted_type->data.structure.decl_node) {
add_error_note(ira->codegen, msg, wanted_type->data.structure.decl_node,
buf_sprintf("struct '%s' declared here", buf_ptr(&wanted_type->name)));
}
add_error_note(ira->codegen, msg, src_field->decl_node,
buf_sprintf("field '%s' declared here", buf_ptr(src_field->name)));
return ira->codegen->invalid_inst_gen;
}
src_assert(src_field->decl_node != nullptr, source_node);
AstNode *existing_assign_node = field_assign_nodes[dst_field->src_index];
@ -7062,6 +7080,7 @@ static IrInstGen *ir_analyze_struct_literal_to_struct(IrAnalyze *ira, Scope *sco
// look for a default field value
TypeStructField *field = wanted_type->data.structure.fields[i];
assert(!field->is_comptime); // field_assign_nodes[i] should be null for comptime fields
memoize_field_init_val(ira->codegen, wanted_type, field);
if (field->init_val == nullptr) {
ir_add_error_node(ira, source_node,
@ -7097,6 +7116,9 @@ static IrInstGen *ir_analyze_struct_literal_to_struct(IrAnalyze *ira, Scope *sco
for (size_t i = 0; i < actual_field_count; i += 1) {
TypeStructField *field = wanted_type->data.structure.fields[i];
if (field->is_comptime)
continue;
IrInstGen *field_ptr = ir_analyze_struct_field_ptr(ira, scope, source_node, field, result_loc_inst, wanted_type, true);
if (type_is_invalid(field_ptr->value->type))
return ira->codegen->invalid_inst_gen;
@ -12750,6 +12772,29 @@ static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, Scope *scope, AstNode *sour
bool cacheable = fn_eval_cacheable(exec_scope, return_type);
ZigValue *result = nullptr;
if (cacheable) {
// We are about to put ZigValues into a hash map. The hash of a lazy value and a
// fully resolved value must be equal, so we must resolve the lazy values here.
// The hash function asserts that none of the values are lazy.
{
Scope *scope = exec_scope;
while (scope) {
if (scope->id == ScopeIdVarDecl) {
ScopeVarDecl *var_scope = (ScopeVarDecl *)scope;
if ((err = ir_resolve_lazy_recurse(
var_scope->var->decl_node,
var_scope->var->const_value)))
{
return ira->codegen->invalid_inst_gen;
}
} else if (scope->id == ScopeIdFnDef) {
break;
} else {
zig_unreachable();
}
scope = scope->parent;
}
}
auto entry = ira->codegen->memoized_fn_eval_table.maybe_get(exec_scope);
if (entry)
result = entry->value;
@ -12935,6 +12980,18 @@ static IrInstGen *ir_analyze_fn_call(IrAnalyze *ira, Scope *scope, AstNode *sour
break;
}
// We are about to put ZigValues into a hash map. The hash of a lazy value and a
// fully resolved value must be equal, so we must resolve the lazy values here.
// The hash function asserts that none of the values are lazy.
for (size_t i = 0; i < generic_id->param_count; i += 1) {
ZigValue *generic_param = &generic_id->params[i];
if (generic_param->special != ConstValSpecialRuntime) {
if ((err = ir_resolve_lazy_recurse(source_node, generic_param))) {
return ira->codegen->invalid_inst_gen;
}
}
}
auto existing_entry = ira->codegen->generic_table.put_unique(generic_id, impl_fn);
if (existing_entry) {
// throw away all our work and use the existing function
@ -14873,6 +14930,8 @@ static IrInstGen *ir_analyze_struct_field_ptr(IrAnalyze *ira, Scope *scope, AstN
struct_val->data.x_struct.fields = alloc_const_vals_ptrs(ira->codegen, struct_type->data.structure.src_field_count);
struct_val->special = ConstValSpecialStatic;
for (size_t i = 0; i < struct_type->data.structure.src_field_count; i += 1) {
if (struct_type->data.structure.fields[i]->is_comptime)
continue;
ZigValue *field_val = struct_val->data.x_struct.fields[i];
field_val->special = ConstValSpecialUndef;
field_val->type = resolve_struct_field_type(ira->codegen,
@ -18247,7 +18306,9 @@ static ZigValue *get_const_field(IrAnalyze *ira, AstNode *source_node, ZigValue
{
Error err;
ensure_field_index(struct_value->type, name, field_index);
ZigValue *val = struct_value->data.x_struct.fields[field_index];
TypeStructField *field = struct_value->type->data.structure.fields[field_index];
ZigValue *val = field->is_comptime ? field->init_val :
struct_value->data.x_struct.fields[field_index];
if ((err = ir_resolve_const_val(ira->codegen, ira->new_irb.exec, source_node, val, UndefBad)))
return nullptr;
return val;
@ -22422,7 +22483,7 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ZigValue *val)
size_t src_field_count = val->type->data.structure.src_field_count;
for (size_t field_i = 0; field_i < src_field_count; field_i += 1) {
TypeStructField *struct_field = val->type->data.structure.fields[field_i];
if (struct_field->gen_index == SIZE_MAX)
if (struct_field->gen_index == SIZE_MAX || struct_field->is_comptime)
continue;
ZigValue *field_val = val->data.x_struct.fields[field_i];
size_t offset = struct_field->offset;
@ -22451,6 +22512,10 @@ static void buf_write_value_bytes(CodeGen *codegen, uint8_t *buf, ZigValue *val)
size_t used_bits = 0;
while (src_i < src_field_count) {
TypeStructField *field = val->type->data.structure.fields[src_i];
if (field->is_comptime) {
src_i += 1;
continue;
}
assert(field->gen_index != SIZE_MAX);
if (field->gen_index != gen_i)
break;
@ -22599,9 +22664,11 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
size_t src_field_count = val->type->data.structure.src_field_count;
val->data.x_struct.fields = alloc_const_vals_ptrs(codegen, src_field_count);
for (size_t field_i = 0; field_i < src_field_count; field_i += 1) {
TypeStructField *struct_field = val->type->data.structure.fields[field_i];
if (struct_field->is_comptime)
continue;
ZigValue *field_val = val->data.x_struct.fields[field_i];
field_val->special = ConstValSpecialStatic;
TypeStructField *struct_field = val->type->data.structure.fields[field_i];
field_val->type = struct_field->type_entry;
if (struct_field->gen_index == SIZE_MAX)
continue;
@ -22634,6 +22701,10 @@ static Error buf_read_value_bytes(IrAnalyze *ira, CodeGen *codegen, AstNode *sou
uint64_t bit_offset = 0;
while (src_i < src_field_count) {
TypeStructField *field = val->type->data.structure.fields[src_i];
if (field->is_comptime) {
src_i += 1;
continue;
}
src_assert(field->gen_index != SIZE_MAX, source_node);
if (field->gen_index != gen_i)
break;
@ -25515,6 +25586,95 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ZigValue *val) {
zig_unreachable();
}
static Error ir_resolve_lazy_recurse_array(AstNode *source_node, ZigValue *val, size_t len) {
Error err;
switch (val->data.x_array.special) {
case ConstArraySpecialUndef:
case ConstArraySpecialBuf:
return ErrorNone;
case ConstArraySpecialNone:
break;
}
ZigValue *elems = val->data.x_array.data.s_none.elements;
for (size_t i = 0; i < len; i += 1) {
if ((err = ir_resolve_lazy_recurse(source_node, &elems[i])))
return err;
}
return ErrorNone;
}
static Error ir_resolve_lazy_recurse(AstNode *source_node, ZigValue *val) {
Error err;
if ((err = ir_resolve_lazy_raw(source_node, val)))
return err;
assert(val->special != ConstValSpecialRuntime);
assert(val->special != ConstValSpecialLazy);
if (val->special != ConstValSpecialStatic)
return ErrorNone;
switch (val->type->id) {
case ZigTypeIdOpaque:
case ZigTypeIdEnum:
case ZigTypeIdMetaType:
case ZigTypeIdBool:
case ZigTypeIdVoid:
case ZigTypeIdComptimeFloat:
case ZigTypeIdInt:
case ZigTypeIdComptimeInt:
case ZigTypeIdEnumLiteral:
case ZigTypeIdErrorSet:
case ZigTypeIdUndefined:
case ZigTypeIdNull:
case ZigTypeIdPointer:
case ZigTypeIdFn:
case ZigTypeIdAnyFrame:
case ZigTypeIdBoundFn:
case ZigTypeIdInvalid:
case ZigTypeIdUnreachable:
case ZigTypeIdFloat:
return ErrorNone;
case ZigTypeIdFnFrame:
zig_panic("TODO: ir_resolve_lazy_recurse ZigTypeIdFnFrame");
case ZigTypeIdUnion: {
ConstUnionValue *union_val = &val->data.x_union;
return ir_resolve_lazy_recurse(source_node, union_val->payload);
}
case ZigTypeIdVector:
return ir_resolve_lazy_recurse_array(source_node, val, val->type->data.vector.len);
case ZigTypeIdArray:
return ir_resolve_lazy_recurse_array(source_node, val, val->type->data.array.len);
case ZigTypeIdStruct:
for (size_t i = 0; i < val->type->data.structure.src_field_count; i += 1) {
ZigValue *field = val->data.x_struct.fields[i];
if (val->type->data.structure.fields[i]->is_comptime) {
// comptime struct fields do not need to be resolved because
// they are not part of the value.
continue;
}
if ((err = ir_resolve_lazy_recurse(source_node, field)))
return err;
}
return ErrorNone;
case ZigTypeIdOptional:
if (get_src_ptr_type(val->type) != nullptr)
return ErrorNone;
if (val->data.x_optional == nullptr)
return ErrorNone;
return ir_resolve_lazy_recurse(source_node, val->data.x_optional);
case ZigTypeIdErrorUnion: {
bool is_err = val->data.x_err_union.error_set->data.x_err_set != nullptr;
if (is_err) {
return ir_resolve_lazy_recurse(source_node, val->data.x_err_union.error_set);
} else {
return ir_resolve_lazy_recurse(source_node, val->data.x_err_union.payload);
}
}
}
zig_unreachable();
}
Error ir_resolve_lazy(CodeGen *codegen, AstNode *source_node, ZigValue *val) {
Error err;
if ((err = ir_resolve_lazy_raw(source_node, val))) {
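
The two new ir_resolve_lazy_recurse call sites above enforce the invariant stated in their comments: a value must be fully resolved before it is used as a memoization key, otherwise the lazy and the resolved form of the same value would hash into different buckets. A generic sketch of that invariant follows; LazyInt, resolve, and lazy_int_hash are invented for illustration and are not stage1 APIs:

// Illustration of the "resolve before hashing" invariant from the comments
// above. LazyInt and the helpers below are invented; they are not stage1 APIs.
#include <cassert>
#include <cstdint>
#include <cstdio>

struct LazyInt {
    bool resolved;
    int (*compute)(void);  // pending computation, valid while !resolved
    int value;             // valid only once resolved
};

static void resolve(LazyInt *v) {
    if (!v->resolved) {
        v->value = v->compute();
        v->resolved = true;
        v->compute = nullptr;
    }
}

static uint32_t lazy_int_hash(LazyInt const *v) {
    // Mirrors the stage1 hash functions, which assert that no value is lazy:
    // hashing the unresolved form would disagree with the resolved form.
    assert(v->resolved);
    return (uint32_t)v->value * 0x01000193U;
}

int main(void) {
    LazyInt v = {false, [](){ return 42; }, 0};
    resolve(&v);  // resolve *before* the value becomes a memo-table key
    printf("%08x\n", (unsigned)lazy_int_hash(&v));
    return 0;
}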


@ -21,29 +21,6 @@ void zig_panic(const char *format, ...) {
abort();
}
uint32_t int_hash(int i) {
return (uint32_t)(i % UINT32_MAX);
}
bool int_eq(int a, int b) {
return a == b;
}
uint32_t uint64_hash(uint64_t i) {
return (uint32_t)(i % UINT32_MAX);
}
bool uint64_eq(uint64_t a, uint64_t b) {
return a == b;
}
uint32_t ptr_hash(const void *ptr) {
return (uint32_t)(((uintptr_t)ptr) % UINT32_MAX);
}
bool ptr_eq(const void *a, const void *b) {
return a == b;
}
// Ported from std/mem.zig.
bool SplitIterator_isSplitByte(SplitIterator *self, uint8_t byte) {
for (size_t i = 0; i < self->split_bytes.len; i += 1) {


@ -129,13 +129,6 @@ static inline uint64_t round_to_next_power_of_2(uint64_t x) {
return x + 1;
}
uint32_t int_hash(int i);
bool int_eq(int a, int b);
uint32_t uint64_hash(uint64_t i);
bool uint64_eq(uint64_t a, uint64_t b);
uint32_t ptr_hash(const void *ptr);
bool ptr_eq(const void *a, const void *b);
static inline uint8_t log2_u64(uint64_t x) {
return (63 - clzll(x));
}


@ -265,6 +265,7 @@ int main(int argc, char **argv) {
const char *override_lib_dir = nullptr;
const char *mcpu = nullptr;
bool single_threaded = false;
bool is_test_build = false;
for (int i = 1; i < argc; i += 1) {
char *arg = argv[i];
@ -272,6 +273,8 @@ int main(int argc, char **argv) {
if (arg[0] == '-') {
if (strcmp(arg, "--") == 0) {
fprintf(stderr, "Unexpected end-of-parameter mark: %s\n", arg);
} else if (strcmp(arg, "--test") == 0) {
is_test_build = true;
} else if (strcmp(arg, "-ODebug") == 0) {
optimize_mode = BuildModeDebug;
} else if (strcmp(arg, "-OReleaseFast") == 0) {
@ -446,7 +449,7 @@ int main(int argc, char **argv) {
nullptr, 0,
in_file, strlen(in_file),
override_lib_dir, strlen(override_lib_dir),
&target, false);
&target, is_test_build);
stage1->main_progress_node = root_progress_node;
stage1->root_name_ptr = out_name;