Commit 928ce5e326: Merge remote-tracking branch 'origin/master' into llvm9
(mirror of https://github.com/ziglang/zig.git)
@ -47,6 +47,19 @@ struct ResultLocPeer;
struct ResultLocPeerParent;
struct ResultLocBitCast;

enum PtrLen {
PtrLenUnknown,
PtrLenSingle,
PtrLenC,
};

enum UndefAllowed {
UndefOk,
UndefBad,
LazyOkNoUndef,
LazyOk,
};

enum X64CABIClass {
X64CABIClass_Unknown,
X64CABIClass_MEMORY,
@ -69,9 +82,9 @@ struct IrExecutable {
IrExecutable *source_exec;
IrAnalyze *analysis;
Scope *begin_scope;
ErrorMsg *first_err_trace_msg;
ZigList<Tld *> tld_list;

bool invalid;
bool is_inline;
bool is_generic_instantiation;
bool need_err_code_spill;
@ -255,6 +268,7 @@ enum ConstValSpecial {
ConstValSpecialRuntime,
ConstValSpecialStatic,
ConstValSpecialUndef,
ConstValSpecialLazy,
};

enum RuntimeHintErrorUnion {
@ -291,6 +305,82 @@ struct ConstGlobalRefs {
uint32_t align;
};

enum LazyValueId {
LazyValueIdInvalid,
LazyValueIdAlignOf,
LazyValueIdPtrType,
LazyValueIdOptType,
LazyValueIdSliceType,
LazyValueIdFnType,
LazyValueIdErrUnionType,
};

struct LazyValue {
LazyValueId id;
};

struct LazyValueAlignOf {
LazyValue base;

IrAnalyze *ira;
IrInstruction *target_type;
};

struct LazyValueSliceType {
LazyValue base;

IrAnalyze *ira;
IrInstruction *elem_type;
IrInstruction *align_inst; // can be null

bool is_const;
bool is_volatile;
bool is_allowzero;
};

struct LazyValuePtrType {
LazyValue base;

IrAnalyze *ira;
IrInstruction *elem_type;
IrInstruction *align_inst; // can be null

PtrLen ptr_len;
uint32_t bit_offset_in_host;

uint32_t host_int_bytes;
bool is_const;
bool is_volatile;
bool is_allowzero;
};

struct LazyValueOptType {
LazyValue base;

IrAnalyze *ira;
IrInstruction *payload_type;
};

struct LazyValueFnType {
LazyValue base;

IrAnalyze *ira;
AstNode *proto_node;
IrInstruction **param_types;
IrInstruction *align_inst; // can be null
IrInstruction *return_type;

bool is_generic;
};

struct LazyValueErrUnionType {
LazyValue base;

IrAnalyze *ira;
IrInstruction *err_set_type;
IrInstruction *payload_type;
};

struct ConstExprValue {
ZigType *type;
ConstValSpecial special;
@ -318,6 +408,7 @@ struct ConstExprValue {
ConstPtrValue x_ptr;
ConstArgTuple x_arg_tuple;
Buf *x_enum_literal;
LazyValue *x_lazy;

// populated if special == ConstValSpecialRuntime
RuntimeHintErrorUnion rh_error_union;
@ -364,6 +455,7 @@ enum TldResolution {
TldResolutionUnresolved,
TldResolutionResolving,
TldResolutionInvalid,
TldResolutionOkLazy,
TldResolutionOk,
};

@ -420,10 +512,12 @@ struct TypeEnumField {

struct TypeUnionField {
Buf *name;
ZigType *type_entry; // available after ResolveStatusSizeKnown
ConstExprValue *type_val; // available after ResolveStatusZeroBitsKnown
TypeEnumField *enum_field;
ZigType *type_entry;
AstNode *decl_node;
uint32_t gen_index;
uint32_t align;
};

enum NodeType {
@ -849,6 +943,8 @@ struct AstNodeStructField {
Buf *name;
AstNode *type;
AstNode *value;
// populated if the "align(A)" is present
AstNode *align_expr;
};

struct AstNodeStringLiteral {
@ -944,6 +1040,7 @@ struct AstNodeEnumLiteral {

struct AstNode {
enum NodeType type;
bool already_traced_this_node;
size_t line;
size_t column;
ZigType *owner;
@ -1039,12 +1136,6 @@ struct FnTypeId {
uint32_t fn_type_id_hash(FnTypeId*);
bool fn_type_id_eql(FnTypeId *a, FnTypeId *b);

enum PtrLen {
PtrLenUnknown,
PtrLenSingle,
PtrLenC,
};

struct ZigTypePointer {
ZigType *child_type;
ZigType *slice_parent;
@ -1055,6 +1146,7 @@ struct ZigTypePointer {
bool is_const;
bool is_volatile;
bool allow_zero;
bool resolve_loop_flag_zero_bits;
};

struct ZigTypeInt {
@ -1073,7 +1165,8 @@ struct ZigTypeArray {

struct TypeStructField {
Buf *name;
ZigType *type_entry;
ZigType *type_entry; // available after ResolveStatusSizeKnown
ConstExprValue *type_val; // available after ResolveStatusZeroBitsKnown
size_t src_index;
size_t gen_index;
size_t offset; // byte offset from beginning of struct
@ -1129,15 +1222,16 @@ struct ZigTypeStruct {
ResolveStatus resolve_status;

bool is_slice;
bool resolve_loop_flag; // set this flag temporarily to detect infinite loops
bool reported_infinite_err;
// whether any of the fields require comptime
// known after ResolveStatusZeroBitsKnown
bool requires_comptime;
bool resolve_loop_flag_zero_bits;
bool resolve_loop_flag_other;
};

struct ZigTypeOptional {
ZigType *child_type;
ResolveStatus resolve_status;
};

struct ZigTypeErrorUnion {
@ -1155,26 +1249,20 @@ struct ZigTypeErrorSet {

struct ZigTypeEnum {
AstNode *decl_node;
ContainerLayout layout;
uint32_t src_field_count;
TypeEnumField *fields;
bool is_invalid; // true if any fields are invalid
ZigType *tag_int_type;

ScopeDecls *decls_scope;

// set this flag temporarily to detect infinite loops
bool embedded_in_current;
bool reported_infinite_err;
// whether we've finished resolving it
bool complete;

bool zero_bits_loop_flag;
bool zero_bits_known;

LLVMValueRef name_function;

HashMap<Buf *, TypeEnumField *, buf_hash, buf_eql_buf> fields_by_name;
uint32_t src_field_count;

ContainerLayout layout;
ResolveStatus resolve_status;

bool resolve_loop_flag;
};

uint32_t type_ptr_hash(const ZigType *ptr);
@ -1187,7 +1275,7 @@ struct ZigTypeUnion {
HashMap<Buf *, TypeUnionField *, buf_hash, buf_eql_buf> fields_by_name;
ZigType *tag_type; // always an enum or null
LLVMTypeRef union_llvm_type;
ZigType *most_aligned_union_member;
TypeUnionField *most_aligned_union_member;
size_t gen_union_index;
size_t gen_tag_index;
size_t union_abi_size;
@ -1199,11 +1287,11 @@ struct ZigTypeUnion {
ResolveStatus resolve_status;

bool have_explicit_tag_type;
bool resolve_loop_flag; // set this flag temporarily to detect infinite loops
bool reported_infinite_err;
// whether any of the fields require comptime
// the value is not valid until zero_bits_known == true
bool requires_comptime;
bool resolve_loop_flag_zero_bits;
bool resolve_loop_flag_other;
};

struct FnGenParamInfo {
@ -1715,6 +1803,7 @@ struct CodeGen {
//////////////////////////// Runtime State
LLVMModuleRef module;
ZigList<ErrorMsg*> errors;
ErrorMsg *trace_err;
LLVMBuilderRef builder;
ZigLLVMDIBuilder *dbuilder;
ZigLLVMDICompileUnit *compile_unit;
@ -1767,7 +1856,6 @@ struct CodeGen {
ZigList<Tld *> resolve_queue;
size_t resolve_queue_index;
ZigList<TimeEvent> timing_events;
ZigList<AstNode *> tld_ref_source_node_stack;
ZigList<ZigFn *> inline_fns;
ZigList<ZigFn *> test_fns;
ZigList<ErrorTableEntry *> errors_by_index;
@ -1852,7 +1940,6 @@ struct CodeGen {
ZigFn *main_fn;
ZigFn *panic_fn;
TldFn *panic_tld_fn;
AstNode *root_export_decl;

WantPIC want_pic;
WantStackCheck want_stack_check;
@ -1940,7 +2027,7 @@ struct CodeGen {
Buf *zig_lib_dir;
Buf *zig_std_dir;
Buf *dynamic_linker_path;
Buf *version_script_path;
Buf *version_script_path;

const char **llvm_argv;
size_t llvm_argv_len;
@ -3657,13 +3744,13 @@ enum ResultLocId {
ResultLocIdBitCast,
};

// Additions to this struct may need to be handled in
// Additions to this struct may need to be handled in
// ir_reset_result
struct ResultLoc {
ResultLocId id;
bool written;
bool allow_write_through_const;
IrInstruction *resolved_loc; // result ptr
IrInstruction *resolved_loc; // result ptr
IrInstruction *source_instruction;
IrInstruction *gen_instruction; // value to store to the result loc
ZigType *implicit_elem_type;
src/analyze.cpp: 1354 lines changed (diff suppressed because it is too large)
@ -11,10 +11,9 @@
#include "all_types.hpp"

void semantic_analyze(CodeGen *g);
ErrorMsg *add_node_error(CodeGen *g, const AstNode *node, Buf *msg);
ErrorMsg *add_node_error(CodeGen *g, AstNode *node, Buf *msg);
ErrorMsg *add_token_error(CodeGen *g, ZigType *owner, Token *token, Buf *msg);
ErrorMsg *add_error_note(CodeGen *g, ErrorMsg *parent_msg, const AstNode *node, Buf *msg);
void emit_error_notes_for_ref_stack(CodeGen *g, ErrorMsg *msg);
ZigType *new_type_table_entry(ZigTypeId id);
ZigType *get_fn_frame_type(CodeGen *g, ZigFn *fn);
ZigType *get_pointer_to_type(CodeGen *g, ZigType *child_type, bool is_const);
@ -59,7 +58,7 @@ ZigType *add_source_file(CodeGen *g, ZigPackage *package, Buf *abs_full_path, Bu
ZigVar *find_variable(CodeGen *g, Scope *orig_context, Buf *name, ScopeFnDef **crossed_fndef_scope);
Tld *find_decl(CodeGen *g, Scope *scope, Buf *name);
Tld *find_container_decl(CodeGen *g, ScopeDecls *decls_scope, Buf *name);
void resolve_top_level_decl(CodeGen *g, Tld *tld, AstNode *source_node);
void resolve_top_level_decl(CodeGen *g, Tld *tld, AstNode *source_node, bool allow_lazy);

ZigType *get_src_ptr_type(ZigType *type);
ZigType *get_codegen_ptr_type(ZigType *type);
@ -71,7 +70,6 @@ bool type_is_complete(ZigType *type_entry);
bool type_is_resolved(ZigType *type_entry, ResolveStatus status);
bool type_is_invalid(ZigType *type_entry);
bool type_is_global_error_set(ZigType *err_set_type);
Error resolve_container_type(CodeGen *g, ZigType *type_entry);
ScopeDecls *get_container_scope(ZigType *type_entry);
TypeStructField *find_struct_type_field(ZigType *type_entry, Buf *name);
TypeEnumField *find_enum_type_field(ZigType *enum_type, Buf *name);
@ -95,7 +93,6 @@ ZigFn *create_fn(CodeGen *g, AstNode *proto_node);
ZigFn *create_fn_raw(CodeGen *g, FnInline inline_value);
void init_fn_type_id(FnTypeId *fn_type_id, AstNode *proto_node, size_t param_count_alloc);
AstNode *get_param_decl_node(ZigFn *fn_entry, size_t index);
Error ATTRIBUTE_MUST_USE ensure_complete_type(CodeGen *g, ZigType *type_entry);
Error ATTRIBUTE_MUST_USE type_resolve(CodeGen *g, ZigType *type_entry, ResolveStatus status);
void complete_enum(CodeGen *g, ZigType *enum_type);
bool ir_get_var_is_comptime(ZigVar *var);
@ -169,12 +166,11 @@ ConstExprValue *create_const_slice(CodeGen *g, ConstExprValue *array_val, size_t
void init_const_arg_tuple(CodeGen *g, ConstExprValue *const_val, size_t arg_index_start, size_t arg_index_end);
ConstExprValue *create_const_arg_tuple(CodeGen *g, size_t arg_index_start, size_t arg_index_end);

void init_const_undefined(CodeGen *g, ConstExprValue *const_val);

ConstExprValue *create_const_vals(size_t count);

ZigType *make_int_type(CodeGen *g, bool is_signed, uint32_t size_in_bits);
void expand_undef_array(CodeGen *g, ConstExprValue *const_val);
void expand_undef_struct(CodeGen *g, ConstExprValue *const_val);
void update_compile_var(CodeGen *g, Buf *name, ConstExprValue *value);

const char *type_id_name(ZigTypeId id);
@ -244,9 +240,14 @@ void add_cc_args(CodeGen *g, ZigList<const char *> &args, const char *out_dep_pa

void src_assert(bool ok, AstNode *source_node);
bool is_container(ZigType *type_entry);
ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry, Buf *type_name);
ConstExprValue *analyze_const_value(CodeGen *g, Scope *scope, AstNode *node, ZigType *type_entry,
Buf *type_name, UndefAllowed undef);

void resolve_llvm_types_fn(CodeGen *g, ZigFn *fn);
bool fn_is_async(ZigFn *fn);

Error type_val_resolve_abi_align(CodeGen *g, ConstExprValue *type_val, uint32_t *abi_align);
ZigType *resolve_union_field_type(CodeGen *g, TypeUnionField *union_field);
ZigType *resolve_struct_field_type(CodeGen *g, TypeStructField *struct_field);

#endif
@ -15,6 +15,8 @@
#include <limits>
#include <algorithm>

static uint64_t bigint_as_unsigned(const BigInt *bigint);

static void bigint_normalize(BigInt *dest) {
const uint64_t *digits = bigint_ptr(dest);

@ -1660,7 +1662,7 @@ size_t bigint_clz(const BigInt *bi, size_t bit_count) {
return count;
}

uint64_t bigint_as_unsigned(const BigInt *bigint) {
static uint64_t bigint_as_unsigned(const BigInt *bigint) {
assert(!bigint->is_negative);
if (bigint->digit_count == 0) {
return 0;
@ -1671,6 +1673,25 @@ uint64_t bigint_as_unsigned(const BigInt *bigint) {
}
}

uint64_t bigint_as_u64(const BigInt *bigint)
{
return bigint_as_unsigned(bigint);
}

uint32_t bigint_as_u32(const BigInt *bigint) {
uint64_t value64 = bigint_as_unsigned(bigint);
uint32_t value32 = (uint32_t)value64;
assert (value64 == value32);
return value32;
}

size_t bigint_as_usize(const BigInt *bigint) {
uint64_t value64 = bigint_as_unsigned(bigint);
size_t valueUsize = (size_t)value64;
assert (value64 == valueUsize);
return valueUsize;
}

int64_t bigint_as_signed(const BigInt *bigint) {
if (bigint->digit_count == 0) {
return 0;
@ -36,7 +36,10 @@ void bigint_init_bigfloat(BigInt *dest, const BigFloat *op);
void bigint_init_data(BigInt *dest, const uint64_t *digits, size_t digit_count, bool is_negative);

// panics if number won't fit
uint64_t bigint_as_unsigned(const BigInt *bigint);
uint64_t bigint_as_u64(const BigInt *bigint);
uint32_t bigint_as_u32(const BigInt *bigint);
size_t bigint_as_usize(const BigInt *bigint);

int64_t bigint_as_signed(const BigInt *bigint);

static inline const uint64_t *bigint_ptr(const BigInt *bigint) {
@ -2872,7 +2872,7 @@ static void add_error_range_check(CodeGen *g, ZigType *err_set_type, ZigType *in
eval_min_max_value_int(g, int_type, &biggest_possible_err_val, true);

if (bigint_fits_in_bits(&biggest_possible_err_val, 64, false) &&
bigint_as_unsigned(&biggest_possible_err_val) < g->errors_by_index.length)
bigint_as_usize(&biggest_possible_err_val) < g->errors_by_index.length)
{
ok_bit = neq_zero_bit;
} else {
@ -3052,8 +3052,10 @@ static LLVMValueRef ir_render_ptr_of_array_to_slice(CodeGen *g, IrExecutable *ex
IrInstructionPtrOfArrayToSlice *instruction)
{
ZigType *actual_type = instruction->operand->value.type;
LLVMValueRef expr_val = ir_llvm_value(g, instruction->operand);
assert(expr_val);
ZigType *slice_type = instruction->base.value.type;
ZigType *slice_ptr_type = slice_type->data.structure.fields[slice_ptr_index].type_entry;
size_t ptr_index = slice_type->data.structure.fields[slice_ptr_index].gen_index;
size_t len_index = slice_type->data.structure.fields[slice_len_index].gen_index;

LLVMValueRef result_loc = ir_llvm_value(g, instruction->result_loc);

@ -3061,15 +3063,21 @@ static LLVMValueRef ir_render_ptr_of_array_to_slice(CodeGen *g, IrExecutable *ex
ZigType *array_type = actual_type->data.pointer.child_type;
assert(array_type->id == ZigTypeIdArray);

LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, slice_ptr_index, "");
LLVMValueRef indices[] = {
LLVMConstNull(g->builtin_types.entry_usize->llvm_type),
LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 0, false),
};
LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, "");
gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);
if (type_has_bits(actual_type)) {
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, ptr_index, "");
LLVMValueRef indices[] = {
LLVMConstNull(g->builtin_types.entry_usize->llvm_type),
LLVMConstInt(g->builtin_types.entry_usize->llvm_type, 0, false),
};
LLVMValueRef expr_val = ir_llvm_value(g, instruction->operand);
LLVMValueRef slice_start_ptr = LLVMBuildInBoundsGEP(g->builder, expr_val, indices, 2, "");
gen_store_untyped(g, slice_start_ptr, ptr_field_ptr, 0, false);
} else if (ir_want_runtime_safety(g, &instruction->base)) {
LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, ptr_index, "");
gen_undef_init(g, slice_ptr_type->abi_align, slice_ptr_type, ptr_field_ptr);
}

LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, slice_len_index, "");
LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, result_loc, len_index, "");
LLVMValueRef len_value = LLVMConstInt(g->builtin_types.entry_usize->llvm_type,
array_type->data.array.len, false);
gen_store_untyped(g, len_value, len_field_ptr, 0, false);
@ -3386,6 +3394,8 @@ static bool value_is_all_undef_array(ConstExprValue *const_val, size_t len) {

static bool value_is_all_undef(ConstExprValue *const_val) {
switch (const_val->special) {
case ConstValSpecialLazy:
zig_unreachable();
case ConstValSpecialRuntime:
return false;
case ConstValSpecialUndef:
@ -3525,6 +3535,8 @@ static LLVMValueRef ir_render_store_ptr(CodeGen *g, IrExecutable *executable, Ir
}

static LLVMValueRef ir_render_var_ptr(CodeGen *g, IrExecutable *executable, IrInstructionVarPtr *instruction) {
if (instruction->base.value.special != ConstValSpecialRuntime)
return ir_llvm_value(g, &instruction->base);
ZigVar *var = instruction->var;
if (type_has_bits(var->var_type)) {
assert(var->value_ref);
@ -6041,6 +6053,7 @@ static LLVMValueRef gen_const_ptr_union_recursive(CodeGen *g, ConstExprValue *un

static LLVMValueRef pack_const_int(CodeGen *g, LLVMTypeRef big_int_type_ref, ConstExprValue *const_val) {
switch (const_val->special) {
case ConstValSpecialLazy:
case ConstValSpecialRuntime:
zig_unreachable();
case ConstValSpecialUndef:
@ -6300,6 +6313,8 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
assert(type_has_bits(type_entry));

switch (const_val->special) {
case ConstValSpecialLazy:
zig_unreachable();
case ConstValSpecialRuntime:
zig_unreachable();
case ConstValSpecialUndef:
@ -6563,7 +6578,7 @@ static LLVMValueRef gen_const_val(CodeGen *g, ConstExprValue *const_val, const c
uint64_t pad_bytes = type_entry->data.unionation.union_abi_size - field_type_bytes;
LLVMValueRef correctly_typed_value = gen_const_val(g, payload_value, "");
make_unnamed_struct = is_llvm_value_unnamed_type(g, payload_value->type, correctly_typed_value) ||
payload_value->type != type_entry->data.unionation.most_aligned_union_member;
payload_value->type != type_entry->data.unionation.most_aligned_union_member->type_entry;

{
if (pad_bytes == 0) {
@ -8905,7 +8920,7 @@ static void gen_root_source(CodeGen *g) {
}
Tld *panic_tld = find_decl(g, &get_container_scope(import_with_panic)->base, buf_create_from_str("panic"));
assert(panic_tld != nullptr);
resolve_top_level_decl(g, panic_tld, nullptr);
resolve_top_level_decl(g, panic_tld, nullptr, false);
}
src/ir.cpp: 1576 lines changed (diff suppressed because it is too large)
@ -16,7 +16,9 @@ bool ir_gen_fn(CodeGen *g, ZigFn *fn_entry);
ConstExprValue *ir_eval_const_value(CodeGen *codegen, Scope *scope, AstNode *node,
ZigType *expected_type, size_t *backward_branch_count, size_t *backward_branch_quota,
ZigFn *fn_entry, Buf *c_import_buf, AstNode *source_node, Buf *exec_name,
IrExecutable *parent_exec, AstNode *expected_type_source_node);
IrExecutable *parent_exec, AstNode *expected_type_source_node, UndefAllowed undef);

Error ir_resolve_lazy(CodeGen *codegen, AstNode *source_node, ConstExprValue *val);

ZigType *ir_analyze(CodeGen *g, IrExecutable *old_executable, IrExecutable *new_executable,
ZigType *expected_type, AstNode *expected_type_source_node);
@ -28,6 +30,4 @@ ConstExprValue *const_ptr_pointee(IrAnalyze *ira, CodeGen *codegen, ConstExprVal
AstNode *source_node);
const char *float_op_to_name(BuiltinFnId op, bool llvm_name);

void ir_add_analysis_trace(IrAnalyze *ira, ErrorMsg *err_msg, Buf *text);

#endif
src/os.cpp: 34 lines changed
@ -1125,29 +1125,27 @@ Error os_get_cwd(Buf *out_cwd) {
#endif
}

#if defined(ZIG_OS_WINDOWS)
#define is_wprefix(s, prefix) \
(wcsncmp((s), (prefix), sizeof(prefix) / sizeof(WCHAR) - 1) == 0)
static bool is_stderr_cyg_pty(void) {
HANDLE stderr_handle = GetStdHandle(STD_ERROR_HANDLE);
if (stderr_handle == INVALID_HANDLE_VALUE)
return false;

int size = sizeof(FILE_NAME_INFO) + sizeof(WCHAR) * MAX_PATH;
FILE_NAME_INFO *nameinfo;
WCHAR *p = NULL;
bool ATTRIBUTE_MUST_USE os_is_cygwin_pty(int fd) {
#if defined(ZIG_OS_WINDOWS)
HANDLE handle = (HANDLE)_get_osfhandle(fd);

// Cygwin/msys's pty is a pipe.
if (GetFileType(stderr_handle) != FILE_TYPE_PIPE) {
return 0;
if (handle == INVALID_HANDLE_VALUE || GetFileType(handle) != FILE_TYPE_PIPE) {
return false;
}
nameinfo = (FILE_NAME_INFO *)allocate<char>(size);

int size = sizeof(FILE_NAME_INFO) + sizeof(WCHAR) * MAX_PATH;
WCHAR *p = NULL;

FILE_NAME_INFO *nameinfo = (FILE_NAME_INFO *)allocate<char>(size);
if (nameinfo == NULL) {
return 0;
return false;
}
// Check the name of the pipe:
// '\{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master'
if (GetFileInformationByHandleEx(stderr_handle, FileNameInfo, nameinfo, size)) {
if (GetFileInformationByHandleEx(handle, FileNameInfo, nameinfo, size)) {
nameinfo->FileName[nameinfo->FileNameLength / sizeof(WCHAR)] = L'\0';
p = nameinfo->FileName;
if (is_wprefix(p, L"\\cygwin-")) { /* Cygwin */
@ -1180,12 +1178,14 @@ static bool is_stderr_cyg_pty(void) {
}
free(nameinfo);
return (p != NULL);
}
#else
return false;
#endif
}

bool os_stderr_tty(void) {
#if defined(ZIG_OS_WINDOWS)
return _isatty(_fileno(stderr)) != 0 || is_stderr_cyg_pty();
return _isatty(fileno(stderr)) != 0 || os_is_cygwin_pty(fileno(stderr));
#elif defined(ZIG_OS_POSIX)
return isatty(STDERR_FILENO) != 0;
#else
@ -1486,7 +1486,7 @@ WORD original_console_attributes = FOREGROUND_RED|FOREGROUND_GREEN|FOREGROUND_BL

void os_stderr_set_color(TermColor color) {
#if defined(ZIG_OS_WINDOWS)
if (is_stderr_cyg_pty()) {
if (os_stderr_tty()) {
set_color_posix(color);
return;
}
@ -11,6 +11,7 @@
#include "list.hpp"
#include "buffer.hpp"
#include "error.hpp"
#include "target.hpp"
#include "zig_llvm.h"
#include "windows_sdk.h"

@ -88,6 +89,11 @@ struct Termination {
#define OsFile int
#endif

#if defined(ZIG_OS_WINDOWS)
#undef fileno
#define fileno _fileno
#endif

struct OsTimeStamp {
uint64_t sec;
uint64_t nsec;
@ -152,6 +158,8 @@ Error ATTRIBUTE_MUST_USE os_get_win32_ucrt_include_path(ZigWindowsSDK *sdk, Buf
Error ATTRIBUTE_MUST_USE os_get_win32_ucrt_lib_path(ZigWindowsSDK *sdk, Buf *output_buf, ZigLLVM_ArchType platform_type);
Error ATTRIBUTE_MUST_USE os_get_win32_kern32_path(ZigWindowsSDK *sdk, Buf *output_buf, ZigLLVM_ArchType platform_type);

bool ATTRIBUTE_MUST_USE os_is_cygwin_pty(int fd);

Error ATTRIBUTE_MUST_USE os_self_exe_shared_libs(ZigList<Buf *> &paths);

#endif
@ -782,24 +782,26 @@ static AstNode *ast_parse_var_decl(ParseContext *pc) {
return res;
}

// ContainerField <- IDENTIFIER (COLON TypeExpr)? (EQUAL Expr)?
// ContainerField <- IDENTIFIER (COLON TypeExpr ByteAlign?)? (EQUAL Expr)?
static AstNode *ast_parse_container_field(ParseContext *pc) {
Token *identifier = eat_token_if(pc, TokenIdSymbol);
if (identifier == nullptr)
return nullptr;

AstNode *type_expr = nullptr;
if (eat_token_if(pc, TokenIdColon) != nullptr)
if (eat_token_if(pc, TokenIdColon) != nullptr) {
type_expr = ast_expect(pc, ast_parse_type_expr);
}
AstNode *align_expr = ast_parse_byte_align(pc);
AstNode *expr = nullptr;
if (eat_token_if(pc, TokenIdEq) != nullptr)
expr = ast_expect(pc, ast_parse_expr);

AstNode *res = ast_create_node(pc, NodeTypeStructField, identifier);
res->data.struct_field.name = token_buf(identifier);
res->data.struct_field.type = type_expr;
res->data.struct_field.value = expr;
res->data.struct_field.align_expr = align_expr;
return res;
}
@ -504,6 +504,16 @@ Error target_parse_glibc_version(ZigGLibCVersion *glibc_ver, const char *text) {
return ErrorNone;
}

static ZigLLVM_EnvironmentType target_get_win32_abi() {
FILE* files[] = { stdin, stdout, stderr, nullptr };
for (int i = 0; files[i] != nullptr; i++) {
if (os_is_cygwin_pty(fileno(files[i]))) {
return ZigLLVM_GNU;
}
}
return ZigLLVM_MSVC;
}

void get_native_target(ZigTarget *target) {
// first zero initialize
*target = {};
@ -518,6 +528,9 @@ void get_native_target(ZigTarget *target) {
&target->abi,
&oformat);
target->os = get_zig_os_type(os_type);
if (target->os == OsWindows) {
target->abi = target_get_win32_abi();
}
target->is_native = true;
if (target->abi == ZigLLVM_UnknownEnvironment) {
target->abi = target_default_abi(target->arch, target->os);
@ -1651,7 +1664,7 @@ ZigLLVM_EnvironmentType target_default_abi(ZigLLVM_ArchType arch, Os os) {
return ZigLLVM_GNU;
case OsUefi:
case OsWindows:
return ZigLLVM_MSVC;
return ZigLLVM_MSVC;
case OsLinux:
case OsWASI:
case OsEmscripten:
@ -841,7 +841,7 @@ void tokenize(Buf *buf, Tokenization *out) {
case TokenizeStateSawAmpersand:
switch (c) {
case '&':
tokenize_error(&t, "`&&` is invalid. Note that `and` is boolean AND.");
tokenize_error(&t, "`&&` is invalid. Note that `and` is boolean AND");
break;
case '=':
set_token_id(&t, t.cur_tok, TokenIdBitAndEq);
@ -15,9 +15,9 @@

#include "zig_llvm.h"

#if __GNUC__ >= 8
#if __GNUC__ >= 9
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wclass-memaccess"
#pragma GCC diagnostic ignored "-Winit-list-lifetime"
#endif

#include <llvm/Analysis/TargetLibraryInfo.h>
@ -50,7 +50,7 @@

#include <lld/Common/Driver.h>

#if __GNUC__ >= 8
#if __GNUC__ >= 9
#pragma GCC diagnostic pop
#endif
@ -6,20 +6,23 @@ const mem = std.mem;
const Allocator = mem.Allocator;

pub fn ArrayList(comptime T: type) type {
return AlignedArrayList(T, @alignOf(T));
return AlignedArrayList(T, null);
}

pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
pub fn AlignedArrayList(comptime T: type, comptime alignment: ?u29) type {
return struct {
const Self = @This();

/// Use toSlice instead of slicing this directly, because if you don't
/// specify the end position of the slice, this will potentially give
/// you uninitialized memory.
items: []align(A) T,
items: Slice,
len: usize,
allocator: *Allocator,

pub const Slice = if (alignment) |a| ([]align(a) T) else []T;
pub const SliceConst = if (alignment) |a| ([]align(a) const T) else []const T;

/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn init(allocator: *Allocator) Self {
return Self{
@ -33,11 +36,11 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
self.allocator.free(self.items);
}

pub fn toSlice(self: Self) []align(A) T {
pub fn toSlice(self: Self) Slice {
return self.items[0..self.len];
}

pub fn toSliceConst(self: Self) []align(A) const T {
pub fn toSliceConst(self: Self) SliceConst {
return self.items[0..self.len];
}

@ -69,7 +72,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
/// ArrayList takes ownership of the passed in slice. The slice must have been
/// allocated with `allocator`.
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn fromOwnedSlice(allocator: *Allocator, slice: []align(A) T) Self {
pub fn fromOwnedSlice(allocator: *Allocator, slice: Slice) Self {
return Self{
.items = slice,
.len = slice.len,
@ -78,7 +81,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
}

/// The caller owns the returned memory. ArrayList becomes empty.
pub fn toOwnedSlice(self: *Self) []align(A) T {
pub fn toOwnedSlice(self: *Self) Slice {
const allocator = self.allocator;
const result = allocator.shrink(self.items, self.len);
self.* = init(allocator);
@ -93,7 +96,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
self.items[n] = item;
}

pub fn insertSlice(self: *Self, n: usize, items: []align(A) const T) !void {
pub fn insertSlice(self: *Self, n: usize, items: SliceConst) !void {
try self.ensureCapacity(self.len + items.len);
self.len += items.len;

@ -141,7 +144,7 @@ pub fn AlignedArrayList(comptime T: type, comptime A: u29) type {
return self.swapRemove(i);
}

pub fn appendSlice(self: *Self, items: []align(A) const T) !void {
pub fn appendSlice(self: *Self, items: SliceConst) !void {
try self.ensureCapacity(self.len + items.len);
mem.copy(T, self.items[self.len..], items);
self.len += items.len;
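The hunks above replace AlignedArrayList's mandatory alignment parameter with an optional one; ArrayList(T) now passes null, so Slice collapses to a plain []T. A minimal usage sketch of the resulting API (illustrative only, not part of the commit):

const std = @import("std");

test "ArrayList slice type with null alignment" {
    // std.ArrayList(u32) is now AlignedArrayList(u32, null), so Slice == []u32.
    var list = std.ArrayList(u32).init(std.heap.direct_allocator);
    defer list.deinit();

    const data = [_]u32{ 1, 2, 3 };
    try list.appendSlice(data[0..]); // appendSlice takes SliceConst, here []const u32

    const s: []u32 = list.toSlice(); // no align(...) qualifier needed anymore
    std.testing.expect(s.len == 3);
}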
@ -1,8 +1,10 @@
// zig run benchmark.zig --release-fast --override-std-dir ..

const builtin = @import("builtin");
const std = @import("std");
const std = @import("../std.zig");
const time = std.time;
const Timer = time.Timer;
const crypto = @import("../crypto.zig");
const crypto = std.crypto;

const KiB = 1024;
const MiB = 1024 * KiB;
@ -14,7 +16,7 @@ const Crypto = struct {
name: []const u8,
};

const hashes = []Crypto{
const hashes = [_]Crypto{
Crypto{ .ty = crypto.Md5, .name = "md5" },
Crypto{ .ty = crypto.Sha1, .name = "sha1" },
Crypto{ .ty = crypto.Sha256, .name = "sha256" },
@ -45,7 +47,7 @@ pub fn benchmarkHash(comptime Hash: var, comptime bytes: comptime_int) !u64 {
return throughput;
}

const macs = []Crypto{
const macs = [_]Crypto{
Crypto{ .ty = crypto.Poly1305, .name = "poly1305" },
Crypto{ .ty = crypto.HmacMd5, .name = "hmac-md5" },
Crypto{ .ty = crypto.HmacSha1, .name = "hmac-sha1" },
@ -75,7 +77,7 @@ pub fn benchmarkMac(comptime Mac: var, comptime bytes: comptime_int) !u64 {
return throughput;
}

const exchanges = []Crypto{Crypto{ .ty = crypto.X25519, .name = "x25519" }};
const exchanges = [_]Crypto{Crypto{ .ty = crypto.X25519, .name = "x25519" }};

pub fn benchmarkKeyExchange(comptime DhKeyExchange: var, comptime exchange_count: comptime_int) !u64 {
std.debug.assert(DhKeyExchange.minimum_key_length >= DhKeyExchange.secret_length);
@ -135,13 +137,16 @@ pub fn main() !void {

var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
const args = try std.os.argsAlloc(&fixed.allocator);
const args = try std.process.argsAlloc(&fixed.allocator);

var filter: ?[]u8 = "";

var i: usize = 1;
while (i < args.len) : (i += 1) {
if (std.mem.eql(u8, args[i], "--seed")) {
if (std.mem.eql(u8, args[i], "--mode")) {
try stdout.print("{}\n", builtin.mode);
return;
} else if (std.mem.eql(u8, args[i], "--seed")) {
i += 1;
if (i == args.len) {
usage();
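Aside from the std.process.argsAlloc rename and the new --mode flag, the changes in this file are a mechanical migration from the old []T{ ... } array-literal syntax to [_]T{ ... }, which infers the length from the initializer. A tiny illustrative sketch (not from the commit):

const std = @import("std");

// Old form: `const names = [][]const u8{ ... };`
// New form: `_` asks the compiler to infer the array length.
const names = [_][]const u8{ "md5", "sha1", "sha256" };

test "inferred-length array literal" {
    std.testing.expect(names.len == 3);
}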
@ -269,8 +269,8 @@ pub const Blake2b512 = Blake2b(512);
fn Blake2b(comptime out_len: usize) type {
return struct {
const Self = @This();
const block_length = 128;
const digest_length = out_len / 8;
pub const block_length = 128;
pub const digest_length = out_len / 8;

const iv = [8]u64{
0x6a09e667f3bcc908,

@ -420,8 +420,8 @@ pub const Sha512 = Sha2_64(Sha512Params);
fn Sha2_64(comptime params: Sha2Params64) type {
return struct {
const Self = @This();
const block_length = 128;
const digest_length = params.out_len / 8;
pub const block_length = 128;
pub const digest_length = params.out_len / 8;

s: [8]u64,
// Streaming Cache
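The two hunks above only add pub to block_length and digest_length. Assuming the usual std.crypto re-export of Sha512 and its one-shot hash(msg, out) helper, that lets callers size output buffers from the declaration instead of hard-coding the digest size; a hypothetical sketch:

const std = @import("std");
const Sha512 = std.crypto.Sha512;

test "digest_length is visible to callers" {
    // 512-bit digest, so digest_length == 64; before this change the
    // declaration was private and callers had to hard-code the size.
    var out: [Sha512.digest_length]u8 = undefined;
    Sha512.hash("zig", out[0..]);
    std.testing.expect(out.len == 64);
}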
@ -719,6 +719,16 @@ pub const WatchEventId = enum {
Delete,
};

fn eqlString(a: []const u16, b: []const u16) bool {
if (a.len != b.len) return false;
if (a.ptr == b.ptr) return true;
return mem.compare(u16, a, b) == .Equal;
}

fn hashString(s: []const u16) u32 {
return @truncate(u32, std.hash.Wyhash.hash(0, @sliceToBytes(s)));
}

//pub const WatchEventError = error{
//    UserResourceLimitReached,
//    SystemResources,
@ -736,7 +746,7 @@ pub const WatchEventId = enum {
//    file_table: FileTable,
//    table_lock: event.Lock,
//
//    const FileTable = std.AutoHashMap([]const u8, *Put);
//    const FileTable = std.StringHashmap(*Put);
//    const Put = struct {
//        putter: anyframe,
//        value_ptr: *V,
@ -755,8 +765,8 @@ pub const WatchEventId = enum {
//        all_putters: std.atomic.Queue(anyframe),
//        ref_count: std.atomic.Int(usize),
//
//        const DirTable = std.AutoHashMap([]const u8, *Dir);
//        const FileTable = std.AutoHashMap([]const u16, V);
//        const DirTable = std.StringHashMap(*Dir);
//        const FileTable = std.HashMap([]const u16, V, hashString, eqlString);
//
//        const Dir = struct {
//            putter: anyframe,
@ -772,7 +782,7 @@ pub const WatchEventId = enum {
//    table_lock: event.Lock,
//
//    const WdTable = std.AutoHashMap(i32, Dir);
//    const FileTable = std.AutoHashMap([]const u8, V);
//    const FileTable = std.StringHashMap(V);
//
//    const Dir = struct {
//        dirname: []const u8,
@ -780,7 +790,7 @@ pub const WatchEventId = enum {
//    };
//    };
//
//    const FileToHandle = std.AutoHashMap([]const u8, anyframe);
//    const FileToHandle = std.StringHashMap(anyframe);
//
//    const Self = @This();
//
@ -1,11 +1,79 @@
const std = @import("std");
const builtin = @import("builtin");
const assert = std.debug.assert;
const mem = std.mem;
const meta = std.meta;

/// Describes how pointer types should be hashed.
pub const HashStrategy = enum {
/// Do not follow pointers, only hash their value.
Shallow,

/// Follow pointers, hash the pointee content.
/// Only dereferences one level, ie. it is changed into .Shallow when a
/// pointer type is encountered.
Deep,

/// Follow pointers, hash the pointee content.
/// Dereferences all pointers encountered.
/// Assumes no cycle.
DeepRecursive,
};

/// Helper function to hash a pointer and mutate the strategy if needed.
pub fn hashPointer(hasher: var, key: var, comptime strat: HashStrategy) void {
const info = @typeInfo(@typeOf(key));

switch (info.Pointer.size) {
builtin.TypeInfo.Pointer.Size.One => switch (strat) {
.Shallow => hash(hasher, @ptrToInt(key), .Shallow),
.Deep => hash(hasher, key.*, .Shallow),
.DeepRecursive => hash(hasher, key.*, .DeepRecursive),
},

builtin.TypeInfo.Pointer.Size.Slice => switch (strat) {
.Shallow => {
hashPointer(hasher, key.ptr, .Shallow);
hash(hasher, key.len, .Shallow);
},
.Deep => hashArray(hasher, key, .Shallow),
.DeepRecursive => hashArray(hasher, key, .DeepRecursive),
},

builtin.TypeInfo.Pointer.Size.Many,
builtin.TypeInfo.Pointer.Size.C,
=> switch (strat) {
.Shallow => hash(hasher, @ptrToInt(key), .Shallow),
else => @compileError(
\\ unknown-length pointers and C pointers cannot be hashed deeply.
\\ Consider providing your own hash function.
),
},
}
}

/// Helper function to hash a set of contiguous objects, from an array or slice.
pub fn hashArray(hasher: var, key: var, comptime strat: HashStrategy) void {
switch (strat) {
.Shallow => {
// TODO detect via a trait when Key has no padding bits to
// hash it as an array of bytes.
// Otherwise, hash every element.
for (key) |element| {
hash(hasher, element, .Shallow);
}
},
else => {
for (key) |element| {
hash(hasher, element, strat);
}
},
}
}

/// Provides generic hashing for any eligible type.
/// Only hashes `key` itself, pointers are not followed.
pub fn autoHash(hasher: var, key: var) void {
/// Strategy is provided to determine if pointers should be followed or not.
pub fn hash(hasher: var, key: var, comptime strat: HashStrategy) void {
const Key = @typeOf(key);
switch (@typeInfo(Key)) {
.NoReturn,
@ -26,35 +94,18 @@ pub fn autoHash(hasher: var, key: var) void {
// TODO Check if the situation is better after #561 is resolved.
.Int => @inlineCall(hasher.update, std.mem.asBytes(&key)),

.Float => |info| autoHash(hasher, @bitCast(@IntType(false, info.bits), key)),
.Float => |info| hash(hasher, @bitCast(@IntType(false, info.bits), key), strat),

.Bool => autoHash(hasher, @boolToInt(key)),
.Enum => autoHash(hasher, @enumToInt(key)),
.ErrorSet => autoHash(hasher, @errorToInt(key)),
.AnyFrame, .Fn => autoHash(hasher, @ptrToInt(key)),
.Bool => hash(hasher, @boolToInt(key), strat),
.Enum => hash(hasher, @enumToInt(key), strat),
.ErrorSet => hash(hasher, @errorToInt(key), strat),
.AnyFrame, .Fn => hash(hasher, @ptrToInt(key), strat),

.Pointer => |info| switch (info.size) {
builtin.TypeInfo.Pointer.Size.One,
builtin.TypeInfo.Pointer.Size.Many,
builtin.TypeInfo.Pointer.Size.C,
=> autoHash(hasher, @ptrToInt(key)),
.Pointer => @inlineCall(hashPointer, hasher, key, strat),

builtin.TypeInfo.Pointer.Size.Slice => {
autoHash(hasher, key.ptr);
autoHash(hasher, key.len);
},
},
.Optional => if (key) |k| hash(hasher, k, strat),

.Optional => if (key) |k| autoHash(hasher, k),

.Array => {
// TODO detect via a trait when Key has no padding bits to
// hash it as an array of bytes.
// Otherwise, hash every element.
for (key) |element| {
autoHash(hasher, element);
}
},
.Array => hashArray(hasher, key, strat),

.Vector => |info| {
if (info.child.bit_count % 8 == 0) {
@ -67,7 +118,7 @@ pub fn autoHash(hasher: var, key: var) void {
const array: [info.len]info.child = key;
comptime var i: u32 = 0;
inline while (i < info.len) : (i += 1) {
autoHash(hasher, array[i]);
hash(hasher, array[i], strat);
}
}
},
@ -79,19 +130,19 @@ pub fn autoHash(hasher: var, key: var) void {
inline for (info.fields) |field| {
// We reuse the hash of the previous field as the seed for the
// next one so that they're dependant.
autoHash(hasher, @field(key, field.name));
hash(hasher, @field(key, field.name), strat);
}
},

.Union => |info| blk: {
if (info.tag_type) |tag_type| {
const tag = meta.activeTag(key);
const s = autoHash(hasher, tag);
const s = hash(hasher, tag, strat);
inline for (info.fields) |field| {
const enum_field = field.enum_field.?;
if (enum_field.value == @enumToInt(tag)) {
autoHash(hasher, @field(key, enum_field.name));
// TODO use a labelled break when it does not crash the compiler.
hash(hasher, @field(key, enum_field.name), strat);
// TODO use a labelled break when it does not crash the compiler. cf #2908
// break :blk;
return;
}
@ -102,25 +153,86 @@ pub fn autoHash(hasher: var, key: var) void {

.ErrorUnion => blk: {
const payload = key catch |err| {
autoHash(hasher, err);
hash(hasher, err, strat);
break :blk;
};
autoHash(hasher, payload);
hash(hasher, payload, strat);
},
}
}

/// Provides generic hashing for any eligible type.
/// Only hashes `key` itself, pointers are not followed.
/// Slices are rejected to avoid ambiguity on the user's intention.
pub fn autoHash(hasher: var, key: var) void {
const Key = @typeOf(key);
if (comptime meta.trait.isSlice(Key)) {
comptime assert(@hasDecl(std, "StringHashMap")); // detect when the following message needs updated
const extra_help = if (Key == []const u8)
" Consider std.StringHashMap for hashing the contents of []const u8."
else
"";

@compileError("std.auto_hash.autoHash does not allow slices (here " ++ @typeName(Key) ++
") because the intent is unclear. Consider using std.auto_hash.hash or providing your own hash function instead." ++
extra_help);
}

hash(hasher, key, .Shallow);
}

const testing = std.testing;
const Wyhash = std.hash.Wyhash;

fn testAutoHash(key: var) u64 {
fn testHash(key: var) u64 {
// Any hash could be used here, for testing autoHash.
var hasher = Wyhash.init(0);
autoHash(&hasher, key);
hash(&hasher, key, .Shallow);
return hasher.final();
}

test "autoHash slice" {
fn testHashShallow(key: var) u64 {
// Any hash could be used here, for testing autoHash.
var hasher = Wyhash.init(0);
hash(&hasher, key, .Shallow);
return hasher.final();
}

fn testHashDeep(key: var) u64 {
// Any hash could be used here, for testing autoHash.
var hasher = Wyhash.init(0);
hash(&hasher, key, .Deep);
return hasher.final();
}

fn testHashDeepRecursive(key: var) u64 {
// Any hash could be used here, for testing autoHash.
var hasher = Wyhash.init(0);
hash(&hasher, key, .DeepRecursive);
return hasher.final();
}

test "hash pointer" {
const array = [_]u32{ 123, 123, 123 };
const a = &array[0];
const b = &array[1];
const c = &array[2];
const d = a;

testing.expect(testHashShallow(a) == testHashShallow(d));
testing.expect(testHashShallow(a) != testHashShallow(c));
testing.expect(testHashShallow(a) != testHashShallow(b));

testing.expect(testHashDeep(a) == testHashDeep(a));
testing.expect(testHashDeep(a) == testHashDeep(c));
testing.expect(testHashDeep(a) == testHashDeep(b));

testing.expect(testHashDeepRecursive(a) == testHashDeepRecursive(a));
testing.expect(testHashDeepRecursive(a) == testHashDeepRecursive(c));
testing.expect(testHashDeepRecursive(a) == testHashDeepRecursive(b));
}

test "hash slice shallow" {
// Allocate one array dynamically so that we're assured it is not merged
// with the other by the optimization passes.
const array1 = try std.heap.direct_allocator.create([6]u32);
@ -130,23 +242,78 @@ test "autoHash slice" {
const a = array1[0..];
const b = array2[0..];
const c = array1[0..3];
testing.expect(testAutoHash(a) == testAutoHash(a));
testing.expect(testAutoHash(a) != testAutoHash(array1));
testing.expect(testAutoHash(a) != testAutoHash(b));
testing.expect(testAutoHash(a) != testAutoHash(c));
testing.expect(testHashShallow(a) == testHashShallow(a));
testing.expect(testHashShallow(a) != testHashShallow(array1));
testing.expect(testHashShallow(a) != testHashShallow(b));
testing.expect(testHashShallow(a) != testHashShallow(c));
}

test "testAutoHash optional" {
test "hash slice deep" {
// Allocate one array dynamically so that we're assured it is not merged
// with the other by the optimization passes.
const array1 = try std.heap.direct_allocator.create([6]u32);
defer std.heap.direct_allocator.destroy(array1);
array1.* = [_]u32{ 1, 2, 3, 4, 5, 6 };
const array2 = [_]u32{ 1, 2, 3, 4, 5, 6 };
const a = array1[0..];
const b = array2[0..];
const c = array1[0..3];
testing.expect(testHashDeep(a) == testHashDeep(a));
testing.expect(testHashDeep(a) == testHashDeep(array1));
testing.expect(testHashDeep(a) == testHashDeep(b));
testing.expect(testHashDeep(a) != testHashDeep(c));
}

test "hash struct deep" {
const Foo = struct {
a: u32,
b: f64,
c: *bool,

const Self = @This();

pub fn init(allocator: *mem.Allocator, a_: u32, b_: f64, c_: bool) !Self {
const ptr = try allocator.create(bool);
ptr.* = c_;
return Self{ .a = a_, .b = b_, .c = ptr };
}
};

const allocator = std.heap.direct_allocator;
const foo = try Foo.init(allocator, 123, 1.0, true);
const bar = try Foo.init(allocator, 123, 1.0, true);
const baz = try Foo.init(allocator, 123, 1.0, false);
defer allocator.destroy(foo.c);
defer allocator.destroy(bar.c);
defer allocator.destroy(baz.c);

testing.expect(testHashDeep(foo) == testHashDeep(bar));
testing.expect(testHashDeep(foo) != testHashDeep(baz));
testing.expect(testHashDeep(bar) != testHashDeep(baz));

var hasher = Wyhash.init(0);
const h = testHashDeep(foo);
autoHash(&hasher, foo.a);
autoHash(&hasher, foo.b);
autoHash(&hasher, foo.c.*);
testing.expectEqual(h, hasher.final());

const h2 = testHashDeepRecursive(&foo);
testing.expect(h2 != testHashDeep(&foo));
testing.expect(h2 == testHashDeep(foo));
}

test "testHash optional" {
const a: ?u32 = 123;
const b: ?u32 = null;
testing.expectEqual(testAutoHash(a), testAutoHash(u32(123)));
testing.expect(testAutoHash(a) != testAutoHash(b));
testing.expectEqual(testAutoHash(b), 0);
testing.expectEqual(testHash(a), testHash(u32(123)));
testing.expect(testHash(a) != testHash(b));
testing.expectEqual(testHash(b), 0);
}

test "testAutoHash array" {
test "testHash array" {
const a = [_]u32{ 1, 2, 3 };
const h = testAutoHash(a);
const h = testHash(a);
var hasher = Wyhash.init(0);
autoHash(&hasher, u32(1));
autoHash(&hasher, u32(2));
@ -154,14 +321,14 @@ test "testAutoHash array" {
testing.expectEqual(h, hasher.final());
}

test "testAutoHash struct" {
test "testHash struct" {
const Foo = struct {
a: u32 = 1,
b: u32 = 2,
c: u32 = 3,
};
const f = Foo{};
const h = testAutoHash(f);
const h = testHash(f);
var hasher = Wyhash.init(0);
autoHash(&hasher, u32(1));
autoHash(&hasher, u32(2));
@ -169,7 +336,7 @@ test "testAutoHash struct" {
testing.expectEqual(h, hasher.final());
}

test "testAutoHash union" {
test "testHash union" {
const Foo = union(enum) {
A: u32,
B: f32,
@ -179,24 +346,24 @@ test "testAutoHash union" {
const a = Foo{ .A = 18 };
var b = Foo{ .B = 12.34 };
const c = Foo{ .C = 18 };
testing.expect(testAutoHash(a) == testAutoHash(a));
testing.expect(testAutoHash(a) != testAutoHash(b));
testing.expect(testAutoHash(a) != testAutoHash(c));
testing.expect(testHash(a) == testHash(a));
testing.expect(testHash(a) != testHash(b));
testing.expect(testHash(a) != testHash(c));

b = Foo{ .A = 18 };
testing.expect(testAutoHash(a) == testAutoHash(b));
testing.expect(testHash(a) == testHash(b));
}

test "testAutoHash vector" {
test "testHash vector" {
const a: @Vector(4, u32) = [_]u32{ 1, 2, 3, 4 };
const b: @Vector(4, u32) = [_]u32{ 1, 2, 3, 5 };
const c: @Vector(4, u31) = [_]u31{ 1, 2, 3, 4 };
testing.expect(testAutoHash(a) == testAutoHash(a));
testing.expect(testAutoHash(a) != testAutoHash(b));
testing.expect(testAutoHash(a) != testAutoHash(c));
testing.expect(testHash(a) == testHash(a));
testing.expect(testHash(a) != testHash(b));
testing.expect(testHash(a) != testHash(c));
}

test "testAutoHash error union" {
test "testHash error union" {
const Errors = error{Test};
const Foo = struct {
a: u32 = 1,
@ -205,7 +372,7 @@ test "testAutoHash error union" {
};
const f = Foo{};
const g: Errors!Foo = Errors.Test;
testing.expect(testAutoHash(f) != testAutoHash(g));
testing.expect(testAutoHash(f) == testAutoHash(Foo{}));
testing.expect(testAutoHash(g) == testAutoHash(Errors.Test));
testing.expect(testHash(f) != testHash(g));
testing.expect(testHash(f) == testHash(Foo{}));
testing.expect(testHash(g) == testHash(Errors.Test));
}
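The rewritten auto_hash module above makes autoHash a thin wrapper that rejects slices and always delegates to hash(..., .Shallow), while hash takes an explicit HashStrategy. A small sketch of the difference between strategies, mirroring the new tests; the relative import is hypothetical and assumes the snippet sits next to auto_hash.zig:

const std = @import("std");
const auto_hash = @import("auto_hash.zig"); // hypothetical path, for illustration
const Wyhash = std.hash.Wyhash;

test "shallow vs deep hashing of a slice" {
    const array = [_]u32{ 1, 2, 3 };
    const slice = array[0..];

    // .Shallow hashes only the slice's pointer and length.
    var shallow = Wyhash.init(0);
    auto_hash.hash(&shallow, slice, .Shallow);

    // .Deep follows the slice and hashes the three u32 elements instead.
    var deep = Wyhash.init(0);
    auto_hash.hash(&deep, slice, .Deep);

    // The two strategies hash different data, so the digests differ
    // (barring an astronomically unlikely collision).
    std.testing.expect(shallow.final() != deep.final());

    // autoHash(&shallow, slice) is now a compile error; pass a strategy
    // explicitly via hash(), or provide your own hash function, as the
    // compile error message suggests.
}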
std/hash/benchmark.zig: new file, 273 lines
@ -0,0 +1,273 @@
// zig run benchmark.zig --release-fast --override-std-dir ..

const builtin = @import("builtin");
const std = @import("std");
const time = std.time;
const Timer = time.Timer;
const hash = std.hash;

const KiB = 1024;
const MiB = 1024 * KiB;
const GiB = 1024 * MiB;

var prng = std.rand.DefaultPrng.init(0);

const Hash = struct {
ty: type,
name: []const u8,
has_iterative_api: bool = true,
init_u8s: ?[]const u8 = null,
init_u64: ?u64 = null,
};

const siphash_key = "0123456789abcdef";

const hashes = [_]Hash{
Hash{
.ty = hash.Wyhash,
.name = "wyhash",
.init_u64 = 0,
},
Hash{
.ty = hash.SipHash64(1, 3),
.name = "siphash(1,3)",
.init_u8s = siphash_key,
},
Hash{
.ty = hash.SipHash64(2, 4),
.name = "siphash(2,4)",
.init_u8s = siphash_key,
},
Hash{
.ty = hash.Fnv1a_64,
.name = "fnv1a",
},
Hash{
.ty = hash.Adler32,
.name = "adler32",
},
Hash{
.ty = hash.crc.Crc32WithPoly(hash.crc.Polynomial.IEEE),
.name = "crc32-slicing-by-8",
},
Hash{
.ty = hash.crc.Crc32SmallWithPoly(hash.crc.Polynomial.IEEE),
.name = "crc32-half-byte-lookup",
},
Hash{
.ty = hash.CityHash32,
.name = "cityhash-32",
.has_iterative_api = false,
},
Hash{
.ty = hash.CityHash64,
.name = "cityhash-64",
.has_iterative_api = false,
},
Hash{
.ty = hash.Murmur2_32,
.name = "murmur2-32",
.has_iterative_api = false,
},
Hash{
.ty = hash.Murmur2_64,
.name = "murmur2-64",
.has_iterative_api = false,
},
Hash{
.ty = hash.Murmur3_32,
.name = "murmur3-32",
.has_iterative_api = false,
},
};

const Result = struct {
hash: u64,
throughput: u64,
};

const block_size: usize = 8 * 8192;

pub fn benchmarkHash(comptime H: var, bytes: usize) !Result {
var h = blk: {
if (H.init_u8s) |init| {
break :blk H.ty.init(init);
}
if (H.init_u64) |init| {
break :blk H.ty.init(init);
}
break :blk H.ty.init();
};

var block: [block_size]u8 = undefined;
prng.random.bytes(block[0..]);

var offset: usize = 0;
var timer = try Timer.start();
const start = timer.lap();
while (offset < bytes) : (offset += block.len) {
h.update(block[0..]);
}
const end = timer.read();

const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
const throughput = @floatToInt(u64, @intToFloat(f64, bytes) / elapsed_s);

return Result{
.hash = h.final(),
.throughput = throughput,
};
}

pub fn benchmarkHashSmallKeys(comptime H: var, key_size: usize, bytes: usize) !Result {
const key_count = bytes / key_size;
var block: [block_size]u8 = undefined;
prng.random.bytes(block[0..]);

var i: usize = 0;
var timer = try Timer.start();
const start = timer.lap();

var sum: u64 = 0;
while (i < key_count) : (i += 1) {
const small_key = block[0..key_size];
sum +%= blk: {
if (H.init_u8s) |init| {
break :blk H.ty.hash(init, small_key);
}
if (H.init_u64) |init| {
break :blk H.ty.hash(init, small_key);
}
break :blk H.ty.hash(small_key);
};
}
const end = timer.read();

const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
const throughput = @floatToInt(u64, @intToFloat(f64, bytes) / elapsed_s);

return Result{
.hash = sum,
.throughput = throughput,
};
}

fn usage() void {
std.debug.warn(
\\throughput_test [options]
\\
\\Options:
\\  --filter [test-name]
\\  --seed [int]
\\  --count [int]
\\  --key-size [int]
\\  --iterative-only
\\  --help
\\
);
}

fn mode(comptime x: comptime_int) comptime_int {
return if (builtin.mode == builtin.Mode.Debug) x / 64 else x;
}

// TODO(#1358): Replace with builtin formatted padding when available.
fn printPad(stdout: var, s: []const u8) !void {
var i: usize = 0;
while (i < 12 - s.len) : (i += 1) {
try stdout.print(" ");
}
try stdout.print("{}", s);
}

pub fn main() !void {
var stdout_file = try std.io.getStdOut();
var stdout_out_stream = stdout_file.outStream();
const stdout = &stdout_out_stream.stream;

var buffer: [1024]u8 = undefined;
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
const args = try std.process.argsAlloc(&fixed.allocator);

var filter: ?[]u8 = "";
var count: usize = mode(128 * MiB);
var key_size: usize = 32;
var seed: u32 = 0;
var test_iterative_only = false;

var i: usize = 1;
while (i < args.len) : (i += 1) {
if (std.mem.eql(u8, args[i], "--mode")) {
try stdout.print("{}\n", builtin.mode);
return;
} else if (std.mem.eql(u8, args[i], "--seed")) {
i += 1;
if (i == args.len) {
usage();
std.os.exit(1);
}

seed = try std.fmt.parseUnsigned(u32, args[i], 10);
// we seed later
} else if (std.mem.eql(u8, args[i], "--filter")) {
i += 1;
if (i == args.len) {
usage();
std.os.exit(1);
}

filter = args[i];
} else if (std.mem.eql(u8, args[i], "--count")) {
i += 1;
if (i == args.len) {
usage();
std.os.exit(1);
}

const c = try std.fmt.parseUnsigned(usize, args[i], 10);
count = c * MiB;
} else if (std.mem.eql(u8, args[i], "--key-size")) {
i += 1;
if (i == args.len) {
usage();
std.os.exit(1);
}

key_size = try std.fmt.parseUnsigned(usize, args[i], 10);
if (key_size > block_size) {
try stdout.print("key_size cannot exceed block size of {}\n", block_size);
std.os.exit(1);
}
} else if (std.mem.eql(u8, args[i], "--iterative-only")) {
test_iterative_only = true;
} else if (std.mem.eql(u8, args[i], "--help")) {
usage();
return;
} else {
usage();
std.os.exit(1);
}
}

inline for (hashes) |H| {
if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
|
||||
if (!test_iterative_only or H.has_iterative_api) {
|
||||
try stdout.print("{}\n", H.name);
|
||||
|
||||
// Always reseed prior to every call so we are hashing the same buffer contents.
|
||||
// This allows easier comparison between different implementations.
|
||||
if (H.has_iterative_api) {
|
||||
prng.seed(seed);
|
||||
const result = try benchmarkHash(H, count);
|
||||
try stdout.print(" iterative: {:4} MiB/s [{x:0<16}]\n", result.throughput / (1 * MiB), result.hash);
|
||||
}
|
||||
|
||||
if (!test_iterative_only) {
|
||||
prng.seed(seed);
|
||||
const result_small = try benchmarkHashSmallKeys(H, key_size, count);
|
||||
try stdout.print(" small keys: {:4} MiB/s [{x:0<16}]\n", result_small.throughput / (1 * MiB), result_small.hash);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
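
For reference, a minimal sketch (not part of this diff) of the two call patterns the benchmark exercises, the one-shot hash(seed, input) API and the iterative init/update/final API, assuming the std.hash.Wyhash interface shown above:

const std = @import("std");

test "one-shot and iterative hashing agree (sketch)" {
    const input = "The quick brown fox jumps over the lazy dog";

    // One-shot: hash the whole buffer in a single call.
    const oneshot = std.hash.Wyhash.hash(0, input);

    // Iterative: feed the buffer in pieces, then finalize.
    var h = std.hash.Wyhash.init(0);
    h.update(input[0..9]);
    h.update(input[9..]);
    const iterative = h.final();

    std.testing.expect(oneshot == iterative);
}
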
@ -10,9 +10,9 @@ const debug = std.debug;
const testing = std.testing;

pub const Polynomial = struct {
    const IEEE = 0xedb88320;
    const Castagnoli = 0x82f63b78;
    const Koopman = 0xeb31d82e;
    pub const IEEE = 0xedb88320;
    pub const Castagnoli = 0x82f63b78;
    pub const Koopman = 0xeb31d82e;
};

// IEEE is by far the most common CRC and so is aliased by default.
@ -21,7 +21,7 @@ pub fn SipHash128(comptime c_rounds: usize, comptime d_rounds: usize) type {
|
||||
return SipHash(u128, c_rounds, d_rounds);
|
||||
}
|
||||
|
||||
fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) type {
|
||||
fn SipHashStateless(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) type {
|
||||
assert(T == u64 or T == u128);
|
||||
assert(c_rounds > 0 and d_rounds > 0);
|
||||
|
||||
@ -34,10 +34,6 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
|
||||
v1: u64,
|
||||
v2: u64,
|
||||
v3: u64,
|
||||
|
||||
// streaming cache
|
||||
buf: [8]u8,
|
||||
buf_len: usize,
|
||||
msg_len: u8,
|
||||
|
||||
pub fn init(key: []const u8) Self {
|
||||
@ -51,9 +47,6 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
|
||||
.v1 = k1 ^ 0x646f72616e646f6d,
|
||||
.v2 = k0 ^ 0x6c7967656e657261,
|
||||
.v3 = k1 ^ 0x7465646279746573,
|
||||
|
||||
.buf = undefined,
|
||||
.buf_len = 0,
|
||||
.msg_len = 0,
|
||||
};
|
||||
|
||||
@ -64,73 +57,66 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
|
||||
return d;
|
||||
}
|
||||
|
||||
pub fn update(d: *Self, b: []const u8) void {
|
||||
pub fn update(self: *Self, b: []const u8) void {
|
||||
std.debug.assert(b.len % 8 == 0);
|
||||
|
||||
var off: usize = 0;
|
||||
|
||||
// Partial from previous.
|
||||
if (d.buf_len != 0 and d.buf_len + b.len > 8) {
|
||||
off += 8 - d.buf_len;
|
||||
mem.copy(u8, d.buf[d.buf_len..], b[0..off]);
|
||||
d.round(d.buf[0..]);
|
||||
d.buf_len = 0;
|
||||
while (off < b.len) : (off += 8) {
|
||||
@inlineCall(self.round, b[off .. off + 8]);
|
||||
}
|
||||
|
||||
// Full middle blocks.
|
||||
while (off + 8 <= b.len) : (off += 8) {
|
||||
d.round(b[off .. off + 8]);
|
||||
}
|
||||
|
||||
// Remainder for next pass.
|
||||
mem.copy(u8, d.buf[d.buf_len..], b[off..]);
|
||||
d.buf_len += @intCast(u8, b[off..].len);
|
||||
d.msg_len +%= @truncate(u8, b.len);
|
||||
self.msg_len +%= @truncate(u8, b.len);
|
||||
}
|
||||
|
||||
pub fn final(d: *Self) T {
|
||||
// Padding
|
||||
mem.set(u8, d.buf[d.buf_len..], 0);
|
||||
d.buf[7] = d.msg_len;
|
||||
d.round(d.buf[0..]);
|
||||
pub fn final(self: *Self, b: []const u8) T {
|
||||
std.debug.assert(b.len < 8);
|
||||
|
||||
self.msg_len +%= @truncate(u8, b.len);
|
||||
|
||||
var buf = [_]u8{0} ** 8;
|
||||
mem.copy(u8, buf[0..], b[0..]);
|
||||
buf[7] = self.msg_len;
|
||||
self.round(buf[0..]);
|
||||
|
||||
if (T == u128) {
|
||||
d.v2 ^= 0xee;
|
||||
self.v2 ^= 0xee;
|
||||
} else {
|
||||
d.v2 ^= 0xff;
|
||||
self.v2 ^= 0xff;
|
||||
}
|
||||
|
||||
comptime var i: usize = 0;
|
||||
inline while (i < d_rounds) : (i += 1) {
|
||||
@inlineCall(sipRound, d);
|
||||
@inlineCall(sipRound, self);
|
||||
}
|
||||
|
||||
const b1 = d.v0 ^ d.v1 ^ d.v2 ^ d.v3;
|
||||
const b1 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
|
||||
if (T == u64) {
|
||||
return b1;
|
||||
}
|
||||
|
||||
d.v1 ^= 0xdd;
|
||||
self.v1 ^= 0xdd;
|
||||
|
||||
comptime var j: usize = 0;
|
||||
inline while (j < d_rounds) : (j += 1) {
|
||||
@inlineCall(sipRound, d);
|
||||
@inlineCall(sipRound, self);
|
||||
}
|
||||
|
||||
const b2 = d.v0 ^ d.v1 ^ d.v2 ^ d.v3;
|
||||
const b2 = self.v0 ^ self.v1 ^ self.v2 ^ self.v3;
|
||||
return (u128(b2) << 64) | b1;
|
||||
}
|
||||
|
||||
fn round(d: *Self, b: []const u8) void {
|
||||
fn round(self: *Self, b: []const u8) void {
|
||||
assert(b.len == 8);
|
||||
|
||||
const m = mem.readIntSliceLittle(u64, b[0..]);
|
||||
d.v3 ^= m;
|
||||
self.v3 ^= m;
|
||||
|
||||
comptime var i: usize = 0;
|
||||
inline while (i < c_rounds) : (i += 1) {
|
||||
@inlineCall(sipRound, d);
|
||||
@inlineCall(sipRound, self);
|
||||
}
|
||||
|
||||
d.v0 ^= m;
|
||||
self.v0 ^= m;
|
||||
}
|
||||
|
||||
fn sipRound(d: *Self) void {
|
||||
@ -151,9 +137,61 @@ fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize)
|
||||
}
|
||||
|
||||
pub fn hash(key: []const u8, input: []const u8) T {
|
||||
const aligned_len = input.len - (input.len % 8);
|
||||
|
||||
var c = Self.init(key);
|
||||
c.update(input);
|
||||
return c.final();
|
||||
@inlineCall(c.update, input[0..aligned_len]);
|
||||
return @inlineCall(c.final, input[aligned_len..]);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
pub fn SipHash(comptime T: type, comptime c_rounds: usize, comptime d_rounds: usize) type {
    assert(T == u64 or T == u128);
    assert(c_rounds > 0 and d_rounds > 0);

    return struct {
        const State = SipHashStateless(T, c_rounds, d_rounds);
        const Self = @This();
        const digest_size = 64;
        const block_size = 64;

        state: State,
        buf: [8]u8,
        buf_len: usize,

        pub fn init(key: []const u8) Self {
            return Self{
                .state = State.init(key),
                .buf = undefined,
                .buf_len = 0,
            };
        }

        pub fn update(self: *Self, b: []const u8) void {
            var off: usize = 0;

            if (self.buf_len != 0 and self.buf_len + b.len >= 8) {
                off += 8 - self.buf_len;
                mem.copy(u8, self.buf[self.buf_len..], b[0..off]);
                self.state.update(self.buf[0..]);
                self.buf_len = 0;
            }

            const remain_len = b.len - off;
            const aligned_len = remain_len - (remain_len % 8);
            self.state.update(b[off .. off + aligned_len]);

            mem.copy(u8, self.buf[self.buf_len..], b[off + aligned_len ..]);
            self.buf_len += @intCast(u8, b[off + aligned_len ..].len);
        }

        pub fn final(self: *Self) T {
            return self.state.final(self.buf[0..self.buf_len]);
        }

        pub fn hash(key: []const u8, input: []const u8) T {
            return State.hash(key, input);
        }
    };
}
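
As an aside, a small usage sketch of the reworked SipHash wrapper, assuming the SipHash64/init/update/final/hash API defined above; updates no longer have to be multiples of 8 bytes because the wrapper buffers the remainder:

const std = @import("std");

test "SipHash streaming matches one-shot (sketch)" {
    const key = "0123456789abcdef"; // SipHash takes a 128-bit key.
    const msg = "hello, siphash";

    const Sip = std.hash.SipHash64(2, 4);
    const oneshot = Sip.hash(key, msg);

    var h = Sip.init(key);
    h.update(msg[0..5]); // 5 bytes: not 8-byte aligned, buffered internally.
    h.update(msg[5..]);
    std.testing.expect(h.final() == oneshot);
}
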
@ -237,7 +275,7 @@ test "siphash64-2-4 sanity" {
        buffer[i] = @intCast(u8, i);

        const expected = mem.readIntLittle(u64, &vector);
        testing.expect(siphash.hash(test_key, buffer[0..i]) == expected);
        testing.expectEqual(siphash.hash(test_key, buffer[0..i]), expected);
    }
}

@ -316,6 +354,30 @@ test "siphash128-2-4 sanity" {
        buffer[i] = @intCast(u8, i);

        const expected = mem.readIntLittle(u128, &vector);
        testing.expect(siphash.hash(test_key, buffer[0..i]) == expected);
        testing.expectEqual(siphash.hash(test_key, buffer[0..i]), expected);
    }
}

test "iterative non-divisible update" {
    var buf: [1024]u8 = undefined;
    for (buf) |*e, i| {
        e.* = @truncate(u8, i);
    }

    const key = "0x128dad08f12307";
    const Siphash = SipHash64(2, 4);

    var end: usize = 9;
    while (end < buf.len) : (end += 9) {
        const non_iterative_hash = Siphash.hash(key, buf[0..end]);

        var wy = Siphash.init(key);
        var i: usize = 0;
        while (i < end) : (i += 7) {
            wy.update(buf[i..std.math.min(i + 7, end)]);
        }
        const iterative_hash = wy.final();

        std.testing.expectEqual(iterative_hash, non_iterative_hash);
    }
}
@ -1,148 +0,0 @@
|
||||
const builtin = @import("builtin");
|
||||
const std = @import("std");
|
||||
const time = std.time;
|
||||
const Timer = time.Timer;
|
||||
const hash = std.hash;
|
||||
|
||||
const KiB = 1024;
|
||||
const MiB = 1024 * KiB;
|
||||
const GiB = 1024 * MiB;
|
||||
|
||||
var prng = std.rand.DefaultPrng.init(0);
|
||||
|
||||
const Hash = struct {
|
||||
ty: type,
|
||||
name: []const u8,
|
||||
init_u8s: ?[]const u8 = null,
|
||||
init_u64: ?u64 = null,
|
||||
};
|
||||
|
||||
const siphash_key = "0123456789abcdef";
|
||||
|
||||
const hashes = [_]Hash{
|
||||
Hash{ .ty = hash.Wyhash, .name = "wyhash", .init_u64 = 0 },
|
||||
Hash{ .ty = hash.SipHash64(1, 3), .name = "siphash(1,3)", .init_u8s = siphash_key },
|
||||
Hash{ .ty = hash.SipHash64(2, 4), .name = "siphash(2,4)", .init_u8s = siphash_key },
|
||||
Hash{ .ty = hash.Fnv1a_64, .name = "fnv1a" },
|
||||
Hash{ .ty = hash.Crc32, .name = "crc32" },
|
||||
};
|
||||
|
||||
const Result = struct {
|
||||
hash: u64,
|
||||
throughput: u64,
|
||||
};
|
||||
|
||||
pub fn benchmarkHash(comptime H: var, bytes: usize) !Result {
|
||||
var h = blk: {
|
||||
if (H.init_u8s) |init| {
|
||||
break :blk H.ty.init(init);
|
||||
}
|
||||
if (H.init_u64) |init| {
|
||||
break :blk H.ty.init(init);
|
||||
}
|
||||
break :blk H.ty.init();
|
||||
};
|
||||
|
||||
var block: [8192]u8 = undefined;
|
||||
prng.random.bytes(block[0..]);
|
||||
|
||||
var offset: usize = 0;
|
||||
var timer = try Timer.start();
|
||||
const start = timer.lap();
|
||||
while (offset < bytes) : (offset += block.len) {
|
||||
h.update(block[0..]);
|
||||
}
|
||||
const end = timer.read();
|
||||
|
||||
const elapsed_s = @intToFloat(f64, end - start) / time.ns_per_s;
|
||||
const throughput = @floatToInt(u64, @intToFloat(f64, bytes) / elapsed_s);
|
||||
|
||||
return Result{
|
||||
.hash = h.final(),
|
||||
.throughput = throughput,
|
||||
};
|
||||
}
|
||||
|
||||
fn usage() void {
|
||||
std.debug.warn(
|
||||
\\throughput_test [options]
|
||||
\\
|
||||
\\Options:
|
||||
\\ --filter [test-name]
|
||||
\\ --seed [int]
|
||||
\\ --count [int]
|
||||
\\ --help
|
||||
\\
|
||||
);
|
||||
}
|
||||
|
||||
fn mode(comptime x: comptime_int) comptime_int {
|
||||
return if (builtin.mode == builtin.Mode.Debug) x / 64 else x;
|
||||
}
|
||||
|
||||
// TODO(#1358): Replace with builtin formatted padding when available.
|
||||
fn printPad(stdout: var, s: []const u8) !void {
|
||||
var i: usize = 0;
|
||||
while (i < 12 - s.len) : (i += 1) {
|
||||
try stdout.print(" ");
|
||||
}
|
||||
try stdout.print("{}", s);
|
||||
}
|
||||
|
||||
pub fn main() !void {
|
||||
var stdout_file = try std.io.getStdOut();
|
||||
var stdout_out_stream = stdout_file.outStream();
|
||||
const stdout = &stdout_out_stream.stream;
|
||||
|
||||
var buffer: [1024]u8 = undefined;
|
||||
var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);
|
||||
const args = try std.process.argsAlloc(&fixed.allocator);
|
||||
|
||||
var filter: ?[]u8 = "";
|
||||
var count: usize = mode(128 * MiB);
|
||||
|
||||
var i: usize = 1;
|
||||
while (i < args.len) : (i += 1) {
|
||||
if (std.mem.eql(u8, args[i], "--seed")) {
|
||||
i += 1;
|
||||
if (i == args.len) {
|
||||
usage();
|
||||
std.os.exit(1);
|
||||
}
|
||||
|
||||
const seed = try std.fmt.parseUnsigned(u32, args[i], 10);
|
||||
prng.seed(seed);
|
||||
} else if (std.mem.eql(u8, args[i], "--filter")) {
|
||||
i += 1;
|
||||
if (i == args.len) {
|
||||
usage();
|
||||
std.os.exit(1);
|
||||
}
|
||||
|
||||
filter = args[i];
|
||||
} else if (std.mem.eql(u8, args[i], "--count")) {
|
||||
i += 1;
|
||||
if (i == args.len) {
|
||||
usage();
|
||||
std.os.exit(1);
|
||||
}
|
||||
|
||||
const c = try std.fmt.parseUnsigned(u32, args[i], 10);
|
||||
count = c * MiB;
|
||||
} else if (std.mem.eql(u8, args[i], "--help")) {
|
||||
usage();
|
||||
return;
|
||||
} else {
|
||||
usage();
|
||||
std.os.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
inline for (hashes) |H| {
|
||||
if (filter == null or std.mem.indexOf(u8, H.name, filter.?) != null) {
|
||||
const result = try benchmarkHash(H, count);
|
||||
try printPad(stdout, H.name);
|
||||
try stdout.print(": {:4} MiB/s [{:16}]\n", result.throughput / (1 * MiB), result.hash);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -10,7 +10,8 @@ const primes = [_]u64{
|
||||
};
|
||||
|
||||
fn read_bytes(comptime bytes: u8, data: []const u8) u64 {
|
||||
return mem.readVarInt(u64, data[0..bytes], .Little);
|
||||
const T = @IntType(false, 8 * bytes);
|
||||
return mem.readIntSliceLittle(T, data[0..bytes]);
|
||||
}
|
||||
|
||||
fn read_8bytes_swapped(data: []const u8) u64 {
|
||||
@ -31,18 +32,21 @@ fn mix1(a: u64, b: u64, seed: u64) u64 {
|
||||
return mum(a ^ seed ^ primes[2], b ^ seed ^ primes[3]);
|
||||
}
|
||||
|
||||
pub const Wyhash = struct {
|
||||
// Wyhash version which does not store internal state for handling partial buffers.
|
||||
// This is needed so that we can maximize the speed for the short key case, which will
|
||||
// use the non-iterative api which the public Wyhash exposes.
|
||||
const WyhashStateless = struct {
|
||||
seed: u64,
|
||||
msg_len: usize,
|
||||
|
||||
pub fn init(seed: u64) Wyhash {
|
||||
return Wyhash{
|
||||
pub fn init(seed: u64) WyhashStateless {
|
||||
return WyhashStateless{
|
||||
.seed = seed,
|
||||
.msg_len = 0,
|
||||
};
|
||||
}
|
||||
|
||||
fn round(self: *Wyhash, b: []const u8) void {
|
||||
fn round(self: *WyhashStateless, b: []const u8) void {
|
||||
std.debug.assert(b.len == 32);
|
||||
|
||||
self.seed = mix0(
|
||||
@ -56,12 +60,25 @@ pub const Wyhash = struct {
|
||||
);
|
||||
}
|
||||
|
||||
fn partial(self: *Wyhash, b: []const u8) void {
|
||||
const rem_key = b;
|
||||
const rem_len = b.len;
|
||||
pub fn update(self: *WyhashStateless, b: []const u8) void {
|
||||
std.debug.assert(b.len % 32 == 0);
|
||||
|
||||
var seed = self.seed;
|
||||
seed = switch (@intCast(u5, rem_len)) {
|
||||
var off: usize = 0;
|
||||
while (off < b.len) : (off += 32) {
|
||||
@inlineCall(self.round, b[off .. off + 32]);
|
||||
}
|
||||
|
||||
self.msg_len += b.len;
|
||||
}
|
||||
|
||||
pub fn final(self: *WyhashStateless, b: []const u8) u64 {
|
||||
std.debug.assert(b.len < 32);
|
||||
|
||||
const seed = self.seed;
|
||||
const rem_len = @intCast(u5, b.len);
|
||||
const rem_key = b[0..rem_len];
|
||||
|
||||
self.seed = switch (rem_len) {
|
||||
0 => seed,
|
||||
1 => mix0(read_bytes(1, rem_key), primes[4], seed),
|
||||
2 => mix0(read_bytes(2, rem_key), primes[4], seed),
|
||||
@ -95,34 +112,70 @@ pub const Wyhash = struct {
|
||||
30 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 16) | read_bytes(2, rem_key[28..]), seed),
|
||||
31 => mix0(read_8bytes_swapped(rem_key), read_8bytes_swapped(rem_key[8..]), seed) ^ mix1(read_8bytes_swapped(rem_key[16..]), (read_bytes(4, rem_key[24..]) << 24) | (read_bytes(2, rem_key[28..]) << 8) | read_bytes(1, rem_key[30..]), seed),
|
||||
};
|
||||
self.seed = seed;
|
||||
|
||||
self.msg_len += b.len;
|
||||
return mum(self.seed ^ self.msg_len, primes[4]);
|
||||
}
|
||||
|
||||
pub fn hash(seed: u64, input: []const u8) u64 {
|
||||
const aligned_len = input.len - (input.len % 32);
|
||||
|
||||
var c = WyhashStateless.init(seed);
|
||||
@inlineCall(c.update, input[0..aligned_len]);
|
||||
return @inlineCall(c.final, input[aligned_len..]);
|
||||
}
|
||||
};
|
||||
|
||||
/// Fast non-cryptographic 64bit hash function.
/// See https://github.com/wangyi-fudan/wyhash
pub const Wyhash = struct {
    state: WyhashStateless,

    buf: [32]u8,
    buf_len: usize,

    pub fn init(seed: u64) Wyhash {
        return Wyhash{
            .state = WyhashStateless.init(seed),
            .buf = undefined,
            .buf_len = 0,
        };
    }

    pub fn update(self: *Wyhash, b: []const u8) void {
        var off: usize = 0;

        // Full middle blocks.
        while (off + 32 <= b.len) : (off += 32) {
            @inlineCall(self.round, b[off .. off + 32]);
        if (self.buf_len != 0 and self.buf_len + b.len >= 32) {
            off += 32 - self.buf_len;
            mem.copy(u8, self.buf[self.buf_len..], b[0..off]);
            self.state.update(self.buf[0..]);
            self.buf_len = 0;
        }

        self.partial(b[off..]);
        self.msg_len += b.len;
        const remain_len = b.len - off;
        const aligned_len = remain_len - (remain_len % 32);
        self.state.update(b[off .. off + aligned_len]);

        mem.copy(u8, self.buf[self.buf_len..], b[off + aligned_len ..]);
        self.buf_len += @intCast(u8, b[off + aligned_len ..].len);
    }

    pub fn final(self: *Wyhash) u64 {
        return mum(self.seed ^ self.msg_len, primes[4]);
        const seed = self.state.seed;
        const rem_len = @intCast(u5, self.buf_len);
        const rem_key = self.buf[0..self.buf_len];

        return self.state.final(rem_key);
    }

    pub fn hash(seed: u64, input: []const u8) u64 {
        var c = Wyhash.init(seed);
        c.update(input);
        return c.final();
        return WyhashStateless.hash(seed, input);
    }
};

const expectEqual = std.testing.expectEqual;
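
The design here splits the hash in two: WyhashStateless only consumes whole 32-byte blocks, while the public Wyhash keeps a 32-byte buffer so update() can accept slices of any length. A hedged usage sketch of that streaming API, assuming the interface defined above:

const std = @import("std");

test "Wyhash streaming matches one-shot (sketch)" {
    const data = "0123456789abcdefghijklmnopqrstuvwxyz0123456789";
    const seed: u64 = 42;

    const oneshot = std.hash.Wyhash.hash(seed, data);

    // Chunks that are not multiples of 32 bytes; the internal buffer
    // re-aligns them before handing blocks to WyhashStateless.
    var h = std.hash.Wyhash.init(seed);
    h.update(data[0..7]);
    h.update(data[7..20]);
    h.update(data[20..]);

    std.testing.expect(h.final() == oneshot);
}
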
||||
|
||||
test "test vectors" {
|
||||
const expectEqual = std.testing.expectEqual;
|
||||
const hash = Wyhash.hash;
|
||||
|
||||
expectEqual(hash(0, ""), 0x0);
|
||||
@ -133,3 +186,46 @@ test "test vectors" {
|
||||
expectEqual(hash(5, "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"), 0x602a1894d3bbfe7f);
|
||||
expectEqual(hash(6, "12345678901234567890123456789012345678901234567890123456789012345678901234567890"), 0x829e9c148b75970e);
|
||||
}
|
||||
|
||||
test "test vectors streaming" {
|
||||
var wh = Wyhash.init(5);
|
||||
for ("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") |e| {
|
||||
wh.update(mem.asBytes(&e));
|
||||
}
|
||||
expectEqual(wh.final(), 0x602a1894d3bbfe7f);
|
||||
|
||||
const pattern = "1234567890";
|
||||
const count = 8;
|
||||
const result = 0x829e9c148b75970e;
|
||||
expectEqual(Wyhash.hash(6, pattern ** 8), result);
|
||||
|
||||
wh = Wyhash.init(6);
|
||||
var i: u32 = 0;
|
||||
while (i < count) : (i += 1) {
|
||||
wh.update(pattern);
|
||||
}
|
||||
expectEqual(wh.final(), result);
|
||||
}
|
||||
|
||||
test "iterative non-divisible update" {
|
||||
var buf: [8192]u8 = undefined;
|
||||
for (buf) |*e, i| {
|
||||
e.* = @truncate(u8, i);
|
||||
}
|
||||
|
||||
const seed = 0x128dad08f;
|
||||
|
||||
var end: usize = 32;
|
||||
while (end < buf.len) : (end += 32) {
|
||||
const non_iterative_hash = Wyhash.hash(seed, buf[0..end]);
|
||||
|
||||
var wy = Wyhash.init(seed);
|
||||
var i: usize = 0;
|
||||
while (i < end) : (i += 33) {
|
||||
wy.update(buf[i..std.math.min(i + 33, end)]);
|
||||
}
|
||||
const iterative_hash = wy.final();
|
||||
|
||||
std.testing.expectEqual(iterative_hash, non_iterative_hash);
|
||||
}
|
||||
}
|
||||
|
||||
@ -17,6 +17,21 @@ pub fn AutoHashMap(comptime K: type, comptime V: type) type {
    return HashMap(K, V, getAutoHashFn(K), getAutoEqlFn(K));
}

/// Builtin hashmap for strings as keys.
pub fn StringHashMap(comptime V: type) type {
    return HashMap([]const u8, V, hashString, eqlString);
}

pub fn eqlString(a: []const u8, b: []const u8) bool {
    if (a.len != b.len) return false;
    if (a.ptr == b.ptr) return true;
    return mem.compare(u8, a, b) == .Equal;
}

pub fn hashString(s: []const u8) u32 {
    return @truncate(u32, std.hash.Wyhash.hash(0, s));
}

pub fn HashMap(comptime K: type, comptime V: type, comptime hash: fn (key: K) u32, comptime eql: fn (a: K, b: K) bool) type {
    return struct {
        entries: []Entry,
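
A brief usage sketch of the new string-keyed map; the std.hash_map namespace path and the put/deinit signatures are assumptions about the standard library of this era, not something this diff shows:

const std = @import("std");

test "StringHashMap basic usage (sketch)" {
    // eqlString/hashString are the comparators StringHashMap wires into HashMap.
    std.testing.expect(std.hash_map.eqlString("abc", "abc"));
    std.testing.expect(std.hash_map.hashString("abc") == std.hash_map.hashString("abc"));

    var buffer: [4096]u8 = undefined;
    var fixed = std.heap.FixedBufferAllocator.init(buffer[0..]);

    var map = std.StringHashMap(usize).init(&fixed.allocator);
    defer map.deinit();
    _ = try map.put("wyhash", 1);
}
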
@ -102,19 +102,9 @@ test "HeaderEntry" {
    testing.expectEqualSlices(u8, "x", e.value);
}

fn stringEql(a: []const u8, b: []const u8) bool {
    if (a.len != b.len) return false;
    if (a.ptr == b.ptr) return true;
    return mem.compare(u8, a, b) == .Equal;
}

fn stringHash(s: []const u8) u32 {
    return @truncate(u32, std.hash.Wyhash.hash(0, s));
}

const HeaderList = std.ArrayList(HeaderEntry);
const HeaderIndexList = std.ArrayList(usize);
const HeaderIndex = std.HashMap([]const u8, HeaderIndexList, stringHash, stringEql);
const HeaderIndex = std.StringHashMap(HeaderIndexList);

pub const Headers = struct {
    // the owned header field name is stored in the index as part of the key
@ -65,7 +65,7 @@ pub const CreateFileError = error{
    InvalidUtf8,

    /// On Windows, file paths cannot contain these characters:
    /// '/', '*', '?', '"', '<', '>', '|'
    /// '*', '?', '"', '<', '>', '|', and '/' (when the ABI is not GNU)
    BadPathName,

    Unexpected,

@ -836,11 +836,10 @@ pub fn sliceToPrefixedSuffixedFileW(s: []const u8, comptime suffix: []const u16)
    // > converting the name to an NT-style name, except when using the "\\?\"
    // > prefix as detailed in the following sections.
    // from https://docs.microsoft.com/en-us/windows/desktop/FileIO/naming-a-file#maximum-path-length-limitation
    // Because we want the larger maximum path length for absolute paths, we
    // disallow forward slashes in zig std lib file functions on Windows.
    for (s) |byte| {
        switch (byte) {
            '/', '*', '?', '"', '<', '>', '|' => return error.BadPathName,
            '*', '?', '"', '<', '>', '|' => return error.BadPathName,
            '/' => if (builtin.abi == .msvc) return error.BadPathName,
            else => {},
        }
    }
@ -17,6 +17,7 @@ pub const SinglyLinkedList = @import("linked_list.zig").SinglyLinkedList;
pub const StaticallyInitializedMutex = @import("statically_initialized_mutex.zig").StaticallyInitializedMutex;
pub const SegmentedList = @import("segmented_list.zig").SegmentedList;
pub const SpinLock = @import("spinlock.zig").SpinLock;
pub const StringHashMap = @import("hash_map.zig").StringHashMap;
pub const ChildProcess = @import("child_process.zig").ChildProcess;
pub const TailQueue = @import("linked_list.zig").TailQueue;
pub const Thread = @import("thread.zig").Thread;
@ -761,6 +761,7 @@ pub const Node = struct {
        name_token: TokenIndex,
        type_expr: ?*Node,
        value_expr: ?*Node,
        align_expr: ?*Node,

        pub fn iterate(self: *ContainerField, index: usize) ?*Node {
            var i = index;
@ -380,16 +380,18 @@ fn parseVarDecl(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
    return &node.base;
}

/// ContainerField <- IDENTIFIER (COLON TypeExpr)? (EQUAL Expr)?
/// ContainerField <- IDENTIFIER (COLON TypeExpr ByteAlign?)? (EQUAL Expr)?
fn parseContainerField(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*Node {
    const name_token = eatToken(it, .Identifier) orelse return null;

    const type_expr = if (eatToken(it, .Colon)) |_|
        try expectNode(arena, it, tree, parseTypeExpr, AstError{
    var align_expr: ?*Node = null;
    var type_expr: ?*Node = null;
    if (eatToken(it, .Colon)) |_| {
        type_expr = try expectNode(arena, it, tree, parseTypeExpr, AstError{
            .ExpectedTypeExpr = AstError.ExpectedTypeExpr{ .token = it.index },
        })
    else
        null;
        });
        align_expr = try parseByteAlign(arena, it, tree);
    }

    const value_expr = if (eatToken(it, .Equal)) |_|
        try expectNode(arena, it, tree, parseExpr, AstError{
@ -406,6 +408,7 @@ fn parseContainerField(arena: *Allocator, it: *TokenIterator, tree: *Tree) !?*No
        .name_token = name_token,
        .type_expr = type_expr,
        .value_expr = value_expr,
        .align_expr = align_expr,
    };
    return &node.base;
}
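
With the ByteAlign rule added to ContainerField, a struct field can now carry an explicit alignment. A minimal illustrative snippet (the Packet type is hypothetical, not from this diff):

const std = @import("std");

const Packet = struct {
    header: u32,
    payload: u8 align(16),
};

test "aligned container field (sketch)" {
    var p: Packet = undefined;
    p.payload = 1;
    comptime std.testing.expect(@typeOf(&p.payload) == *align(16) u8);
}
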
@ -166,6 +166,15 @@ test "zig fmt: doc comments on param decl" {
    );
}

test "zig fmt: aligned struct field" {
    try testCanonical(
        \\pub const S = struct {
        \\    f: i32 align(32),
        \\};
        \\
    );
}

test "zig fmt: preserve space between async fn definitions" {
    try testCanonical(
        \\async fn a() void {}
@ -201,6 +210,103 @@ test "zig fmt: comment to disable/enable zig fmt" {
    );
}
test "zig fmt: line comment following 'zig fmt: off'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\// Test
|
||||
\\const e = f;
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: doc comment following 'zig fmt: off'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\/// test
|
||||
\\const e = f;
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: line and doc comment following 'zig fmt: off'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\// test 1
|
||||
\\/// test 2
|
||||
\\const e = f;
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: doc and line comment following 'zig fmt: off'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\/// test 1
|
||||
\\// test 2
|
||||
\\const e = f;
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: alternating 'zig fmt: off' and 'zig fmt: on'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\// zig fmt: on
|
||||
\\// zig fmt: off
|
||||
\\const e = f;
|
||||
\\// zig fmt: off
|
||||
\\// zig fmt: on
|
||||
\\// zig fmt: off
|
||||
\\const a = b;
|
||||
\\// zig fmt: on
|
||||
\\const c = d;
|
||||
\\// zig fmt: on
|
||||
\\
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: line comment following 'zig fmt: on'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\const e = f;
|
||||
\\// zig fmt: on
|
||||
\\// test
|
||||
\\const e = f;
|
||||
\\
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: doc comment following 'zig fmt: on'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\const e = f;
|
||||
\\// zig fmt: on
|
||||
\\/// test
|
||||
\\const e = f;
|
||||
\\
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: line and doc comment following 'zig fmt: on'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\const e = f;
|
||||
\\// zig fmt: on
|
||||
\\// test1
|
||||
\\/// test2
|
||||
\\const e = f;
|
||||
\\
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: doc and line comment following 'zig fmt: on'" {
|
||||
try testCanonical(
|
||||
\\// zig fmt: off
|
||||
\\const e = f;
|
||||
\\// zig fmt: on
|
||||
\\/// test1
|
||||
\\// test2
|
||||
\\const e = f;
|
||||
\\
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: pointer of unknown length" {
|
||||
try testCanonical(
|
||||
\\fn foo(ptr: [*]u8) void {}
|
||||
@ -2269,7 +2375,6 @@ test "zig fmt: if type expr" {
|
||||
\\
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: file ends with struct field" {
|
||||
try testTransform(
|
||||
\\a: bool
|
||||
@ -2279,6 +2384,20 @@ test "zig fmt: file ends with struct field" {
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: comment after empty comment" {
|
||||
try testTransform(
|
||||
\\const x = true; //
|
||||
\\//
|
||||
\\//
|
||||
\\//a
|
||||
\\
|
||||
,
|
||||
\\const x = true;
|
||||
\\//a
|
||||
\\
|
||||
);
|
||||
}
|
||||
|
||||
test "zig fmt: comments at several places in struct init" {
|
||||
try testTransform(
|
||||
\\var bar = Bar{
|
||||
|
||||
@ -89,41 +89,98 @@ fn renderRoot(
|
||||
var it = tree.root_node.decls.iterator(0);
|
||||
while (true) {
|
||||
var decl = (it.next() orelse return).*;
|
||||
// look for zig fmt: off comment
|
||||
var start_token_index = decl.firstToken();
|
||||
zig_fmt_loop: while (start_token_index != 0) {
|
||||
start_token_index -= 1;
|
||||
const start_token = tree.tokens.at(start_token_index);
|
||||
switch (start_token.id) {
|
||||
|
||||
// This loop does the following:
|
||||
//
|
||||
// - Iterates through line/doc comment tokens that precedes the current
|
||||
// decl.
|
||||
// - Figures out the first token index (`copy_start_token_index`) which
|
||||
// hasn't been copied to the output stream yet.
|
||||
// - Detects `zig fmt: (off|on)` in the line comment tokens, and
|
||||
// determines whether the current decl should be reformatted or not.
|
||||
//
|
||||
var token_index = decl.firstToken();
|
||||
var fmt_active = true;
|
||||
var found_fmt_directive = false;
|
||||
|
||||
var copy_start_token_index = token_index;
|
||||
|
||||
while (token_index != 0) {
|
||||
token_index -= 1;
|
||||
const token = tree.tokens.at(token_index);
|
||||
switch (token.id) {
|
||||
Token.Id.LineComment => {},
|
||||
Token.Id.DocComment => continue,
|
||||
Token.Id.DocComment => {
|
||||
copy_start_token_index = token_index;
|
||||
continue;
|
||||
},
|
||||
else => break,
|
||||
}
|
||||
if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(start_token)[2..], " "), "zig fmt: off")) {
|
||||
var end_token_index = start_token_index;
|
||||
while (true) {
|
||||
end_token_index += 1;
|
||||
const end_token = tree.tokens.at(end_token_index);
|
||||
switch (end_token.id) {
|
||||
|
||||
if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: off")) {
|
||||
if (!found_fmt_directive) {
|
||||
fmt_active = false;
|
||||
found_fmt_directive = true;
|
||||
}
|
||||
} else if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: on")) {
|
||||
if (!found_fmt_directive) {
|
||||
fmt_active = true;
|
||||
found_fmt_directive = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!fmt_active) {
|
||||
// Reformatting is disabled for the current decl and possibly some
|
||||
// more decls that follow.
|
||||
// Find the next `decl` for which reformatting is re-enabled.
|
||||
token_index = decl.firstToken();
|
||||
|
||||
while (!fmt_active) {
|
||||
decl = (it.next() orelse {
|
||||
// If there's no next reformatted `decl`, just copy the
|
||||
// remaining input tokens and bail out.
|
||||
const start = tree.tokens.at(copy_start_token_index).start;
|
||||
try copyFixingWhitespace(stream, tree.source[start..]);
|
||||
return;
|
||||
}).*;
|
||||
var decl_first_token_index = decl.firstToken();
|
||||
|
||||
while (token_index < decl_first_token_index) : (token_index += 1) {
|
||||
const token = tree.tokens.at(token_index);
|
||||
switch (token.id) {
|
||||
Token.Id.LineComment => {},
|
||||
Token.Id.Eof => {
|
||||
const start = tree.tokens.at(start_token_index + 1).start;
|
||||
try copyFixingWhitespace(stream, tree.source[start..]);
|
||||
return;
|
||||
},
|
||||
Token.Id.Eof => unreachable,
|
||||
else => continue,
|
||||
}
|
||||
if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(end_token)[2..], " "), "zig fmt: on")) {
|
||||
const start = tree.tokens.at(start_token_index + 1).start;
|
||||
try copyFixingWhitespace(stream, tree.source[start..end_token.end]);
|
||||
try stream.writeByte('\n');
|
||||
while (tree.tokens.at(decl.firstToken()).start < end_token.end) {
|
||||
decl = (it.next() orelse return).*;
|
||||
}
|
||||
break :zig_fmt_loop;
|
||||
if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: on")) {
|
||||
fmt_active = true;
|
||||
} else if (mem.eql(u8, mem.trim(u8, tree.tokenSlicePtr(token)[2..], " "), "zig fmt: off")) {
|
||||
fmt_active = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Found the next `decl` for which reformatting is enabled. Copy
|
||||
// the input tokens before the `decl` that haven't been copied yet.
|
||||
var copy_end_token_index = decl.firstToken();
|
||||
token_index = copy_end_token_index;
|
||||
while (token_index != 0) {
|
||||
token_index -= 1;
|
||||
const token = tree.tokens.at(token_index);
|
||||
switch (token.id) {
|
||||
Token.Id.LineComment => {},
|
||||
Token.Id.DocComment => {
|
||||
copy_end_token_index = token_index;
|
||||
continue;
|
||||
},
|
||||
else => break,
|
||||
}
|
||||
}
|
||||
|
||||
const start = tree.tokens.at(copy_start_token_index).start;
|
||||
const end = tree.tokens.at(copy_end_token_index).start;
|
||||
try copyFixingWhitespace(stream, tree.source[start..end]);
|
||||
}
|
||||
|
||||
try renderTopLevelDecl(allocator, stream, tree, 0, &start_col, decl);
|
||||
@ -206,7 +263,20 @@ fn renderTopLevelDecl(allocator: *mem.Allocator, stream: var, tree: *ast.Tree, i
|
||||
} else if (field.type_expr != null and field.value_expr == null) {
|
||||
try renderToken(tree, stream, field.name_token, indent, start_col, Space.None); // name
|
||||
try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // :
|
||||
return renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Comma); // type,
|
||||
|
||||
if (field.align_expr) |align_value_expr| {
|
||||
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Space); // type
|
||||
const lparen_token = tree.prevToken(align_value_expr.firstToken());
|
||||
const align_kw = tree.prevToken(lparen_token);
|
||||
const rparen_token = tree.nextToken(align_value_expr.lastToken());
|
||||
try renderToken(tree, stream, align_kw, indent, start_col, Space.None); // align
|
||||
try renderToken(tree, stream, lparen_token, indent, start_col, Space.None); // (
|
||||
try renderExpression(allocator, stream, tree, indent, start_col, align_value_expr, Space.None); // alignment
|
||||
try renderToken(tree, stream, rparen_token, indent, start_col, Space.Comma); // )
|
||||
} else {
|
||||
try renderExpression(allocator, stream, tree, indent, start_col, field.type_expr.?, Space.Comma); // type,
|
||||
}
|
||||
|
||||
} else if (field.type_expr == null and field.value_expr != null) {
|
||||
try renderToken(tree, stream, field.name_token, indent, start_col, Space.Space); // name
|
||||
try renderToken(tree, stream, tree.nextToken(field.name_token), indent, start_col, Space.Space); // =
|
||||
@ -1924,15 +1994,24 @@ fn renderTokenOffset(
|
||||
}
|
||||
}
|
||||
|
||||
const comment_is_empty = mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ").len == 2;
|
||||
if (comment_is_empty) {
|
||||
switch (space) {
|
||||
Space.Newline => {
|
||||
try stream.writeByte('\n');
|
||||
start_col.* = 0;
|
||||
return;
|
||||
},
|
||||
else => {},
|
||||
while (true) {
|
||||
const comment_is_empty = mem.trimRight(u8, tree.tokenSlicePtr(next_token), " ").len == 2;
|
||||
if (comment_is_empty) {
|
||||
switch (space) {
|
||||
Space.Newline => {
|
||||
offset += 1;
|
||||
token = next_token;
|
||||
next_token = tree.tokens.at(token_index + offset);
|
||||
if (next_token.id != .LineComment) {
|
||||
try stream.writeByte('\n');
|
||||
start_col.* = 0;
|
||||
return;
|
||||
}
|
||||
},
|
||||
else => break,
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -2,6 +2,51 @@ const tests = @import("tests.zig");
|
||||
const builtin = @import("builtin");
|
||||
|
||||
pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
cases.add(
|
||||
"struct depends on itself via optional field",
|
||||
\\const LhsExpr = struct {
|
||||
\\ rhsExpr: ?AstObject,
|
||||
\\};
|
||||
\\const AstObject = union {
|
||||
\\ lhsExpr: LhsExpr,
|
||||
\\};
|
||||
\\export fn entry() void {
|
||||
\\ const lhsExpr = LhsExpr{ .rhsExpr = null };
|
||||
\\ const obj = AstObject{ .lhsExpr = lhsExpr };
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:1:17: error: struct 'LhsExpr' depends on itself",
|
||||
"tmp.zig:5:5: note: while checking this field",
|
||||
"tmp.zig:2:5: note: while checking this field",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
"alignment of enum field specified",
|
||||
\\const Number = enum {
|
||||
\\ a,
|
||||
\\ b align(i32),
|
||||
\\};
|
||||
\\export fn entry1() void {
|
||||
\\ var x: Number = undefined;
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:3:13: error: structs and unions, not enums, support field alignment",
|
||||
"tmp.zig:1:16: note: consider 'union(enum)' here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
"bad alignment type",
|
||||
\\export fn entry1() void {
|
||||
\\ var x: []align(true) i32 = undefined;
|
||||
\\}
|
||||
\\export fn entry2() void {
|
||||
\\ var x: *align(f64(12.34)) i32 = undefined;
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:2:20: error: expected type 'u29', found 'bool'",
|
||||
"tmp.zig:5:22: error: fractional component prevents float value 12.340000 from being casted to type 'u29'",
|
||||
);
|
||||
|
||||
cases.addCase(x: {
|
||||
var tc = cases.create("variable in inline assembly template cannot be found",
|
||||
\\export fn entry() void {
|
||||
@ -10,7 +55,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ : [bar] "=r" (-> usize)
|
||||
\\ );
|
||||
\\}
|
||||
, "tmp.zig:2:14: error: could not find 'foo' in the inputs or outputs.");
|
||||
, "tmp.zig:2:14: error: could not find 'foo' in the inputs or outputs");
|
||||
tc.target = tests.Target{
|
||||
.Cross = tests.CrossTarget{
|
||||
.arch = .x86_64,
|
||||
@ -53,8 +98,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:8:1: error: '@Frame(rangeSum)' depends on itself",
|
||||
"tmp.zig:15:33: note: when analyzing type '@Frame(rangeSumIndirect)' here",
|
||||
"tmp.zig:26:25: note: when analyzing type '@Frame(rangeSum)' here",
|
||||
"tmp.zig:15:33: note: when analyzing type '@Frame(rangeSum)' here",
|
||||
"tmp.zig:26:25: note: when analyzing type '@Frame(rangeSumIndirect)' here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -245,7 +290,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
,
|
||||
"tmp.zig:4:1: error: unable to determine async function frame of 'amain'",
|
||||
"tmp.zig:5:10: note: analysis of function 'other' depends on the frame",
|
||||
"tmp.zig:8:13: note: depends on the frame here",
|
||||
"tmp.zig:8:13: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -258,7 +303,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:4:1: error: cannot resolve '@Frame(amain)': function not fully analyzed yet",
|
||||
"tmp.zig:5:13: note: depends on its own frame here",
|
||||
"tmp.zig:5:13: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -404,7 +449,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ const foo: Foo = undefined;
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:2:8: error: expected type 'type', found '(undefined)'",
|
||||
"tmp.zig:2:8: error: use of undefined value here causes undefined behavior",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -470,7 +515,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
);
|
||||
|
||||
cases.add(
|
||||
"Generic function where return type is self-referenced",
|
||||
"generic function where return type is self-referenced",
|
||||
\\fn Foo(comptime T: type) Foo(T) {
|
||||
\\ return struct{ x: T };
|
||||
\\}
|
||||
@ -481,7 +526,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:1:29: error: evaluation exceeded 1000 backwards branches",
|
||||
"tmp.zig:1:29: note: called from here",
|
||||
"tmp.zig:5:18: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -645,7 +690,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\const A = struct { a : A, };
|
||||
\\export fn entry() usize { return @sizeOf(A); }
|
||||
,
|
||||
"tmp.zig:1:11: error: struct 'A' contains itself",
|
||||
"tmp.zig:1:11: error: struct 'A' depends on itself",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -655,7 +700,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\const C = struct { a : A, };
|
||||
\\export fn entry() usize { return @sizeOf(A); }
|
||||
,
|
||||
"tmp.zig:1:11: error: struct 'A' contains itself",
|
||||
"tmp.zig:1:11: error: struct 'A' depends on itself",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -670,7 +715,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ return @sizeOf(@typeOf(foo.x));
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:1:13: error: struct 'Foo' contains itself",
|
||||
"tmp.zig:1:13: error: struct 'Foo' depends on itself",
|
||||
"tmp.zig:8:28: note: referenced here",
|
||||
);
|
||||
|
||||
@ -689,7 +734,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:7:9: error: dependency loop detected",
|
||||
"tmp.zig:2:19: note: called from here",
|
||||
"tmp.zig:2:19: note: referenced here",
|
||||
"tmp.zig:10:21: note: referenced here",
|
||||
);
|
||||
|
||||
@ -703,7 +748,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ var s: Foo = Foo.E;
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:1:17: error: 'Foo' depends on itself",
|
||||
"tmp.zig:1:17: error: enum 'Foo' depends on itself",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -866,7 +911,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
break :x tc;
|
||||
});
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"export generic function",
|
||||
\\export fn foo(num: var) i32 {
|
||||
\\ return 0;
|
||||
@ -875,17 +920,17 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:1:15: error: parameter of type 'var' not allowed in function with calling convention 'ccc'",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"C pointer to c_void",
|
||||
\\export fn a() void {
|
||||
\\ var x: *c_void = undefined;
|
||||
\\ var y: [*c]c_void = x;
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:3:12: error: C pointers cannot point opaque types",
|
||||
"tmp.zig:3:16: error: C pointers cannot point opaque types",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"directly embedding opaque type in struct and union",
|
||||
\\const O = @OpaqueType();
|
||||
\\const Foo = struct {
|
||||
@ -906,7 +951,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:7:10: error: opaque types have unknown size and therefore cannot be directly embedded in unions",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"implicit cast between C pointer and Zig pointer - bad const/align/child",
|
||||
\\export fn a() void {
|
||||
\\ var x: [*c]u8 = undefined;
|
||||
@ -942,7 +987,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:23:22: error: expected type '[*c]u32', found '*u8'",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"implicit casting null c pointer to zig pointer",
|
||||
\\comptime {
|
||||
\\ var c_ptr: [*c]u8 = 0;
|
||||
@ -952,7 +997,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:3:24: error: null pointer casted to type '*u8'",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"implicit casting undefined c pointer to zig pointer",
|
||||
\\comptime {
|
||||
\\ var c_ptr: [*c]u8 = undefined;
|
||||
@ -962,7 +1007,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:3:24: error: use of undefined value here causes undefined behavior",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"implicit casting C pointers which would mess up null semantics",
|
||||
\\export fn entry() void {
|
||||
\\ var slice: []const u8 = "aoeu";
|
||||
@ -987,7 +1032,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:13:35: note: mutable '[*c]const u8' allows illegal null values stored to type '[*]u8'",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"implicit casting too big integers to C pointers",
|
||||
\\export fn a() void {
|
||||
\\ var ptr: [*c]u8 = (1 << 64) + 1;
|
||||
@ -1001,14 +1046,14 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:6:23: error: integer type 'u65' too big for implicit @intToPtr to type '[*c]u8'",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"C pointer pointing to non C ABI compatible type or has align attr",
|
||||
\\const Foo = struct {};
|
||||
\\export fn a() void {
|
||||
\\ const T = [*c]Foo;
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:3:15: error: C pointers cannot point to non-C-ABI-compatible type 'Foo'",
|
||||
"tmp.zig:3:19: error: C pointers cannot point to non-C-ABI-compatible type 'Foo'",
|
||||
);
|
||||
|
||||
cases.addCase(x: {
|
||||
@ -1029,7 +1074,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
break :x tc;
|
||||
});
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"assign to invalid dereference",
|
||||
\\export fn entry() void {
|
||||
\\ 'a'.* = 1;
|
||||
@ -1038,7 +1083,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:2:8: error: attempt to dereference non-pointer type 'comptime_int'",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"take slice of invalid dereference",
|
||||
\\export fn entry() void {
|
||||
\\ const x = 'a'.*[0..];
|
||||
@ -1047,7 +1092,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"tmp.zig:2:18: error: attempt to dereference non-pointer type 'comptime_int'",
|
||||
);
|
||||
|
||||
cases.addTest(
|
||||
cases.add(
|
||||
"@truncate undefined value",
|
||||
\\export fn entry() void {
|
||||
\\ var z = @truncate(u8, u16(undefined));
|
||||
@ -1091,7 +1136,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ return 5678;
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:2:12: error: `&&` is invalid. Note that `and` is boolean AND.",
|
||||
"tmp.zig:2:12: error: `&&` is invalid. Note that `and` is boolean AND",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -1935,7 +1980,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
"unknown length pointer to opaque",
|
||||
\\export const T = [*]@OpaqueType();
|
||||
,
|
||||
"tmp.zig:1:18: error: unknown-length pointer to opaque",
|
||||
"tmp.zig:1:21: error: unknown-length pointer to opaque",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -2924,7 +2969,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\fn a() *noreturn {}
|
||||
\\export fn entry() void { _ = a(); }
|
||||
,
|
||||
"tmp.zig:1:8: error: pointer to noreturn not allowed",
|
||||
"tmp.zig:1:9: error: pointer to noreturn not allowed",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -3596,7 +3641,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
);
|
||||
|
||||
cases.add(
|
||||
"non constant expression in array size outside function",
|
||||
"non constant expression in array size",
|
||||
\\const Foo = struct {
|
||||
\\ y: [get()]u8,
|
||||
\\};
|
||||
@ -3606,8 +3651,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\export fn entry() usize { return @sizeOf(@typeOf(Foo)); }
|
||||
,
|
||||
"tmp.zig:5:25: error: unable to evaluate constant expression",
|
||||
"tmp.zig:2:12: note: called from here",
|
||||
"tmp.zig:2:8: note: called from here",
|
||||
"tmp.zig:2:12: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -3701,7 +3745,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
|
||||
,
|
||||
"tmp.zig:3:14: error: division by zero",
|
||||
"tmp.zig:1:14: note: called from here",
|
||||
"tmp.zig:1:14: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -4133,7 +4177,8 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\export fn entry() usize { return @sizeOf(@typeOf(seventh_fib_number)); }
|
||||
,
|
||||
"tmp.zig:3:21: error: evaluation exceeded 1000 backwards branches",
|
||||
"tmp.zig:3:21: note: called from here",
|
||||
"tmp.zig:1:37: note: referenced here",
|
||||
"tmp.zig:6:50: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -4174,7 +4219,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\export fn entry() usize { return @sizeOf(@typeOf(a)); }
|
||||
,
|
||||
"tmp.zig:6:26: error: unable to evaluate constant expression",
|
||||
"tmp.zig:4:17: note: called from here",
|
||||
"tmp.zig:4:17: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -4257,7 +4302,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
|
||||
,
|
||||
"tmp.zig:3:12: error: negation caused overflow",
|
||||
"tmp.zig:1:14: note: called from here",
|
||||
"tmp.zig:1:14: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -4270,7 +4315,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
|
||||
,
|
||||
"tmp.zig:3:14: error: operation caused overflow",
|
||||
"tmp.zig:1:14: note: called from here",
|
||||
"tmp.zig:1:14: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -4283,7 +4328,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
|
||||
,
|
||||
"tmp.zig:3:14: error: operation caused overflow",
|
||||
"tmp.zig:1:14: note: called from here",
|
||||
"tmp.zig:1:14: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -4296,7 +4341,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\export fn entry() usize { return @sizeOf(@typeOf(y)); }
|
||||
,
|
||||
"tmp.zig:3:14: error: operation caused overflow",
|
||||
"tmp.zig:1:14: note: called from here",
|
||||
"tmp.zig:1:14: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -4388,7 +4433,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:3:7: error: unable to evaluate constant expression",
|
||||
"tmp.zig:16:19: note: called from here",
|
||||
"tmp.zig:16:19: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -4717,7 +4762,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:10:14: error: unable to evaluate constant expression",
|
||||
"tmp.zig:6:20: note: called from here",
|
||||
"tmp.zig:6:20: note: referenced here",
|
||||
);
|
||||
|
||||
cases.add(
|
||||
@ -5864,7 +5909,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:4:25: error: aoeu",
|
||||
"tmp.zig:1:36: note: called from here",
|
||||
"tmp.zig:1:36: note: referenced here",
|
||||
"tmp.zig:12:20: note: referenced here",
|
||||
);
|
||||
|
||||
@ -5939,7 +5984,7 @@ pub fn addCases(cases: *tests.CompileErrorContext) void {
|
||||
\\ var x: MultipleChoice = undefined;
|
||||
\\}
|
||||
,
|
||||
"tmp.zig:2:14: error: non-enum union field assignment",
|
||||
"tmp.zig:2:14: error: untagged union field assignment",
|
||||
"tmp.zig:1:24: note: consider 'union(enum)' here",
|
||||
);
|
||||
|
||||
|
||||
@ -15,6 +15,7 @@ comptime {
|
||||
_ = @import("behavior/bugs/1111.zig");
|
||||
_ = @import("behavior/bugs/1120.zig");
|
||||
_ = @import("behavior/bugs/1277.zig");
|
||||
_ = @import("behavior/bugs/1310.zig");
|
||||
_ = @import("behavior/bugs/1322.zig");
|
||||
_ = @import("behavior/bugs/1381.zig");
|
||||
_ = @import("behavior/bugs/1421.zig");
|
||||
@ -22,15 +23,18 @@ comptime {
|
||||
_ = @import("behavior/bugs/1486.zig");
|
||||
_ = @import("behavior/bugs/1500.zig");
|
||||
_ = @import("behavior/bugs/1607.zig");
|
||||
_ = @import("behavior/bugs/1735.zig");
|
||||
_ = @import("behavior/bugs/1851.zig");
|
||||
_ = @import("behavior/bugs/1914.zig");
|
||||
_ = @import("behavior/bugs/2006.zig");
|
||||
_ = @import("behavior/bugs/2114.zig");
|
||||
_ = @import("behavior/bugs/2346.zig");
|
||||
_ = @import("behavior/bugs/2578.zig");
|
||||
_ = @import("behavior/bugs/3112.zig");
|
||||
_ = @import("behavior/bugs/394.zig");
|
||||
_ = @import("behavior/bugs/421.zig");
|
||||
_ = @import("behavior/bugs/529.zig");
|
||||
_ = @import("behavior/bugs/624.zig");
|
||||
_ = @import("behavior/bugs/655.zig");
|
||||
_ = @import("behavior/bugs/656.zig");
|
||||
_ = @import("behavior/bugs/679.zig");
|
||||
|
||||
@ -290,3 +290,18 @@ test "read 128-bit field from default aligned struct in global memory" {
    expect((@ptrToInt(&default_aligned_global.badguy) % 16) == 0);
    expect(12 == default_aligned_global.badguy);
}

test "struct field explicit alignment" {
    const S = struct {
        const Node = struct {
            next: *Node,
            massive_byte: u8 align(64),
        };
    };

    var node: S.Node = undefined;
    node.massive_byte = 100;
    expect(node.massive_byte == 100);
    comptime expect(@typeOf(&node.massive_byte) == *align(64) u8);
    expect(@ptrToInt(&node.massive_byte) % 64 == 0);
}
@ -292,3 +292,9 @@ test "read/write through global variable array of struct fields initialized via
    };
    S.doTheTest();
}

test "implicit cast zero sized array ptr to slice" {
    var b = "";
    const c: []const u8 = &b;
    expect(c.len == 0);
}
test/stage1/behavior/bugs/1310.zig (new file, 24 lines)
@ -0,0 +1,24 @@
const std = @import("std");
const expect = std.testing.expect;

pub const VM = ?[*]const struct_InvocationTable_;
pub const struct_InvocationTable_ = extern struct {
    GetVM: ?extern fn (?[*]VM) c_int,
};

pub const struct_VM_ = extern struct {
    functions: ?[*]const struct_InvocationTable_,
};

//excised output from stdlib.h etc

pub const InvocationTable_ = struct_InvocationTable_;
pub const VM_ = struct_VM_;

extern fn agent_callback(_vm: [*]VM, options: [*]u8) i32 {
    return 11;
}

test "fixed" {
    expect(agent_callback(undefined, undefined) == 11);
}
test/stage1/behavior/bugs/1735.zig (new file, 46 lines)
@ -0,0 +1,46 @@
const std = @import("std");

const mystruct = struct {
    pending: ?listofstructs,
};
pub fn TailQueue(comptime T: type) type {
    return struct {
        const Self = @This();

        pub const Node = struct {
            prev: ?*Node,
            next: ?*Node,
            data: T,
        };

        first: ?*Node,
        last: ?*Node,
        len: usize,

        pub fn init() Self {
            return Self{
                .first = null,
                .last = null,
                .len = 0,
            };
        }
    };
}
const listofstructs = TailQueue(mystruct);

const a = struct {
    const Self = @This();

    foo: listofstructs,

    pub fn init() Self {
        return Self{
            .foo = listofstructs.init(),
        };
    }
};

test "intialization" {
    var t = a.init();
    std.testing.expect(t.foo.len == 0);
}
test/stage1/behavior/bugs/3112.zig (new file, 17 lines)
@ -0,0 +1,17 @@
const std = @import("std");
const expect = std.testing.expect;

const State = struct {
    const Self = @This();
    enter: fn (previous: ?Self) void,
};

fn prev(p: ?State) void {
    expect(p == null);
}

test "zig test crash" {
    var global: State = undefined;
    global.enter = prev;
    global.enter(null);
}
test/stage1/behavior/bugs/624.zig (new file, 23 lines)
@ -0,0 +1,23 @@
const std = @import("std");
const expect = std.testing.expect;

const TestContext = struct {
    server_context: *ListenerContext,
};

const ListenerContext = struct {
    context_alloc: *ContextAllocator,
};

const ContextAllocator = MemoryPool(TestContext);

fn MemoryPool(comptime T: type) type {
    return struct {
        n: usize,
    };
}

test "foo" {
    var allocator = ContextAllocator{ .n = 10 };
    expect(allocator.n == 10);
}
@ -375,3 +375,23 @@ test "implicit cast to optional to error union to return result loc" {
    S.entry();
    //comptime S.entry(); TODO
}

test "function pointer with return type that is error union with payload which is pointer of parent struct" {
    const S = struct {
        const Foo = struct {
            fun: fn (a: i32) (anyerror!*Foo),
        };

        const Err = error{UnspecifiedErr};

        fn bar(a: i32) anyerror!*Foo {
            return Err.UnspecifiedErr;
        }

        fn doTheTest() void {
            var x = Foo{ .fun = bar };
            expectError(error.UnspecifiedErr, x.fun(1));
        }
    };
    S.doTheTest();
}
@ -706,3 +706,18 @@ test "result location zero sized array inside struct field implicit cast to slic
    var foo = E{ .entries = [_]u32{} };
    expect(foo.entries.len == 0);
}

var global_foo: *i32 = undefined;

test "global variable assignment with optional unwrapping with var initialized to undefined" {
    const S = struct {
        var data: i32 = 1234;
        fn foo() ?*i32 {
            return &data;
        }
    };
    global_foo = S.foo() orelse {
        @panic("bad");
    };
    expect(global_foo.* == 1234);
}
@ -100,3 +100,22 @@ test "nested orelse" {
    S.entry();
    comptime S.entry();
}

test "self-referential struct through a slice of optional" {
    const S = struct {
        const Node = struct {
            children: []?Node,
            data: ?u8,

            fn new() Node {
                return Node{
                    .children = undefined,
                    .data = null,
                };
            }
        };
    };

    var n = S.Node.new();
    expect(n.data == null);
}
@ -504,12 +504,9 @@ const Contents = struct {
    }
};

comptime {
    @compileError("the behavior of std.AutoHashMap changed and []const u8 will be treated as a pointer. will need to update the hash maps to actually do some kind of hashing on the slices.");
}
const HashToContents = std.AutoHashMap([]const u8, Contents);
const HashToContents = std.StringHashMap(Contents);
const TargetToHash = std.HashMap(DestTarget, []const u8, DestTarget.hash, DestTarget.eql);
const PathTable = std.AutoHashMap([]const u8, *TargetToHash);
const PathTable = std.StringHashMap(*TargetToHash);

const LibCVendor = enum {
    musl,
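The compileError removed above states the motivation: std.AutoHashMap derives its hashing from the key type, so a []const u8 key would be hashed as a pointer plus length rather than by its bytes, and these tables move to std.StringHashMap, which hashes the slice contents. A minimal sketch of the same substitution in isolation; the helper name and allocator parameter are hypothetical, and the exact HashMap method signatures of this Zig version may differ slightly:

const std = @import("std");

// Hypothetical helper, for illustration only.
fn rememberVersion(allocator: *std.mem.Allocator) !void {
    // String keys must be hashed by their bytes, not by the slice's pointer,
    // so StringHashMap replaces AutoHashMap([]const u8, ...).
    var ver_set = std.StringHashMap(usize).init(allocator);
    defer ver_set.deinit();

    _ = try ver_set.put("GLIBC_2.15", 0);
    std.debug.assert(ver_set.get("GLIBC_2.15") != null);
}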
@ -118,7 +118,7 @@ const FunctionSet = struct {
    list: std.ArrayList(VersionedFn),
    fn_vers_list: FnVersionList,
};
const FnVersionList = std.AutoHashMap([]const u8, std.ArrayList(usize));
const FnVersionList = std.StringHashMap(std.ArrayList(usize));

const VersionedFn = struct {
    ver: []const u8, // example: "GLIBC_2.15"
@ -140,8 +140,8 @@ pub fn main() !void {
    const prefix = try fs.path.join(allocator, [_][]const u8{ in_glibc_dir, "sysdeps", "unix", "sysv", "linux" });
    const glibc_out_dir = try fs.path.join(allocator, [_][]const u8{ zig_src_dir, "libc", "glibc" });

    var global_fn_set = std.AutoHashMap([]const u8, Function).init(allocator);
    var global_ver_set = std.AutoHashMap([]const u8, usize).init(allocator);
    var global_fn_set = std.StringHashMap(Function).init(allocator);
    var global_ver_set = std.StringHashMap(usize).init(allocator);
    var target_functions = std.AutoHashMap(usize, FunctionSet).init(allocator);

    for (abi_lists) |*abi_list| {