diff --git a/CMakeLists.txt b/CMakeLists.txt index 252b07e6dd..8a090d9f29 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -364,6 +364,11 @@ set(ZIG_STD_FILES "c/index.zig" "c/linux.zig" "c/windows.zig" + "crypto/index.zig" + "crypto/md5.zig" + "crypto/sha1.zig" + "crypto/sha2.zig" + "crypto/blake2.zig" "cstr.zig" "debug/failing_allocator.zig" "debug/index.zig" diff --git a/README.md b/README.md index 87b737e580..abdd2a3184 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ that counts as "freestanding" for the purposes of this table. | | freestanding | linux | macosx | windows | other | |-------------|--------------|---------|---------|---------|---------| -|i386 | OK | planned | OK | OK | planned | +|i386 | OK | planned | OK | planned | planned | |x86_64 | OK | OK | OK | OK | planned | |arm | OK | planned | planned | N/A | planned | |aarch64 | OK | planned | planned | planned | planned | @@ -125,17 +125,20 @@ libc. Create demo games using Zig. ##### POSIX - * gcc >= 5.0.0 or clang >= 3.6.0 * cmake >= 2.8.5 + * gcc >= 5.0.0 or clang >= 3.6.0 * LLVM, Clang, LLD libraries == 6.x, compiled with the same gcc or clang version above ##### Windows + * cmake >= 2.8.5 * Microsoft Visual Studio 2015 * LLVM, Clang, LLD libraries == 6.x, compiled with the same MSVC version above #### Instructions +##### POSIX + If you have gcc or clang installed, you can find out what `ZIG_LIBC_LIB_DIR`, `ZIG_LIBC_STATIC_LIB_DIR`, and `ZIG_LIBC_INCLUDE_DIR` should be set to (example below). diff --git a/build.zig b/build.zig index 7c1563c706..1fa984dcff 100644 --- a/build.zig +++ b/build.zig @@ -10,7 +10,7 @@ const ArrayList = std.ArrayList; const Buffer = std.Buffer; const io = std.io; -pub fn build(b: &Builder) { +pub fn build(b: &Builder) -> %void { const mode = b.standardReleaseOptions(); var docgen_exe = b.addExecutable("docgen", "doc/docgen.zig"); @@ -36,7 +36,7 @@ pub fn build(b: &Builder) { const test_step = b.step("test", "Run all the tests"); // find the stage0 build artifacts because we're going to re-use config.h and zig_cpp library - const build_info = b.exec([][]const u8{b.zig_exe, "BUILD_INFO"}); + const build_info = try b.exec([][]const u8{b.zig_exe, "BUILD_INFO"}); var index: usize = 0; const cmake_binary_dir = nextValue(&index, build_info); const cxx_compiler = nextValue(&index, build_info); @@ -68,7 +68,7 @@ pub fn build(b: &Builder) { dependOnLib(exe, llvm); if (exe.target.getOs() == builtin.Os.linux) { - const libstdcxx_path_padded = b.exec([][]const u8{cxx_compiler, "-print-file-name=libstdc++.a"}); + const libstdcxx_path_padded = try b.exec([][]const u8{cxx_compiler, "-print-file-name=libstdc++.a"}); const libstdcxx_path = ??mem.split(libstdcxx_path_padded, "\r\n").next(); exe.addObjectFile(libstdcxx_path); @@ -155,9 +155,9 @@ const LibraryDep = struct { }; fn findLLVM(b: &Builder, llvm_config_exe: []const u8) -> %LibraryDep { - const libs_output = b.exec([][]const u8{llvm_config_exe, "--libs", "--system-libs"}); - const includes_output = b.exec([][]const u8{llvm_config_exe, "--includedir"}); - const libdir_output = b.exec([][]const u8{llvm_config_exe, "--libdir"}); + const libs_output = try b.exec([][]const u8{llvm_config_exe, "--libs", "--system-libs"}); + const includes_output = try b.exec([][]const u8{llvm_config_exe, "--includedir"}); + const libdir_output = try b.exec([][]const u8{llvm_config_exe, "--libdir"}); var result = LibraryDep { .libs = ArrayList([]const u8).init(b.allocator), diff --git a/doc/langref.html.in b/doc/langref.html.in index 2476790105..94bc780959 100644 --- 
a/doc/langref.html.in +++ b/doc/langref.html.in @@ -142,6 +142,7 @@
  • @TagType
  • @EnumTagType
  • @errorName
  + • @errorReturnTrace
  • @fence
  • @fieldParentPtr
  • @frameAddress
@@ -4412,6 +4413,13 @@ test.zig:6:2: error: found compile log statement
or all calls have a compile-time known value for err, then no error name table will be generated.

    + @errorReturnTrace
    +
    + @errorReturnTrace() -> ?&builtin.StackTrace
    +
    + If the binary is built with error return tracing, and this function is invoked in a
    + function that calls a function with an error or error union return type, returns a
    + stack trace object. Otherwise returns `null`.
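
    A minimal usage sketch (not part of this patch): assuming a build mode where error
    return tracing is enabled (any mode other than ReleaseFast, per the codegen.cpp change
    further down) and that `std.debug.warn` is available, a caller can inspect the trace
    after catching an error:

        const std = @import("std");

        error ItBroke;

        fn failing() -> %void {
            return error.ItBroke;
        }

        pub fn main() -> %void {
            failing() catch |err| {
                // Non-null only when error return tracing is compiled in and this
                // function itself calls an errorable function.
                if (@errorReturnTrace()) |trace| {
                    // StackTrace fields follow the builtin definition added in codegen.cpp:
                    // index: usize, instruction_addresses: []usize.
                    std.debug.warn("caught {}, {} return addresses recorded\n",
                        @errorName(err), trace.index);
                }
            };
        }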

    @fence

    @fence(order: AtomicOrder)

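
    Further down, typecheck_panic_fn in analyze.cpp is changed to require a second parameter
    on a user-provided panic handler. A hedged sketch of a handler matching the new expected
    prototype 'fn([]const u8, ?&builtin.StackTrace) -> unreachable' (the body is a placeholder,
    not this patch's default implementation):

        const builtin = @import("builtin");

        pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> unreachable {
            // A real handler would report msg and, when non-null, walk
            // error_return_trace.instruction_addresses; this stub just hangs.
            while (true) {}
        }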
    diff --git a/example/mix_o_files/build.zig b/example/mix_o_files/build.zig index 391af9924a..4bba69b091 100644 --- a/example/mix_o_files/build.zig +++ b/example/mix_o_files/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) { +pub fn build(b: &Builder) -> %void { const obj = b.addObject("base64", "base64.zig"); const exe = b.addCExecutable("test"); diff --git a/example/shared_library/build.zig b/example/shared_library/build.zig index 9b7f3793c6..147b54401c 100644 --- a/example/shared_library/build.zig +++ b/example/shared_library/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) { +pub fn build(b: &Builder) -> %void { const lib = b.addSharedLibrary("mathtest", "mathtest.zig", b.version(1, 0, 0)); const exe = b.addCExecutable("test"); diff --git a/src-self-hosted/parser.zig b/src-self-hosted/parser.zig index b4bf9a1377..88b6dd9bb0 100644 --- a/src-self-hosted/parser.zig +++ b/src-self-hosted/parser.zig @@ -1146,12 +1146,6 @@ fn testCanonical(source: []const u8) { } test "zig fmt" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } - testCanonical( \\extern fn puts(s: &const u8) -> c_int; \\ diff --git a/src/all_types.hpp b/src/all_types.hpp index 024c78eb73..b401097647 100644 --- a/src/all_types.hpp +++ b/src/all_types.hpp @@ -1205,6 +1205,7 @@ struct FnTableEntry { uint32_t alignstack_value; ZigList export_list; + bool calls_errorable_function; }; uint32_t fn_table_entry_hash(FnTableEntry*); @@ -1273,6 +1274,7 @@ enum BuiltinFnId { BuiltinFnIdSetAlignStack, BuiltinFnIdArgType, BuiltinFnIdExport, + BuiltinFnIdErrorReturnTrace, }; struct BuiltinFnEntry { @@ -1498,6 +1500,7 @@ struct CodeGen { Buf triple_str; BuildMode build_mode; bool is_test_build; + bool have_err_ret_tracing; uint32_t target_os_index; uint32_t target_arch_index; uint32_t target_environ_index; @@ -1530,6 +1533,7 @@ struct CodeGen { FnTableEntry *panic_fn; LLVMValueRef cur_ret_ptr; LLVMValueRef cur_fn_val; + LLVMValueRef cur_err_ret_trace_val; bool c_want_stdint; bool c_want_stdbool; AstNode *root_export_decl; @@ -1572,6 +1576,8 @@ struct CodeGen { size_t largest_err_name_len; LLVMValueRef safety_crash_err_fn; + LLVMValueRef return_err_fn; + IrInstruction *invalid_instruction; ConstExprValue const_void_val; @@ -1595,6 +1601,8 @@ struct CodeGen { ZigList tld_ref_source_node_stack; TypeTableEntry *align_amt_type; + TypeTableEntry *stack_trace_type; + TypeTableEntry *ptr_to_stack_trace_type; }; enum VarLinkage { @@ -1896,6 +1904,7 @@ enum IrInstructionId { IrInstructionIdSetAlignStack, IrInstructionIdArgType, IrInstructionIdExport, + IrInstructionIdErrorReturnTrace, }; struct IrInstruction { @@ -2717,6 +2726,10 @@ struct IrInstructionExport { IrInstruction *target; }; +struct IrInstructionErrorReturnTrace { + IrInstruction base; +}; + static const size_t slice_ptr_index = 0; static const size_t slice_len_index = 1; diff --git a/src/analyze.cpp b/src/analyze.cpp index 7e4a861f0f..9da8485014 100644 --- a/src/analyze.cpp +++ b/src/analyze.cpp @@ -869,6 +869,16 @@ static const char *calling_convention_fn_type_str(CallingConvention cc) { zig_unreachable(); } +TypeTableEntry *get_ptr_to_stack_trace_type(CodeGen *g) { + if (g->stack_trace_type == nullptr) { + ConstExprValue *stack_trace_type_val = get_builtin_value(g, "StackTrace"); + assert(stack_trace_type_val->type->id == TypeTableEntryIdMetaType); + 
g->stack_trace_type = stack_trace_type_val->data.x_type; + g->ptr_to_stack_trace_type = get_pointer_to_type(g, g->stack_trace_type, false); + } + return g->ptr_to_stack_trace_type; +} + TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { auto table_entry = g->fn_type_table.maybe_get(fn_type_id); if (table_entry) { @@ -915,10 +925,16 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { if (!skip_debug_info) { bool first_arg_return = calling_convention_does_first_arg_return(fn_type_id->cc) && handle_is_ptr(fn_type_id->return_type); + bool prefix_arg_error_return_trace = g->have_err_ret_tracing && + (fn_type_id->return_type->id == TypeTableEntryIdErrorUnion || + fn_type_id->return_type->id == TypeTableEntryIdPureError); // +1 for maybe making the first argument the return value - LLVMTypeRef *gen_param_types = allocate(1 + fn_type_id->param_count); - // +1 because 0 is the return type and +1 for maybe making first arg ret val - ZigLLVMDIType **param_di_types = allocate(2 + fn_type_id->param_count); + // +1 for maybe last argument the error return trace + LLVMTypeRef *gen_param_types = allocate(2 + fn_type_id->param_count); + // +1 because 0 is the return type and + // +1 for maybe making first arg ret val and + // +1 for maybe last argument the error return trace + ZigLLVMDIType **param_di_types = allocate(3 + fn_type_id->param_count); param_di_types[0] = fn_type_id->return_type->di_type; size_t gen_param_index = 0; TypeTableEntry *gen_return_type; @@ -936,6 +952,14 @@ TypeTableEntry *get_fn_type(CodeGen *g, FnTypeId *fn_type_id) { } fn_type->data.fn.gen_return_type = gen_return_type; + if (prefix_arg_error_return_trace) { + TypeTableEntry *gen_type = get_ptr_to_stack_trace_type(g); + gen_param_types[gen_param_index] = gen_type->type_ref; + gen_param_index += 1; + // after the gen_param_index += 1 because 0 is the return type + param_di_types[gen_param_index] = gen_type->di_type; + } + fn_type->data.fn.gen_param_info = allocate(fn_type_id->param_count); for (size_t i = 0; i < fn_type_id->param_count; i += 1) { FnTypeParamInfo *src_param_info = &fn_type->data.fn.fn_type_id.param_info[i]; @@ -1168,6 +1192,9 @@ static TypeTableEntry *analyze_fn_type(CodeGen *g, AstNode *proto_node, Scope *c } TypeTableEntry *type_entry = analyze_type_expr(g, child_scope, param_node->data.param_decl.type); + if (type_is_invalid(type_entry)) { + return g->builtin_types.entry_invalid; + } if (fn_type_id.cc != CallingConventionUnspecified) { type_ensure_zero_bits_known(g, type_entry); if (!type_has_bits(type_entry)) { @@ -2204,6 +2231,7 @@ static void resolve_union_zero_bits(CodeGen *g, TypeTableEntry *union_type) { // is a pointer to this very struct, or a function pointer with parameters that // reference such a type. 
union_type->data.unionation.zero_bits_known = true; + union_type->data.unionation.zero_bits_loop_flag = false; if (union_type->data.unionation.abi_alignment == 0) { if (union_type->data.unionation.layout == ContainerLayoutPacked) { union_type->data.unionation.abi_alignment = 1; @@ -2558,7 +2586,7 @@ static bool scope_is_root_decls(Scope *scope) { static void wrong_panic_prototype(CodeGen *g, AstNode *proto_node, TypeTableEntry *fn_type) { add_node_error(g, proto_node, - buf_sprintf("expected 'fn([]const u8) -> unreachable', found '%s'", + buf_sprintf("expected 'fn([]const u8, ?&builtin.StackTrace) -> unreachable', found '%s'", buf_ptr(&fn_type->name))); } @@ -2567,7 +2595,7 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) { assert(proto_node->type == NodeTypeFnProto); TypeTableEntry *fn_type = panic_fn->type_entry; FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id; - if (fn_type_id->param_count != 1) { + if (fn_type_id->param_count != 2) { return wrong_panic_prototype(g, proto_node, fn_type); } TypeTableEntry *const_u8_ptr = get_pointer_to_type(g, g->builtin_types.entry_u8, true); @@ -2576,6 +2604,11 @@ static void typecheck_panic_fn(CodeGen *g, FnTableEntry *panic_fn) { return wrong_panic_prototype(g, proto_node, fn_type); } + TypeTableEntry *nullable_ptr_to_stack_trace_type = get_maybe_type(g, get_ptr_to_stack_trace_type(g)); + if (fn_type_id->param_info[1].type != nullable_ptr_to_stack_trace_type) { + return wrong_panic_prototype(g, proto_node, fn_type); + } + TypeTableEntry *actual_return_type = fn_type_id->return_type; if (actual_return_type != g->builtin_types.entry_unreachable) { return wrong_panic_prototype(g, proto_node, fn_type); @@ -2680,13 +2713,6 @@ static void resolve_decl_fn(CodeGen *g, TldFn *tld_fn) { { if (g->have_pub_main && buf_eql_str(&fn_table_entry->symbol_name, "main")) { g->main_fn = fn_table_entry; - TypeTableEntry *err_void = get_error_type(g, g->builtin_types.entry_void); - TypeTableEntry *actual_return_type = fn_table_entry->type_entry->data.fn.fn_type_id.return_type; - if (actual_return_type != err_void) { - add_node_error(g, fn_proto->return_type, - buf_sprintf("expected return type of main to be '%%void', instead is '%s'", - buf_ptr(&actual_return_type->name))); - } } else if ((import->package == g->panic_package || g->have_pub_panic) && buf_eql_str(&fn_table_entry->symbol_name, "panic")) { @@ -5527,3 +5553,13 @@ bool type_ptr_eql(const TypeTableEntry *a, const TypeTableEntry *b) { return a == b; } +ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name) { + Tld *tld = codegen->compile_var_import->decls_scope->decl_table.get(buf_create_from_str(name)); + resolve_top_level_decl(codegen, tld, false, nullptr); + assert(tld->id == TldIdVar); + TldVar *tld_var = (TldVar *)tld; + ConstExprValue *var_value = tld_var->var->value; + assert(var_value != nullptr); + return var_value; +} + diff --git a/src/analyze.hpp b/src/analyze.hpp index 6224e64dd5..3992cefdfc 100644 --- a/src/analyze.hpp +++ b/src/analyze.hpp @@ -185,4 +185,9 @@ PackageTableEntry *new_anonymous_package(void); Buf *const_value_to_buffer(ConstExprValue *const_val); void add_fn_export(CodeGen *g, FnTableEntry *fn_table_entry, Buf *symbol_name, GlobalLinkageId linkage, bool ccc); + +ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name); +TypeTableEntry *get_ptr_to_stack_trace_type(CodeGen *g); + + #endif diff --git a/src/bigint.cpp b/src/bigint.cpp index f01436d232..a68dd3a4b8 100644 --- a/src/bigint.cpp +++ b/src/bigint.cpp @@ -12,6 +12,9 @@ 
#include "os.hpp" #include "softfloat.hpp" +#include +#include + static void bigint_normalize(BigInt *dest) { const uint64_t *digits = bigint_ptr(dest); @@ -539,7 +542,7 @@ void bigint_add(BigInt *dest, const BigInt *op1, const BigInt *op2) { dest->data.digits[i] = x; i += 1; - if (!found_digit) + if (!found_digit || i >= bigger_op->digit_count) break; } assert(overflow == 0); @@ -670,19 +673,417 @@ void bigint_mul_wrap(BigInt *dest, const BigInt *op1, const BigInt *op2, size_t bigint_truncate(dest, &unwrapped, bit_count, is_signed); } +enum ZeroBehavior { + /// \brief The returned value is undefined. + ZB_Undefined, + /// \brief The returned value is numeric_limits::max() + ZB_Max, + /// \brief The returned value is numeric_limits::digits + ZB_Width +}; + +template struct LeadingZerosCounter { + static std::size_t count(T Val, ZeroBehavior) { + if (!Val) + return std::numeric_limits::digits; + + // Bisection method. + std::size_t ZeroBits = 0; + for (T Shift = std::numeric_limits::digits >> 1; Shift; Shift >>= 1) { + T Tmp = Val >> Shift; + if (Tmp) + Val = Tmp; + else + ZeroBits |= Shift; + } + return ZeroBits; + } +}; + +#if __GNUC__ >= 4 || defined(_MSC_VER) +template struct LeadingZerosCounter { + static std::size_t count(T Val, ZeroBehavior ZB) { + if (ZB != ZB_Undefined && Val == 0) + return 32; + +#if defined(_MSC_VER) + unsigned long Index; + _BitScanReverse(&Index, Val); + return Index ^ 31; +#else + return __builtin_clz(Val); +#endif + } +}; + +#if !defined(_MSC_VER) || defined(_M_X64) +template struct LeadingZerosCounter { + static std::size_t count(T Val, ZeroBehavior ZB) { + if (ZB != ZB_Undefined && Val == 0) + return 64; + +#if defined(_MSC_VER) + unsigned long Index; + _BitScanReverse64(&Index, Val); + return Index ^ 63; +#else + return __builtin_clzll(Val); +#endif + } +}; +#endif +#endif + +/// \brief Count number of 0's from the most significant bit to the least +/// stopping at the first 1. +/// +/// Only unsigned integral types are allowed. +/// +/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are +/// valid arguments. +template +std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) { + static_assert(std::numeric_limits::is_integer && + !std::numeric_limits::is_signed, + "Only unsigned integral types are allowed."); + return LeadingZerosCounter::count(Val, ZB); +} + +/// Make a 64-bit integer from a high / low pair of 32-bit integers. +constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) { + return ((uint64_t)High << 32) | (uint64_t)Low; +} + +/// Return the high 32 bits of a 64 bit value. +constexpr inline uint32_t Hi_32(uint64_t Value) { + return static_cast(Value >> 32); +} + +/// Return the low 32 bits of a 64 bit value. +constexpr inline uint32_t Lo_32(uint64_t Value) { + return static_cast(Value); +} + +/// Implementation of Knuth's Algorithm D (Division of nonnegative integers) +/// from "Art of Computer Programming, Volume 2", section 4.3.1, p. 272. The +/// variables here have the same names as in the algorithm. Comments explain +/// the algorithm and any deviation from it. +static void KnuthDiv(uint32_t *u, uint32_t *v, uint32_t *q, uint32_t* r, + unsigned m, unsigned n) +{ + assert(u && "Must provide dividend"); + assert(v && "Must provide divisor"); + assert(q && "Must provide quotient"); + assert(u != v && u != q && v != q && "Must use different memory"); + assert(n>1 && "n must be > 1"); + + // b denotes the base of the number system. In our case b is 2^32. 
+ const uint64_t b = uint64_t(1) << 32; + + // D1. [Normalize.] Set d = b / (v[n-1] + 1) and multiply all the digits of + // u and v by d. Note that we have taken Knuth's advice here to use a power + // of 2 value for d such that d * v[n-1] >= b/2 (b is the base). A power of + // 2 allows us to shift instead of multiply and it is easy to determine the + // shift amount from the leading zeros. We are basically normalizing the u + // and v so that its high bits are shifted to the top of v's range without + // overflow. Note that this can require an extra word in u so that u must + // be of length m+n+1. + unsigned shift = countLeadingZeros(v[n-1]); + uint32_t v_carry = 0; + uint32_t u_carry = 0; + if (shift) { + for (unsigned i = 0; i < m+n; ++i) { + uint32_t u_tmp = u[i] >> (32 - shift); + u[i] = (u[i] << shift) | u_carry; + u_carry = u_tmp; + } + for (unsigned i = 0; i < n; ++i) { + uint32_t v_tmp = v[i] >> (32 - shift); + v[i] = (v[i] << shift) | v_carry; + v_carry = v_tmp; + } + } + u[m+n] = u_carry; + + // D2. [Initialize j.] Set j to m. This is the loop counter over the places. + int j = m; + do { + // D3. [Calculate q'.]. + // Set qp = (u[j+n]*b + u[j+n-1]) / v[n-1]. (qp=qprime=q') + // Set rp = (u[j+n]*b + u[j+n-1]) % v[n-1]. (rp=rprime=r') + // Now test if qp == b or qp*v[n-2] > b*rp + u[j+n-2]; if so, decrease + // qp by 1, increase rp by v[n-1], and repeat this test if rp < b. The test + // on v[n-2] determines at high speed most of the cases in which the trial + // value qp is one too large, and it eliminates all cases where qp is two + // too large. + uint64_t dividend = Make_64(u[j+n], u[j+n-1]); + uint64_t qp = dividend / v[n-1]; + uint64_t rp = dividend % v[n-1]; + if (qp == b || qp*v[n-2] > b*rp + u[j+n-2]) { + qp--; + rp += v[n-1]; + if (rp < b && (qp == b || qp*v[n-2] > b*rp + u[j+n-2])) + qp--; + } + + // D4. [Multiply and subtract.] Replace (u[j+n]u[j+n-1]...u[j]) with + // (u[j+n]u[j+n-1]..u[j]) - qp * (v[n-1]...v[1]v[0]). This computation + // consists of a simple multiplication by a one-place number, combined with + // a subtraction. + // The digits (u[j+n]...u[j]) should be kept positive; if the result of + // this step is actually negative, (u[j+n]...u[j]) should be left as the + // true value plus b**(n+1), namely as the b's complement of + // the true value, and a "borrow" to the left should be remembered. + int64_t borrow = 0; + for (unsigned i = 0; i < n; ++i) { + uint64_t p = uint64_t(qp) * uint64_t(v[i]); + int64_t subres = int64_t(u[j+i]) - borrow - Lo_32(p); + u[j+i] = Lo_32(subres); + borrow = Hi_32(p) - Hi_32(subres); + } + bool isNeg = u[j+n] < borrow; + u[j+n] -= Lo_32(borrow); + + // D5. [Test remainder.] Set q[j] = qp. If the result of step D4 was + // negative, go to step D6; otherwise go on to step D7. + q[j] = Lo_32(qp); + if (isNeg) { + // D6. [Add back]. The probability that this step is necessary is very + // small, on the order of only 2/b. Make sure that test data accounts for + // this possibility. Decrease q[j] by 1 + q[j]--; + // and add (0v[n-1]...v[1]v[0]) to (u[j+n]u[j+n-1]...u[j+1]u[j]). + // A carry will occur to the left of u[j+n], and it should be ignored + // since it cancels with the borrow that occurred in D4. + bool carry = false; + for (unsigned i = 0; i < n; i++) { + uint32_t limit = std::min(u[j+i],v[i]); + u[j+i] += v[i] + carry; + carry = u[j+i] < limit || (carry && u[j+i] == limit); + } + u[j+n] += carry; + } + + // D7. [Loop on j.] Decrease j by one. Now if j >= 0, go back to D3. + } while (--j >= 0); + + // D8. 
[Unnormalize]. Now q[...] is the desired quotient, and the desired + // remainder may be obtained by dividing u[...] by d. If r is non-null we + // compute the remainder (urem uses this). + if (r) { + // The value d is expressed by the "shift" value above since we avoided + // multiplication by d by using a shift left. So, all we have to do is + // shift right here. + if (shift) { + uint32_t carry = 0; + for (int i = n-1; i >= 0; i--) { + r[i] = (u[i] >> shift) | carry; + carry = u[i] << (32 - shift); + } + } else { + for (int i = n-1; i >= 0; i--) { + r[i] = u[i]; + } + } + } +} + +// Implementation ported from LLVM/lib/Support/APInt.cpp +static void bigint_unsigned_division(const BigInt *op1, const BigInt *op2, BigInt *Quotient, BigInt *Remainder) { + Cmp cmp = bigint_cmp(op1, op2); + if (cmp == CmpLT) { + if (Quotient != nullptr) { + bigint_init_unsigned(Quotient, 0); + } + if (Remainder != nullptr) { + bigint_init_bigint(Remainder, op1); + } + return; + } + if (cmp == CmpEQ) { + if (Quotient != nullptr) { + bigint_init_unsigned(Quotient, 1); + } + if (Remainder != nullptr) { + bigint_init_unsigned(Remainder, 0); + } + return; + } + + const uint64_t *LHS = bigint_ptr(op1); + const uint64_t *RHS = bigint_ptr(op2); + unsigned lhsWords = op1->digit_count; + unsigned rhsWords = op2->digit_count; + + // First, compose the values into an array of 32-bit words instead of + // 64-bit words. This is a necessity of both the "short division" algorithm + // and the Knuth "classical algorithm" which requires there to be native + // operations for +, -, and * on an m bit value with an m*2 bit result. We + // can't use 64-bit operands here because we don't have native results of + // 128-bits. Furthermore, casting the 64-bit values to 32-bit values won't + // work on large-endian machines. + unsigned n = rhsWords * 2; + unsigned m = (lhsWords * 2) - n; + + // Allocate space for the temporary values we need either on the stack, if + // it will fit, or on the heap if it won't. + uint32_t SPACE[128]; + uint32_t *U = nullptr; + uint32_t *V = nullptr; + uint32_t *Q = nullptr; + uint32_t *R = nullptr; + if ((Remainder?4:3)*n+2*m+1 <= 128) { + U = &SPACE[0]; + V = &SPACE[m+n+1]; + Q = &SPACE[(m+n+1) + n]; + if (Remainder) + R = &SPACE[(m+n+1) + n + (m+n)]; + } else { + U = new uint32_t[m + n + 1]; + V = new uint32_t[n]; + Q = new uint32_t[m+n]; + if (Remainder) + R = new uint32_t[n]; + } + + // Initialize the dividend + memset(U, 0, (m+n+1)*sizeof(uint32_t)); + for (unsigned i = 0; i < lhsWords; ++i) { + uint64_t tmp = LHS[i]; + U[i * 2] = Lo_32(tmp); + U[i * 2 + 1] = Hi_32(tmp); + } + U[m+n] = 0; // this extra word is for "spill" in the Knuth algorithm. + + // Initialize the divisor + memset(V, 0, (n)*sizeof(uint32_t)); + for (unsigned i = 0; i < rhsWords; ++i) { + uint64_t tmp = RHS[i]; + V[i * 2] = Lo_32(tmp); + V[i * 2 + 1] = Hi_32(tmp); + } + + // initialize the quotient and remainder + memset(Q, 0, (m+n) * sizeof(uint32_t)); + if (Remainder) + memset(R, 0, n * sizeof(uint32_t)); + + // Now, adjust m and n for the Knuth division. n is the number of words in + // the divisor. m is the number of words by which the dividend exceeds the + // divisor (i.e. m+n is the length of the dividend). These sizes must not + // contain any zero words or the Knuth algorithm fails. 
+ for (unsigned i = n; i > 0 && V[i-1] == 0; i--) { + n--; + m++; + } + for (unsigned i = m+n; i > 0 && U[i-1] == 0; i--) + m--; + + // If we're left with only a single word for the divisor, Knuth doesn't work + // so we implement the short division algorithm here. This is much simpler + // and faster because we are certain that we can divide a 64-bit quantity + // by a 32-bit quantity at hardware speed and short division is simply a + // series of such operations. This is just like doing short division but we + // are using base 2^32 instead of base 10. + assert(n != 0 && "Divide by zero?"); + if (n == 1) { + uint32_t divisor = V[0]; + uint32_t remainder = 0; + for (int i = m; i >= 0; i--) { + uint64_t partial_dividend = Make_64(remainder, U[i]); + if (partial_dividend == 0) { + Q[i] = 0; + remainder = 0; + } else if (partial_dividend < divisor) { + Q[i] = 0; + remainder = Lo_32(partial_dividend); + } else if (partial_dividend == divisor) { + Q[i] = 1; + remainder = 0; + } else { + Q[i] = Lo_32(partial_dividend / divisor); + remainder = Lo_32(partial_dividend - (Q[i] * divisor)); + } + } + if (R) + R[0] = remainder; + } else { + // Now we're ready to invoke the Knuth classical divide algorithm. In this + // case n > 1. + KnuthDiv(U, V, Q, R, m, n); + } + + // If the caller wants the quotient + if (Quotient) { + Quotient->is_negative = false; + Quotient->digit_count = lhsWords; + if (lhsWords == 1) { + Quotient->data.digit = Make_64(Q[1], Q[0]); + } else { + Quotient->data.digits = allocate(lhsWords); + for (size_t i = 0; i < lhsWords; i += 1) { + Quotient->data.digits[i] = Make_64(Q[i*2+1], Q[i*2]); + } + } + } + + // If the caller wants the remainder + if (Remainder) { + Remainder->is_negative = false; + Remainder->digit_count = rhsWords; + if (rhsWords == 1) { + Remainder->data.digit = Make_64(R[1], R[0]); + } else { + Remainder->data.digits = allocate(rhsWords); + for (size_t i = 0; i < rhsWords; i += 1) { + Remainder->data.digits[i] = Make_64(R[i*2+1], R[i*2]); + } + } + } +} + void bigint_div_trunc(BigInt *dest, const BigInt *op1, const BigInt *op2) { assert(op2->digit_count != 0); // division by zero if (op1->digit_count == 0) { bigint_init_unsigned(dest, 0); return; } - if (op1->digit_count != 1 || op2->digit_count != 1) { - zig_panic("TODO bigint div_trunc with >1 digits"); - } const uint64_t *op1_digits = bigint_ptr(op1); const uint64_t *op2_digits = bigint_ptr(op2); - dest->data.digit = op1_digits[0] / op2_digits[0]; - dest->digit_count = 1; + if (op1->digit_count == 1 && op2->digit_count == 1) { + dest->data.digit = op1_digits[0] / op2_digits[0]; + dest->digit_count = 1; + dest->is_negative = op1->is_negative != op2->is_negative; + bigint_normalize(dest); + return; + } + if (op2->digit_count == 1 && op2_digits[0] == 1) { + // X / 1 == X + bigint_init_bigint(dest, op1); + dest->is_negative = op1->is_negative != op2->is_negative; + bigint_normalize(dest); + return; + } + + const BigInt *op1_positive; + BigInt op1_positive_data; + if (op1->is_negative) { + bigint_negate(&op1_positive_data, op1); + op1_positive = &op1_positive_data; + } else { + op1_positive = op1; + } + + const BigInt *op2_positive; + BigInt op2_positive_data; + if (op2->is_negative) { + bigint_negate(&op2_positive_data, op2); + op2_positive = &op2_positive_data; + } else { + op2_positive = op2; + } + + bigint_unsigned_division(op1_positive, op2_positive, dest, nullptr); dest->is_negative = op1->is_negative != op2->is_negative; bigint_normalize(dest); } @@ -714,6 +1115,14 @@ void bigint_rem(BigInt *dest, const 
BigInt *op1, const BigInt *op2) { } const uint64_t *op1_digits = bigint_ptr(op1); const uint64_t *op2_digits = bigint_ptr(op2); + + if (op1->digit_count == 1 && op2->digit_count == 1) { + dest->data.digit = op1_digits[0] % op2_digits[0]; + dest->digit_count = 1; + dest->is_negative = op1->is_negative; + bigint_normalize(dest); + return; + } if (op2->digit_count == 2 && op2_digits[0] == 0 && op2_digits[1] == 1) { // special case this divisor bigint_init_unsigned(dest, op1_digits[0]); @@ -721,11 +1130,32 @@ void bigint_rem(BigInt *dest, const BigInt *op1, const BigInt *op2) { bigint_normalize(dest); return; } - if (op1->digit_count != 1 || op2->digit_count != 1) { - zig_panic("TODO bigint rem with >1 digits"); + + if (op2->digit_count == 1 && op2_digits[0] == 1) { + // X % 1 == 0 + bigint_init_unsigned(dest, 0); + return; } - dest->data.digit = op1_digits[0] % op2_digits[0]; - dest->digit_count = 1; + + const BigInt *op1_positive; + BigInt op1_positive_data; + if (op1->is_negative) { + bigint_negate(&op1_positive_data, op1); + op1_positive = &op1_positive_data; + } else { + op1_positive = op1; + } + + const BigInt *op2_positive; + BigInt op2_positive_data; + if (op2->is_negative) { + bigint_negate(&op2_positive_data, op2); + op2_positive = &op2_positive_data; + } else { + op2_positive = op2; + } + + bigint_unsigned_division(op1_positive, op2_positive, nullptr, dest); dest->is_negative = op1->is_negative; bigint_normalize(dest); } diff --git a/src/codegen.cpp b/src/codegen.cpp index 9ad71a936e..c72bb07c8b 100644 --- a/src/codegen.cpp +++ b/src/codegen.cpp @@ -404,6 +404,19 @@ static LLVMLinkage to_llvm_linkage(GlobalLinkageId id) { zig_unreachable(); } +static uint32_t get_err_ret_trace_arg_index(CodeGen *g, FnTableEntry *fn_table_entry) { + if (!g->have_err_ret_tracing) { + return UINT32_MAX; + } + TypeTableEntry *fn_type = fn_table_entry->type_entry; + TypeTableEntry *return_type = fn_type->data.fn.fn_type_id.return_type; + if (return_type->id != TypeTableEntryIdErrorUnion && return_type->id != TypeTableEntryIdPureError) { + return UINT32_MAX; + } + bool first_arg_ret = type_has_bits(return_type) && handle_is_ptr(return_type); + return first_arg_ret ? 1 : 0; +} + static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { if (fn_table_entry->llvm_value) return fn_table_entry->llvm_value; @@ -483,7 +496,8 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { LLVMSetUnnamedAddr(fn_table_entry->llvm_value, true); } - if (fn_type->data.fn.fn_type_id.return_type->id == TypeTableEntryIdUnreachable) { + TypeTableEntry *return_type = fn_type->data.fn.fn_type_id.return_type; + if (return_type->id == TypeTableEntryIdUnreachable) { addLLVMFnAttr(fn_table_entry->llvm_value, "noreturn"); } @@ -520,13 +534,11 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { // use the ABI alignment, which is fine. 
} - if (!type_has_bits(fn_type->data.fn.fn_type_id.return_type)) { + if (!type_has_bits(return_type)) { // nothing to do - } else if (fn_type->data.fn.fn_type_id.return_type->id == TypeTableEntryIdPointer || - fn_type->data.fn.fn_type_id.return_type->id == TypeTableEntryIdFn) - { + } else if (return_type->id == TypeTableEntryIdPointer || return_type->id == TypeTableEntryIdFn) { addLLVMAttr(fn_table_entry->llvm_value, 0, "nonnull"); - } else if (handle_is_ptr(fn_type->data.fn.fn_type_id.return_type) && + } else if (handle_is_ptr(return_type) && calling_convention_does_first_arg_return(fn_type->data.fn.fn_type_id.cc)) { addLLVMArgAttr(fn_table_entry->llvm_value, 0, "sret"); @@ -563,6 +575,11 @@ static LLVMValueRef fn_llvm_value(CodeGen *g, FnTableEntry *fn_table_entry) { } } + uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); + if (err_ret_trace_arg_index != UINT32_MAX) { + addLLVMArgAttr(fn_table_entry->llvm_value, (unsigned)err_ret_trace_arg_index, "nonnull"); + } + return fn_table_entry->llvm_value; } @@ -864,16 +881,25 @@ static LLVMValueRef get_panic_msg_ptr_val(CodeGen *g, PanicMsgId msg_id) { return LLVMConstBitCast(val->global_refs->llvm_global, LLVMPointerType(str_type->type_ref, 0)); } -static void gen_panic(CodeGen *g, LLVMValueRef msg_arg) { +static void gen_panic(CodeGen *g, LLVMValueRef msg_arg, LLVMValueRef stack_trace_arg) { assert(g->panic_fn != nullptr); LLVMValueRef fn_val = fn_llvm_value(g, g->panic_fn); LLVMCallConv llvm_cc = get_llvm_cc(g, g->panic_fn->type_entry->data.fn.fn_type_id.cc); - ZigLLVMBuildCall(g->builder, fn_val, &msg_arg, 1, llvm_cc, ZigLLVM_FnInlineAuto, ""); + if (stack_trace_arg == nullptr) { + TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); + stack_trace_arg = LLVMConstNull(ptr_to_stack_trace_type->type_ref); + } + LLVMValueRef args[] = { + msg_arg, + stack_trace_arg, + }; + LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, fn_val, args, 2, llvm_cc, ZigLLVM_FnInlineAuto, ""); + LLVMSetTailCall(call_instruction, true); LLVMBuildUnreachable(g->builder); } static void gen_debug_safety_crash(CodeGen *g, PanicMsgId msg_id) { - gen_panic(g, get_panic_msg_ptr_val(g, msg_id)); + gen_panic(g, get_panic_msg_ptr_val(g, msg_id), nullptr); } static LLVMValueRef get_memcpy_fn_val(CodeGen *g) { @@ -895,6 +921,87 @@ static LLVMValueRef get_memcpy_fn_val(CodeGen *g) { return g->memcpy_fn_val; } +static LLVMValueRef get_return_err_fn(CodeGen *g) { + if (g->return_err_fn != nullptr) + return g->return_err_fn; + + assert(g->err_tag_type != nullptr); + + LLVMTypeRef ptr_u8 = LLVMPointerType(LLVMInt8Type(), 0); + + LLVMTypeRef arg_types[] = { + // error return trace pointer + get_ptr_to_stack_trace_type(g)->type_ref, + // return address + ptr_u8, + }; + LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false); + + Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_return_error"), false); + LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); + addLLVMFnAttr(fn_val, "cold"); + LLVMSetLinkage(fn_val, LLVMInternalLinkage); + LLVMSetFunctionCallConv(fn_val, get_llvm_cc(g, CallingConventionUnspecified)); + addLLVMFnAttr(fn_val, "nounwind"); + add_uwtable_attr(g, fn_val); + addLLVMArgAttr(fn_val, (unsigned)0, "nonnull"); + addLLVMArgAttr(fn_val, (unsigned)1, "nonnull"); + if (g->build_mode == BuildModeDebug) { + ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim", "true"); + ZigLLVMAddFunctionAttr(fn_val, "no-frame-pointer-elim-non-leaf", 
nullptr); + } + + LLVMBasicBlockRef entry_block = LLVMAppendBasicBlock(fn_val, "Entry"); + LLVMBasicBlockRef prev_block = LLVMGetInsertBlock(g->builder); + LLVMValueRef prev_debug_location = LLVMGetCurrentDebugLocation(g->builder); + LLVMPositionBuilderAtEnd(g->builder, entry_block); + ZigLLVMClearCurrentDebugLocation(g->builder); + + LLVMTypeRef usize_type_ref = g->builtin_types.entry_usize->type_ref; + + // stack_trace.instruction_addresses[stack_trace.index % stack_trace.instruction_addresses.len] = return_address; + + LLVMValueRef err_ret_trace_ptr = LLVMGetParam(fn_val, 0); + size_t index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index; + LLVMValueRef index_field_ptr = LLVMBuildStructGEP(g->builder, err_ret_trace_ptr, (unsigned)index_field_index, ""); + size_t addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index; + LLVMValueRef addresses_field_ptr = LLVMBuildStructGEP(g->builder, err_ret_trace_ptr, (unsigned)addresses_field_index, ""); + + TypeTableEntry *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry; + size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, addresses_field_ptr, (unsigned)ptr_field_index, ""); + size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index; + LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, addresses_field_ptr, (unsigned)len_field_index, ""); + + LLVMValueRef len_value = gen_load_untyped(g, len_field_ptr, 0, false, ""); + LLVMValueRef index_val = gen_load_untyped(g, index_field_ptr, 0, false, ""); + LLVMValueRef modded_val = LLVMBuildURem(g->builder, index_val, len_value, ""); + LLVMValueRef address_indices[] = { + modded_val, + }; + + LLVMValueRef ptr_value = gen_load_untyped(g, ptr_field_ptr, 0, false, ""); + LLVMValueRef address_slot = LLVMBuildInBoundsGEP(g->builder, ptr_value, address_indices, 1, ""); + + LLVMValueRef return_address = LLVMBuildPtrToInt(g->builder, LLVMGetParam(fn_val, 1), usize_type_ref, ""); + + LLVMValueRef address_value = LLVMBuildPtrToInt(g->builder, return_address, usize_type_ref, ""); + gen_store_untyped(g, address_value, address_slot, 0, false); + + // stack_trace.index += 1; + LLVMValueRef index_plus_one_val = LLVMBuildAdd(g->builder, index_val, LLVMConstInt(usize_type_ref, 1, false), ""); + gen_store_untyped(g, index_plus_one_val, index_field_ptr, 0, false); + + // return; + LLVMBuildRetVoid(g->builder); + + LLVMPositionBuilderAtEnd(g->builder, prev_block); + LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); + + g->return_err_fn = fn_val; + return fn_val; +} + static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { if (g->safety_crash_err_fn != nullptr) return g->safety_crash_err_fn; @@ -953,7 +1060,11 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { LLVMValueRef offset_buf_ptr = LLVMConstInBoundsGEP(global_array, offset_ptr_indices, 2); Buf *fn_name = get_mangled_name(g, buf_create_from_str("__zig_fail_unwrap"), false); - LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), &g->err_tag_type->type_ref, 1, false); + LLVMTypeRef arg_types[] = { + g->ptr_to_stack_trace_type->type_ref, + g->err_tag_type->type_ref, + }; + LLVMTypeRef fn_type_ref = LLVMFunctionType(LLVMVoidType(), arg_types, 2, false); LLVMValueRef fn_val = LLVMAddFunction(g->module, buf_ptr(fn_name), fn_type_ref); addLLVMFnAttr(fn_val, "noreturn"); addLLVMFnAttr(fn_val, "cold"); @@ -975,7 +1086,7 @@ static LLVMValueRef 
get_safety_crash_err_fn(CodeGen *g) { LLVMPositionBuilderAtEnd(g->builder, entry_block); ZigLLVMClearCurrentDebugLocation(g->builder); - LLVMValueRef err_val = LLVMGetParam(fn_val, 0); + LLVMValueRef err_val = LLVMGetParam(fn_val, 1); LLVMValueRef err_table_indices[] = { LLVMConstNull(g->builtin_types.entry_usize->type_ref), @@ -1005,7 +1116,7 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { LLVMValueRef global_slice_len_field_ptr = LLVMBuildStructGEP(g->builder, global_slice, slice_len_index, ""); gen_store(g, full_buf_len, global_slice_len_field_ptr, u8_ptr_type); - gen_panic(g, global_slice); + gen_panic(g, global_slice, LLVMGetParam(fn_val, 0)); LLVMPositionBuilderAtEnd(g->builder, prev_block); LLVMSetCurrentDebugLocation(g->builder, prev_debug_location); @@ -1016,8 +1127,18 @@ static LLVMValueRef get_safety_crash_err_fn(CodeGen *g) { static void gen_debug_safety_crash_for_err(CodeGen *g, LLVMValueRef err_val) { LLVMValueRef safety_crash_err_fn = get_safety_crash_err_fn(g); - ZigLLVMBuildCall(g->builder, safety_crash_err_fn, &err_val, 1, get_llvm_cc(g, CallingConventionUnspecified), + LLVMValueRef err_ret_trace_val = g->cur_err_ret_trace_val; + if (err_ret_trace_val == nullptr) { + TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); + err_ret_trace_val = LLVMConstNull(ptr_to_stack_trace_type->type_ref); + } + LLVMValueRef args[] = { + err_ret_trace_val, + err_val, + }; + LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, safety_crash_err_fn, args, 2, get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + LLVMSetTailCall(call_instruction, true); LLVMBuildUnreachable(g->builder); } @@ -1296,6 +1417,35 @@ static LLVMValueRef ir_llvm_value(CodeGen *g, IrInstruction *instruction) { static LLVMValueRef ir_render_return(CodeGen *g, IrExecutable *executable, IrInstructionReturn *return_instruction) { LLVMValueRef value = ir_llvm_value(g, return_instruction->value); TypeTableEntry *return_type = return_instruction->value->value.type; + + if (g->have_err_ret_tracing) { + bool is_err_return = false; + if (return_type->id == TypeTableEntryIdErrorUnion) { + if (return_instruction->value->value.special == ConstValSpecialStatic) { + is_err_return = return_instruction->value->value.data.x_err_union.err != nullptr; + } else if (return_instruction->value->value.special == ConstValSpecialRuntime) { + is_err_return = return_instruction->value->value.data.rh_error_union == RuntimeHintErrorUnionError; + // TODO: emit a branch to check if the return value is an error + } + } else if (return_type->id == TypeTableEntryIdPureError) { + is_err_return = true; + } + if (is_err_return) { + LLVMBasicBlockRef return_block = LLVMAppendBasicBlock(g->cur_fn_val, "ReturnError"); + LLVMValueRef block_address = LLVMBlockAddress(g->cur_fn_val, return_block); + + LLVMValueRef return_err_fn = get_return_err_fn(g); + LLVMValueRef args[] = { + g->cur_err_ret_trace_val, + block_address, + }; + LLVMBuildBr(g->builder, return_block); + LLVMPositionBuilderAtEnd(g->builder, return_block); + LLVMValueRef call_instruction = ZigLLVMBuildCall(g->builder, return_err_fn, args, 2, + get_llvm_cc(g, CallingConventionUnspecified), ZigLLVM_FnInlineAuto, ""); + LLVMSetTailCall(call_instruction, true); + } + } if (handle_is_ptr(return_type)) { if (calling_convention_does_first_arg_return(g->cur_fn->type_entry->data.fn.fn_type_id.cc)) { assert(g->cur_ret_ptr); @@ -2330,7 +2480,8 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr TypeTableEntry 
*src_return_type = fn_type_id->return_type; bool ret_has_bits = type_has_bits(src_return_type); bool first_arg_ret = ret_has_bits && handle_is_ptr(src_return_type); - size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 1 : 0); + bool prefix_arg_err_ret_stack = g->have_err_ret_tracing && (src_return_type->id == TypeTableEntryIdErrorUnion || src_return_type->id == TypeTableEntryIdPureError); + size_t actual_param_count = instruction->arg_count + (first_arg_ret ? 1 : 0) + (prefix_arg_err_ret_stack ? 1 : 0); bool is_var_args = fn_type_id->is_var_args; LLVMValueRef *gen_param_values = allocate(actual_param_count); size_t gen_param_index = 0; @@ -2338,6 +2489,10 @@ static LLVMValueRef ir_render_call(CodeGen *g, IrExecutable *executable, IrInstr gen_param_values[gen_param_index] = instruction->tmp_ptr; gen_param_index += 1; } + if (prefix_arg_err_ret_stack) { + gen_param_values[gen_param_index] = g->cur_err_ret_trace_val; + gen_param_index += 1; + } for (size_t call_i = 0; call_i < instruction->arg_count; call_i += 1) { IrInstruction *param_instruction = instruction->args[call_i]; TypeTableEntry *param_type = param_instruction->value.type; @@ -2881,6 +3036,16 @@ static LLVMValueRef ir_render_align_cast(CodeGen *g, IrExecutable *executable, I return target_val; } +static LLVMValueRef ir_render_error_return_trace(CodeGen *g, IrExecutable *executable, + IrInstructionErrorReturnTrace *instruction) +{ + TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(g); + if (g->cur_err_ret_trace_val == nullptr) { + return LLVMConstNull(ptr_to_stack_trace_type->type_ref); + } + return g->cur_err_ret_trace_val; +} + static LLVMAtomicOrdering to_LLVMAtomicOrdering(AtomicOrder atomic_order) { switch (atomic_order) { case AtomicOrderUnordered: return LLVMAtomicOrderingUnordered; @@ -3474,7 +3639,7 @@ static LLVMValueRef ir_render_container_init_list(CodeGen *g, IrExecutable *exec } static LLVMValueRef ir_render_panic(CodeGen *g, IrExecutable *executable, IrInstructionPanic *instruction) { - gen_panic(g, ir_llvm_value(g, instruction->msg)); + gen_panic(g, ir_llvm_value(g, instruction->msg), nullptr); return nullptr; } @@ -3654,6 +3819,8 @@ static LLVMValueRef ir_render_instruction(CodeGen *g, IrExecutable *executable, return ir_render_field_parent_ptr(g, executable, (IrInstructionFieldParentPtr *)instruction); case IrInstructionIdAlignCast: return ir_render_align_cast(g, executable, (IrInstructionAlignCast *)instruction); + case IrInstructionIdErrorReturnTrace: + return ir_render_error_return_trace(g, executable, (IrInstructionErrorReturnTrace *)instruction); } zig_unreachable(); } @@ -4493,7 +4660,8 @@ static void do_code_gen(CodeGen *g) { LLVMValueRef fn = fn_llvm_value(g, fn_table_entry); g->cur_fn = fn_table_entry; g->cur_fn_val = fn; - if (handle_is_ptr(fn_table_entry->type_entry->data.fn.fn_type_id.return_type)) { + TypeTableEntry *return_type = fn_table_entry->type_entry->data.fn.fn_type_id.return_type; + if (handle_is_ptr(return_type)) { g->cur_ret_ptr = LLVMGetParam(fn, 0); } else { g->cur_ret_ptr = nullptr; @@ -4502,6 +4670,42 @@ static void do_code_gen(CodeGen *g) { build_all_basic_blocks(g, fn_table_entry); clear_debug_source_node(g); + uint32_t err_ret_trace_arg_index = get_err_ret_trace_arg_index(g, fn_table_entry); + if (err_ret_trace_arg_index != UINT32_MAX) { + g->cur_err_ret_trace_val = LLVMGetParam(fn, err_ret_trace_arg_index); + } else if (g->have_err_ret_tracing && fn_table_entry->calls_errorable_function) { + // TODO call graph analysis to find out what this 
number needs to be for every function + static const size_t stack_trace_ptr_count = 30; + + TypeTableEntry *usize = g->builtin_types.entry_usize; + TypeTableEntry *array_type = get_array_type(g, usize, stack_trace_ptr_count); + LLVMValueRef err_ret_array_val = build_alloca(g, array_type, "error_return_trace_addresses", + get_abi_alignment(g, array_type)); + g->cur_err_ret_trace_val = build_alloca(g, g->stack_trace_type, "error_return_trace", get_abi_alignment(g, g->stack_trace_type)); + size_t index_field_index = g->stack_trace_type->data.structure.fields[0].gen_index; + LLVMValueRef index_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_err_ret_trace_val, (unsigned)index_field_index, ""); + gen_store_untyped(g, LLVMConstNull(usize->type_ref), index_field_ptr, 0, false); + + size_t addresses_field_index = g->stack_trace_type->data.structure.fields[1].gen_index; + LLVMValueRef addresses_field_ptr = LLVMBuildStructGEP(g->builder, g->cur_err_ret_trace_val, (unsigned)addresses_field_index, ""); + + TypeTableEntry *slice_type = g->stack_trace_type->data.structure.fields[1].type_entry; + size_t ptr_field_index = slice_type->data.structure.fields[slice_ptr_index].gen_index; + LLVMValueRef ptr_field_ptr = LLVMBuildStructGEP(g->builder, addresses_field_ptr, (unsigned)ptr_field_index, ""); + LLVMValueRef zero = LLVMConstNull(usize->type_ref); + LLVMValueRef indices[] = {zero, zero}; + LLVMValueRef err_ret_array_val_elem0_ptr = LLVMBuildInBoundsGEP(g->builder, err_ret_array_val, + indices, 2, ""); + gen_store(g, err_ret_array_val_elem0_ptr, ptr_field_ptr, + get_pointer_to_type(g, get_pointer_to_type(g, usize, false), false)); + + size_t len_field_index = slice_type->data.structure.fields[slice_len_index].gen_index; + LLVMValueRef len_field_ptr = LLVMBuildStructGEP(g->builder, addresses_field_ptr, (unsigned)len_field_index, ""); + gen_store(g, LLVMConstInt(usize->type_ref, stack_trace_ptr_count, false), len_field_ptr, get_pointer_to_type(g, usize, false)); + } else { + g->cur_err_ret_trace_val = nullptr; + } + // allocate temporary stack data for (size_t alloca_i = 0; alloca_i < fn_table_entry->alloca_list.length; alloca_i += 1) { IrInstruction *instruction = fn_table_entry->alloca_list.at(alloca_i); @@ -5064,6 +5268,7 @@ static void define_builtin_fns(CodeGen *g) { create_builtin_fn(g, BuiltinFnIdSetAlignStack, "setAlignStack", 1); create_builtin_fn(g, BuiltinFnIdArgType, "ArgType", 2); create_builtin_fn(g, BuiltinFnIdExport, "export", 3); + create_builtin_fn(g, BuiltinFnIdErrorReturnTrace, "errorReturnTrace", 0); } static const char *bool_to_str(bool b) { @@ -5088,6 +5293,12 @@ static void define_builtin_compile_vars(CodeGen *g) { os_path_join(g->cache_dir, buf_create_from_str(builtin_zig_basename), builtin_zig_path); Buf *contents = buf_alloc(); + buf_append_str(contents, + "pub const StackTrace = struct {\n" + " index: usize,\n" + " instruction_addresses: []usize,\n" + "};\n\n"); + const char *cur_os = nullptr; { buf_appendf(contents, "pub const Os = enum {\n"); @@ -5233,6 +5444,7 @@ static void define_builtin_compile_vars(CodeGen *g) { buf_appendf(contents, "pub const object_format = ObjectFormat.%s;\n", cur_obj_fmt); buf_appendf(contents, "pub const mode = %s;\n", build_mode_to_str(g->build_mode)); buf_appendf(contents, "pub const link_libc = %s;\n", bool_to_str(g->libc_link_lib != nullptr)); + buf_appendf(contents, "pub const have_error_return_tracing = %s;\n", bool_to_str(g->have_err_ret_tracing)); buf_appendf(contents, "pub const __zig_test_fn_slice = {}; // overwritten later\n"); @@ -5251,6 
+5463,7 @@ static void define_builtin_compile_vars(CodeGen *g) { g->root_package->package_table.put(buf_create_from_str("builtin"), g->compile_var_package); g->std_package->package_table.put(buf_create_from_str("builtin"), g->compile_var_package); g->compile_var_import = add_source_file(g, g->compile_var_package, abs_full_path, contents); + scan_import(g, g->compile_var_import); } static void init(CodeGen *g) { @@ -5359,6 +5572,8 @@ static void init(CodeGen *g) { } } + g->have_err_ret_tracing = g->build_mode != BuildModeFastRelease; + define_builtin_fns(g); define_builtin_compile_vars(g); } diff --git a/src/ir.cpp b/src/ir.cpp index f236910250..4dd022c09f 100644 --- a/src/ir.cpp +++ b/src/ir.cpp @@ -572,6 +572,10 @@ static constexpr IrInstructionId ir_instruction_id(IrInstructionArgType *) { return IrInstructionIdArgType; } +static constexpr IrInstructionId ir_instruction_id(IrInstructionErrorReturnTrace *) { + return IrInstructionIdErrorReturnTrace; +} + template static T *ir_create_instruction(IrBuilder *irb, Scope *scope, AstNode *source_node) { T *special_instruction = allocate(1); @@ -2305,6 +2309,12 @@ static IrInstruction *ir_build_arg_type(IrBuilder *irb, Scope *scope, AstNode *s return &instruction->base; } +static IrInstruction *ir_build_error_return_trace(IrBuilder *irb, Scope *scope, AstNode *source_node) { + IrInstructionErrorReturnTrace *instruction = ir_build_instruction(irb, scope, source_node); + + return &instruction->base; +} + static void ir_count_defers(IrBuilder *irb, Scope *inner_scope, Scope *outer_scope, size_t *results) { results[ReturnKindUnconditional] = 0; results[ReturnKindError] = 0; @@ -3731,6 +3741,10 @@ static IrInstruction *ir_gen_builtin_fn_call(IrBuilder *irb, Scope *scope, AstNo return ir_build_export(irb, scope, node, arg0_value, arg1_value, arg2_value); } + case BuiltinFnIdErrorReturnTrace: + { + return ir_build_error_return_trace(irb, scope, node); + } } zig_unreachable(); } @@ -8230,16 +8244,6 @@ static bool ir_resolve_comptime(IrAnalyze *ira, IrInstruction *value, bool *out) return ir_resolve_bool(ira, value, out); } -static ConstExprValue *get_builtin_value(CodeGen *codegen, const char *name) { - Tld *tld = codegen->compile_var_import->decls_scope->decl_table.get(buf_create_from_str(name)); - resolve_top_level_decl(codegen, tld, false, nullptr); - assert(tld->id == TldIdVar); - TldVar *tld_var = (TldVar *)tld; - ConstExprValue *var_value = tld_var->var->value; - assert(var_value != nullptr); - return var_value; -} - static bool ir_resolve_atomic_order(IrAnalyze *ira, IrInstruction *value, AtomicOrder *out) { if (type_is_invalid(value->value.type)) return false; @@ -9578,6 +9582,24 @@ static TypeTableEntry *ir_analyze_instruction_export(IrAnalyze *ira, IrInstructi return ira->codegen->builtin_types.entry_void; } +static TypeTableEntry *ir_analyze_instruction_error_return_trace(IrAnalyze *ira, + IrInstructionErrorReturnTrace *instruction) +{ + FnTableEntry *fn_entry = exec_fn_entry(ira->new_irb.exec); + TypeTableEntry *ptr_to_stack_trace_type = get_ptr_to_stack_trace_type(ira->codegen); + TypeTableEntry *nullable_type = get_maybe_type(ira->codegen, ptr_to_stack_trace_type); + if (fn_entry == nullptr || !fn_entry->calls_errorable_function || !ira->codegen->have_err_ret_tracing) { + ConstExprValue *out_val = ir_build_const_from(ira, &instruction->base); + out_val->data.x_maybe = nullptr; + return nullable_type; + } + + IrInstruction *new_instruction = ir_build_error_return_trace(&ira->new_irb, instruction->base.scope, + 
instruction->base.source_node); + ir_link_new_instruction(new_instruction, &instruction->base); + return nullable_type; +} + static bool ir_analyze_fn_call_inline_arg(IrAnalyze *ira, AstNode *fn_proto_node, IrInstruction *arg, Scope **exec_scope, size_t *next_proto_i) { @@ -9836,7 +9858,7 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal if (fn_proto_node->data.fn_proto.is_var_args) { ir_add_error(ira, &call_instruction->base, - buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/andrewrk/zig/issues/313")); + buf_sprintf("compiler bug: unable to call var args function at compile time. https://github.com/zig-lang/zig/issues/313")); return ira->codegen->builtin_types.entry_invalid; } @@ -10053,9 +10075,21 @@ static TypeTableEntry *ir_analyze_fn_call(IrAnalyze *ira, IrInstructionCall *cal TypeTableEntry *return_type = impl_fn->type_entry->data.fn.fn_type_id.return_type; ir_add_alloca(ira, new_call_instruction, return_type); + if (return_type->id == TypeTableEntryIdPureError || return_type->id == TypeTableEntryIdErrorUnion) { + parent_fn_entry->calls_errorable_function = true; + } + return ir_finish_anal(ira, return_type); } + FnTableEntry *parent_fn_entry = exec_fn_entry(ira->new_irb.exec); + assert(fn_type_id->return_type != nullptr); + assert(parent_fn_entry != nullptr); + if (fn_type_id->return_type->id == TypeTableEntryIdPureError || fn_type_id->return_type->id == TypeTableEntryIdErrorUnion) { + parent_fn_entry->calls_errorable_function = true; + } + + IrInstruction **casted_args = allocate(call_param_count); size_t next_arg_index = 0; if (first_arg_ptr) { @@ -13977,7 +14011,7 @@ static TypeTableEntry *ir_analyze_instruction_align_of(IrAnalyze *ira, IrInstruc return ira->codegen->builtin_types.entry_invalid; TypeTableEntry *type_entry = ir_resolve_type(ira, type_value); - ensure_complete_type(ira->codegen, type_entry); + type_ensure_zero_bits_known(ira->codegen, type_entry); if (type_is_invalid(type_entry)) return ira->codegen->builtin_types.entry_invalid; @@ -15322,6 +15356,8 @@ static TypeTableEntry *ir_analyze_instruction_nocast(IrAnalyze *ira, IrInstructi return ir_analyze_instruction_tag_type(ira, (IrInstructionTagType *)instruction); case IrInstructionIdExport: return ir_analyze_instruction_export(ira, (IrInstructionExport *)instruction); + case IrInstructionIdErrorReturnTrace: + return ir_analyze_instruction_error_return_trace(ira, (IrInstructionErrorReturnTrace *)instruction); } zig_unreachable(); } @@ -15505,6 +15541,7 @@ bool ir_has_side_effects(IrInstruction *instruction) { case IrInstructionIdOpaqueType: case IrInstructionIdArgType: case IrInstructionIdTagType: + case IrInstructionIdErrorReturnTrace: return false; case IrInstructionIdAsm: { diff --git a/src/ir_print.cpp b/src/ir_print.cpp index f5aba2a45d..930d22f21a 100644 --- a/src/ir_print.cpp +++ b/src/ir_print.cpp @@ -996,6 +996,10 @@ static void ir_print_export(IrPrint *irp, IrInstructionExport *instruction) { } } +static void ir_print_error_return_trace(IrPrint *irp, IrInstructionErrorReturnTrace *instruction) { + fprintf(irp->f, "@errorReturnTrace()"); +} + static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { ir_print_prefix(irp, instruction); @@ -1308,6 +1312,9 @@ static void ir_print_instruction(IrPrint *irp, IrInstruction *instruction) { case IrInstructionIdExport: ir_print_export(irp, (IrInstructionExport *)instruction); break; + case IrInstructionIdErrorReturnTrace: + ir_print_error_return_trace(irp, 
(IrInstructionErrorReturnTrace *)instruction); + break; } fprintf(irp->f, "\n"); } diff --git a/src/translate_c.cpp b/src/translate_c.cpp index 93e4810b95..ce4dc5a1fe 100644 --- a/src/translate_c.cpp +++ b/src/translate_c.cpp @@ -1964,6 +1964,8 @@ static int trans_local_declaration(Context *c, TransScope *scope, const DeclStmt if (init_node == nullptr) return ErrorUnexpected; + } else { + init_node = trans_create_node(c, NodeTypeUndefinedLiteral); } AstNode *type_node = trans_qual_type(c, qual_type, stmt->getLocStart()); if (type_node == nullptr) @@ -2224,12 +2226,6 @@ static AstNode *trans_if_statement(Context *c, TransScope *scope, const IfStmt * // if (c) t else e AstNode *if_node = trans_create_node(c, NodeTypeIfBoolExpr); - // TODO: condition != 0 - AstNode *condition_node = trans_expr(c, ResultUsedYes, scope, stmt->getCond(), TransRValue); - if (condition_node == nullptr) - return nullptr; - if_node->data.if_bool_expr.condition = condition_node; - TransScope *then_scope = trans_stmt(c, scope, stmt->getThen(), &if_node->data.if_bool_expr.then_block); if (then_scope == nullptr) return nullptr; @@ -2240,7 +2236,87 @@ static AstNode *trans_if_statement(Context *c, TransScope *scope, const IfStmt * return nullptr; } - return if_node; + AstNode *condition_node = trans_expr(c, ResultUsedYes, scope, stmt->getCond(), TransRValue); + if (condition_node == nullptr) + return nullptr; + + switch (condition_node->type) { + case NodeTypeBinOpExpr: + switch (condition_node->data.bin_op_expr.bin_op) { + case BinOpTypeBoolOr: + case BinOpTypeBoolAnd: + case BinOpTypeCmpEq: + case BinOpTypeCmpNotEq: + case BinOpTypeCmpLessThan: + case BinOpTypeCmpGreaterThan: + case BinOpTypeCmpLessOrEq: + case BinOpTypeCmpGreaterOrEq: + if_node->data.if_bool_expr.condition = condition_node; + return if_node; + default: + goto convert_to_bitcast; + } + + case NodeTypePrefixOpExpr: + switch (condition_node->data.prefix_op_expr.prefix_op) { + case PrefixOpBoolNot: + if_node->data.if_bool_expr.condition = condition_node; + return if_node; + default: + goto convert_to_bitcast; + } + + case NodeTypeBoolLiteral: + if_node->data.if_bool_expr.condition = condition_node; + return if_node; + + default: { + // In Zig, float, int and pointer does not work in if statements. 
+ // To make it work, we bitcast any value we get to an int of the right size + // and comp it to 0 + // TODO: This doesn't work for pointers, as they become nullable on + // translate + // c: if (cond) { } + // zig: { + // zig: const _tmp = cond; + // zig: if (@bitCast(@IntType(false, @sizeOf(@typeOf(_tmp)) * 8), _tmp) != 0) { } + // zig: } + convert_to_bitcast: + TransScopeBlock *child_scope = trans_scope_block_create(c, scope); + + // const _tmp = cond; + // TODO: avoid name collisions with generated variable names + Buf* tmp_var_name = buf_create_from_str("_tmp"); + AstNode *tmp_var_decl = trans_create_node_var_decl_local(c, true, tmp_var_name, nullptr, condition_node); + child_scope->node->data.block.statements.append(tmp_var_decl); + + // @sizeOf(@typeOf(_tmp)) * 8 + AstNode *typeof_tmp = trans_create_node_builtin_fn_call_str(c, "typeOf"); + typeof_tmp->data.fn_call_expr.params.append(trans_create_node_symbol(c, tmp_var_name)); + AstNode *sizeof_tmp = trans_create_node_builtin_fn_call_str(c, "sizeOf"); + sizeof_tmp->data.fn_call_expr.params.append(typeof_tmp); + AstNode *sizeof_tmp_in_bits = trans_create_node_bin_op( + c, sizeof_tmp, BinOpTypeMult, + trans_create_node_unsigned_negative(c, 8, false)); + + // @IntType(false, @sizeOf(@typeOf(_tmp)) * 8) + AstNode *int_type = trans_create_node_builtin_fn_call_str(c, "IntType"); + int_type->data.fn_call_expr.params.append(trans_create_node_bool(c, false)); + int_type->data.fn_call_expr.params.append(sizeof_tmp_in_bits); + + // @bitCast(@IntType(false, @sizeOf(@typeOf(_tmp)) * 8), _tmp) + AstNode *bit_cast = trans_create_node_builtin_fn_call_str(c, "bitCast"); + bit_cast->data.fn_call_expr.params.append(int_type); + bit_cast->data.fn_call_expr.params.append(trans_create_node_symbol(c, tmp_var_name)); + + // if (@bitCast(@IntType(false, @sizeOf(@typeOf(_tmp)) * 8), _tmp) != 0) { } + AstNode *not_eql_zero = trans_create_node_bin_op(c, bit_cast, BinOpTypeCmpNotEq, trans_create_node_unsigned_negative(c, 0, false)); + if_node->data.if_bool_expr.condition = not_eql_zero; + child_scope->node->data.block.statements.append(if_node); + + return child_scope->node; + } + } } static AstNode *trans_call_expr(Context *c, ResultUsed result_used, TransScope *scope, const CallExpr *stmt) { diff --git a/std/build.zig b/std/build.zig index cd6b3811ea..5d79b00c4f 100644 --- a/std/build.zig +++ b/std/build.zig @@ -247,11 +247,11 @@ pub const Builder = struct { defer wanted_steps.deinit(); if (step_names.len == 0) { - wanted_steps.append(&self.default_step) catch unreachable; + try wanted_steps.append(&self.default_step); } else { for (step_names) |step_name| { const s = try self.getTopLevelStepByName(step_name); - wanted_steps.append(s) catch unreachable; + try wanted_steps.append(s); } } @@ -721,11 +721,9 @@ pub const Builder = struct { return error.FileNotFound; } - pub fn exec(self: &Builder, argv: []const []const u8) -> []u8 { + pub fn exec(self: &Builder, argv: []const []const u8) -> %[]u8 { const max_output_size = 100 * 1024; - const result = os.ChildProcess.exec(self.allocator, argv, null, null, max_output_size) catch |err| { - std.debug.panic("Unable to spawn {}: {}", argv[0], @errorName(err)); - }; + const result = try os.ChildProcess.exec(self.allocator, argv, null, null, max_output_size); switch (result.term) { os.ChildProcess.Term.Exited => |code| { if (code != 0) { diff --git a/std/crypto/blake2.zig b/std/crypto/blake2.zig new file mode 100644 index 0000000000..b08caa480e --- /dev/null +++ b/std/crypto/blake2.zig @@ -0,0 +1,445 @@ +const mem = 
@import("../mem.zig"); +const math = @import("../math/index.zig"); +const endian = @import("../endian.zig"); +const debug = @import("../debug/index.zig"); +const builtin = @import("builtin"); +const htest = @import("test.zig"); + +const RoundParam = struct { + a: usize, b: usize, c: usize, d: usize, x: usize, y: usize, +}; + +fn Rp(a: usize, b: usize, c: usize, d: usize, x: usize, y: usize) -> RoundParam { + return RoundParam { .a = a, .b = b, .c = c, .d = d, .x = x, .y = y, }; +} + +///////////////////// +// Blake2s + +pub const Blake2s224 = Blake2s(224); +pub const Blake2s256 = Blake2s(256); + +fn Blake2s(comptime out_len: usize) -> type { return struct { + const Self = this; + + const iv = [8]u32 { + 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, + 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19, + }; + + const sigma = [10][16]u8 { + []const u8 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + []const u8 { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + []const u8 { 11, 8, 12, 0, 5, 2, 15, 13, 10, 14, 3, 6, 7, 1, 9, 4 }, + []const u8 { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, + []const u8 { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, + []const u8 { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, + []const u8 { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, + []const u8 { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, + []const u8 { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, + []const u8 { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13, 0 }, + }; + + h: [8]u32, + t: u64, + // Streaming cache + buf: [64]u8, + buf_len: u8, + + pub fn init() -> Self { + debug.assert(8 <= out_len and out_len <= 512); + + var s: Self = undefined; + s.reset(); + return s; + } + + pub fn reset(d: &Self) { + mem.copy(u32, d.h[0..], iv[0..]); + + // No key plus default parameters + d.h[0] ^= 0x01010000 ^ u32(out_len >> 3); + d.t = 0; + d.buf_len = 0; + } + + pub fn hash(b: []const u8, out: []u8) { + var d = Self.init(); + d.update(b); + d.final(out); + } + + pub fn update(d: &Self, b: []const u8) { + var off: usize = 0; + + // Partial buffer exists from previous update. Copy into buffer then hash. + if (d.buf_len != 0 and d.buf_len + b.len > 64) { + off += 64 - d.buf_len; + mem.copy(u8, d.buf[d.buf_len..], b[0..off]); + d.t += 64; + d.round(d.buf[0..], false); + d.buf_len = 0; + } + + // Full middle blocks. + while (off + 64 < b.len) : (off += 64) { + d.t += 64; + d.round(b[off..off + 64], false); + } + + // Copy any remainder for next pass. + mem.copy(u8, d.buf[d.buf_len..], b[off..]); + d.buf_len += u8(b[off..].len); + } + + pub fn final(d: &Self, out: []u8) { + debug.assert(out.len >= out_len / 8); + + mem.set(u8, d.buf[d.buf_len..], 0); + d.t += d.buf_len; + d.round(d.buf[0..], true); + + const rr = d.h[0 .. out_len / 32]; + + for (rr) |s, j| { + mem.writeInt(out[4*j .. 4*j + 4], s, builtin.Endian.Little); + } + } + + fn round(d: &Self, b: []const u8, last: bool) { + debug.assert(b.len == 64); + + var m: [16]u32 = undefined; + var v: [16]u32 = undefined; + + for (m) |*r, i| { + *r = mem.readIntLE(u32, b[4*i .. 
4*i + 4]); + } + + var k: usize = 0; + while (k < 8) : (k += 1) { + v[k] = d.h[k]; + v[k+8] = iv[k]; + } + + v[12] ^= @truncate(u32, d.t); + v[13] ^= u32(d.t >> 32); + if (last) v[14] = ~v[14]; + + const rounds = comptime []RoundParam { + Rp(0, 4, 8, 12, 0, 1), + Rp(1, 5, 9, 13, 2, 3), + Rp(2, 6, 10, 14, 4, 5), + Rp(3, 7, 11, 15, 6, 7), + Rp(0, 5, 10, 15, 8, 9), + Rp(1, 6, 11, 12, 10, 11), + Rp(2, 7, 8, 13, 12, 13), + Rp(3, 4, 9, 14, 14, 15), + }; + + comptime var j: usize = 0; + inline while (j < 10) : (j += 1) { + inline for (rounds) |r| { + v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.x]]; + v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], usize(16)); + v[r.c] = v[r.c] +% v[r.d]; + v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], usize(12)); + v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.y]]; + v[r.d] = math.rotr(u32, v[r.d] ^ v[r.a], usize(8)); + v[r.c] = v[r.c] +% v[r.d]; + v[r.b] = math.rotr(u32, v[r.b] ^ v[r.c], usize(7)); + } + } + + for (d.h) |*r, i| { + *r ^= v[i] ^ v[i + 8]; + } + } +};} + +test "blake2s224 single" { + const h1 = "1fa1291e65248b37b3433475b2a0dd63d54a11ecc4e3e034e7bc1ef4"; + htest.assertEqualHash(Blake2s224, h1, ""); + + const h2 = "0b033fc226df7abde29f67a05d3dc62cf271ef3dfea4d387407fbd55"; + htest.assertEqualHash(Blake2s224, h2, "abc"); + + const h3 = "e4e5cb6c7cae41982b397bf7b7d2d9d1949823ae78435326e8db4912"; + htest.assertEqualHash(Blake2s224, h3, "The quick brown fox jumps over the lazy dog"); +} + +test "blake2s224 streaming" { + var h = Blake2s224.init(); + var out: [28]u8 = undefined; + + const h1 = "1fa1291e65248b37b3433475b2a0dd63d54a11ecc4e3e034e7bc1ef4"; + + h.final(out[0..]); + htest.assertEqual(h1, out[0..]); + + const h2 = "0b033fc226df7abde29f67a05d3dc62cf271ef3dfea4d387407fbd55"; + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); +} + +test "blake2s256 single" { + const h1 = "69217a3079908094e11121d042354a7c1f55b6482ca1a51e1b250dfd1ed0eef9"; + htest.assertEqualHash(Blake2s256, h1, ""); + + const h2 = "508c5e8c327c14e2e1a72ba34eeb452f37458b209ed63a294d999b4c86675982"; + htest.assertEqualHash(Blake2s256, h2, "abc"); + + const h3 = "606beeec743ccbeff6cbcdf5d5302aa855c256c29b88c8ed331ea1a6bf3c8812"; + htest.assertEqualHash(Blake2s256, h3, "The quick brown fox jumps over the lazy dog"); +} + +test "blake2s256 streaming" { + var h = Blake2s256.init(); + var out: [32]u8 = undefined; + + const h1 = "69217a3079908094e11121d042354a7c1f55b6482ca1a51e1b250dfd1ed0eef9"; + + h.final(out[0..]); + htest.assertEqual(h1, out[0..]); + + const h2 = "508c5e8c327c14e2e1a72ba34eeb452f37458b209ed63a294d999b4c86675982"; + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); +} + + +///////////////////// +// Blake2b + +pub const Blake2b384 = Blake2b(384); +pub const Blake2b512 = Blake2b(512); + +fn Blake2b(comptime out_len: usize) -> type { return struct { + const Self = this; + + const iv = [8]u64 { + 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, + 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, + 0x510e527fade682d1, 0x9b05688c2b3e6c1f, + 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179, + }; + + const sigma = [12][16]u8 { + []const u8 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + []const u8 { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + []const u8 { 11, 8, 12, 0, 5, 2, 15, 13, 
10, 14, 3, 6, 7, 1, 9, 4 }, + []const u8 { 7, 9, 3, 1, 13, 12, 11, 14, 2, 6, 5, 10, 4, 0, 15, 8 }, + []const u8 { 9, 0, 5, 7, 2, 4, 10, 15, 14, 1, 11, 12, 6, 8, 3, 13 }, + []const u8 { 2, 12, 6, 10, 0, 11, 8, 3, 4, 13, 7, 5, 15, 14, 1, 9 }, + []const u8 { 12, 5, 1, 15, 14, 13, 4, 10, 0, 7, 6, 3, 9, 2, 8, 11 }, + []const u8 { 13, 11, 7, 14, 12, 1, 3, 9, 5, 0, 15, 4, 8, 6, 2, 10 }, + []const u8 { 6, 15, 14, 9, 11, 3, 0, 8, 12, 2, 13, 7, 1, 4, 10, 5 }, + []const u8 { 10, 2, 8, 4, 7, 6, 1, 5, 15, 11, 9, 14, 3, 12, 13 , 0 }, + []const u8 { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + []const u8 { 14, 10, 4, 8, 9, 15, 13, 6, 1, 12, 0, 2, 11, 7, 5, 3 }, + }; + + h: [8]u64, + t: u128, + // Streaming cache + buf: [128]u8, + buf_len: u8, + + pub fn init() -> Self { + debug.assert(8 <= out_len and out_len <= 512); + + var s: Self = undefined; + s.reset(); + return s; + } + + pub fn reset(d: &Self) { + mem.copy(u64, d.h[0..], iv[0..]); + + // No key plus default parameters + d.h[0] ^= 0x01010000 ^ (out_len >> 3); + d.t = 0; + d.buf_len = 0; + } + + pub fn hash(b: []const u8, out: []u8) { + var d = Self.init(); + d.update(b); + d.final(out); + } + + pub fn update(d: &Self, b: []const u8) { + var off: usize = 0; + + // Partial buffer exists from previous update. Copy into buffer then hash. + if (d.buf_len != 0 and d.buf_len + b.len > 128) { + off += 128 - d.buf_len; + mem.copy(u8, d.buf[d.buf_len..], b[0..off]); + d.t += 128; + d.round(d.buf[0..], false); + d.buf_len = 0; + } + + // Full middle blocks. + while (off + 128 < b.len) : (off += 128) { + d.t += 128; + d.round(b[off..off + 128], false); + } + + // Copy any remainder for next pass. + mem.copy(u8, d.buf[d.buf_len..], b[off..]); + d.buf_len += u8(b[off..].len); + } + + pub fn final(d: &Self, out: []u8) { + mem.set(u8, d.buf[d.buf_len..], 0); + d.t += d.buf_len; + d.round(d.buf[0..], true); + + const rr = d.h[0 .. out_len / 64]; + + for (rr) |s, j| { + mem.writeInt(out[8*j .. 8*j + 8], s, builtin.Endian.Little); + } + } + + fn round(d: &Self, b: []const u8, last: bool) { + debug.assert(b.len == 128); + + var m: [16]u64 = undefined; + var v: [16]u64 = undefined; + + for (m) |*r, i| { + *r = mem.readIntLE(u64, b[8*i .. 
8*i + 8]); + } + + var k: usize = 0; + while (k < 8) : (k += 1) { + v[k] = d.h[k]; + v[k+8] = iv[k]; + } + + v[12] ^= @truncate(u64, d.t); + v[13] ^= u64(d.t >> 64); + if (last) v[14] = ~v[14]; + + const rounds = comptime []RoundParam { + Rp(0, 4, 8, 12, 0, 1), + Rp(1, 5, 9, 13, 2, 3), + Rp(2, 6, 10, 14, 4, 5), + Rp(3, 7, 11, 15, 6, 7), + Rp(0, 5, 10, 15, 8, 9), + Rp(1, 6, 11, 12, 10, 11), + Rp(2, 7, 8, 13, 12, 13), + Rp(3, 4, 9, 14, 14, 15), + }; + + comptime var j: usize = 0; + inline while (j < 12) : (j += 1) { + inline for (rounds) |r| { + v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.x]]; + v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], usize(32)); + v[r.c] = v[r.c] +% v[r.d]; + v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], usize(24)); + v[r.a] = v[r.a] +% v[r.b] +% m[sigma[j][r.y]]; + v[r.d] = math.rotr(u64, v[r.d] ^ v[r.a], usize(16)); + v[r.c] = v[r.c] +% v[r.d]; + v[r.b] = math.rotr(u64, v[r.b] ^ v[r.c], usize(63)); + } + } + + for (d.h) |*r, i| { + *r ^= v[i] ^ v[i + 8]; + } + } +};} + +test "blake2b384 single" { + const h1 = "b32811423377f52d7862286ee1a72ee540524380fda1724a6f25d7978c6fd3244a6caf0498812673c5e05ef583825100"; + htest.assertEqualHash(Blake2b384, h1, ""); + + const h2 = "6f56a82c8e7ef526dfe182eb5212f7db9df1317e57815dbda46083fc30f54ee6c66ba83be64b302d7cba6ce15bb556f4"; + htest.assertEqualHash(Blake2b384, h2, "abc"); + + const h3 = "b7c81b228b6bd912930e8f0b5387989691c1cee1e65aade4da3b86a3c9f678fc8018f6ed9e2906720c8d2a3aeda9c03d"; + htest.assertEqualHash(Blake2b384, h3, "The quick brown fox jumps over the lazy dog"); +} + +test "blake2b384 streaming" { + var h = Blake2b384.init(); + var out: [48]u8 = undefined; + + const h1 = "b32811423377f52d7862286ee1a72ee540524380fda1724a6f25d7978c6fd3244a6caf0498812673c5e05ef583825100"; + + h.final(out[0..]); + htest.assertEqual(h1, out[0..]); + + const h2 = "6f56a82c8e7ef526dfe182eb5212f7db9df1317e57815dbda46083fc30f54ee6c66ba83be64b302d7cba6ce15bb556f4"; + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); +} + +test "blake2b512 single" { + const h1 = "786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce"; + htest.assertEqualHash(Blake2b512, h1, ""); + + const h2 = "ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d17d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923"; + htest.assertEqualHash(Blake2b512, h2, "abc"); + + const h3 = "a8add4bdddfd93e4877d2746e62817b116364a1fa7bc148d95090bc7333b3673f82401cf7aa2e4cb1ecd90296e3f14cb5413f8ed77be73045b13914cdcd6a918"; + htest.assertEqualHash(Blake2b512, h3, "The quick brown fox jumps over the lazy dog"); +} + +test "blake2b512 streaming" { + var h = Blake2b512.init(); + var out: [64]u8 = undefined; + + const h1 = "786a02f742015903c6c6fd852552d272912f4740e15847618a86e217f71f5419d25e1031afee585313896444934eb04b903a685b1448b755d56f701afe9be2ce"; + + h.final(out[0..]); + htest.assertEqual(h1, out[0..]); + + const h2 = "ba80a53f981c4d0d6a2797b69f12f6e94c212f14685ac4b74b12bb6fdbffa2d17d87c5392aab792dc252d5de4533cc9518d38aa8dbf1925ab92386edd4009923"; + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); +} diff --git a/std/crypto/index.zig b/std/crypto/index.zig new file mode 100644 index 
0000000000..839704e3e7 --- /dev/null +++ b/std/crypto/index.zig @@ -0,0 +1,21 @@ +pub const Md5 = @import("md5.zig").Md5; +pub const Sha1 = @import("sha1.zig").Sha1; + +const sha2 = @import("sha2.zig"); +pub const Sha224 = sha2.Sha224; +pub const Sha256 = sha2.Sha256; +pub const Sha384 = sha2.Sha384; +pub const Sha512 = sha2.Sha512; + +const blake2 = @import("blake2.zig"); +pub const Blake2s224 = blake2.Blake2s224; +pub const Blake2s256 = blake2.Blake2s256; +pub const Blake2b384 = blake2.Blake2b384; +pub const Blake2b512 = blake2.Blake2b512; + +test "crypto" { + _ = @import("md5.zig"); + _ = @import("sha1.zig"); + _ = @import("sha2.zig"); + _ = @import("blake2.zig"); +} diff --git a/std/crypto/md5.zig b/std/crypto/md5.zig new file mode 100644 index 0000000000..02f1954380 --- /dev/null +++ b/std/crypto/md5.zig @@ -0,0 +1,260 @@ +const mem = @import("../mem.zig"); +const math = @import("../math/index.zig"); +const endian = @import("../endian.zig"); +const builtin = @import("builtin"); +const debug = @import("../debug/index.zig"); +const fmt = @import("../fmt/index.zig"); + +const RoundParam = struct { + a: usize, b: usize, c: usize, d: usize, + k: usize, s: u32, t: u32 +}; + +fn Rp(a: usize, b: usize, c: usize, d: usize, k: usize, s: u32, t: u32) -> RoundParam { + return RoundParam { .a = a, .b = b, .c = c, .d = d, .k = k, .s = s, .t = t }; +} + +/// var hash: [16]u8 = undefined; +/// Md5.hash("my input", hash[0..]); +/// +/// var hasher = Md5.init(); +/// hasher.update("my "); hasher.update("input"); +/// hasher.final(hash[0..]); +pub const Md5 = struct { + const Self = this; + + s: [4]u32, + // Streaming Cache + buf: [64]u8, + buf_len: u8, + total_len: u64, + + pub fn init() -> Self { + var d: Self = undefined; + d.reset(); + return d; + } + + pub fn reset(d: &Self) { + d.s[0] = 0x67452301; + d.s[1] = 0xEFCDAB89; + d.s[2] = 0x98BADCFE; + d.s[3] = 0x10325476; + d.buf_len = 0; + d.total_len = 0; + } + + pub fn hash(b: []const u8, out: []u8) { + var d = Md5.init(); + d.update(b); + d.final(out); + } + + pub fn update(d: &Self, b: []const u8) { + var off: usize = 0; + + // Partial buffer exists from previous update. Copy into buffer then hash. + if (d.buf_len != 0 and d.buf_len + b.len > 64) { + off += 64 - d.buf_len; + mem.copy(u8, d.buf[d.buf_len..], b[0..off]); + + d.round(d.buf[0..]); + d.buf_len = 0; + } + + // Full middle blocks. + while (off + 64 < b.len) : (off += 64) { + d.round(b[off..off + 64]); + } + + // Copy any remainder for next pass. + mem.copy(u8, d.buf[d.buf_len..], b[off..]); + d.buf_len += u8(b[off..].len); + + // Md5 uses the bottom 64-bits for length padding + d.total_len +%= b.len; + } + + pub fn final(d: &Self, out: []u8) { + debug.assert(out.len >= 16); + + // The buffer here will never be completely full. + mem.set(u8, d.buf[d.buf_len..], 0); + + // Append padding bits. + d.buf[d.buf_len] = 0x80; + d.buf_len += 1; + + // > 448 mod 512 so need to add an extra round to wrap around. + if (64 - d.buf_len < 8) { + d.round(d.buf[0..]); + mem.set(u8, d.buf[0..], 0); + } + + // Append message length. + var i: usize = 1; + var len = d.total_len >> 5; + d.buf[56] = u8(d.total_len & 0x1f) << 3; + while (i < 8) : (i += 1) { + d.buf[56 + i] = u8(len & 0xff); + len >>= 8; + } + + d.round(d.buf[0..]); + + for (d.s) |s, j| { + mem.writeInt(out[4*j ..
4*j + 4], s, builtin.Endian.Little); + } + } + + fn round(d: &Self, b: []const u8) { + debug.assert(b.len == 64); + + var s: [16]u32 = undefined; + + // ERROR: cannot unroll this at comptime + var i: usize = 0; + while (i < 16) : (i += 1) { + // NOTE: Performing or's separately improves perf by ~10% + s[i] = 0; + s[i] |= u32(b[i*4+0]); + s[i] |= u32(b[i*4+1]) << 8; + s[i] |= u32(b[i*4+2]) << 16; + s[i] |= u32(b[i*4+3]) << 24; + } + + var v: [4]u32 = []u32 { + d.s[0], d.s[1], d.s[2], d.s[3], + }; + + const round0 = comptime []RoundParam { + Rp(0, 1, 2, 3, 0, 7, 0xD76AA478), + Rp(3, 0, 1, 2, 1, 12, 0xE8C7B756), + Rp(2, 3, 0, 1, 2, 17, 0x242070DB), + Rp(1, 2, 3, 0, 3, 22, 0xC1BDCEEE), + Rp(0, 1, 2, 3, 4, 7, 0xF57C0FAF), + Rp(3, 0, 1, 2, 5, 12, 0x4787C62A), + Rp(2, 3, 0, 1, 6, 17, 0xA8304613), + Rp(1, 2, 3, 0, 7, 22, 0xFD469501), + Rp(0, 1, 2, 3, 8, 7, 0x698098D8), + Rp(3, 0, 1, 2, 9, 12, 0x8B44F7AF), + Rp(2, 3, 0, 1, 10, 17, 0xFFFF5BB1), + Rp(1, 2, 3, 0, 11, 22, 0x895CD7BE), + Rp(0, 1, 2, 3, 12, 7, 0x6B901122), + Rp(3, 0, 1, 2, 13, 12, 0xFD987193), + Rp(2, 3, 0, 1, 14, 17, 0xA679438E), + Rp(1, 2, 3, 0, 15, 22, 0x49B40821), + }; + inline for (round0) |r| { + v[r.a] = v[r.a] +% (v[r.d] ^ (v[r.b] & (v[r.c] ^ v[r.d]))) +% r.t +% s[r.k]; + v[r.a] = v[r.b] +% math.rotl(u32, v[r.a], r.s); + } + + const round1 = comptime []RoundParam { + Rp(0, 1, 2, 3, 1, 5, 0xF61E2562), + Rp(3, 0, 1, 2, 6, 9, 0xC040B340), + Rp(2, 3, 0, 1, 11, 14, 0x265E5A51), + Rp(1, 2, 3, 0, 0, 20, 0xE9B6C7AA), + Rp(0, 1, 2, 3, 5, 5, 0xD62F105D), + Rp(3, 0, 1, 2, 10, 9, 0x02441453), + Rp(2, 3, 0, 1, 15, 14, 0xD8A1E681), + Rp(1, 2, 3, 0, 4, 20, 0xE7D3FBC8), + Rp(0, 1, 2, 3, 9, 5, 0x21E1CDE6), + Rp(3, 0, 1, 2, 14, 9, 0xC33707D6), + Rp(2, 3, 0, 1, 3, 14, 0xF4D50D87), + Rp(1, 2, 3, 0, 8, 20, 0x455A14ED), + Rp(0, 1, 2, 3, 13, 5, 0xA9E3E905), + Rp(3, 0, 1, 2, 2, 9, 0xFCEFA3F8), + Rp(2, 3, 0, 1, 7, 14, 0x676F02D9), + Rp(1, 2, 3, 0, 12, 20, 0x8D2A4C8A), + }; + inline for (round1) |r| { + v[r.a] = v[r.a] +% (v[r.c] ^ (v[r.d] & (v[r.b] ^ v[r.c]))) +% r.t +% s[r.k]; + v[r.a] = v[r.b] +% math.rotl(u32, v[r.a], r.s); + } + + const round2 = comptime []RoundParam { + Rp(0, 1, 2, 3, 5, 4, 0xFFFA3942), + Rp(3, 0, 1, 2, 8, 11, 0x8771F681), + Rp(2, 3, 0, 1, 11, 16, 0x6D9D6122), + Rp(1, 2, 3, 0, 14, 23, 0xFDE5380C), + Rp(0, 1, 2, 3, 1, 4, 0xA4BEEA44), + Rp(3, 0, 1, 2, 4, 11, 0x4BDECFA9), + Rp(2, 3, 0, 1, 7, 16, 0xF6BB4B60), + Rp(1, 2, 3, 0, 10, 23, 0xBEBFBC70), + Rp(0, 1, 2, 3, 13, 4, 0x289B7EC6), + Rp(3, 0, 1, 2, 0, 11, 0xEAA127FA), + Rp(2, 3, 0, 1, 3, 16, 0xD4EF3085), + Rp(1, 2, 3, 0, 6, 23, 0x04881D05), + Rp(0, 1, 2, 3, 9, 4, 0xD9D4D039), + Rp(3, 0, 1, 2, 12, 11, 0xE6DB99E5), + Rp(2, 3, 0, 1, 15, 16, 0x1FA27CF8), + Rp(1, 2, 3, 0, 2, 23, 0xC4AC5665), + }; + inline for (round2) |r| { + v[r.a] = v[r.a] +% (v[r.b] ^ v[r.c] ^ v[r.d]) +% r.t +% s[r.k]; + v[r.a] = v[r.b] +% math.rotl(u32, v[r.a], r.s); + } + + const round3 = comptime []RoundParam { + Rp(0, 1, 2, 3, 0, 6, 0xF4292244), + Rp(3, 0, 1, 2, 7, 10, 0x432AFF97), + Rp(2, 3, 0, 1, 14, 15, 0xAB9423A7), + Rp(1, 2, 3, 0, 5, 21, 0xFC93A039), + Rp(0, 1, 2, 3, 12, 6, 0x655B59C3), + Rp(3, 0, 1, 2, 3, 10, 0x8F0CCC92), + Rp(2, 3, 0, 1, 10, 15, 0xFFEFF47D), + Rp(1, 2, 3, 0, 1, 21, 0x85845DD1), + Rp(0, 1, 2, 3, 8, 6, 0x6FA87E4F), + Rp(3, 0, 1, 2, 15, 10, 0xFE2CE6E0), + Rp(2, 3, 0, 1, 6, 15, 0xA3014314), + Rp(1, 2, 3, 0, 13, 21, 0x4E0811A1), + Rp(0, 1, 2, 3, 4, 6, 0xF7537E82), + Rp(3, 0, 1, 2, 11, 10, 0xBD3AF235), + Rp(2, 3, 0, 1, 2, 15, 0x2AD7D2BB), + Rp(1, 2, 3, 0, 9, 21, 0xEB86D391), + }; + inline for 
(round3) |r| { + v[r.a] = v[r.a] +% (v[r.c] ^ (v[r.b] | ~v[r.d])) +% r.t +% s[r.k]; + v[r.a] = v[r.b] +% math.rotl(u32, v[r.a], r.s); + } + + d.s[0] +%= v[0]; + d.s[1] +%= v[1]; + d.s[2] +%= v[2]; + d.s[3] +%= v[3]; + } +}; + +const htest = @import("test.zig"); + +test "md5 single" { + htest.assertEqualHash(Md5, "d41d8cd98f00b204e9800998ecf8427e", ""); + htest.assertEqualHash(Md5, "0cc175b9c0f1b6a831c399e269772661", "a"); + htest.assertEqualHash(Md5, "900150983cd24fb0d6963f7d28e17f72", "abc"); + htest.assertEqualHash(Md5, "f96b697d7cb7938d525a2f31aaf161d0", "message digest"); + htest.assertEqualHash(Md5, "c3fcd3d76192e4007dfb496cca67e13b", "abcdefghijklmnopqrstuvwxyz"); + htest.assertEqualHash(Md5, "d174ab98d277d9f5a5611c2c9f419d9f", "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"); + htest.assertEqualHash(Md5, "57edf4a22be3c955ac49da2e2107b67a", "12345678901234567890123456789012345678901234567890123456789012345678901234567890"); +} + +test "md5 streaming" { + var h = Md5.init(); + var out: [16]u8 = undefined; + + h.final(out[0..]); + htest.assertEqual("d41d8cd98f00b204e9800998ecf8427e", out[0..]); + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual("900150983cd24fb0d6963f7d28e17f72", out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + + htest.assertEqual("900150983cd24fb0d6963f7d28e17f72", out[0..]); +} diff --git a/std/crypto/sha1.zig b/std/crypto/sha1.zig new file mode 100644 index 0000000000..bb68b62983 --- /dev/null +++ b/std/crypto/sha1.zig @@ -0,0 +1,284 @@ +const mem = @import("../mem.zig"); +const math = @import("../math/index.zig"); +const endian = @import("../endian.zig"); +const debug = @import("../debug/index.zig"); +const builtin = @import("builtin"); + +pub const u160 = @IntType(false, 160); + +const RoundParam = struct { + a: usize, b: usize, c: usize, d: usize, e: usize, i: u32, +}; + +fn Rp(a: usize, b: usize, c: usize, d: usize, e: usize, i: u32) -> RoundParam { + return RoundParam { .a = a, .b = b, .c = c, .d = d, .e = e, .i = i }; +} + +pub const Sha1 = struct { + const Self = this; + + s: [5]u32, + // Streaming Cache + buf: [64]u8, + buf_len: u8, + total_len: u64, + + pub fn init() -> Self { + var d: Self = undefined; + d.reset(); + return d; + } + + pub fn reset(d: &Self) { + d.s[0] = 0x67452301; + d.s[1] = 0xEFCDAB89; + d.s[2] = 0x98BADCFE; + d.s[3] = 0x10325476; + d.s[4] = 0xC3D2E1F0; + d.buf_len = 0; + d.total_len = 0; + } + + pub fn hash(b: []const u8, out: []u8) { + var d = Sha1.init(); + d.update(b); + d.final(out); + } + + pub fn update(d: &Self, b: []const u8) { + var off: usize = 0; + + // Partial buffer exists from previous update. Copy into buffer then hash. + if (d.buf_len != 0 and d.buf_len + b.len > 64) { + off += 64 - d.buf_len; + mem.copy(u8, d.buf[d.buf_len..], b[0..off]); + + d.round(d.buf[0..]); + d.buf_len = 0; + } + + // Full middle blocks. + while (off + 64 < b.len) : (off += 64) { + d.round(b[off..off + 64]); + } + + // Copy any remainder for next pass. + mem.copy(u8, d.buf[d.buf_len..], b[off..]); + d.buf_len += u8(b[off..].len); + + d.total_len += b.len; + } + + pub fn final(d: &Self, out: []u8) { + debug.assert(out.len >= 20); + + // The buffer here will never be completely full. + mem.set(u8, d.buf[d.buf_len..], 0); + + // Append padding bits. + d.buf[d.buf_len] = 0x80; + d.buf_len += 1; + + // > 448 mod 512 so need to add an extra round to wrap around. 
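+ // (The 0x80 padding byte and the 8-byte bit-length must share the final block; if fewer than 8 bytes remain here, hash this block now and encode the length in a fresh zeroed block.)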
+ if (64 - d.buf_len < 8) { + d.round(d.buf[0..]); + mem.set(u8, d.buf[0..], 0); + } + + // Append message length. + var i: usize = 1; + var len = d.total_len >> 5; + d.buf[63] = u8(d.total_len & 0x1f) << 3; + while (i < 8) : (i += 1) { + d.buf[63 - i] = u8(len & 0xff); + len >>= 8; + } + + d.round(d.buf[0..]); + + for (d.s) |s, j| { + mem.writeInt(out[4*j .. 4*j + 4], s, builtin.Endian.Big); + } + } + + fn round(d: &Self, b: []const u8) { + debug.assert(b.len == 64); + + var s: [16]u32 = undefined; + + var v: [5]u32 = []u32 { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4], + }; + + const round0a = comptime []RoundParam { + Rp(0, 1, 2, 3, 4, 0), + Rp(4, 0, 1, 2, 3, 1), + Rp(3, 4, 0, 1, 2, 2), + Rp(2, 3, 4, 0, 1, 3), + Rp(1, 2, 3, 4, 0, 4), + Rp(0, 1, 2, 3, 4, 5), + Rp(4, 0, 1, 2, 3, 6), + Rp(3, 4, 0, 1, 2, 7), + Rp(2, 3, 4, 0, 1, 8), + Rp(1, 2, 3, 4, 0, 9), + Rp(0, 1, 2, 3, 4, 10), + Rp(4, 0, 1, 2, 3, 11), + Rp(3, 4, 0, 1, 2, 12), + Rp(2, 3, 4, 0, 1, 13), + Rp(1, 2, 3, 4, 0, 14), + Rp(0, 1, 2, 3, 4, 15), + }; + inline for (round0a) |r| { + s[r.i] = (u32(b[r.i * 4 + 0]) << 24) | + (u32(b[r.i * 4 + 1]) << 16) | + (u32(b[r.i * 4 + 2]) << 8) | + (u32(b[r.i * 4 + 3]) << 0); + + v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x5A827999 +% s[r.i & 0xf] + +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d])); + v[r.b] = math.rotl(u32, v[r.b], u32(30)); + } + + const round0b = comptime []RoundParam { + Rp(4, 0, 1, 2, 3, 16), + Rp(3, 4, 0, 1, 2, 17), + Rp(2, 3, 4, 0, 1, 18), + Rp(1, 2, 3, 4, 0, 19), + }; + inline for (round0b) |r| { + const t = s[(r.i-3) & 0xf] ^ s[(r.i-8) & 0xf] ^ s[(r.i-14) & 0xf] ^ s[(r.i-16) & 0xf]; + s[r.i & 0xf] = math.rotl(u32, t, u32(1)); + + v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x5A827999 +% s[r.i & 0xf] + +% ((v[r.b] & v[r.c]) | (~v[r.b] & v[r.d])); + v[r.b] = math.rotl(u32, v[r.b], u32(30)); + } + + const round1 = comptime []RoundParam { + Rp(0, 1, 2, 3, 4, 20), + Rp(4, 0, 1, 2, 3, 21), + Rp(3, 4, 0, 1, 2, 22), + Rp(2, 3, 4, 0, 1, 23), + Rp(1, 2, 3, 4, 0, 24), + Rp(0, 1, 2, 3, 4, 25), + Rp(4, 0, 1, 2, 3, 26), + Rp(3, 4, 0, 1, 2, 27), + Rp(2, 3, 4, 0, 1, 28), + Rp(1, 2, 3, 4, 0, 29), + Rp(0, 1, 2, 3, 4, 30), + Rp(4, 0, 1, 2, 3, 31), + Rp(3, 4, 0, 1, 2, 32), + Rp(2, 3, 4, 0, 1, 33), + Rp(1, 2, 3, 4, 0, 34), + Rp(0, 1, 2, 3, 4, 35), + Rp(4, 0, 1, 2, 3, 36), + Rp(3, 4, 0, 1, 2, 37), + Rp(2, 3, 4, 0, 1, 38), + Rp(1, 2, 3, 4, 0, 39), + }; + inline for (round1) |r| { + const t = s[(r.i-3) & 0xf] ^ s[(r.i-8) & 0xf] ^ s[(r.i-14) & 0xf] ^ s[(r.i-16) & 0xf]; + s[r.i & 0xf] = math.rotl(u32, t, u32(1)); + + v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x6ED9EBA1 +% s[r.i & 0xf] + +% (v[r.b] ^ v[r.c] ^ v[r.d]); + v[r.b] = math.rotl(u32, v[r.b], u32(30)); + } + + const round2 = comptime []RoundParam { + Rp(0, 1, 2, 3, 4, 40), + Rp(4, 0, 1, 2, 3, 41), + Rp(3, 4, 0, 1, 2, 42), + Rp(2, 3, 4, 0, 1, 43), + Rp(1, 2, 3, 4, 0, 44), + Rp(0, 1, 2, 3, 4, 45), + Rp(4, 0, 1, 2, 3, 46), + Rp(3, 4, 0, 1, 2, 47), + Rp(2, 3, 4, 0, 1, 48), + Rp(1, 2, 3, 4, 0, 49), + Rp(0, 1, 2, 3, 4, 50), + Rp(4, 0, 1, 2, 3, 51), + Rp(3, 4, 0, 1, 2, 52), + Rp(2, 3, 4, 0, 1, 53), + Rp(1, 2, 3, 4, 0, 54), + Rp(0, 1, 2, 3, 4, 55), + Rp(4, 0, 1, 2, 3, 56), + Rp(3, 4, 0, 1, 2, 57), + Rp(2, 3, 4, 0, 1, 58), + Rp(1, 2, 3, 4, 0, 59), + }; + inline for (round2) |r| { + const t = s[(r.i-3) & 0xf] ^ s[(r.i-8) & 0xf] ^ s[(r.i-14) & 0xf] ^ s[(r.i-16) & 0xf]; + s[r.i & 0xf] = math.rotl(u32, t, u32(1)); + + v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0x8F1BBCDC +% s[r.i & 0xf] + +% ((v[r.b] & v[r.c]) ^ (v[r.b] & 
v[r.d]) ^ (v[r.c] & v[r.d])); + v[r.b] = math.rotl(u32, v[r.b], u32(30)); + } + + const round3 = comptime []RoundParam { + Rp(0, 1, 2, 3, 4, 60), + Rp(4, 0, 1, 2, 3, 61), + Rp(3, 4, 0, 1, 2, 62), + Rp(2, 3, 4, 0, 1, 63), + Rp(1, 2, 3, 4, 0, 64), + Rp(0, 1, 2, 3, 4, 65), + Rp(4, 0, 1, 2, 3, 66), + Rp(3, 4, 0, 1, 2, 67), + Rp(2, 3, 4, 0, 1, 68), + Rp(1, 2, 3, 4, 0, 69), + Rp(0, 1, 2, 3, 4, 70), + Rp(4, 0, 1, 2, 3, 71), + Rp(3, 4, 0, 1, 2, 72), + Rp(2, 3, 4, 0, 1, 73), + Rp(1, 2, 3, 4, 0, 74), + Rp(0, 1, 2, 3, 4, 75), + Rp(4, 0, 1, 2, 3, 76), + Rp(3, 4, 0, 1, 2, 77), + Rp(2, 3, 4, 0, 1, 78), + Rp(1, 2, 3, 4, 0, 79), + }; + inline for (round3) |r| { + const t = s[(r.i-3) & 0xf] ^ s[(r.i-8) & 0xf] ^ s[(r.i-14) & 0xf] ^ s[(r.i-16) & 0xf]; + s[r.i & 0xf] = math.rotl(u32, t, u32(1)); + + v[r.e] = v[r.e] +% math.rotl(u32, v[r.a], u32(5)) +% 0xCA62C1D6 +% s[r.i & 0xf] + +% (v[r.b] ^ v[r.c] ^ v[r.d]); + v[r.b] = math.rotl(u32, v[r.b], u32(30)); + } + + d.s[0] +%= v[0]; + d.s[1] +%= v[1]; + d.s[2] +%= v[2]; + d.s[3] +%= v[3]; + d.s[4] +%= v[4]; + } +}; + +const htest = @import("test.zig"); + +test "sha1 single" { + htest.assertEqualHash(Sha1, "da39a3ee5e6b4b0d3255bfef95601890afd80709", ""); + htest.assertEqualHash(Sha1, "a9993e364706816aba3e25717850c26c9cd0d89d", "abc"); + htest.assertEqualHash(Sha1, "a49b2446a02c645bf419f995b67091253a04a259", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"); +} + +test "sha1 streaming" { + var h = Sha1.init(); + var out: [20]u8 = undefined; + + h.final(out[0..]); + htest.assertEqual("da39a3ee5e6b4b0d3255bfef95601890afd80709", out[0..]); + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual("a9993e364706816aba3e25717850c26c9cd0d89d", out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual("a9993e364706816aba3e25717850c26c9cd0d89d", out[0..]); +} diff --git a/std/crypto/sha2.zig b/std/crypto/sha2.zig new file mode 100644 index 0000000000..5ddcb7df91 --- /dev/null +++ b/std/crypto/sha2.zig @@ -0,0 +1,670 @@ +const mem = @import("../mem.zig"); +const math = @import("../math/index.zig"); +const endian = @import("../endian.zig"); +const debug = @import("../debug/index.zig"); +const builtin = @import("builtin"); +const htest = @import("test.zig"); + +///////////////////// +// Sha224 + Sha256 + +const RoundParam256 = struct { + a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize, + i: usize, k: u32, +}; + +fn Rp256(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize, i: usize, k: u32) -> RoundParam256 { + return RoundParam256 { .a = a, .b = b, .c = c, .d = d, .e = e, .f = f, .g = g, .h = h, .i = i, .k = k }; +} + +const Sha2Params32 = struct { + iv0: u32, + iv1: u32, + iv2: u32, + iv3: u32, + iv4: u32, + iv5: u32, + iv6: u32, + iv7: u32, + out_len: usize, +}; + +const Sha224Params = Sha2Params32 { + .iv0 = 0xC1059ED8, + .iv1 = 0x367CD507, + .iv2 = 0x3070DD17, + .iv3 = 0xF70E5939, + .iv4 = 0xFFC00B31, + .iv5 = 0x68581511, + .iv6 = 0x64F98FA7, + .iv7 = 0xBEFA4FA4, + .out_len = 224, +}; + +const Sha256Params = Sha2Params32 { + .iv0 = 0x6A09E667, + .iv1 = 0xBB67AE85, + .iv2 = 0x3C6EF372, + .iv3 = 0xA54FF53A, + .iv4 = 0x510E527F, + .iv5 = 0x9B05688C, + .iv6 = 0x1F83D9AB, + .iv7 = 0x5BE0CD19, + .out_len = 256, +}; + +pub const Sha224 = Sha2_32(Sha224Params); +pub const Sha256 = Sha2_32(Sha256Params); + +fn Sha2_32(comptime params: Sha2Params32) -> type { return struct { + const 
Self = this; + + s: [8]u32, + // Streaming Cache + buf: [64]u8, + buf_len: u8, + total_len: u64, + + pub fn init() -> Self { + var d: Self = undefined; + d.reset(); + return d; + } + + pub fn reset(d: &Self) { + d.s[0] = params.iv0; + d.s[1] = params.iv1; + d.s[2] = params.iv2; + d.s[3] = params.iv3; + d.s[4] = params.iv4; + d.s[5] = params.iv5; + d.s[6] = params.iv6; + d.s[7] = params.iv7; + d.buf_len = 0; + d.total_len = 0; + } + + pub fn hash(b: []const u8, out: []u8) { + var d = Self.init(); + d.update(b); + d.final(out); + } + + pub fn update(d: &Self, b: []const u8) { + var off: usize = 0; + + // Partial buffer exists from previous update. Copy into buffer then hash. + if (d.buf_len != 0 and d.buf_len + b.len > 64) { + off += 64 - d.buf_len; + mem.copy(u8, d.buf[d.buf_len..], b[0..off]); + + d.round(d.buf[0..]); + d.buf_len = 0; + } + + // Full middle blocks. + while (off + 64 < b.len) : (off += 64) { + d.round(b[off..off + 64]); + } + + // Copy any remainder for next pass. + mem.copy(u8, d.buf[d.buf_len..], b[off..]); + d.buf_len += u8(b[off..].len); + + d.total_len += b.len; + } + + pub fn final(d: &Self, out: []u8) { + debug.assert(out.len >= params.out_len / 8); + + // The buffer here will never be completely full. + mem.set(u8, d.buf[d.buf_len..], 0); + + // Append padding bits. + d.buf[d.buf_len] = 0x80; + d.buf_len += 1; + + // > 448 mod 512 so need to add an extra round to wrap around. + if (64 - d.buf_len < 8) { + d.round(d.buf[0..]); + mem.set(u8, d.buf[0..], 0); + } + + // Append message length. + var i: usize = 1; + var len = d.total_len >> 5; + d.buf[63] = u8(d.total_len & 0x1f) << 3; + while (i < 8) : (i += 1) { + d.buf[63 - i] = u8(len & 0xff); + len >>= 8; + } + + d.round(d.buf[0..]); + + // May truncate for possible 224 output + const rr = d.s[0 .. params.out_len / 32]; + + for (rr) |s, j| { + mem.writeInt(out[4*j .. 4*j + 4], s, builtin.Endian.Big); + } + } + + fn round(d: &Self, b: []const u8) { + debug.assert(b.len == 64); + + var s: [64]u32 = undefined; + + // ERROR: Cannot unroll at compile-time. 
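+ // Message schedule: load the first 16 words big-endian, then extend to 64 words using the SHA-256 small-sigma functions (rotr 7/18, shr 3 and rotr 17/19, shr 10) below.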
+ var i: usize = 0; + while (i < 16) : (i += 1) { + s[i] = 0; + s[i] |= u32(b[i*4+0]) << 24; + s[i] |= u32(b[i*4+1]) << 16; + s[i] |= u32(b[i*4+2]) << 8; + s[i] |= u32(b[i*4+3]) << 0; + } + while (i < 64) : (i += 1) { + s[i] = + s[i-16] +% s[i-7] +% + (math.rotr(u32, s[i-15], u32(7)) ^ math.rotr(u32, s[i-15], u32(18)) ^ (s[i-15] >> 3)) +% + (math.rotr(u32, s[i-2], u32(17)) ^ math.rotr(u32, s[i-2], u32(19)) ^ (s[i-2] >> 10)); + } + + var v: [8]u32 = []u32 { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4], d.s[5], d.s[6], d.s[7], + }; + + const round0 = comptime []RoundParam256 { + Rp256(0, 1, 2, 3, 4, 5, 6, 7, 0, 0x428A2F98), + Rp256(7, 0, 1, 2, 3, 4, 5, 6, 1, 0x71374491), + Rp256(6, 7, 0, 1, 2, 3, 4, 5, 2, 0xB5C0FBCF), + Rp256(5, 6, 7, 0, 1, 2, 3, 4, 3, 0xE9B5DBA5), + Rp256(4, 5, 6, 7, 0, 1, 2, 3, 4, 0x3956C25B), + Rp256(3, 4, 5, 6, 7, 0, 1, 2, 5, 0x59F111F1), + Rp256(2, 3, 4, 5, 6, 7, 0, 1, 6, 0x923F82A4), + Rp256(1, 2, 3, 4, 5, 6, 7, 0, 7, 0xAB1C5ED5), + Rp256(0, 1, 2, 3, 4, 5, 6, 7, 8, 0xD807AA98), + Rp256(7, 0, 1, 2, 3, 4, 5, 6, 9, 0x12835B01), + Rp256(6, 7, 0, 1, 2, 3, 4, 5, 10, 0x243185BE), + Rp256(5, 6, 7, 0, 1, 2, 3, 4, 11, 0x550C7DC3), + Rp256(4, 5, 6, 7, 0, 1, 2, 3, 12, 0x72BE5D74), + Rp256(3, 4, 5, 6, 7, 0, 1, 2, 13, 0x80DEB1FE), + Rp256(2, 3, 4, 5, 6, 7, 0, 1, 14, 0x9BDC06A7), + Rp256(1, 2, 3, 4, 5, 6, 7, 0, 15, 0xC19BF174), + Rp256(0, 1, 2, 3, 4, 5, 6, 7, 16, 0xE49B69C1), + Rp256(7, 0, 1, 2, 3, 4, 5, 6, 17, 0xEFBE4786), + Rp256(6, 7, 0, 1, 2, 3, 4, 5, 18, 0x0FC19DC6), + Rp256(5, 6, 7, 0, 1, 2, 3, 4, 19, 0x240CA1CC), + Rp256(4, 5, 6, 7, 0, 1, 2, 3, 20, 0x2DE92C6F), + Rp256(3, 4, 5, 6, 7, 0, 1, 2, 21, 0x4A7484AA), + Rp256(2, 3, 4, 5, 6, 7, 0, 1, 22, 0x5CB0A9DC), + Rp256(1, 2, 3, 4, 5, 6, 7, 0, 23, 0x76F988DA), + Rp256(0, 1, 2, 3, 4, 5, 6, 7, 24, 0x983E5152), + Rp256(7, 0, 1, 2, 3, 4, 5, 6, 25, 0xA831C66D), + Rp256(6, 7, 0, 1, 2, 3, 4, 5, 26, 0xB00327C8), + Rp256(5, 6, 7, 0, 1, 2, 3, 4, 27, 0xBF597FC7), + Rp256(4, 5, 6, 7, 0, 1, 2, 3, 28, 0xC6E00BF3), + Rp256(3, 4, 5, 6, 7, 0, 1, 2, 29, 0xD5A79147), + Rp256(2, 3, 4, 5, 6, 7, 0, 1, 30, 0x06CA6351), + Rp256(1, 2, 3, 4, 5, 6, 7, 0, 31, 0x14292967), + Rp256(0, 1, 2, 3, 4, 5, 6, 7, 32, 0x27B70A85), + Rp256(7, 0, 1, 2, 3, 4, 5, 6, 33, 0x2E1B2138), + Rp256(6, 7, 0, 1, 2, 3, 4, 5, 34, 0x4D2C6DFC), + Rp256(5, 6, 7, 0, 1, 2, 3, 4, 35, 0x53380D13), + Rp256(4, 5, 6, 7, 0, 1, 2, 3, 36, 0x650A7354), + Rp256(3, 4, 5, 6, 7, 0, 1, 2, 37, 0x766A0ABB), + Rp256(2, 3, 4, 5, 6, 7, 0, 1, 38, 0x81C2C92E), + Rp256(1, 2, 3, 4, 5, 6, 7, 0, 39, 0x92722C85), + Rp256(0, 1, 2, 3, 4, 5, 6, 7, 40, 0xA2BFE8A1), + Rp256(7, 0, 1, 2, 3, 4, 5, 6, 41, 0xA81A664B), + Rp256(6, 7, 0, 1, 2, 3, 4, 5, 42, 0xC24B8B70), + Rp256(5, 6, 7, 0, 1, 2, 3, 4, 43, 0xC76C51A3), + Rp256(4, 5, 6, 7, 0, 1, 2, 3, 44, 0xD192E819), + Rp256(3, 4, 5, 6, 7, 0, 1, 2, 45, 0xD6990624), + Rp256(2, 3, 4, 5, 6, 7, 0, 1, 46, 0xF40E3585), + Rp256(1, 2, 3, 4, 5, 6, 7, 0, 47, 0x106AA070), + Rp256(0, 1, 2, 3, 4, 5, 6, 7, 48, 0x19A4C116), + Rp256(7, 0, 1, 2, 3, 4, 5, 6, 49, 0x1E376C08), + Rp256(6, 7, 0, 1, 2, 3, 4, 5, 50, 0x2748774C), + Rp256(5, 6, 7, 0, 1, 2, 3, 4, 51, 0x34B0BCB5), + Rp256(4, 5, 6, 7, 0, 1, 2, 3, 52, 0x391C0CB3), + Rp256(3, 4, 5, 6, 7, 0, 1, 2, 53, 0x4ED8AA4A), + Rp256(2, 3, 4, 5, 6, 7, 0, 1, 54, 0x5B9CCA4F), + Rp256(1, 2, 3, 4, 5, 6, 7, 0, 55, 0x682E6FF3), + Rp256(0, 1, 2, 3, 4, 5, 6, 7, 56, 0x748F82EE), + Rp256(7, 0, 1, 2, 3, 4, 5, 6, 57, 0x78A5636F), + Rp256(6, 7, 0, 1, 2, 3, 4, 5, 58, 0x84C87814), + Rp256(5, 6, 7, 0, 1, 2, 3, 4, 59, 0x8CC70208), + Rp256(4, 5, 6, 7, 0, 1, 2, 3, 60, 
0x90BEFFFA), + Rp256(3, 4, 5, 6, 7, 0, 1, 2, 61, 0xA4506CEB), + Rp256(2, 3, 4, 5, 6, 7, 0, 1, 62, 0xBEF9A3F7), + Rp256(1, 2, 3, 4, 5, 6, 7, 0, 63, 0xC67178F2), + }; + inline for (round0) |r| { + v[r.h] = + v[r.h] +% + (math.rotr(u32, v[r.e], u32(6)) ^ math.rotr(u32, v[r.e], u32(11)) ^ math.rotr(u32, v[r.e], u32(25))) +% + (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% + r.k +% s[r.i]; + + v[r.d] = v[r.d] +% v[r.h]; + + v[r.h] = + v[r.h] +% + (math.rotr(u32, v[r.a], u32(2)) ^ math.rotr(u32, v[r.a], u32(13)) ^ math.rotr(u32, v[r.a], u32(22))) +% + ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c])); + } + + d.s[0] +%= v[0]; + d.s[1] +%= v[1]; + d.s[2] +%= v[2]; + d.s[3] +%= v[3]; + d.s[4] +%= v[4]; + d.s[5] +%= v[5]; + d.s[6] +%= v[6]; + d.s[7] +%= v[7]; + } +};} + +test "sha224 single" { + htest.assertEqualHash(Sha224, "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", ""); + htest.assertEqualHash(Sha224, "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", "abc"); + htest.assertEqualHash(Sha224, "c97ca9a559850ce97a04a96def6d99a9e0e0e2ab14e6b8df265fc0b3", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"); +} + +test "sha224 streaming" { + var h = Sha224.init(); + var out: [28]u8 = undefined; + + h.final(out[0..]); + htest.assertEqual("d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", out[0..]); + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual("23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual("23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", out[0..]); +} + +test "sha256 single" { + htest.assertEqualHash(Sha256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", ""); + htest.assertEqualHash(Sha256, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc"); + htest.assertEqualHash(Sha256, "cf5b16a778af8380036ce59e7b0492370b249b11e8f07a51afac45037afee9d1", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"); +} + +test "sha256 streaming" { + var h = Sha256.init(); + var out: [32]u8 = undefined; + + h.final(out[0..]); + htest.assertEqual("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", out[0..]); + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", out[0..]); +} + + +///////////////////// +// Sha384 + Sha512 + +const RoundParam512 = struct { + a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize, + i: usize, k: u64, +}; + +fn Rp512(a: usize, b: usize, c: usize, d: usize, e: usize, f: usize, g: usize, h: usize, i: usize, k: u64) -> RoundParam512 { + return RoundParam512 { .a = a, .b = b, .c = c, .d = d, .e = e, .f = f, .g = g, .h = h, .i = i, .k = k }; +} + +const Sha2Params64 = struct { + iv0: u64, + iv1: u64, + iv2: u64, + iv3: u64, + iv4: u64, + iv5: u64, + iv6: u64, + iv7: u64, + out_len: usize, +}; + +const Sha384Params = Sha2Params64 { + .iv0 = 0xCBBB9D5DC1059ED8, + .iv1 = 0x629A292A367CD507, + .iv2 = 0x9159015A3070DD17, + .iv3 = 0x152FECD8F70E5939, + .iv4 = 0x67332667FFC00B31, + .iv5 = 0x8EB44A8768581511, + .iv6 = 
0xDB0C2E0D64F98FA7, + .iv7 = 0x47B5481DBEFA4FA4, + .out_len = 384, +}; + +const Sha512Params = Sha2Params64 { + .iv0 = 0x6A09E667F3BCC908, + .iv1 = 0xBB67AE8584CAA73B, + .iv2 = 0x3C6EF372FE94F82B, + .iv3 = 0xA54FF53A5F1D36F1, + .iv4 = 0x510E527FADE682D1, + .iv5 = 0x9B05688C2B3E6C1F, + .iv6 = 0x1F83D9ABFB41BD6B, + .iv7 = 0x5BE0CD19137E2179, + .out_len = 512 +}; + +pub const Sha384 = Sha2_64(Sha384Params); +pub const Sha512 = Sha2_64(Sha512Params); + +fn Sha2_64(comptime params: Sha2Params64) -> type { return struct { + const Self = this; + const u9 = @IntType(false, 9); + + s: [8]u64, + // Streaming Cache + buf: [128]u8, + buf_len: u8, + total_len: u128, + + pub fn init() -> Self { + var d: Self = undefined; + d.reset(); + return d; + } + + pub fn reset(d: &Self) { + d.s[0] = params.iv0; + d.s[1] = params.iv1; + d.s[2] = params.iv2; + d.s[3] = params.iv3; + d.s[4] = params.iv4; + d.s[5] = params.iv5; + d.s[6] = params.iv6; + d.s[7] = params.iv7; + d.buf_len = 0; + d.total_len = 0; + } + + pub fn hash(b: []const u8, out: []u8) { + var d = Self.init(); + d.update(b); + d.final(out); + } + + pub fn update(d: &Self, b: []const u8) { + var off: usize = 0; + + // Partial buffer exists from previous update. Copy into buffer then hash. + if (d.buf_len != 0 and d.buf_len + b.len > 128) { + off += 128 - d.buf_len; + mem.copy(u8, d.buf[d.buf_len..], b[0..off]); + + d.round(d.buf[0..]); + d.buf_len = 0; + } + + // Full middle blocks. + while (off + 128 < b.len) : (off += 128) { + d.round(b[off..off + 128]); + } + + // Copy any remainder for next pass. + mem.copy(u8, d.buf[d.buf_len..], b[off..]); + d.buf_len += u8(b[off..].len); + + d.total_len += b.len; + } + + pub fn final(d: &Self, out: []u8) { + debug.assert(out.len >= params.out_len / 8); + + // The buffer here will never be completely full. + mem.set(u8, d.buf[d.buf_len..], 0); + + // Append padding bits. + d.buf[d.buf_len] = 0x80; + d.buf_len += 1; + + // > 896 mod 1024 so need to add an extra round to wrap around. + if (128 - d.buf_len < 16) { + d.round(d.buf[0..]); + mem.set(u8, d.buf[0..], 0); + } + + // Append message length. + var i: usize = 1; + var len = d.total_len >> 5; + d.buf[127] = u8(d.total_len & 0x1f) << 3; + while (i < 16) : (i += 1) { + d.buf[127 - i] = u8(len & 0xff); + len >>= 8; + } + + d.round(d.buf[0..]); + + // May truncate for possible 384 output + const rr = d.s[0 .. params.out_len / 64]; + + for (rr) |s, j| { + mem.writeInt(out[8*j .. 8*j + 8], s, builtin.Endian.Big); + } + } + + fn round(d: &Self, b: []const u8) { + debug.assert(b.len == 128); + + var s: [80]u64 = undefined; + + // ERROR: Cannot unroll at compile-time. 
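+ // Same message-schedule expansion as the 32-bit variant, but over 64-bit words extended to 80 entries with the SHA-512 sigma rotations (rotr 1/8, shr 7 and rotr 19/61, shr 6).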
+ var i: usize = 0; + while (i < 16) : (i += 1) { + s[i] = 0; + s[i] |= u64(b[i*8+0]) << 56; + s[i] |= u64(b[i*8+1]) << 48; + s[i] |= u64(b[i*8+2]) << 40; + s[i] |= u64(b[i*8+3]) << 32; + s[i] |= u64(b[i*8+4]) << 24; + s[i] |= u64(b[i*8+5]) << 16; + s[i] |= u64(b[i*8+6]) << 8; + s[i] |= u64(b[i*8+7]) << 0; + } + while (i < 80) : (i += 1) { + s[i] = + s[i-16] +% s[i-7] +% + (math.rotr(u64, s[i-15], u64(1)) ^ math.rotr(u64, s[i-15], u64(8)) ^ (s[i-15] >> 7)) +% + (math.rotr(u64, s[i-2], u64(19)) ^ math.rotr(u64, s[i-2], u64(61)) ^ (s[i-2] >> 6)); + } + + var v: [8]u64 = []u64 { + d.s[0], d.s[1], d.s[2], d.s[3], d.s[4], d.s[5], d.s[6], d.s[7], + }; + + const round0 = comptime []RoundParam512 { + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 0, 0x428A2F98D728AE22), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 1, 0x7137449123EF65CD), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 2, 0xB5C0FBCFEC4D3B2F), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 3, 0xE9B5DBA58189DBBC), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 4, 0x3956C25BF348B538), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 5, 0x59F111F1B605D019), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 6, 0x923F82A4AF194F9B), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 7, 0xAB1C5ED5DA6D8118), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 8, 0xD807AA98A3030242), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 9, 0x12835B0145706FBE), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 10, 0x243185BE4EE4B28C), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 11, 0x550C7DC3D5FFB4E2), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 12, 0x72BE5D74F27B896F), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 13, 0x80DEB1FE3B1696B1), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 14, 0x9BDC06A725C71235), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 15, 0xC19BF174CF692694), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 16, 0xE49B69C19EF14AD2), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 17, 0xEFBE4786384F25E3), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 18, 0x0FC19DC68B8CD5B5), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 19, 0x240CA1CC77AC9C65), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 20, 0x2DE92C6F592B0275), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 21, 0x4A7484AA6EA6E483), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 22, 0x5CB0A9DCBD41FBD4), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 23, 0x76F988DA831153B5), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 24, 0x983E5152EE66DFAB), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 25, 0xA831C66D2DB43210), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 26, 0xB00327C898FB213F), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 27, 0xBF597FC7BEEF0EE4), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 28, 0xC6E00BF33DA88FC2), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 29, 0xD5A79147930AA725), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 30, 0x06CA6351E003826F), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 31, 0x142929670A0E6E70), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 32, 0x27B70A8546D22FFC), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 33, 0x2E1B21385C26C926), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 34, 0x4D2C6DFC5AC42AED), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 35, 0x53380D139D95B3DF), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 36, 0x650A73548BAF63DE), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 37, 0x766A0ABB3C77B2A8), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 38, 0x81C2C92E47EDAEE6), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 39, 0x92722C851482353B), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 40, 0xA2BFE8A14CF10364), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 41, 0xA81A664BBC423001), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 42, 0xC24B8B70D0F89791), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 43, 0xC76C51A30654BE30), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 44, 0xD192E819D6EF5218), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 45, 0xD69906245565A910), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 46, 0xF40E35855771202A), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 47, 0x106AA07032BBD1B8), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 48, 0x19A4C116B8D2D0C8), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 49, 0x1E376C085141AB53), + Rp512(6, 7, 
0, 1, 2, 3, 4, 5, 50, 0x2748774CDF8EEB99), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 51, 0x34B0BCB5E19B48A8), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 52, 0x391C0CB3C5C95A63), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 53, 0x4ED8AA4AE3418ACB), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 54, 0x5B9CCA4F7763E373), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 55, 0x682E6FF3D6B2B8A3), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 56, 0x748F82EE5DEFB2FC), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 57, 0x78A5636F43172F60), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 58, 0x84C87814A1F0AB72), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 59, 0x8CC702081A6439EC), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 60, 0x90BEFFFA23631E28), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 61, 0xA4506CEBDE82BDE9), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 62, 0xBEF9A3F7B2C67915), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 63, 0xC67178F2E372532B), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 64, 0xCA273ECEEA26619C), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 65, 0xD186B8C721C0C207), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 66, 0xEADA7DD6CDE0EB1E), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 67, 0xF57D4F7FEE6ED178), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 68, 0x06F067AA72176FBA), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 69, 0x0A637DC5A2C898A6), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 70, 0x113F9804BEF90DAE), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 71, 0x1B710B35131C471B), + Rp512(0, 1, 2, 3, 4, 5, 6, 7, 72, 0x28DB77F523047D84), + Rp512(7, 0, 1, 2, 3, 4, 5, 6, 73, 0x32CAAB7B40C72493), + Rp512(6, 7, 0, 1, 2, 3, 4, 5, 74, 0x3C9EBE0A15C9BEBC), + Rp512(5, 6, 7, 0, 1, 2, 3, 4, 75, 0x431D67C49C100D4C), + Rp512(4, 5, 6, 7, 0, 1, 2, 3, 76, 0x4CC5D4BECB3E42B6), + Rp512(3, 4, 5, 6, 7, 0, 1, 2, 77, 0x597F299CFC657E2A), + Rp512(2, 3, 4, 5, 6, 7, 0, 1, 78, 0x5FCB6FAB3AD6FAEC), + Rp512(1, 2, 3, 4, 5, 6, 7, 0, 79, 0x6C44198C4A475817), + }; + inline for (round0) |r| { + v[r.h] = + v[r.h] +% + (math.rotr(u64, v[r.e], u64(14)) ^ math.rotr(u64, v[r.e], u64(18)) ^ math.rotr(u64, v[r.e], u64(41))) +% + (v[r.g] ^ (v[r.e] & (v[r.f] ^ v[r.g]))) +% + r.k +% s[r.i]; + + v[r.d] = v[r.d] +% v[r.h]; + + v[r.h] = + v[r.h] +% + (math.rotr(u64, v[r.a], u64(28)) ^ math.rotr(u64, v[r.a], u64(34)) ^ math.rotr(u64, v[r.a], u64(39))) +% + ((v[r.a] & (v[r.b] | v[r.c])) | (v[r.b] & v[r.c])); + } + + d.s[0] +%= v[0]; + d.s[1] +%= v[1]; + d.s[2] +%= v[2]; + d.s[3] +%= v[3]; + d.s[4] +%= v[4]; + d.s[5] +%= v[5]; + d.s[6] +%= v[6]; + d.s[7] +%= v[7]; + } +};} + +test "sha384 single" { + const h1 = "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b"; + htest.assertEqualHash(Sha384, h1, ""); + + const h2 = "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7"; + htest.assertEqualHash(Sha384, h2, "abc"); + + const h3 = "09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712fcc7c71a557e2db966c3e9fa91746039"; + htest.assertEqualHash(Sha384, h3, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"); +} + +test "sha384 streaming" { + var h = Sha384.init(); + var out: [48]u8 = undefined; + + const h1 = "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da274edebfe76f65fbd51ad2f14898b95b"; + h.final(out[0..]); + htest.assertEqual(h1, out[0..]); + + const h2 = "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed8086072ba1e7cc2358baeca134c825a7"; + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); +} + +test "sha512 single" { + const h1 = 
"cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"; + htest.assertEqualHash(Sha512, h1, ""); + + const h2 = "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"; + htest.assertEqualHash(Sha512, h2, "abc"); + + const h3 = "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909"; + htest.assertEqualHash(Sha512, h3, "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu"); +} + +test "sha512 streaming" { + var h = Sha512.init(); + var out: [64]u8 = undefined; + + const h1 = "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e"; + h.final(out[0..]); + htest.assertEqual(h1, out[0..]); + + const h2 = "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f"; + + h.reset(); + h.update("abc"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); + + h.reset(); + h.update("a"); + h.update("b"); + h.update("c"); + h.final(out[0..]); + htest.assertEqual(h2, out[0..]); +} diff --git a/std/crypto/test.zig b/std/crypto/test.zig new file mode 100644 index 0000000000..a357988901 --- /dev/null +++ b/std/crypto/test.zig @@ -0,0 +1,22 @@ +const debug = @import("../debug/index.zig"); +const mem = @import("../mem.zig"); +const fmt = @import("../fmt/index.zig"); + +// Hash using the specified hasher `H` asserting `expected == H(input)`. +pub fn assertEqualHash(comptime Hasher: var, comptime expected: []const u8, input: []const u8) { + var h: [expected.len / 2]u8 = undefined; + Hasher.hash(input, h[0..]); + + assertEqual(expected, h); +} + +// Assert `expected` == `input` where `input` is a bytestring. +pub fn assertEqual(comptime expected: []const u8, input: []const u8) { + var expected_bytes: [expected.len / 2]u8 = undefined; + for (expected_bytes) |*r, i| { + *r = fmt.parseInt(u8, expected[2*i .. 2*i+2], 16) catch unreachable; + } + + debug.assert(mem.eql(u8, expected_bytes, input)); +} + diff --git a/std/debug/index.zig b/std/debug/index.zig index 464974b7de..87e81ab8c2 100644 --- a/std/debug/index.zig +++ b/std/debug/index.zig @@ -13,6 +13,10 @@ pub const FailingAllocator = @import("failing_allocator.zig").FailingAllocator; error MissingDebugInfo; error InvalidDebugInfo; error UnsupportedDebugInfo; +error UnknownObjectFormat; +error TodoSupportCoffDebugInfo; +error TodoSupportMachoDebugInfo; +error TodoSupportCOFFDebugInfo; /// Tries to write to stderr, unbuffered, and ignores any error returned. @@ -37,10 +41,43 @@ fn getStderrStream() -> %&io.OutStream { } } -/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned. -pub fn dumpStackTrace() { +var self_debug_info: ?&ElfStackTrace = null; +pub fn getSelfDebugInfo() -> %&ElfStackTrace { + if (self_debug_info) |info| { + return info; + } else { + const info = try openSelfDebugInfo(global_allocator); + self_debug_info = info; + return info; + } +} + +/// Tries to print the current stack trace to stderr, unbuffered, and ignores any error returned. 
+pub fn dumpCurrentStackTrace() { const stderr = getStderrStream() catch return; - writeStackTrace(stderr, global_allocator, stderr_file.isTty(), 1) catch return; + const debug_info = getSelfDebugInfo() catch |err| { + stderr.print("Unable to open debug info: {}\n", @errorName(err)) catch return; + return; + }; + defer debug_info.close(); + writeCurrentStackTrace(stderr, global_allocator, debug_info, stderr_file.isTty(), 1) catch |err| { + stderr.print("Unable to dump stack trace: {}\n", @errorName(err)) catch return; + return; + }; +} + +/// Tries to print a stack trace to stderr, unbuffered, and ignores any error returned. +pub fn dumpStackTrace(stack_trace: &const builtin.StackTrace) { + const stderr = getStderrStream() catch return; + const debug_info = getSelfDebugInfo() catch |err| { + stderr.print("Unable to open debug info: {}\n", @errorName(err)) catch return; + return; + }; + defer debug_info.close(); + writeStackTrace(stack_trace, stderr, global_allocator, debug_info, stderr_file.isTty()) catch |err| { + stderr.print("Unable to dump stack trace: {}\n", @errorName(err)) catch return; + return; + }; } /// This function invokes undefined behavior when `ok` is `false`. @@ -88,7 +125,21 @@ pub fn panic(comptime format: []const u8, args: ...) -> noreturn { const stderr = getStderrStream() catch os.abort(); stderr.print(format ++ "\n", args) catch os.abort(); - writeStackTrace(stderr, global_allocator, stderr_file.isTty(), 1) catch os.abort(); + dumpCurrentStackTrace(); + + os.abort(); +} + +pub fn panicWithTrace(trace: &const builtin.StackTrace, comptime format: []const u8, args: ...) -> noreturn { + if (panicking) { + os.abort(); + } else { + panicking = true; + } + const stderr = getStderrStream() catch os.abort(); + stderr.print(format ++ "\n", args) catch os.abort(); + dumpStackTrace(trace); + dumpCurrentStackTrace(); os.abort(); } @@ -101,12 +152,91 @@ const RESET = "\x1b[0m"; error PathNotFound; error InvalidDebugInfo; -pub fn writeStackTrace(out_stream: &io.OutStream, allocator: &mem.Allocator, tty_color: bool, - ignore_frame_count: usize) -> %void +pub fn writeStackTrace(stack_trace: &const builtin.StackTrace, out_stream: &io.OutStream, allocator: &mem.Allocator, + debug_info: &ElfStackTrace, tty_color: bool) -> %void { + var frame_index: usize = undefined; + var frames_left: usize = undefined; + if (stack_trace.index < stack_trace.instruction_addresses.len) { + frame_index = 0; + frames_left = stack_trace.index; + } else { + frame_index = (stack_trace.index + 1) % stack_trace.instruction_addresses.len; + frames_left = stack_trace.instruction_addresses.len; + } + + while (frames_left != 0) : ({ + frames_left -= 1; + frame_index = (frame_index + 1) % stack_trace.instruction_addresses.len; + }) { + const return_address = stack_trace.instruction_addresses[frame_index]; + try printSourceAtAddress(debug_info, out_stream, return_address); + } +} + +pub fn writeCurrentStackTrace(out_stream: &io.OutStream, allocator: &mem.Allocator, + debug_info: &ElfStackTrace, tty_color: bool, ignore_frame_count: usize) -> %void +{ + var ignored_count: usize = 0; + + var fp = @ptrToInt(@frameAddress()); + while (fp != 0) : (fp = *@intToPtr(&const usize, fp)) { + if (ignored_count < ignore_frame_count) { + ignored_count += 1; + continue; + } + + const return_address = *@intToPtr(&const usize, fp + @sizeOf(usize)); + try printSourceAtAddress(debug_info, out_stream, return_address); + } +} + +fn printSourceAtAddress(debug_info: &ElfStackTrace, out_stream: &io.OutStream, address: usize) -> %void { + if 
(builtin.os == builtin.Os.windows) { + return error.UnsupportedDebugInfo; + } + // TODO we really should be able to convert @sizeOf(usize) * 2 to a string literal + // at compile time. I'll call it issue #313 + const ptr_hex = if (@sizeOf(usize) == 4) "0x{x8}" else "0x{x16}"; + + const compile_unit = findCompileUnit(debug_info, address) catch { + try out_stream.print("???:?:?: " ++ DIM ++ ptr_hex ++ " in ??? (???)" ++ RESET ++ "\n ???\n\n", + address); + return; + }; + const compile_unit_name = try compile_unit.die.getAttrString(debug_info, DW.AT_name); + if (getLineNumberInfo(debug_info, compile_unit, address - 1)) |line_info| { + defer line_info.deinit(); + try out_stream.print(WHITE ++ "{}:{}:{}" ++ RESET ++ ": " ++ + DIM ++ ptr_hex ++ " in ??? ({})" ++ RESET ++ "\n", + line_info.file_name, line_info.line, line_info.column, + address, compile_unit_name); + if (printLineFromFile(debug_info.allocator(), out_stream, line_info)) { + if (line_info.column == 0) { + try out_stream.write("\n"); + } else { + {var col_i: usize = 1; while (col_i < line_info.column) : (col_i += 1) { + try out_stream.writeByte(' '); + }} + try out_stream.write(GREEN ++ "^" ++ RESET ++ "\n"); + } + } else |err| switch (err) { + error.EndOfFile, error.PathNotFound => {}, + else => return err, + } + } else |err| switch (err) { + error.MissingDebugInfo, error.InvalidDebugInfo => { + try out_stream.print(ptr_hex ++ " in ??? ({})\n", address, compile_unit_name); + }, + else => return err, + } +} + +pub fn openSelfDebugInfo(allocator: &mem.Allocator) -> %&ElfStackTrace { switch (builtin.object_format) { builtin.ObjectFormat.elf => { - var stack_trace = ElfStackTrace { + const st = try allocator.create(ElfStackTrace); + *st = ElfStackTrace { .self_exe_file = undefined, .elf = undefined, .debug_info = undefined, @@ -117,12 +247,11 @@ pub fn writeStackTrace(out_stream: &io.OutStream, allocator: &mem.Allocator, tty .abbrev_table_list = ArrayList(AbbrevTableHeader).init(allocator), .compile_unit_list = ArrayList(CompileUnit).init(allocator), }; - const st = &stack_trace; st.self_exe_file = try os.openSelfExe(); - defer st.self_exe_file.close(); + %defer st.self_exe_file.close(); try st.elf.openFile(allocator, &st.self_exe_file); - defer st.elf.close(); + %defer st.elf.close(); st.debug_info = (try st.elf.findSection(".debug_info")) ?? return error.MissingDebugInfo; st.debug_abbrev = (try st.elf.findSection(".debug_abbrev")) ?? return error.MissingDebugInfo; @@ -130,67 +259,19 @@ pub fn writeStackTrace(out_stream: &io.OutStream, allocator: &mem.Allocator, tty st.debug_line = (try st.elf.findSection(".debug_line")) ?? return error.MissingDebugInfo; st.debug_ranges = (try st.elf.findSection(".debug_ranges")); try scanAllCompileUnits(st); - - var ignored_count: usize = 0; - - var fp = @ptrToInt(@frameAddress()); - while (fp != 0) : (fp = *@intToPtr(&const usize, fp)) { - if (ignored_count < ignore_frame_count) { - ignored_count += 1; - continue; - } - - const return_address = *@intToPtr(&const usize, fp + @sizeOf(usize)); - - // TODO we really should be able to convert @sizeOf(usize) * 2 to a string literal - // at compile time. I'll call it issue #313 - const ptr_hex = if (@sizeOf(usize) == 4) "0x{x8}" else "0x{x16}"; - - const compile_unit = findCompileUnit(st, return_address) catch { - try out_stream.print("???:?:?: " ++ DIM ++ ptr_hex ++ " in ??? 
(???)" ++ RESET ++ "\n ???\n\n", - return_address); - continue; - }; - const compile_unit_name = try compile_unit.die.getAttrString(st, DW.AT_name); - if (getLineNumberInfo(st, compile_unit, usize(return_address) - 1)) |line_info| { - defer line_info.deinit(); - try out_stream.print(WHITE ++ "{}:{}:{}" ++ RESET ++ ": " ++ - DIM ++ ptr_hex ++ " in ??? ({})" ++ RESET ++ "\n", - line_info.file_name, line_info.line, line_info.column, - return_address, compile_unit_name); - if (printLineFromFile(st.allocator(), out_stream, line_info)) { - if (line_info.column == 0) { - try out_stream.write("\n"); - } else { - {var col_i: usize = 1; while (col_i < line_info.column) : (col_i += 1) { - try out_stream.writeByte(' '); - }} - try out_stream.write(GREEN ++ "^" ++ RESET ++ "\n"); - } - } else |err| switch (err) { - error.EndOfFile, error.PathNotFound => {}, - else => return err, - } - } else |err| switch (err) { - error.MissingDebugInfo, error.InvalidDebugInfo => { - try out_stream.print(ptr_hex ++ " in ??? ({})\n", - return_address, compile_unit_name); - }, - else => return err, - } - } + return st; }, builtin.ObjectFormat.coff => { - try out_stream.write("(stack trace unavailable for COFF object format)\n"); + return error.TodoSupportCoffDebugInfo; }, builtin.ObjectFormat.macho => { - try out_stream.write("(stack trace unavailable for Mach-O object format)\n"); + return error.TodoSupportMachoDebugInfo; }, builtin.ObjectFormat.wasm => { - try out_stream.write("(stack trace unavailable for WASM object format)\n"); + return error.TodoSupportCOFFDebugInfo; }, builtin.ObjectFormat.unknown => { - try out_stream.write("(stack trace unavailable for unknown object format)\n"); + return error.UnknownObjectFormat; }, } } @@ -228,7 +309,7 @@ fn printLineFromFile(allocator: &mem.Allocator, out_stream: &io.OutStream, line_ } } -const ElfStackTrace = struct { +pub const ElfStackTrace = struct { self_exe_file: io.File, elf: elf.Elf, debug_info: &elf.SectionHeader, @@ -248,6 +329,11 @@ const ElfStackTrace = struct { const in_stream = &in_file_stream.stream; return readStringRaw(self.allocator(), in_stream); } + + pub fn close(self: &ElfStackTrace) { + self.self_exe_file.close(); + self.elf.close(); + } }; const PcRange = struct { diff --git a/std/index.zig b/std/index.zig index a9a0038e60..3986063068 100644 --- a/std/index.zig +++ b/std/index.zig @@ -10,6 +10,7 @@ pub const LinkedList = @import("linked_list.zig").LinkedList; pub const base64 = @import("base64.zig"); pub const build = @import("build.zig"); pub const c = @import("c/index.zig"); +pub const crypto = @import("crypto/index.zig"); pub const cstr = @import("cstr.zig"); pub const debug = @import("debug/index.zig"); pub const dwarf = @import("dwarf.zig"); @@ -39,6 +40,7 @@ test "std" { _ = @import("base64.zig"); _ = @import("build.zig"); _ = @import("c/index.zig"); + _ = @import("crypto/index.zig"); _ = @import("cstr.zig"); _ = @import("debug/index.zig"); _ = @import("dwarf.zig"); diff --git a/std/io.zig b/std/io.zig index c6fd5502a2..605553b0ea 100644 --- a/std/io.zig +++ b/std/io.zig @@ -224,7 +224,7 @@ pub const File = struct { }; } }, - else => @compileError("unsupported OS"), + else => @compileError("unsupported OS: " ++ @tagName(builtin.os)), } } diff --git a/std/io_test.zig b/std/io_test.zig index 3b1f646616..1767a546ea 100644 --- a/std/io_test.zig +++ b/std/io_test.zig @@ -8,11 +8,6 @@ const os = std.os; const builtin = @import("builtin"); test "write a file, read it, then delete it" { - if (builtin.os == builtin.Os.windows and builtin.arch == 
builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } var data: [1024]u8 = undefined; var rng = Rand.init(1234); rng.fillBytes(data[0..]); diff --git a/std/math/acosh.zig b/std/math/acosh.zig index 150bcd4543..91094302dd 100644 --- a/std/math/acosh.zig +++ b/std/math/acosh.zig @@ -55,11 +55,6 @@ fn acosh64(x: f64) -> f64 { } test "math.acosh" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(acosh(f32(1.5)) == acosh32(1.5)); assert(acosh(f64(1.5)) == acosh64(1.5)); } diff --git a/std/math/cos.zig b/std/math/cos.zig index a65ea1ace8..88a0193db1 100644 --- a/std/math/cos.zig +++ b/std/math/cos.zig @@ -146,11 +146,6 @@ test "math.cos" { } test "math.cos32" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } const epsilon = 0.000001; assert(math.approxEq(f32, cos32(0.0), 1.0, epsilon)); diff --git a/std/math/cosh.zig b/std/math/cosh.zig index 05565d6b3a..6e57e85d14 100644 --- a/std/math/cosh.zig +++ b/std/math/cosh.zig @@ -81,11 +81,6 @@ fn cosh64(x: f64) -> f64 { } test "math.cosh" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(cosh(f32(1.5)) == cosh32(1.5)); assert(cosh(f64(1.5)) == cosh64(1.5)); } diff --git a/std/math/index.zig b/std/math/index.zig index 081c59a88a..07a4d9a731 100644 --- a/std/math/index.zig +++ b/std/math/index.zig @@ -267,6 +267,45 @@ test "math.shr" { assert(shr(u8, 0b11111111, isize(-2)) == 0b11111100); } +/// Rotates right. Only unsigned values can be rotated. +/// Negative shift values results in shift modulo the bit count. +pub fn rotr(comptime T: type, x: T, r: var) -> T { + if (T.is_signed) { + @compileError("cannot rotate signed integer"); + } else { + const ar = @mod(r, T.bit_count); + return shr(T, x, ar) | shl(T, x, T.bit_count - ar); + } +} + +test "math.rotr" { + assert(rotr(u8, 0b00000001, usize(0)) == 0b00000001); + assert(rotr(u8, 0b00000001, usize(9)) == 0b10000000); + assert(rotr(u8, 0b00000001, usize(8)) == 0b00000001); + assert(rotr(u8, 0b00000001, usize(4)) == 0b00010000); + assert(rotr(u8, 0b00000001, isize(-1)) == 0b00000010); +} + +/// Rotates left. Only unsigned values can be rotated. +/// Negative shift values results in shift modulo the bit count. 
+pub fn rotl(comptime T: type, x: T, r: var) -> T { + if (T.is_signed) { + @compileError("cannot rotate signed integer"); + } else { + const ar = @mod(r, T.bit_count); + return shl(T, x, ar) | shr(T, x, T.bit_count - ar); + } +} + +test "math.rotl" { + assert(rotl(u8, 0b00000001, usize(0)) == 0b00000001); + assert(rotl(u8, 0b00000001, usize(9)) == 0b00000010); + assert(rotl(u8, 0b00000001, usize(8)) == 0b00000001); + assert(rotl(u8, 0b00000001, usize(4)) == 0b00010000); + assert(rotl(u8, 0b00000001, isize(-1)) == 0b10000000); +} + + pub fn Log2Int(comptime T: type) -> type { return @IntType(false, log2(T.bit_count)); } @@ -320,11 +359,6 @@ pub fn divTrunc(comptime T: type, numerator: T, denominator: T) -> %T { } test "math.divTrunc" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } testDivTrunc(); comptime testDivTrunc(); } @@ -350,11 +384,6 @@ pub fn divFloor(comptime T: type, numerator: T, denominator: T) -> %T { } test "math.divFloor" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } testDivFloor(); comptime testDivFloor(); } @@ -384,11 +413,6 @@ pub fn divExact(comptime T: type, numerator: T, denominator: T) -> %T { } test "math.divExact" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } testDivExact(); comptime testDivExact(); } diff --git a/std/math/ln.zig b/std/math/ln.zig index c5a5c93842..57e2ffdec8 100644 --- a/std/math/ln.zig +++ b/std/math/ln.zig @@ -147,11 +147,6 @@ pub fn ln_64(x_: f64) -> f64 { } test "math.ln" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(ln(f32(0.2)) == ln_32(0.2)); assert(ln(f64(0.2)) == ln_64(0.2)); } diff --git a/std/math/log.zig b/std/math/log.zig index 4edb77bb1a..6bed4c0da1 100644 --- a/std/math/log.zig +++ b/std/math/log.zig @@ -56,11 +56,6 @@ test "math.log float" { } test "math.log float_special" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(log(f32, 2, 0.2301974) == math.log2(f32(0.2301974))); assert(log(f32, 10, 0.2301974) == math.log10(f32(0.2301974))); diff --git a/std/math/log10.zig b/std/math/log10.zig index 0b6fc31aa7..e27d3eb272 100644 --- a/std/math/log10.zig +++ b/std/math/log10.zig @@ -172,11 +172,6 @@ pub fn log10_64(x_: f64) -> f64 { } test "math.log10" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(log10(f32(0.2)) == log10_32(0.2)); assert(log10(f64(0.2)) == log10_64(0.2)); } diff --git a/std/math/log2.zig b/std/math/log2.zig index a9789e47cf..7839759974 100644 --- a/std/math/log2.zig +++ b/std/math/log2.zig @@ -170,11 +170,6 @@ pub fn log2_64(x_: f64) -> f64 { } test "math.log2" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(log2(f32(0.2)) == log2_32(0.2)); assert(log2(f64(0.2)) == log2_64(0.2)); } diff --git 
a/std/math/pow.zig b/std/math/pow.zig index b21fb3a921..c0157ff16e 100644 --- a/std/math/pow.zig +++ b/std/math/pow.zig @@ -176,12 +176,6 @@ fn isOddInteger(x: f64) -> bool { } test "math.pow" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } - const epsilon = 0.000001; assert(math.approxEq(f32, pow(f32, 0.0, 3.3), 0.0, epsilon)); diff --git a/std/math/round.zig b/std/math/round.zig index 3abee040ef..06b9e41f58 100644 --- a/std/math/round.zig +++ b/std/math/round.zig @@ -98,11 +98,6 @@ test "math.round" { } test "math.round32" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(round32(1.3) == 1.0); assert(round32(-1.3) == -1.0); assert(round32(0.2) == 0.0); diff --git a/std/math/sin.zig b/std/math/sin.zig index 99008af469..5b1090e99e 100644 --- a/std/math/sin.zig +++ b/std/math/sin.zig @@ -150,11 +150,6 @@ test "math.sin" { } test "math.sin32" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } const epsilon = 0.000001; assert(math.approxEq(f32, sin32(0.0), 0.0, epsilon)); diff --git a/std/math/sinh.zig b/std/math/sinh.zig index ca06bc615a..b0057f897b 100644 --- a/std/math/sinh.zig +++ b/std/math/sinh.zig @@ -88,11 +88,6 @@ fn sinh64(x: f64) -> f64 { } test "math.sinh" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(sinh(f32(1.5)) == sinh32(1.5)); assert(sinh(f64(1.5)) == sinh64(1.5)); } diff --git a/std/math/tan.zig b/std/math/tan.zig index e62e9b8899..9afe102d64 100644 --- a/std/math/tan.zig +++ b/std/math/tan.zig @@ -136,11 +136,6 @@ test "math.tan" { } test "math.tan32" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } const epsilon = 0.000001; assert(math.approxEq(f32, tan32(0.0), 0.0, epsilon)); diff --git a/std/math/tanh.zig b/std/math/tanh.zig index 813b4f4561..8560538cdf 100644 --- a/std/math/tanh.zig +++ b/std/math/tanh.zig @@ -112,11 +112,6 @@ fn tanh64(x: f64) -> f64 { } test "math.tanh" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } assert(tanh(f32(1.5)) == tanh32(1.5)); assert(tanh(f64(1.5)) == tanh64(1.5)); } diff --git a/std/os/index.zig b/std/os/index.zig index e338359522..fd8eb84ab4 100644 --- a/std/os/index.zig +++ b/std/os/index.zig @@ -148,7 +148,7 @@ pub coldcc fn abort() -> noreturn { } /// Exits the program cleanly with the specified status code. -pub coldcc fn exit(status: i32) -> noreturn { +pub coldcc fn exit(status: u8) -> noreturn { if (builtin.link_libc) { c.exit(status); } @@ -157,14 +157,7 @@ pub coldcc fn exit(status: i32) -> noreturn { posix.exit(status); }, Os.windows => { - // Map a possibly negative status code to a non-negative status for the systems default - // integer width. 
- const p_status = if (@sizeOf(c_uint) < @sizeOf(u32)) - @truncate(c_uint, @bitCast(u32, status)) - else - c_uint(@bitCast(u32, status)); - - windows.ExitProcess(p_status); + windows.ExitProcess(status); }, else => @compileError("Unsupported OS"), } diff --git a/std/rand.zig b/std/rand.zig index 09109cfbf7..5f4c9c2015 100644 --- a/std/rand.zig +++ b/std/rand.zig @@ -194,11 +194,6 @@ fn MersenneTwister( } test "rand float 32" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } var r = Rand.init(42); var i: usize = 0; while (i < 1000) : (i += 1) { @@ -209,11 +204,6 @@ test "rand float 32" { } test "rand.MT19937_64" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } var rng = MT19937_64.init(rand_test.mt64_seed); for (rand_test.mt64_data) |value| { assert(value == rng.get()); @@ -221,11 +211,6 @@ test "rand.MT19937_64" { } test "rand.MT19937_32" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } var rng = MT19937_32.init(rand_test.mt32_seed); for (rand_test.mt32_data) |value| { assert(value == rng.get()); @@ -233,11 +218,6 @@ test "rand.MT19937_32" { } test "rand.Rand.range" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } var r = Rand.init(42); testRange(&r, -4, 3); testRange(&r, -4, -1); diff --git a/std/sort.zig b/std/sort.zig index 02cb9acb2e..8554ab495a 100644 --- a/std/sort.zig +++ b/std/sort.zig @@ -1020,11 +1020,6 @@ fn cmpByValue(a: &const IdAndValue, b: &const IdAndValue) -> bool { } test "std.sort" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } const u8cases = [][]const []const u8 { [][]const u8{"", ""}, [][]const u8{"a", "a"}, @@ -1061,11 +1056,6 @@ test "std.sort" { } test "std.sort descending" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } const rev_cases = [][]const []const i32 { [][]const i32{[]i32{}, []i32{}}, [][]const i32{[]i32{1}, []i32{1}}, @@ -1085,11 +1075,6 @@ test "std.sort descending" { } test "another sort case" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } var arr = []i32{ 5, 3, 1, 2, 4 }; sort(i32, arr[0..], i32asc); @@ -1097,11 +1082,6 @@ test "another sort case" { } test "sort fuzz testing" { - if (builtin.os == builtin.Os.windows and builtin.arch == builtin.Arch.i386) { - // TODO get this test passing - // https://github.com/zig-lang/zig/issues/537 - return; - } var rng = std.rand.Rand.init(0x12345678); const test_case_count = 10; var i: usize = 0; diff --git a/std/special/bootstrap.zig b/std/special/bootstrap.zig index 55d28a2d0e..77bb7316a9 100644 --- a/std/special/bootstrap.zig +++ b/std/special/bootstrap.zig @@ -21,8 +21,7 @@ comptime { } extern fn zenMain() -> noreturn { - root.main() catch std.os.posix.exit(1); - std.os.posix.exit(0); + std.os.posix.exit(callMain()); } nakedcc fn 
_start() -> noreturn { @@ -43,29 +42,55 @@ nakedcc fn _start() -> noreturn { extern fn WinMainCRTStartup() -> noreturn { @setAlignStack(16); - root.main() catch std.os.windows.ExitProcess(1); - std.os.windows.ExitProcess(0); + std.os.windows.ExitProcess(callMain()); } fn posixCallMainAndExit() -> noreturn { const argc = *argc_ptr; const argv = @ptrCast(&&u8, &argc_ptr[1]); const envp = @ptrCast(&?&u8, &argv[argc + 1]); - callMain(argc, argv, envp) catch std.os.posix.exit(1); - std.os.posix.exit(0); + std.os.posix.exit(callMainWithArgs(argc, argv, envp)); } -fn callMain(argc: usize, argv: &&u8, envp: &?&u8) -> %void { +fn callMainWithArgs(argc: usize, argv: &&u8, envp: &?&u8) -> u8 { std.os.ArgIteratorPosix.raw = argv[0..argc]; var env_count: usize = 0; while (envp[env_count] != null) : (env_count += 1) {} std.os.posix_environ_raw = @ptrCast(&&u8, envp)[0..env_count]; - return root.main(); + return callMain(); } extern fn main(c_argc: i32, c_argv: &&u8, c_envp: &?&u8) -> i32 { - callMain(usize(c_argc), c_argv, c_envp) catch return 1; - return 0; + return callMainWithArgs(usize(c_argc), c_argv, c_envp); +} + +fn callMain() -> u8 { + switch (@typeId(@typeOf(root.main).ReturnType)) { + builtin.TypeId.NoReturn => { + root.main(); + }, + builtin.TypeId.Void => { + root.main(); + return 0; + }, + builtin.TypeId.Int => { + if (@typeOf(root.main).ReturnType.bit_count != 8) { + @compileError("expected return type of main to be 'u8', 'noreturn', 'void', or '%void'"); + } + return root.main(); + }, + builtin.TypeId.ErrorUnion => { + root.main() catch |err| { + std.debug.warn("error: {}\n", @errorName(err)); + if (@errorReturnTrace()) |trace| { + std.debug.dumpStackTrace(trace); + } + return 1; + }; + return 0; + }, + else => @compileError("expected return type of main to be 'u8', 'noreturn', 'void', or '%void'"), + } } diff --git a/std/special/build_runner.zig b/std/special/build_runner.zig index 24d9c756e7..4722aff07a 100644 --- a/std/special/build_runner.zig +++ b/std/special/build_runner.zig @@ -14,7 +14,7 @@ pub fn main() -> %void { var arg_it = os.args(); // TODO use a more general purpose allocator here - var inc_allocator = std.heap.IncrementingAllocator.init(40 * 1024 * 1024) catch unreachable; + var inc_allocator = try std.heap.IncrementingAllocator.init(40 * 1024 * 1024); defer inc_allocator.deinit(); const allocator = &inc_allocator.allocator; @@ -107,12 +107,12 @@ pub fn main() -> %void { return usageAndErr(&builder, false, try stderr_stream); } } else { - targets.append(arg) catch unreachable; + try targets.append(arg); } } builder.setInstallPrefix(prefix); - root.build(&builder); + try root.build(&builder); if (builder.validateUserInputDidItFail()) return usageAndErr(&builder, true, try stderr_stream); @@ -129,7 +129,7 @@ fn usage(builder: &Builder, already_ran_build: bool, out_stream: &io.OutStream) // run the build script to collect the options if (!already_ran_build) { builder.setInstallPrefix(null); - root.build(builder); + try root.build(builder); } // This usage text has to be synchronized with src/main.cpp diff --git a/std/special/builtin.zig b/std/special/builtin.zig index e6c09863ca..dd77ba9c75 100644 --- a/std/special/builtin.zig +++ b/std/special/builtin.zig @@ -5,7 +5,7 @@ const builtin = @import("builtin"); // Avoid dragging in the debug safety mechanisms into this .o file, // unless we're trying to test this file. 
-pub coldcc fn panic(msg: []const u8) -> noreturn { +pub coldcc fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> noreturn { if (builtin.is_test) { @import("std").debug.panic("{}", msg); } else { diff --git a/std/special/compiler_rt/index.zig b/std/special/compiler_rt/index.zig index a554da5bf2..5c18f9ecc3 100644 --- a/std/special/compiler_rt/index.zig +++ b/std/special/compiler_rt/index.zig @@ -74,7 +74,7 @@ const __udivmoddi4 = @import("udivmoddi4.zig").__udivmoddi4; // Avoid dragging in the debug safety mechanisms into this .o file, // unless we're trying to test this file. -pub coldcc fn panic(msg: []const u8) -> noreturn { +pub coldcc fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> noreturn { if (is_test) { @import("std").debug.panic("{}", msg); } else { diff --git a/std/special/panic.zig b/std/special/panic.zig index 03c2586739..1b22658c7f 100644 --- a/std/special/panic.zig +++ b/std/special/panic.zig @@ -4,14 +4,18 @@ // have to be added in the compiler. const builtin = @import("builtin"); +const std = @import("std"); -pub coldcc fn panic(msg: []const u8) -> noreturn { +pub coldcc fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> noreturn { switch (builtin.os) { // TODO: fix panic in zen. builtin.Os.freestanding, builtin.Os.zen => { while (true) {} }, else => { + if (error_return_trace) |trace| { + @import("std").debug.panicWithTrace(trace, "{}", msg); + } @import("std").debug.panic("{}", msg); }, } diff --git a/std/special/test_runner.zig b/std/special/test_runner.zig index 1a36f50b62..c5c0bad40c 100644 --- a/std/special/test_runner.zig +++ b/std/special/test_runner.zig @@ -8,10 +8,7 @@ pub fn main() -> %void { for (test_fn_list) |test_fn, i| { warn("Test {}/{} {}...", i + 1, test_fn_list.len, test_fn.name); - test_fn.func() catch |err| { - warn("{}\n", err); - return err; - }; + try test_fn.func(); warn("OK\n"); } diff --git a/test/build_examples.zig b/test/build_examples.zig index 29a15fd515..666dac8df1 100644 --- a/test/build_examples.zig +++ b/test/build_examples.zig @@ -16,4 +16,5 @@ pub fn addCases(cases: &tests.BuildExamplesContext) { cases.addBuildFile("test/standalone/issue_339/build.zig"); cases.addBuildFile("test/standalone/pkg_import/build.zig"); cases.addBuildFile("test/standalone/use_alias/build.zig"); + cases.addBuildFile("test/standalone/brace_expansion/build.zig"); } diff --git a/test/cases/math.zig b/test/cases/math.zig index b4e0e4cfd6..090e2b9dfd 100644 --- a/test/cases/math.zig +++ b/test/cases/math.zig @@ -26,6 +26,29 @@ fn testDivision() { assert(divTrunc(i32, -5, 3) == -1); assert(divTrunc(f32, 5.0, 3.0) == 1.0); assert(divTrunc(f32, -5.0, 3.0) == -1.0); + + comptime { + assert( + 1194735857077236777412821811143690633098347576 % + 508740759824825164163191790951174292733114988 == + 177254337427586449086438229241342047632117600); + assert(@rem(-1194735857077236777412821811143690633098347576, + 508740759824825164163191790951174292733114988) == + -177254337427586449086438229241342047632117600); + assert(1194735857077236777412821811143690633098347576 / + 508740759824825164163191790951174292733114988 == + 2); + assert(@divTrunc(-1194735857077236777412821811143690633098347576, + 508740759824825164163191790951174292733114988) == + -2); + assert(@divTrunc(1194735857077236777412821811143690633098347576, + -508740759824825164163191790951174292733114988) == + -2); + assert(@divTrunc(-1194735857077236777412821811143690633098347576, + -508740759824825164163191790951174292733114988) == + 2); + 
assert(4126227191251978491697987544882340798050766755606969681711 % 10 == 1); + } } fn div(comptime T: type, a: T, b: T) -> T { return a / b; diff --git a/test/compile_errors.zig b/test/compile_errors.zig index 005fe55e41..6b2ea545ed 100644 --- a/test/compile_errors.zig +++ b/test/compile_errors.zig @@ -1,6 +1,15 @@ const tests = @import("tests.zig"); pub fn addCases(cases: &tests.CompileErrorContext) { + cases.add("wrong return type for main", + \\pub fn main() -> f32 { } + , "error: expected return type of main to be 'u8', 'noreturn', 'void', or '%void'"); + + cases.add("double ?? on main return value", + \\pub fn main() -> ??void { + \\} + , "error: expected return type of main to be 'u8', 'noreturn', 'void', or '%void'"); + cases.add("bad identifier in function with struct defined inside function which references local const", \\export fn entry() { \\ const BlockKind = u32; @@ -1059,15 +1068,6 @@ pub fn addCases(cases: &tests.CompileErrorContext) { , ".tmp_source.zig:2:5: error: expected type 'void', found 'error'"); - cases.add("wrong return type for main", - \\pub fn main() { } - , ".tmp_source.zig:1:15: error: expected return type of main to be '%void', instead is 'void'"); - - cases.add("double ?? on main return value", - \\pub fn main() -> ??void { - \\} - , ".tmp_source.zig:1:18: error: expected return type of main to be '%void', instead is '??void'"); - cases.add("invalid pointer for var type", \\extern fn ext() -> usize; \\var bytes: [ext()]u8 = undefined; diff --git a/test/debug_safety.zig b/test/debug_safety.zig index fde5b061ee..b32ffb34f0 100644 --- a/test/debug_safety.zig +++ b/test/debug_safety.zig @@ -2,7 +2,7 @@ const tests = @import("tests.zig"); pub fn addCases(cases: &tests.CompareOutputContext) { cases.addDebugSafety("calling panic", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\pub fn main() -> %void { @@ -11,7 +11,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("out of bounds slice access", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\pub fn main() -> %void { @@ -25,7 +25,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("integer addition overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -39,7 +39,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("integer subtraction overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -53,7 +53,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("integer multiplication overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -67,7 +67,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("integer negation overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, 
stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -81,7 +81,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("signed integer division overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -95,7 +95,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("signed shift left overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -109,7 +109,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("unsigned shift left overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -123,7 +123,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("signed shift right overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -137,7 +137,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("unsigned shift right overflow", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -151,7 +151,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("integer division by zero", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -164,7 +164,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("exact division failure", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -178,7 +178,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("cast []u8 to bigger slice of wrong size", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -192,7 +192,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("value does not fit in shortening cast", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -206,7 +206,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("signed integer not fitting in cast to unsigned integer", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Whatever; @@ -220,7 +220,7 @@ pub fn addCases(cases: 
&tests.CompareOutputContext) { ); cases.addDebugSafety("unwrap error", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ if (@import("std").mem.eql(u8, message, "attempt to unwrap error: Whatever")) { \\ @import("std").os.exit(126); // good \\ } @@ -236,7 +236,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("cast integer to error and no code matches", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\pub fn main() -> %void { @@ -248,7 +248,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("@alignCast misaligned", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\error Wrong; @@ -265,7 +265,7 @@ pub fn addCases(cases: &tests.CompareOutputContext) { ); cases.addDebugSafety("bad union field access", - \\pub fn panic(message: []const u8) -> noreturn { + \\pub fn panic(message: []const u8, stack_trace: ?&@import("builtin").StackTrace) -> noreturn { \\ @import("std").os.exit(126); \\} \\ diff --git a/test/standalone/brace_expansion/build.zig b/test/standalone/brace_expansion/build.zig new file mode 100644 index 0000000000..d170b50fe0 --- /dev/null +++ b/test/standalone/brace_expansion/build.zig @@ -0,0 +1,9 @@ +const Builder = @import("std").build.Builder; + +pub fn build(b: &Builder) -> %void { + const main = b.addTest("main.zig"); + main.setBuildMode(b.standardReleaseOptions()); + + const test_step = b.step("test", "Test it"); + test_step.dependOn(&main.step); +} diff --git a/test/standalone/brace_expansion/main.zig b/test/standalone/brace_expansion/main.zig new file mode 100644 index 0000000000..7b03e230f7 --- /dev/null +++ b/test/standalone/brace_expansion/main.zig @@ -0,0 +1,254 @@ +const std = @import("std"); +const io = std.io; +const mem = std.mem; +const debug = std.debug; +const assert = debug.assert; +const Buffer = std.Buffer; +const ArrayList = std.ArrayList; + +error InvalidInput; +error OutOfMem; + +const Token = union(enum) { + Word: []const u8, + OpenBrace, + CloseBrace, + Comma, + Eof, +}; + +var global_allocator: &mem.Allocator = undefined; + +fn tokenize(input:[] const u8) -> %ArrayList(Token) { + const State = enum { + Start, + Word, + }; + + var token_list = ArrayList(Token).init(global_allocator); + var tok_begin: usize = undefined; + var state = State.Start; + + for (input) |b, i| { + switch (state) { + State.Start => switch (b) { + 'a'...'z', 'A'...'Z' => { + state = State.Word; + tok_begin = i; + }, + '{' => try token_list.append(Token.OpenBrace), + '}' => try token_list.append(Token.CloseBrace), + ',' => try token_list.append(Token.Comma), + else => return error.InvalidInput, + }, + State.Word => switch (b) { + 'a'...'z', 'A'...'Z' => {}, + '{', '}', ',' => { + try token_list.append(Token { .Word = input[tok_begin..i] }); + switch (b) { + '{' => try token_list.append(Token.OpenBrace), + '}' => try token_list.append(Token.CloseBrace), + ',' => try token_list.append(Token.Comma), + else => unreachable, + } + state = State.Start; + }, + else => return error.InvalidInput, + }, + } + } + switch (state) { + State.Start => {}, + State.Word => try token_list.append(Token {.Word = input[tok_begin..] 
}), + } + try token_list.append(Token.Eof); + return token_list; +} + +const Node = union(enum) { + Scalar: []const u8, + List: ArrayList(Node), + Combine: []Node, +}; + +fn parse(tokens: &const ArrayList(Token), token_index: &usize) -> %Node { + const first_token = tokens.items[*token_index]; + *token_index += 1; + + const result_node = switch (first_token) { + Token.Word => |word| Node { .Scalar = word }, + Token.OpenBrace => blk: { + var list = ArrayList(Node).init(global_allocator); + while (true) { + try list.append(try parse(tokens, token_index)); + + const token = tokens.items[*token_index]; + *token_index += 1; + + switch (token) { + Token.CloseBrace => break, + Token.Comma => continue, + else => return error.InvalidInput, + } + } + break :blk Node { .List = list }; + }, + else => return error.InvalidInput, + }; + + switch (tokens.items[*token_index]) { + Token.Word, Token.OpenBrace => { + const pair = try global_allocator.alloc(Node, 2); + pair[0] = result_node; + pair[1] = try parse(tokens, token_index); + return Node { .Combine = pair }; + }, + else => return result_node, + } +} + +fn expandString(input: []const u8, output: &Buffer) -> %void { + const tokens = try tokenize(input); + if (tokens.len == 1) { + return output.resize(0); + } + + var token_index: usize = 0; + const root = try parse(tokens, &token_index); + const last_token = tokens.items[token_index]; + switch (last_token) { + Token.Eof => {}, + else => return error.InvalidInput, + } + + var result_list = ArrayList(Buffer).init(global_allocator); + defer result_list.deinit(); + + try expandNode(root, &result_list); + + try output.resize(0); + for (result_list.toSliceConst()) |buf, i| { + if (i != 0) { + try output.appendByte(' '); + } + try output.append(buf.toSliceConst()); + } +} + +const ListOfBuffer0 = ArrayList(Buffer); // TODO this is working around a compiler bug, fix and delete this + +fn expandNode(node: &const Node, output: &ListOfBuffer0) -> %void { + assert(output.len == 0); + switch (*node) { + Node.Scalar => |scalar| { + try output.append(try Buffer.init(global_allocator, scalar)); + }, + Node.Combine => |pair| { + const a_node = pair[0]; + const b_node = pair[1]; + + var child_list_a = ArrayList(Buffer).init(global_allocator); + try expandNode(a_node, &child_list_a); + + var child_list_b = ArrayList(Buffer).init(global_allocator); + try expandNode(b_node, &child_list_b); + + for (child_list_a.toSliceConst()) |buf_a| { + for (child_list_b.toSliceConst()) |buf_b| { + var combined_buf = try Buffer.initFromBuffer(buf_a); + try combined_buf.append(buf_b.toSliceConst()); + try output.append(combined_buf); + } + } + }, + Node.List => |list| { + for (list.toSliceConst()) |child_node| { + var child_list = ArrayList(Buffer).init(global_allocator); + try expandNode(child_node, &child_list); + + for (child_list.toSliceConst()) |buf| { + try output.append(buf); + } + } + }, + } +} + +pub fn main() -> %void { + var stdin_file = try io.getStdIn(); + var stdout_file = try io.getStdOut(); + + var inc_allocator = try std.heap.IncrementingAllocator.init(2 * 1024 * 1024); + defer inc_allocator.deinit(); + + global_allocator = &inc_allocator.allocator; + + var stdin_buf = try Buffer.initSize(global_allocator, 0); + defer stdin_buf.deinit(); + + var stdin_adapter = io.FileInStream.init(&stdin_file); + try stdin_adapter.stream.readAllBuffer(&stdin_buf, @maxValue(usize)); + + var result_buf = try Buffer.initSize(global_allocator, 0); + defer result_buf.deinit(); + + try expandString(stdin_buf.toSlice(), &result_buf); + try 
stdout_file.write(result_buf.toSliceConst()); +} + +test "invalid inputs" { + global_allocator = std.debug.global_allocator; + + expectError("}ABC", error.InvalidInput); + expectError("{ABC", error.InvalidInput); + expectError("}{", error.InvalidInput); + expectError("{}", error.InvalidInput); + expectError("A,B,C", error.InvalidInput); + expectError("{A{B,C}", error.InvalidInput); + expectError("{A,}", error.InvalidInput); + + expectError("\n", error.InvalidInput); +} + +fn expectError(test_input: []const u8, expected_err: error) { + var output_buf = Buffer.initSize(global_allocator, 0) catch unreachable; + defer output_buf.deinit(); + + if (expandString("}ABC", &output_buf)) { + unreachable; + } else |err| { + assert(expected_err == err); + } +} + +test "valid inputs" { + global_allocator = std.debug.global_allocator; + + expectExpansion("{x,y,z}", "x y z"); + expectExpansion("{A,B}{x,y}", "Ax Ay Bx By"); + expectExpansion("{A,B{x,y}}", "A Bx By"); + + expectExpansion("{ABC}", "ABC"); + expectExpansion("{A,B,C}", "A B C"); + expectExpansion("ABC", "ABC"); + + expectExpansion("", ""); + expectExpansion("{A,B}{C,{x,y}}{g,h}", "ACg ACh Axg Axh Ayg Ayh BCg BCh Bxg Bxh Byg Byh"); + expectExpansion("{A,B}{C,C{x,y}}{g,h}", "ACg ACh ACxg ACxh ACyg ACyh BCg BCh BCxg BCxh BCyg BCyh"); + expectExpansion("{A,B}a", "Aa Ba"); + expectExpansion("{C,{x,y}}", "C x y"); + expectExpansion("z{C,{x,y}}", "zC zx zy"); + expectExpansion("a{b,c{d,e{f,g}}}", "ab acd acef aceg"); + expectExpansion("a{x,y}b", "axb ayb"); + expectExpansion("z{{a,b}}", "za zb"); + expectExpansion("a{b}", "ab"); +} + +fn expectExpansion(test_input: []const u8, expected_result: []const u8) { + var result = Buffer.initSize(global_allocator, 0) catch unreachable; + defer result.deinit(); + + expandString(test_input, &result) catch unreachable; + + assert(mem.eql(u8, result.toSlice(), expected_result)); +} diff --git a/test/standalone/issue_339/build.zig b/test/standalone/issue_339/build.zig index 50fa10c593..b7b288f7a1 100644 --- a/test/standalone/issue_339/build.zig +++ b/test/standalone/issue_339/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) { +pub fn build(b: &Builder) -> %void { const obj = b.addObject("test", "test.zig"); const test_step = b.step("test", "Test the program"); diff --git a/test/standalone/issue_339/test.zig b/test/standalone/issue_339/test.zig index e0d6c3f3e5..dd97ffa9b1 100644 --- a/test/standalone/issue_339/test.zig +++ b/test/standalone/issue_339/test.zig @@ -1,4 +1,5 @@ -pub fn panic(msg: []const u8) -> noreturn { @breakpoint(); while (true) {} } +const StackTrace = @import("builtin").StackTrace; +pub fn panic(msg: []const u8, stack_trace: ?&StackTrace) -> noreturn { @breakpoint(); while (true) {} } fn bar() -> %void {} diff --git a/test/standalone/pkg_import/build.zig b/test/standalone/pkg_import/build.zig index 044787dd4d..f40011d834 100644 --- a/test/standalone/pkg_import/build.zig +++ b/test/standalone/pkg_import/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) { +pub fn build(b: &Builder) -> %void { const exe = b.addExecutable("test", "test.zig"); exe.addPackagePath("my_pkg", "pkg.zig"); diff --git a/test/standalone/use_alias/build.zig b/test/standalone/use_alias/build.zig index 03398f1a41..beca6bc304 100644 --- a/test/standalone/use_alias/build.zig +++ b/test/standalone/use_alias/build.zig @@ -1,6 +1,6 @@ const Builder = @import("std").build.Builder; -pub fn build(b: &Builder) { +pub fn build(b: &Builder) 
-> %void { b.addCIncludePath("."); const main = b.addTest("main.zig"); diff --git a/test/tests.zig b/test/tests.zig index 20e1e94459..1136e365a8 100644 --- a/test/tests.zig +++ b/test/tests.zig @@ -42,14 +42,10 @@ const test_targets = []TestTarget { .arch = builtin.Arch.x86_64, .environ = builtin.Environ.msvc, }, - TestTarget { - .os = builtin.Os.windows, - .arch = builtin.Arch.i386, - .environ = builtin.Environ.msvc, - }, }; error TestFailed; +error CompilationIncorrectlySucceeded; const max_stdout_size = 1 * 1024 * 1024; // 1 MB @@ -607,8 +603,7 @@ pub const CompileErrorContext = struct { switch (term) { Term.Exited => |code| { if (code == 0) { - warn("Compilation incorrectly succeeded\n"); - return error.TestFailed; + return error.CompilationIncorrectlySucceeded; } }, else => { diff --git a/test/translate_c.zig b/test/translate_c.zig index cdf900c8b2..8e6a8dda2c 100644 --- a/test/translate_c.zig +++ b/test/translate_c.zig @@ -408,7 +408,7 @@ pub fn addCases(cases: &tests.TranslateCContext) { \\} , \\pub export fn s(a: c_int, b: c_int) -> c_int { - \\ var c: c_int; + \\ var c: c_int = undefined; \\ c = (a + b); \\ c = (a - b); \\ c = (a * b); @@ -416,7 +416,7 @@ pub fn addCases(cases: &tests.TranslateCContext) { \\ c = @rem(a, b); \\} \\pub export fn u(a: c_uint, b: c_uint) -> c_uint { - \\ var c: c_uint; + \\ var c: c_uint = undefined; \\ c = (a +% b); \\ c = (a -% b); \\ c = (a *% b); @@ -460,7 +460,7 @@ pub fn addCases(cases: &tests.TranslateCContext) { , \\pub export fn max(_arg_a: c_int) -> c_int { \\ var a = _arg_a; - \\ var tmp: c_int; + \\ var tmp: c_int = undefined; \\ tmp = a; \\ a = tmp; \\} @@ -473,8 +473,8 @@ pub fn addCases(cases: &tests.TranslateCContext) { \\} , \\pub export fn max(a: c_int) { - \\ var b: c_int; - \\ var c: c_int; + \\ var b: c_int = undefined; + \\ var c: c_int = undefined; \\ c = x: { \\ const _tmp = a; \\ b = _tmp; @@ -1114,4 +1114,25 @@ pub fn addCases(cases: &tests.TranslateCContext) { , \\pub const NRF_GPIO = if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Pointer) @ptrCast(&NRF_GPIO_Type, NRF_GPIO_BASE) else if (@typeId(@typeOf(NRF_GPIO_BASE)) == @import("builtin").TypeId.Int) @intToPtr(&NRF_GPIO_Type, NRF_GPIO_BASE) else (&NRF_GPIO_Type)(NRF_GPIO_BASE); ); + + cases.add("if on int", + \\int if_int(int i) { + \\ if (i) { + \\ return 0; + \\ } else { + \\ return 1; + \\ } + \\} + , + \\pub fn if_int(i: c_int) -> c_int { + \\ { + \\ const _tmp = i; + \\ if (@bitCast(@IntType(false, @sizeOf(@typeOf(_tmp)) * 8), _tmp) != 0) { + \\ return 0; + \\ } else { + \\ return 1; + \\ }; + \\ }; + \\} + ); }
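
Note on the std/debug.zig change above: builtin.StackTrace is consumed by writeStackTrace as a fixed-capacity ring buffer. index counts every return address ever recorded, while instruction_addresses has a fixed length; once index reaches the capacity, traversal starts at slot (index + 1) % capacity and walks all capacity slots in ring order. The test below is only an illustrative reproduction of that arithmetic (the capacity of 4 and index of 6 are made-up numbers, not values from the patch):

const assert = @import("std").debug.assert;

test "stack trace ring order (illustrative)" {
    // Mirrors the frame_index arithmetic of writeStackTrace for a
    // hypothetical capacity-4 trace whose running index has reached 6:
    // iteration starts at slot (6 + 1) % 4 == 3 and visits all 4 slots.
    const capacity: usize = 4;
    const index: usize = 6;
    var frame_index: usize = (index + 1) % capacity;
    var frames_left: usize = capacity;
    var visited: [4]usize = undefined;
    var i: usize = 0;
    while (frames_left != 0) : ({
        frames_left -= 1;
        frame_index = (frame_index + 1) % capacity;
    }) {
        visited[i] = frame_index;
        i += 1;
    }
    assert(visited[0] == 3 and visited[1] == 0 and visited[2] == 1 and visited[3] == 2);
}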
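
With this patch the root-level panic override takes a second parameter, as the updated debug_safety cases show. A minimal sketch of the new form (one possible body, not the std implementation): the second argument carries the error return trace when the panic was reached through an error, and null otherwise, and std.debug.dumpStackTrace from the diff above can print it before aborting.

const builtin = @import("builtin");
const std = @import("std");

// Sketch of a root-level panic override under the new two-argument signature.
pub fn panic(msg: []const u8, error_return_trace: ?&builtin.StackTrace) -> noreturn {
    if (error_return_trace) |trace| {
        // Print the error return trace that led here, if there is one.
        std.debug.dumpStackTrace(trace);
    }
    // Fall back to the standard panic, which prints the current stack trace and aborts.
    std.debug.panic("{}", msg);
}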
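
The std/special/bootstrap.zig change also relaxes the required return type of main: it may now be 'u8', 'noreturn', 'void', or '%void', matching the new compile errors added above. As a sketch (error name is invented for illustration), a %void main that returns an error causes the startup code to print "error: Oops" plus the error return trace and exit with status 1, while a u8 return value becomes the process exit status directly:

error Oops;

// %void main: errors propagate to callMain in bootstrap.zig, which prints
// the error name and its return trace, then exits with status 1.
pub fn main() -> %void {
    return error.Oops;
}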