From e7b18a7ce69f30c85f21ec8ad6a70211abf5f24b Mon Sep 17 00:00:00 2001
From: Andrew Kelley
Date: Sat, 12 Jul 2025 20:55:48 -0700
Subject: [PATCH] std.crypto: remove `inline` from most functions

To quote the language reference,

  It is generally better to let the compiler decide when to inline a function, except for these scenarios:

  * To change how many stack frames are in the call stack, for debugging purposes.
  * To force comptime-ness of the arguments to propagate to the return value of the function, as in the above example.
  * Real world performance measurements demand it. Don't guess!

  Note that inline actually restricts what the compiler is allowed to do. This can harm binary size, compilation speed, and even runtime performance.

`zig run lib/std/crypto/benchmark.zig -OReleaseFast` [-before-] vs {+after+}

md5: [-990-] {+998+} MiB/s
sha1: [-1144-] {+1140+} MiB/s
sha256: [-2267-] {+2275+} MiB/s
sha512: [-762-] {+767+} MiB/s
sha3-256: [-680-] {+683+} MiB/s
sha3-512: [-362-] {+363+} MiB/s
shake-128: [-835-] {+839+} MiB/s
shake-256: [-680-] {+681+} MiB/s
turboshake-128: [-1567-] {+1570+} MiB/s
turboshake-256: [-1276-] {+1282+} MiB/s
blake2s: [-778-] {+789+} MiB/s
blake2b: [-1071-] {+1086+} MiB/s
blake3: [-1148-] {+1137+} MiB/s
ghash: [-10044-] {+10033+} MiB/s
polyval: [-9726-] {+10033+} MiB/s
poly1305: [-2486-] {+2703+} MiB/s
hmac-md5: [-991-] {+998+} MiB/s
hmac-sha1: [-1134-] {+1137+} MiB/s
hmac-sha256: [-2265-] {+2288+} MiB/s
hmac-sha512: [-765-] {+764+} MiB/s
siphash-2-4: [-4410-] {+4438+} MiB/s
siphash-1-3: [-7144-] {+7225+} MiB/s
siphash128-2-4: [-4397-] {+4449+} MiB/s
siphash128-1-3: [-7281-] {+7374+} MiB/s
aegis-128x4 mac: [-73385-] {+74523+} MiB/s
aegis-256x4 mac: [-30160-] {+30539+} MiB/s
aegis-128x2 mac: [-66662-] {+67267+} MiB/s
aegis-256x2 mac: [-16812-] {+16806+} MiB/s
aegis-128l mac: [-33876-] {+34055+} MiB/s
aegis-256 mac: [-8993-] {+9087+} MiB/s
aes-cmac: 2036 MiB/s
x25519: [-20670-] {+16844+} exchanges/s
ed25519: [-29763-] {+29576+} signatures/s
ecdsa-p256: [-4762-] {+4900+} signatures/s
ecdsa-p384: [-1465-] {+1500+} signatures/s
ecdsa-secp256k1: [-5643-] {+5769+} signatures/s
ed25519: [-21926-] {+21721+} verifications/s
ed25519: [-51200-] {+50880+} verifications/s (batch)
chacha20Poly1305: [-1189-] {+1109+} MiB/s
xchacha20Poly1305: [-1196-] {+1107+} MiB/s
xchacha8Poly1305: [-1466-] {+1555+} MiB/s
xsalsa20Poly1305: [-660-] {+620+} MiB/s
aegis-128x4: [-76389-] {+78181+} MiB/s
aegis-128x2: [-53946-] {+53495+} MiB/s
aegis-128l: [-27219-] {+25621+} MiB/s
aegis-256x4: [-49351-] {+49542+} MiB/s
aegis-256x2: [-32390-] {+32366+} MiB/s
aegis-256: [-8881-] {+8944+} MiB/s
aes128-gcm: [-6095-] {+6205+} MiB/s
aes256-gcm: [-5306-] {+5427+} MiB/s
aes128-ocb: [-8529-] {+13974+} MiB/s
aes256-ocb: [-7241-] {+9442+} MiB/s
isapa128a: [-204-] {+214+} MiB/s
aes128-single: [-133857882-] {+134170944+} ops/s
aes256-single: [-96306962-] {+96408639+} ops/s
aes128-8: [-1083210101-] {+1073727253+} ops/s
aes256-8: [-762042466-] {+767091778+} ops/s
bcrypt: 0.009 s/ops
scrypt: [-0.018-] {+0.017+} s/ops
argon2: [-0.037-] {+0.060+} s/ops
kyber512d00: [-206057-] {+205779+} encaps/s
kyber768d00: [-156074-] {+150711+} encaps/s
kyber1024d00: [-116626-] {+115469+} encaps/s
kyber512d00: [-181149-] {+182046+} decaps/s
kyber768d00: [-136965-] {+135676+} decaps/s
kyber1024d00: [-101307-] {+100643+} decaps/s
kyber512d00: [-123624-] {+123375+} keygen/s
kyber768d00: [-69465-] {+70828+} keygen/s
kyber1024d00: [-43117-] {+43208+} keygen/s
---
 lib/std/crypto.zig | 2 +-
 lib/std/crypto/25519/curve25519.zig | 4 +-
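The "above example" that the quoted bullet about comptime-ness refers to is not reproduced in the quote; a minimal sketch of that scenario (illustrative names, not code from this patch) looks like this:

```zig
const expect = @import("std").testing.expect;

// Because `add` is marked `inline`, a call with comptime-known arguments
// produces a comptime-known result: the `if` below is resolved at compile
// time and the `@compileError` branch is never analyzed. Drop `inline` and
// the call becomes a runtime call, so this test no longer compiles.
inline fn add(a: i32, b: i32) i32 {
    return a + b;
}

test "inline call propagates comptime-ness" {
    if (add(1200, 34) != 1234) {
        @compileError("unreachable when the call is inlined");
    }
    try expect(add(1200, 34) == 1234);
}
```

None of the functions changed below rely on that property, so for them `inline` only constrained code generation, which is what the benchmark comparison above measures.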
lib/std/crypto/25519/edwards25519.zig | 6 +-- lib/std/crypto/25519/field.zig | 22 ++++---- lib/std/crypto/25519/ristretto255.zig | 10 ++-- lib/std/crypto/25519/scalar.zig | 4 +- lib/std/crypto/aegis.zig | 4 +- lib/std/crypto/aes/aesni.zig | 52 +++++++++---------- lib/std/crypto/aes/armcrypto.zig | 52 +++++++++---------- lib/std/crypto/aes/soft.zig | 44 ++++++++-------- lib/std/crypto/aes_ocb.zig | 6 +-- lib/std/crypto/ascon.zig | 8 +-- lib/std/crypto/blake3.zig | 6 +-- lib/std/crypto/chacha20.zig | 12 ++--- lib/std/crypto/codecs/base64_hex_ct.zig | 14 ++--- lib/std/crypto/ghash_polyval.zig | 10 ++-- lib/std/crypto/pcurves/p256/p256_64.zig | 8 +-- .../crypto/pcurves/p256/p256_scalar_64.zig | 8 +-- lib/std/crypto/pcurves/p384/p384_64.zig | 8 +-- .../crypto/pcurves/p384/p384_scalar_64.zig | 8 +-- .../crypto/pcurves/secp256k1/secp256k1_64.zig | 8 +-- .../pcurves/secp256k1/secp256k1_scalar_64.zig | 8 +-- lib/std/crypto/poly1305.zig | 4 +- lib/std/crypto/salsa20.zig | 6 +-- lib/std/crypto/tls.zig | 6 +-- lib/std/crypto/tls/Client.zig | 2 +- 26 files changed, 161 insertions(+), 161 deletions(-) diff --git a/lib/std/crypto.zig b/lib/std/crypto.zig index 5a22643d0a..9307016038 100644 --- a/lib/std/crypto.zig +++ b/lib/std/crypto.zig @@ -386,7 +386,7 @@ test "issue #4532: no index out of bounds" { /// Sets a slice to zeroes. /// Prevents the store from being optimized out. -pub inline fn secureZero(comptime T: type, s: []volatile T) void { +pub fn secureZero(comptime T: type, s: []volatile T) void { @memset(s, 0); } diff --git a/lib/std/crypto/25519/curve25519.zig b/lib/std/crypto/25519/curve25519.zig index 825f0bd94c..0cc0dec3d1 100644 --- a/lib/std/crypto/25519/curve25519.zig +++ b/lib/std/crypto/25519/curve25519.zig @@ -15,12 +15,12 @@ pub const Curve25519 = struct { x: Fe, /// Decode a Curve25519 point from its compressed (X) coordinates. - pub inline fn fromBytes(s: [32]u8) Curve25519 { + pub fn fromBytes(s: [32]u8) Curve25519 { return .{ .x = Fe.fromBytes(s) }; } /// Encode a Curve25519 point. - pub inline fn toBytes(p: Curve25519) [32]u8 { + pub fn toBytes(p: Curve25519) [32]u8 { return p.x.toBytes(); } diff --git a/lib/std/crypto/25519/edwards25519.zig b/lib/std/crypto/25519/edwards25519.zig index 47c07939ac..da8b84a96b 100644 --- a/lib/std/crypto/25519/edwards25519.zig +++ b/lib/std/crypto/25519/edwards25519.zig @@ -138,7 +138,7 @@ pub const Edwards25519 = struct { } /// Flip the sign of the X coordinate. 
- pub inline fn neg(p: Edwards25519) Edwards25519 { + pub fn neg(p: Edwards25519) Edwards25519 { return .{ .x = p.x.neg(), .y = p.y, .z = p.z, .t = p.t.neg() }; } @@ -190,14 +190,14 @@ pub const Edwards25519 = struct { return q; } - inline fn cMov(p: *Edwards25519, a: Edwards25519, c: u64) void { + fn cMov(p: *Edwards25519, a: Edwards25519, c: u64) void { p.x.cMov(a.x, c); p.y.cMov(a.y, c); p.z.cMov(a.z, c); p.t.cMov(a.t, c); } - inline fn pcSelect(comptime n: usize, pc: *const [n]Edwards25519, b: u8) Edwards25519 { + fn pcSelect(comptime n: usize, pc: *const [n]Edwards25519, b: u8) Edwards25519 { var t = Edwards25519.identityElement; comptime var i: u8 = 1; inline while (i < pc.len) : (i += 1) { diff --git a/lib/std/crypto/25519/field.zig b/lib/std/crypto/25519/field.zig index c2fc8408bd..d10f92550a 100644 --- a/lib/std/crypto/25519/field.zig +++ b/lib/std/crypto/25519/field.zig @@ -56,7 +56,7 @@ pub const Fe = struct { pub const edwards25519sqrtam2 = Fe{ .limbs = .{ 1693982333959686, 608509411481997, 2235573344831311, 947681270984193, 266558006233600 } }; /// Return true if the field element is zero - pub inline fn isZero(fe: Fe) bool { + pub fn isZero(fe: Fe) bool { var reduced = fe; reduced.reduce(); const limbs = reduced.limbs; @@ -64,7 +64,7 @@ pub const Fe = struct { } /// Return true if both field elements are equivalent - pub inline fn equivalent(a: Fe, b: Fe) bool { + pub fn equivalent(a: Fe, b: Fe) bool { return a.sub(b).isZero(); } @@ -168,7 +168,7 @@ pub const Fe = struct { } /// Add a field element - pub inline fn add(a: Fe, b: Fe) Fe { + pub fn add(a: Fe, b: Fe) Fe { var fe: Fe = undefined; comptime var i = 0; inline while (i < 5) : (i += 1) { @@ -178,7 +178,7 @@ pub const Fe = struct { } /// Subtract a field element - pub inline fn sub(a: Fe, b: Fe) Fe { + pub fn sub(a: Fe, b: Fe) Fe { var fe = b; comptime var i = 0; inline while (i < 4) : (i += 1) { @@ -197,17 +197,17 @@ pub const Fe = struct { } /// Negate a field element - pub inline fn neg(a: Fe) Fe { + pub fn neg(a: Fe) Fe { return zero.sub(a); } /// Return true if a field element is negative - pub inline fn isNegative(a: Fe) bool { + pub fn isNegative(a: Fe) bool { return (a.toBytes()[0] & 1) != 0; } /// Conditonally replace a field element with `a` if `c` is positive - pub inline fn cMov(fe: *Fe, a: Fe, c: u64) void { + pub fn cMov(fe: *Fe, a: Fe, c: u64) void { const mask: u64 = 0 -% c; var x = fe.*; comptime var i = 0; @@ -248,7 +248,7 @@ pub const Fe = struct { } } - inline fn _carry128(r: *[5]u128) Fe { + fn _carry128(r: *[5]u128) Fe { var rs: [5]u64 = undefined; comptime var i = 0; inline while (i < 4) : (i += 1) { @@ -321,17 +321,17 @@ pub const Fe = struct { } /// Square a field element - pub inline fn sq(a: Fe) Fe { + pub fn sq(a: Fe) Fe { return _sq(a, false); } /// Square and double a field element - pub inline fn sq2(a: Fe) Fe { + pub fn sq2(a: Fe) Fe { return _sq(a, true); } /// Multiply a field element with a small (32-bit) integer - pub inline fn mul32(a: Fe, comptime n: u32) Fe { + pub fn mul32(a: Fe, comptime n: u32) Fe { const sn = @as(u128, @intCast(n)); var fe: Fe = undefined; var x: u128 = 0; diff --git a/lib/std/crypto/25519/ristretto255.zig b/lib/std/crypto/25519/ristretto255.zig index dd1a8a236e..a09e540a77 100644 --- a/lib/std/crypto/25519/ristretto255.zig +++ b/lib/std/crypto/25519/ristretto255.zig @@ -42,7 +42,7 @@ pub const Ristretto255 = struct { } /// Reject the neutral element. 
- pub inline fn rejectIdentity(p: Ristretto255) IdentityElementError!void { + pub fn rejectIdentity(p: Ristretto255) IdentityElementError!void { return p.p.rejectIdentity(); } @@ -141,24 +141,24 @@ pub const Ristretto255 = struct { } /// Double a Ristretto255 element. - pub inline fn dbl(p: Ristretto255) Ristretto255 { + pub fn dbl(p: Ristretto255) Ristretto255 { return .{ .p = p.p.dbl() }; } /// Add two Ristretto255 elements. - pub inline fn add(p: Ristretto255, q: Ristretto255) Ristretto255 { + pub fn add(p: Ristretto255, q: Ristretto255) Ristretto255 { return .{ .p = p.p.add(q.p) }; } /// Subtract two Ristretto255 elements. - pub inline fn sub(p: Ristretto255, q: Ristretto255) Ristretto255 { + pub fn sub(p: Ristretto255, q: Ristretto255) Ristretto255 { return .{ .p = p.p.sub(q.p) }; } /// Multiply a Ristretto255 element with a scalar. /// Return error.WeakPublicKey if the resulting element is /// the identity element. - pub inline fn mul(p: Ristretto255, s: [encoded_length]u8) (IdentityElementError || WeakPublicKeyError)!Ristretto255 { + pub fn mul(p: Ristretto255, s: [encoded_length]u8) (IdentityElementError || WeakPublicKeyError)!Ristretto255 { return .{ .p = try p.p.mul(s) }; } diff --git a/lib/std/crypto/25519/scalar.zig b/lib/std/crypto/25519/scalar.zig index b07b1c774c..59a4b41f7a 100644 --- a/lib/std/crypto/25519/scalar.zig +++ b/lib/std/crypto/25519/scalar.zig @@ -50,7 +50,7 @@ pub fn reduce64(s: [64]u8) CompressedScalar { /// Perform the X25519 "clamping" operation. /// The scalar is then guaranteed to be a multiple of the cofactor. -pub inline fn clamp(s: *CompressedScalar) void { +pub fn clamp(s: *CompressedScalar) void { s[0] &= 248; s[31] = (s[31] & 127) | 64; } @@ -514,7 +514,7 @@ pub const Scalar = struct { } /// Square a scalar `n` times - inline fn sqn(x: Scalar, comptime n: comptime_int) Scalar { + fn sqn(x: Scalar, comptime n: comptime_int) Scalar { var i: usize = 0; var t = x; while (i < n) : (i += 1) { diff --git a/lib/std/crypto/aegis.zig b/lib/std/crypto/aegis.zig index ea4176d13d..b3839e6d9f 100644 --- a/lib/std/crypto/aegis.zig +++ b/lib/std/crypto/aegis.zig @@ -104,7 +104,7 @@ fn State128X(comptime degree: u7) type { return state; } - inline fn update(state: *State, d1: AesBlockVec, d2: AesBlockVec) void { + fn update(state: *State, d1: AesBlockVec, d2: AesBlockVec) void { const blocks = &state.blocks; const tmp = blocks[7]; comptime var i: usize = 7; @@ -413,7 +413,7 @@ fn State256X(comptime degree: u7) type { return state; } - inline fn update(state: *State, d: AesBlockVec) void { + fn update(state: *State, d: AesBlockVec) void { const blocks = &state.blocks; const tmp = blocks[5].encrypt(blocks[0]); comptime var i: usize = 5; diff --git a/lib/std/crypto/aes/aesni.zig b/lib/std/crypto/aes/aesni.zig index 027fbca646..64bf37b46e 100644 --- a/lib/std/crypto/aes/aesni.zig +++ b/lib/std/crypto/aes/aesni.zig @@ -17,24 +17,24 @@ pub const Block = struct { repr: Repr, /// Convert a byte sequence into an internal representation. - pub inline fn fromBytes(bytes: *const [16]u8) Block { + pub fn fromBytes(bytes: *const [16]u8) Block { const repr = mem.bytesToValue(Repr, bytes); return Block{ .repr = repr }; } /// Convert the internal representation of a block into a byte sequence. - pub inline fn toBytes(block: Block) [16]u8 { + pub fn toBytes(block: Block) [16]u8 { return mem.toBytes(block.repr); } /// XOR the block with a byte sequence. 
- pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 { + pub fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 { const x = block.repr ^ fromBytes(bytes).repr; return mem.toBytes(x); } /// Encrypt a block with a round key. - pub inline fn encrypt(block: Block, round_key: Block) Block { + pub fn encrypt(block: Block, round_key: Block) Block { return Block{ .repr = asm ( \\ vaesenc %[rk], %[in], %[out] @@ -46,7 +46,7 @@ pub const Block = struct { } /// Encrypt a block with the last round key. - pub inline fn encryptLast(block: Block, round_key: Block) Block { + pub fn encryptLast(block: Block, round_key: Block) Block { return Block{ .repr = asm ( \\ vaesenclast %[rk], %[in], %[out] @@ -58,7 +58,7 @@ pub const Block = struct { } /// Decrypt a block with a round key. - pub inline fn decrypt(block: Block, inv_round_key: Block) Block { + pub fn decrypt(block: Block, inv_round_key: Block) Block { return Block{ .repr = asm ( \\ vaesdec %[rk], %[in], %[out] @@ -70,7 +70,7 @@ pub const Block = struct { } /// Decrypt a block with the last round key. - pub inline fn decryptLast(block: Block, inv_round_key: Block) Block { + pub fn decryptLast(block: Block, inv_round_key: Block) Block { return Block{ .repr = asm ( \\ vaesdeclast %[rk], %[in], %[out] @@ -82,17 +82,17 @@ pub const Block = struct { } /// Apply the bitwise XOR operation to the content of two blocks. - pub inline fn xorBlocks(block1: Block, block2: Block) Block { + pub fn xorBlocks(block1: Block, block2: Block) Block { return Block{ .repr = block1.repr ^ block2.repr }; } /// Apply the bitwise AND operation to the content of two blocks. - pub inline fn andBlocks(block1: Block, block2: Block) Block { + pub fn andBlocks(block1: Block, block2: Block) Block { return Block{ .repr = block1.repr & block2.repr }; } /// Apply the bitwise OR operation to the content of two blocks. - pub inline fn orBlocks(block1: Block, block2: Block) Block { + pub fn orBlocks(block1: Block, block2: Block) Block { return Block{ .repr = block1.repr | block2.repr }; } @@ -112,7 +112,7 @@ pub const Block = struct { }; /// Encrypt multiple blocks in parallel, each their own round key. - pub inline fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block { + pub fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -122,7 +122,7 @@ pub const Block = struct { } /// Decrypt multiple blocks in parallel, each their own round key. - pub inline fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block { + pub fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -132,7 +132,7 @@ pub const Block = struct { } /// Encrypt multiple blocks in parallel with the same round key. - pub inline fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { + pub fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -142,7 +142,7 @@ pub const Block = struct { } /// Decrypt multiple blocks in parallel with the same round key. 
- pub inline fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { + pub fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -152,7 +152,7 @@ pub const Block = struct { } /// Encrypt multiple blocks in parallel with the same last round key. - pub inline fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { + pub fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -162,7 +162,7 @@ pub const Block = struct { } /// Decrypt multiple blocks in parallel with the same last round key. - pub inline fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { + pub fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -200,7 +200,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { pub const block_length: usize = blocks_count * 16; /// Convert a byte sequence into an internal representation. - pub inline fn fromBytes(bytes: *const [blocks_count * 16]u8) Self { + pub fn fromBytes(bytes: *const [blocks_count * 16]u8) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = mem.bytesToValue(Repr, bytes[i * native_word_size ..][0..native_word_size]); @@ -209,7 +209,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Convert the internal representation of a block vector into a byte sequence. - pub inline fn toBytes(block_vec: Self) [blocks_count * 16]u8 { + pub fn toBytes(block_vec: Self) [blocks_count * 16]u8 { var out: [blocks_count * 16]u8 = undefined; inline for (0..native_words) |i| { out[i * native_word_size ..][0..native_word_size].* = mem.toBytes(block_vec.repr[i]); @@ -218,7 +218,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// XOR the block vector with a byte sequence. - pub inline fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [blocks_count * 16]u8 { + pub fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [blocks_count * 16]u8 { var x: Self = undefined; inline for (0..native_words) |i| { x.repr[i] = block_vec.repr[i] ^ mem.bytesToValue(Repr, bytes[i * native_word_size ..][0..native_word_size]); @@ -227,7 +227,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the forward AES operation to the block vector with a vector of round keys. - pub inline fn encrypt(block_vec: Self, round_key_vec: Self) Self { + pub fn encrypt(block_vec: Self, round_key_vec: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = asm ( @@ -241,7 +241,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the forward AES operation to the block vector with a vector of last round keys. - pub inline fn encryptLast(block_vec: Self, round_key_vec: Self) Self { + pub fn encryptLast(block_vec: Self, round_key_vec: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = asm ( @@ -255,7 +255,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the inverse AES operation to the block vector with a vector of round keys. 
- pub inline fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self { + pub fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = asm ( @@ -269,7 +269,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the inverse AES operation to the block vector with a vector of last round keys. - pub inline fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self { + pub fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = asm ( @@ -283,7 +283,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise XOR operation to the content of two block vectors. - pub inline fn xorBlocks(block_vec1: Self, block_vec2: Self) Self { + pub fn xorBlocks(block_vec1: Self, block_vec2: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i] ^ block_vec2.repr[i]; @@ -292,7 +292,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise AND operation to the content of two block vectors. - pub inline fn andBlocks(block_vec1: Self, block_vec2: Self) Self { + pub fn andBlocks(block_vec1: Self, block_vec2: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i] & block_vec2.repr[i]; @@ -301,7 +301,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise OR operation to the content of two block vectors. - pub inline fn orBlocks(block_vec1: Self, block_vec2: Block) Self { + pub fn orBlocks(block_vec1: Self, block_vec2: Block) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i] | block_vec2.repr[i]; diff --git a/lib/std/crypto/aes/armcrypto.zig b/lib/std/crypto/aes/armcrypto.zig index 2487ab7e72..714f3c0c32 100644 --- a/lib/std/crypto/aes/armcrypto.zig +++ b/lib/std/crypto/aes/armcrypto.zig @@ -12,18 +12,18 @@ pub const Block = struct { repr: Repr, /// Convert a byte sequence into an internal representation. - pub inline fn fromBytes(bytes: *const [16]u8) Block { + pub fn fromBytes(bytes: *const [16]u8) Block { const repr = mem.bytesToValue(Repr, bytes); return Block{ .repr = repr }; } /// Convert the internal representation of a block into a byte sequence. - pub inline fn toBytes(block: Block) [16]u8 { + pub fn toBytes(block: Block) [16]u8 { return mem.toBytes(block.repr); } /// XOR the block with a byte sequence. - pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 { + pub fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 { const x = block.repr ^ fromBytes(bytes).repr; return mem.toBytes(x); } @@ -31,7 +31,7 @@ pub const Block = struct { const zero = @Vector(2, u64){ 0, 0 }; /// Encrypt a block with a round key. - pub inline fn encrypt(block: Block, round_key: Block) Block { + pub fn encrypt(block: Block, round_key: Block) Block { return Block{ .repr = (asm ( \\ mov %[out].16b, %[in].16b @@ -45,7 +45,7 @@ pub const Block = struct { } /// Encrypt a block with the last round key. - pub inline fn encryptLast(block: Block, round_key: Block) Block { + pub fn encryptLast(block: Block, round_key: Block) Block { return Block{ .repr = (asm ( \\ mov %[out].16b, %[in].16b @@ -58,7 +58,7 @@ pub const Block = struct { } /// Decrypt a block with a round key. 
- pub inline fn decrypt(block: Block, inv_round_key: Block) Block { + pub fn decrypt(block: Block, inv_round_key: Block) Block { return Block{ .repr = (asm ( \\ mov %[out].16b, %[in].16b @@ -72,7 +72,7 @@ pub const Block = struct { } /// Decrypt a block with the last round key. - pub inline fn decryptLast(block: Block, inv_round_key: Block) Block { + pub fn decryptLast(block: Block, inv_round_key: Block) Block { return Block{ .repr = (asm ( \\ mov %[out].16b, %[in].16b @@ -85,17 +85,17 @@ pub const Block = struct { } /// Apply the bitwise XOR operation to the content of two blocks. - pub inline fn xorBlocks(block1: Block, block2: Block) Block { + pub fn xorBlocks(block1: Block, block2: Block) Block { return Block{ .repr = block1.repr ^ block2.repr }; } /// Apply the bitwise AND operation to the content of two blocks. - pub inline fn andBlocks(block1: Block, block2: Block) Block { + pub fn andBlocks(block1: Block, block2: Block) Block { return Block{ .repr = block1.repr & block2.repr }; } /// Apply the bitwise OR operation to the content of two blocks. - pub inline fn orBlocks(block1: Block, block2: Block) Block { + pub fn orBlocks(block1: Block, block2: Block) Block { return Block{ .repr = block1.repr | block2.repr }; } @@ -105,7 +105,7 @@ pub const Block = struct { pub const optimal_parallel_blocks = 6; /// Encrypt multiple blocks in parallel, each their own round key. - pub inline fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block { + pub fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -115,7 +115,7 @@ pub const Block = struct { } /// Decrypt multiple blocks in parallel, each their own round key. - pub inline fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block { + pub fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -125,7 +125,7 @@ pub const Block = struct { } /// Encrypt multiple blocks in parallel with the same round key. - pub inline fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { + pub fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -135,7 +135,7 @@ pub const Block = struct { } /// Decrypt multiple blocks in parallel with the same round key. - pub inline fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { + pub fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -145,7 +145,7 @@ pub const Block = struct { } /// Encrypt multiple blocks in parallel with the same last round key. - pub inline fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { + pub fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -155,7 +155,7 @@ pub const Block = struct { } /// Decrypt multiple blocks in parallel with the same last round key. 
- pub inline fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { + pub fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block { comptime var i = 0; var out: [count]Block = undefined; inline while (i < count) : (i += 1) { @@ -187,7 +187,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { pub const block_length: usize = blocks_count * 16; /// Convert a byte sequence into an internal representation. - pub inline fn fromBytes(bytes: *const [blocks_count * 16]u8) Self { + pub fn fromBytes(bytes: *const [blocks_count * 16]u8) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = Block.fromBytes(bytes[i * native_word_size ..][0..native_word_size]); @@ -196,7 +196,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Convert the internal representation of a block vector into a byte sequence. - pub inline fn toBytes(block_vec: Self) [blocks_count * 16]u8 { + pub fn toBytes(block_vec: Self) [blocks_count * 16]u8 { var out: [blocks_count * 16]u8 = undefined; inline for (0..native_words) |i| { out[i * native_word_size ..][0..native_word_size].* = block_vec.repr[i].toBytes(); @@ -205,7 +205,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// XOR the block vector with a byte sequence. - pub inline fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [32]u8 { + pub fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [32]u8 { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].xorBytes(bytes[i * native_word_size ..][0..native_word_size]); @@ -214,7 +214,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the forward AES operation to the block vector with a vector of round keys. - pub inline fn encrypt(block_vec: Self, round_key_vec: Self) Self { + pub fn encrypt(block_vec: Self, round_key_vec: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].encrypt(round_key_vec.repr[i]); @@ -223,7 +223,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the forward AES operation to the block vector with a vector of last round keys. - pub inline fn encryptLast(block_vec: Self, round_key_vec: Self) Self { + pub fn encryptLast(block_vec: Self, round_key_vec: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].encryptLast(round_key_vec.repr[i]); @@ -232,7 +232,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the inverse AES operation to the block vector with a vector of round keys. - pub inline fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self { + pub fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].decrypt(inv_round_key_vec.repr[i]); @@ -241,7 +241,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the inverse AES operation to the block vector with a vector of last round keys. 
- pub inline fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self { + pub fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].decryptLast(inv_round_key_vec.repr[i]); @@ -250,7 +250,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise XOR operation to the content of two block vectors. - pub inline fn xorBlocks(block_vec1: Self, block_vec2: Self) Self { + pub fn xorBlocks(block_vec1: Self, block_vec2: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i].xorBlocks(block_vec2.repr[i]); @@ -259,7 +259,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise AND operation to the content of two block vectors. - pub inline fn andBlocks(block_vec1: Self, block_vec2: Self) Self { + pub fn andBlocks(block_vec1: Self, block_vec2: Self) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i].andBlocks(block_vec2.repr[i]); @@ -268,7 +268,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise OR operation to the content of two block vectors. - pub inline fn orBlocks(block_vec1: Self, block_vec2: Block) Self { + pub fn orBlocks(block_vec1: Self, block_vec2: Block) Self { var out: Self = undefined; inline for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i].orBlocks(block_vec2.repr[i]); diff --git a/lib/std/crypto/aes/soft.zig b/lib/std/crypto/aes/soft.zig index 7f3d298a3a..cec5abff48 100644 --- a/lib/std/crypto/aes/soft.zig +++ b/lib/std/crypto/aes/soft.zig @@ -14,7 +14,7 @@ pub const Block = struct { repr: Repr align(16), /// Convert a byte sequence into an internal representation. - pub inline fn fromBytes(bytes: *const [16]u8) Block { + pub fn fromBytes(bytes: *const [16]u8) Block { const s0 = mem.readInt(u32, bytes[0..4], .little); const s1 = mem.readInt(u32, bytes[4..8], .little); const s2 = mem.readInt(u32, bytes[8..12], .little); @@ -23,7 +23,7 @@ pub const Block = struct { } /// Convert the internal representation of a block into a byte sequence. - pub inline fn toBytes(block: Block) [16]u8 { + pub fn toBytes(block: Block) [16]u8 { var bytes: [16]u8 = undefined; mem.writeInt(u32, bytes[0..4], block.repr[0], .little); mem.writeInt(u32, bytes[4..8], block.repr[1], .little); @@ -33,7 +33,7 @@ pub const Block = struct { } /// XOR the block with a byte sequence. - pub inline fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 { + pub fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 { const block_bytes = block.toBytes(); var x: [16]u8 = undefined; comptime var i: usize = 0; @@ -44,7 +44,7 @@ pub const Block = struct { } /// Encrypt a block with a round key. - pub inline fn encrypt(block: Block, round_key: Block) Block { + pub fn encrypt(block: Block, round_key: Block) Block { const s0 = block.repr[0]; const s1 = block.repr[1]; const s2 = block.repr[2]; @@ -69,7 +69,7 @@ pub const Block = struct { } /// Encrypt a block with a round key *WITHOUT ANY PROTECTION AGAINST SIDE CHANNELS* - pub inline fn encryptUnprotected(block: Block, round_key: Block) Block { + pub fn encryptUnprotected(block: Block, round_key: Block) Block { const s0 = block.repr[0]; const s1 = block.repr[1]; const s2 = block.repr[2]; @@ -114,7 +114,7 @@ pub const Block = struct { } /// Encrypt a block with the last round key. 
- pub inline fn encryptLast(block: Block, round_key: Block) Block { + pub fn encryptLast(block: Block, round_key: Block) Block { const s0 = block.repr[0]; const s1 = block.repr[1]; const s2 = block.repr[2]; @@ -140,7 +140,7 @@ pub const Block = struct { } /// Decrypt a block with a round key. - pub inline fn decrypt(block: Block, round_key: Block) Block { + pub fn decrypt(block: Block, round_key: Block) Block { const s0 = block.repr[0]; const s1 = block.repr[1]; const s2 = block.repr[2]; @@ -165,7 +165,7 @@ pub const Block = struct { } /// Decrypt a block with a round key *WITHOUT ANY PROTECTION AGAINST SIDE CHANNELS* - pub inline fn decryptUnprotected(block: Block, round_key: Block) Block { + pub fn decryptUnprotected(block: Block, round_key: Block) Block { const s0 = block.repr[0]; const s1 = block.repr[1]; const s2 = block.repr[2]; @@ -210,7 +210,7 @@ pub const Block = struct { } /// Decrypt a block with the last round key. - pub inline fn decryptLast(block: Block, round_key: Block) Block { + pub fn decryptLast(block: Block, round_key: Block) Block { const s0 = block.repr[0]; const s1 = block.repr[1]; const s2 = block.repr[2]; @@ -236,7 +236,7 @@ pub const Block = struct { } /// Apply the bitwise XOR operation to the content of two blocks. - pub inline fn xorBlocks(block1: Block, block2: Block) Block { + pub fn xorBlocks(block1: Block, block2: Block) Block { var x: Repr = undefined; comptime var i = 0; inline while (i < 4) : (i += 1) { @@ -246,7 +246,7 @@ pub const Block = struct { } /// Apply the bitwise AND operation to the content of two blocks. - pub inline fn andBlocks(block1: Block, block2: Block) Block { + pub fn andBlocks(block1: Block, block2: Block) Block { var x: Repr = undefined; comptime var i = 0; inline while (i < 4) : (i += 1) { @@ -256,7 +256,7 @@ pub const Block = struct { } /// Apply the bitwise OR operation to the content of two blocks. - pub inline fn orBlocks(block1: Block, block2: Block) Block { + pub fn orBlocks(block1: Block, block2: Block) Block { var x: Repr = undefined; comptime var i = 0; inline while (i < 4) : (i += 1) { @@ -353,7 +353,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { pub const block_length: usize = blocks_count * 16; /// Convert a byte sequence into an internal representation. - pub inline fn fromBytes(bytes: *const [blocks_count * 16]u8) Self { + pub fn fromBytes(bytes: *const [blocks_count * 16]u8) Self { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = Block.fromBytes(bytes[i * native_word_size ..][0..native_word_size]); @@ -362,7 +362,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Convert the internal representation of a block vector into a byte sequence. - pub inline fn toBytes(block_vec: Self) [blocks_count * 16]u8 { + pub fn toBytes(block_vec: Self) [blocks_count * 16]u8 { var out: [blocks_count * 16]u8 = undefined; for (0..native_words) |i| { out[i * native_word_size ..][0..native_word_size].* = block_vec.repr[i].toBytes(); @@ -371,7 +371,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// XOR the block vector with a byte sequence. 
- pub inline fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [32]u8 { + pub fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [32]u8 { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].xorBytes(bytes[i * native_word_size ..][0..native_word_size]); @@ -380,7 +380,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the forward AES operation to the block vector with a vector of round keys. - pub inline fn encrypt(block_vec: Self, round_key_vec: Self) Self { + pub fn encrypt(block_vec: Self, round_key_vec: Self) Self { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].encrypt(round_key_vec.repr[i]); @@ -389,7 +389,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the forward AES operation to the block vector with a vector of last round keys. - pub inline fn encryptLast(block_vec: Self, round_key_vec: Self) Self { + pub fn encryptLast(block_vec: Self, round_key_vec: Self) Self { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].encryptLast(round_key_vec.repr[i]); @@ -398,7 +398,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the inverse AES operation to the block vector with a vector of round keys. - pub inline fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self { + pub fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].decrypt(inv_round_key_vec.repr[i]); @@ -407,7 +407,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the inverse AES operation to the block vector with a vector of last round keys. - pub inline fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self { + pub fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = block_vec.repr[i].decryptLast(inv_round_key_vec.repr[i]); @@ -416,7 +416,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise XOR operation to the content of two block vectors. - pub inline fn xorBlocks(block_vec1: Self, block_vec2: Self) Self { + pub fn xorBlocks(block_vec1: Self, block_vec2: Self) Self { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i].xorBlocks(block_vec2.repr[i]); @@ -425,7 +425,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise AND operation to the content of two block vectors. - pub inline fn andBlocks(block_vec1: Self, block_vec2: Self) Self { + pub fn andBlocks(block_vec1: Self, block_vec2: Self) Self { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i].andBlocks(block_vec2.repr[i]); @@ -434,7 +434,7 @@ pub fn BlockVec(comptime blocks_count: comptime_int) type { } /// Apply the bitwise OR operation to the content of two block vectors. 
- pub inline fn orBlocks(block_vec1: Self, block_vec2: Block) Self { + pub fn orBlocks(block_vec1: Self, block_vec2: Block) Self { var out: Self = undefined; for (0..native_words) |i| { out.repr[i] = block_vec1.repr[i].orBlocks(block_vec2.repr[i]); diff --git a/lib/std/crypto/aes_ocb.zig b/lib/std/crypto/aes_ocb.zig index 2aa1fdc4b0..2d6817dc69 100644 --- a/lib/std/crypto/aes_ocb.zig +++ b/lib/std/crypto/aes_ocb.zig @@ -28,7 +28,7 @@ fn AesOcb(comptime Aes: anytype) type { table: [56]Block align(16) = undefined, upto: usize, - inline fn double(l: Block) Block { + fn double(l: Block) Block { const l_ = mem.readInt(u128, &l, .big); const l_2 = (l_ << 1) ^ (0x87 & -%(l_ >> 127)); var l2: Block = undefined; @@ -244,7 +244,7 @@ fn AesOcb(comptime Aes: anytype) type { }; } -inline fn xorBlocks(x: Block, y: Block) Block { +fn xorBlocks(x: Block, y: Block) Block { var z: Block = x; for (&z, 0..) |*v, i| { v.* = x[i] ^ y[i]; @@ -252,7 +252,7 @@ inline fn xorBlocks(x: Block, y: Block) Block { return z; } -inline fn xorWith(x: *Block, y: Block) void { +fn xorWith(x: *Block, y: Block) void { for (x, 0..) |*v, i| { v.* ^= y[i]; } diff --git a/lib/std/crypto/ascon.zig b/lib/std/crypto/ascon.zig index a2168b8a9d..c0ba5de939 100644 --- a/lib/std/crypto/ascon.zig +++ b/lib/std/crypto/ascon.zig @@ -157,7 +157,7 @@ pub fn State(comptime endian: std.builtin.Endian) type { } /// Apply a reduced-round permutation to the state. - pub inline fn permuteR(state: *Self, comptime rounds: u4) void { + pub fn permuteR(state: *Self, comptime rounds: u4) void { const rks = [16]u64{ 0x3c, 0x2d, 0x1e, 0x0f, 0xf0, 0xe1, 0xd2, 0xc3, 0xb4, 0xa5, 0x96, 0x87, 0x78, 0x69, 0x5a, 0x4b }; inline for (rks[rks.len - rounds ..]) |rk| { state.round(rk); @@ -165,13 +165,13 @@ pub fn State(comptime endian: std.builtin.Endian) type { } /// Apply a full-round permutation to the state. - pub inline fn permute(state: *Self) void { + pub fn permute(state: *Self) void { state.permuteR(12); } /// Apply a permutation to the state and prevent backtracking. /// The rate is expressed in bytes and must be a multiple of the word size (8). - pub inline fn permuteRatchet(state: *Self, comptime rounds: u4, comptime rate: u6) void { + pub fn permuteRatchet(state: *Self, comptime rounds: u4, comptime rate: u6) void { const capacity = block_bytes - rate; debug.assert(capacity > 0 and capacity % 8 == 0); // capacity must be a multiple of 64 bits var mask: [capacity / 8]u64 = undefined; @@ -181,7 +181,7 @@ pub fn State(comptime endian: std.builtin.Endian) type { } // Core Ascon permutation. 
- inline fn round(state: *Self, rk: u64) void { + fn round(state: *Self, rk: u64) void { const x = &state.st; x[2] ^= rk; diff --git a/lib/std/crypto/blake3.zig b/lib/std/crypto/blake3.zig index eecb794c9d..bc771524e8 100644 --- a/lib/std/crypto/blake3.zig +++ b/lib/std/crypto/blake3.zig @@ -61,7 +61,7 @@ const CompressVectorized = struct { const Lane = @Vector(4, u32); const Rows = [4]Lane; - inline fn g(comptime even: bool, rows: *Rows, m: Lane) void { + fn g(comptime even: bool, rows: *Rows, m: Lane) void { rows[0] +%= rows[1] +% m; rows[3] ^= rows[0]; rows[3] = math.rotr(Lane, rows[3], if (even) 8 else 16); @@ -70,13 +70,13 @@ const CompressVectorized = struct { rows[1] = math.rotr(Lane, rows[1], if (even) 7 else 12); } - inline fn diagonalize(rows: *Rows) void { + fn diagonalize(rows: *Rows) void { rows[0] = @shuffle(u32, rows[0], undefined, [_]i32{ 3, 0, 1, 2 }); rows[3] = @shuffle(u32, rows[3], undefined, [_]i32{ 2, 3, 0, 1 }); rows[2] = @shuffle(u32, rows[2], undefined, [_]i32{ 1, 2, 3, 0 }); } - inline fn undiagonalize(rows: *Rows) void { + fn undiagonalize(rows: *Rows) void { rows[0] = @shuffle(u32, rows[0], undefined, [_]i32{ 1, 2, 3, 0 }); rows[3] = @shuffle(u32, rows[3], undefined, [_]i32{ 2, 3, 0, 1 }); rows[2] = @shuffle(u32, rows[2], undefined, [_]i32{ 3, 0, 1, 2 }); diff --git a/lib/std/crypto/chacha20.zig b/lib/std/crypto/chacha20.zig index c605a6cb34..c4e84aad53 100644 --- a/lib/std/crypto/chacha20.zig +++ b/lib/std/crypto/chacha20.zig @@ -151,7 +151,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type } } - inline fn chacha20Core(x: *BlockVec, input: BlockVec) void { + fn chacha20Core(x: *BlockVec, input: BlockVec) void { x.* = input; const m0 = switch (degree) { @@ -215,7 +215,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type } } - inline fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: BlockVec) void { + fn hashToBytes(comptime dm: usize, out: *[64 * dm]u8, x: BlockVec) void { for (0..dm) |d| { for (0..4) |i| { mem.writeInt(u32, out[64 * d + 16 * i + 0 ..][0..4], x[i][0 + 4 * d], .little); @@ -226,7 +226,7 @@ fn ChaChaVecImpl(comptime rounds_nb: usize, comptime degree: comptime_int) type } } - inline fn contextFeedback(x: *BlockVec, ctx: BlockVec) void { + fn contextFeedback(x: *BlockVec, ctx: BlockVec) void { x[0] +%= ctx[0]; x[1] +%= ctx[1]; x[2] +%= ctx[2]; @@ -365,7 +365,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type { }; } - inline fn chacha20Core(x: *BlockVec, input: BlockVec) void { + fn chacha20Core(x: *BlockVec, input: BlockVec) void { x.* = input; const rounds = comptime [_]QuarterRound{ @@ -394,7 +394,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type { } } - inline fn hashToBytes(out: *[64]u8, x: BlockVec) void { + fn hashToBytes(out: *[64]u8, x: BlockVec) void { for (0..4) |i| { mem.writeInt(u32, out[16 * i + 0 ..][0..4], x[i * 4 + 0], .little); mem.writeInt(u32, out[16 * i + 4 ..][0..4], x[i * 4 + 1], .little); @@ -403,7 +403,7 @@ fn ChaChaNonVecImpl(comptime rounds_nb: usize) type { } } - inline fn contextFeedback(x: *BlockVec, ctx: BlockVec) void { + fn contextFeedback(x: *BlockVec, ctx: BlockVec) void { for (0..16) |i| { x[i] +%= ctx[i]; } diff --git a/lib/std/crypto/codecs/base64_hex_ct.zig b/lib/std/crypto/codecs/base64_hex_ct.zig index 2a2a3c3005..3842e3d592 100644 --- a/lib/std/crypto/codecs/base64_hex_ct.zig +++ b/lib/std/crypto/codecs/base64_hex_ct.zig @@ -280,34 +280,34 @@ pub const base64 = struct { return DecoderWithIgnore{ .ignored_chars = 
ignored_chars }; } - inline fn eq(x: u8, y: u8) u8 { + fn eq(x: u8, y: u8) u8 { return ~@as(u8, @truncate((0 -% (@as(u16, x) ^ @as(u16, y))) >> 8)); } - inline fn gt(x: u8, y: u8) u8 { + fn gt(x: u8, y: u8) u8 { return @truncate((@as(u16, y) -% @as(u16, x)) >> 8); } - inline fn ge(x: u8, y: u8) u8 { + fn ge(x: u8, y: u8) u8 { return ~gt(y, x); } - inline fn lt(x: u8, y: u8) u8 { + fn lt(x: u8, y: u8) u8 { return gt(y, x); } - inline fn le(x: u8, y: u8) u8 { + fn le(x: u8, y: u8) u8 { return ge(y, x); } - inline fn charFromByte(x: u8, comptime urlsafe: bool) u8 { + fn charFromByte(x: u8, comptime urlsafe: bool) u8 { return (lt(x, 26) & (x +% 'A')) | (ge(x, 26) & lt(x, 52) & (x +% 'a' -% 26)) | (ge(x, 52) & lt(x, 62) & (x +% '0' -% 52)) | (eq(x, 62) & '+') | (eq(x, 63) & if (urlsafe) '_' else '/'); } - inline fn byteFromChar(c: u8, comptime urlsafe: bool) u8 { + fn byteFromChar(c: u8, comptime urlsafe: bool) u8 { const x = (ge(c, 'A') & le(c, 'Z') & (c -% 'A')) | (ge(c, 'a') & le(c, 'z') & (c -% 'a' +% 26)) | diff --git a/lib/std/crypto/ghash_polyval.zig b/lib/std/crypto/ghash_polyval.zig index cf93ae7ca5..45b2beb91b 100644 --- a/lib/std/crypto/ghash_polyval.zig +++ b/lib/std/crypto/ghash_polyval.zig @@ -89,7 +89,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { const Selector = enum { lo, hi, hi_lo }; // Carryless multiplication of two 64-bit integers for x86_64. - inline fn clmulPclmul(x: u128, y: u128, comptime half: Selector) u128 { + fn clmulPclmul(x: u128, y: u128, comptime half: Selector) u128 { switch (half) { .hi => { const product = asm ( @@ -122,7 +122,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { } // Carryless multiplication of two 64-bit integers for ARM crypto. - inline fn clmulPmull(x: u128, y: u128, comptime half: Selector) u128 { + fn clmulPmull(x: u128, y: u128, comptime half: Selector) u128 { switch (half) { .hi => { const product = asm ( @@ -231,7 +231,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { mid: u128, }; - inline fn xor256(x: *I256, y: I256) void { + fn xor256(x: *I256, y: I256) void { x.* = I256{ .hi = x.hi ^ y.hi, .lo = x.lo ^ y.lo, @@ -249,7 +249,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { } // Multiply two 128-bit integers in GF(2^128). - inline fn clmul128(x: u128, y: u128) I256 { + fn clmul128(x: u128, y: u128) I256 { if (mul_algorithm == .karatsuba) { const x_hi = @as(u64, @truncate(x >> 64)); const y_hi = @as(u64, @truncate(y >> 64)); @@ -273,7 +273,7 @@ fn Hash(comptime endian: std.builtin.Endian, comptime shift_key: bool) type { // Reduce a 256-bit representative of a polynomial modulo the irreducible polynomial x^128 + x^127 + x^126 + x^121 + 1. 
// This is done using Shay Gueron's black magic demysticated here: // https://blog.quarkslab.com/reversing-a-finite-field-multiplication-optimization.html - inline fn reduce(x: I256) u128 { + fn reduce(x: I256) u128 { const hi = x.hi ^ (x.mid >> 64); const lo = x.lo ^ (x.mid << 64); const p64 = (((1 << 121) | (1 << 126) | (1 << 127)) >> 64); diff --git a/lib/std/crypto/pcurves/p256/p256_64.zig b/lib/std/crypto/pcurves/p256/p256_64.zig index f3d38ca3e6..e79d51814e 100644 --- a/lib/std/crypto/pcurves/p256/p256_64.zig +++ b/lib/std/crypto/pcurves/p256/p256_64.zig @@ -72,7 +72,7 @@ pub const NonMontgomeryDomainFieldElement = [4]u64; /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) +% arg3 +% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -91,7 +91,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) -% arg3 -% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -109,7 +109,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0xffffffffffffffff] -inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { +fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); @@ -128,7 +128,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { /// arg3: [0x0 ~> 0xffffffffffffffff] /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] -inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { +fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { @setRuntimeSafety(mode == .Debug); const mask = 0 -% @as(u64, arg1); diff --git a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig index 736a3ea8b7..c549831b20 100644 --- a/lib/std/crypto/pcurves/p256/p256_scalar_64.zig +++ b/lib/std/crypto/pcurves/p256/p256_scalar_64.zig @@ -72,7 +72,7 @@ pub const NonMontgomeryDomainFieldElement = [4]u64; /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) +% arg3 +% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -91,7 +91,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) -% arg3 -% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -109,7 +109,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0xffffffffffffffff] -inline fn mulxU64(out1: *u64, 
out2: *u64, arg1: u64, arg2: u64) void { +fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); @@ -128,7 +128,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { /// arg3: [0x0 ~> 0xffffffffffffffff] /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] -inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { +fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { @setRuntimeSafety(mode == .Debug); const mask = 0 -% @as(u64, arg1); diff --git a/lib/std/crypto/pcurves/p384/p384_64.zig b/lib/std/crypto/pcurves/p384/p384_64.zig index e1419e7c81..d6f33028f7 100644 --- a/lib/std/crypto/pcurves/p384/p384_64.zig +++ b/lib/std/crypto/pcurves/p384/p384_64.zig @@ -41,7 +41,7 @@ pub const NonMontgomeryDomainFieldElement = [6]u64; /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) +% arg3 +% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -60,7 +60,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) -% arg3 -% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -78,7 +78,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0xffffffffffffffff] -inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { +fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); @@ -97,7 +97,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { /// arg3: [0x0 ~> 0xffffffffffffffff] /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] -inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { +fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { @setRuntimeSafety(mode == .Debug); const mask = 0 -% @as(u64, arg1); diff --git a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig index 68a0a0ca2f..74f7e7813f 100644 --- a/lib/std/crypto/pcurves/p384/p384_scalar_64.zig +++ b/lib/std/crypto/pcurves/p384/p384_scalar_64.zig @@ -41,7 +41,7 @@ pub const NonMontgomeryDomainFieldElement = [6]u64; /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) +% arg3 +% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -60,7 +60,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) -% arg3 -% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 
64); @@ -78,7 +78,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0xffffffffffffffff] -inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { +fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); @@ -97,7 +97,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { /// arg3: [0x0 ~> 0xffffffffffffffff] /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] -inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { +fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { @setRuntimeSafety(mode == .Debug); const mask = 0 -% @as(u64, arg1); diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig index 1c69b90eea..547da19bea 100644 --- a/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig +++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_64.zig @@ -41,7 +41,7 @@ pub const NonMontgomeryDomainFieldElement = [4]u64; /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) +% arg3 +% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -60,7 +60,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) -% arg3 -% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -78,7 +78,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0xffffffffffffffff] -inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { +fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); @@ -97,7 +97,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { /// arg3: [0x0 ~> 0xffffffffffffffff] /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] -inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { +fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { @setRuntimeSafety(mode == .Debug); const mask = 0 -% @as(u64, arg1); diff --git a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig index 97bf5f0a45..71e6f1baba 100644 --- a/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig +++ b/lib/std/crypto/pcurves/secp256k1/secp256k1_scalar_64.zig @@ -41,7 +41,7 @@ pub const NonMontgomeryDomainFieldElement = [4]u64; /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) +% arg3 +% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -60,7 +60,7 @@ inline fn addcarryxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) vo /// Output Bounds: /// out1: 
[0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0x1] -inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { +fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) void { const x = @as(u128, arg2) -% arg3 -% arg1; out1.* = @truncate(x); out2.* = @truncate(x >> 64); @@ -78,7 +78,7 @@ inline fn subborrowxU64(out1: *u64, out2: *u1, arg1: u1, arg2: u64, arg3: u64) v /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] /// out2: [0x0 ~> 0xffffffffffffffff] -inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { +fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { @setRuntimeSafety(mode == .Debug); const x = @as(u128, arg1) * @as(u128, arg2); @@ -97,7 +97,7 @@ inline fn mulxU64(out1: *u64, out2: *u64, arg1: u64, arg2: u64) void { /// arg3: [0x0 ~> 0xffffffffffffffff] /// Output Bounds: /// out1: [0x0 ~> 0xffffffffffffffff] -inline fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { +fn cmovznzU64(out1: *u64, arg1: u1, arg2: u64, arg3: u64) void { @setRuntimeSafety(mode == .Debug); const mask = 0 -% @as(u64, arg1); diff --git a/lib/std/crypto/poly1305.zig b/lib/std/crypto/poly1305.zig index 787f105904..5bf5a57428 100644 --- a/lib/std/crypto/poly1305.zig +++ b/lib/std/crypto/poly1305.zig @@ -31,13 +31,13 @@ pub const Poly1305 = struct { }; } - inline fn add(a: u64, b: u64, c: u1) struct { u64, u1 } { + fn add(a: u64, b: u64, c: u1) struct { u64, u1 } { const v1 = @addWithOverflow(a, b); const v2 = @addWithOverflow(v1[0], c); return .{ v2[0], v1[1] | v2[1] }; } - inline fn sub(a: u64, b: u64, c: u1) struct { u64, u1 } { + fn sub(a: u64, b: u64, c: u1) struct { u64, u1 } { const v1 = @subWithOverflow(a, b); const v2 = @subWithOverflow(v1[0], c); return .{ v2[0], v1[1] | v2[1] }; diff --git a/lib/std/crypto/salsa20.zig b/lib/std/crypto/salsa20.zig index 0660c5ba06..eb434ed15a 100644 --- a/lib/std/crypto/salsa20.zig +++ b/lib/std/crypto/salsa20.zig @@ -41,7 +41,7 @@ fn SalsaVecImpl(comptime rounds: comptime_int) type { }; } - inline fn salsaCore(x: *BlockVec, input: BlockVec, comptime feedback: bool) void { + fn salsaCore(x: *BlockVec, input: BlockVec, comptime feedback: bool) void { const n1n2n3n0 = Lane{ input[3][1], input[3][2], input[3][3], input[3][0] }; const n1n2 = Half{ n1n2n3n0[0], n1n2n3n0[1] }; const n3n0 = Half{ n1n2n3n0[2], n1n2n3n0[3] }; @@ -203,7 +203,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type { d: u6, }; - inline fn Rp(a: usize, b: usize, c: usize, d: u6) QuarterRound { + fn Rp(a: usize, b: usize, c: usize, d: u6) QuarterRound { return QuarterRound{ .a = a, .b = b, @@ -212,7 +212,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type { }; } - inline fn salsaCore(x: *BlockVec, input: BlockVec, comptime feedback: bool) void { + fn salsaCore(x: *BlockVec, input: BlockVec, comptime feedback: bool) void { const arx_steps = comptime [_]QuarterRound{ Rp(4, 0, 12, 7), Rp(8, 4, 0, 9), Rp(12, 8, 4, 13), Rp(0, 12, 8, 18), Rp(9, 5, 1, 7), Rp(13, 9, 5, 9), Rp(1, 13, 9, 13), Rp(5, 1, 13, 18), diff --git a/lib/std/crypto/tls.zig b/lib/std/crypto/tls.zig index 74113225cb..da6a431840 100644 --- a/lib/std/crypto/tls.zig +++ b/lib/std/crypto/tls.zig @@ -588,11 +588,11 @@ pub fn hmac(comptime Hmac: type, message: []const u8, key: [Hmac.key_length]u8) return result; } -pub inline fn extension(et: ExtensionType, bytes: anytype) [2 + 2 + bytes.len]u8 { +pub fn extension(et: ExtensionType, bytes: anytype) [2 + 2 + bytes.len]u8 { return int(u16, @intFromEnum(et)) ++ array(u16, u8, bytes); } -pub 
inline fn array( +pub fn array( comptime Len: type, comptime Elem: type, elems: anytype, @@ -617,7 +617,7 @@ pub inline fn array( return arr; } -pub inline fn int(comptime Int: type, val: Int) [@divExact(@bitSizeOf(Int), 8)]u8 { +pub fn int(comptime Int: type, val: Int) [@divExact(@bitSizeOf(Int), 8)]u8 { var arr: [@divExact(@bitSizeOf(Int), 8)]u8 = undefined; std.mem.writeInt(Int, &arr, val, .big); return arr; diff --git a/lib/std/crypto/tls/Client.zig b/lib/std/crypto/tls/Client.zig index d0cf762521..3fa7b73d06 100644 --- a/lib/std/crypto/tls/Client.zig +++ b/lib/std/crypto/tls/Client.zig @@ -1576,7 +1576,7 @@ fn straddleByte(s1: []const u8, s2: []const u8, index: usize) u8 { const builtin = @import("builtin"); const native_endian = builtin.cpu.arch.endian(); -inline fn big(x: anytype) @TypeOf(x) { +fn big(x: anytype) @TypeOf(x) { return switch (native_endian) { .big => x, .little => @byteSwap(x),