zig/lib/std/crypto/aes/aesni.zig
Andrew Kelley e7b18a7ce6 std.crypto: remove inline from most functions
To quote the language reference,

It is generally better to let the compiler decide when to inline a
function, except for these scenarios:

* To change how many stack frames are in the call stack, for debugging
  purposes.
* To force comptime-ness of the arguments to propagate to the return
  value of the function, as in the above example.
* Real world performance measurements demand it. Don't guess!

Note that inline actually restricts what the compiler is allowed to do.
This can harm binary size, compilation speed, and even runtime
performance.
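
When real-world measurements do single out a hot call, inlining can still be
requested at the call site rather than on the declaration; a minimal sketch
with hypothetical function names:

    fn round(x: u32) u32 {
        return x *% 0x9e3779b9;
    }

    fn hashOnce(x: u32) u32 {
        // Force inlining of this one call; other call sites are unaffected.
        return @call(.always_inline, round, .{x});
    }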

`zig run lib/std/crypto/benchmark.zig -OReleaseFast`
[-before-] vs {+after+}

              md5:        [-990-]        {+998+} MiB/s
             sha1:       [-1144-]       {+1140+} MiB/s
           sha256:       [-2267-]       {+2275+} MiB/s
           sha512:        [-762-]        {+767+} MiB/s
         sha3-256:        [-680-]        {+683+} MiB/s
         sha3-512:        [-362-]        {+363+} MiB/s
        shake-128:        [-835-]        {+839+} MiB/s
        shake-256:        [-680-]        {+681+} MiB/s
   turboshake-128:       [-1567-]       {+1570+} MiB/s
   turboshake-256:       [-1276-]       {+1282+} MiB/s
          blake2s:        [-778-]        {+789+} MiB/s
          blake2b:       [-1071-]       {+1086+} MiB/s
           blake3:       [-1148-]       {+1137+} MiB/s
            ghash:      [-10044-]      {+10033+} MiB/s
          polyval:       [-9726-]      {+10033+} MiB/s
         poly1305:       [-2486-]       {+2703+} MiB/s
         hmac-md5:        [-991-]        {+998+} MiB/s
        hmac-sha1:       [-1134-]       {+1137+} MiB/s
      hmac-sha256:       [-2265-]       {+2288+} MiB/s
      hmac-sha512:        [-765-]        {+764+} MiB/s
      siphash-2-4:       [-4410-]       {+4438+} MiB/s
      siphash-1-3:       [-7144-]       {+7225+} MiB/s
   siphash128-2-4:       [-4397-]       {+4449+} MiB/s
   siphash128-1-3:       [-7281-]       {+7374+} MiB/s
  aegis-128x4 mac:      [-73385-]      {+74523+} MiB/s
  aegis-256x4 mac:      [-30160-]      {+30539+} MiB/s
  aegis-128x2 mac:      [-66662-]      {+67267+} MiB/s
  aegis-256x2 mac:      [-16812-]      {+16806+} MiB/s
   aegis-128l mac:      [-33876-]      {+34055+} MiB/s
    aegis-256 mac:       [-8993-]       {+9087+} MiB/s
         aes-cmac:       2036 MiB/s
           x25519:      [-20670-]      {+16844+} exchanges/s
          ed25519:      [-29763-]      {+29576+} signatures/s
       ecdsa-p256:       [-4762-]       {+4900+} signatures/s
       ecdsa-p384:       [-1465-]       {+1500+} signatures/s
  ecdsa-secp256k1:       [-5643-]       {+5769+} signatures/s
          ed25519:      [-21926-]      {+21721+} verifications/s
          ed25519:      [-51200-]      {+50880+} verifications/s (batch)
 chacha20Poly1305:       [-1189-]       {+1109+} MiB/s
xchacha20Poly1305:       [-1196-]       {+1107+} MiB/s
 xchacha8Poly1305:       [-1466-]       {+1555+} MiB/s
 xsalsa20Poly1305:        [-660-]        {+620+} MiB/s
      aegis-128x4:      [-76389-]      {+78181+} MiB/s
      aegis-128x2:      [-53946-]      {+53495+} MiB/s
       aegis-128l:      [-27219-]      {+25621+} MiB/s
      aegis-256x4:      [-49351-]      {+49542+} MiB/s
      aegis-256x2:      [-32390-]      {+32366+} MiB/s
        aegis-256:       [-8881-]       {+8944+} MiB/s
       aes128-gcm:       [-6095-]       {+6205+} MiB/s
       aes256-gcm:       [-5306-]       {+5427+} MiB/s
       aes128-ocb:       [-8529-]      {+13974+} MiB/s
       aes256-ocb:       [-7241-]       {+9442+} MiB/s
        isapa128a:        [-204-]        {+214+} MiB/s
    aes128-single:  [-133857882-]  {+134170944+} ops/s
    aes256-single:   [-96306962-]   {+96408639+} ops/s
         aes128-8: [-1083210101-] {+1073727253+} ops/s
         aes256-8:  [-762042466-]  {+767091778+} ops/s
           bcrypt:      0.009 s/ops
           scrypt:      [-0.018-]      {+0.017+} s/ops
           argon2:      [-0.037-]      {+0.060+} s/ops
      kyber512d00:     [-206057-]     {+205779+} encaps/s
      kyber768d00:     [-156074-]     {+150711+} encaps/s
     kyber1024d00:     [-116626-]     {+115469+} encaps/s
      kyber512d00:     [-181149-]     {+182046+} decaps/s
      kyber768d00:     [-136965-]     {+135676+} decaps/s
     kyber1024d00:     [-101307-]     {+100643+} decaps/s
      kyber512d00:     [-123624-]     {+123375+} keygen/s
      kyber768d00:      [-69465-]      {+70828+} keygen/s
     kyber1024d00:      [-43117-]      {+43208+} keygen/s
2025-07-13 18:26:13 +02:00

const std = @import("../../std.zig");
const builtin = @import("builtin");
const mem = std.mem;
const debug = std.debug;
const has_vaes = builtin.cpu.arch == .x86_64 and builtin.cpu.has(.x86, .vaes);
const has_avx512f = builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_x86_64 and builtin.cpu.has(.x86, .avx512f);
/// A single AES block.
pub const Block = struct {
const Repr = @Vector(2, u64);
/// The length of an AES block in bytes.
pub const block_length: usize = 16;
/// Internal representation of a block.
repr: Repr,
/// Convert a byte sequence into an internal representation.
pub fn fromBytes(bytes: *const [16]u8) Block {
const repr = mem.bytesToValue(Repr, bytes);
return Block{ .repr = repr };
}
/// Convert the internal representation of a block into a byte sequence.
pub fn toBytes(block: Block) [16]u8 {
return mem.toBytes(block.repr);
}
/// XOR the block with a byte sequence.
pub fn xorBytes(block: Block, bytes: *const [16]u8) [16]u8 {
const x = block.repr ^ fromBytes(bytes).repr;
return mem.toBytes(x);
}
/// Encrypt a block with a round key.
pub fn encrypt(block: Block, round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesenc %[rk], %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block.repr),
[rk] "x" (round_key.repr),
),
};
}
/// Encrypt a block with the last round key.
pub fn encryptLast(block: Block, round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesenclast %[rk], %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block.repr),
[rk] "x" (round_key.repr),
),
};
}
/// Decrypt a block with a round key.
pub fn decrypt(block: Block, inv_round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesdec %[rk], %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block.repr),
[rk] "x" (inv_round_key.repr),
),
};
}
/// Decrypt a block with the last round key.
pub fn decryptLast(block: Block, inv_round_key: Block) Block {
return Block{
.repr = asm (
\\ vaesdeclast %[rk], %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block.repr),
[rk] "x" (inv_round_key.repr),
),
};
}
/// Apply the bitwise XOR operation to the content of two blocks.
pub fn xorBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr ^ block2.repr };
}
/// Apply the bitwise AND operation to the content of two blocks.
pub fn andBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr & block2.repr };
}
/// Apply the bitwise OR operation to the content of two blocks.
pub fn orBlocks(block1: Block, block2: Block) Block {
return Block{ .repr = block1.repr | block2.repr };
}
/// Perform operations on multiple blocks in parallel.
pub const parallel = struct {
const cpu = std.Target.x86.cpu;
/// The recommended number of AES encryptions/decryptions to perform in parallel for the chosen implementation.
pub const optimal_parallel_blocks = switch (builtin.cpu.model) {
&cpu.westmere, &cpu.goldmont => 3,
&cpu.cannonlake, &cpu.skylake, &cpu.skylake_avx512, &cpu.tremont, &cpu.goldmont_plus, &cpu.cascadelake => 4,
&cpu.icelake_client, &cpu.icelake_server, &cpu.tigerlake, &cpu.rocketlake, &cpu.alderlake => 6,
&cpu.haswell, &cpu.broadwell => 7,
&cpu.sandybridge, &cpu.ivybridge => 8,
&cpu.znver1, &cpu.znver2, &cpu.znver3, &cpu.znver4 => 8,
else => 8,
};
/// Encrypt multiple blocks in parallel, each with its own round key.
pub fn encryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].encrypt(round_keys[i]);
}
return out;
}
/// Decrypt multiple blocks in parallel, each with its own round key.
pub fn decryptParallel(comptime count: usize, blocks: [count]Block, round_keys: [count]Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].decrypt(round_keys[i]);
}
return out;
}
/// Encrypt multiple blocks in parallel with the same round key.
pub fn encryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].encrypt(round_key);
}
return out;
}
/// Decrypt multiple blocks in parallel with the same round key.
pub fn decryptWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].decrypt(round_key);
}
return out;
}
/// Encrypt multiple blocks in parallel with the same last round key.
pub fn encryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].encryptLast(round_key);
}
return out;
}
/// Decrypt multiple blocks in parallel with the same last round key.
pub fn decryptLastWide(comptime count: usize, blocks: [count]Block, round_key: Block) [count]Block {
comptime var i = 0;
var out: [count]Block = undefined;
inline while (i < count) : (i += 1) {
out[i] = blocks[i].decryptLast(round_key);
}
return out;
}
};
};
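// A minimal usage sketch, not part of the original file: bytes round-trip
// through a `Block`, and `parallel.encryptWide` agrees with per-block `encrypt`
// when every lane uses the same round key. All byte values are arbitrary.
test "Block: byte round-trip and wide encryption consistency" {
    const bytes = [_]u8{0x42} ** 16;
    const block = Block.fromBytes(&bytes);
    const back = block.toBytes();
    try std.testing.expectEqualSlices(u8, &bytes, &back);

    const rk_bytes = [_]u8{0x01} ** 16;
    const rk = Block.fromBytes(&rk_bytes);
    const n = Block.parallel.optimal_parallel_blocks;
    var blocks: [n]Block = undefined;
    for (&blocks) |*b| b.* = block;
    const wide = Block.parallel.encryptWide(n, blocks, rk);
    const expected = block.encrypt(rk).toBytes();
    for (wide) |b| {
        const got = b.toBytes();
        try std.testing.expectEqualSlices(u8, &expected, &got);
    }
}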
/// A fixed-size vector of AES blocks.
/// All operations are performed in parallel, using SIMD instructions when available.
pub fn BlockVec(comptime blocks_count: comptime_int) type {
return struct {
const Self = @This();
/// The number of AES blocks the target architecture can process with a single instruction.
pub const native_vector_size = w: {
if (has_avx512f and blocks_count % 4 == 0) break :w 4;
if (has_vaes and blocks_count % 2 == 0) break :w 2;
break :w 1;
};
/// The size of the AES block vector that the target architecture can process with a single instruction, in bytes.
pub const native_word_size = native_vector_size * 16;
const native_words = blocks_count / native_vector_size;
const Repr = @Vector(native_vector_size * 2, u64);
/// Internal representation of a block vector.
repr: [native_words]Repr,
/// Length of the block vector in bytes.
pub const block_length: usize = blocks_count * 16;
/// Convert a byte sequence into an internal representation.
pub fn fromBytes(bytes: *const [blocks_count * 16]u8) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = mem.bytesToValue(Repr, bytes[i * native_word_size ..][0..native_word_size]);
}
return out;
}
/// Convert the internal representation of a block vector into a byte sequence.
pub fn toBytes(block_vec: Self) [blocks_count * 16]u8 {
var out: [blocks_count * 16]u8 = undefined;
inline for (0..native_words) |i| {
out[i * native_word_size ..][0..native_word_size].* = mem.toBytes(block_vec.repr[i]);
}
return out;
}
/// XOR the block vector with a byte sequence.
pub fn xorBytes(block_vec: Self, bytes: *const [blocks_count * 16]u8) [blocks_count * 16]u8 {
var x: Self = undefined;
inline for (0..native_words) |i| {
x.repr[i] = block_vec.repr[i] ^ mem.bytesToValue(Repr, bytes[i * native_word_size ..][0..native_word_size]);
}
return x.toBytes();
}
/// Apply the forward AES operation to the block vector with a vector of round keys.
pub fn encrypt(block_vec: Self, round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = asm (
\\ vaesenc %[rk], %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block_vec.repr[i]),
[rk] "x" (round_key_vec.repr[i]),
);
}
return out;
}
/// Apply the forward AES operation to the block vector with a vector of last round keys.
pub fn encryptLast(block_vec: Self, round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = asm (
\\ vaesenclast %[rk], %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block_vec.repr[i]),
[rk] "x" (round_key_vec.repr[i]),
);
}
return out;
}
/// Apply the inverse AES operation to the block vector with a vector of round keys.
pub fn decrypt(block_vec: Self, inv_round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = asm (
\\ vaesdec %[rk], %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block_vec.repr[i]),
[rk] "x" (inv_round_key_vec.repr[i]),
);
}
return out;
}
/// Apply the inverse AES operation to the block vector with a vector of last round keys.
pub fn decryptLast(block_vec: Self, inv_round_key_vec: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = asm (
\\ vaesdeclast %[rk], %[in], %[out]
: [out] "=x" (-> Repr),
: [in] "x" (block_vec.repr[i]),
[rk] "x" (inv_round_key_vec.repr[i]),
);
}
return out;
}
/// Apply the bitwise XOR operation to the content of two block vectors.
pub fn xorBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i] ^ block_vec2.repr[i];
}
return out;
}
/// Apply the bitwise AND operation to the content of two block vectors.
pub fn andBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i] & block_vec2.repr[i];
}
return out;
}
/// Apply the bitwise OR operation to the content of two block vectors.
pub fn orBlocks(block_vec1: Self, block_vec2: Self) Self {
var out: Self = undefined;
inline for (0..native_words) |i| {
out.repr[i] = block_vec1.repr[i] | block_vec2.repr[i];
}
return out;
}
};
}
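// A minimal usage sketch, not part of the original file: a `BlockVec`
// round-trips through bytes, and XORing a vector with itself yields zero.
test "BlockVec: byte round-trip and self-XOR" {
    const V = BlockVec(4);
    const bytes = [_]u8{0xaa} ** V.block_length;
    const v = V.fromBytes(&bytes);
    const back = v.toBytes();
    try std.testing.expectEqualSlices(u8, &bytes, &back);

    const zeros = [_]u8{0x00} ** V.block_length;
    const xored = v.xorBlocks(v).toBytes();
    try std.testing.expectEqualSlices(u8, &zeros, &xored);
}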
fn KeySchedule(comptime Aes: type) type {
std.debug.assert(Aes.rounds == 10 or Aes.rounds == 14);
const rounds = Aes.rounds;
return struct {
const Self = @This();
const Repr = Aes.block.Repr;
round_keys: [rounds + 1]Block,
fn drc(comptime second: bool, comptime rc: u8, t: Repr, tx: Repr) Repr {
var s: Repr = undefined;
var ts: Repr = undefined;
return asm (
\\ vaeskeygenassist %[rc], %[t], %[s]
\\ vpslldq $4, %[tx], %[ts]
\\ vpxor %[ts], %[tx], %[r]
\\ vpslldq $8, %[r], %[ts]
\\ vpxor %[ts], %[r], %[r]
\\ vpshufd %[mask], %[s], %[ts]
\\ vpxor %[ts], %[r], %[r]
: [r] "=&x" (-> Repr),
[s] "=&x" (s),
[ts] "=&x" (ts),
: [rc] "n" (rc),
[t] "x" (t),
[tx] "x" (tx),
[mask] "n" (@as(u8, if (second) 0xaa else 0xff)),
);
}
fn expand128(t1: *Block) Self {
var round_keys: [11]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32, 64, 128, 27, 54 };
inline for (rcs, 0..) |rc, round| {
round_keys[round] = t1.*;
t1.repr = drc(false, rc, t1.repr, t1.repr);
}
round_keys[rcs.len] = t1.*;
return Self{ .round_keys = round_keys };
}
fn expand256(t1: *Block, t2: *Block) Self {
var round_keys: [15]Block = undefined;
const rcs = [_]u8{ 1, 2, 4, 8, 16, 32 };
round_keys[0] = t1.*;
inline for (rcs, 0..) |rc, round| {
round_keys[round * 2 + 1] = t2.*;
t1.repr = drc(false, rc, t2.repr, t1.repr);
round_keys[round * 2 + 2] = t1.*;
t2.repr = drc(true, rc, t1.repr, t2.repr);
}
round_keys[rcs.len * 2 + 1] = t2.*;
t1.repr = drc(false, 64, t2.repr, t1.repr);
round_keys[rcs.len * 2 + 2] = t1.*;
return Self{ .round_keys = round_keys };
}
/// Invert the key schedule for decryption (the equivalent inverse cipher):
/// the round keys are reversed and `vaesimc` (InvMixColumns) is applied to the
/// interior keys so they can be used with `vaesdec`/`vaesdeclast`.
pub fn invert(key_schedule: Self) Self {
const round_keys = &key_schedule.round_keys;
var inv_round_keys: [rounds + 1]Block = undefined;
inv_round_keys[0] = round_keys[rounds];
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
inv_round_keys[i] = Block{
.repr = asm (
\\ vaesimc %[rk], %[inv_rk]
: [inv_rk] "=x" (-> Repr),
: [rk] "x" (round_keys[rounds - i].repr),
),
};
}
inv_round_keys[rounds] = round_keys[0];
return Self{ .round_keys = inv_round_keys };
}
};
}
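// A minimal sketch, not part of the original file: for AES-128 the first round
// key stored by `expand128` is the raw key itself; the remaining ten are
// derived from it via `drc`. The key bytes below are arbitrary.
test "KeySchedule: first AES-128 round key is the raw key" {
    const key = [_]u8{0x2b} ** 16;
    var t1 = Block.fromBytes(&key);
    const ks = KeySchedule(Aes128).expand128(&t1);
    const rk0 = ks.round_keys[0].toBytes();
    try std.testing.expectEqualSlices(u8, &key, &rk0);
}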
/// A context to perform encryption using the standard AES key schedule.
pub fn AesEncryptCtx(comptime Aes: type) type {
std.debug.assert(Aes.key_bits == 128 or Aes.key_bits == 256);
const rounds = Aes.rounds;
return struct {
const Self = @This();
pub const block = Aes.block;
pub const block_length = block.block_length;
key_schedule: KeySchedule(Aes),
/// Create a new encryption context with the given key.
pub fn init(key: [Aes.key_bits / 8]u8) Self {
var t1 = Block.fromBytes(key[0..16]);
const key_schedule = if (Aes.key_bits == 128) ks: {
break :ks KeySchedule(Aes).expand128(&t1);
} else ks: {
var t2 = Block.fromBytes(key[16..32]);
break :ks KeySchedule(Aes).expand256(&t1, &t2);
};
return Self{
.key_schedule = key_schedule,
};
}
/// Encrypt a single block.
pub fn encrypt(ctx: Self, dst: *[16]u8, src: *const [16]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(src).xorBlocks(round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.encrypt(round_keys[i]);
}
t = t.encryptLast(round_keys[rounds]);
dst.* = t.toBytes();
}
/// Encrypt+XOR a single block.
pub fn xor(ctx: Self, dst: *[16]u8, src: *const [16]u8, counter: [16]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(&counter).xorBlocks(round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.encrypt(round_keys[i]);
}
t = t.encryptLast(round_keys[rounds]);
dst.* = t.xorBytes(src);
}
/// Encrypt multiple blocks, possibly leveraging parallelization.
pub fn encryptWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var ts: [count]Block = undefined;
comptime var j = 0;
inline while (j < count) : (j += 1) {
ts[j] = Block.fromBytes(src[j * 16 .. j * 16 + 16][0..16]).xorBlocks(round_keys[0]);
}
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
ts = Block.parallel.encryptWide(count, ts, round_keys[i]);
}
ts = Block.parallel.encryptLastWide(count, ts, round_keys[i]);
j = 0;
inline while (j < count) : (j += 1) {
dst[16 * j .. 16 * j + 16].* = ts[j].toBytes();
}
}
/// Encrypt+XOR multiple blocks, possibly leveraging parallelization.
pub fn xorWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8, counters: [16 * count]u8) void {
const round_keys = ctx.key_schedule.round_keys;
var ts: [count]Block = undefined;
comptime var j = 0;
inline while (j < count) : (j += 1) {
ts[j] = Block.fromBytes(counters[j * 16 .. j * 16 + 16][0..16]).xorBlocks(round_keys[0]);
}
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
ts = Block.parallel.encryptWide(count, ts, round_keys[i]);
}
ts = Block.parallel.encryptLastWide(count, ts, round_keys[i]);
j = 0;
inline while (j < count) : (j += 1) {
dst[16 * j .. 16 * j + 16].* = ts[j].xorBytes(src[16 * j .. 16 * j + 16]);
}
}
};
}
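// A minimal usage sketch, not part of the original file: encrypt one block, and
// show that `xor` is simply the block cipher applied to a counter block with
// the resulting keystream XORed into the source (the CTR building block).
// Key, plaintext and counter values are arbitrary.
test "AesEncryptCtx: encrypt and xor" {
    const key = [_]u8{0x2b} ** 16;
    const ctx = Aes128.initEnc(key);

    const plaintext = [_]u8{0x55} ** 16;
    var ciphertext: [16]u8 = undefined;
    ctx.encrypt(&ciphertext, &plaintext);

    const counter = [_]u8{0x00} ** 16;
    var keystream: [16]u8 = undefined;
    ctx.encrypt(&keystream, &counter);

    var xored: [16]u8 = undefined;
    ctx.xor(&xored, &plaintext, counter);
    for (xored, keystream, plaintext) |x, k, p| {
        try std.testing.expectEqual(k ^ p, x);
    }
}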
/// A context to perform decryption using the standard AES key schedule.
pub fn AesDecryptCtx(comptime Aes: type) type {
std.debug.assert(Aes.key_bits == 128 or Aes.key_bits == 256);
const rounds = Aes.rounds;
return struct {
const Self = @This();
pub const block = Aes.block;
pub const block_length = block.block_length;
key_schedule: KeySchedule(Aes),
/// Create a decryption context from an existing encryption context.
pub fn initFromEnc(ctx: AesEncryptCtx(Aes)) Self {
return Self{
.key_schedule = ctx.key_schedule.invert(),
};
}
/// Create a new decryption context with the given key.
pub fn init(key: [Aes.key_bits / 8]u8) Self {
const enc_ctx = AesEncryptCtx(Aes).init(key);
return initFromEnc(enc_ctx);
}
/// Decrypt a single block.
pub fn decrypt(ctx: Self, dst: *[16]u8, src: *const [16]u8) void {
const inv_round_keys = ctx.key_schedule.round_keys;
var t = Block.fromBytes(src).xorBlocks(inv_round_keys[0]);
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
t = t.decrypt(inv_round_keys[i]);
}
t = t.decryptLast(inv_round_keys[rounds]);
dst.* = t.toBytes();
}
/// Decrypt multiple blocks, possibly leveraging parallelization.
pub fn decryptWide(ctx: Self, comptime count: usize, dst: *[16 * count]u8, src: *const [16 * count]u8) void {
const inv_round_keys = ctx.key_schedule.round_keys;
var ts: [count]Block = undefined;
comptime var j = 0;
inline while (j < count) : (j += 1) {
ts[j] = Block.fromBytes(src[j * 16 .. j * 16 + 16][0..16]).xorBlocks(inv_round_keys[0]);
}
comptime var i = 1;
inline while (i < rounds) : (i += 1) {
ts = Block.parallel.decryptWide(count, ts, inv_round_keys[i]);
}
ts = Block.parallel.decryptLastWide(count, ts, inv_round_keys[i]);
j = 0;
inline while (j < count) : (j += 1) {
dst[16 * j .. 16 * j + 16].* = ts[j].toBytes();
}
}
};
}
/// AES-128 with the standard key schedule.
pub const Aes128 = struct {
pub const key_bits: usize = 128;
pub const rounds = ((key_bits - 64) / 32 + 8);
pub const block = Block;
/// Create a new context for encryption.
pub fn initEnc(key: [key_bits / 8]u8) AesEncryptCtx(Aes128) {
return AesEncryptCtx(Aes128).init(key);
}
/// Create a new context for decryption.
pub fn initDec(key: [key_bits / 8]u8) AesDecryptCtx(Aes128) {
return AesDecryptCtx(Aes128).init(key);
}
};
/// AES-256 with the standard key schedule.
pub const Aes256 = struct {
pub const key_bits: usize = 256;
pub const rounds = ((key_bits - 64) / 32 + 8);
pub const block = Block;
/// Create a new context for encryption.
pub fn initEnc(key: [key_bits / 8]u8) AesEncryptCtx(Aes256) {
return AesEncryptCtx(Aes256).init(key);
}
/// Create a new context for decryption.
pub fn initDec(key: [key_bits / 8]u8) AesDecryptCtx(Aes256) {
return AesDecryptCtx(Aes256).init(key);
}
};
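// A minimal round-trip sketch, not part of the original file: decryption
// contexts (derived from an encryption context or directly from the key)
// invert `encrypt` for both key sizes. Key and plaintext bytes are arbitrary.
test "Aes128/Aes256: encrypt/decrypt round trip" {
    const plaintext = [_]u8{0xa5} ** 16;
    var ct: [16]u8 = undefined;
    var pt: [16]u8 = undefined;

    const key128 = [_]u8{0x01} ** 16;
    const enc128 = Aes128.initEnc(key128);
    const dec128 = AesDecryptCtx(Aes128).initFromEnc(enc128);
    enc128.encrypt(&ct, &plaintext);
    dec128.decrypt(&pt, &ct);
    try std.testing.expectEqualSlices(u8, &plaintext, &pt);

    const key256 = [_]u8{0x02} ** 32;
    const enc256 = Aes256.initEnc(key256);
    const dec256 = Aes256.initDec(key256);
    enc256.encrypt(&ct, &plaintext);
    dec256.decrypt(&pt, &ct);
    try std.testing.expectEqualSlices(u8, &plaintext, &pt);
}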