std.crypto: Add isComptime guard around intrinsics

Comptime code can't execute assembly, so we need some way to force
comptime code to use the generic path. This should be replaced with
whatever is implemented for #868, when that day comes.
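
For review context, the workaround relies on a quirk of anonymous struct
literals: when the surrounding code is being comptime-evaluated, the
captured value is comptime-known and the resulting tuple field is marked
is_comptime. A minimal sketch of that behaviour (the test name and
assertions are illustrative, assuming a compiler from around this
commit's era):

    const std = @import("std");

    // At comptime, `a` is comptime-known, so the anonymous tuple `.{a}`
    // stores it as a comptime field; at runtime it does not.
    fn isComptime() bool {
        var a: u8 = 0;
        return @typeInfo(@TypeOf(.{a})).Struct.fields[0].is_comptime;
    }

    test "isComptime distinguishes comptime from runtime" {
        try std.testing.expect(comptime isComptime()); // comptime call: true
        try std.testing.expect(!isComptime()); // runtime call: false
    }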

I'm seeing that the hash result is incorrect in stage1 and crashes
stage2, so presumably this never worked correctly. I will follow up
on that soon.
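
For reference, the affected scenario is presumably comptime hashing along
the lines of the sketch below (a hypothetical snippet, not part of this
change; the input, test name, and branch-quota value are made up). With
the guard in place, such evaluation is forced onto the generic
implementation instead of the assembly paths.

    const std = @import("std");
    const Sha256 = std.crypto.hash.sha2.Sha256;

    test "comptime Sha256" {
        const digest = comptime blk: {
            @setEvalBranchQuota(100_000); // the generic rounds need extra quota
            var out: [Sha256.digest_length]u8 = undefined;
            Sha256.hash("hello", &out, .{});
            break :blk out;
        };
        try std.testing.expect(digest.len == Sha256.digest_length);
    }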
Cody Tapscott 2022-10-28 08:24:12 -07:00
parent 4c1f71e866
commit f9fe548e41


@@ -71,6 +71,12 @@ const Sha256Params = Sha2Params32{
 const v4u32 = @Vector(4, u32);
+// TODO: Remove once https://github.com/ziglang/zig/issues/868 is resolved.
+fn isComptime() bool {
+    var a: u8 = 0;
+    return @typeInfo(@TypeOf(.{a})).Struct.fields[0].is_comptime;
+}
 /// SHA-224
 pub const Sha224 = Sha2x32(Sha224Params);
@@ -187,7 +193,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
             }
             switch (builtin.cpu.arch) {
-                .aarch64 => if (comptime builtin.cpu.features.isEnabled(@enumToInt(std.Target.aarch64.Feature.sha2))) {
+                .aarch64 => if (!isComptime() and comptime builtin.cpu.features.isEnabled(@enumToInt(std.Target.aarch64.Feature.sha2))) {
                     var x: v4u32 = d.s[0..4].*;
                     var y: v4u32 = d.s[4..8].*;
                     const s_v = @ptrCast(*[16]v4u32, &s);
@@ -224,7 +230,7 @@ fn Sha2x32(comptime params: Sha2Params32) type {
                     d.s[4..8].* = y +% @as(v4u32, d.s[4..8].*);
                     return;
                 },
-                .x86_64 => if (comptime builtin.cpu.features.isEnabled(@enumToInt(std.Target.x86.Feature.sha))) {
+                .x86_64 => if (!isComptime() and comptime builtin.cpu.features.isEnabled(@enumToInt(std.Target.x86.Feature.sha))) {
                     var x: v4u32 = [_]u32{ d.s[5], d.s[4], d.s[1], d.s[0] };
                     var y: v4u32 = [_]u32{ d.s[7], d.s[6], d.s[3], d.s[2] };
                     const s_v = @ptrCast(*[16]v4u32, &s);