Mirror of https://github.com/ziglang/zig.git (synced 2025-12-06 06:13:07 +00:00)

commit fe93332ba2 (parent 794dc694b1)

x86_64: implement enough to pass unicode tests

* implement vector comparison
* implement reduce for bool vectors
* fix `@memcpy` bug
* enable passing std tests
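Vector comparison and a boolean `@reduce` are the operations named in the commit message. As a rough, illustrative sketch of the kind of code the backend now has to lower (not taken from this commit; the test below is made up for illustration):

```zig
const std = @import("std");
const expect = std.testing.expect;

test "vector comparison and bool-vector reduce" {
    // Element-wise comparison of two vectors yields a vector of bools.
    const a: @Vector(4, u8) = .{ 'a', 'b', 'c', 'd' };
    const b: @Vector(4, u8) = .{ 'a', 'x', 'c', 'y' };
    const eq: @Vector(4, bool) = a == b;

    // @reduce folds the bool vector with a horizontal operator.
    try expect(@reduce(.Or, eq)); // at least one lane matched
    try expect(!@reduce(.And, eq)); // but not every lane
}
```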
@@ -2156,5 +2156,6 @@ pub fn hex64(x: u64) [16]u8 {
}

test {
    _ = Cache;
    _ = Step;
}

@@ -1,5 +1,4 @@
const std = @import("std.zig");
const builtin = @import("builtin");
const debug = std.debug;
const assert = debug.assert;
const testing = std.testing;

@@ -2138,8 +2137,6 @@ test "ensure capacity leak" {
}

test "big map" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

@@ -2193,8 +2190,6 @@ test "big map" {
}

test "clone" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    var original = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer original.deinit();

@@ -2221,8 +2216,6 @@ test "clone" {
}

test "shrink" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

@@ -2263,8 +2256,6 @@ test "shrink" {
}

test "pop" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

@@ -2283,8 +2274,6 @@ test "pop" {
}

test "popOrNull" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

@@ -2305,7 +2294,7 @@ test "popOrNull" {
}

test "reIndex" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;

    var map = ArrayHashMap(i32, i32, AutoContext(i32), true).init(std.testing.allocator);
    defer map.deinit();
@@ -2351,8 +2340,6 @@ test "auto store_hash" {
}

test "sort" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    var map = AutoArrayHashMap(i32, i32).init(std.testing.allocator);
    defer map.deinit();

@ -1,5 +1,4 @@
|
||||
const std = @import("std.zig");
|
||||
const builtin = @import("builtin");
|
||||
const debug = std.debug;
|
||||
const assert = debug.assert;
|
||||
const testing = std.testing;
|
||||
@ -1184,8 +1183,6 @@ test "std.ArrayList/ArrayListUnmanaged.initCapacity" {
|
||||
}
|
||||
|
||||
test "std.ArrayList/ArrayListUnmanaged.clone" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const a = testing.allocator;
|
||||
{
|
||||
var array = ArrayList(i32).init(a);
|
||||
@ -1227,8 +1224,6 @@ test "std.ArrayList/ArrayListUnmanaged.clone" {
|
||||
}
|
||||
|
||||
test "std.ArrayList/ArrayListUnmanaged.basic" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const a = testing.allocator;
|
||||
{
|
||||
var list = ArrayList(i32).init(a);
|
||||
@ -1513,8 +1508,6 @@ test "std.ArrayList/ArrayListUnmanaged.insert" {
|
||||
}
|
||||
|
||||
test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const a = testing.allocator;
|
||||
{
|
||||
var list = ArrayList(i32).init(a);
|
||||
@ -1561,8 +1554,6 @@ test "std.ArrayList/ArrayListUnmanaged.insertSlice" {
|
||||
}
|
||||
|
||||
test "std.ArrayList/ArrayListUnmanaged.replaceRange" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var arena = std.heap.ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
const a = arena.allocator();
|
||||
@ -1734,8 +1725,6 @@ test "shrink still sets length when resizing is disabled" {
|
||||
}
|
||||
|
||||
test "shrinkAndFree with a copy" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var failing_allocator = testing.FailingAllocator.init(testing.allocator, .{ .resize_fail_index = 0 });
|
||||
const a = failing_allocator.allocator();
|
||||
|
||||
|
||||
@@ -1637,10 +1637,8 @@ fn testStaticBitSet(comptime Set: type) !void {
}

test "IntegerBitSet" {
    switch (builtin.zig_backend) {
        .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
        else => {},
    }
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    try testStaticBitSet(IntegerBitSet(0));
    try testStaticBitSet(IntegerBitSet(1));

@@ -1653,10 +1651,7 @@ test "IntegerBitSet" {
}

test "ArrayBitSet" {
    switch (builtin.zig_backend) {
        .stage2_x86_64 => return error.SkipZigTest,
        else => {},
    }
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    inline for (.{ 0, 1, 2, 31, 32, 33, 63, 64, 65, 254, 500, 3000 }) |size| {
        try testStaticBitSet(ArrayBitSet(u8, size));
@ -1668,8 +1663,6 @@ test "ArrayBitSet" {
|
||||
}
|
||||
|
||||
test "DynamicBitSetUnmanaged" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const allocator = std.testing.allocator;
|
||||
var a = try DynamicBitSetUnmanaged.initEmpty(allocator, 300);
|
||||
try testing.expectEqual(@as(usize, 0), a.count());
|
||||
@ -1723,8 +1716,6 @@ test "DynamicBitSetUnmanaged" {
|
||||
}
|
||||
|
||||
test "DynamicBitSet" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const allocator = std.testing.allocator;
|
||||
var a = try DynamicBitSet.initEmpty(allocator, 300);
|
||||
try testing.expectEqual(@as(usize, 0), a.count());
|
||||
|
||||
@ -622,8 +622,6 @@ pub fn strVerify(
|
||||
}
|
||||
|
||||
test "argon2d" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const password = [_]u8{0x01} ** 32;
|
||||
const salt = [_]u8{0x02} ** 16;
|
||||
const secret = [_]u8{0x03} ** 8;
|
||||
@ -649,8 +647,6 @@ test "argon2d" {
|
||||
}
|
||||
|
||||
test "argon2i" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const password = [_]u8{0x01} ** 32;
|
||||
const salt = [_]u8{0x02} ** 16;
|
||||
const secret = [_]u8{0x03} ** 8;
|
||||
@ -676,8 +672,6 @@ test "argon2i" {
|
||||
}
|
||||
|
||||
test "argon2id" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const password = [_]u8{0x01} ** 32;
|
||||
const salt = [_]u8{0x02} ** 16;
|
||||
const secret = [_]u8{0x03} ** 8;
|
||||
@ -703,8 +697,6 @@ test "argon2id" {
|
||||
}
|
||||
|
||||
test "kdf" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const password = "password";
|
||||
const salt = "somesalt";
|
||||
|
||||
@ -936,8 +928,6 @@ test "password hash and password verify" {
|
||||
}
|
||||
|
||||
test "kdf derived key length" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const allocator = std.testing.allocator;
|
||||
|
||||
const password = "testpass";
|
||||
|
||||
@ -682,8 +682,6 @@ fn testBlake3(hasher: *Blake3, input_len: usize, expected_hex: [262]u8) !void {
|
||||
}
|
||||
|
||||
test "BLAKE3 reference test cases" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var hash_state = Blake3.init(.{});
|
||||
const hash = &hash_state;
|
||||
var keyed_hash_state = Blake3.init(.{ .key = reference_test.key.* });
|
||||
|
||||
@ -759,8 +759,6 @@ fn XChaChaPoly1305(comptime rounds_nb: usize) type {
|
||||
}
|
||||
|
||||
test "chacha20 AEAD API" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const aeads = [_]type{ ChaCha20Poly1305, XChaCha20Poly1305 };
|
||||
const m = "Ladies and Gentlemen of the class of '99: If I could offer you only one tip for the future, sunscreen would be it.";
|
||||
const ad = "Additional data";
|
||||
@ -782,8 +780,6 @@ test "chacha20 AEAD API" {
|
||||
|
||||
// https://tools.ietf.org/html/rfc7539#section-2.4.2
|
||||
test "crypto.chacha20 test vector sunscreen" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const expected_result = [_]u8{
|
||||
0x6e, 0x2e, 0x35, 0x9a, 0x25, 0x68, 0xf9, 0x80,
|
||||
0x41, 0xba, 0x07, 0x28, 0xdd, 0x0d, 0x69, 0x81,
|
||||
|
||||
@ -65,8 +65,6 @@ pub const Sha384oSha384 = Composition(sha2.Sha384, sha2.Sha384);
|
||||
pub const Sha512oSha512 = Composition(sha2.Sha512, sha2.Sha512);
|
||||
|
||||
test "Hash composition" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const Sha256 = sha2.Sha256;
|
||||
const msg = "test";
|
||||
|
||||
|
||||
@ -72,8 +72,6 @@ pub fn Hkdf(comptime Hmac: type) type {
|
||||
const htest = @import("test.zig");
|
||||
|
||||
test "Hkdf" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const ikm = [_]u8{0x0b} ** 22;
|
||||
const salt = [_]u8{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c };
|
||||
const context = [_]u8{ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9 };
|
||||
|
||||
@ -553,8 +553,6 @@ const inv_ntt_reductions = [_]i16{
|
||||
};
|
||||
|
||||
test "invNTTReductions bounds" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
// Checks whether the reductions proposed by invNTTReductions
|
||||
// don't overflow during invNTT().
|
||||
var xs = [_]i32{1} ** 256; // start at |x| ≤ q
|
||||
@ -658,8 +656,6 @@ fn montReduce(x: i32) i16 {
|
||||
}
|
||||
|
||||
test "Test montReduce" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var rnd = RndGen.init(0);
|
||||
for (0..1000) |_| {
|
||||
const bound = comptime @as(i32, Q) * (1 << 15);
|
||||
@ -678,8 +674,6 @@ fn feToMont(x: i16) i16 {
|
||||
}
|
||||
|
||||
test "Test feToMont" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var x: i32 = -(1 << 15);
|
||||
while (x < 1 << 15) : (x += 1) {
|
||||
const y = feToMont(@as(i16, @intCast(x)));
|
||||
@ -713,8 +707,6 @@ fn feBarrettReduce(x: i16) i16 {
|
||||
}
|
||||
|
||||
test "Test Barrett reduction" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var x: i32 = -(1 << 15);
|
||||
while (x < 1 << 15) : (x += 1) {
|
||||
var y1 = feBarrettReduce(@as(i16, @intCast(x)));
|
||||
@ -735,8 +727,6 @@ fn csubq(x: i16) i16 {
|
||||
}
|
||||
|
||||
test "Test csubq" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var x: i32 = -29439;
|
||||
while (x < 1 << 15) : (x += 1) {
|
||||
const y1 = csubq(@as(i16, @intCast(x)));
|
||||
@ -1476,8 +1466,6 @@ fn cmov(comptime len: usize, dst: *[len]u8, src: [len]u8, b: u1) void {
|
||||
}
|
||||
|
||||
test "MulHat" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var rnd = RndGen.init(0);
|
||||
|
||||
for (0..100) |_| {
|
||||
@ -1509,8 +1497,6 @@ test "MulHat" {
|
||||
}
|
||||
|
||||
test "NTT" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var rnd = RndGen.init(0);
|
||||
|
||||
for (0..1000) |_| {
|
||||
@ -1534,8 +1520,6 @@ test "NTT" {
|
||||
}
|
||||
|
||||
test "Compression" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var rnd = RndGen.init(0);
|
||||
inline for (.{ 1, 4, 5, 10, 11 }) |d| {
|
||||
for (0..1000) |_| {
|
||||
@ -1548,8 +1532,6 @@ test "Compression" {
|
||||
}
|
||||
|
||||
test "noise" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var seed: [32]u8 = undefined;
|
||||
for (&seed, 0..) |*s, i| {
|
||||
s.* = @as(u8, @intCast(i));
|
||||
@ -1596,8 +1578,6 @@ test "noise" {
|
||||
}
|
||||
|
||||
test "uniform sampling" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var seed: [32]u8 = undefined;
|
||||
for (&seed, 0..) |*s, i| {
|
||||
s.* = @as(u8, @intCast(i));
|
||||
@ -1631,8 +1611,6 @@ test "uniform sampling" {
|
||||
}
|
||||
|
||||
test "Polynomial packing" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var rnd = RndGen.init(0);
|
||||
|
||||
for (0..1000) |_| {
|
||||
@ -1642,8 +1620,6 @@ test "Polynomial packing" {
|
||||
}
|
||||
|
||||
test "Test inner PKE" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var seed: [32]u8 = undefined;
|
||||
var pt: [32]u8 = undefined;
|
||||
for (&seed, &pt, 0..) |*s, *p, i| {
|
||||
@ -1665,8 +1641,6 @@ test "Test inner PKE" {
|
||||
}
|
||||
|
||||
test "Test happy flow" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var seed: [64]u8 = undefined;
|
||||
for (&seed, 0..) |*s, i| {
|
||||
s.* = @as(u8, @intCast(i));
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const mem = std.mem;
|
||||
const maxInt = std.math.maxInt;
|
||||
const OutputTooLongError = std.crypto.errors.OutputTooLongError;
|
||||
@ -152,8 +151,6 @@ const HmacSha1 = std.crypto.auth.hmac.HmacSha1;
|
||||
// RFC 6070 PBKDF2 HMAC-SHA1 Test Vectors
|
||||
|
||||
test "RFC 6070 one iteration" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const p = "password";
|
||||
const s = "salt";
|
||||
const c = 1;
|
||||
@ -169,8 +166,6 @@ test "RFC 6070 one iteration" {
|
||||
}
|
||||
|
||||
test "RFC 6070 two iterations" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const p = "password";
|
||||
const s = "salt";
|
||||
const c = 2;
|
||||
@ -186,8 +181,6 @@ test "RFC 6070 two iterations" {
|
||||
}
|
||||
|
||||
test "RFC 6070 4096 iterations" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const p = "password";
|
||||
const s = "salt";
|
||||
const c = 4096;
|
||||
@ -203,8 +196,6 @@ test "RFC 6070 4096 iterations" {
|
||||
}
|
||||
|
||||
test "RFC 6070 16,777,216 iterations" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
// These iteration tests are slow so we always skip them. Results have been verified.
|
||||
if (true) {
|
||||
return error.SkipZigTest;
|
||||
@ -225,8 +216,6 @@ test "RFC 6070 16,777,216 iterations" {
|
||||
}
|
||||
|
||||
test "RFC 6070 multi-block salt and password" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const p = "passwordPASSWORDpassword";
|
||||
const s = "saltSALTsaltSALTsaltSALTsaltSALTsalt";
|
||||
const c = 4096;
|
||||
@ -242,8 +231,6 @@ test "RFC 6070 multi-block salt and password" {
|
||||
}
|
||||
|
||||
test "RFC 6070 embedded NUL" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const p = "pass\x00word";
|
||||
const s = "sa\x00lt";
|
||||
const c = 4096;
|
||||
@ -259,8 +246,6 @@ test "RFC 6070 embedded NUL" {
|
||||
}
|
||||
|
||||
test "Very large dk_len" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
// This test allocates 8GB of memory and is expected to take several hours to run.
|
||||
if (true) {
|
||||
return error.SkipZigTest;
|
||||
|
||||
@ -351,16 +351,12 @@ test "phc format - encoding/decoding" {
|
||||
}
|
||||
|
||||
test "phc format - empty input string" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const s = "";
|
||||
const v = deserialize(struct { alg_id: []const u8 }, s);
|
||||
try std.testing.expectError(Error.InvalidEncoding, v);
|
||||
}
|
||||
|
||||
test "phc format - hash without salt" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const s = "$scrypt";
|
||||
const v = deserialize(struct { alg_id: []const u8, hash: BinValue(16) }, s);
|
||||
try std.testing.expectError(Error.InvalidEncoding, v);
|
||||
|
||||
@@ -302,7 +302,7 @@ fn SalsaNonVecImpl(comptime rounds: comptime_int) type {
    };
}

const SalsaImpl = if (builtin.cpu.arch == .x86_64) SalsaVecImpl else SalsaNonVecImpl;
const SalsaImpl = if (builtin.cpu.arch == .x86_64 and builtin.zig_backend != .stage2_x86_64) SalsaVecImpl else SalsaNonVecImpl;

fn keyToWords(key: [32]u8) [8]u32 {
    var k: [8]u32 = undefined;
@ -555,8 +555,6 @@ pub const SealedBox = struct {
|
||||
const htest = @import("test.zig");
|
||||
|
||||
test "(x)salsa20" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const key = [_]u8{0x69} ** 32;
|
||||
const nonce = [_]u8{0x42} ** 8;
|
||||
const msg = [_]u8{0} ** 20;
|
||||
@ -571,8 +569,6 @@ test "(x)salsa20" {
|
||||
}
|
||||
|
||||
test "xsalsa20poly1305" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var msg: [100]u8 = undefined;
|
||||
var msg2: [msg.len]u8 = undefined;
|
||||
var c: [msg.len]u8 = undefined;
|
||||
@ -588,8 +584,6 @@ test "xsalsa20poly1305" {
|
||||
}
|
||||
|
||||
test "xsalsa20poly1305 secretbox" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var msg: [100]u8 = undefined;
|
||||
var msg2: [msg.len]u8 = undefined;
|
||||
var key: [XSalsa20Poly1305.key_length]u8 = undefined;
|
||||
|
||||
@ -3,7 +3,6 @@
|
||||
// https://github.com/Tarsnap/scrypt
|
||||
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const crypto = std.crypto;
|
||||
const fmt = std.fmt;
|
||||
const io = std.io;
|
||||
@ -684,8 +683,6 @@ test "unix-scrypt" {
|
||||
}
|
||||
|
||||
test "crypt format" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const str = "$7$C6..../....SodiumChloride$kBGj9fHznVYFQMEn/qDCfrDevf9YDtcDdKvEqHJLV8D";
|
||||
const params = try crypt_format.deserialize(crypt_format.HashResult(32), str);
|
||||
var buf: [str.len]u8 = undefined;
|
||||
@ -694,8 +691,6 @@ test "crypt format" {
|
||||
}
|
||||
|
||||
test "kdf fast" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const TestVector = struct {
|
||||
password: []const u8,
|
||||
salt: []const u8,
|
||||
|
||||
@ -406,16 +406,12 @@ fn Sha2x32(comptime params: Sha2Params32) type {
|
||||
}
|
||||
|
||||
test "sha224 single" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try htest.assertEqualHash(Sha224, "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f", "");
|
||||
try htest.assertEqualHash(Sha224, "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7", "abc");
|
||||
try htest.assertEqualHash(Sha224, "c97ca9a559850ce97a04a96def6d99a9e0e0e2ab14e6b8df265fc0b3", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
|
||||
}
|
||||
|
||||
test "sha224 streaming" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var h = Sha224.init(.{});
|
||||
var out: [28]u8 = undefined;
|
||||
|
||||
@ -436,16 +432,12 @@ test "sha224 streaming" {
|
||||
}
|
||||
|
||||
test "sha256 single" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try htest.assertEqualHash(Sha256, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", "");
|
||||
try htest.assertEqualHash(Sha256, "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad", "abc");
|
||||
try htest.assertEqualHash(Sha256, "cf5b16a778af8380036ce59e7b0492370b249b11e8f07a51afac45037afee9d1", "abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmnhijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu");
|
||||
}
|
||||
|
||||
test "sha256 streaming" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var h = Sha256.init(.{});
|
||||
var out: [32]u8 = undefined;
|
||||
|
||||
@ -466,8 +458,6 @@ test "sha256 streaming" {
|
||||
}
|
||||
|
||||
test "sha256 aligned final" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var block = [_]u8{0} ** Sha256.block_length;
|
||||
var out: [Sha256.digest_length]u8 = undefined;
|
||||
|
||||
|
||||
@ -505,8 +505,6 @@ test "LinearFifo(u8, .Dynamic)" {
|
||||
}
|
||||
|
||||
test "LinearFifo" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
inline for ([_]type{ u1, u8, u16, u64 }) |T| {
|
||||
inline for ([_]LinearFifoBufferType{ LinearFifoBufferType{ .Static = 32 }, .Slice, .Dynamic }) |bt| {
|
||||
const FifoType = LinearFifo(T, bt);
|
||||
|
||||
@ -2751,8 +2751,6 @@ test "formatType max_depth" {
|
||||
}
|
||||
|
||||
test "positional" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try expectFmt("2 1 0", "{2} {1} {0}", .{ @as(usize, 0), @as(usize, 1), @as(usize, 2) });
|
||||
try expectFmt("2 1 0", "{2} {1} {}", .{ @as(usize, 0), @as(usize, 1), @as(usize, 2) });
|
||||
try expectFmt("0 0", "{0} {0}", .{@as(usize, 0)});
|
||||
|
||||
@ -38,8 +38,6 @@ test parseFromSlice {
|
||||
}
|
||||
|
||||
test Value {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var parsed = try parseFromSlice(Value, testing.allocator, "{\"anything\": \"goes\"}", .{});
|
||||
defer parsed.deinit();
|
||||
try testing.expectEqualSlices(u8, "goes", parsed.value.object.get("anything").?.string);
|
||||
@ -65,8 +63,6 @@ test writeStream {
|
||||
}
|
||||
|
||||
test stringify {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var out = ArrayList(u8).init(testing.allocator);
|
||||
defer out.deinit();
|
||||
|
||||
|
||||
@ -104,8 +104,6 @@ test "i_string_utf16LE_no_BOM.json" {
|
||||
try any("[\x00\"\x00\xe9\x00\"\x00]\x00");
|
||||
}
|
||||
test "i_structure_500_nested_arrays.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try any("[" ** 500 ++ "]" ** 500);
|
||||
}
|
||||
test "i_structure_UTF-8_BOM_empty_object.json" {
|
||||
@ -361,21 +359,15 @@ test "n_object_bracket_key.json" {
|
||||
try err("{[: \"x\"}\n");
|
||||
}
|
||||
test "n_object_comma_instead_of_colon.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"x\", null}");
|
||||
}
|
||||
test "n_object_double_colon.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"x\"::\"b\"}");
|
||||
}
|
||||
test "n_object_emoji.json" {
|
||||
try err("{\xf0\x9f\x87\xa8\xf0\x9f\x87\xad}");
|
||||
}
|
||||
test "n_object_garbage_at_end.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"a\" 123}");
|
||||
}
|
||||
test "n_object_key_with_single_quotes.json" {
|
||||
@ -385,26 +377,18 @@ test "n_object_lone_continuation_byte_in_key_and_trailing_comma.json" {
|
||||
try err("{\"\xb9\":\"0\",}");
|
||||
}
|
||||
test "n_object_missing_colon.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\" b}");
|
||||
}
|
||||
test "n_object_missing_key.json" {
|
||||
try err("{:\"b\"}");
|
||||
}
|
||||
test "n_object_missing_semicolon.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\" \"b\"}");
|
||||
}
|
||||
test "n_object_missing_value.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":");
|
||||
}
|
||||
test "n_object_no-colon.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\"");
|
||||
}
|
||||
test "n_object_non_string_key.json" {
|
||||
@ -417,59 +401,39 @@ test "n_object_repeated_null_null.json" {
|
||||
try err("{null:null,null:null}");
|
||||
}
|
||||
test "n_object_several_trailing_commas.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"id\":0,,,,,}");
|
||||
}
|
||||
test "n_object_single_quote.json" {
|
||||
try err("{'a':0}");
|
||||
}
|
||||
test "n_object_trailing_comma.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"id\":0,}");
|
||||
}
|
||||
test "n_object_trailing_comment.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"b\"}/**/");
|
||||
}
|
||||
test "n_object_trailing_comment_open.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"b\"}/**//");
|
||||
}
|
||||
test "n_object_trailing_comment_slash_open.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"b\"}//");
|
||||
}
|
||||
test "n_object_trailing_comment_slash_open_incomplete.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"b\"}/");
|
||||
}
|
||||
test "n_object_two_commas_in_a_row.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"b\",,\"c\":\"d\"}");
|
||||
}
|
||||
test "n_object_unquoted_key.json" {
|
||||
try err("{a: \"b\"}");
|
||||
}
|
||||
test "n_object_unterminated-value.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"a");
|
||||
}
|
||||
test "n_object_with_single_string.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{ \"foo\" : \"bar\", \"a\" }");
|
||||
}
|
||||
test "n_object_with_trailing_garbage.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"b\"}#");
|
||||
}
|
||||
test "n_single_space.json" {
|
||||
@ -596,8 +560,6 @@ test "n_structure_close_unopened_array.json" {
|
||||
try err("1]");
|
||||
}
|
||||
test "n_structure_comma_instead_of_closing_brace.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"x\": true,");
|
||||
}
|
||||
test "n_structure_double_array.json" {
|
||||
@ -628,18 +590,12 @@ test "n_structure_object_followed_by_closing_object.json" {
|
||||
try err("{}}");
|
||||
}
|
||||
test "n_structure_object_unclosed_no_value.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"\":");
|
||||
}
|
||||
test "n_structure_object_with_comment.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":/*comment*/\"b\"}");
|
||||
}
|
||||
test "n_structure_object_with_trailing_garbage.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\": true} \"x\"");
|
||||
}
|
||||
test "n_structure_open_array_apostrophe.json" {
|
||||
@ -649,8 +605,6 @@ test "n_structure_open_array_comma.json" {
|
||||
try err("[,");
|
||||
}
|
||||
test "n_structure_open_array_object.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("[{\"\":" ** 50000 ++ "\n");
|
||||
}
|
||||
test "n_structure_open_array_open_object.json" {
|
||||
@ -690,8 +644,6 @@ test "n_structure_single_star.json" {
|
||||
try err("*");
|
||||
}
|
||||
test "n_structure_trailing_#.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"a\":\"b\"}#{}");
|
||||
}
|
||||
test "n_structure_uescaped_LF_before_string.json" {
|
||||
@ -710,8 +662,6 @@ test "n_structure_unclosed_array_unfinished_true.json" {
|
||||
try err("[ false, tru");
|
||||
}
|
||||
test "n_structure_unclosed_object.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try err("{\"asd\":\"asd\"");
|
||||
}
|
||||
test "n_structure_unicode-identifier.json" {
|
||||
@ -819,31 +769,21 @@ test "y_object.json" {
|
||||
try ok("{\"asd\":\"sdf\", \"dfg\":\"fgh\"}");
|
||||
}
|
||||
test "y_object_basic.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try ok("{\"asd\":\"sdf\"}");
|
||||
}
|
||||
test "y_object_duplicated_key.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try ok("{\"a\":\"b\",\"a\":\"c\"}");
|
||||
}
|
||||
test "y_object_duplicated_key_and_value.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try ok("{\"a\":\"b\",\"a\":\"b\"}");
|
||||
}
|
||||
test "y_object_empty.json" {
|
||||
try ok("{}");
|
||||
}
|
||||
test "y_object_empty_key.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try ok("{\"\":0}");
|
||||
}
|
||||
test "y_object_escaped_null_in_key.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try ok("{\"foo\\u0000bar\": 42}");
|
||||
}
|
||||
test "y_object_extreme_numbers.json" {
|
||||
@ -857,18 +797,12 @@ test "y_object_long_strings.json" {
|
||||
try ok("{\"x\":[{\"id\": \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"}], \"id\": \"xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"}");
|
||||
}
|
||||
test "y_object_simple.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try ok("{\"a\":[]}");
|
||||
}
|
||||
test "y_object_string_unicode.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try ok("{\"title\":\"\\u041f\\u043e\\u043b\\u0442\\u043e\\u0440\\u0430 \\u0417\\u0435\\u043c\\u043b\\u0435\\u043a\\u043e\\u043f\\u0430\" }");
|
||||
}
|
||||
test "y_object_with_newlines.json" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try ok("{\n\"a\": \"b\"\n}");
|
||||
}
|
||||
test "y_string_1_2_3_bytes_UTF-8_sequences.json" {
|
||||
|
||||
@ -125,16 +125,12 @@ fn testParse(allocator: std.mem.Allocator, json_str: []const u8) !Value {
|
||||
}
|
||||
|
||||
test "parsing empty string gives appropriate error" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(std.testing.allocator);
|
||||
defer arena_allocator.deinit();
|
||||
try testing.expectError(error.UnexpectedEndOfInput, testParse(arena_allocator.allocator(), ""));
|
||||
}
|
||||
|
||||
test "Value.array allocator should still be usable after parsing" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var parsed = try parseFromSlice(Value, std.testing.allocator, "[]", .{});
|
||||
defer parsed.deinit();
|
||||
|
||||
@ -195,8 +191,6 @@ test "escaped characters" {
|
||||
}
|
||||
|
||||
test "Value.jsonStringify" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var vals = [_]Value{
|
||||
.{ .integer = 1 },
|
||||
.{ .integer = 2 },
|
||||
@ -263,8 +257,6 @@ test "parseFromValue(std.json.Value,...)" {
|
||||
}
|
||||
|
||||
test "polymorphic parsing" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
if (true) return error.SkipZigTest; // See https://github.com/ziglang/zig/issues/16108
|
||||
const doc =
|
||||
\\{ "type": "div",
|
||||
@ -310,8 +302,6 @@ test "polymorphic parsing" {
|
||||
}
|
||||
|
||||
test "long object value" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const value = "01234567890123456789";
|
||||
const doc = "{\"key\":\"" ++ value ++ "\"}";
|
||||
var fbs = std.io.fixedBufferStream(doc);
|
||||
@ -324,8 +314,6 @@ test "long object value" {
|
||||
}
|
||||
|
||||
test "ParseOptions.max_value_len" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var arena = ArenaAllocator.init(testing.allocator);
|
||||
defer arena.deinit();
|
||||
|
||||
|
||||
@ -392,8 +392,6 @@ test "parse" {
|
||||
}
|
||||
|
||||
test "parse into enum" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const T = enum(u32) {
|
||||
Foo = 42,
|
||||
Bar,
|
||||
@ -478,8 +476,6 @@ test "parse into tagged union errors" {
|
||||
}
|
||||
|
||||
test "parse into struct with no fields" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const T = struct {};
|
||||
const parsed = try parseFromSlice(T, testing.allocator, "{}", .{});
|
||||
defer parsed.deinit();
|
||||
@ -949,8 +945,6 @@ test "json parse allocate when streaming" {
|
||||
}
|
||||
|
||||
test "parse at comptime" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const doc =
|
||||
\\{
|
||||
\\ "vals": {
|
||||
|
||||
@ -100,8 +100,6 @@ fn getJsonObject(allocator: std.mem.Allocator) !Value {
|
||||
}
|
||||
|
||||
test "stringify null optional fields" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const MyStruct = struct {
|
||||
optional: ?[]const u8 = null,
|
||||
required: []const u8 = "something",
|
||||
@ -123,8 +121,6 @@ test "stringify null optional fields" {
|
||||
}
|
||||
|
||||
test "stringify basic types" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("false", false, .{});
|
||||
try testStringify("true", true, .{});
|
||||
try testStringify("null", @as(?u8, null), .{});
|
||||
@ -141,8 +137,6 @@ test "stringify basic types" {
|
||||
}
|
||||
|
||||
test "stringify string" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("\"hello\"", "hello", .{});
|
||||
try testStringify("\"with\\nescapes\\r\"", "with\nescapes\r", .{});
|
||||
try testStringify("\"with\\nescapes\\r\"", "with\nescapes\r", .{ .escape_unicode = true });
|
||||
@ -167,16 +161,12 @@ test "stringify string" {
|
||||
}
|
||||
|
||||
test "stringify many-item sentinel-terminated string" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("\"hello\"", @as([*:0]const u8, "hello"), .{});
|
||||
try testStringify("\"with\\nescapes\\r\"", @as([*:0]const u8, "with\nescapes\r"), .{ .escape_unicode = true });
|
||||
try testStringify("\"with unicode\\u0001\"", @as([*:0]const u8, "with unicode\u{1}"), .{ .escape_unicode = true });
|
||||
}
|
||||
|
||||
test "stringify enums" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const E = enum {
|
||||
foo,
|
||||
bar,
|
||||
@ -186,15 +176,11 @@ test "stringify enums" {
|
||||
}
|
||||
|
||||
test "stringify enum literals" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("\"foo\"", .foo, .{});
|
||||
try testStringify("\"bar\"", .bar, .{});
|
||||
}
|
||||
|
||||
test "stringify tagged unions" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const T = union(enum) {
|
||||
nothing,
|
||||
foo: u32,
|
||||
@ -206,8 +192,6 @@ test "stringify tagged unions" {
|
||||
}
|
||||
|
||||
test "stringify struct" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("{\"foo\":42}", struct {
|
||||
foo: u32,
|
||||
}{ .foo = 42 }, .{});
|
||||
@ -230,8 +214,6 @@ test "emit_strings_as_arrays" {
|
||||
}
|
||||
|
||||
test "stringify struct with indentation" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify(
|
||||
\\{
|
||||
\\ "foo": 42,
|
||||
@ -277,8 +259,6 @@ test "stringify struct with indentation" {
|
||||
}
|
||||
|
||||
test "stringify struct with void field" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("{\"foo\":42}", struct {
|
||||
foo: u32,
|
||||
bar: void = {},
|
||||
@ -286,8 +266,6 @@ test "stringify struct with void field" {
|
||||
}
|
||||
|
||||
test "stringify array of structs" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const MyStruct = struct {
|
||||
foo: u32,
|
||||
};
|
||||
@ -299,8 +277,6 @@ test "stringify array of structs" {
|
||||
}
|
||||
|
||||
test "stringify struct with custom stringifier" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("[\"something special\",42]", struct {
|
||||
foo: u32,
|
||||
const Self = @This();
|
||||
@ -315,16 +291,12 @@ test "stringify struct with custom stringifier" {
|
||||
}
|
||||
|
||||
test "stringify vector" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("[1,1]", @as(@Vector(2, u32), @splat(1)), .{});
|
||||
try testStringify("\"AA\"", @as(@Vector(2, u8), @splat('A')), .{});
|
||||
try testStringify("[65,65]", @as(@Vector(2, u8), @splat('A')), .{ .emit_strings_as_arrays = true });
|
||||
}
|
||||
|
||||
test "stringify tuple" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("[\"foo\",42]", std.meta.Tuple(&.{ []const u8, usize }){ "foo", 42 }, .{});
|
||||
}
|
||||
|
||||
@ -411,8 +383,6 @@ fn testStringifyArbitraryDepth(expected: []const u8, value: anytype, options: St
|
||||
}
|
||||
|
||||
test "stringify alloc" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const allocator = std.testing.allocator;
|
||||
const expected =
|
||||
\\{"foo":"bar","answer":42,"my_friend":"sammy"}
|
||||
@ -424,8 +394,6 @@ test "stringify alloc" {
|
||||
}
|
||||
|
||||
test "comptime stringify" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
comptime testStringifyMaxDepth("false", false, .{}, null) catch unreachable;
|
||||
comptime testStringifyMaxDepth("false", false, .{}, 0) catch unreachable;
|
||||
comptime testStringifyArbitraryDepth("false", false, .{}) catch unreachable;
|
||||
@ -446,8 +414,6 @@ test "comptime stringify" {
|
||||
}
|
||||
|
||||
test "print" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var out_buf: [1024]u8 = undefined;
|
||||
var slice_stream = std.io.fixedBufferStream(&out_buf);
|
||||
const out = slice_stream.writer();
|
||||
@ -479,8 +445,6 @@ test "print" {
|
||||
}
|
||||
|
||||
test "nonportable numbers" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testStringify("9999999999999999", 9999999999999999, .{});
|
||||
try testStringify("\"9999999999999999\"", 9999999999999999, .{ .emit_nonportable_numbers_as_strings = true });
|
||||
}
|
||||
|
||||
@@ -1,5 +1,4 @@
const std = @import("std");
const builtin = @import("builtin");
const testing = std.testing;
const parseFromSlice = @import("./static.zig").parseFromSlice;
const validate = @import("./scanner.zig").validate;

@@ -35,15 +34,13 @@ fn testHighLevelDynamicParser(s: []const u8) !void {

// Additional tests not part of test JSONTestSuite.
test "y_trailing_comma_after_empty" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;

    try roundTrip(
        \\{"1":[],"2":{},"3":"4"}
    );
}
test "n_object_closed_missing_value" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;

    try err(
        \\{"a":}
    );

@ -246,8 +246,6 @@ test "big.int fits" {
|
||||
}
|
||||
|
||||
test "big.int string set" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.init(testing.allocator);
|
||||
defer a.deinit();
|
||||
|
||||
@ -264,8 +262,6 @@ test "big.int string negative" {
|
||||
}
|
||||
|
||||
test "big.int string set number with underscores" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.init(testing.allocator);
|
||||
defer a.deinit();
|
||||
|
||||
@ -274,8 +270,6 @@ test "big.int string set number with underscores" {
|
||||
}
|
||||
|
||||
test "big.int string set case insensitive number" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.init(testing.allocator);
|
||||
defer a.deinit();
|
||||
|
||||
@ -326,8 +320,6 @@ test "big.int twos complement limit set" {
|
||||
}
|
||||
|
||||
test "big.int string to" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, 120317241209124781241290847124);
|
||||
defer a.deinit();
|
||||
|
||||
@ -368,8 +360,6 @@ test "big.int string to base 16" {
|
||||
}
|
||||
|
||||
test "big.int neg string to" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, -123907434);
|
||||
defer a.deinit();
|
||||
|
||||
@ -392,8 +382,6 @@ test "big.int zero string to" {
|
||||
}
|
||||
|
||||
test "big.int clone" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, 1234);
|
||||
defer a.deinit();
|
||||
var b = try a.clone();
|
||||
@ -634,8 +622,6 @@ test "big.int addWrap single-single, unsigned" {
|
||||
}
|
||||
|
||||
test "big.int subWrap single-single, unsigned" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, 0);
|
||||
defer a.deinit();
|
||||
|
||||
@ -963,8 +949,6 @@ test "big.int mul multi-multi" {
|
||||
}
|
||||
|
||||
test "big.int mul alias r with a" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, maxInt(Limb));
|
||||
defer a.deinit();
|
||||
var b = try Managed.initSet(testing.allocator, 2);
|
||||
@ -976,8 +960,6 @@ test "big.int mul alias r with a" {
|
||||
}
|
||||
|
||||
test "big.int mul alias r with b" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, maxInt(Limb));
|
||||
defer a.deinit();
|
||||
var b = try Managed.initSet(testing.allocator, 2);
|
||||
@ -989,8 +971,6 @@ test "big.int mul alias r with b" {
|
||||
}
|
||||
|
||||
test "big.int mul alias r with a and b" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, maxInt(Limb));
|
||||
defer a.deinit();
|
||||
|
||||
@@ -1096,7 +1076,7 @@ test "big.int mulWrap multi-multi unsigned" {

test "big.int mulWrap multi-multi signed" {
    switch (builtin.zig_backend) {
        .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
        .stage2_c => return error.SkipZigTest,
        else => {},
    }

@ -1171,8 +1151,6 @@ test "big.int div single-half with rem" {
|
||||
}
|
||||
|
||||
test "big.int div single-single no rem" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
// assumes usize is <= 64 bits.
|
||||
var a = try Managed.initSet(testing.allocator, 1 << 52);
|
||||
defer a.deinit();
|
||||
@ -1190,8 +1168,6 @@ test "big.int div single-single no rem" {
|
||||
}
|
||||
|
||||
test "big.int div single-single with rem" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, (1 << 52) | (1 << 33));
|
||||
defer a.deinit();
|
||||
var b = try Managed.initSet(testing.allocator, (1 << 35));
|
||||
@ -1271,8 +1247,6 @@ test "big.int div multi>2-single" {
|
||||
}
|
||||
|
||||
test "big.int div single-single q < r" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, 0x0078f432);
|
||||
defer a.deinit();
|
||||
var b = try Managed.initSet(testing.allocator, 0x01000000);
|
||||
@@ -1317,10 +1291,7 @@ test "big.int div q=0 alias" {
}

test "big.int div multi-multi q < r" {
    switch (builtin.zig_backend) {
        .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
        else => {},
    }
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;

    const op1 = 0x1ffffffff0078f432;
    const op2 = 0x1ffffffff01000000;
@ -1642,8 +1613,6 @@ test "big.int div floor single-single -/-" {
|
||||
}
|
||||
|
||||
test "big.int div floor no remainder negative quotient" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const u: i32 = -0x80000000;
|
||||
const v: i32 = 1;
|
||||
|
||||
@@ -1743,10 +1712,7 @@ test "big.int div multi-multi no rem" {
}

test "big.int div multi-multi (2 branch)" {
    switch (builtin.zig_backend) {
        .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
        else => {},
    }
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;

    var a = try Managed.initSet(testing.allocator, 0x866666665555555588888887777777761111111111111111);
    defer a.deinit();
@@ -1785,10 +1751,7 @@ test "big.int div multi-multi (3.1/3.3 branch)" {
}

test "big.int div multi-single zero-limb trailing" {
    switch (builtin.zig_backend) {
        .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
        else => {},
    }
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;

    var a = try Managed.initSet(testing.allocator, 0x60000000000000000000000000000000000000000000000000000000000000000);
    defer a.deinit();
@@ -1808,10 +1771,7 @@ test "big.int div multi-single zero-limb trailing" {
}

test "big.int div multi-multi zero-limb trailing (with rem)" {
    switch (builtin.zig_backend) {
        .stage2_c, .stage2_x86_64 => return error.SkipZigTest,
        else => {},
    }
    if (builtin.zig_backend == .stage2_c) return error.SkipZigTest;

    var a = try Managed.initSet(testing.allocator, 0x86666666555555558888888777777776111111111111111100000000000000000000000000000000);
    defer a.deinit();
@ -1908,8 +1868,6 @@ test "big.int div multi-multi fuzz case #1" {
|
||||
}
|
||||
|
||||
test "big.int div multi-multi fuzz case #2" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.init(testing.allocator);
|
||||
defer a.deinit();
|
||||
var b = try Managed.init(testing.allocator);
|
||||
@ -2672,8 +2630,6 @@ test "big.int mutable to managed" {
|
||||
}
|
||||
|
||||
test "big.int const to managed" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Managed.initSet(testing.allocator, 123423453456);
|
||||
defer a.deinit();
|
||||
|
||||
|
||||
@ -494,8 +494,6 @@ fn extractLowBits(a: Int, comptime T: type) T {
|
||||
}
|
||||
|
||||
test "big.rational extractLowBits" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Int.initSet(testing.allocator, 0x11112222333344441234567887654321);
|
||||
defer a.deinit();
|
||||
|
||||
@ -649,8 +647,6 @@ test "big.rational copy" {
|
||||
}
|
||||
|
||||
test "big.rational negate" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Rational.init(testing.allocator);
|
||||
defer a.deinit();
|
||||
|
||||
@ -668,8 +664,6 @@ test "big.rational negate" {
|
||||
}
|
||||
|
||||
test "big.rational abs" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var a = try Rational.init(testing.allocator);
|
||||
defer a.deinit();
|
||||
|
||||
|
||||
@ -147,7 +147,6 @@ test "oldlog10 doesn't work" {
|
||||
test "log10_int vs old implementation" {
|
||||
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_sparc64) return error.SkipZigTest; // TODO
|
||||
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
|
||||
|
||||
@ -56,8 +56,6 @@ pub fn log_int(comptime T: type, base: T, x: T) Log2Int(T) {
|
||||
}
|
||||
|
||||
test "math.log_int" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
// Test all unsigned integers with 2, 3, ..., 64 bits.
|
||||
// We cannot test 0 or 1 bits since base must be > 1.
|
||||
inline for (2..64 + 1) |bits| {
|
||||
|
||||
@@ -1,4 +1,3 @@
const builtin = @import("builtin");
const std = @import("../std.zig");
const math = std.math;
const assert = std.debug.assert;

@@ -103,7 +102,7 @@ fn nextAfterFloat(comptime T: type, x: T, y: T) T {
}

test "math.nextAfter.int" {
    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;

    try expect(nextAfter(i0, 0, 0) == 0);
    try expect(nextAfter(u0, 0, 0) == 0);

@ -1741,8 +1741,6 @@ pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: Endian) T {
|
||||
}
|
||||
|
||||
test "comptime read/write int" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
comptime {
|
||||
var bytes: [2]u8 = undefined;
|
||||
writeIntLittle(u16, &bytes, 0x1234);
|
||||
@ -3309,8 +3307,6 @@ test "testStringEquality" {
|
||||
}
|
||||
|
||||
test "testReadInt" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testReadIntImpl();
|
||||
try comptime testReadIntImpl();
|
||||
}
|
||||
|
||||
@ -583,8 +583,6 @@ pub fn MultiArrayList(comptime T: type) type {
|
||||
}
|
||||
|
||||
test "basic usage" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const ally = testing.allocator;
|
||||
|
||||
const Foo = struct {
|
||||
@ -679,8 +677,6 @@ test "basic usage" {
|
||||
// This was observed to fail on aarch64 with LLVM 11, when the capacityInBytes
|
||||
// function used the @reduce code path.
|
||||
test "regression test for @reduce bug" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const ally = testing.allocator;
|
||||
var list = MultiArrayList(struct {
|
||||
tag: std.zig.Token.Tag,
|
||||
@ -758,8 +754,6 @@ test "regression test for @reduce bug" {
|
||||
}
|
||||
|
||||
test "ensure capacity on empty list" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const ally = testing.allocator;
|
||||
|
||||
const Foo = struct {
|
||||
@ -795,8 +789,6 @@ test "ensure capacity on empty list" {
|
||||
}
|
||||
|
||||
test "insert elements" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const ally = testing.allocator;
|
||||
|
||||
const Foo = struct {
|
||||
@ -816,8 +808,6 @@ test "insert elements" {
|
||||
}
|
||||
|
||||
test "union" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const ally = testing.allocator;
|
||||
|
||||
const Foo = union(enum) {
|
||||
@ -873,8 +863,6 @@ test "union" {
|
||||
}
|
||||
|
||||
test "sorting a span" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var list: MultiArrayList(struct { score: u32, chr: u8 }) = .{};
|
||||
defer list.deinit(testing.allocator);
|
||||
|
||||
@ -915,8 +903,6 @@ test "sorting a span" {
|
||||
}
|
||||
|
||||
test "0 sized struct field" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const ally = testing.allocator;
|
||||
|
||||
const Foo = struct {
|
||||
@ -944,8 +930,6 @@ test "0 sized struct field" {
|
||||
}
|
||||
|
||||
test "0 sized struct" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const ally = testing.allocator;
|
||||
|
||||
const Foo = struct {
|
||||
|
||||
@@ -109,9 +109,8 @@ test "parse and render UNIX addresses" {
}

test "resolve DNS" {
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
    if (builtin.os.tag == .wasi) return error.SkipZigTest;

    if (builtin.os.tag == .windows) {
        _ = try std.os.windows.WSAStartup(2, 2);

@ -443,8 +443,6 @@ fn testRangeBias(r: Random, start: i8, end: i8, biased: bool) !void {
|
||||
}
|
||||
|
||||
test "CSPRNG" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var secret_seed: [DefaultCsprng.secret_seed_length]u8 = undefined;
|
||||
std.crypto.random.bytes(&secret_seed);
|
||||
var csprng = DefaultCsprng.init(secret_seed);
|
||||
|
||||
@ -409,8 +409,6 @@ pub fn SegmentedList(comptime T: type, comptime prealloc_item_count: usize) type
|
||||
}
|
||||
|
||||
test "SegmentedList basic usage" {
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testSegmentedList(0);
|
||||
try testSegmentedList(1);
|
||||
try testSegmentedList(2);
|
||||
|
||||
@@ -13,7 +13,7 @@ pub fn suggestVectorSizeForCpu(comptime T: type, comptime cpu: std.Target.Cpu) ?
    const vector_bit_size: u16 = blk: {
        if (cpu.arch.isX86()) {
            if (T == bool and std.Target.x86.featureSetHas(cpu.features, .prefer_mask_registers)) return 64;
            if (std.Target.x86.featureSetHas(cpu.features, .avx512f) and !std.Target.x86.featureSetHasAny(cpu.features, .{ .prefer_256_bit, .prefer_128_bit })) break :blk 512;
            if (builtin.zig_backend != .stage2_x86_64 and std.Target.x86.featureSetHas(cpu.features, .avx512f) and !std.Target.x86.featureSetHasAny(cpu.features, .{ .prefer_256_bit, .prefer_128_bit })) break :blk 512;
            if (std.Target.x86.featureSetHasAny(cpu.features, .{ .prefer_256_bit, .avx2 }) and !std.Target.x86.featureSetHas(cpu.features, .prefer_128_bit)) break :blk 256;
            if (std.Target.x86.featureSetHas(cpu.features, .sse)) break :blk 128;
            if (std.Target.x86.featureSetHasAny(cpu.features, .{ .mmx, .@"3dnow" })) break :blk 64;
@@ -62,10 +62,15 @@ pub fn suggestVectorSize(comptime T: type) ?comptime_int {
test "suggestVectorSizeForCpu works with signed and unsigned values" {
    comptime var cpu = std.Target.Cpu.baseline(std.Target.Cpu.Arch.x86_64);
    comptime cpu.features.addFeature(@intFromEnum(std.Target.x86.Feature.avx512f));
    comptime cpu.features.populateDependencies(&std.Target.x86.all_features);
    const expected_size: usize = switch (builtin.zig_backend) {
        .stage2_x86_64 => 8,
        else => 16,
    };
    const signed_integer_size = suggestVectorSizeForCpu(i32, cpu).?;
    const unsigned_integer_size = suggestVectorSizeForCpu(u32, cpu).?;
    try std.testing.expectEqual(@as(usize, 16), unsigned_integer_size);
    try std.testing.expectEqual(@as(usize, 16), signed_integer_size);
    try std.testing.expectEqual(expected_size, unsigned_integer_size);
    try std.testing.expectEqual(expected_size, signed_integer_size);
}

fn vectorLength(comptime VectorType: type) comptime_int {
|
||||
|
||||
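
As an aside (not part of this commit), a minimal sketch of how suggestVectorSize is typically consumed: pick the target's preferred lane count at comptime and fall back to a 1-lane "vector" when none is suggested. The containsByte helper and lane_count constant are hypothetical names for illustration.

const std = @import("std");

// Preferred lane count for u8 on the build target; 1 when no SIMD width is suggested.
const lane_count = std.simd.suggestVectorSize(u8) orelse 1;

fn containsByte(haystack: []const u8, needle: u8) bool {
    const V = @Vector(lane_count, u8);
    var i: usize = 0;
    while (i + lane_count <= haystack.len) : (i += lane_count) {
        const chunk: V = haystack[i..][0..lane_count].*;
        // Lanewise compare yields a bool vector; @reduce(.Or, ...) asks "any lane equal?".
        if (@reduce(.Or, chunk == @as(V, @splat(needle)))) return true;
    }
    return std.mem.indexOfScalar(u8, haystack[i..], needle) != null;
}

test "containsByte" {
    try std.testing.expect(containsByte("hello, world", 'w'));
    try std.testing.expect(!containsByte("hello, world", 'z'));
}
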
@ -1,6 +1,5 @@
|
||||
const std = @import("std.zig");
|
||||
const assert = std.debug.assert;
|
||||
const builtin = @import("builtin");
|
||||
const testing = std.testing;
|
||||
const mem = std.mem;
|
||||
const math = std.math;
|
||||
@ -177,8 +176,6 @@ const IdAndValue = struct {
|
||||
};
|
||||
|
||||
test "stable sort" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const expected = [_]IdAndValue{
|
||||
IdAndValue{ .id = 0, .value = 0 },
|
||||
IdAndValue{ .id = 1, .value = 0 },
|
||||
@ -226,8 +223,6 @@ test "stable sort" {
|
||||
}
|
||||
|
||||
test "sort" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const u8cases = [_][]const []const u8{
|
||||
&[_][]const u8{
|
||||
"",
|
||||
@ -306,8 +301,6 @@ test "sort" {
|
||||
}
|
||||
|
||||
test "sort descending" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const rev_cases = [_][]const []const i32{
|
||||
&[_][]const i32{
|
||||
&[_]i32{},
|
||||
@ -347,8 +340,6 @@ test "sort descending" {
|
||||
}
|
||||
|
||||
test "sort with context in the middle of a slice" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const Context = struct {
|
||||
items: []i32,
|
||||
|
||||
@ -388,8 +379,6 @@ test "sort with context in the middle of a slice" {
|
||||
}
|
||||
|
||||
test "sort fuzz testing" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var prng = std.rand.DefaultPrng.init(0x12345678);
|
||||
const random = prng.random();
|
||||
const test_case_count = 10;
|
||||
|
||||
@ -1,6 +1,5 @@
|
||||
const std = @import("./std.zig");
|
||||
const assert = std.debug.assert;
|
||||
const builtin = @import("builtin");
|
||||
const testing = std.testing;
|
||||
const mem = std.mem;
|
||||
|
||||
@ -497,15 +496,11 @@ fn testUtf16CountCodepoints() !void {
|
||||
}
|
||||
|
||||
test "utf16 count codepoints" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testUtf16CountCodepoints();
|
||||
try comptime testUtf16CountCodepoints();
|
||||
}
|
||||
|
||||
test "utf8 encode" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testUtf8Encode();
|
||||
try testUtf8Encode();
|
||||
}
|
||||
@ -532,8 +527,6 @@ fn testUtf8Encode() !void {
|
||||
}
|
||||
|
||||
test "utf8 encode error" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testUtf8EncodeError();
|
||||
try testUtf8EncodeError();
|
||||
}
|
||||
@ -550,8 +543,6 @@ fn testErrorEncode(codePoint: u21, array: []u8, expectedErr: anyerror) !void {
|
||||
}
|
||||
|
||||
test "utf8 iterator on ascii" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testUtf8IteratorOnAscii();
|
||||
try testUtf8IteratorOnAscii();
|
||||
}
|
||||
@ -572,8 +563,6 @@ fn testUtf8IteratorOnAscii() !void {
|
||||
}
|
||||
|
||||
test "utf8 view bad" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testUtf8ViewBad();
|
||||
try testUtf8ViewBad();
|
||||
}
|
||||
@ -584,8 +573,6 @@ fn testUtf8ViewBad() !void {
|
||||
}
|
||||
|
||||
test "utf8 view ok" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testUtf8ViewOk();
|
||||
try testUtf8ViewOk();
|
||||
}
|
||||
@ -606,8 +593,6 @@ fn testUtf8ViewOk() !void {
|
||||
}
|
||||
|
||||
test "validate slice" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testValidateSlice();
|
||||
try testValidateSlice();
|
||||
|
||||
@ -648,8 +633,6 @@ fn testValidateSlice() !void {
|
||||
}
|
||||
|
||||
test "valid utf8" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testValidUtf8();
|
||||
try testValidUtf8();
|
||||
}
|
||||
@ -669,8 +652,6 @@ fn testValidUtf8() !void {
|
||||
}
|
||||
|
||||
test "invalid utf8 continuation bytes" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testInvalidUtf8ContinuationBytes();
|
||||
try testInvalidUtf8ContinuationBytes();
|
||||
}
|
||||
@ -703,8 +684,6 @@ fn testInvalidUtf8ContinuationBytes() !void {
|
||||
}
|
||||
|
||||
test "overlong utf8 codepoint" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testOverlongUtf8Codepoint();
|
||||
try testOverlongUtf8Codepoint();
|
||||
}
|
||||
@ -718,8 +697,6 @@ fn testOverlongUtf8Codepoint() !void {
|
||||
}
|
||||
|
||||
test "misc invalid utf8" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testMiscInvalidUtf8();
|
||||
try testMiscInvalidUtf8();
|
||||
}
|
||||
@ -735,8 +712,6 @@ fn testMiscInvalidUtf8() !void {
|
||||
}
|
||||
|
||||
test "utf8 iterator peeking" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try comptime testUtf8Peeking();
|
||||
try testUtf8Peeking();
|
||||
}
|
||||
@ -821,8 +796,6 @@ pub fn utf16leToUtf8(utf8: []u8, utf16le: []const u16) !usize {
|
||||
}
|
||||
|
||||
test "utf16leToUtf8" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var utf16le: [2]u16 = undefined;
|
||||
const utf16le_as_bytes = mem.sliceAsBytes(utf16le[0..]);
|
||||
|
||||
@ -935,8 +908,6 @@ pub fn utf8ToUtf16Le(utf16le: []u16, utf8: []const u8) !usize {
|
||||
}
|
||||
|
||||
test "utf8ToUtf16Le" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
var utf16le: [2]u16 = [_]u16{0} ** 2;
|
||||
{
|
||||
const length = try utf8ToUtf16Le(utf16le[0..], "𐐷");
|
||||
@ -955,8 +926,6 @@ test "utf8ToUtf16Le" {
|
||||
}
|
||||
|
||||
test "utf8ToUtf16LeWithNull" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
{
|
||||
const utf16 = try utf8ToUtf16LeWithNull(testing.allocator, "𐐷");
|
||||
defer testing.allocator.free(utf16);
|
||||
@ -1015,8 +984,6 @@ fn testCalcUtf16LeLen() !void {
|
||||
}
|
||||
|
||||
test "calculate utf16 string length of given utf8 string in u16" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testCalcUtf16LeLen();
|
||||
try comptime testCalcUtf16LeLen();
|
||||
}
|
||||
@ -1050,8 +1017,6 @@ pub fn fmtUtf16le(utf16le: []const u16) std.fmt.Formatter(formatUtf16le) {
|
||||
}
|
||||
|
||||
test "fmtUtf16le" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const expectFmt = std.testing.expectFmt;
|
||||
try expectFmt("", "{}", .{fmtUtf16le(utf8ToUtf16LeStringLiteral(""))});
|
||||
try expectFmt("foo", "{}", .{fmtUtf16le(utf8ToUtf16LeStringLiteral("foo"))});
|
||||
@ -1065,8 +1030,6 @@ test "fmtUtf16le" {
|
||||
}
|
||||
|
||||
test "utf8ToUtf16LeStringLiteral" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
{
|
||||
const bytes = [_:0]u16{
|
||||
mem.nativeToLittle(u16, 0x41),
|
||||
@ -1127,8 +1090,6 @@ fn testUtf8CountCodepoints() !void {
|
||||
}
|
||||
|
||||
test "utf8 count codepoints" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testUtf8CountCodepoints();
|
||||
try comptime testUtf8CountCodepoints();
|
||||
}
|
||||
@ -1145,8 +1106,6 @@ fn testUtf8ValidCodepoint() !void {
|
||||
}
|
||||
|
||||
test "utf8 valid codepoint" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testUtf8ValidCodepoint();
|
||||
try comptime testUtf8ValidCodepoint();
|
||||
}
|
||||
|
||||
@ -1,5 +1,4 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const mem = std.mem;
|
||||
|
||||
/// Print the string as a Zig identifier escaping it with @"" syntax if needed.
|
||||
@ -96,7 +95,7 @@ pub fn fmtEscapes(bytes: []const u8) std.fmt.Formatter(stringEscape) {
|
||||
}
|
||||
|
||||
test "escape invalid identifiers" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
if (@import("builtin").zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
const expectFmt = std.testing.expectFmt;
|
||||
try expectFmt("@\"while\"", "{}", .{fmtId("while")});
|
||||
|
||||
@ -1481,8 +1481,6 @@ test "utf8" {
|
||||
}
|
||||
|
||||
test "invalid utf8" {
|
||||
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest;
|
||||
|
||||
try testTokenize("//\x80", &.{
|
||||
.invalid,
|
||||
});
|
||||
|
||||
@ -656,11 +656,14 @@ const InstTracking = struct {
|
||||
fn reuse(
|
||||
self: *InstTracking,
|
||||
function: *Self,
|
||||
new_inst: Air.Inst.Index,
|
||||
new_inst: ?Air.Inst.Index,
|
||||
old_inst: Air.Inst.Index,
|
||||
) void {
|
||||
self.short = .{ .dead = function.scope_generation };
|
||||
tracking_log.debug("%{d} => {} (reuse %{d})", .{ new_inst, self.*, old_inst });
|
||||
if (new_inst) |inst|
|
||||
tracking_log.debug("%{d} => {} (reuse %{d})", .{ inst, self.*, old_inst })
|
||||
else
|
||||
tracking_log.debug("tmp => {} (reuse %{d})", .{ self.*, old_inst });
|
||||
}
|
||||
|
||||
fn liveOut(self: *InstTracking, function: *Self, inst: Air.Inst.Index) void {
|
||||
@ -1560,6 +1563,10 @@ fn asmRegisterMemoryImmediate(
|
||||
m: Memory,
|
||||
imm: Immediate,
|
||||
) !void {
|
||||
if (switch (imm) {
|
||||
.signed => |s| if (math.cast(i16, s)) |x| @as(u16, @bitCast(x)) else null,
|
||||
.unsigned => |u| math.cast(u16, u),
|
||||
}) |small_imm| {
|
||||
_ = try self.addInst(.{
|
||||
.tag = tag[1],
|
||||
.ops = switch (m) {
|
||||
@ -1570,7 +1577,7 @@ fn asmRegisterMemoryImmediate(
|
||||
.data = .{ .rix = .{
|
||||
.fixes = tag[0],
|
||||
.r1 = reg,
|
||||
.i = @as(u8, @intCast(imm.unsigned)),
|
||||
.i = small_imm,
|
||||
.payload = switch (m) {
|
||||
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
|
||||
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
|
||||
@ -1578,6 +1585,36 @@ fn asmRegisterMemoryImmediate(
|
||||
},
|
||||
} },
|
||||
});
|
||||
} else {
|
||||
const payload = try self.addExtra(Mir.Imm32{ .imm = switch (imm) {
|
||||
.signed => |s| @bitCast(s),
|
||||
.unsigned => unreachable,
|
||||
} });
|
||||
assert(payload + 1 == switch (m) {
|
||||
.sib => try self.addExtra(Mir.MemorySib.encode(m)),
|
||||
.rip => try self.addExtra(Mir.MemoryRip.encode(m)),
|
||||
else => unreachable,
|
||||
});
|
||||
_ = try self.addInst(.{
|
||||
.tag = tag[1],
|
||||
.ops = switch (m) {
|
||||
.sib => switch (imm) {
|
||||
.signed => .rmi_sib_s,
|
||||
.unsigned => .rmi_sib_u,
|
||||
},
|
||||
.rip => switch (imm) {
|
||||
.signed => .rmi_rip_s,
|
||||
.unsigned => .rmi_rip_u,
|
||||
},
|
||||
else => unreachable,
|
||||
},
|
||||
.data = .{ .rx = .{
|
||||
.fixes = tag[0],
|
||||
.r1 = reg,
|
||||
.payload = payload,
|
||||
} },
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
fn asmRegisterRegisterMemoryImmediate(
|
||||
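
A minimal sketch (not from the commit) of the width check introduced above: the immediate is stored inline in the rix payload only when it round-trips through 16 bits, otherwise it is spilled to an Imm32 extra word placed just before the memory encoding. fitsInRix below is a hypothetical helper mirroring the math.cast logic.

const std = @import("std");

// Signed immediates must fit in i16 and unsigned immediates in u16 to be
// packed inline as the 16-bit `i` field; anything wider goes through extra data.
fn fitsInRix(imm: union(enum) { signed: i64, unsigned: u64 }) ?u16 {
    return switch (imm) {
        .signed => |s| if (std.math.cast(i16, s)) |x| @as(u16, @bitCast(x)) else null,
        .unsigned => |u| std.math.cast(u16, u),
    };
}

test "immediate width selection" {
    try std.testing.expectEqual(@as(?u16, 0x7fff), fitsInRix(.{ .unsigned = 0x7fff }));
    try std.testing.expectEqual(@as(?u16, 0xfff6), fitsInRix(.{ .signed = -10 }));
    try std.testing.expectEqual(@as(?u16, null), fitsInRix(.{ .unsigned = 0x12345 }));
}
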
@ -3713,14 +3750,22 @@ fn genIntMulDivOpMir(self: *Self, tag: Mir.Inst.FixedTag, ty: Type, lhs: MCValue
|
||||
else => unreachable,
|
||||
.mul => {},
|
||||
.div => switch (tag[0]) {
|
||||
._ => try self.asmRegisterRegister(.{ ._, .xor }, .edx, .edx),
|
||||
.i_ => switch (self.regBitSize(ty)) {
|
||||
8 => try self.asmOpOnly(.{ ._, .cbw }),
|
||||
16 => try self.asmOpOnly(.{ ._, .cwd }),
|
||||
32 => try self.asmOpOnly(.{ ._, .cdq }),
|
||||
64 => try self.asmOpOnly(.{ ._, .cqo }),
|
||||
._ => {
|
||||
const hi_reg: Register =
|
||||
switch (self.regBitSize(ty)) {
|
||||
8 => .ah,
|
||||
16, 32, 64 => .edx,
|
||||
else => unreachable,
|
||||
};
|
||||
try self.asmRegisterRegister(.{ ._, .xor }, hi_reg, hi_reg);
|
||||
},
|
||||
.i_ => try self.asmOpOnly(.{ ._, switch (self.regBitSize(ty)) {
|
||||
8 => .cbw,
|
||||
16 => .cwd,
|
||||
32 => .cdq,
|
||||
64 => .cqo,
|
||||
else => unreachable,
|
||||
} }),
|
||||
else => unreachable,
|
||||
},
|
||||
}
|
||||
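
As a hedged aside (not part of the commit), the reason the division path above zeroes or sign-extends the high register: div/idiv divide the double-width pair hi:lo, so the dividend must first be widened with the matching extension. At the language level, widening never changes the quotient:

const std = @import("std");

test "widening the dividend preserves the quotient" {
    // Signed: sign-extension (what cbw/cwd/cdq/cqo provide).
    const lhs: i8 = -100;
    const wide: i16 = lhs;
    try std.testing.expectEqual(@divTrunc(lhs, 7), @as(i8, @intCast(@divTrunc(wide, 7))));

    // Unsigned: zero-extension (what `xor ah, ah` / `xor edx, edx` provide).
    const ulhs: u8 = 200;
    const uwide: u16 = ulhs;
    try std.testing.expectEqual(ulhs / 3, @as(u8, @intCast(uwide / 3)));
}
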
@ -5210,13 +5255,11 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
|
||||
.child = (try mod.intType(.signed, scalar_bits)).ip_index,
|
||||
});
|
||||
|
||||
const sign_val = switch (tag) {
|
||||
const sign_mcv = try self.genTypedValue(.{ .ty = vec_ty, .val = switch (tag) {
|
||||
.neg => try vec_ty.minInt(mod, vec_ty),
|
||||
.abs => try vec_ty.maxInt(mod, vec_ty),
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
const sign_mcv = try self.genTypedValue(.{ .ty = vec_ty, .val = sign_val });
|
||||
} });
|
||||
const sign_mem = if (sign_mcv.isMemory())
|
||||
sign_mcv.mem(Memory.PtrSize.fromSize(abi_size))
|
||||
else
|
||||
@ -5285,7 +5328,6 @@ fn floatSign(self: *Self, inst: Air.Inst.Index, operand: Air.Inst.Ref, ty: Type)
|
||||
fn airFloatSign(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const un_op = self.air.instructions.items(.data)[inst].un_op;
|
||||
const ty = self.typeOf(un_op);
|
||||
|
||||
return self.floatSign(inst, un_op, ty);
|
||||
}
|
||||
|
||||
@ -5782,7 +5824,7 @@ fn reuseOperandAdvanced(
|
||||
operand: Air.Inst.Ref,
|
||||
op_index: Liveness.OperandInt,
|
||||
mcv: MCValue,
|
||||
tracked_inst: Air.Inst.Index,
|
||||
maybe_tracked_inst: ?Air.Inst.Index,
|
||||
) bool {
|
||||
if (!self.liveness.operandDies(inst, op_index))
|
||||
return false;
|
||||
@ -5791,11 +5833,13 @@ fn reuseOperandAdvanced(
|
||||
.register, .register_pair => for (mcv.getRegs()) |reg| {
|
||||
// If it's in the registers table, need to associate the register(s) with the
|
||||
// new instruction.
|
||||
if (maybe_tracked_inst) |tracked_inst| {
|
||||
if (!self.register_manager.isRegFree(reg)) {
|
||||
if (RegisterManager.indexOfRegIntoTracked(reg)) |index| {
|
||||
self.register_manager.registers[index] = tracked_inst;
|
||||
}
|
||||
}
|
||||
} else self.register_manager.freeReg(reg);
|
||||
},
|
||||
.load_frame => |frame_addr| if (frame_addr.index.isNamed()) return false,
|
||||
else => return false,
|
||||
@ -5804,7 +5848,7 @@ fn reuseOperandAdvanced(
|
||||
// Prevent the operand deaths processing code from deallocating it.
|
||||
self.liveness.clearOperandDeath(inst, op_index);
|
||||
const op_inst = Air.refToIndex(operand).?;
|
||||
self.getResolvedInstValue(op_inst).reuse(self, tracked_inst, op_inst);
|
||||
self.getResolvedInstValue(op_inst).reuse(self, maybe_tracked_inst, op_inst);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -7234,12 +7278,18 @@ fn genBinOp(
|
||||
if (maybe_mask_reg) |mask_reg| self.register_manager.lockRegAssumeUnused(mask_reg) else null;
|
||||
defer if (mask_lock) |lock| self.register_manager.unlockReg(lock);
|
||||
|
||||
const lhs_mcv = try self.resolveInst(lhs_air);
|
||||
const rhs_mcv = try self.resolveInst(rhs_air);
|
||||
const ordered_air = if (lhs_ty.isVector(mod) and lhs_ty.childType(mod).isAbiInt(mod) and
|
||||
switch (air_tag) {
|
||||
.cmp_lt, .cmp_gte => true,
|
||||
else => false,
|
||||
}) .{ .lhs = rhs_air, .rhs = lhs_air } else .{ .lhs = lhs_air, .rhs = rhs_air };
|
||||
|
||||
const lhs_mcv = try self.resolveInst(ordered_air.lhs);
|
||||
const rhs_mcv = try self.resolveInst(ordered_air.rhs);
|
||||
switch (lhs_mcv) {
|
||||
.immediate => |imm| switch (imm) {
|
||||
0 => switch (air_tag) {
|
||||
.sub, .sub_wrap => return self.genUnOp(maybe_inst, .neg, rhs_air),
|
||||
.sub, .sub_wrap => return self.genUnOp(maybe_inst, .neg, ordered_air.rhs),
|
||||
else => {},
|
||||
},
|
||||
else => {},
|
||||
@ -7288,11 +7338,15 @@ fn genBinOp(
|
||||
var copied_to_dst = true;
|
||||
const dst_mcv: MCValue = dst: {
|
||||
if (maybe_inst) |inst| {
|
||||
if ((!vec_op or lhs_mcv.isRegister()) and self.reuseOperand(inst, lhs_air, 0, lhs_mcv)) {
|
||||
const tracked_inst = switch (air_tag) {
|
||||
else => inst,
|
||||
.cmp_lt, .cmp_lte, .cmp_eq, .cmp_gte, .cmp_gt, .cmp_neq => null,
|
||||
};
|
||||
if ((!vec_op or lhs_mcv.isRegister()) and
|
||||
self.reuseOperandAdvanced(inst, ordered_air.lhs, 0, lhs_mcv, tracked_inst))
|
||||
break :dst lhs_mcv;
|
||||
}
|
||||
if (is_commutative and (!vec_op or rhs_mcv.isRegister()) and
|
||||
self.reuseOperand(inst, rhs_air, 1, rhs_mcv))
|
||||
self.reuseOperandAdvanced(inst, ordered_air.rhs, 1, rhs_mcv, tracked_inst))
|
||||
{
|
||||
flipped = true;
|
||||
break :dst rhs_mcv;
|
||||
@ -7657,7 +7711,10 @@ fn genBinOp(
|
||||
.sub,
|
||||
.sub_wrap,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_b, .sub } else .{ .p_b, .sub },
|
||||
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
|
||||
.bit_and => if (self.hasFeature(.avx))
|
||||
.{ .vp_, .@"and" }
|
||||
else
|
||||
.{ .p_, .@"and" },
|
||||
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
|
||||
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
|
||||
.min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
@ -7688,6 +7745,20 @@ fn genBinOp(
|
||||
else
|
||||
null,
|
||||
},
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
=> switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
.signed => if (self.hasFeature(.avx))
|
||||
.{ .vp_b, .cmpgt }
|
||||
else
|
||||
.{ .p_b, .cmpgt },
|
||||
.unsigned => null,
|
||||
},
|
||||
.cmp_eq,
|
||||
.cmp_neq,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_b, .cmpeq } else .{ .p_b, .cmpeq },
|
||||
else => null,
|
||||
},
|
||||
17...32 => switch (air_tag) {
|
||||
@ -7708,6 +7779,17 @@ fn genBinOp(
|
||||
.signed => if (self.hasFeature(.avx2)) .{ .vp_b, .maxs } else null,
|
||||
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_b, .maxu } else null,
|
||||
},
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
=> switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
.signed => if (self.hasFeature(.avx)) .{ .vp_b, .cmpgt } else null,
|
||||
.unsigned => null,
|
||||
},
|
||||
.cmp_eq,
|
||||
.cmp_neq,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_b, .cmpeq } else null,
|
||||
else => null,
|
||||
},
|
||||
else => null,
|
||||
@ -7723,7 +7805,10 @@ fn genBinOp(
|
||||
.mul,
|
||||
.mul_wrap,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_w, .mull } else .{ .p_d, .mull },
|
||||
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
|
||||
.bit_and => if (self.hasFeature(.avx))
|
||||
.{ .vp_, .@"and" }
|
||||
else
|
||||
.{ .p_, .@"and" },
|
||||
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
|
||||
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
|
||||
.min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
@ -7746,6 +7831,20 @@ fn genBinOp(
|
||||
else
|
||||
.{ .p_w, .maxu },
|
||||
},
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
=> switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
.signed => if (self.hasFeature(.avx))
|
||||
.{ .vp_w, .cmpgt }
|
||||
else
|
||||
.{ .p_w, .cmpgt },
|
||||
.unsigned => null,
|
||||
},
|
||||
.cmp_eq,
|
||||
.cmp_neq,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_w, .cmpeq } else .{ .p_w, .cmpeq },
|
||||
else => null,
|
||||
},
|
||||
9...16 => switch (air_tag) {
|
||||
@ -7769,6 +7868,17 @@ fn genBinOp(
|
||||
.signed => if (self.hasFeature(.avx2)) .{ .vp_w, .maxs } else null,
|
||||
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_w, .maxu } else null,
|
||||
},
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
=> switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
.signed => if (self.hasFeature(.avx)) .{ .vp_w, .cmpgt } else null,
|
||||
.unsigned => null,
|
||||
},
|
||||
.cmp_eq,
|
||||
.cmp_neq,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_w, .cmpeq } else null,
|
||||
else => null,
|
||||
},
|
||||
else => null,
|
||||
@ -7789,7 +7899,10 @@ fn genBinOp(
|
||||
.{ .p_d, .mull }
|
||||
else
|
||||
null,
|
||||
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
|
||||
.bit_and => if (self.hasFeature(.avx))
|
||||
.{ .vp_, .@"and" }
|
||||
else
|
||||
.{ .p_, .@"and" },
|
||||
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
|
||||
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
|
||||
.min => switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
@ -7820,6 +7933,20 @@ fn genBinOp(
|
||||
else
|
||||
null,
|
||||
},
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
=> switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
.signed => if (self.hasFeature(.avx))
|
||||
.{ .vp_d, .cmpgt }
|
||||
else
|
||||
.{ .p_d, .cmpgt },
|
||||
.unsigned => null,
|
||||
},
|
||||
.cmp_eq,
|
||||
.cmp_neq,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_d, .cmpeq } else .{ .p_d, .cmpeq },
|
||||
else => null,
|
||||
},
|
||||
5...8 => switch (air_tag) {
|
||||
@ -7843,6 +7970,17 @@ fn genBinOp(
|
||||
.signed => if (self.hasFeature(.avx2)) .{ .vp_d, .maxs } else null,
|
||||
.unsigned => if (self.hasFeature(.avx2)) .{ .vp_d, .maxu } else null,
|
||||
},
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
=> switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
.signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
|
||||
.unsigned => null,
|
||||
},
|
||||
.cmp_eq,
|
||||
.cmp_neq,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_d, .cmpeq } else null,
|
||||
else => null,
|
||||
},
|
||||
else => null,
|
||||
@ -7855,9 +7993,33 @@ fn genBinOp(
|
||||
.sub,
|
||||
.sub_wrap,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_q, .sub } else .{ .p_q, .sub },
|
||||
.bit_and => if (self.hasFeature(.avx)) .{ .vp_, .@"and" } else .{ .p_, .@"and" },
|
||||
.bit_and => if (self.hasFeature(.avx))
|
||||
.{ .vp_, .@"and" }
|
||||
else
|
||||
.{ .p_, .@"and" },
|
||||
.bit_or => if (self.hasFeature(.avx)) .{ .vp_, .@"or" } else .{ .p_, .@"or" },
|
||||
.xor => if (self.hasFeature(.avx)) .{ .vp_, .xor } else .{ .p_, .xor },
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
=> switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
.signed => if (self.hasFeature(.avx))
|
||||
.{ .vp_q, .cmpgt }
|
||||
else if (self.hasFeature(.sse4_2))
|
||||
.{ .p_q, .cmpgt }
|
||||
else
|
||||
null,
|
||||
.unsigned => null,
|
||||
},
|
||||
.cmp_eq,
|
||||
.cmp_neq,
|
||||
=> if (self.hasFeature(.avx))
|
||||
.{ .vp_q, .cmpeq }
|
||||
else if (self.hasFeature(.sse4_1))
|
||||
.{ .p_q, .cmpeq }
|
||||
else
|
||||
null,
|
||||
else => null,
|
||||
},
|
||||
3...4 => switch (air_tag) {
|
||||
@ -7870,6 +8032,17 @@ fn genBinOp(
|
||||
.bit_and => if (self.hasFeature(.avx2)) .{ .vp_, .@"and" } else null,
|
||||
.bit_or => if (self.hasFeature(.avx2)) .{ .vp_, .@"or" } else null,
|
||||
.xor => if (self.hasFeature(.avx2)) .{ .vp_, .xor } else null,
|
||||
.cmp_eq,
|
||||
.cmp_neq,
|
||||
=> if (self.hasFeature(.avx)) .{ .vp_d, .cmpeq } else null,
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_gt,
|
||||
.cmp_gte,
|
||||
=> switch (lhs_ty.childType(mod).intInfo(mod).signedness) {
|
||||
.signed => if (self.hasFeature(.avx)) .{ .vp_d, .cmpgt } else null,
|
||||
.unsigned => null,
|
||||
},
|
||||
else => null,
|
||||
},
|
||||
else => null,
|
||||
@ -8435,6 +8608,62 @@ fn genBinOp(
|
||||
);
|
||||
}
|
||||
},
|
||||
.cmp_lt,
|
||||
.cmp_lte,
|
||||
.cmp_eq,
|
||||
.cmp_gte,
|
||||
.cmp_gt,
|
||||
.cmp_neq,
|
||||
=> {
|
||||
switch (air_tag) {
|
||||
.cmp_lt,
|
||||
.cmp_eq,
|
||||
.cmp_gt,
|
||||
=> {},
|
||||
.cmp_lte,
|
||||
.cmp_gte,
|
||||
.cmp_neq,
|
||||
=> {
|
||||
const unsigned_ty = try lhs_ty.toUnsigned(mod);
|
||||
const not_mcv = try self.genTypedValue(.{
|
||||
.ty = lhs_ty,
|
||||
.val = try unsigned_ty.maxInt(mod, unsigned_ty),
|
||||
});
|
||||
const not_mem = if (not_mcv.isMemory())
|
||||
not_mcv.mem(Memory.PtrSize.fromSize(abi_size))
|
||||
else
|
||||
Memory.sib(Memory.PtrSize.fromSize(abi_size), .{ .base = .{
|
||||
.reg = try self.copyToTmpRegister(Type.usize, not_mcv.address()),
|
||||
} });
|
||||
switch (mir_tag[0]) {
|
||||
.vp_b, .vp_d, .vp_q, .vp_w => try self.asmRegisterRegisterMemory(
|
||||
.{ .vp_, .xor },
|
||||
dst_reg,
|
||||
dst_reg,
|
||||
not_mem,
|
||||
),
|
||||
.p_b, .p_d, .p_q, .p_w => try self.asmRegisterMemory(
|
||||
.{ .p_, .xor },
|
||||
dst_reg,
|
||||
not_mem,
|
||||
),
|
||||
else => unreachable,
|
||||
}
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
|
||||
const gp_reg = try self.register_manager.allocReg(maybe_inst, abi.RegisterClass.gp);
|
||||
const gp_lock = self.register_manager.lockRegAssumeUnused(gp_reg);
|
||||
defer self.register_manager.unlockReg(gp_lock);
|
||||
|
||||
try self.asmRegisterRegister(switch (mir_tag[0]) {
|
||||
.vp_b, .vp_d, .vp_q, .vp_w => .{ .vp_b, .movmsk },
|
||||
.p_b, .p_d, .p_q, .p_w => .{ .p_b, .movmsk },
|
||||
else => unreachable,
|
||||
}, gp_reg.to32(), dst_reg);
|
||||
return .{ .register = gp_reg };
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
|
||||
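
A hedged illustration (not from the commit) of the identities the vector-compare lowering above relies on: SSE/AVX integer compares only provide cmpgt and cmpeq, so lt/gte are formed by swapping the operands (the ordered_air step) and lte/gte/neq by xoring the compare result with all-ones before movmsk extracts the lane mask.

const std = @import("std");

test "vector compare identities" {
    const a: @Vector(4, i32) = .{ 1, 5, 3, 7 };
    const b: @Vector(4, i32) = .{ 2, 5, 1, 7 };

    // a < b is computed as b > a, i.e. cmpgt with swapped operands.
    try std.testing.expect(@reduce(.And, (a < b) == (b > a)));

    // a >= b is the lanewise negation of a < b, i.e. xor with all-ones.
    const all_true: @Vector(4, bool) = @splat(true);
    try std.testing.expect(@reduce(.And, (a >= b) == ((a < b) != all_true)));
}
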
@ -9741,8 +9970,15 @@ fn airCmp(self: *Self, inst: Air.Inst.Index, op: math.CompareOperator) !void {
|
||||
}
|
||||
|
||||
fn airCmpVector(self: *Self, inst: Air.Inst.Index) !void {
|
||||
_ = inst;
|
||||
return self.fail("TODO implement airCmpVector for {}", .{self.target.cpu.arch});
|
||||
const ty_pl = self.air.instructions.items(.data)[inst].ty_pl;
|
||||
const extra = self.air.extraData(Air.VectorCmp, ty_pl.payload).data;
|
||||
const dst_mcv = try self.genBinOp(
|
||||
inst,
|
||||
Air.Inst.Tag.fromCmpOp(extra.compareOperator(), false),
|
||||
extra.lhs,
|
||||
extra.rhs,
|
||||
);
|
||||
return self.finishAir(inst, dst_mcv, .{ extra.lhs, extra.rhs, .none });
|
||||
}
|
||||
|
||||
fn airCmpLtErrorsLen(self: *Self, inst: Air.Inst.Index) !void {
|
||||
@ -12592,7 +12828,7 @@ fn airMemset(self: *Self, inst: Air.Inst.Index, safety: bool) !void {
|
||||
.{ .i_, .mul },
|
||||
len_reg,
|
||||
len_reg,
|
||||
Immediate.u(elem_abi_size),
|
||||
Immediate.s(elem_abi_size),
|
||||
);
|
||||
try self.genInlineMemcpy(second_elem_ptr_mcv, ptr, len_mcv);
|
||||
|
||||
@ -12645,8 +12881,23 @@ fn airMemcpy(self: *Self, inst: Air.Inst.Index) !void {
|
||||
defer if (src_ptr_lock) |lock| self.register_manager.unlockReg(lock);
|
||||
|
||||
const len: MCValue = switch (dst_ptr_ty.ptrSize(mod)) {
|
||||
.Slice => dst_ptr.address().offset(8).deref(),
|
||||
.One => .{ .immediate = dst_ptr_ty.childType(mod).arrayLen(mod) },
|
||||
.Slice => len: {
|
||||
const len_reg = try self.register_manager.allocReg(null, abi.RegisterClass.gp);
|
||||
const len_lock = self.register_manager.lockRegAssumeUnused(len_reg);
|
||||
defer self.register_manager.unlockReg(len_lock);
|
||||
|
||||
try self.asmRegisterMemoryImmediate(
|
||||
.{ .i_, .mul },
|
||||
len_reg,
|
||||
dst_ptr.address().offset(8).deref().mem(.qword),
|
||||
Immediate.s(@intCast(dst_ptr_ty.childType(mod).abiSize(mod))),
|
||||
);
|
||||
break :len .{ .register = len_reg };
|
||||
},
|
||||
.One => len: {
|
||||
const array_ty = dst_ptr_ty.childType(mod);
|
||||
break :len .{ .immediate = array_ty.arrayLen(mod) * array_ty.childType(mod).abiSize(mod) };
|
||||
},
|
||||
.C, .Many => unreachable,
|
||||
};
|
||||
const len_lock: ?RegisterLock = switch (len) {
|
||||
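
A hedged example (not from the commit) of the case the length computation above addresses: for a slice destination the copy length is a number of elements, so the backend now multiplies it by the element ABI size instead of treating it as a byte count. At the language level:

const std = @import("std");

test "@memcpy copies whole multi-byte elements" {
    const src = [_]u32{ 0x11111111, 0x22222222, 0x33333333, 0x44444444 };
    var dst = [_]u32{ 0, 0, 0, 0 };
    // Both slices have 4 elements, i.e. 16 bytes are copied, not 4.
    @memcpy(dst[0..], src[0..]);
    try std.testing.expectEqualSlices(u32, &src, &dst);
}
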
@ -12999,10 +13250,60 @@ fn airShuffle(self: *Self, inst: Air.Inst.Index) !void {
|
||||
}
|
||||
|
||||
fn airReduce(self: *Self, inst: Air.Inst.Index) !void {
|
||||
const mod = self.bin_file.options.module.?;
|
||||
const reduce = self.air.instructions.items(.data)[inst].reduce;
|
||||
_ = reduce;
|
||||
return self.fail("TODO implement airReduce for x86_64", .{});
|
||||
//return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
|
||||
|
||||
const result: MCValue = result: {
|
||||
const operand_ty = self.typeOf(reduce.operand);
|
||||
if (operand_ty.isVector(mod) and operand_ty.childType(mod).toIntern() == .bool_type) {
|
||||
try self.spillEflagsIfOccupied();
|
||||
|
||||
const operand_mcv = try self.resolveInst(reduce.operand);
|
||||
const mask_len = (std.math.cast(u6, operand_ty.vectorLen(mod)) orelse
|
||||
return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)}));
|
||||
const mask = (@as(u64, 1) << mask_len) - 1;
|
||||
const abi_size: u32 = @intCast(operand_ty.abiSize(mod));
|
||||
switch (reduce.operation) {
|
||||
.Or => {
|
||||
if (operand_mcv.isMemory()) try self.asmMemoryImmediate(
|
||||
.{ ._, .@"test" },
|
||||
operand_mcv.mem(Memory.PtrSize.fromSize(abi_size)),
|
||||
Immediate.u(mask),
|
||||
) else {
|
||||
const operand_reg = registerAlias(if (operand_mcv.isRegister())
|
||||
operand_mcv.getReg().?
|
||||
else
|
||||
try self.copyToTmpRegister(operand_ty, operand_mcv), abi_size);
|
||||
if (mask_len < abi_size * 8) try self.asmRegisterImmediate(
|
||||
.{ ._, .@"test" },
|
||||
operand_reg,
|
||||
Immediate.u(mask),
|
||||
) else try self.asmRegisterRegister(
|
||||
.{ ._, .@"test" },
|
||||
operand_reg,
|
||||
operand_reg,
|
||||
);
|
||||
}
|
||||
break :result .{ .eflags = .nz };
|
||||
},
|
||||
.And => {
|
||||
const tmp_reg = try self.copyToTmpRegister(operand_ty, operand_mcv);
|
||||
const tmp_lock = self.register_manager.lockRegAssumeUnused(tmp_reg);
|
||||
defer self.register_manager.unlockReg(tmp_lock);
|
||||
|
||||
try self.asmRegister(.{ ._, .not }, tmp_reg);
|
||||
if (mask_len < abi_size * 8)
|
||||
try self.asmRegisterImmediate(.{ ._, .@"test" }, tmp_reg, Immediate.u(mask))
|
||||
else
|
||||
try self.asmRegisterRegister(.{ ._, .@"test" }, tmp_reg, tmp_reg);
|
||||
break :result .{ .eflags = .z };
|
||||
},
|
||||
else => return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)}),
|
||||
}
|
||||
}
|
||||
return self.fail("TODO implement airReduce for {}", .{operand_ty.fmt(mod)});
|
||||
};
|
||||
return self.finishAir(inst, result, .{ reduce.operand, .none, .none });
|
||||
}
|
||||
|
||||
fn airAggregateInit(self: *Self, inst: Air.Inst.Index) !void {
|
||||
|
||||
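
For reference, a hedged sketch (not part of the commit) of the semantics airReduce lowers above: @reduce over a bool vector folds every lane with the given operator, which the backend maps onto a masked test (.Or) or a not-then-test (.And) of the packed bool representation.

const std = @import("std");

test "@reduce over bool vectors" {
    const some: @Vector(4, bool) = .{ true, false, true, true };
    const all: @Vector(4, bool) = .{ true, true, true, true };

    // .Or asks "is any lane set"; .And asks "are all lanes set".
    try std.testing.expect(@reduce(.Or, some));
    try std.testing.expect(!@reduce(.And, some));
    try std.testing.expect(@reduce(.And, all));
}
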
@ -19,18 +19,18 @@ pub const Error = Lower.Error || error{
|
||||
|
||||
pub fn emitMir(emit: *Emit) Error!void {
|
||||
for (0..emit.lower.mir.instructions.len) |mir_i| {
|
||||
const mir_index = @as(Mir.Inst.Index, @intCast(mir_i));
|
||||
const mir_index: Mir.Inst.Index = @intCast(mir_i);
|
||||
try emit.code_offset_mapping.putNoClobber(
|
||||
emit.lower.allocator,
|
||||
mir_index,
|
||||
@as(u32, @intCast(emit.code.items.len)),
|
||||
@intCast(emit.code.items.len),
|
||||
);
|
||||
const lowered = try emit.lower.lowerMir(mir_index);
|
||||
var lowered_relocs = lowered.relocs;
|
||||
for (lowered.insts, 0..) |lowered_inst, lowered_index| {
|
||||
const start_offset = @as(u32, @intCast(emit.code.items.len));
|
||||
const start_offset: u32 = @intCast(emit.code.items.len);
|
||||
try lowered_inst.encode(emit.code.writer(), .{});
|
||||
const end_offset = @as(u32, @intCast(emit.code.items.len));
|
||||
const end_offset: u32 = @intCast(emit.code.items.len);
|
||||
while (lowered_relocs.len > 0 and
|
||||
lowered_relocs[0].lowered_inst_index == lowered_index) : ({
|
||||
lowered_relocs = lowered_relocs[1..];
|
||||
@ -39,7 +39,7 @@ pub fn emitMir(emit: *Emit) Error!void {
|
||||
.source = start_offset,
|
||||
.target = target,
|
||||
.offset = end_offset - 4,
|
||||
.length = @as(u5, @intCast(end_offset - start_offset)),
|
||||
.length = @intCast(end_offset - start_offset),
|
||||
}),
|
||||
.linker_extern_fn => |symbol| if (emit.bin_file.cast(link.File.Elf)) |elf_file| {
|
||||
// Add relocation to the decl.
|
||||
@ -220,7 +220,7 @@ const Reloc = struct {
|
||||
/// Target of the relocation.
|
||||
target: Mir.Inst.Index,
|
||||
/// Offset of the relocation within the instruction.
|
||||
offset: usize,
|
||||
offset: u32,
|
||||
/// Length of the instruction.
|
||||
length: u5,
|
||||
};
|
||||
|
||||
@ -266,6 +266,8 @@ pub const Mnemonic = enum {
|
||||
packssdw, packsswb, packuswb,
|
||||
paddb, paddd, paddq, paddsb, paddsw, paddusb, paddusw, paddw,
|
||||
pand, pandn, por, pxor,
|
||||
pcmpeqb, pcmpeqd, pcmpeqw,
|
||||
pcmpgtb, pcmpgtd, pcmpgtw,
|
||||
pmulhw, pmullw,
|
||||
psubb, psubd, psubq, psubsb, psubsw, psubusb, psubusw, psubw,
|
||||
// SSE
|
||||
@ -278,11 +280,12 @@ pub const Mnemonic = enum {
|
||||
maxps, maxss,
|
||||
minps, minss,
|
||||
movaps, movhlps, movlhps,
|
||||
movmskps,
|
||||
movss, movups,
|
||||
mulps, mulss,
|
||||
orps,
|
||||
pextrw, pinsrw,
|
||||
pmaxsw, pmaxub, pminsw, pminub,
|
||||
pmaxsw, pmaxub, pminsw, pminub, pmovmskb,
|
||||
shufps,
|
||||
sqrtps, sqrtss,
|
||||
subps, subss,
|
||||
@ -301,6 +304,7 @@ pub const Mnemonic = enum {
|
||||
minpd, minsd,
|
||||
movapd,
|
||||
movdqa, movdqu,
|
||||
movmskpd,
|
||||
//movsd,
|
||||
movupd,
|
||||
mulpd, mulsd,
|
||||
@ -323,11 +327,14 @@ pub const Mnemonic = enum {
|
||||
extractps,
|
||||
insertps,
|
||||
packusdw,
|
||||
pcmpeqq,
|
||||
pextrb, pextrd, pextrq,
|
||||
pinsrb, pinsrd, pinsrq,
|
||||
pmaxsb, pmaxsd, pmaxud, pmaxuw, pminsb, pminsd, pminud, pminuw,
|
||||
pmulld,
|
||||
roundpd, roundps, roundsd, roundss,
|
||||
// SSE4.2
|
||||
pcmpgtq,
|
||||
// AVX
|
||||
vaddpd, vaddps, vaddsd, vaddss,
|
||||
vandnpd, vandnps, vandpd, vandps,
|
||||
@ -348,6 +355,7 @@ pub const Mnemonic = enum {
|
||||
vmovddup,
|
||||
vmovdqa, vmovdqu,
|
||||
vmovhlps, vmovlhps,
|
||||
vmovmskpd, vmovmskps,
|
||||
vmovq,
|
||||
vmovsd,
|
||||
vmovshdup, vmovsldup,
|
||||
@ -359,10 +367,13 @@ pub const Mnemonic = enum {
|
||||
vpackssdw, vpacksswb, vpackusdw, vpackuswb,
|
||||
vpaddb, vpaddd, vpaddq, vpaddsb, vpaddsw, vpaddusb, vpaddusw, vpaddw,
|
||||
vpand, vpandn,
|
||||
vpcmpeqb, vpcmpeqd, vpcmpeqq, vpcmpeqw,
|
||||
vpcmpgtb, vpcmpgtd, vpcmpgtq, vpcmpgtw,
|
||||
vpextrb, vpextrd, vpextrq, vpextrw,
|
||||
vpinsrb, vpinsrd, vpinsrq, vpinsrw,
|
||||
vpmaxsb, vpmaxsd, vpmaxsw, vpmaxub, vpmaxud, vpmaxuw,
|
||||
vpminsb, vpminsd, vpminsw, vpminub, vpminud, vpminuw,
|
||||
vpmovmskb,
|
||||
vpmulhw, vpmulld, vpmullw,
|
||||
vpor,
|
||||
vpshufhw, vpshuflw,
|
||||
@ -754,6 +765,7 @@ pub const Feature = enum {
|
||||
sse2,
|
||||
sse3,
|
||||
sse4_1,
|
||||
sse4_2,
|
||||
ssse3,
|
||||
x87,
|
||||
};
|
||||
|
||||
@ -190,7 +190,7 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
|
||||
.pseudo_probe_align_ri_s => {
|
||||
try lower.emit(.none, .@"test", &.{
|
||||
.{ .reg = inst.data.ri.r1 },
|
||||
.{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) },
|
||||
.{ .imm = Immediate.s(@bitCast(inst.data.ri.i)) },
|
||||
});
|
||||
try lower.emit(.none, .jz, &.{
|
||||
.{ .imm = lower.reloc(.{ .inst = index + 1 }) },
|
||||
@ -226,14 +226,14 @@ pub fn lowerMir(lower: *Lower, index: Mir.Inst.Index) Error!struct {
|
||||
}
|
||||
try lower.emit(.none, .sub, &.{
|
||||
.{ .reg = inst.data.ri.r1 },
|
||||
.{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.ri.i))) },
|
||||
.{ .imm = Immediate.s(@bitCast(inst.data.ri.i)) },
|
||||
});
|
||||
assert(lower.result_insts_len <= pseudo_probe_adjust_unrolled_max_insts);
|
||||
},
|
||||
.pseudo_probe_adjust_setup_rri_s => {
|
||||
try lower.emit(.none, .mov, &.{
|
||||
.{ .reg = inst.data.rri.r2.to32() },
|
||||
.{ .imm = Immediate.s(@as(i32, @bitCast(inst.data.rri.i))) },
|
||||
.{ .imm = Immediate.s(@bitCast(inst.data.rri.i)) },
|
||||
});
|
||||
try lower.emit(.none, .sub, &.{
|
||||
.{ .reg = inst.data.rri.r1 },
|
||||
@ -291,7 +291,9 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
|
||||
.i_s,
|
||||
.mi_sib_s,
|
||||
.mi_rip_s,
|
||||
=> Immediate.s(@as(i32, @bitCast(i))),
|
||||
.rmi_sib_s,
|
||||
.rmi_rip_s,
|
||||
=> Immediate.s(@bitCast(i)),
|
||||
|
||||
.rrri,
|
||||
.rri_u,
|
||||
@ -301,6 +303,8 @@ fn imm(lower: Lower, ops: Mir.Inst.Ops, i: u32) Immediate {
|
||||
.mi_rip_u,
|
||||
.rmi_sib,
|
||||
.rmi_rip,
|
||||
.rmi_sib_u,
|
||||
.rmi_rip_u,
|
||||
.mri_sib,
|
||||
.mri_rip,
|
||||
.rrm_sib,
|
||||
@ -319,6 +323,8 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
|
||||
return lower.mir.resolveFrameLoc(switch (ops) {
|
||||
.rm_sib,
|
||||
.rmi_sib,
|
||||
.rmi_sib_s,
|
||||
.rmi_sib_u,
|
||||
.m_sib,
|
||||
.mi_sib_u,
|
||||
.mi_sib_s,
|
||||
@ -335,6 +341,8 @@ fn mem(lower: Lower, ops: Mir.Inst.Ops, payload: u32) Memory {
|
||||
|
||||
.rm_rip,
|
||||
.rmi_rip,
|
||||
.rmi_rip_s,
|
||||
.rmi_rip_u,
|
||||
.m_rip,
|
||||
.mi_rip_u,
|
||||
.mi_rip_s,
|
||||
@ -383,13 +391,29 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
|
||||
.rrri => inst.data.rrri.fixes,
|
||||
.rri_s, .rri_u => inst.data.rri.fixes,
|
||||
.ri_s, .ri_u => inst.data.ri.fixes,
|
||||
.ri64, .rm_sib, .rm_rip, .mr_sib, .mr_rip => inst.data.rx.fixes,
|
||||
.ri64,
|
||||
.rm_sib,
|
||||
.rm_rip,
|
||||
.rmi_sib_s,
|
||||
.rmi_sib_u,
|
||||
.rmi_rip_s,
|
||||
.rmi_rip_u,
|
||||
.mr_sib,
|
||||
.mr_rip,
|
||||
=> inst.data.rx.fixes,
|
||||
.mrr_sib, .mrr_rip, .rrm_sib, .rrm_rip => inst.data.rrx.fixes,
|
||||
.rmi_sib, .rmi_rip, .mri_sib, .mri_rip => inst.data.rix.fixes,
|
||||
.rrmi_sib, .rrmi_rip => inst.data.rrix.fixes,
|
||||
.mi_sib_u, .mi_rip_u, .mi_sib_s, .mi_rip_s => inst.data.x.fixes,
|
||||
.m_sib, .m_rip, .rax_moffs, .moffs_rax => inst.data.x.fixes,
|
||||
.extern_fn_reloc, .got_reloc, .extern_got_reloc, .direct_reloc, .direct_got_reloc, .import_reloc, .tlv_reloc => ._,
|
||||
.extern_fn_reloc,
|
||||
.got_reloc,
|
||||
.extern_got_reloc,
|
||||
.direct_reloc,
|
||||
.direct_got_reloc,
|
||||
.import_reloc,
|
||||
.tlv_reloc,
|
||||
=> ._,
|
||||
else => return lower.fail("TODO lower .{s}", .{@tagName(inst.ops)}),
|
||||
};
|
||||
try lower.emit(switch (fixes) {
|
||||
@ -461,7 +485,7 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
|
||||
.m_sib, .m_rip => &.{
|
||||
.{ .mem = lower.mem(inst.ops, inst.data.x.payload) },
|
||||
},
|
||||
.mi_sib_s, .mi_sib_u, .mi_rip_u, .mi_rip_s => &.{
|
||||
.mi_sib_s, .mi_sib_u, .mi_rip_s, .mi_rip_u => &.{
|
||||
.{ .mem = lower.mem(inst.ops, inst.data.x.payload + 1) },
|
||||
.{ .imm = lower.imm(
|
||||
inst.ops,
|
||||
@ -477,6 +501,14 @@ fn generic(lower: *Lower, inst: Mir.Inst) Error!void {
|
||||
.{ .mem = lower.mem(inst.ops, inst.data.rix.payload) },
|
||||
.{ .imm = lower.imm(inst.ops, inst.data.rix.i) },
|
||||
},
|
||||
.rmi_sib_s, .rmi_sib_u, .rmi_rip_s, .rmi_rip_u => &.{
|
||||
.{ .reg = inst.data.rx.r1 },
|
||||
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload + 1) },
|
||||
.{ .imm = lower.imm(
|
||||
inst.ops,
|
||||
lower.mir.extraData(Mir.Imm32, inst.data.rx.payload).data.imm,
|
||||
) },
|
||||
},
|
||||
.mr_sib, .mr_rip => &.{
|
||||
.{ .mem = lower.mem(inst.ops, inst.data.rx.payload) },
|
||||
.{ .reg = inst.data.rx.r1 },
|
||||
|
||||
@ -474,6 +474,10 @@ pub const Inst = struct {
|
||||
/// Bitwise logical and not of packed single-precision floating-point values
|
||||
/// Bitwise logical and not of packed double-precision floating-point values
|
||||
andn,
|
||||
/// Compare packed data for equal
|
||||
cmpeq,
|
||||
/// Compare packed data for greater than
|
||||
cmpgt,
|
||||
/// Maximum of packed signed integers
|
||||
maxs,
|
||||
/// Maximum of packed unsigned integers
|
||||
@ -482,6 +486,10 @@ pub const Inst = struct {
|
||||
mins,
|
||||
/// Minimum of packed unsigned integers
|
||||
minu,
|
||||
/// Move byte mask
|
||||
/// Extract packed single precision floating-point sign mask
|
||||
/// Extract packed double precision floating-point sign mask
|
||||
movmsk,
|
||||
/// Multiply packed signed integers and store low result
|
||||
mull,
|
||||
/// Multiply packed signed integers and store high result
|
||||
@ -720,9 +728,24 @@ pub const Inst = struct {
|
||||
/// Register, memory (RIP) operands.
|
||||
/// Uses `rx` payload.
|
||||
rm_rip,
|
||||
/// Register, memory (SIB), immediate (byte) operands.
|
||||
/// Register, memory (SIB), immediate (word) operands.
|
||||
/// Uses `rix` payload with extra data of type `MemorySib`.
|
||||
rmi_sib,
|
||||
/// Register, memory (RIP), immediate (word) operands.
|
||||
/// Uses `rix` payload with extra data of type `MemoryRip`.
|
||||
rmi_rip,
|
||||
/// Register, memory (SIB), immediate (signed) operands.
|
||||
/// Uses `rx` payload with extra data of type `Imm32` followed by `MemorySib`.
|
||||
rmi_sib_s,
|
||||
/// Register, memory (SIB), immediate (unsigned) operands.
|
||||
/// Uses `rx` payload with extra data of type `Imm32` followed by `MemorySib`.
|
||||
rmi_sib_u,
|
||||
/// Register, memory (RIP), immediate (signed) operands.
|
||||
/// Uses `rx` payload with extra data of type `Imm32` followed by `MemoryRip`.
|
||||
rmi_rip_s,
|
||||
/// Register, memory (RIP), immediate (unsigned) operands.
|
||||
/// Uses `rx` payload with extra data of type `Imm32` followed by `MemoryRip`.
|
||||
rmi_rip_u,
|
||||
/// Register, register, memory (RIP).
|
||||
/// Uses `rrix` payload with extra data of type `MemoryRip`.
|
||||
rrm_rip,
|
||||
@ -735,27 +758,24 @@ pub const Inst = struct {
|
||||
/// Register, register, memory (SIB), immediate (byte) operands.
|
||||
/// Uses `rrix` payload with extra data of type `MemorySib`.
|
||||
rrmi_sib,
|
||||
/// Register, memory (RIP), immediate (byte) operands.
|
||||
/// Uses `rix` payload with extra data of type `MemoryRip`.
|
||||
rmi_rip,
|
||||
/// Single memory (SIB) operand.
|
||||
/// Uses `x` with extra data of type `MemorySib`.
|
||||
m_sib,
|
||||
/// Single memory (RIP) operand.
|
||||
/// Uses `x` with extra data of type `MemoryRip`.
|
||||
m_rip,
|
||||
/// Memory (SIB), immediate (unsigned) operands.
|
||||
/// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`.
|
||||
mi_sib_u,
|
||||
/// Memory (RIP), immediate (unsigned) operands.
|
||||
/// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`.
|
||||
mi_rip_u,
|
||||
/// Memory (SIB), immediate (sign-extend) operands.
|
||||
/// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`.
|
||||
mi_sib_s,
|
||||
/// Memory (SIB), immediate (unsigned) operands.
|
||||
/// Uses `x` payload with extra data of type `Imm32` followed by `MemorySib`.
|
||||
mi_sib_u,
|
||||
/// Memory (RIP), immediate (sign-extend) operands.
|
||||
/// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`.
|
||||
mi_rip_s,
|
||||
/// Memory (RIP), immediate (unsigned) operands.
|
||||
/// Uses `x` payload with extra data of type `Imm32` followed by `MemoryRip`.
|
||||
mi_rip_u,
|
||||
/// Memory (SIB), register operands.
|
||||
/// Uses `rx` payload with extra data of type `MemorySib`.
|
||||
mr_sib,
|
||||
@ -768,10 +788,10 @@ pub const Inst = struct {
|
||||
/// Memory (RIP), register, register operands.
|
||||
/// Uses `rrx` payload with extra data of type `MemoryRip`.
|
||||
mrr_rip,
|
||||
/// Memory (SIB), register, immediate (byte) operands.
|
||||
/// Memory (SIB), register, immediate (word) operands.
|
||||
/// Uses `rix` payload with extra data of type `MemorySib`.
|
||||
mri_sib,
|
||||
/// Memory (RIP), register, immediate (byte) operands.
|
||||
/// Memory (RIP), register, immediate (word) operands.
|
||||
/// Uses `rix` payload with extra data of type `MemoryRip`.
|
||||
mri_rip,
|
||||
/// Rax, Memory moffs.
|
||||
@ -955,7 +975,7 @@ pub const Inst = struct {
|
||||
rix: struct {
|
||||
fixes: Fixes = ._,
|
||||
r1: Register,
|
||||
i: u8,
|
||||
i: u16,
|
||||
payload: u32,
|
||||
},
|
||||
/// Register, register, byte immediate, followed by Custom payload found in extra.
|
||||
@ -1010,7 +1030,7 @@ pub const RegisterList = struct {
|
||||
|
||||
fn getIndexForReg(registers: []const Register, reg: Register) BitSet.MaskInt {
|
||||
for (registers, 0..) |cpreg, i| {
|
||||
if (reg.id() == cpreg.id()) return @as(u32, @intCast(i));
|
||||
if (reg.id() == cpreg.id()) return @intCast(i);
|
||||
}
|
||||
unreachable; // register not in input register list!
|
||||
}
|
||||
@ -1030,7 +1050,7 @@ pub const RegisterList = struct {
|
||||
}
|
||||
|
||||
pub fn count(self: Self) u32 {
|
||||
return @as(u32, @intCast(self.bitset.count()));
|
||||
return @intCast(self.bitset.count());
|
||||
}
|
||||
};
|
||||
|
||||
@ -1044,14 +1064,14 @@ pub const Imm64 = struct {
|
||||
|
||||
pub fn encode(v: u64) Imm64 {
|
||||
return .{
|
||||
.msb = @as(u32, @truncate(v >> 32)),
|
||||
.lsb = @as(u32, @truncate(v)),
|
||||
.msb = @truncate(v >> 32),
|
||||
.lsb = @truncate(v),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn decode(imm: Imm64) u64 {
|
||||
var res: u64 = 0;
|
||||
res |= (@as(u64, @intCast(imm.msb)) << 32);
|
||||
res |= @as(u64, @intCast(imm.msb)) << 32;
|
||||
res |= @as(u64, @intCast(imm.lsb));
|
||||
return res;
|
||||
}
|
||||
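
A minimal sketch (not from the commit) of the invariant behind encode/decode above: a 64-bit immediate is split into two u32 extra words (msb, lsb) and reassembled losslessly. The @import path is an assumption for illustration.

const std = @import("std");
const Mir = @import("Mir.zig"); // assumed: sibling import of the file above

test "Imm64 round trip" {
    const value: u64 = 0x1234_5678_9abc_def0;
    const imm = Mir.Imm64.encode(value);
    try std.testing.expectEqual(value, Mir.Imm64.decode(imm));
}
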
@ -1075,7 +1095,7 @@ pub const MemorySib = struct {
|
||||
assert(sib.scale_index.scale == 0 or std.math.isPowerOfTwo(sib.scale_index.scale));
|
||||
return .{
|
||||
.ptr_size = @intFromEnum(sib.ptr_size),
|
||||
.base_tag = @intFromEnum(@as(Memory.Base.Tag, sib.base)),
|
||||
.base_tag = @intFromEnum(sib.base),
|
||||
.base = switch (sib.base) {
|
||||
.none => undefined,
|
||||
.reg => |r| @intFromEnum(r),
|
||||
@ -1091,18 +1111,18 @@ pub const MemorySib = struct {
|
||||
}
|
||||
|
||||
pub fn decode(msib: MemorySib) Memory {
|
||||
const scale = @as(u4, @truncate(msib.scale_index));
|
||||
const scale: u4 = @truncate(msib.scale_index);
|
||||
assert(scale == 0 or std.math.isPowerOfTwo(scale));
|
||||
return .{ .sib = .{
|
||||
.ptr_size = @as(Memory.PtrSize, @enumFromInt(msib.ptr_size)),
|
||||
.ptr_size = @enumFromInt(msib.ptr_size),
|
||||
.base = switch (@as(Memory.Base.Tag, @enumFromInt(msib.base_tag))) {
|
||||
.none => .none,
|
||||
.reg => .{ .reg = @as(Register, @enumFromInt(msib.base)) },
|
||||
.frame => .{ .frame = @as(bits.FrameIndex, @enumFromInt(msib.base)) },
|
||||
.reg => .{ .reg = @enumFromInt(msib.base) },
|
||||
.frame => .{ .frame = @enumFromInt(msib.base) },
|
||||
},
|
||||
.scale_index = .{
|
||||
.scale = scale,
|
||||
.index = if (scale > 0) @as(Register, @enumFromInt(msib.scale_index >> 4)) else undefined,
|
||||
.index = if (scale > 0) @enumFromInt(msib.scale_index >> 4) else undefined,
|
||||
},
|
||||
.disp = msib.disp,
|
||||
} };
|
||||
@ -1124,7 +1144,7 @@ pub const MemoryRip = struct {
|
||||
|
||||
pub fn decode(mrip: MemoryRip) Memory {
|
||||
return .{ .rip = .{
|
||||
.ptr_size = @as(Memory.PtrSize, @enumFromInt(mrip.ptr_size)),
|
||||
.ptr_size = @enumFromInt(mrip.ptr_size),
|
||||
.disp = mrip.disp,
|
||||
} };
|
||||
}
|
||||
@ -1141,14 +1161,14 @@ pub const MemoryMoffs = struct {
|
||||
pub fn encode(seg: Register, offset: u64) MemoryMoffs {
|
||||
return .{
|
||||
.seg = @intFromEnum(seg),
|
||||
.msb = @as(u32, @truncate(offset >> 32)),
|
||||
.lsb = @as(u32, @truncate(offset >> 0)),
|
||||
.msb = @truncate(offset >> 32),
|
||||
.lsb = @truncate(offset >> 0),
|
||||
};
|
||||
}
|
||||
|
||||
pub fn decode(moffs: MemoryMoffs) Memory {
|
||||
return .{ .moffs = .{
|
||||
.seg = @as(Register, @enumFromInt(moffs.seg)),
|
||||
.seg = @enumFromInt(moffs.seg),
|
||||
.offset = @as(u64, moffs.msb) << 32 | @as(u64, moffs.lsb) << 0,
|
||||
} };
|
||||
}
|
||||
@ -1168,7 +1188,7 @@ pub fn extraData(mir: Mir, comptime T: type, index: u32) struct { data: T, end:
|
||||
inline for (fields) |field| {
|
||||
@field(result, field.name) = switch (field.type) {
|
||||
u32 => mir.extra[i],
|
||||
i32 => @as(i32, @bitCast(mir.extra[i])),
|
||||
i32 => @bitCast(mir.extra[i]),
|
||||
else => @compileError("bad field type"),
|
||||
};
|
||||
i += 1;
|
||||
|
||||
@ -232,7 +232,7 @@ pub const Register = enum(u7) {
|
||||
else => unreachable,
|
||||
// zig fmt: on
|
||||
};
|
||||
return @as(u6, @intCast(@intFromEnum(reg) - base));
|
||||
return @intCast(@intFromEnum(reg) - base);
|
||||
}
|
||||
|
||||
pub fn bitSize(reg: Register) u64 {
|
||||
@ -291,11 +291,11 @@ pub const Register = enum(u7) {
|
||||
else => unreachable,
|
||||
// zig fmt: on
|
||||
};
|
||||
return @as(u4, @truncate(@intFromEnum(reg) - base));
|
||||
return @truncate(@intFromEnum(reg) - base);
|
||||
}
|
||||
|
||||
pub fn lowEnc(reg: Register) u3 {
|
||||
return @as(u3, @truncate(reg.enc()));
|
||||
return @truncate(reg.enc());
|
||||
}
|
||||
|
||||
pub fn toBitSize(reg: Register, bit_size: u64) Register {
|
||||
@ -325,19 +325,19 @@ pub const Register = enum(u7) {
|
||||
}
|
||||
|
||||
pub fn to64(reg: Register) Register {
|
||||
return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax)));
|
||||
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.rax));
|
||||
}
|
||||
|
||||
pub fn to32(reg: Register) Register {
|
||||
return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax)));
|
||||
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.eax));
|
||||
}
|
||||
|
||||
pub fn to16(reg: Register) Register {
|
||||
return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax)));
|
||||
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.ax));
|
||||
}
|
||||
|
||||
pub fn to8(reg: Register) Register {
|
||||
return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al)));
|
||||
return @enumFromInt(@intFromEnum(reg) - reg.gpBase() + @intFromEnum(Register.al));
|
||||
}
|
||||
|
||||
fn sseBase(reg: Register) u7 {
|
||||
@ -350,11 +350,11 @@ pub const Register = enum(u7) {
|
||||
}
|
||||
|
||||
pub fn to256(reg: Register) Register {
|
||||
return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0)));
|
||||
return @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.ymm0));
|
||||
}
|
||||
|
||||
pub fn to128(reg: Register) Register {
|
||||
return @as(Register, @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0)));
|
||||
return @enumFromInt(@intFromEnum(reg) - reg.sseBase() + @intFromEnum(Register.xmm0));
|
||||
}
|
||||
|
||||
/// DWARF register encoding
|
||||
@ -619,7 +619,7 @@ pub const Immediate = union(enum) {
|
||||
1, 8 => @as(i8, @bitCast(@as(u8, @intCast(x)))),
|
||||
16 => @as(i16, @bitCast(@as(u16, @intCast(x)))),
|
||||
32 => @as(i32, @bitCast(@as(u32, @intCast(x)))),
|
||||
64 => @as(i64, @bitCast(x)),
|
||||
64 => @bitCast(x),
|
||||
else => unreachable,
|
||||
},
|
||||
};
|
||||
|
||||
@ -905,6 +905,9 @@ pub const table = [_]Entry{
|
||||
|
||||
.{ .movlhps, .rm, &.{ .xmm, .xmm }, &.{ 0x0f, 0x16 }, 0, .none, .sse },
|
||||
|
||||
.{ .movmskps, .rm, &.{ .r32, .xmm }, &.{ 0x0f, 0x50 }, 0, .none, .sse },
|
||||
.{ .movmskps, .rm, &.{ .r64, .xmm }, &.{ 0x0f, 0x50 }, 0, .none, .sse },
|
||||
|
||||
.{ .movss, .rm, &.{ .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x10 }, 0, .none, .sse },
|
||||
.{ .movss, .mr, &.{ .xmm_m32, .xmm }, &.{ 0xf3, 0x0f, 0x11 }, 0, .none, .sse },
|
||||
|
||||
@ -917,6 +920,9 @@ pub const table = [_]Entry{
|
||||
|
||||
.{ .orps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x56 }, 0, .none, .sse },
|
||||
|
||||
.{ .pmovmskb, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .none, .sse },
|
||||
.{ .pmovmskb, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .none, .sse },
|
||||
|
||||
.{ .shufps, .rmi, &.{ .xmm, .xmm_m128, .imm8 }, &.{ 0x0f, 0xc6 }, 0, .none, .sse },
|
||||
|
||||
.{ .sqrtps, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x0f, 0x51 }, 0, .none, .sse },
|
||||
@ -1005,6 +1011,12 @@ pub const table = [_]Entry{
|
||||
.{ .movdqu, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0xf3, 0x0f, 0x6f }, 0, .none, .sse2 },
|
||||
.{ .movdqu, .mr, &.{ .xmm_m128, .xmm }, &.{ 0xf3, 0x0f, 0x7f }, 0, .none, .sse2 },
|
||||
|
||||
.{ .movmskpd, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .none, .sse2 },
|
||||
.{ .movmskpd, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .none, .sse2 },
|
||||
|
||||
.{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .none, .sse2 },
|
||||
.{ .movsd, .mr, &.{ .xmm_m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .none, .sse2 },
|
||||
|
||||
.{ .movq, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf3, 0x0f, 0x7e }, 0, .none, .sse2 },
|
||||
.{ .movq, .mr, &.{ .xmm_m64, .xmm }, &.{ 0x66, 0x0f, 0xd6 }, 0, .none, .sse2 },
|
||||
|
||||
@ -1037,6 +1049,14 @@ pub const table = [_]Entry{
|
||||
|
||||
.{ .pandn, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .none, .sse2 },
|
||||
|
||||
.{ .pcmpeqb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x74 }, 0, .none, .sse2 },
|
||||
.{ .pcmpeqw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x75 }, 0, .none, .sse2 },
|
||||
.{ .pcmpeqd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x76 }, 0, .none, .sse2 },
|
||||
|
||||
.{ .pcmpgtb, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x64 }, 0, .none, .sse2 },
|
||||
.{ .pcmpgtw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x65 }, 0, .none, .sse2 },
|
||||
.{ .pcmpgtd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x66 }, 0, .none, .sse2 },
|
||||
|
||||
.{ .pextrw, .rmi, &.{ .r32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0xc5 }, 0, .none, .sse2 },
|
||||
|
||||
.{ .pinsrw, .rmi, &.{ .xmm, .r32_m16, .imm8 }, &.{ 0x66, 0x0f, 0xc4 }, 0, .none, .sse2 },
|
||||
@ -1100,9 +1120,6 @@ pub const table = [_]Entry{
|
||||
|
||||
.{ .subsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x5c }, 0, .none, .sse2 },
|
||||
|
||||
.{ .movsd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0xf2, 0x0f, 0x10 }, 0, .none, .sse2 },
|
||||
.{ .movsd, .mr, &.{ .xmm_m64, .xmm }, &.{ 0xf2, 0x0f, 0x11 }, 0, .none, .sse2 },
|
||||
|
||||
.{ .ucomisd, .rm, &.{ .xmm, .xmm_m64 }, &.{ 0x66, 0x0f, 0x2e }, 0, .none, .sse2 },
|
||||
|
||||
.{ .xorpd, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x57 }, 0, .none, .sse2 },
|
||||
@ -1137,6 +1154,8 @@ pub const table = [_]Entry{
|
||||
|
||||
.{ .packusdw, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x2b }, 0, .none, .sse4_1 },
|
||||
|
||||
.{ .pcmpeqq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .none, .sse4_1 },
|
||||
|
||||
.{ .pextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .none, .sse4_1 },
|
||||
.{ .pextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .none, .sse4_1 },
|
||||
.{ .pextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .long, .sse4_1 },
|
||||
@ -1171,6 +1190,9 @@ pub const table = [_]Entry{
|
||||
|
||||
.{ .roundss, .rmi, &.{ .xmm, .xmm_m32, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x0a }, 0, .none, .sse4_1 },
|
||||
|
||||
// SSE4.2
|
||||
.{ .pcmpgtq, .rm, &.{ .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .none, .sse4_2 },
|
||||
|
||||
// AVX
|
||||
.{ .vaddpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x58 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vaddpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x58 }, 0, .vex_256_wig, .avx },
|
||||
@ -1295,6 +1317,16 @@ pub const table = [_]Entry{
|
||||
|
||||
.{ .vmaxss, .rvm, &.{ .xmm, .xmm, .xmm_m32 }, &.{ 0xf3, 0x0f, 0x5f }, 0, .vex_lig_wig, .avx },
|
||||
|
||||
.{ .vmovmskps, .rm, &.{ .r32, .xmm }, &.{ 0x0f, 0x50 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vmovmskps, .rm, &.{ .r64, .xmm }, &.{ 0x0f, 0x50 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vmovmskps, .rm, &.{ .r32, .ymm }, &.{ 0x0f, 0x50 }, 0, .vex_256_wig, .avx },
|
||||
.{ .vmovmskps, .rm, &.{ .r64, .ymm }, &.{ 0x0f, 0x50 }, 0, .vex_256_wig, .avx },
|
||||
|
||||
.{ .vmovmskpd, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vmovmskpd, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vmovmskpd, .rm, &.{ .r32, .ymm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_256_wig, .avx },
|
||||
.{ .vmovmskpd, .rm, &.{ .r64, .ymm }, &.{ 0x66, 0x0f, 0x50 }, 0, .vex_256_wig, .avx },
|
||||
|
||||
.{ .vminpd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x5d }, 0, .vex_128_wig, .avx },
|
||||
.{ .vminpd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x5d }, 0, .vex_256_wig, .avx },
|
||||
|
||||
@ -1408,6 +1440,18 @@ pub const table = [_]Entry{
|
||||
|
||||
.{ .vpandn, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_128_wig, .avx },
|
||||
|
||||
.{ .vpcmpeqb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x74 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vpcmpeqw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x75 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vpcmpeqd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x76 }, 0, .vex_128_wig, .avx },
|
||||
|
||||
.{ .vpcmpeqq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .vex_128_wig, .avx },
|
||||
|
||||
.{ .vpcmpgtb, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x64 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vpcmpgtw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x65 }, 0, .vex_128_wig, .avx },
|
||||
.{ .vpcmpgtd, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x66 }, 0, .vex_128_wig, .avx },
|
||||
|
||||
.{ .vpcmpgtq, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .vex_128_wig, .avx },
|
||||
|
||||
.{ .vpextrb, .mri, &.{ .r32_m8, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x14 }, 0, .vex_128_w0, .avx },
.{ .vpextrd, .mri, &.{ .rm32, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w0, .avx },
.{ .vpextrq, .mri, &.{ .rm64, .xmm, .imm8 }, &.{ 0x66, 0x0f, 0x3a, 0x16 }, 0, .vex_128_w1, .avx },
@ -1439,6 +1483,9 @@ pub const table = [_]Entry{
.{ .vpminud, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_128_wig, .avx },
.{ .vpmovmskb, .rm, &.{ .r32, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_128_wig, .avx },
.{ .vpmovmskb, .rm, &.{ .r64, .xmm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_128_wig, .avx },
.{ .vpmulhw, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_128_wig, .avx },
.{ .vpmulld, .rvm, &.{ .xmm, .xmm, .xmm_m128 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_128_wig, .avx },
@ -1581,29 +1628,44 @@ pub const table = [_]Entry{
.{ .vpandn, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xdf }, 0, .vex_256_wig, .avx2 },
.{ .vpmaxsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .vex_256_wig, .avx },
.{ .vpmaxsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xee }, 0, .vex_256_wig, .avx },
.{ .vpmaxsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .vex_256_wig, .avx },
.{ .vpcmpeqb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x74 }, 0, .vex_256_wig, .avx2 },
.{ .vpcmpeqw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x75 }, 0, .vex_256_wig, .avx2 },
.{ .vpcmpeqd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x76 }, 0, .vex_256_wig, .avx2 },
.{ .vpmaxub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xde }, 0, .vex_256_wig, .avx },
.{ .vpmaxuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .vex_256_wig, .avx },
.{ .vpcmpeqq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x29 }, 0, .vex_256_wig, .avx2 },
.{ .vpmaxud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .vex_256_wig, .avx },
.{ .vpcmpgtb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x64 }, 0, .vex_256_wig, .avx2 },
.{ .vpcmpgtw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x65 }, 0, .vex_256_wig, .avx2 },
.{ .vpcmpgtd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x66 }, 0, .vex_256_wig, .avx2 },
.{ .vpminsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .vex_256_wig, .avx },
.{ .vpminsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xea }, 0, .vex_256_wig, .avx },
.{ .vpminsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .vex_256_wig, .avx },
.{ .vpcmpgtq, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x37 }, 0, .vex_256_wig, .avx2 },
.{ .vpminub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xda }, 0, .vex_256_wig, .avx },
.{ .vpminuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .vex_256_wig, .avx },
.{ .vpmaxsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3c }, 0, .vex_256_wig, .avx2 },
.{ .vpmaxsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xee }, 0, .vex_256_wig, .avx2 },
.{ .vpmaxsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3d }, 0, .vex_256_wig, .avx2 },
.{ .vpminud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_256_wig, .avx },
.{ .vpmaxub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xde }, 0, .vex_256_wig, .avx2 },
.{ .vpmaxuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3e }, 0, .vex_256_wig, .avx2 },
.{ .vpmulhw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_256_wig, .avx },
.{ .vpmaxud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3f }, 0, .vex_256_wig, .avx2 },
.{ .vpmulld, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_256_wig, .avx },
.{ .vpminsb, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x38 }, 0, .vex_256_wig, .avx2 },
.{ .vpminsw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xea }, 0, .vex_256_wig, .avx2 },
.{ .vpminsd, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x39 }, 0, .vex_256_wig, .avx2 },
.{ .vpmullw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .vex_256_wig, .avx },
.{ .vpminub, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xda }, 0, .vex_256_wig, .avx2 },
.{ .vpminuw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3a }, 0, .vex_256_wig, .avx2 },
.{ .vpminud, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x3b }, 0, .vex_256_wig, .avx2 },
.{ .vpmovmskb, .rm, &.{ .r32, .ymm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_256_wig, .avx2 },
.{ .vpmovmskb, .rm, &.{ .r64, .ymm }, &.{ 0x66, 0x0f, 0xd7 }, 0, .vex_256_wig, .avx2 },
.{ .vpmulhw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xe5 }, 0, .vex_256_wig, .avx2 },
.{ .vpmulld, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0x38, 0x40 }, 0, .vex_256_wig, .avx2 },
.{ .vpmullw, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xd5 }, 0, .vex_256_wig, .avx2 },
.{ .vpor, .rvm, &.{ .ymm, .ymm, .ymm_m256 }, &.{ 0x66, 0x0f, 0xeb }, 0, .vex_256_wig, .avx2 },
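// Illustrative sketch, not part of the encoding table: the packed-compare rows
// added above (pcmpeq*/pcmpgt* and their VEX forms) are the encodings assumed
// to back element-wise vector comparison in this backend. A minimal example of
// the Zig-level pattern they serve, ending in a reduce over the bool vector:
test "vector compare and bool reduce (sketch)" {
    const testing = @import("std").testing;
    const a: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const b: @Vector(4, u8) = .{ 1, 2, 3, 4 };
    const eq = a == b; // @Vector(4, bool); may lower to a pcmpeqb/vpcmpeqb-class compare
    try testing.expect(@reduce(.And, eq));
}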