diff --git a/build.zig b/build.zig index 68db2061bd..ab1d985b74 100644 --- a/build.zig +++ b/build.zig @@ -44,7 +44,7 @@ pub fn build(b: *Builder) !void { try findAndReadConfigH(b); var test_stage2 = b.addTest("src-self-hosted/test.zig"); - test_stage2.setBuildMode(builtin.Mode.Debug); + test_stage2.setBuildMode(.Debug); // note this is only the mode of the test harness test_stage2.addPackagePath("stage2_tests", "test/stage2/test.zig"); const fmt_build_zig = b.addFmt(&[_][]const u8{"build.zig"}); @@ -68,7 +68,6 @@ pub fn build(b: *Builder) !void { var ctx = parseConfigH(b, config_h_text); ctx.llvm = try findLLVM(b, ctx.llvm_config_exe); - try configureStage2(b, test_stage2, ctx); try configureStage2(b, exe, ctx); b.default_step.dependOn(&exe.step); diff --git a/lib/std/child_process.zig b/lib/std/child_process.zig index 267c32a860..6cf10664c6 100644 --- a/lib/std/child_process.zig +++ b/lib/std/child_process.zig @@ -46,6 +46,10 @@ pub const ChildProcess = struct { /// Set to change the current working directory when spawning the child process. cwd: ?[]const u8, + /// Set to change the current working directory when spawning the child process. + /// This is not yet implemented for Windows. See https://github.com/ziglang/zig/issues/5190 + /// Once that is done, `cwd` will be deprecated in favor of this field. + cwd_dir: ?fs.Dir = null, err_pipe: if (builtin.os.tag == .windows) void else [2]os.fd_t, @@ -183,6 +187,7 @@ pub const ChildProcess = struct { allocator: *mem.Allocator, argv: []const []const u8, cwd: ?[]const u8 = null, + cwd_dir: ?fs.Dir = null, env_map: ?*const BufMap = null, max_output_bytes: usize = 50 * 1024, expand_arg0: Arg0Expand = .no_expand, @@ -194,6 +199,7 @@ pub const ChildProcess = struct { child.stdout_behavior = .Pipe; child.stderr_behavior = .Pipe; child.cwd = args.cwd; + child.cwd_dir = args.cwd_dir; child.env_map = args.env_map; child.expand_arg0 = args.expand_arg0; @@ -414,7 +420,19 @@ pub const ChildProcess = struct { os.close(stderr_pipe[1]); } - if (self.cwd) |cwd| { + if (self.cwd_dir) |cwd| { + // Remove the O_CLOEXEC flag. This is the only safe time to do it, between fork() and execve(). + var flags = os.fcntl(cwd.fd, os.F_GETFD, 0) catch |err| switch (err) { + error.Locked => unreachable, + else => |e| forkChildErrReport(err_pipe[1], e), + }; + flags &= ~@as(u32, os.O_CLOEXEC); + _ = os.fcntl(cwd.fd, os.F_SETFD, flags) catch |err| switch (err) { + error.Locked => unreachable, + else => |e| forkChildErrReport(err_pipe[1], e), + }; + os.fchdir(cwd.fd) catch |err| forkChildErrReport(err_pipe[1], err); + } else if (self.cwd) |cwd| { os.chdir(cwd) catch |err| forkChildErrReport(err_pipe[1], err); } diff --git a/lib/std/fmt.zig b/lib/std/fmt.zig index 107b50e123..7421357a61 100644 --- a/lib/std/fmt.zig +++ b/lib/std/fmt.zig @@ -1058,7 +1058,7 @@ pub fn charToDigit(c: u8, radix: u8) (error{InvalidCharacter}!u8) { return value; } -fn digitToChar(digit: u8, uppercase: bool) u8 { +pub fn digitToChar(digit: u8, uppercase: bool) u8 { return switch (digit) { 0...9 => digit + '0', 10...35 => digit + ((if (uppercase) @as(u8, 'A') else @as(u8, 'a')) - 10), diff --git a/lib/std/fs.zig b/lib/std/fs.zig index efcad99f40..f34ac5baa0 100644 --- a/lib/std/fs.zig +++ b/lib/std/fs.zig @@ -803,6 +803,15 @@ pub const Dir = struct { } } + /// This function performs `makePath`, followed by `openDir`. + /// If supported by the OS, this operation is atomic. It is not atomic on + /// all operating systems. 
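+    /// For example, a nested cache directory might be created and opened in one call:
+    /// `var dir = try std.fs.cwd().makeOpenPath("zig-cache/tmp", .{});` followed later
+    /// by `dir.close();` (the path shown here is only illustrative).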
+ pub fn makeOpenPath(self: Dir, sub_path: []const u8, open_dir_options: OpenDirOptions) !Dir { + // TODO improve this implementation on Windows; we can avoid 1 call to NtClose + try self.makePath(sub_path); + return self.openDir(sub_path, open_dir_options); + } + /// Changes the current working directory to the open directory handle. /// This modifies global state and can have surprising effects in multi- /// threaded applications. Most applications and especially libraries should diff --git a/lib/std/math.zig b/lib/std/math.zig index 8c07f3c8be..5cf6d40d8a 100644 --- a/lib/std/math.zig +++ b/lib/std/math.zig @@ -986,6 +986,43 @@ pub const Order = enum { /// Greater than (`>`) gt, + + pub fn invert(self: Order) Order { + return switch (self) { + .lt => .gt, + .eq => .eq, + .gt => .gt, + }; + } + + pub fn compare(self: Order, op: CompareOperator) bool { + return switch (self) { + .lt => switch (op) { + .lt => true, + .lte => true, + .eq => false, + .gte => false, + .gt => false, + .neq => true, + }, + .eq => switch (op) { + .lt => false, + .lte => true, + .eq => true, + .gte => true, + .gt => false, + .neq => false, + }, + .gt => switch (op) { + .lt => false, + .lte => false, + .eq => false, + .gte => true, + .gt => true, + .neq => true, + }, + }; + } }; /// Given two numbers, this function returns the order they are with respect to each other. diff --git a/lib/std/math/big.zig b/lib/std/math/big.zig index 8105beb506..ab651c05c6 100644 --- a/lib/std/math/big.zig +++ b/lib/std/math/big.zig @@ -1,7 +1,24 @@ -pub usingnamespace @import("big/int.zig"); -pub usingnamespace @import("big/rational.zig"); +const std = @import("../std.zig"); +const assert = std.debug.assert; -test "math.big" { - _ = @import("big/int.zig"); - _ = @import("big/rational.zig"); +pub const Rational = @import("big/rational.zig").Rational; +pub const int = @import("big/int.zig"); +pub const Limb = usize; +pub const DoubleLimb = std.meta.IntType(false, 2 * Limb.bit_count); +pub const SignedDoubleLimb = std.meta.IntType(true, DoubleLimb.bit_count); +pub const Log2Limb = std.math.Log2Int(Limb); + +comptime { + assert(std.math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count); + assert(Limb.bit_count <= 64); // u128 set is unsupported + assert(Limb.is_signed == false); +} + +test "" { + _ = int; + _ = Rational; + _ = Limb; + _ = DoubleLimb; + _ = SignedDoubleLimb; + _ = Log2Limb; } diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index f5d65a6866..8ee1474275 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -1,293 +1,196 @@ const std = @import("../../std.zig"); -const debug = std.debug; -const testing = std.testing; const math = std.math; +const Limb = std.math.big.Limb; +const DoubleLimb = std.math.big.DoubleLimb; +const SignedDoubleLimb = std.math.big.SignedDoubleLimb; +const Log2Limb = std.math.big.Log2Limb; +const Allocator = std.mem.Allocator; const mem = std.mem; -const Allocator = mem.Allocator; -const ArrayList = std.ArrayList; const maxInt = std.math.maxInt; const minInt = std.math.minInt; +const assert = std.debug.assert; -pub const Limb = usize; -pub const DoubleLimb = std.meta.Int(false, 2 * Limb.bit_count); -pub const SignedDoubleLimb = std.meta.Int(true, DoubleLimb.bit_count); -pub const Log2Limb = math.Log2Int(Limb); - -comptime { - debug.assert(math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count); - debug.assert(Limb.bit_count <= 64); // u128 set is unsupported - debug.assert(Limb.is_signed == false); +/// Returns the number of limbs needed to store 
`scalar`, which must be a +/// primitive integer value. +pub fn calcLimbLen(scalar: var) usize { + const T = @TypeOf(scalar); + switch (@typeInfo(T)) { + .Int => |info| { + const UT = if (info.is_signed) std.meta.Int(false, info.bits - 1) else T; + return @sizeOf(UT) / @sizeOf(Limb); + }, + .ComptimeInt => { + const w_value = if (scalar < 0) -scalar else scalar; + return @divFloor(math.log2(w_value), Limb.bit_count) + 1; + }, + else => @compileError("parameter must be a primitive integer type"), + } } -/// An arbitrary-precision big integer. -/// -/// Memory is allocated by an Int as needed to ensure operations never overflow. The range of an -/// Int is bounded only by available memory. -pub const Int = struct { - const sign_bit: usize = 1 << (usize.bit_count - 1); +pub fn calcToStringLimbsBufferLen(a_len: usize, base: u8) usize { + if (math.isPowerOfTwo(base)) + return 0; + return a_len + 2 + a_len + calcDivLimbsBufferLen(a_len, 1); +} - /// Default number of limbs to allocate on creation of an Int. - pub const default_capacity = 4; +pub fn calcDivLimbsBufferLen(a_len: usize, b_len: usize) usize { + return calcMulLimbsBufferLen(a_len, b_len, 2) * 4; +} - /// Allocator used by the Int when requesting memory. - allocator: ?*Allocator, +pub fn calcMulLimbsBufferLen(a_len: usize, b_len: usize, aliases: usize) usize { + return aliases * math.max(a_len, b_len); +} +pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize { + const limb_count = calcSetStringLimbCount(base, string_len); + return calcMulLimbsBufferLen(limb_count, limb_count, 2); +} + +pub fn calcSetStringLimbCount(base: u8, string_len: usize) usize { + return (string_len + (Limb.bit_count / base - 1)) / (Limb.bit_count / base); +} + +/// a + b * c + *carry, sets carry to the overflow bits +pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb { + @setRuntimeSafety(false); + var r1: Limb = undefined; + + // r1 = a + *carry + const c1: Limb = @boolToInt(@addWithOverflow(Limb, a, carry.*, &r1)); + + // r2 = b * c + const bc = @as(DoubleLimb, math.mulWide(Limb, b, c)); + const r2 = @truncate(Limb, bc); + const c2 = @truncate(Limb, bc >> Limb.bit_count); + + // r1 = r1 + r2 + const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1)); + + // This never overflows, c1, c3 are either 0 or 1 and if both are 1 then + // c2 is at least <= maxInt(Limb) - 2. + carry.* = c1 + c2 + c3; + + return r1; +} + +/// A arbitrary-precision big integer, with a fixed set of mutable limbs. +pub const Mutable = struct { /// Raw digits. These are: /// /// * Little-endian ordered /// * limbs.len >= 1 - /// * Zero is represent as Int.len() == 1 with limbs[0] == 0. + /// * Zero is represented as limbs.len == 1 with limbs[0] == 0. /// /// Accessing limbs directly should be avoided. + /// These are allocated limbs; the `len` field tells the valid range. limbs: []Limb, + len: usize, + positive: bool, - /// High bit is the sign bit. If set, Int is negative, else Int is positive. - /// The remaining bits represent the number of limbs used by Int. - metadata: usize, - - /// Creates a new Int. default_capacity limbs will be allocated immediately. - /// Int will be zeroed. - pub fn init(allocator: *Allocator) !Int { - return try Int.initCapacity(allocator, default_capacity); + pub fn toConst(self: Mutable) Const { + return .{ + .limbs = self.limbs[0..self.len], + .positive = self.positive, + }; } - /// Creates a new Int. Int will be set to `value`. - /// - /// This is identical to an `init`, followed by a `set`. 
- pub fn initSet(allocator: *Allocator, value: var) !Int { - var s = try Int.init(allocator); - try s.set(value); - return s; - } - - /// Creates a new Int with a specific capacity. If capacity < default_capacity then the - /// default capacity will be used instead. - pub fn initCapacity(allocator: *Allocator, capacity: usize) !Int { - return Int{ + /// Asserts that the allocator owns the limbs memory. If this is not the case, + /// use `toConst().toManaged()`. + pub fn toManaged(self: Mutable, allocator: *Allocator) Managed { + return .{ .allocator = allocator, - .metadata = 1, - .limbs = block: { - var limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity)); - limbs[0] = 0; - break :block limbs; - }, + .limbs = limbs, + .metadata = if (self.positive) + self.len & ~Managed.sign_bit + else + self.len | Managed.sign_bit, }; } - /// Returns the number of limbs currently in use. - pub fn len(self: Int) usize { - return self.metadata & ~sign_bit; - } - - /// Returns whether an Int is positive. - pub fn isPositive(self: Int) bool { - return self.metadata & sign_bit == 0; - } - - /// Sets the sign of an Int. - pub fn setSign(self: *Int, positive: bool) void { - if (positive) { - self.metadata &= ~sign_bit; - } else { - self.metadata |= sign_bit; - } - } - - /// Sets the length of an Int. - /// - /// If setLen is used, then the Int must be normalized to suit. - pub fn setLen(self: *Int, new_len: usize) void { - self.metadata &= sign_bit; - self.metadata |= new_len; - } - - /// Returns an Int backed by a fixed set of limb values. - /// This is read-only and cannot be used as a result argument. If the Int tries to allocate - /// memory a runtime panic will occur. - pub fn initFixed(limbs: []const Limb) Int { - var self = Int{ - .allocator = null, - .metadata = limbs.len, - // Cast away the const, invalid use to pass as a pointer argument. - .limbs = @intToPtr([*]Limb, @ptrToInt(limbs.ptr))[0..limbs.len], + /// `value` is a primitive integer type. + /// Asserts the value fits within the provided `limbs_buffer`. + /// Note: `calcLimbLen` can be used to figure out how big an array to allocate for `limbs_buffer`. + pub fn init(limbs_buffer: []Limb, value: var) Mutable { + limbs_buffer[0] = 0; + var self: Mutable = .{ + .limbs = limbs_buffer, + .len = 1, + .positive = true, }; - - self.normalize(limbs.len); + self.set(value); return self; } - /// Ensures an Int has enough space allocated for capacity limbs. If the Int does not have - /// sufficient capacity, the exact amount will be allocated. This occurs even if the requested - /// capacity is only greater than the current capacity by one limb. - pub fn ensureCapacity(self: *Int, capacity: usize) !void { - self.assertWritable(); - if (capacity <= self.limbs.len) { - return; + /// Copies the value of a Const to an existing Mutable so that they both have the same value. + /// Asserts the value fits in the limbs buffer. + pub fn copy(self: *Mutable, other: Const) void { + if (self.limbs.ptr != other.limbs.ptr) { + mem.copy(Limb, self.limbs[0..], other.limbs[0..other.limbs.len]); } - - self.limbs = try self.allocator.?.realloc(self.limbs, capacity); + self.positive = other.positive; + self.len = other.limbs.len; } - fn assertWritable(self: Int) void { - if (self.allocator == null) { - @panic("provided Int value is read-only but must be writable"); + /// Efficiently swap an Mutable with another. This swaps the limb pointers and a full copy is not + /// performed. The address of the limbs field will not be the same after this function. 
+ pub fn swap(self: *Mutable, other: *Mutable) void { + mem.swap(Mutable, self, other); + } + + pub fn dump(self: Mutable) void { + for (self.limbs[0..self.len]) |limb| { + std.debug.warn("{x} ", .{limb}); } + std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.positive }); } - /// Frees all memory associated with an Int. - pub fn deinit(self: Int) void { - self.assertWritable(); - self.allocator.?.free(self.limbs); - } - - /// Clones an Int and returns a new Int with the same value. The new Int is a deep copy and + /// Clones an Mutable and returns a new Mutable with the same value. The new Mutable is a deep copy and /// can be modified separately from the original. - pub fn clone(other: Int) !Int { - return other.clone2(other.allocator.?); - } - - pub fn clone2(other: Int, allocator: *Allocator) !Int { - return Int{ - .allocator = allocator, - .metadata = other.metadata, - .limbs = block: { - var limbs = try allocator.alloc(Limb, other.len()); - mem.copy(Limb, limbs[0..], other.limbs[0..other.len()]); - break :block limbs; - }, + /// Asserts that limbs is big enough to store the value. + pub fn clone(other: Mutable, limbs: []Limb) Mutable { + mem.copy(Limb, limbs, other.limbs[0..other.len]); + return .{ + .limbs = limbs, + .len = other.len, + .positive = other.positive, }; } - /// Copies the value of an Int to an existing Int so that they both have the same value. - /// Extra memory will be allocated if the receiver does not have enough capacity. - pub fn copy(self: *Int, other: Int) !void { - self.assertWritable(); - if (self.limbs.ptr == other.limbs.ptr) { - return; - } - - try self.ensureCapacity(other.len()); - mem.copy(Limb, self.limbs[0..], other.limbs[0..other.len()]); - self.metadata = other.metadata; + pub fn negate(self: *Mutable) void { + self.positive = !self.positive; } - /// Efficiently swap an Int with another. This swaps the limb pointers and a full copy is not - /// performed. The address of the limbs field will not be the same after this function. - pub fn swap(self: *Int, other: *Int) void { - self.assertWritable(); - mem.swap(Int, self, other); + /// Modify to become the absolute value + pub fn abs(self: *Mutable) void { + self.positive = true; } - pub fn dump(self: Int) void { - for (self.limbs) |limb| { - debug.warn("{x} ", .{limb}); - } - debug.warn("\n", .{}); - } - - /// Negate the sign of an Int. - pub fn negate(self: *Int) void { - self.metadata ^= sign_bit; - } - - /// Make an Int positive. - pub fn abs(self: *Int) void { - self.metadata &= ~sign_bit; - } - - /// Returns true if an Int is odd. - pub fn isOdd(self: Int) bool { - return self.limbs[0] & 1 != 0; - } - - /// Returns true if an Int is even. - pub fn isEven(self: Int) bool { - return !self.isOdd(); - } - - /// Returns the number of bits required to represent the absolute value an Int. - fn bitCountAbs(self: Int) usize { - return (self.len() - 1) * Limb.bit_count + (Limb.bit_count - @clz(Limb, self.limbs[self.len() - 1])); - } - - /// Returns the number of bits required to represent the integer in twos-complement form. - /// - /// If the integer is negative the value returned is the number of bits needed by a signed - /// integer to represent the value. If positive the value is the number of bits for an - /// unsigned integer. Any unsigned integer will fit in the signed integer with bitcount - /// one greater than the returned value. - /// - /// e.g. -127 returns 8 as it will fit in an i8. 127 returns 7 since it fits in a u7. 
- fn bitCountTwosComp(self: Int) usize { - var bits = self.bitCountAbs(); - - // If the entire value has only one bit set (e.g. 0b100000000) then the negation in twos - // complement requires one less bit. - if (!self.isPositive()) block: { - bits += 1; - - if (@popCount(Limb, self.limbs[self.len() - 1]) == 1) { - for (self.limbs[0 .. self.len() - 1]) |limb| { - if (@popCount(Limb, limb) != 0) { - break :block; - } - } - - bits -= 1; - } - } - - return bits; - } - - pub fn fitsInTwosComp(self: Int, is_signed: bool, bit_count: usize) bool { - if (self.eqZero()) { - return true; - } - if (!is_signed and !self.isPositive()) { - return false; - } - - const req_bits = self.bitCountTwosComp() + @boolToInt(self.isPositive() and is_signed); - return bit_count >= req_bits; - } - - /// Returns whether self can fit into an integer of the requested type. - pub fn fits(self: Int, comptime T: type) bool { - return self.fitsInTwosComp(T.is_signed, T.bit_count); - } - - /// Returns the approximate size of the integer in the given base. Negative values accommodate for - /// the minus sign. This is used for determining the number of characters needed to print the - /// value. It is inexact and may exceed the given value by ~1-2 bytes. - pub fn sizeInBase(self: Int, base: usize) usize { - const bit_count = @as(usize, @boolToInt(!self.isPositive())) + self.bitCountAbs(); - return (bit_count / math.log2(base)) + 1; - } - - /// Sets an Int to value. Value must be an primitive integer type. - pub fn set(self: *Int, value: var) Allocator.Error!void { - self.assertWritable(); + /// Sets the Mutable to value. Value must be an primitive integer type. + /// Asserts the value fits within the limbs buffer. + /// Note: `calcLimbLen` can be used to figure out how big the limbs buffer + /// needs to be to store a specific value. + pub fn set(self: *Mutable, value: var) void { const T = @TypeOf(value); switch (@typeInfo(T)) { .Int => |info| { const UT = if (T.is_signed) std.meta.Int(false, T.bit_count - 1) else T; - try self.ensureCapacity(@sizeOf(UT) / @sizeOf(Limb)); - self.metadata = 0; - self.setSign(value >= 0); + const needed_limbs = @sizeOf(UT) / @sizeOf(Limb); + assert(needed_limbs <= self.limbs.len); // value too big + self.len = 0; + self.positive = value >= 0; var w_value: UT = if (value < 0) @intCast(UT, -value) else @intCast(UT, value); if (info.bits <= Limb.bit_count) { self.limbs[0] = @as(Limb, w_value); - self.metadata += 1; + self.len += 1; } else { var i: usize = 0; while (w_value != 0) : (i += 1) { self.limbs[i] = @truncate(Limb, w_value); - self.metadata += 1; + self.len += 1; // TODO: shift == 64 at compile-time fails. Fails on u128 limbs. w_value >>= Limb.bit_count / 2; @@ -299,10 +202,10 @@ pub const Int = struct { comptime var w_value = if (value < 0) -value else value; const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1; - try self.ensureCapacity(req_limbs); + assert(req_limbs <= self.limbs.len); // value too big - self.metadata = req_limbs; - self.setSign(value >= 0); + self.len = req_limbs; + self.positive = value >= 0; if (w_value <= maxInt(Limb)) { self.limbs[0] = w_value; @@ -318,98 +221,35 @@ pub const Int = struct { } } }, - else => { - @compileError("cannot set Int using type " ++ @typeName(T)); - }, + else => @compileError("cannot set Mutable using type " ++ @typeName(T)), } } - pub const ConvertError = error{ - NegativeIntoUnsigned, - TargetTooSmall, - }; - - /// Convert self to type T. 
- /// - /// Returns an error if self cannot be narrowed into the requested type without truncation. - pub fn to(self: Int, comptime T: type) ConvertError!T { - switch (@typeInfo(T)) { - .Int => { - const UT = std.meta.Int(false, T.bit_count); - - if (self.bitCountTwosComp() > T.bit_count) { - return error.TargetTooSmall; - } - - var r: UT = 0; - - if (@sizeOf(UT) <= @sizeOf(Limb)) { - r = @intCast(UT, self.limbs[0]); - } else { - for (self.limbs[0..self.len()]) |_, ri| { - const limb = self.limbs[self.len() - ri - 1]; - r <<= Limb.bit_count; - r |= limb; - } - } - - if (!T.is_signed) { - return if (self.isPositive()) @intCast(T, r) else error.NegativeIntoUnsigned; - } else { - if (self.isPositive()) { - return @intCast(T, r); - } else { - if (math.cast(T, r)) |ok| { - return -ok; - } else |_| { - return minInt(T); - } - } - } - }, - else => { - @compileError("cannot convert Int to type " ++ @typeName(T)); - }, - } - } - - fn charToDigit(ch: u8, base: u8) !u8 { - const d = switch (ch) { - '0'...'9' => ch - '0', - 'a'...'f' => (ch - 'a') + 0xa, - 'A'...'F' => (ch - 'A') + 0xa, - else => return error.InvalidCharForDigit, - }; - - return if (d < base) d else return error.DigitTooLargeForBase; - } - - fn digitToChar(d: u8, base: u8, uppercase: bool) !u8 { - if (d >= base) { - return error.DigitTooLargeForBase; - } - - const a: u8 = if (uppercase) 'A' else 'a'; - return switch (d) { - 0...9 => '0' + d, - 0xa...0xf => (a - 0xa) + d, - else => unreachable, - }; - } - /// Set self from the string representation `value`. /// /// `value` must contain only digits <= `base` and is case insensitive. Base prefixes are /// not allowed (e.g. 0x43 should simply be 43). Underscores in the input string are /// ignored and can be used as digit separators. /// - /// Returns an error if memory could not be allocated or `value` has invalid digits for the - /// requested base. - pub fn setString(self: *Int, base: u8, value: []const u8) !void { - self.assertWritable(); - if (base < 2 or base > 16) { - return error.InvalidBase; - } + /// Asserts there is enough memory for the value in `self.limbs`. An upper bound on number of limbs can + /// be determined with `calcSetStringLimbCount`. + /// Asserts the base is in the range [2, 16]. + /// + /// Returns an error if the value has invalid digits for the requested base. + /// + /// `limbs_buffer` is used for temporary storage. The size required can be found with + /// `calcSetStringLimbsBufferLen`. + /// + /// If `allocator` is provided, it will be used for temporary storage to improve + /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm. 
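+    /// For example, parsing a decimal string into an already-sized `Mutable` might look
+    /// like `try x.setString(10, "240530240918", buf, null)`, where `buf` is a `[]Limb`
+    /// sized with `calcSetStringLimbsBufferLen` (the names here are placeholders).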
+ pub fn setString( + self: *Mutable, + base: u8, + value: []const u8, + limbs_buffer: []Limb, + allocator: ?*Allocator, + ) error{InvalidCharacter}!void { + assert(base >= 2 and base <= 16); var i: usize = 0; var positive = true; @@ -418,1009 +258,370 @@ pub const Int = struct { i += 1; } - const ap_base = Int.initFixed(([_]Limb{base})[0..]); - try self.set(0); + const ap_base: Const = .{ .limbs = &[_]Limb{base}, .positive = true }; + self.set(0); for (value[i..]) |ch| { if (ch == '_') { continue; } - const d = try charToDigit(ch, base); + const d = try std.fmt.charToDigit(ch, base); + const ap_d: Const = .{ .limbs = &[_]Limb{d}, .positive = true }; - const ap_d = Int.initFixed(([_]Limb{d})[0..]); - - try self.mul(self.*, ap_base); - try self.add(self.*, ap_d); + self.mul(self.toConst(), ap_base, limbs_buffer, allocator); + self.add(self.toConst(), ap_d); } - self.setSign(positive); + self.positive = positive; } - /// Converts self to a string in the requested base. Memory is allocated from the provided - /// allocator and not the one present in self. - /// TODO make this call format instead of the other way around - pub fn toString(self: Int, allocator: *Allocator, base: u8, uppercase: bool) ![]const u8 { - if (base < 2 or base > 16) { - return error.InvalidBase; - } - - var digits = ArrayList(u8).init(allocator); - try digits.ensureCapacity(self.sizeInBase(base) + 1); - defer digits.deinit(); - - if (self.eqZero()) { - try digits.append('0'); - return digits.toOwnedSlice(); - } - - // Power of two: can do a single pass and use masks to extract digits. - if (math.isPowerOfTwo(base)) { - const base_shift = math.log2_int(Limb, base); - - for (self.limbs[0..self.len()]) |limb| { - var shift: usize = 0; - while (shift < Limb.bit_count) : (shift += base_shift) { - const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1)); - const ch = try digitToChar(r, base, uppercase); - try digits.append(ch); - } - } - - while (true) { - // always will have a non-zero digit somewhere - const c = digits.pop(); - if (c != '0') { - digits.append(c) catch unreachable; - break; - } - } - } else { - // Non power-of-two: batch divisions per word size. - const digits_per_limb = math.log(Limb, base, maxInt(Limb)); - var limb_base: Limb = 1; - var j: usize = 0; - while (j < digits_per_limb) : (j += 1) { - limb_base *= base; - } - - var q = try self.clone2(allocator); - defer q.deinit(); - q.abs(); - var r = try Int.init(allocator); - defer r.deinit(); - var b = try Int.initSet(allocator, limb_base); - defer b.deinit(); - - while (q.len() >= 2) { - try Int.divTrunc(&q, &r, q, b); - - var r_word = r.limbs[0]; - var i: usize = 0; - while (i < digits_per_limb) : (i += 1) { - const ch = try digitToChar(@intCast(u8, r_word % base), base, uppercase); - r_word /= base; - try digits.append(ch); - } - } - - { - debug.assert(q.len() == 1); - - var r_word = q.limbs[0]; - while (r_word != 0) { - const ch = try digitToChar(@intCast(u8, r_word % base), base, uppercase); - r_word /= base; - try digits.append(ch); - } - } - } - - if (!self.isPositive()) { - try digits.append('-'); - } - - var s = digits.toOwnedSlice(); - mem.reverse(u8, s); - return s; - } - - /// To allow `std.fmt.printf` to work with Int. 
- /// TODO make this non-allocating - /// TODO support read-only fixed integers - pub fn format( - self: Int, - comptime fmt: []const u8, - options: std.fmt.FormatOptions, - out_stream: var, - ) !void { - comptime var radix = 10; - comptime var uppercase = false; - - if (fmt.len == 0 or comptime std.mem.eql(u8, fmt, "d")) { - radix = 10; - uppercase = false; - } else if (comptime std.mem.eql(u8, fmt, "b")) { - radix = 2; - uppercase = false; - } else if (comptime std.mem.eql(u8, fmt, "x")) { - radix = 16; - uppercase = false; - } else if (comptime std.mem.eql(u8, fmt, "X")) { - radix = 16; - uppercase = true; - } else { - @compileError("Unknown format string: '" ++ fmt ++ "'"); - } - - var buf: [4096]u8 = undefined; - var fba = std.heap.FixedBufferAllocator.init(&buf); - const str = self.toString(&fba.allocator, radix, uppercase) catch @panic("TODO make this non allocating"); - return out_stream.writeAll(str); - } - - /// Returns math.Order.lt, math.Order.eq, math.Order.gt if |a| < |b|, |a| == - /// |b| or |a| > |b| respectively. - pub fn cmpAbs(a: Int, b: Int) math.Order { - if (a.len() < b.len()) { - return .lt; - } - if (a.len() > b.len()) { - return .gt; - } - - var i: usize = a.len() - 1; - while (i != 0) : (i -= 1) { - if (a.limbs[i] != b.limbs[i]) { - break; - } - } - - if (a.limbs[i] < b.limbs[i]) { - return .lt; - } else if (a.limbs[i] > b.limbs[i]) { - return .gt; - } else { - return .eq; - } - } - - /// Returns math.Order.lt, math.Order.eq, math.Order.gt if a < b, a == b or a - /// > b respectively. - pub fn cmp(a: Int, b: Int) math.Order { - if (a.isPositive() != b.isPositive()) { - return if (a.isPositive()) .gt else .lt; - } else { - const r = cmpAbs(a, b); - return if (a.isPositive()) r else switch (r) { - .lt => math.Order.gt, - .eq => math.Order.eq, - .gt => math.Order.lt, - }; - } - } - - /// Returns true if a == 0. - pub fn eqZero(a: Int) bool { - return a.len() == 1 and a.limbs[0] == 0; - } - - /// Returns true if |a| == |b|. - pub fn eqAbs(a: Int, b: Int) bool { - return cmpAbs(a, b) == .eq; - } - - /// Returns true if a == b. - pub fn eq(a: Int, b: Int) bool { - return cmp(a, b) == .eq; - } - - // Normalize a possible sequence of leading zeros. - // - // [1, 2, 3, 4, 0] -> [1, 2, 3, 4] - // [1, 2, 0, 0, 0] -> [1, 2] - // [0, 0, 0, 0, 0] -> [0] - fn normalize(r: *Int, length: usize) void { - debug.assert(length > 0); - debug.assert(length <= r.limbs.len); - - var j = length; - while (j > 0) : (j -= 1) { - if (r.limbs[j - 1] != 0) { - break; - } - } - - // Handle zero - r.setLen(if (j != 0) j else 1); - } - - // Cannot be used as a result argument to any function. - fn readOnlyPositive(a: Int) Int { - return Int{ - .allocator = null, - .metadata = a.len(), - .limbs = a.limbs, - }; + /// r = a + scalar + /// + /// r and a may be aliases. + /// scalar is a primitive integer type. + /// + /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by + /// r is `math.max(a.limbs.len, calcLimbLen(scalar)) + 1`. + pub fn addScalar(r: *Mutable, a: Const, scalar: var) void { + var limbs: [calcLimbLen(scalar)]Limb = undefined; + const operand = init(&limbs, scalar).toConst(); + return add(r, a, operand); } /// r = a + b /// /// r, a and b may be aliases. /// - /// Returns an error if memory could not be allocated. - pub fn add(r: *Int, a: Int, b: Int) Allocator.Error!void { - r.assertWritable(); + /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by + /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. 
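+    /// Because the operands may alias the result, an in-place accumulation can be
+    /// written as `r.add(r.toConst(), b)`, for example.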
+ pub fn add(r: *Mutable, a: Const, b: Const) void { if (a.eqZero()) { - try r.copy(b); + r.copy(b); return; } else if (b.eqZero()) { - try r.copy(a); + r.copy(a); return; } - if (a.isPositive() != b.isPositive()) { - if (a.isPositive()) { + if (a.limbs.len == 1 and b.limbs.len == 1 and a.positive == b.positive) { + if (!@addWithOverflow(Limb, a.limbs[0], b.limbs[0], &r.limbs[0])) { + r.len = 1; + r.positive = a.positive; + return; + } + } + + if (a.positive != b.positive) { + if (a.positive) { // (a) + (-b) => a - b - try r.sub(a, readOnlyPositive(b)); + r.sub(a, b.abs()); } else { // (-a) + (b) => b - a - try r.sub(b, readOnlyPositive(a)); + r.sub(b, a.abs()); } } else { - if (a.len() >= b.len()) { - try r.ensureCapacity(a.len() + 1); - lladd(r.limbs[0..], a.limbs[0..a.len()], b.limbs[0..b.len()]); - r.normalize(a.len() + 1); + if (a.limbs.len >= b.limbs.len) { + lladd(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]); + r.normalize(a.limbs.len + 1); } else { - try r.ensureCapacity(b.len() + 1); - lladd(r.limbs[0..], b.limbs[0..b.len()], a.limbs[0..a.len()]); - r.normalize(b.len() + 1); + lladd(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]); + r.normalize(b.limbs.len + 1); } - r.setSign(a.isPositive()); + r.positive = a.positive; } } - // Knuth 4.3.1, Algorithm A. - fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); - debug.assert(a.len != 0 and b.len != 0); - debug.assert(a.len >= b.len); - debug.assert(r.len >= a.len + 1); - - var i: usize = 0; - var carry: Limb = 0; - - while (i < b.len) : (i += 1) { - var c: Limb = 0; - c += @boolToInt(@addWithOverflow(Limb, a[i], b[i], &r[i])); - c += @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i])); - carry = c; - } - - while (i < a.len) : (i += 1) { - carry = @boolToInt(@addWithOverflow(Limb, a[i], carry, &r[i])); - } - - r[i] = carry; - } - /// r = a - b /// /// r, a and b may be aliases. /// - /// Returns an error if memory could not be allocated. - pub fn sub(r: *Int, a: Int, b: Int) !void { - r.assertWritable(); - if (a.isPositive() != b.isPositive()) { - if (a.isPositive()) { + /// Asserts the result fits in `r`. An upper bound on the number of limbs needed by + /// r is `math.max(a.limbs.len, b.limbs.len) + 1`. The +1 is not needed if both operands are positive. 
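+    /// For example, `r.sub(a, b)` with both operands positive and `a` smaller than `b`
+    /// yields the magnitude of the difference with `r.positive == false`.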
+ pub fn sub(r: *Mutable, a: Const, b: Const) void { + if (a.positive != b.positive) { + if (a.positive) { // (a) - (-b) => a + b - try r.add(a, readOnlyPositive(b)); + r.add(a, b.abs()); } else { // (-a) - (b) => -(a + b) - try r.add(readOnlyPositive(a), b); - r.setSign(false); + r.add(a.abs(), b); + r.positive = false; } } else { - if (a.isPositive()) { + if (a.positive) { // (a) - (b) => a - b - if (a.cmp(b) != .lt) { - try r.ensureCapacity(a.len() + 1); - llsub(r.limbs[0..], a.limbs[0..a.len()], b.limbs[0..b.len()]); - r.normalize(a.len()); - r.setSign(true); + if (a.order(b) != .lt) { + llsub(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]); + r.normalize(a.limbs.len); + r.positive = true; } else { - try r.ensureCapacity(b.len() + 1); - llsub(r.limbs[0..], b.limbs[0..b.len()], a.limbs[0..a.len()]); - r.normalize(b.len()); - r.setSign(false); + llsub(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]); + r.normalize(b.limbs.len); + r.positive = false; } } else { // (-a) - (-b) => -(a - b) - if (a.cmp(b) == .lt) { - try r.ensureCapacity(a.len() + 1); - llsub(r.limbs[0..], a.limbs[0..a.len()], b.limbs[0..b.len()]); - r.normalize(a.len()); - r.setSign(false); + if (a.order(b) == .lt) { + llsub(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]); + r.normalize(a.limbs.len); + r.positive = false; } else { - try r.ensureCapacity(b.len() + 1); - llsub(r.limbs[0..], b.limbs[0..b.len()], a.limbs[0..a.len()]); - r.normalize(b.len()); - r.setSign(true); + llsub(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]); + r.normalize(b.limbs.len); + r.positive = true; } } } } - // Knuth 4.3.1, Algorithm S. - fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); - debug.assert(a.len != 0 and b.len != 0); - debug.assert(a.len > b.len or (a.len == b.len and a[a.len - 1] >= b[b.len - 1])); - debug.assert(r.len >= a.len); - - var i: usize = 0; - var borrow: Limb = 0; - - while (i < b.len) : (i += 1) { - var c: Limb = 0; - c += @boolToInt(@subWithOverflow(Limb, a[i], b[i], &r[i])); - c += @boolToInt(@subWithOverflow(Limb, r[i], borrow, &r[i])); - borrow = c; - } - - while (i < a.len) : (i += 1) { - borrow = @boolToInt(@subWithOverflow(Limb, a[i], borrow, &r[i])); - } - - debug.assert(borrow == 0); - } - /// rma = a * b /// - /// rma, a and b may be aliases. However, it is more efficient if rma does not alias a or b. + /// `rma` may alias with `a` or `b`. + /// `a` and `b` may alias with each other. /// - /// Returns an error if memory could not be allocated. - pub fn mul(rma: *Int, a: Int, b: Int) !void { - rma.assertWritable(); + /// Asserts the result fits in `rma`. An upper bound on the number of limbs needed by + /// rma is given by `a.limbs.len + b.limbs.len + 1`. + /// + /// `limbs_buffer` is used for temporary storage. The amount required is given by `calcMulLimbsBufferLen`. 
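+    /// For example, squaring a value in place might look like
+    /// `x.mul(x.toConst(), x.toConst(), buf, null)`, where `buf` holds at least
+    /// `calcMulLimbsBufferLen(x.len, x.len, 2)` limbs.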
+ pub fn mul(rma: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?*Allocator) void { + var buf_index: usize = 0; - var r = rma; - var aliased = rma.limbs.ptr == a.limbs.ptr or rma.limbs.ptr == b.limbs.ptr; + const a_copy = if (rma.limbs.ptr == a.limbs.ptr) blk: { + const start = buf_index; + mem.copy(Limb, limbs_buffer[buf_index..], a.limbs); + buf_index += a.limbs.len; + break :blk a.toMutable(limbs_buffer[start..buf_index]).toConst(); + } else a; - var sr: Int = undefined; - if (aliased) { - sr = try Int.initCapacity(rma.allocator.?, a.len() + b.len()); - r = &sr; - aliased = true; - } - defer if (aliased) { - rma.swap(r); - r.deinit(); - }; + const b_copy = if (rma.limbs.ptr == b.limbs.ptr) blk: { + const start = buf_index; + mem.copy(Limb, limbs_buffer[buf_index..], b.limbs); + buf_index += b.limbs.len; + break :blk b.toMutable(limbs_buffer[start..buf_index]).toConst(); + } else b; - try r.ensureCapacity(a.len() + b.len() + 1); - - mem.set(Limb, r.limbs[0 .. a.len() + b.len() + 1], 0); - - try llmulacc(rma.allocator.?, r.limbs, a.limbs[0..a.len()], b.limbs[0..b.len()]); - - r.normalize(a.len() + b.len()); - r.setSign(a.isPositive() == b.isPositive()); + return rma.mulNoAlias(a_copy, b_copy, allocator); } - // a + b * c + *carry, sets carry to the overflow bits - pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb { - @setRuntimeSafety(false); - var r1: Limb = undefined; + /// rma = a * b + /// + /// `rma` may not alias with `a` or `b`. + /// `a` and `b` may alias with each other. + /// + /// Asserts the result fits in `rma`. An upper bound on the number of limbs needed by + /// rma is given by `a.limbs.len + b.limbs.len + 1`. + /// + /// If `allocator` is provided, it will be used for temporary storage to improve + /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm. + pub fn mulNoAlias(rma: *Mutable, a: Const, b: Const, allocator: ?*Allocator) void { + assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing + assert(rma.limbs.ptr != b.limbs.ptr); // illegal aliasing - // r1 = a + *carry - const c1: Limb = @boolToInt(@addWithOverflow(Limb, a, carry.*, &r1)); - - // r2 = b * c - const bc = @as(DoubleLimb, math.mulWide(Limb, b, c)); - const r2 = @truncate(Limb, bc); - const c2 = @truncate(Limb, bc >> Limb.bit_count); - - // r1 = r1 + r2 - const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1)); - - // This never overflows, c1, c3 are either 0 or 1 and if both are 1 then - // c2 is at least <= maxInt(Limb) - 2. - carry.* = c1 + c2 + c3; - - return r1; - } - - fn llmulDigit(acc: []Limb, y: []const Limb, xi: Limb) void { - @setRuntimeSafety(false); - if (xi == 0) { - return; - } - - var carry: usize = 0; - var a_lo = acc[0..y.len]; - var a_hi = acc[y.len..]; - - var j: usize = 0; - while (j < a_lo.len) : (j += 1) { - a_lo[j] = @call(.{ .modifier = .always_inline }, addMulLimbWithCarry, .{ a_lo[j], y[j], xi, &carry }); - } - - j = 0; - while ((carry != 0) and (j < a_hi.len)) : (j += 1) { - carry = @boolToInt(@addWithOverflow(Limb, a_hi[j], carry, &a_hi[j])); - } - } - - // Knuth 4.3.1, Algorithm M. - // - // r MUST NOT alias any of a or b. 
- fn llmulacc(allocator: *Allocator, r: []Limb, a: []const Limb, b: []const Limb) error{OutOfMemory}!void { - @setRuntimeSafety(false); - - const a_norm = a[0..llnormalize(a)]; - const b_norm = b[0..llnormalize(b)]; - var x = a_norm; - var y = b_norm; - if (a_norm.len > b_norm.len) { - x = b_norm; - y = a_norm; - } - - debug.assert(r.len >= x.len + y.len + 1); - - // 48 is a pretty abitrary size chosen based on performance of a factorial program. - if (x.len <= 48) { - // Basecase multiplication - var i: usize = 0; - while (i < x.len) : (i += 1) { - llmulDigit(r[i..], y, x[i]); - } - } else { - // Karatsuba multiplication - const split = @divFloor(x.len, 2); - var x0 = x[0..split]; - var x1 = x[split..x.len]; - var y0 = y[0..split]; - var y1 = y[split..y.len]; - - var tmp = try allocator.alloc(Limb, x1.len + y1.len + 1); - defer allocator.free(tmp); - mem.set(Limb, tmp, 0); - - try llmulacc(allocator, tmp, x1, y1); - - var length = llnormalize(tmp); - _ = llaccum(r[split..], tmp[0..length]); - _ = llaccum(r[split * 2 ..], tmp[0..length]); - - mem.set(Limb, tmp[0..length], 0); - - try llmulacc(allocator, tmp, x0, y0); - - length = llnormalize(tmp); - _ = llaccum(r[0..], tmp[0..length]); - _ = llaccum(r[split..], tmp[0..length]); - - const x_cmp = llcmp(x1, x0); - const y_cmp = llcmp(y1, y0); - if (x_cmp * y_cmp == 0) { + if (a.limbs.len == 1 and b.limbs.len == 1) { + if (!@mulWithOverflow(Limb, a.limbs[0], b.limbs[0], &rma.limbs[0])) { + rma.len = 1; + rma.positive = (a.positive == b.positive); return; } - const x0_len = llnormalize(x0); - const x1_len = llnormalize(x1); - var j0 = try allocator.alloc(Limb, math.max(x0_len, x1_len)); - defer allocator.free(j0); - if (x_cmp == 1) { - llsub(j0, x1[0..x1_len], x0[0..x0_len]); - } else { - llsub(j0, x0[0..x0_len], x1[0..x1_len]); - } - - const y0_len = llnormalize(y0); - const y1_len = llnormalize(y1); - var j1 = try allocator.alloc(Limb, math.max(y0_len, y1_len)); - defer allocator.free(j1); - if (y_cmp == 1) { - llsub(j1, y1[0..y1_len], y0[0..y0_len]); - } else { - llsub(j1, y0[0..y0_len], y1[0..y1_len]); - } - const j0_len = llnormalize(j0); - const j1_len = llnormalize(j1); - if (x_cmp == y_cmp) { - mem.set(Limb, tmp[0..length], 0); - try llmulacc(allocator, tmp, j0, j1); - - length = Int.llnormalize(tmp); - llsub(r[split..], r[split..], tmp[0..length]); - } else { - try llmulacc(allocator, r[split..], j0, j1); - } - } - } - - // r = r + a - fn llaccum(r: []Limb, a: []const Limb) Limb { - @setRuntimeSafety(false); - debug.assert(r.len != 0 and a.len != 0); - debug.assert(r.len >= a.len); - - var i: usize = 0; - var carry: Limb = 0; - - while (i < a.len) : (i += 1) { - var c: Limb = 0; - c += @boolToInt(@addWithOverflow(Limb, r[i], a[i], &r[i])); - c += @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i])); - carry = c; } - while ((carry != 0) and i < r.len) : (i += 1) { - carry = @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i])); - } + mem.set(Limb, rma.limbs[0 .. a.limbs.len + b.limbs.len + 1], 0); - return carry; - } + llmulacc(allocator, rma.limbs, a.limbs, b.limbs); - /// Returns -1, 0, 1 if |a| < |b|, |a| == |b| or |a| > |b| respectively for limbs. 
- pub fn llcmp(a: []const Limb, b: []const Limb) i8 { - @setRuntimeSafety(false); - const a_len = llnormalize(a); - const b_len = llnormalize(b); - if (a_len < b_len) { - return -1; - } - if (a_len > b_len) { - return 1; - } - - var i: usize = a_len - 1; - while (i != 0) : (i -= 1) { - if (a[i] != b[i]) { - break; - } - } - - if (a[i] < b[i]) { - return -1; - } else if (a[i] > b[i]) { - return 1; - } else { - return 0; - } - } - - // returns the min length the limb could be. - fn llnormalize(a: []const Limb) usize { - @setRuntimeSafety(false); - var j = a.len; - while (j > 0) : (j -= 1) { - if (a[j - 1] != 0) { - break; - } - } - - // Handle zero - return if (j != 0) j else 1; + rma.normalize(a.limbs.len + b.limbs.len); + rma.positive = (a.positive == b.positive); } /// q = a / b (rem r) /// /// a / b are floored (rounded towards 0). - pub fn divFloor(q: *Int, r: *Int, a: Int, b: Int) !void { - try div(q, r, a, b); + /// q may alias with a or b. + /// + /// Asserts there is enough memory to store q and r. + /// The upper bound for r limb count is a.limbs.len. + /// The upper bound for q limb count is given by `a.limbs.len + b.limbs.len + 1`. + /// + /// If `allocator` is provided, it will be used for temporary storage to improve + /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm. + /// + /// `limbs_buffer` is used for temporary storage. The amount required is given by `calcDivLimbsBufferLen`. + pub fn divFloor( + q: *Mutable, + r: *Mutable, + a: Const, + b: Const, + limbs_buffer: []Limb, + allocator: ?*Allocator, + ) void { + div(q, r, a, b, limbs_buffer, allocator); // Trunc -> Floor. - if (!q.isPositive()) { - const one = Int.initFixed(([_]Limb{1})[0..]); - try q.sub(q.*, one); - try r.add(q.*, one); + if (!q.positive) { + const one: Const = .{ .limbs = &[_]Limb{1}, .positive = true }; + q.sub(q.toConst(), one); + r.add(q.toConst(), one); } - r.setSign(b.isPositive()); + r.positive = b.positive; } /// q = a / b (rem r) /// /// a / b are truncated (rounded towards -inf). - pub fn divTrunc(q: *Int, r: *Int, a: Int, b: Int) !void { - try div(q, r, a, b); - r.setSign(a.isPositive()); - } - - // Truncates by default. - fn div(quo: *Int, rem: *Int, a: Int, b: Int) !void { - quo.assertWritable(); - rem.assertWritable(); - - if (b.eqZero()) { - @panic("division by zero"); - } - if (quo == rem) { - @panic("quo and rem cannot be same variable"); - } - - if (a.cmpAbs(b) == .lt) { - // quo may alias a so handle rem first - try rem.copy(a); - rem.setSign(a.isPositive() == b.isPositive()); - - quo.metadata = 1; - quo.limbs[0] = 0; - return; - } - - // Handle trailing zero-words of divisor/dividend. These are not handled in the following - // algorithms. 
- const a_zero_limb_count = blk: { - var i: usize = 0; - while (i < a.len()) : (i += 1) { - if (a.limbs[i] != 0) break; - } - break :blk i; - }; - const b_zero_limb_count = blk: { - var i: usize = 0; - while (i < b.len()) : (i += 1) { - if (b.limbs[i] != 0) break; - } - break :blk i; - }; - - const ab_zero_limb_count = std.math.min(a_zero_limb_count, b_zero_limb_count); - - if (b.len() - ab_zero_limb_count == 1) { - try quo.ensureCapacity(a.len()); - - lldiv1(quo.limbs[0..], &rem.limbs[0], a.limbs[ab_zero_limb_count..a.len()], b.limbs[b.len() - 1]); - quo.normalize(a.len() - ab_zero_limb_count); - quo.setSign(a.isPositive() == b.isPositive()); - - rem.metadata = 1; - } else { - // x and y are modified during division - var x = try Int.initCapacity(quo.allocator.?, a.len()); - defer x.deinit(); - try x.copy(a); - - var y = try Int.initCapacity(quo.allocator.?, b.len()); - defer y.deinit(); - try y.copy(b); - - // x may grow one limb during normalization - try quo.ensureCapacity(a.len() + y.len()); - - // Shrink x, y such that the trailing zero limbs shared between are removed. - if (ab_zero_limb_count != 0) { - std.mem.copy(Limb, x.limbs[0..], x.limbs[ab_zero_limb_count..]); - std.mem.copy(Limb, y.limbs[0..], y.limbs[ab_zero_limb_count..]); - x.metadata -= ab_zero_limb_count; - y.metadata -= ab_zero_limb_count; - } - - try divN(quo.allocator.?, quo, rem, &x, &y); - quo.setSign(a.isPositive() == b.isPositive()); - } - - if (ab_zero_limb_count != 0) { - try rem.shiftLeft(rem.*, ab_zero_limb_count * Limb.bit_count); - } - } - - // Knuth 4.3.1, Exercise 16. - fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void { - @setRuntimeSafety(false); - debug.assert(a.len > 1 or a[0] >= b); - debug.assert(quo.len >= a.len); - - rem.* = 0; - for (a) |_, ri| { - const i = a.len - ri - 1; - const pdiv = ((@as(DoubleLimb, rem.*) << Limb.bit_count) | a[i]); - - if (pdiv == 0) { - quo[i] = 0; - rem.* = 0; - } else if (pdiv < b) { - quo[i] = 0; - rem.* = @truncate(Limb, pdiv); - } else if (pdiv == b) { - quo[i] = 1; - rem.* = 0; - } else { - quo[i] = @truncate(Limb, @divTrunc(pdiv, b)); - rem.* = @truncate(Limb, pdiv - (quo[i] *% b)); - } - } - } - - // Handbook of Applied Cryptography, 14.20 - // - // x = qy + r where 0 <= r < y - fn divN(allocator: *Allocator, q: *Int, r: *Int, x: *Int, y: *Int) !void { - debug.assert(y.len() >= 2); - debug.assert(x.len() >= y.len()); - debug.assert(q.limbs.len >= x.len() + y.len() - 1); - debug.assert(default_capacity >= 3); // see 3.2 - - var tmp = try Int.init(allocator); - defer tmp.deinit(); - - // Normalize so y > Limb.bit_count / 2 (i.e. leading bit is set) and even - var norm_shift = @clz(Limb, y.limbs[y.len() - 1]); - if (norm_shift == 0 and y.isOdd()) { - norm_shift = Limb.bit_count; - } - try x.shiftLeft(x.*, norm_shift); - try y.shiftLeft(y.*, norm_shift); - - const n = x.len() - 1; - const t = y.len() - 1; - - // 1. - q.metadata = n - t + 1; - mem.set(Limb, q.limbs[0..q.len()], 0); - - // 2. - try tmp.shiftLeft(y.*, Limb.bit_count * (n - t)); - while (x.cmp(tmp) != .lt) { - q.limbs[n - t] += 1; - try x.sub(x.*, tmp); - } - - // 3. 
- var i = n; - while (i > t) : (i -= 1) { - // 3.1 - if (x.limbs[i] == y.limbs[t]) { - q.limbs[i - t - 1] = maxInt(Limb); - } else { - const num = (@as(DoubleLimb, x.limbs[i]) << Limb.bit_count) | @as(DoubleLimb, x.limbs[i - 1]); - const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t])); - q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z); - } - - // 3.2 - tmp.limbs[0] = if (i >= 2) x.limbs[i - 2] else 0; - tmp.limbs[1] = if (i >= 1) x.limbs[i - 1] else 0; - tmp.limbs[2] = x.limbs[i]; - tmp.normalize(3); - - while (true) { - // 2x1 limb multiplication unrolled against single-limb q[i-t-1] - var carry: Limb = 0; - r.limbs[0] = addMulLimbWithCarry(0, if (t >= 1) y.limbs[t - 1] else 0, q.limbs[i - t - 1], &carry); - r.limbs[1] = addMulLimbWithCarry(0, y.limbs[t], q.limbs[i - t - 1], &carry); - r.limbs[2] = carry; - r.normalize(3); - - if (r.cmpAbs(tmp) != .gt) { - break; - } - - q.limbs[i - t - 1] -= 1; - } - - // 3.3 - try tmp.set(q.limbs[i - t - 1]); - try tmp.mul(tmp, y.*); - try tmp.shiftLeft(tmp, Limb.bit_count * (i - t - 1)); - try x.sub(x.*, tmp); - - if (!x.isPositive()) { - try tmp.shiftLeft(y.*, Limb.bit_count * (i - t - 1)); - try x.add(x.*, tmp); - q.limbs[i - t - 1] -= 1; - } - } - - // Denormalize - q.normalize(q.len()); - - try r.shiftRight(x.*, norm_shift); - r.normalize(r.len()); + /// q may alias with a or b. + /// + /// Asserts there is enough memory to store q and r. + /// The upper bound for r limb count is a.limbs.len. + /// The upper bound for q limb count is given by `calcQuotientLimbLen`. This accounts + /// for temporary space used by the division algorithm. + /// + /// If `allocator` is provided, it will be used for temporary storage to improve + /// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm. + /// + /// `limbs_buffer` is used for temporary storage. The amount required is given by `calcDivLimbsBufferLen`. + pub fn divTrunc( + q: *Mutable, + r: *Mutable, + a: Const, + b: Const, + limbs_buffer: []Limb, + allocator: ?*Allocator, + ) void { + div(q, r, a, b, limbs_buffer, allocator); + r.positive = a.positive; } /// r = a << shift, in other words, r = a * 2^shift - pub fn shiftLeft(r: *Int, a: Int, shift: usize) !void { - r.assertWritable(); - - try r.ensureCapacity(a.len() + (shift / Limb.bit_count) + 1); - llshl(r.limbs[0..], a.limbs[0..a.len()], shift); - r.normalize(a.len() + (shift / Limb.bit_count) + 1); - r.setSign(a.isPositive()); - } - - fn llshl(r: []Limb, a: []const Limb, shift: usize) void { - @setRuntimeSafety(false); - debug.assert(a.len >= 1); - debug.assert(r.len >= a.len + (shift / Limb.bit_count) + 1); - - const limb_shift = shift / Limb.bit_count + 1; - const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count); - - var carry: Limb = 0; - var i: usize = 0; - while (i < a.len) : (i += 1) { - const src_i = a.len - i - 1; - const dst_i = src_i + limb_shift; - - const src_digit = a[src_i]; - r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{ - Limb, - src_digit, - Limb.bit_count - @intCast(Limb, interior_limb_shift), - }); - carry = (src_digit << interior_limb_shift); - } - - r[limb_shift - 1] = carry; - mem.set(Limb, r[0 .. limb_shift - 1], 0); + /// + /// r and a may alias. + /// + /// Asserts there is enough memory to fit the result. The upper bound Limb count is + /// `a.limbs.len + (shift / (@sizeOf(Limb) * 8))`. 
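+    /// For example, `r.shiftLeft(a, 3)` computes `a * 8`, and shifting by
+    /// `Limb.bit_count` moves every limb up by exactly one position.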
+ pub fn shiftLeft(r: *Mutable, a: Const, shift: usize) void { + llshl(r.limbs[0..], a.limbs[0..a.limbs.len], shift); + r.normalize(a.limbs.len + (shift / Limb.bit_count) + 1); + r.positive = a.positive; } /// r = a >> shift - pub fn shiftRight(r: *Int, a: Int, shift: usize) !void { - r.assertWritable(); - - if (a.len() <= shift / Limb.bit_count) { - r.metadata = 1; + /// r and a may alias. + /// + /// Asserts there is enough memory to fit the result. The upper bound Limb count is + /// `a.limbs.len - (shift / (@sizeOf(Limb) * 8))`. + pub fn shiftRight(r: *Mutable, a: Const, shift: usize) void { + if (a.limbs.len <= shift / Limb.bit_count) { + r.len = 1; + r.positive = true; r.limbs[0] = 0; return; } - try r.ensureCapacity(a.len() - (shift / Limb.bit_count)); - const r_len = llshr(r.limbs[0..], a.limbs[0..a.len()], shift); - r.metadata = a.len() - (shift / Limb.bit_count); - r.setSign(a.isPositive()); - } - - fn llshr(r: []Limb, a: []const Limb, shift: usize) void { - @setRuntimeSafety(false); - debug.assert(a.len >= 1); - debug.assert(r.len >= a.len - (shift / Limb.bit_count)); - - const limb_shift = shift / Limb.bit_count; - const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count); - - var carry: Limb = 0; - var i: usize = 0; - while (i < a.len - limb_shift) : (i += 1) { - const src_i = a.len - i - 1; - const dst_i = src_i - limb_shift; - - const src_digit = a[src_i]; - r[dst_i] = carry | (src_digit >> interior_limb_shift); - carry = @call(.{ .modifier = .always_inline }, math.shl, .{ - Limb, - src_digit, - Limb.bit_count - @intCast(Limb, interior_limb_shift), - }); - } + const r_len = llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift); + r.len = a.limbs.len - (shift / Limb.bit_count); + r.positive = a.positive; } /// r = a | b + /// r may alias with a or b. /// /// a and b are zero-extended to the longer of a or b. - pub fn bitOr(r: *Int, a: Int, b: Int) !void { - r.assertWritable(); - - if (a.len() > b.len()) { - try r.ensureCapacity(a.len()); - llor(r.limbs[0..], a.limbs[0..a.len()], b.limbs[0..b.len()]); - r.setLen(a.len()); + /// + /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`. + pub fn bitOr(r: *Mutable, a: Const, b: Const) void { + if (a.limbs.len > b.limbs.len) { + llor(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]); + r.len = a.limbs.len; } else { - try r.ensureCapacity(b.len()); - llor(r.limbs[0..], b.limbs[0..b.len()], a.limbs[0..a.len()]); - r.setLen(b.len()); - } - } - - fn llor(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); - debug.assert(r.len >= a.len); - debug.assert(a.len >= b.len); - - var i: usize = 0; - while (i < b.len) : (i += 1) { - r[i] = a[i] | b[i]; - } - while (i < a.len) : (i += 1) { - r[i] = a[i]; + llor(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]); + r.len = b.limbs.len; } } /// r = a & b - pub fn bitAnd(r: *Int, a: Int, b: Int) !void { - r.assertWritable(); - - if (a.len() > b.len()) { - try r.ensureCapacity(b.len()); - lland(r.limbs[0..], a.limbs[0..a.len()], b.limbs[0..b.len()]); - r.normalize(b.len()); + /// r may alias with a or b. + /// + /// Asserts that r has enough limbs to store the result. Upper bound is `math.min(a.limbs.len, b.limbs.len)`. 
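+    /// For example, masking the low bits of `a` can be written as `r.bitAnd(a, mask)`;
+    /// the result is never longer than the shorter operand, hence the `min` bound.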
+ pub fn bitAnd(r: *Mutable, a: Const, b: Const) void { + if (a.limbs.len > b.limbs.len) { + lland(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]); + r.normalize(b.limbs.len); } else { - try r.ensureCapacity(a.len()); - lland(r.limbs[0..], b.limbs[0..b.len()], a.limbs[0..a.len()]); - r.normalize(a.len()); - } - } - - fn lland(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); - debug.assert(r.len >= b.len); - debug.assert(a.len >= b.len); - - var i: usize = 0; - while (i < b.len) : (i += 1) { - r[i] = a[i] & b[i]; + lland(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]); + r.normalize(a.limbs.len); } } /// r = a ^ b - pub fn bitXor(r: *Int, a: Int, b: Int) !void { - r.assertWritable(); - - if (a.len() > b.len()) { - try r.ensureCapacity(a.len()); - llxor(r.limbs[0..], a.limbs[0..a.len()], b.limbs[0..b.len()]); - r.normalize(a.len()); + /// r may alias with a or b. + /// + /// Asserts that r has enough limbs to store the result. Upper bound is `math.max(a.limbs.len, b.limbs.len)`. + pub fn bitXor(r: *Mutable, a: Const, b: Const) void { + if (a.limbs.len > b.limbs.len) { + llxor(r.limbs[0..], a.limbs[0..a.limbs.len], b.limbs[0..b.limbs.len]); + r.normalize(a.limbs.len); } else { - try r.ensureCapacity(b.len()); - llxor(r.limbs[0..], b.limbs[0..b.len()], a.limbs[0..a.len()]); - r.normalize(b.len()); + llxor(r.limbs[0..], b.limbs[0..b.limbs.len], a.limbs[0..a.limbs.len]); + r.normalize(b.limbs.len); } } - fn llxor(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); - debug.assert(r.len >= a.len); - debug.assert(a.len >= b.len); + /// rma may alias x or y. + /// x and y may alias each other. + /// Asserts that `rma` has enough limbs to store the result. Upper bound is + /// `math.min(x.limbs.len, y.limbs.len)`. + /// + /// `limbs_buffer` is used for temporary storage during the operation. When this function returns, + /// it will have the same length as it had when the function was called. + pub fn gcd(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.ArrayList(Limb)) !void { + const prev_len = limbs_buffer.items.len; + defer limbs_buffer.shrink(prev_len); + const x_copy = if (rma.limbs.ptr == x.limbs.ptr) blk: { + const start = limbs_buffer.items.len; + try limbs_buffer.appendSlice(x.limbs); + break :blk x.toMutable(limbs_buffer.items[start..]).toConst(); + } else x; + const y_copy = if (rma.limbs.ptr == y.limbs.ptr) blk: { + const start = limbs_buffer.items.len; + try limbs_buffer.appendSlice(y.limbs); + break :blk y.toMutable(limbs_buffer.items[start..]).toConst(); + } else y; - var i: usize = 0; - while (i < b.len) : (i += 1) { - r[i] = a[i] ^ b[i]; - } - while (i < a.len) : (i += 1) { - r[i] = a[i]; - } + return gcdLehmer(rma, x_copy, y_copy, limbs_buffer); } - pub fn gcd(rma: *Int, x: Int, y: Int) !void { - rma.assertWritable(); - var r = rma; - var aliased = rma.limbs.ptr == x.limbs.ptr or rma.limbs.ptr == y.limbs.ptr; - - var sr: Int = undefined; - if (aliased) { - sr = try Int.initCapacity(rma.allocator.?, math.max(x.len(), y.len())); - r = &sr; - aliased = true; - } - defer if (aliased) { - rma.swap(r); - r.deinit(); - }; - - try gcdLehmer(r, x, y); + /// rma may not alias x or y. + /// x and y may alias each other. + /// Asserts that `rma` has enough limbs to store the result. Upper bound is given by `calcGcdNoAliasLimbLen`. + /// + /// `limbs_buffer` is used for temporary storage during the operation. 
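+    /// For example, a caller might prepare `var buf = std.ArrayList(Limb).init(allocator);`
+    /// and then compute `try result.gcdNoAlias(x, y, &buf);`, deinitializing `buf` afterwards.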
+ pub fn gcdNoAlias(rma: *Mutable, x: Const, y: Const, limbs_buffer: *std.ArrayList(Limb)) !void { + assert(rma.limbs.ptr != x.limbs.ptr); // illegal aliasing + assert(rma.limbs.ptr != y.limbs.ptr); // illegal aliasing + return gcdLehmer(rma, x, y, allocator); } - fn gcdLehmer(r: *Int, xa: Int, ya: Int) !void { - var x = try xa.clone(); - x.abs(); + fn gcdLehmer(result: *Mutable, xa: Const, ya: Const, limbs_buffer: *std.ArrayList(Limb)) !void { + var x = try xa.toManaged(limbs_buffer.allocator); defer x.deinit(); + x.abs(); - var y = try ya.clone(); - y.abs(); + var y = try ya.toManaged(limbs_buffer.allocator); defer y.deinit(); + y.abs(); - if (x.cmp(y) == .lt) { + if (x.toConst().order(y.toConst()) == .lt) { x.swap(&y); } - var T = try Int.init(r.allocator.?); - defer T.deinit(); + var t_big = try Managed.init(limbs_buffer.allocator); + defer t_big.deinit(); + + var r = try Managed.init(limbs_buffer.allocator); + defer r.deinit(); while (y.len() > 1) { - debug.assert(x.isPositive() and y.isPositive()); - debug.assert(x.len() >= y.len()); + assert(x.isPositive() and y.isPositive()); + assert(x.len() >= y.len()); var xh: SignedDoubleLimb = x.limbs[x.len() - 1]; var yh: SignedDoubleLimb = if (x.len() > y.len()) 0 else y.limbs[x.len() - 1]; @@ -1450,1499 +651,1482 @@ pub const Int = struct { } if (B == 0) { - // T = x % y, r is unused - try Int.divTrunc(r, &T, x, y); - debug.assert(T.isPositive()); + // t_big = x % y, r is unused + try r.divTrunc(&t_big, x.toConst(), y.toConst()); + assert(t_big.isPositive()); x.swap(&y); - y.swap(&T); + y.swap(&t_big); } else { var storage: [8]Limb = undefined; - const Ap = FixedIntFromSignedDoubleLimb(A, storage[0..2]); - const Bp = FixedIntFromSignedDoubleLimb(B, storage[2..4]); - const Cp = FixedIntFromSignedDoubleLimb(C, storage[4..6]); - const Dp = FixedIntFromSignedDoubleLimb(D, storage[6..8]); + const Ap = fixedIntFromSignedDoubleLimb(A, storage[0..2]).toConst(); + const Bp = fixedIntFromSignedDoubleLimb(B, storage[2..4]).toConst(); + const Cp = fixedIntFromSignedDoubleLimb(C, storage[4..6]).toConst(); + const Dp = fixedIntFromSignedDoubleLimb(D, storage[6..8]).toConst(); - // T = Ax + By - try r.mul(x, Ap); - try T.mul(y, Bp); - try T.add(r.*, T); + // t_big = Ax + By + try r.mul(x.toConst(), Ap); + try t_big.mul(y.toConst(), Bp); + try t_big.add(r.toConst(), t_big.toConst()); // u = Cx + Dy, r as u - try x.mul(x, Cp); - try r.mul(y, Dp); - try r.add(x, r.*); + try x.mul(x.toConst(), Cp); + try r.mul(y.toConst(), Dp); + try r.add(x.toConst(), r.toConst()); - x.swap(&T); - y.swap(r); + x.swap(&t_big); + y.swap(&r); } } // euclidean algorithm - debug.assert(x.cmp(y) != .lt); + assert(x.toConst().order(y.toConst()) != .lt); - while (!y.eqZero()) { - try Int.divTrunc(&T, r, x, y); + while (!y.toConst().eqZero()) { + try t_big.divTrunc(&r, x.toConst(), y.toConst()); x.swap(&y); - y.swap(r); + y.swap(&r); } - r.swap(&x); + result.copy(x.toConst()); + } + + /// Truncates by default. + fn div(quo: *Mutable, rem: *Mutable, a: Const, b: Const, limbs_buffer: []Limb, allocator: ?*Allocator) void { + assert(!b.eqZero()); // division by zero + assert(quo != rem); // illegal aliasing + + if (a.orderAbs(b) == .lt) { + // quo may alias a so handle rem first + rem.copy(a); + rem.positive = a.positive == b.positive; + + quo.positive = true; + quo.len = 1; + quo.limbs[0] = 0; + return; + } + + // Handle trailing zero-words of divisor/dividend. These are not handled in the following + // algorithms. 
+ const a_zero_limb_count = blk: { + var i: usize = 0; + while (i < a.limbs.len) : (i += 1) { + if (a.limbs[i] != 0) break; + } + break :blk i; + }; + const b_zero_limb_count = blk: { + var i: usize = 0; + while (i < b.limbs.len) : (i += 1) { + if (b.limbs[i] != 0) break; + } + break :blk i; + }; + + const ab_zero_limb_count = math.min(a_zero_limb_count, b_zero_limb_count); + + if (b.limbs.len - ab_zero_limb_count == 1) { + lldiv1(quo.limbs[0..], &rem.limbs[0], a.limbs[ab_zero_limb_count..a.limbs.len], b.limbs[b.limbs.len - 1]); + quo.normalize(a.limbs.len - ab_zero_limb_count); + quo.positive = (a.positive == b.positive); + + rem.len = 1; + rem.positive = true; + } else { + // x and y are modified during division + const sep_len = calcMulLimbsBufferLen(a.limbs.len, b.limbs.len, 2); + const x_limbs = limbs_buffer[0 * sep_len ..][0..sep_len]; + const y_limbs = limbs_buffer[1 * sep_len ..][0..sep_len]; + const t_limbs = limbs_buffer[2 * sep_len ..][0..sep_len]; + const mul_limbs_buf = limbs_buffer[3 * sep_len ..][0..sep_len]; + + var x: Mutable = .{ + .limbs = x_limbs, + .positive = a.positive, + .len = a.limbs.len - ab_zero_limb_count, + }; + var y: Mutable = .{ + .limbs = y_limbs, + .positive = b.positive, + .len = b.limbs.len - ab_zero_limb_count, + }; + + // Shrink x, y such that the trailing zero limbs shared between are removed. + mem.copy(Limb, x.limbs, a.limbs[ab_zero_limb_count..a.limbs.len]); + mem.copy(Limb, y.limbs, b.limbs[ab_zero_limb_count..b.limbs.len]); + + divN(quo, rem, &x, &y, t_limbs, mul_limbs_buf, allocator); + quo.positive = (a.positive == b.positive); + } + + if (ab_zero_limb_count != 0) { + rem.shiftLeft(rem.toConst(), ab_zero_limb_count * Limb.bit_count); + } + } + + /// Handbook of Applied Cryptography, 14.20 + /// + /// x = qy + r where 0 <= r < y + fn divN( + q: *Mutable, + r: *Mutable, + x: *Mutable, + y: *Mutable, + tmp_limbs: []Limb, + mul_limb_buf: []Limb, + allocator: ?*Allocator, + ) void { + assert(y.len >= 2); + assert(x.len >= y.len); + assert(q.limbs.len >= x.len + y.len - 1); + + // See 3.2 + var backup_tmp_limbs: [3]Limb = undefined; + const t_limbs = if (tmp_limbs.len < 3) &backup_tmp_limbs else tmp_limbs; + + var tmp: Mutable = .{ + .limbs = t_limbs, + .len = 1, + .positive = true, + }; + tmp.limbs[0] = 0; + + // Normalize so y > Limb.bit_count / 2 (i.e. leading bit is set) and even + var norm_shift = @clz(Limb, y.limbs[y.len - 1]); + if (norm_shift == 0 and y.toConst().isOdd()) { + norm_shift = Limb.bit_count; + } + x.shiftLeft(x.toConst(), norm_shift); + y.shiftLeft(y.toConst(), norm_shift); + + const n = x.len - 1; + const t = y.len - 1; + + // 1. + q.len = n - t + 1; + q.positive = true; + mem.set(Limb, q.limbs[0..q.len], 0); + + // 2. + tmp.shiftLeft(y.toConst(), Limb.bit_count * (n - t)); + while (x.toConst().order(tmp.toConst()) != .lt) { + q.limbs[n - t] += 1; + x.sub(x.toConst(), tmp.toConst()); + } + + // 3. 
+ var i = n; + while (i > t) : (i -= 1) { + // 3.1 + if (x.limbs[i] == y.limbs[t]) { + q.limbs[i - t - 1] = maxInt(Limb); + } else { + const num = (@as(DoubleLimb, x.limbs[i]) << Limb.bit_count) | @as(DoubleLimb, x.limbs[i - 1]); + const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t])); + q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z); + } + + // 3.2 + tmp.limbs[0] = if (i >= 2) x.limbs[i - 2] else 0; + tmp.limbs[1] = if (i >= 1) x.limbs[i - 1] else 0; + tmp.limbs[2] = x.limbs[i]; + tmp.normalize(3); + + while (true) { + // 2x1 limb multiplication unrolled against single-limb q[i-t-1] + var carry: Limb = 0; + r.limbs[0] = addMulLimbWithCarry(0, if (t >= 1) y.limbs[t - 1] else 0, q.limbs[i - t - 1], &carry); + r.limbs[1] = addMulLimbWithCarry(0, y.limbs[t], q.limbs[i - t - 1], &carry); + r.limbs[2] = carry; + r.normalize(3); + + if (r.toConst().orderAbs(tmp.toConst()) != .gt) { + break; + } + + q.limbs[i - t - 1] -= 1; + } + + // 3.3 + tmp.set(q.limbs[i - t - 1]); + tmp.mul(tmp.toConst(), y.toConst(), mul_limb_buf, allocator); + tmp.shiftLeft(tmp.toConst(), Limb.bit_count * (i - t - 1)); + x.sub(x.toConst(), tmp.toConst()); + + if (!x.positive) { + tmp.shiftLeft(y.toConst(), Limb.bit_count * (i - t - 1)); + x.add(x.toConst(), tmp.toConst()); + q.limbs[i - t - 1] -= 1; + } + } + + // Denormalize + q.normalize(q.len); + + r.shiftRight(x.toConst(), norm_shift); + r.normalize(r.len); + } + + /// Normalize a possible sequence of leading zeros. + /// + /// [1, 2, 3, 4, 0] -> [1, 2, 3, 4] + /// [1, 2, 0, 0, 0] -> [1, 2] + /// [0, 0, 0, 0, 0] -> [0] + fn normalize(r: *Mutable, length: usize) void { + r.len = llnormalize(r.limbs[0..length]); } }; -// Storage must live for the lifetime of the returned value -fn FixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Int { - std.debug.assert(storage.len >= 2); +/// A arbitrary-precision big integer, with a fixed set of immutable limbs. +pub const Const = struct { + /// Raw digits. These are: + /// + /// * Little-endian ordered + /// * limbs.len >= 1 + /// * Zero is represented as limbs.len == 1 with limbs[0] == 0. + /// + /// Accessing limbs directly should be avoided. + limbs: []const Limb, + positive: bool, - var A_is_positive = A >= 0; - const Au = @intCast(DoubleLimb, if (A < 0) -A else A); - storage[0] = @truncate(Limb, Au); - storage[1] = @truncate(Limb, Au >> Limb.bit_count); - var Ap = Int.initFixed(storage[0..2]); - Ap.setSign(A_is_positive); - return Ap; -} + /// The result is an independent resource which is managed by the caller. + pub fn toManaged(self: Const, allocator: *Allocator) Allocator.Error!Managed { + const limbs = try allocator.alloc(Limb, math.max(Managed.default_capacity, self.limbs.len)); + mem.copy(Limb, limbs, self.limbs); + return Managed{ + .allocator = allocator, + .limbs = limbs, + .metadata = if (self.positive) + self.limbs.len & ~Managed.sign_bit + else + self.limbs.len | Managed.sign_bit, + }; + } -// NOTE: All the following tests assume the max machine-word will be 64-bit. -// -// They will still run on larger than this and should pass, but the multi-limb code-paths -// may be untested in some cases. + /// Asserts `limbs` is big enough to store the value. 
+ pub fn toMutable(self: Const, limbs: []Limb) Mutable { + mem.copy(Limb, limbs, self.limbs[0..self.limbs.len]); + return .{ + .limbs = limbs, + .positive = self.positive, + .len = self.limbs.len, + }; + } -test "big.int comptime_int set" { - comptime var s = 0xefffffff00000001eeeeeeefaaaaaaab; - var a = try Int.initSet(testing.allocator, s); - defer a.deinit(); + pub fn dump(self: Const) void { + for (self.limbs[0..self.limbs.len]) |limb| { + std.debug.warn("{x} ", .{limb}); + } + std.debug.warn("positive={}\n", .{self.positive}); + } - const s_limb_count = 128 / Limb.bit_count; + pub fn abs(self: Const) Const { + return .{ + .limbs = self.limbs, + .positive = true, + }; + } - comptime var i: usize = 0; - inline while (i < s_limb_count) : (i += 1) { - const result = @as(Limb, s & maxInt(Limb)); - s >>= Limb.bit_count / 2; - s >>= Limb.bit_count / 2; - testing.expect(a.limbs[i] == result); + pub fn isOdd(self: Const) bool { + return self.limbs[0] & 1 != 0; + } + + pub fn isEven(self: Const) bool { + return !self.isOdd(); + } + + /// Returns the number of bits required to represent the absolute value of an integer. + pub fn bitCountAbs(self: Const) usize { + return (self.limbs.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(Limb, self.limbs[self.limbs.len - 1])); + } + + /// Returns the number of bits required to represent the integer in twos-complement form. + /// + /// If the integer is negative the value returned is the number of bits needed by a signed + /// integer to represent the value. If positive the value is the number of bits for an + /// unsigned integer. Any unsigned integer will fit in the signed integer with bitcount + /// one greater than the returned value. + /// + /// e.g. -127 returns 8 as it will fit in an i8. 127 returns 7 since it fits in a u7. + pub fn bitCountTwosComp(self: Const) usize { + var bits = self.bitCountAbs(); + + // If the entire value has only one bit set (e.g. 0b100000000) then the negation in twos + // complement requires one less bit. + if (!self.positive) block: { + bits += 1; + + if (@popCount(Limb, self.limbs[self.limbs.len - 1]) == 1) { + for (self.limbs[0 .. self.limbs.len - 1]) |limb| { + if (@popCount(Limb, limb) != 0) { + break :block; + } + } + + bits -= 1; + } + } + + return bits; + } + + pub fn fitsInTwosComp(self: Const, is_signed: bool, bit_count: usize) bool { + if (self.eqZero()) { + return true; + } + if (!is_signed and !self.positive) { + return false; + } + + const req_bits = self.bitCountTwosComp() + @boolToInt(self.positive and is_signed); + return bit_count >= req_bits; + } + + /// Returns whether self can fit into an integer of the requested type. + pub fn fits(self: Const, comptime T: type) bool { + const info = @typeInfo(T).Int; + return self.fitsInTwosComp(info.is_signed, info.bits); + } + + /// Returns the approximate size of the integer in the given base. Negative values accommodate for + /// the minus sign. This is used for determining the number of characters needed to print the + /// value. It is inexact and may exceed the given value by ~1-2 bytes. + /// TODO See if we can make this exact. + pub fn sizeInBaseUpperBound(self: Const, base: usize) usize { + const bit_count = @as(usize, @boolToInt(!self.positive)) + self.bitCountAbs(); + return (bit_count / math.log2(base)) + 2; + } + + pub const ConvertError = error{ + NegativeIntoUnsigned, + TargetTooSmall, + }; + + /// Convert self to type T. + /// + /// Returns an error if self cannot be narrowed into the requested type without truncation. 
+ pub fn to(self: Const, comptime T: type) ConvertError!T { + switch (@typeInfo(T)) { + .Int => { + const UT = std.meta.Int(false, T.bit_count); + + if (self.bitCountTwosComp() > T.bit_count) { + return error.TargetTooSmall; + } + + var r: UT = 0; + + if (@sizeOf(UT) <= @sizeOf(Limb)) { + r = @intCast(UT, self.limbs[0]); + } else { + for (self.limbs[0..self.limbs.len]) |_, ri| { + const limb = self.limbs[self.limbs.len - ri - 1]; + r <<= Limb.bit_count; + r |= limb; + } + } + + if (!T.is_signed) { + return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned; + } else { + if (self.positive) { + return @intCast(T, r); + } else { + if (math.cast(T, r)) |ok| { + return -ok; + } else |_| { + return minInt(T); + } + } + } + }, + else => @compileError("cannot convert Const to type " ++ @typeName(T)), + } + } + + /// To allow `std.fmt.format` to work with this type. + /// If the integer is larger than `pow(2, 64 * @sizeOf(usize) * 8), this function will fail + /// to print the string, printing "(BigInt)" instead of a number. + /// This is because the rendering algorithm requires reversing a string, which requires O(N) memory. + /// See `toString` and `toStringAlloc` for a way to print big integers without failure. + pub fn format( + self: Const, + comptime fmt: []const u8, + options: std.fmt.FormatOptions, + out_stream: var, + ) !void { + comptime var radix = 10; + comptime var uppercase = false; + + if (fmt.len == 0 or comptime mem.eql(u8, fmt, "d")) { + radix = 10; + uppercase = false; + } else if (comptime mem.eql(u8, fmt, "b")) { + radix = 2; + uppercase = false; + } else if (comptime mem.eql(u8, fmt, "x")) { + radix = 16; + uppercase = false; + } else if (comptime mem.eql(u8, fmt, "X")) { + radix = 16; + uppercase = true; + } else { + @compileError("Unknown format string: '" ++ fmt ++ "'"); + } + + var limbs: [128]Limb = undefined; + const needed_limbs = calcDivLimbsBufferLen(self.limbs.len, 1); + if (needed_limbs > limbs.len) + return out_stream.writeAll("(BigInt)"); + + // This is the inverse of calcDivLimbsBufferLen + const available_len = (limbs.len / 3) - 2; + + const biggest: Const = .{ + .limbs = &([1]Limb{math.maxInt(Limb)} ** available_len), + .positive = false, + }; + var buf: [biggest.sizeInBaseUpperBound(radix)]u8 = undefined; + const len = self.toString(&buf, radix, uppercase, &limbs); + return out_stream.writeAll(buf[0..len]); + } + + /// Converts self to a string in the requested base. + /// Caller owns returned memory. + /// Asserts that `base` is in the range [2, 16]. + /// See also `toString`, a lower level function than this. + pub fn toStringAlloc(self: Const, allocator: *Allocator, base: u8, uppercase: bool) Allocator.Error![]u8 { + assert(base >= 2); + assert(base <= 16); + + if (self.eqZero()) { + return mem.dupe(allocator, u8, "0"); + } + const string = try allocator.alloc(u8, self.sizeInBaseUpperBound(base)); + errdefer allocator.free(string); + + const limbs = try allocator.alloc(Limb, calcToStringLimbsBufferLen(self.limbs.len, base)); + defer allocator.free(limbs); + + return allocator.shrink(string, self.toString(string, base, uppercase, limbs)); + } + + /// Converts self to a string in the requested base. + /// Asserts that `base` is in the range [2, 16]. + /// `string` is a caller-provided slice of at least `sizeInBaseUpperBound` bytes, + /// where the result is written to. + /// Returns the length of the string. + /// `limbs_buffer` is caller-provided memory for `toString` to use as a working area. 
It must have + /// length of at least `calcToStringLimbsBufferLen`. + /// In the case of power-of-two base, `limbs_buffer` is ignored. + /// See also `toStringAlloc`, a higher level function than this. + pub fn toString(self: Const, string: []u8, base: u8, uppercase: bool, limbs_buffer: []Limb) usize { + assert(base >= 2); + assert(base <= 16); + + if (self.eqZero()) { + string[0] = '0'; + return 1; + } + + var digits_len: usize = 0; + + // Power of two: can do a single pass and use masks to extract digits. + if (math.isPowerOfTwo(base)) { + const base_shift = math.log2_int(Limb, base); + + outer: for (self.limbs[0..self.limbs.len]) |limb| { + var shift: usize = 0; + while (shift < Limb.bit_count) : (shift += base_shift) { + const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1)); + const ch = std.fmt.digitToChar(r, uppercase); + string[digits_len] = ch; + digits_len += 1; + // If we hit the end, it must be all zeroes from here. + if (digits_len == string.len) break :outer; + } + } + + // Always will have a non-zero digit somewhere. + while (string[digits_len - 1] == '0') { + digits_len -= 1; + } + } else { + // Non power-of-two: batch divisions per word size. + const digits_per_limb = math.log(Limb, base, maxInt(Limb)); + var limb_base: Limb = 1; + var j: usize = 0; + while (j < digits_per_limb) : (j += 1) { + limb_base *= base; + } + const b: Const = .{ .limbs = &[_]Limb{limb_base}, .positive = true }; + + var q: Mutable = .{ + .limbs = limbs_buffer[0 .. self.limbs.len + 2], + .positive = true, // Make absolute by ignoring self.positive. + .len = self.limbs.len, + }; + mem.copy(Limb, q.limbs, self.limbs); + + var r: Mutable = .{ + .limbs = limbs_buffer[q.limbs.len..][0..self.limbs.len], + .positive = true, + .len = 1, + }; + r.limbs[0] = 0; + + const rest_of_the_limbs_buf = limbs_buffer[q.limbs.len + r.limbs.len ..]; + + while (q.len >= 2) { + // Passing an allocator here would not be helpful since this division is destroying + // information, not creating it. [TODO citation needed] + q.divTrunc(&r, q.toConst(), b, rest_of_the_limbs_buf, null); + + var r_word = r.limbs[0]; + var i: usize = 0; + while (i < digits_per_limb) : (i += 1) { + const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), uppercase); + r_word /= base; + string[digits_len] = ch; + digits_len += 1; + } + } + + { + assert(q.len == 1); + + var r_word = q.limbs[0]; + while (r_word != 0) { + const ch = std.fmt.digitToChar(@intCast(u8, r_word % base), uppercase); + r_word /= base; + string[digits_len] = ch; + digits_len += 1; + } + } + } + + if (!self.positive) { + string[digits_len] = '-'; + digits_len += 1; + } + + const s = string[0..digits_len]; + mem.reverse(u8, s); + return s.len; + } + + /// Returns `math.Order.lt`, `math.Order.eq`, `math.Order.gt` if + /// `|a| < |b|`, `|a| == |b|`, or `|a| > |b|` respectively. + pub fn orderAbs(a: Const, b: Const) math.Order { + if (a.limbs.len < b.limbs.len) { + return .lt; + } + if (a.limbs.len > b.limbs.len) { + return .gt; + } + + var i: usize = a.limbs.len - 1; + while (i != 0) : (i -= 1) { + if (a.limbs[i] != b.limbs[i]) { + break; + } + } + + if (a.limbs[i] < b.limbs[i]) { + return .lt; + } else if (a.limbs[i] > b.limbs[i]) { + return .gt; + } else { + return .eq; + } + } + + /// Returns `math.Order.lt`, `math.Order.eq`, `math.Order.gt` if `a < b`, `a == b` or `a > b` respectively. 
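+    /// For example, with a = -11 and b = 10, `order` returns `.lt` while `orderAbs` returns `.gt`.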
+ pub fn order(a: Const, b: Const) math.Order { + if (a.positive != b.positive) { + return if (a.positive) .gt else .lt; + } else { + const r = orderAbs(a, b); + return if (a.positive) r else switch (r) { + .lt => math.Order.gt, + .eq => math.Order.eq, + .gt => math.Order.lt, + }; + } + } + + /// Same as `order` but the right-hand operand is a primitive integer. + pub fn orderAgainstScalar(lhs: Const, scalar: var) math.Order { + var limbs: [calcLimbLen(scalar)]Limb = undefined; + const rhs = Mutable.init(&limbs, scalar); + return order(lhs, rhs.toConst()); + } + + /// Returns true if `a == 0`. + pub fn eqZero(a: Const) bool { + return a.limbs.len == 1 and a.limbs[0] == 0; + } + + /// Returns true if `|a| == |b|`. + pub fn eqAbs(a: Const, b: Const) bool { + return orderAbs(a, b) == .eq; + } + + /// Returns true if `a == b`. + pub fn eq(a: Const, b: Const) bool { + return order(a, b) == .eq; + } +}; + +/// An arbitrary-precision big integer along with an allocator which manages the memory. +/// +/// Memory is allocated as needed to ensure operations never overflow. The range +/// is bounded only by available memory. +pub const Managed = struct { + pub const sign_bit: usize = 1 << (usize.bit_count - 1); + + /// Default number of limbs to allocate on creation of a `Managed`. + pub const default_capacity = 4; + + /// Allocator used by the Managed when requesting memory. + allocator: *Allocator, + + /// Raw digits. These are: + /// + /// * Little-endian ordered + /// * limbs.len >= 1 + /// * Zero is represent as Managed.len() == 1 with limbs[0] == 0. + /// + /// Accessing limbs directly should be avoided. + limbs: []Limb, + + /// High bit is the sign bit. If set, Managed is negative, else Managed is positive. + /// The remaining bits represent the number of limbs used by Managed. + metadata: usize, + + /// Creates a new `Managed`. `default_capacity` limbs will be allocated immediately. + /// The integer value after initializing is `0`. + pub fn init(allocator: *Allocator) !Managed { + return initCapacity(allocator, default_capacity); + } + + pub fn toMutable(self: Managed) Mutable { + return .{ + .limbs = self.limbs, + .positive = self.isPositive(), + .len = self.len(), + }; + } + + pub fn toConst(self: Managed) Const { + return .{ + .limbs = self.limbs[0..self.len()], + .positive = self.isPositive(), + }; + } + + /// Creates a new `Managed` with value `value`. + /// + /// This is identical to an `init`, followed by a `set`. + pub fn initSet(allocator: *Allocator, value: var) !Managed { + var s = try Managed.init(allocator); + try s.set(value); + return s; + } + + /// Creates a new Managed with a specific capacity. If capacity < default_capacity then the + /// default capacity will be used instead. + /// The integer value after initializing is `0`. + pub fn initCapacity(allocator: *Allocator, capacity: usize) !Managed { + return Managed{ + .allocator = allocator, + .metadata = 1, + .limbs = block: { + const limbs = try allocator.alloc(Limb, math.max(default_capacity, capacity)); + limbs[0] = 0; + break :block limbs; + }, + }; + } + + /// Returns the number of limbs currently in use. + pub fn len(self: Managed) usize { + return self.metadata & ~sign_bit; + } + + /// Returns whether an Managed is positive. + pub fn isPositive(self: Managed) bool { + return self.metadata & sign_bit == 0; + } + + /// Sets the sign of an Managed. 
+ pub fn setSign(self: *Managed, positive: bool) void { + if (positive) { + self.metadata &= ~sign_bit; + } else { + self.metadata |= sign_bit; + } + } + + /// Sets the length of an Managed. + /// + /// If setLen is used, then the Managed must be normalized to suit. + pub fn setLen(self: *Managed, new_len: usize) void { + self.metadata &= sign_bit; + self.metadata |= new_len; + } + + pub fn setMetadata(self: *Managed, positive: bool, length: usize) void { + self.metadata = if (positive) length & ~sign_bit else length | sign_bit; + } + + /// Ensures an Managed has enough space allocated for capacity limbs. If the Managed does not have + /// sufficient capacity, the exact amount will be allocated. This occurs even if the requested + /// capacity is only greater than the current capacity by one limb. + pub fn ensureCapacity(self: *Managed, capacity: usize) !void { + if (capacity <= self.limbs.len) { + return; + } + self.limbs = try self.allocator.realloc(self.limbs, capacity); + } + + /// Frees all associated memory. + pub fn deinit(self: *Managed) void { + self.allocator.free(self.limbs); + self.* = undefined; + } + + /// Returns a `Managed` with the same value. The returned `Managed` is a deep copy and + /// can be modified separately from the original, and its resources are managed + /// separately from the original. + pub fn clone(other: Managed) !Managed { + return other.cloneWithDifferentAllocator(other.allocator); + } + + pub fn cloneWithDifferentAllocator(other: Managed, allocator: *Allocator) !Managed { + return Managed{ + .allocator = allocator, + .metadata = other.metadata, + .limbs = block: { + var limbs = try allocator.alloc(Limb, other.len()); + mem.copy(Limb, limbs[0..], other.limbs[0..other.len()]); + break :block limbs; + }, + }; + } + + /// Copies the value of the integer to an existing `Managed` so that they both have the same value. + /// Extra memory will be allocated if the receiver does not have enough capacity. + pub fn copy(self: *Managed, other: Const) !void { + if (self.limbs.ptr == other.limbs.ptr) return; + + try self.ensureCapacity(other.limbs.len); + mem.copy(Limb, self.limbs[0..], other.limbs[0..other.limbs.len]); + self.setMetadata(other.positive, other.limbs.len); + } + + /// Efficiently swap a `Managed` with another. This swaps the limb pointers and a full copy is not + /// performed. The address of the limbs field will not be the same after this function. + pub fn swap(self: *Managed, other: *Managed) void { + mem.swap(Managed, self, other); + } + + /// Debugging tool: prints the state to stderr. + pub fn dump(self: Managed) void { + for (self.limbs[0..self.len()]) |limb| { + std.debug.warn("{x} ", .{limb}); + } + std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.positive }); + } + + /// Negate the sign. + pub fn negate(self: *Managed) void { + self.metadata ^= sign_bit; + } + + /// Make positive. + pub fn abs(self: *Managed) void { + self.metadata &= ~sign_bit; + } + + pub fn isOdd(self: Managed) bool { + return self.limbs[0] & 1 != 0; + } + + pub fn isEven(self: Managed) bool { + return !self.isOdd(); + } + + /// Returns the number of bits required to represent the absolute value of an integer. + pub fn bitCountAbs(self: Managed) usize { + return self.toConst().bitCountAbs(); + } + + /// Returns the number of bits required to represent the integer in twos-complement form. + /// + /// If the integer is negative the value returned is the number of bits needed by a signed + /// integer to represent the value. 
If positive the value is the number of bits for an + /// unsigned integer. Any unsigned integer will fit in the signed integer with bitcount + /// one greater than the returned value. + /// + /// e.g. -127 returns 8 as it will fit in an i8. 127 returns 7 since it fits in a u7. + pub fn bitCountTwosComp(self: Managed) usize { + return self.toConst().bitCountTwosComp(); + } + + pub fn fitsInTwosComp(self: Managed, is_signed: bool, bit_count: usize) bool { + return self.toConst().fitsInTwosComp(is_signed, bit_count); + } + + /// Returns whether self can fit into an integer of the requested type. + pub fn fits(self: Managed, comptime T: type) bool { + return self.toConst().fits(T); + } + + /// Returns the approximate size of the integer in the given base. Negative values accommodate for + /// the minus sign. This is used for determining the number of characters needed to print the + /// value. It is inexact and may exceed the given value by ~1-2 bytes. + pub fn sizeInBaseUpperBound(self: Managed, base: usize) usize { + return self.toConst().sizeInBaseUpperBound(base); + } + + /// Sets an Managed to value. Value must be an primitive integer type. + pub fn set(self: *Managed, value: var) Allocator.Error!void { + try self.ensureCapacity(calcLimbLen(value)); + var m = self.toMutable(); + m.set(value); + self.setMetadata(m.positive, m.len); + } + + pub const ConvertError = Const.ConvertError; + + /// Convert self to type T. + /// + /// Returns an error if self cannot be narrowed into the requested type without truncation. + pub fn to(self: Managed, comptime T: type) ConvertError!T { + return self.toConst().to(T); + } + + /// Set self from the string representation `value`. + /// + /// `value` must contain only digits <= `base` and is case insensitive. Base prefixes are + /// not allowed (e.g. 0x43 should simply be 43). Underscores in the input string are + /// ignored and can be used as digit separators. + /// + /// Returns an error if memory could not be allocated or `value` has invalid digits for the + /// requested base. + /// + /// self's allocator is used for temporary storage to boost multiplication performance. + pub fn setString(self: *Managed, base: u8, value: []const u8) !void { + if (base < 2 or base > 16) return error.InvalidBase; + const den = (@sizeOf(Limb) * 8 / base); + try self.ensureCapacity((value.len + (den - 1)) / den); + const limbs_buffer = try self.allocator.alloc(Limb, calcSetStringLimbsBufferLen(base, value.len)); + defer self.allocator.free(limbs_buffer); + var m = self.toMutable(); + try m.setString(base, value, limbs_buffer, self.allocator); + self.setMetadata(m.positive, m.len); + } + + /// Converts self to a string in the requested base. Memory is allocated from the provided + /// allocator and not the one present in self. + pub fn toString(self: Managed, allocator: *Allocator, base: u8, uppercase: bool) ![]u8 { + if (base < 2 or base > 16) return error.InvalidBase; + return self.toConst().toStringAlloc(self.allocator, base, uppercase); + } + + /// To allow `std.fmt.format` to work with `Managed`. + /// If the integer is larger than `pow(2, 64 * @sizeOf(usize) * 8), this function will fail + /// to print the string, printing "(BigInt)" instead of a number. + /// This is because the rendering algorithm requires reversing a string, which requires O(N) memory. + /// See `toString` and `toStringAlloc` for a way to print big integers without failure. 
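+    /// The supported format specifiers are `{}`/`{d}` (decimal), `{b}` (binary), and
+    /// `{x}`/`{X}` (lower/upper case hexadecimal).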
+ pub fn format( + self: Managed, + comptime fmt: []const u8, + options: std.fmt.FormatOptions, + out_stream: var, + ) !void { + return self.toConst().format(fmt, options, out_stream); + } + + /// Returns math.Order.lt, math.Order.eq, math.Order.gt if |a| < |b|, |a| == + /// |b| or |a| > |b| respectively. + pub fn orderAbs(a: Managed, b: Managed) math.Order { + return a.toConst().orderAbs(b.toConst()); + } + + /// Returns math.Order.lt, math.Order.eq, math.Order.gt if a < b, a == b or a + /// > b respectively. + pub fn order(a: Managed, b: Managed) math.Order { + return a.toConst().order(b.toConst()); + } + + /// Returns true if a == 0. + pub fn eqZero(a: Managed) bool { + return a.toConst().eqZero(); + } + + /// Returns true if |a| == |b|. + pub fn eqAbs(a: Managed, b: Managed) bool { + return a.toConst().eqAbs(b.toConst()); + } + + /// Returns true if a == b. + pub fn eq(a: Managed, b: Managed) bool { + return a.toConst().eq(b.toConst()); + } + + /// Normalize a possible sequence of leading zeros. + /// + /// [1, 2, 3, 4, 0] -> [1, 2, 3, 4] + /// [1, 2, 0, 0, 0] -> [1, 2] + /// [0, 0, 0, 0, 0] -> [0] + pub fn normalize(r: *Managed, length: usize) void { + assert(length > 0); + assert(length <= r.limbs.len); + + var j = length; + while (j > 0) : (j -= 1) { + if (r.limbs[j - 1] != 0) { + break; + } + } + + // Handle zero + r.setLen(if (j != 0) j else 1); + } + + /// r = a + scalar + /// + /// r and a may be aliases. + /// scalar is a primitive integer type. + /// + /// Returns an error if memory could not be allocated. + pub fn addScalar(r: *Managed, a: Const, scalar: var) Allocator.Error!void { + try r.ensureCapacity(math.max(a.limbs.len, calcLimbLen(scalar)) + 1); + var m = r.toMutable(); + m.addScalar(a, scalar); + r.setMetadata(m.positive, m.len); + } + + /// r = a + b + /// + /// r, a and b may be aliases. + /// + /// Returns an error if memory could not be allocated. + pub fn add(r: *Managed, a: Const, b: Const) Allocator.Error!void { + try r.ensureCapacity(math.max(a.limbs.len, b.limbs.len) + 1); + var m = r.toMutable(); + m.add(a, b); + r.setMetadata(m.positive, m.len); + } + + /// r = a - b + /// + /// r, a and b may be aliases. + /// + /// Returns an error if memory could not be allocated. + pub fn sub(r: *Managed, a: Const, b: Const) !void { + try r.ensureCapacity(math.max(a.limbs.len, b.limbs.len) + 1); + var m = r.toMutable(); + m.sub(a, b); + r.setMetadata(m.positive, m.len); + } + + /// rma = a * b + /// + /// rma, a and b may be aliases. However, it is more efficient if rma does not alias a or b. + /// + /// Returns an error if memory could not be allocated. + /// + /// rma's allocator is used for temporary storage to speed up the multiplication. + pub fn mul(rma: *Managed, a: Const, b: Const) !void { + try rma.ensureCapacity(a.limbs.len + b.limbs.len + 1); + var alias_count: usize = 0; + if (rma.limbs.ptr == a.limbs.ptr) + alias_count += 1; + if (rma.limbs.ptr == b.limbs.ptr) + alias_count += 1; + var m = rma.toMutable(); + if (alias_count == 0) { + m.mulNoAlias(a, b, rma.allocator); + } else { + const limb_count = calcMulLimbsBufferLen(a.limbs.len, b.limbs.len, alias_count); + const limbs_buffer = try rma.allocator.alloc(Limb, limb_count); + defer rma.allocator.free(limbs_buffer); + m.mul(a, b, limbs_buffer, rma.allocator); + } + rma.setMetadata(m.positive, m.len); + } + + /// q = a / b (rem r) + /// + /// a / b are floored (rounded towards 0). + /// + /// Returns an error if memory could not be allocated. 
+ /// + /// q's allocator is used for temporary storage to speed up the multiplication. + pub fn divFloor(q: *Managed, r: *Managed, a: Const, b: Const) !void { + try q.ensureCapacity(a.limbs.len + b.limbs.len + 1); + try r.ensureCapacity(a.limbs.len); + var mq = q.toMutable(); + var mr = r.toMutable(); + const limbs_buffer = try q.allocator.alloc(Limb, calcDivLimbsBufferLen(a.limbs.len, b.limbs.len)); + defer q.allocator.free(limbs_buffer); + mq.divFloor(&mr, a, b, limbs_buffer, q.allocator); + q.setMetadata(mq.positive, mq.len); + r.setMetadata(mr.positive, mr.len); + } + + /// q = a / b (rem r) + /// + /// a / b are truncated (rounded towards -inf). + /// + /// Returns an error if memory could not be allocated. + /// + /// q's allocator is used for temporary storage to speed up the multiplication. + pub fn divTrunc(q: *Managed, r: *Managed, a: Const, b: Const) !void { + try q.ensureCapacity(a.limbs.len + b.limbs.len + 1); + try r.ensureCapacity(a.limbs.len); + var mq = q.toMutable(); + var mr = r.toMutable(); + const limbs_buffer = try q.allocator.alloc(Limb, calcDivLimbsBufferLen(a.limbs.len, b.limbs.len)); + defer q.allocator.free(limbs_buffer); + mq.divTrunc(&mr, a, b, limbs_buffer, q.allocator); + q.setMetadata(mq.positive, mq.len); + r.setMetadata(mr.positive, mr.len); + } + + /// r = a << shift, in other words, r = a * 2^shift + pub fn shiftLeft(r: *Managed, a: Managed, shift: usize) !void { + try r.ensureCapacity(a.len() + (shift / Limb.bit_count) + 1); + var m = r.toMutable(); + m.shiftLeft(a.toConst(), shift); + r.setMetadata(m.positive, m.len); + } + + /// r = a >> shift + pub fn shiftRight(r: *Managed, a: Managed, shift: usize) !void { + if (a.len() <= shift / Limb.bit_count) { + r.metadata = 1; + r.limbs[0] = 0; + return; + } + + try r.ensureCapacity(a.len() - (shift / Limb.bit_count)); + var m = r.toMutable(); + m.shiftRight(a.toConst(), shift); + r.setMetadata(m.positive, m.len); + } + + /// r = a | b + /// + /// a and b are zero-extended to the longer of a or b. + pub fn bitOr(r: *Managed, a: Managed, b: Managed) !void { + try r.ensureCapacity(math.max(a.len(), b.len())); + var m = r.toMutable(); + m.bitOr(a.toConst(), b.toConst()); + r.setMetadata(m.positive, m.len); + } + + /// r = a & b + pub fn bitAnd(r: *Managed, a: Managed, b: Managed) !void { + try r.ensureCapacity(math.min(a.len(), b.len())); + var m = r.toMutable(); + m.bitAnd(a.toConst(), b.toConst()); + r.setMetadata(m.positive, m.len); + } + + /// r = a ^ b + pub fn bitXor(r: *Managed, a: Managed, b: Managed) !void { + try r.ensureCapacity(math.max(a.len(), b.len())); + var m = r.toMutable(); + m.bitXor(a.toConst(), b.toConst()); + r.setMetadata(m.positive, m.len); + } + + /// rma may alias x or y. + /// x and y may alias each other. + /// + /// rma's allocator is used for temporary storage to boost multiplication performance. + pub fn gcd(rma: *Managed, x: Managed, y: Managed) !void { + try rma.ensureCapacity(math.min(x.len(), y.len())); + var m = rma.toMutable(); + var limbs_buffer = std.ArrayList(Limb).init(rma.allocator); + defer limbs_buffer.deinit(); + try m.gcd(x.toConst(), y.toConst(), &limbs_buffer); + rma.setMetadata(m.positive, m.len); + } +}; + +/// Knuth 4.3.1, Algorithm M. +/// +/// r MUST NOT alias any of a or b. 
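+/// Both operands are normalized (leading zero limbs stripped) before multiplying, and the
+/// shorter of the two drives the outer loop; each of its limbs contributes a partial product
+/// that is added into r via llmulDigit.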
+fn llmulacc(opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const Limb) void { + @setRuntimeSafety(false); + + const a_norm = a[0..llnormalize(a)]; + const b_norm = b[0..llnormalize(b)]; + var x = a_norm; + var y = b_norm; + if (a_norm.len > b_norm.len) { + x = b_norm; + y = a_norm; + } + + assert(r.len >= x.len + y.len + 1); + + // 48 is a pretty abitrary size chosen based on performance of a factorial program. + if (x.len > 48) { + if (opt_allocator) |allocator| { + llmulacc_karatsuba(allocator, r, x, y) catch |err| switch (err) { + error.OutOfMemory => {}, // handled below + }; + } + } + + // Basecase multiplication + var i: usize = 0; + while (i < x.len) : (i += 1) { + llmulDigit(r[i..], y, x[i]); } } -test "big.int comptime_int set negative" { - var a = try Int.initSet(testing.allocator, -10); - defer a.deinit(); - - testing.expect(a.limbs[0] == 10); - testing.expect(a.isPositive() == false); -} - -test "big.int int set unaligned small" { - var a = try Int.initSet(testing.allocator, @as(u7, 45)); - defer a.deinit(); - - testing.expect(a.limbs[0] == 45); - testing.expect(a.isPositive() == true); -} - -test "big.int comptime_int to" { - const a = try Int.initSet(testing.allocator, 0xefffffff00000001eeeeeeefaaaaaaab); - defer a.deinit(); - - testing.expect((try a.to(u128)) == 0xefffffff00000001eeeeeeefaaaaaaab); -} - -test "big.int sub-limb to" { - const a = try Int.initSet(testing.allocator, 10); - defer a.deinit(); - - testing.expect((try a.to(u8)) == 10); -} - -test "big.int to target too small error" { - const a = try Int.initSet(testing.allocator, 0xffffffff); - defer a.deinit(); - - testing.expectError(error.TargetTooSmall, a.to(u8)); -} - -test "big.int normalize" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - try a.ensureCapacity(8); - - a.limbs[0] = 1; - a.limbs[1] = 2; - a.limbs[2] = 3; - a.limbs[3] = 0; - a.normalize(4); - testing.expect(a.len() == 3); - - a.limbs[0] = 1; - a.limbs[1] = 2; - a.limbs[2] = 3; - a.normalize(3); - testing.expect(a.len() == 3); - - a.limbs[0] = 0; - a.limbs[1] = 0; - a.normalize(2); - testing.expect(a.len() == 1); - - a.limbs[0] = 0; - a.normalize(1); - testing.expect(a.len() == 1); -} - -test "big.int normalize multi" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - try a.ensureCapacity(8); - - a.limbs[0] = 1; - a.limbs[1] = 2; - a.limbs[2] = 0; - a.limbs[3] = 0; - a.normalize(4); - testing.expect(a.len() == 2); - - a.limbs[0] = 1; - a.limbs[1] = 2; - a.limbs[2] = 3; - a.normalize(3); - testing.expect(a.len() == 3); - - a.limbs[0] = 0; - a.limbs[1] = 0; - a.limbs[2] = 0; - a.limbs[3] = 0; - a.normalize(4); - testing.expect(a.len() == 1); - - a.limbs[0] = 0; - a.normalize(1); - testing.expect(a.len() == 1); -} - -test "big.int parity" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.set(0); - testing.expect(a.isEven()); - testing.expect(!a.isOdd()); - - try a.set(7); - testing.expect(!a.isEven()); - testing.expect(a.isOdd()); -} - -test "big.int bitcount + sizeInBase" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.set(0b100); - testing.expect(a.bitCountAbs() == 3); - testing.expect(a.sizeInBase(2) >= 3); - testing.expect(a.sizeInBase(10) >= 1); - - a.negate(); - testing.expect(a.bitCountAbs() == 3); - testing.expect(a.sizeInBase(2) >= 4); - testing.expect(a.sizeInBase(10) >= 2); - - try a.set(0xffffffff); - testing.expect(a.bitCountAbs() == 32); - testing.expect(a.sizeInBase(2) >= 32); - testing.expect(a.sizeInBase(10) >= 10); - - try 
a.shiftLeft(a, 5000); - testing.expect(a.bitCountAbs() == 5032); - testing.expect(a.sizeInBase(2) >= 5032); - a.setSign(false); - - testing.expect(a.bitCountAbs() == 5032); - testing.expect(a.sizeInBase(2) >= 5033); -} - -test "big.int bitcount/to" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.set(0); - testing.expect(a.bitCountTwosComp() == 0); - - testing.expect((try a.to(u0)) == 0); - testing.expect((try a.to(i0)) == 0); - - try a.set(-1); - testing.expect(a.bitCountTwosComp() == 1); - testing.expect((try a.to(i1)) == -1); - - try a.set(-8); - testing.expect(a.bitCountTwosComp() == 4); - testing.expect((try a.to(i4)) == -8); - - try a.set(127); - testing.expect(a.bitCountTwosComp() == 7); - testing.expect((try a.to(u7)) == 127); - - try a.set(-128); - testing.expect(a.bitCountTwosComp() == 8); - testing.expect((try a.to(i8)) == -128); - - try a.set(-129); - testing.expect(a.bitCountTwosComp() == 9); - testing.expect((try a.to(i9)) == -129); -} - -test "big.int fits" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.set(0); - testing.expect(a.fits(u0)); - testing.expect(a.fits(i0)); - - try a.set(255); - testing.expect(!a.fits(u0)); - testing.expect(!a.fits(u1)); - testing.expect(!a.fits(i8)); - testing.expect(a.fits(u8)); - testing.expect(a.fits(u9)); - testing.expect(a.fits(i9)); - - try a.set(-128); - testing.expect(!a.fits(i7)); - testing.expect(a.fits(i8)); - testing.expect(a.fits(i9)); - testing.expect(!a.fits(u9)); - - try a.set(0x1ffffffffeeeeeeee); - testing.expect(!a.fits(u32)); - testing.expect(!a.fits(u64)); - testing.expect(a.fits(u65)); -} - -test "big.int string set" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.setString(10, "120317241209124781241290847124"); - testing.expect((try a.to(u128)) == 120317241209124781241290847124); -} - -test "big.int string negative" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.setString(10, "-1023"); - testing.expect((try a.to(i32)) == -1023); -} - -test "big.int string set number with underscores" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.setString(10, "__1_2_0_3_1_7_2_4_1_2_0_____9_1__2__4_7_8_1_2_4_1_2_9_0_8_4_7_1_2_4___"); - testing.expect((try a.to(u128)) == 120317241209124781241290847124); -} - -test "big.int string set case insensitive number" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.setString(16, "aB_cD_eF"); - testing.expect((try a.to(u32)) == 0xabcdef); -} - -test "big.int string set bad char error" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - testing.expectError(error.InvalidCharForDigit, a.setString(10, "x")); -} - -test "big.int string set bad base error" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - testing.expectError(error.InvalidBase, a.setString(45, "10")); -} - -test "big.int string to" { - const a = try Int.initSet(testing.allocator, 120317241209124781241290847124); - defer a.deinit(); - - const as = try a.toString(testing.allocator, 10, false); - defer testing.allocator.free(as); - const es = "120317241209124781241290847124"; - - testing.expect(mem.eql(u8, as, es)); -} - -test "big.int string to base base error" { - const a = try Int.initSet(testing.allocator, 0xffffffff); - defer a.deinit(); - - testing.expectError(error.InvalidBase, a.toString(testing.allocator, 45, false)); -} - -test "big.int string to base 2" { - const a = try Int.initSet(testing.allocator, -0b1011); - defer a.deinit(); - - const 
as = try a.toString(testing.allocator, 2, false); - defer testing.allocator.free(as); - const es = "-1011"; - - testing.expect(mem.eql(u8, as, es)); -} - -test "big.int string to base 16" { - const a = try Int.initSet(testing.allocator, 0xefffffff00000001eeeeeeefaaaaaaab); - defer a.deinit(); - - const as = try a.toString(testing.allocator, 16, false); - defer testing.allocator.free(as); - const es = "efffffff00000001eeeeeeefaaaaaaab"; - - testing.expect(mem.eql(u8, as, es)); -} - -test "big.int neg string to" { - const a = try Int.initSet(testing.allocator, -123907434); - defer a.deinit(); - - const as = try a.toString(testing.allocator, 10, false); - defer testing.allocator.free(as); - const es = "-123907434"; - - testing.expect(mem.eql(u8, as, es)); -} - -test "big.int zero string to" { - const a = try Int.initSet(testing.allocator, 0); - defer a.deinit(); - - const as = try a.toString(testing.allocator, 10, false); - defer testing.allocator.free(as); - const es = "0"; - - testing.expect(mem.eql(u8, as, es)); -} - -test "big.int clone" { - var a = try Int.initSet(testing.allocator, 1234); - defer a.deinit(); - const b = try a.clone(); - defer b.deinit(); - - testing.expect((try a.to(u32)) == 1234); - testing.expect((try b.to(u32)) == 1234); - - try a.set(77); - testing.expect((try a.to(u32)) == 77); - testing.expect((try b.to(u32)) == 1234); -} - -test "big.int swap" { - var a = try Int.initSet(testing.allocator, 1234); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 5678); - defer b.deinit(); - - testing.expect((try a.to(u32)) == 1234); - testing.expect((try b.to(u32)) == 5678); - - a.swap(&b); - - testing.expect((try a.to(u32)) == 5678); - testing.expect((try b.to(u32)) == 1234); -} - -test "big.int to negative" { - var a = try Int.initSet(testing.allocator, -10); - defer a.deinit(); - - testing.expect((try a.to(i32)) == -10); -} - -test "big.int compare" { - var a = try Int.initSet(testing.allocator, -11); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 10); - defer b.deinit(); - - testing.expect(a.cmpAbs(b) == .gt); - testing.expect(a.cmp(b) == .lt); -} - -test "big.int compare similar" { - var a = try Int.initSet(testing.allocator, 0xffffffffeeeeeeeeffffffffeeeeeeee); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0xffffffffeeeeeeeeffffffffeeeeeeef); - defer b.deinit(); - - testing.expect(a.cmpAbs(b) == .lt); - testing.expect(b.cmpAbs(a) == .gt); -} - -test "big.int compare different limb size" { - var a = try Int.initSet(testing.allocator, maxInt(Limb) + 1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 1); - defer b.deinit(); - - testing.expect(a.cmpAbs(b) == .gt); - testing.expect(b.cmpAbs(a) == .lt); -} - -test "big.int compare multi-limb" { - var a = try Int.initSet(testing.allocator, -0x7777777799999999ffffeeeeffffeeeeffffeeeef); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x7777777799999999ffffeeeeffffeeeeffffeeeee); - defer b.deinit(); - - testing.expect(a.cmpAbs(b) == .gt); - testing.expect(a.cmp(b) == .lt); -} - -test "big.int equality" { - var a = try Int.initSet(testing.allocator, 0xffffffff1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, -0xffffffff1); - defer b.deinit(); - - testing.expect(a.eqAbs(b)); - testing.expect(!a.eq(b)); -} - -test "big.int abs" { - var a = try Int.initSet(testing.allocator, -5); - defer a.deinit(); - - a.abs(); - testing.expect((try a.to(u32)) == 5); - - a.abs(); - testing.expect((try a.to(u32)) == 5); -} - -test "big.int negate" { 
- var a = try Int.initSet(testing.allocator, 5); - defer a.deinit(); - - a.negate(); - testing.expect((try a.to(i32)) == -5); - - a.negate(); - testing.expect((try a.to(i32)) == 5); -} - -test "big.int add single-single" { - var a = try Int.initSet(testing.allocator, 50); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 5); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.add(a, b); - - testing.expect((try c.to(u32)) == 55); -} - -test "big.int add multi-single" { - var a = try Int.initSet(testing.allocator, maxInt(Limb) + 1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 1); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - - try c.add(a, b); - testing.expect((try c.to(DoubleLimb)) == maxInt(Limb) + 2); - - try c.add(b, a); - testing.expect((try c.to(DoubleLimb)) == maxInt(Limb) + 2); -} - -test "big.int add multi-multi" { - const op1 = 0xefefefef7f7f7f7f; - const op2 = 0xfefefefe9f9f9f9f; - var a = try Int.initSet(testing.allocator, op1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, op2); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.add(a, b); - - testing.expect((try c.to(u128)) == op1 + op2); -} - -test "big.int add zero-zero" { - var a = try Int.initSet(testing.allocator, 0); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.add(a, b); - - testing.expect((try c.to(u32)) == 0); -} - -test "big.int add alias multi-limb nonzero-zero" { - const op1 = 0xffffffff777777771; - var a = try Int.initSet(testing.allocator, op1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0); - defer b.deinit(); - - try a.add(a, b); - - testing.expect((try a.to(u128)) == op1); -} - -test "big.int add sign" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - const one = try Int.initSet(testing.allocator, 1); - defer one.deinit(); - const two = try Int.initSet(testing.allocator, 2); - defer two.deinit(); - const neg_one = try Int.initSet(testing.allocator, -1); - defer neg_one.deinit(); - const neg_two = try Int.initSet(testing.allocator, -2); - defer neg_two.deinit(); - - try a.add(one, two); - testing.expect((try a.to(i32)) == 3); - - try a.add(neg_one, two); - testing.expect((try a.to(i32)) == 1); - - try a.add(one, neg_two); - testing.expect((try a.to(i32)) == -1); - - try a.add(neg_one, neg_two); - testing.expect((try a.to(i32)) == -3); -} - -test "big.int sub single-single" { - var a = try Int.initSet(testing.allocator, 50); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 5); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.sub(a, b); - - testing.expect((try c.to(u32)) == 45); -} - -test "big.int sub multi-single" { - var a = try Int.initSet(testing.allocator, maxInt(Limb) + 1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 1); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.sub(a, b); - - testing.expect((try c.to(Limb)) == maxInt(Limb)); -} - -test "big.int sub multi-multi" { - const op1 = 0xefefefefefefefefefefefef; - const op2 = 0xabababababababababababab; - - var a = try Int.initSet(testing.allocator, op1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, op2); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer 
c.deinit(); - try c.sub(a, b); - - testing.expect((try c.to(u128)) == op1 - op2); -} - -test "big.int sub equal" { - var a = try Int.initSet(testing.allocator, 0x11efefefefefefefefefefefef); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x11efefefefefefefefefefefef); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.sub(a, b); - - testing.expect((try c.to(u32)) == 0); -} - -test "big.int sub sign" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - const one = try Int.initSet(testing.allocator, 1); - defer one.deinit(); - const two = try Int.initSet(testing.allocator, 2); - defer two.deinit(); - const neg_one = try Int.initSet(testing.allocator, -1); - defer neg_one.deinit(); - const neg_two = try Int.initSet(testing.allocator, -2); - defer neg_two.deinit(); - - try a.sub(one, two); - testing.expect((try a.to(i32)) == -1); - - try a.sub(neg_one, two); - testing.expect((try a.to(i32)) == -3); - - try a.sub(one, neg_two); - testing.expect((try a.to(i32)) == 3); - - try a.sub(neg_one, neg_two); - testing.expect((try a.to(i32)) == 1); - - try a.sub(neg_two, neg_one); - testing.expect((try a.to(i32)) == -1); -} - -test "big.int mul single-single" { - var a = try Int.initSet(testing.allocator, 50); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 5); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.mul(a, b); - - testing.expect((try c.to(u64)) == 250); -} - -test "big.int mul multi-single" { - var a = try Int.initSet(testing.allocator, maxInt(Limb)); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 2); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.mul(a, b); - - testing.expect((try c.to(DoubleLimb)) == 2 * maxInt(Limb)); -} - -test "big.int mul multi-multi" { - const op1 = 0x998888efefefefefefefef; - const op2 = 0x333000abababababababab; - var a = try Int.initSet(testing.allocator, op1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, op2); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.mul(a, b); - - testing.expect((try c.to(u256)) == op1 * op2); -} - -test "big.int mul alias r with a" { - var a = try Int.initSet(testing.allocator, maxInt(Limb)); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 2); - defer b.deinit(); - - try a.mul(a, b); - - testing.expect((try a.to(DoubleLimb)) == 2 * maxInt(Limb)); -} - -test "big.int mul alias r with b" { - var a = try Int.initSet(testing.allocator, maxInt(Limb)); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 2); - defer b.deinit(); - - try a.mul(b, a); - - testing.expect((try a.to(DoubleLimb)) == 2 * maxInt(Limb)); -} - -test "big.int mul alias r with a and b" { - var a = try Int.initSet(testing.allocator, maxInt(Limb)); - defer a.deinit(); - - try a.mul(a, a); - - testing.expect((try a.to(DoubleLimb)) == maxInt(Limb) * maxInt(Limb)); -} - -test "big.int mul a*0" { - var a = try Int.initSet(testing.allocator, 0xefefefefefefefef); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer c.deinit(); - try c.mul(a, b); - - testing.expect((try c.to(u32)) == 0); -} - -test "big.int mul 0*0" { - var a = try Int.initSet(testing.allocator, 0); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0); - defer b.deinit(); - - var c = try Int.init(testing.allocator); - defer 
c.deinit(); - try c.mul(a, b); - - testing.expect((try c.to(u32)) == 0); -} - -test "big.int div single-single no rem" { - var a = try Int.initSet(testing.allocator, 50); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 5); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u32)) == 10); - testing.expect((try r.to(u32)) == 0); -} - -test "big.int div single-single with rem" { - var a = try Int.initSet(testing.allocator, 49); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 5); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u32)) == 9); - testing.expect((try r.to(u32)) == 4); -} - -test "big.int div multi-single no rem" { - const op1 = 0xffffeeeeddddcccc; - const op2 = 34; - - var a = try Int.initSet(testing.allocator, op1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, op2); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u64)) == op1 / op2); - testing.expect((try r.to(u64)) == 0); -} - -test "big.int div multi-single with rem" { - const op1 = 0xffffeeeeddddcccf; - const op2 = 34; - - var a = try Int.initSet(testing.allocator, op1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, op2); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u64)) == op1 / op2); - testing.expect((try r.to(u64)) == 3); -} - -test "big.int div multi>2-single" { - const op1 = 0xfefefefefefefefefefefefefefefefe; - const op2 = 0xefab8; - - var a = try Int.initSet(testing.allocator, op1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, op2); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u128)) == op1 / op2); - testing.expect((try r.to(u32)) == 0x3e4e); -} - -test "big.int div single-single q < r" { - var a = try Int.initSet(testing.allocator, 0x0078f432); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x01000000); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u64)) == 0); - testing.expect((try r.to(u64)) == 0x0078f432); -} - -test "big.int div single-single q == r" { - var a = try Int.initSet(testing.allocator, 10); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 10); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u64)) == 1); - testing.expect((try r.to(u64)) == 0); -} - -test "big.int div q=0 alias" { - var a = try Int.initSet(testing.allocator, 3); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 10); - defer b.deinit(); - - try Int.divTrunc(&a, &b, a, b); - - 
testing.expect((try a.to(u64)) == 0); - testing.expect((try b.to(u64)) == 3); -} - -test "big.int div multi-multi q < r" { - const op1 = 0x1ffffffff0078f432; - const op2 = 0x1ffffffff01000000; - var a = try Int.initSet(testing.allocator, op1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, op2); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u128)) == 0); - testing.expect((try r.to(u128)) == op1); -} - -test "big.int div trunc single-single +/+" { - const u: i32 = 5; - const v: i32 = 3; - - var a = try Int.initSet(testing.allocator, u); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, v); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - // n = q * d + r - // 5 = 1 * 3 + 2 - const eq = @divTrunc(u, v); - const er = @mod(u, v); - - testing.expect((try q.to(i32)) == eq); - testing.expect((try r.to(i32)) == er); -} - -test "big.int div trunc single-single -/+" { - const u: i32 = -5; - const v: i32 = 3; - - var a = try Int.initSet(testing.allocator, u); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, v); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - // n = q * d + r - // -5 = 1 * -3 - 2 - const eq = -1; - const er = -2; - - testing.expect((try q.to(i32)) == eq); - testing.expect((try r.to(i32)) == er); -} - -test "big.int div trunc single-single +/-" { - const u: i32 = 5; - const v: i32 = -3; - - var a = try Int.initSet(testing.allocator, u); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, v); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - // n = q * d + r - // 5 = -1 * -3 + 2 - const eq = -1; - const er = 2; - - testing.expect((try q.to(i32)) == eq); - testing.expect((try r.to(i32)) == er); -} - -test "big.int div trunc single-single -/-" { - const u: i32 = -5; - const v: i32 = -3; - - var a = try Int.initSet(testing.allocator, u); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, v); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - // n = q * d + r - // -5 = 1 * -3 - 2 - const eq = 1; - const er = -2; - - testing.expect((try q.to(i32)) == eq); - testing.expect((try r.to(i32)) == er); -} - -test "big.int div floor single-single +/+" { - const u: i32 = 5; - const v: i32 = 3; - - var a = try Int.initSet(testing.allocator, u); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, v); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divFloor(&q, &r, a, b); - - // n = q * d + r - // 5 = 1 * 3 + 2 - const eq = 1; - const er = 2; - - testing.expect((try q.to(i32)) == eq); - testing.expect((try r.to(i32)) == er); -} - -test "big.int div floor single-single -/+" { - const u: i32 = -5; - const v: i32 = 3; - - var a = try Int.initSet(testing.allocator, u); - defer a.deinit(); - var b = try 
Int.initSet(testing.allocator, v); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divFloor(&q, &r, a, b); - - // n = q * d + r - // -5 = -2 * 3 + 1 - const eq = -2; - const er = 1; - - testing.expect((try q.to(i32)) == eq); - testing.expect((try r.to(i32)) == er); -} - -test "big.int div floor single-single +/-" { - const u: i32 = 5; - const v: i32 = -3; - - var a = try Int.initSet(testing.allocator, u); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, v); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divFloor(&q, &r, a, b); - - // n = q * d + r - // 5 = -2 * -3 - 1 - const eq = -2; - const er = -1; - - testing.expect((try q.to(i32)) == eq); - testing.expect((try r.to(i32)) == er); -} - -test "big.int div floor single-single -/-" { - const u: i32 = -5; - const v: i32 = -3; - - var a = try Int.initSet(testing.allocator, u); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, v); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divFloor(&q, &r, a, b); - - // n = q * d + r - // -5 = 2 * -3 + 1 - const eq = 1; - const er = -2; - - testing.expect((try q.to(i32)) == eq); - testing.expect((try r.to(i32)) == er); -} - -test "big.int div multi-multi with rem" { - var a = try Int.initSet(testing.allocator, 0x8888999911110000ffffeeeeddddccccbbbbaaaa9999); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x99990000111122223333); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b); - testing.expect((try r.to(u128)) == 0x28de0acacd806823638); -} - -test "big.int div multi-multi no rem" { - var a = try Int.initSet(testing.allocator, 0x8888999911110000ffffeeeedb4fec200ee3a4286361); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x99990000111122223333); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b); - testing.expect((try r.to(u128)) == 0); -} - -test "big.int div multi-multi (2 branch)" { - var a = try Int.initSet(testing.allocator, 0x866666665555555588888887777777761111111111111111); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x86666666555555554444444433333333); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u128)) == 0x10000000000000000); - testing.expect((try r.to(u128)) == 0x44444443444444431111111111111111); -} - -test "big.int div multi-multi (3.1/3.3 branch)" { - var a = try Int.initSet(testing.allocator, 0x11111111111111111111111111111111111111111111111111111111111111); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x1111111111111111111111111111111111111111171); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try 
Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u128)) == 0xfffffffffffffffffff); - testing.expect((try r.to(u256)) == 0x1111111111111111111110b12222222222222222282); -} - -test "big.int div multi-single zero-limb trailing" { - var a = try Int.initSet(testing.allocator, 0x60000000000000000000000000000000000000000000000000000000000000000); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x10000000000000000); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - var expected = try Int.initSet(testing.allocator, 0x6000000000000000000000000000000000000000000000000); - defer expected.deinit(); - testing.expect(q.eq(expected)); - testing.expect(r.eqZero()); -} - -test "big.int div multi-multi zero-limb trailing (with rem)" { - var a = try Int.initSet(testing.allocator, 0x86666666555555558888888777777776111111111111111100000000000000000000000000000000); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x8666666655555555444444443333333300000000000000000000000000000000); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u128)) == 0x10000000000000000); - - const rs = try r.toString(testing.allocator, 16, false); - defer testing.allocator.free(rs); - testing.expect(std.mem.eql(u8, rs, "4444444344444443111111111111111100000000000000000000000000000000")); -} - -test "big.int div multi-multi zero-limb trailing (with rem) and dividend zero-limb count > divisor zero-limb count" { - var a = try Int.initSet(testing.allocator, 0x8666666655555555888888877777777611111111111111110000000000000000); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x8666666655555555444444443333333300000000000000000000000000000000); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - testing.expect((try q.to(u128)) == 0x1); - - const rs = try r.toString(testing.allocator, 16, false); - defer testing.allocator.free(rs); - testing.expect(std.mem.eql(u8, rs, "444444434444444311111111111111110000000000000000")); -} - -test "big.int div multi-multi zero-limb trailing (with rem) and dividend zero-limb count < divisor zero-limb count" { - var a = try Int.initSet(testing.allocator, 0x86666666555555558888888777777776111111111111111100000000000000000000000000000000); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x866666665555555544444444333333330000000000000000); - defer b.deinit(); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - const qs = try q.toString(testing.allocator, 16, false); - defer testing.allocator.free(qs); - testing.expect(std.mem.eql(u8, qs, "10000000000000000820820803105186f")); - - const rs = try r.toString(testing.allocator, 16, false); - defer testing.allocator.free(rs); - testing.expect(std.mem.eql(u8, rs, "4e11f2baa5896a321d463b543d0104e30000000000000000")); -} - -test "big.int div multi-multi fuzz case #1" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - var b = try Int.init(testing.allocator); - defer b.deinit(); - - try a.setString(16, 
"ffffffffffffffffffffffffffffc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - try b.setString(16, "3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0000000000000000000000000000000000001ffffffffffffffffffffffffffffffffffffffffffffffffffc000000000000000000000000000000007fffffffffff"); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - const qs = try q.toString(testing.allocator, 16, false); - defer testing.allocator.free(qs); - testing.expect(std.mem.eql(u8, qs, "3ffffffffffffffffffffffffffff0000000000000000000000000000000000001ffffffffffffffffffffffffffff7fffffffe000000000000000000000000000180000000000000000000003fffffbfffffffdfffffffffffffeffff800000100101000000100000000020003fffffdfbfffffe3ffffffffffffeffff7fffc00800a100000017ffe000002000400007efbfff7fe9f00000037ffff3fff7fffa004006100000009ffe00000190038200bf7d2ff7fefe80400060000f7d7f8fbf9401fe38e0403ffc0bdffffa51102c300d7be5ef9df4e5060007b0127ad3fa69f97d0f820b6605ff617ddf7f32ad7a05c0d03f2e7bc78a6000e087a8bbcdc59e07a5a079128a7861f553ddebed7e8e56701756f9ead39b48cd1b0831889ea6ec1fddf643d0565b075ff07e6caea4e2854ec9227fd635ed60a2f5eef2893052ffd54718fa08604acbf6a15e78a467c4a3c53c0278af06c4416573f925491b195e8fd79302cb1aaf7caf4ecfc9aec1254cc969786363ac729f914c6ddcc26738d6b0facd54eba026580aba2eb6482a088b0d224a8852420b91ec1")); - - const rs = try r.toString(testing.allocator, 16, false); - defer testing.allocator.free(rs); - testing.expect(std.mem.eql(u8, rs, "310d1d4c414426b4836c2635bad1df3a424e50cbdd167ffccb4dfff57d36b4aae0d6ca0910698220171a0f3373c1060a046c2812f0027e321f72979daa5e7973214170d49e885de0c0ecc167837d44502430674a82522e5df6a0759548052420b91ec1")); -} - -test "big.int div multi-multi fuzz case #2" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - var b = try Int.init(testing.allocator); - defer b.deinit(); - - try a.setString(16, "3ffffffffe00000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffe000000000000000000000000000000000000000000000000000000000000001fffffffffffffffff800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffc000000000000000000000000000000000000000000000000000000000000000"); - try b.setString(16, "ffc0000000000000000000000000000000000000000000000000"); - - var q = try Int.init(testing.allocator); - defer q.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - try Int.divTrunc(&q, &r, a, b); - - const qs = try 
q.toString(testing.allocator, 16, false); - defer testing.allocator.free(qs); - testing.expect(std.mem.eql(u8, qs, "40100400fe3f8fe3f8fe3f8fe3f8fe3f8fe4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f91e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4992649926499264991e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4792e4b92e4b92e4b92e4b92a4a92a4a92a4")); - - const rs = try r.toString(testing.allocator, 16, false); - defer testing.allocator.free(rs); - testing.expect(std.mem.eql(u8, rs, "a900000000000000000000000000000000000000000000000000")); -} - -test "big.int shift-right single" { - var a = try Int.initSet(testing.allocator, 0xffff0000); - defer a.deinit(); - try a.shiftRight(a, 16); - - testing.expect((try a.to(u32)) == 0xffff); -} - -test "big.int shift-right multi" { - var a = try Int.initSet(testing.allocator, 0xffff0000eeee1111dddd2222cccc3333); - defer a.deinit(); - try a.shiftRight(a, 67); - - testing.expect((try a.to(u64)) == 0x1fffe0001dddc222); -} - -test "big.int shift-left single" { - var a = try Int.initSet(testing.allocator, 0xffff); - defer a.deinit(); - try a.shiftLeft(a, 16); - - testing.expect((try a.to(u64)) == 0xffff0000); -} - -test "big.int shift-left multi" { - var a = try Int.initSet(testing.allocator, 0x1fffe0001dddc222); - defer a.deinit(); - try a.shiftLeft(a, 67); - - testing.expect((try a.to(u128)) == 0xffff0000eeee11100000000000000000); -} - -test "big.int shift-right negative" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.shiftRight(try Int.initSet(testing.allocator, -20), 2); - defer a.deinit(); - testing.expect((try a.to(i32)) == -20 >> 2); - - try a.shiftRight(try Int.initSet(testing.allocator, -5), 10); - defer a.deinit(); - testing.expect((try a.to(i32)) == -5 >> 10); -} - -test "big.int shift-left negative" { - var a = try Int.init(testing.allocator); - defer a.deinit(); - - try a.shiftRight(try Int.initSet(testing.allocator, -10), 1232); - defer a.deinit(); - testing.expect((try a.to(i32)) == -10 >> 1232); -} - -test "big.int bitwise and simple" { - var a = try Int.initSet(testing.allocator, 0xffffffff11111111); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0xeeeeeeee22222222); - defer b.deinit(); - - try a.bitAnd(a, b); - - testing.expect((try a.to(u64)) == 0xeeeeeeee00000000); -} - -test "big.int bitwise and multi-limb" { - var a = try Int.initSet(testing.allocator, maxInt(Limb) + 1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, maxInt(Limb)); - defer b.deinit(); - - try a.bitAnd(a, b); - - testing.expect((try a.to(u128)) == 0); -} - -test "big.int bitwise xor simple" { - var a = try Int.initSet(testing.allocator, 0xffffffff11111111); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0xeeeeeeee22222222); - defer b.deinit(); - - try a.bitXor(a, b); - - testing.expect((try a.to(u64)) == 0x1111111133333333); -} - -test "big.int bitwise xor multi-limb" { - var a = try Int.initSet(testing.allocator, maxInt(Limb) + 1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, maxInt(Limb)); - defer b.deinit(); - - try a.bitXor(a, b); - - testing.expect((try a.to(DoubleLimb)) == (maxInt(Limb) + 1) ^ maxInt(Limb)); -} - -test "big.int bitwise or simple" { - var a = try Int.initSet(testing.allocator, 0xffffffff11111111); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0xeeeeeeee22222222); - defer b.deinit(); - - try a.bitOr(a, b); - - testing.expect((try 
a.to(u64)) == 0xffffffff33333333); -} - -test "big.int bitwise or multi-limb" { - var a = try Int.initSet(testing.allocator, maxInt(Limb) + 1); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, maxInt(Limb)); - defer b.deinit(); - - try a.bitOr(a, b); - - // TODO: big.int.cpp or is wrong on multi-limb. - testing.expect((try a.to(DoubleLimb)) == (maxInt(Limb) + 1) + maxInt(Limb)); -} - -test "big.int var args" { - var a = try Int.initSet(testing.allocator, 5); - defer a.deinit(); - - const b = try Int.initSet(testing.allocator, 6); - defer b.deinit(); - try a.add(a, b); - testing.expect((try a.to(u64)) == 11); - - const c = try Int.initSet(testing.allocator, 11); - defer c.deinit(); - testing.expect(a.cmp(c) == .eq); - - const d = try Int.initSet(testing.allocator, 14); - defer d.deinit(); - testing.expect(a.cmp(d) != .gt); -} - -test "big.int gcd non-one small" { - var a = try Int.initSet(testing.allocator, 17); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 97); - defer b.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - - try r.gcd(a, b); - - testing.expect((try r.to(u32)) == 1); -} - -test "big.int gcd non-one small" { - var a = try Int.initSet(testing.allocator, 4864); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 3458); - defer b.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - - try r.gcd(a, b); - - testing.expect((try r.to(u32)) == 38); -} - -test "big.int gcd non-one large" { - var a = try Int.initSet(testing.allocator, 0xffffffffffffffff); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0xffffffffffffffff7777); - defer b.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - - try r.gcd(a, b); - - testing.expect((try r.to(u32)) == 4369); -} - -test "big.int gcd large multi-limb result" { - var a = try Int.initSet(testing.allocator, 0x12345678123456781234567812345678123456781234567812345678); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 0x12345671234567123456712345671234567123456712345671234567); - defer b.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - - try r.gcd(a, b); - - testing.expect((try r.to(u256)) == 0xf000000ff00000fff0000ffff000fffff00ffffff1); -} - -test "big.int gcd one large" { - var a = try Int.initSet(testing.allocator, 1897056385327307); - defer a.deinit(); - var b = try Int.initSet(testing.allocator, 2251799813685248); - defer b.deinit(); - var r = try Int.init(testing.allocator); - defer r.deinit(); - - try r.gcd(a, b); - - testing.expect((try r.to(u64)) == 1); +/// Knuth 4.3.1, Algorithm M. +/// +/// r MUST NOT alias any of a or b. 
+fn llmulacc_karatsuba(allocator: *Allocator, r: []Limb, x: []const Limb, y: []const Limb) error{OutOfMemory}!void { + @setRuntimeSafety(false); + + assert(r.len >= x.len + y.len + 1); + + const split = @divFloor(x.len, 2); + var x0 = x[0..split]; + var x1 = x[split..x.len]; + var y0 = y[0..split]; + var y1 = y[split..y.len]; + + var tmp = try allocator.alloc(Limb, x1.len + y1.len + 1); + defer allocator.free(tmp); + mem.set(Limb, tmp, 0); + + llmulacc(allocator, tmp, x1, y1); + + var length = llnormalize(tmp); + _ = llaccum(r[split..], tmp[0..length]); + _ = llaccum(r[split * 2 ..], tmp[0..length]); + + mem.set(Limb, tmp[0..length], 0); + + llmulacc(allocator, tmp, x0, y0); + + length = llnormalize(tmp); + _ = llaccum(r[0..], tmp[0..length]); + _ = llaccum(r[split..], tmp[0..length]); + + const x_cmp = llcmp(x1, x0); + const y_cmp = llcmp(y1, y0); + if (x_cmp * y_cmp == 0) { + return; + } + const x0_len = llnormalize(x0); + const x1_len = llnormalize(x1); + var j0 = try allocator.alloc(Limb, math.max(x0_len, x1_len)); + defer allocator.free(j0); + if (x_cmp == 1) { + llsub(j0, x1[0..x1_len], x0[0..x0_len]); + } else { + llsub(j0, x0[0..x0_len], x1[0..x1_len]); + } + + const y0_len = llnormalize(y0); + const y1_len = llnormalize(y1); + var j1 = try allocator.alloc(Limb, math.max(y0_len, y1_len)); + defer allocator.free(j1); + if (y_cmp == 1) { + llsub(j1, y1[0..y1_len], y0[0..y0_len]); + } else { + llsub(j1, y0[0..y0_len], y1[0..y1_len]); + } + const j0_len = llnormalize(j0); + const j1_len = llnormalize(j1); + if (x_cmp == y_cmp) { + mem.set(Limb, tmp[0..length], 0); + llmulacc(allocator, tmp, j0, j1); + + length = llnormalize(tmp); + llsub(r[split..], r[split..], tmp[0..length]); + } else { + llmulacc(allocator, r[split..], j0, j1); + } +} + +// r = r + a +fn llaccum(r: []Limb, a: []const Limb) Limb { + @setRuntimeSafety(false); + assert(r.len != 0 and a.len != 0); + assert(r.len >= a.len); + + var i: usize = 0; + var carry: Limb = 0; + + while (i < a.len) : (i += 1) { + var c: Limb = 0; + c += @boolToInt(@addWithOverflow(Limb, r[i], a[i], &r[i])); + c += @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i])); + carry = c; + } + + while ((carry != 0) and i < r.len) : (i += 1) { + carry = @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i])); + } + + return carry; +} + +/// Returns -1, 0, 1 if |a| < |b|, |a| == |b| or |a| > |b| respectively for limbs. +pub fn llcmp(a: []const Limb, b: []const Limb) i8 { + @setRuntimeSafety(false); + const a_len = llnormalize(a); + const b_len = llnormalize(b); + if (a_len < b_len) { + return -1; + } + if (a_len > b_len) { + return 1; + } + + var i: usize = a_len - 1; + while (i != 0) : (i -= 1) { + if (a[i] != b[i]) { + break; + } + } + + if (a[i] < b[i]) { + return -1; + } else if (a[i] > b[i]) { + return 1; + } else { + return 0; + } +} + +fn llmulDigit(acc: []Limb, y: []const Limb, xi: Limb) void { + @setRuntimeSafety(false); + if (xi == 0) { + return; + } + + var carry: usize = 0; + var a_lo = acc[0..y.len]; + var a_hi = acc[y.len..]; + + var j: usize = 0; + while (j < a_lo.len) : (j += 1) { + a_lo[j] = @call(.{ .modifier = .always_inline }, addMulLimbWithCarry, .{ a_lo[j], y[j], xi, &carry }); + } + + j = 0; + while ((carry != 0) and (j < a_hi.len)) : (j += 1) { + carry = @boolToInt(@addWithOverflow(Limb, a_hi[j], carry, &a_hi[j])); + } +} + +/// returns the min length the limb could be. 
+fn llnormalize(a: []const Limb) usize { + @setRuntimeSafety(false); + var j = a.len; + while (j > 0) : (j -= 1) { + if (a[j - 1] != 0) { + break; + } + } + + // Handle zero + return if (j != 0) j else 1; +} + +/// Knuth 4.3.1, Algorithm S. +fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void { + @setRuntimeSafety(false); + assert(a.len != 0 and b.len != 0); + assert(a.len > b.len or (a.len == b.len and a[a.len - 1] >= b[b.len - 1])); + assert(r.len >= a.len); + + var i: usize = 0; + var borrow: Limb = 0; + + while (i < b.len) : (i += 1) { + var c: Limb = 0; + c += @boolToInt(@subWithOverflow(Limb, a[i], b[i], &r[i])); + c += @boolToInt(@subWithOverflow(Limb, r[i], borrow, &r[i])); + borrow = c; + } + + while (i < a.len) : (i += 1) { + borrow = @boolToInt(@subWithOverflow(Limb, a[i], borrow, &r[i])); + } + + assert(borrow == 0); +} + +/// Knuth 4.3.1, Algorithm A. +fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void { + @setRuntimeSafety(false); + assert(a.len != 0 and b.len != 0); + assert(a.len >= b.len); + assert(r.len >= a.len + 1); + + var i: usize = 0; + var carry: Limb = 0; + + while (i < b.len) : (i += 1) { + var c: Limb = 0; + c += @boolToInt(@addWithOverflow(Limb, a[i], b[i], &r[i])); + c += @boolToInt(@addWithOverflow(Limb, r[i], carry, &r[i])); + carry = c; + } + + while (i < a.len) : (i += 1) { + carry = @boolToInt(@addWithOverflow(Limb, a[i], carry, &r[i])); + } + + r[i] = carry; +} + +/// Knuth 4.3.1, Exercise 16. +fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void { + @setRuntimeSafety(false); + assert(a.len > 1 or a[0] >= b); + assert(quo.len >= a.len); + + rem.* = 0; + for (a) |_, ri| { + const i = a.len - ri - 1; + const pdiv = ((@as(DoubleLimb, rem.*) << Limb.bit_count) | a[i]); + + if (pdiv == 0) { + quo[i] = 0; + rem.* = 0; + } else if (pdiv < b) { + quo[i] = 0; + rem.* = @truncate(Limb, pdiv); + } else if (pdiv == b) { + quo[i] = 1; + rem.* = 0; + } else { + quo[i] = @truncate(Limb, @divTrunc(pdiv, b)); + rem.* = @truncate(Limb, pdiv - (quo[i] *% b)); + } + } +} + +fn llshl(r: []Limb, a: []const Limb, shift: usize) void { + @setRuntimeSafety(false); + assert(a.len >= 1); + assert(r.len >= a.len + (shift / Limb.bit_count) + 1); + + const limb_shift = shift / Limb.bit_count + 1; + const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count); + + var carry: Limb = 0; + var i: usize = 0; + while (i < a.len) : (i += 1) { + const src_i = a.len - i - 1; + const dst_i = src_i + limb_shift; + + const src_digit = a[src_i]; + r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{ + Limb, + src_digit, + Limb.bit_count - @intCast(Limb, interior_limb_shift), + }); + carry = (src_digit << interior_limb_shift); + } + + r[limb_shift - 1] = carry; + mem.set(Limb, r[0 .. 
limb_shift - 1], 0); +} + +fn llshr(r: []Limb, a: []const Limb, shift: usize) void { + @setRuntimeSafety(false); + assert(a.len >= 1); + assert(r.len >= a.len - (shift / Limb.bit_count)); + + const limb_shift = shift / Limb.bit_count; + const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count); + + var carry: Limb = 0; + var i: usize = 0; + while (i < a.len - limb_shift) : (i += 1) { + const src_i = a.len - i - 1; + const dst_i = src_i - limb_shift; + + const src_digit = a[src_i]; + r[dst_i] = carry | (src_digit >> interior_limb_shift); + carry = @call(.{ .modifier = .always_inline }, math.shl, .{ + Limb, + src_digit, + Limb.bit_count - @intCast(Limb, interior_limb_shift), + }); + } +} + +fn llor(r: []Limb, a: []const Limb, b: []const Limb) void { + @setRuntimeSafety(false); + assert(r.len >= a.len); + assert(a.len >= b.len); + + var i: usize = 0; + while (i < b.len) : (i += 1) { + r[i] = a[i] | b[i]; + } + while (i < a.len) : (i += 1) { + r[i] = a[i]; + } +} + +fn lland(r: []Limb, a: []const Limb, b: []const Limb) void { + @setRuntimeSafety(false); + assert(r.len >= b.len); + assert(a.len >= b.len); + + var i: usize = 0; + while (i < b.len) : (i += 1) { + r[i] = a[i] & b[i]; + } +} + +fn llxor(r: []Limb, a: []const Limb, b: []const Limb) void { + assert(r.len >= a.len); + assert(a.len >= b.len); + + var i: usize = 0; + while (i < b.len) : (i += 1) { + r[i] = a[i] ^ b[i]; + } + while (i < a.len) : (i += 1) { + r[i] = a[i]; + } +} + +// Storage must live for the lifetime of the returned value +fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable { + assert(storage.len >= 2); + + const A_is_positive = A >= 0; + const Au = @intCast(DoubleLimb, if (A < 0) -A else A); + storage[0] = @truncate(Limb, Au); + storage[1] = @truncate(Limb, Au >> Limb.bit_count); + return .{ + .limbs = storage[0..2], + .positive = A_is_positive, + .len = 2, + }; +} + +test "" { + _ = @import("int_test.zig"); } diff --git a/lib/std/math/big/int_test.zig b/lib/std/math/big/int_test.zig new file mode 100644 index 0000000000..d7e354879e --- /dev/null +++ b/lib/std/math/big/int_test.zig @@ -0,0 +1,1455 @@ +const std = @import("../../std.zig"); +const mem = std.mem; +const testing = std.testing; +const Managed = std.math.big.int.Managed; +const Limb = std.math.big.Limb; +const DoubleLimb = std.math.big.DoubleLimb; +const maxInt = std.math.maxInt; +const minInt = std.math.minInt; + +// NOTE: All the following tests assume the max machine-word will be 64-bit. +// +// They will still run on larger than this and should pass, but the multi-limb code-paths +// may be untested in some cases. 
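The new `llmulacc_karatsuba` above splits each operand at `split` limbs and reuses the products of the halves: with x = x1*B + x0 and y = y1*B + y0, the cross term x1*y0 + x0*y1 equals x1*y1 + x0*y0 - (x1 - x0)*(y1 - y0), which is why the routine forms only three half-size products and then adds or subtracts the difference product depending on the signs reported by `llcmp`. Below is a minimal sketch of that recombination identity using ordinary machine integers; it is an editor's illustration, not part of the patch, and the split point `B` and operand values are arbitrary choices made for the example.

```zig
const std = @import("std");

test "karatsuba recombination identity (illustrative)" {
    // Analogue of splitting at `split` limbs: here B = 2^16 plays the role of
    // the limb-aligned split point.
    const B: u64 = 1 << 16;
    const x: u64 = 0x12345678;
    const y: u64 = 0x9abcdef0;
    const x1 = x / B;
    const x0 = x % B;
    const y1 = y / B;
    const y0 = y % B;

    // Three half-size products instead of four.
    const z2 = x1 * y1;
    const z0 = x0 * y0;
    const mid = @intCast(i64, z2) + @intCast(i64, z0) -
        (@intCast(i64, x1) - @intCast(i64, x0)) * (@intCast(i64, y1) - @intCast(i64, y0));

    // Recombine: x*y = z2*B^2 + mid*B + z0.
    const product = z2 * B * B + @intCast(u64, mid) * B + z0;
    std.testing.expect(product == x * y);
}
```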
+ +test "big.int comptime_int set" { + comptime var s = 0xefffffff00000001eeeeeeefaaaaaaab; + var a = try Managed.initSet(testing.allocator, s); + defer a.deinit(); + + const s_limb_count = 128 / Limb.bit_count; + + comptime var i: usize = 0; + inline while (i < s_limb_count) : (i += 1) { + const result = @as(Limb, s & maxInt(Limb)); + s >>= Limb.bit_count / 2; + s >>= Limb.bit_count / 2; + testing.expect(a.limbs[i] == result); + } +} + +test "big.int comptime_int set negative" { + var a = try Managed.initSet(testing.allocator, -10); + defer a.deinit(); + + testing.expect(a.limbs[0] == 10); + testing.expect(a.isPositive() == false); +} + +test "big.int int set unaligned small" { + var a = try Managed.initSet(testing.allocator, @as(u7, 45)); + defer a.deinit(); + + testing.expect(a.limbs[0] == 45); + testing.expect(a.isPositive() == true); +} + +test "big.int comptime_int to" { + var a = try Managed.initSet(testing.allocator, 0xefffffff00000001eeeeeeefaaaaaaab); + defer a.deinit(); + + testing.expect((try a.to(u128)) == 0xefffffff00000001eeeeeeefaaaaaaab); +} + +test "big.int sub-limb to" { + var a = try Managed.initSet(testing.allocator, 10); + defer a.deinit(); + + testing.expect((try a.to(u8)) == 10); +} + +test "big.int to target too small error" { + var a = try Managed.initSet(testing.allocator, 0xffffffff); + defer a.deinit(); + + testing.expectError(error.TargetTooSmall, a.to(u8)); +} + +test "big.int normalize" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + try a.ensureCapacity(8); + + a.limbs[0] = 1; + a.limbs[1] = 2; + a.limbs[2] = 3; + a.limbs[3] = 0; + a.normalize(4); + testing.expect(a.len() == 3); + + a.limbs[0] = 1; + a.limbs[1] = 2; + a.limbs[2] = 3; + a.normalize(3); + testing.expect(a.len() == 3); + + a.limbs[0] = 0; + a.limbs[1] = 0; + a.normalize(2); + testing.expect(a.len() == 1); + + a.limbs[0] = 0; + a.normalize(1); + testing.expect(a.len() == 1); +} + +test "big.int normalize multi" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + try a.ensureCapacity(8); + + a.limbs[0] = 1; + a.limbs[1] = 2; + a.limbs[2] = 0; + a.limbs[3] = 0; + a.normalize(4); + testing.expect(a.len() == 2); + + a.limbs[0] = 1; + a.limbs[1] = 2; + a.limbs[2] = 3; + a.normalize(3); + testing.expect(a.len() == 3); + + a.limbs[0] = 0; + a.limbs[1] = 0; + a.limbs[2] = 0; + a.limbs[3] = 0; + a.normalize(4); + testing.expect(a.len() == 1); + + a.limbs[0] = 0; + a.normalize(1); + testing.expect(a.len() == 1); +} + +test "big.int parity" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + try a.set(0); + testing.expect(a.isEven()); + testing.expect(!a.isOdd()); + + try a.set(7); + testing.expect(!a.isEven()); + testing.expect(a.isOdd()); +} + +test "big.int bitcount + sizeInBaseUpperBound" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + try a.set(0b100); + testing.expect(a.bitCountAbs() == 3); + testing.expect(a.sizeInBaseUpperBound(2) >= 3); + testing.expect(a.sizeInBaseUpperBound(10) >= 1); + + a.negate(); + testing.expect(a.bitCountAbs() == 3); + testing.expect(a.sizeInBaseUpperBound(2) >= 4); + testing.expect(a.sizeInBaseUpperBound(10) >= 2); + + try a.set(0xffffffff); + testing.expect(a.bitCountAbs() == 32); + testing.expect(a.sizeInBaseUpperBound(2) >= 32); + testing.expect(a.sizeInBaseUpperBound(10) >= 10); + + try a.shiftLeft(a, 5000); + testing.expect(a.bitCountAbs() == 5032); + testing.expect(a.sizeInBaseUpperBound(2) >= 5032); + a.setSign(false); + + testing.expect(a.bitCountAbs() == 5032); + 
testing.expect(a.sizeInBaseUpperBound(2) >= 5033); +} + +test "big.int bitcount/to" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + try a.set(0); + testing.expect(a.bitCountTwosComp() == 0); + + testing.expect((try a.to(u0)) == 0); + testing.expect((try a.to(i0)) == 0); + + try a.set(-1); + testing.expect(a.bitCountTwosComp() == 1); + testing.expect((try a.to(i1)) == -1); + + try a.set(-8); + testing.expect(a.bitCountTwosComp() == 4); + testing.expect((try a.to(i4)) == -8); + + try a.set(127); + testing.expect(a.bitCountTwosComp() == 7); + testing.expect((try a.to(u7)) == 127); + + try a.set(-128); + testing.expect(a.bitCountTwosComp() == 8); + testing.expect((try a.to(i8)) == -128); + + try a.set(-129); + testing.expect(a.bitCountTwosComp() == 9); + testing.expect((try a.to(i9)) == -129); +} + +test "big.int fits" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + try a.set(0); + testing.expect(a.fits(u0)); + testing.expect(a.fits(i0)); + + try a.set(255); + testing.expect(!a.fits(u0)); + testing.expect(!a.fits(u1)); + testing.expect(!a.fits(i8)); + testing.expect(a.fits(u8)); + testing.expect(a.fits(u9)); + testing.expect(a.fits(i9)); + + try a.set(-128); + testing.expect(!a.fits(i7)); + testing.expect(a.fits(i8)); + testing.expect(a.fits(i9)); + testing.expect(!a.fits(u9)); + + try a.set(0x1ffffffffeeeeeeee); + testing.expect(!a.fits(u32)); + testing.expect(!a.fits(u64)); + testing.expect(a.fits(u65)); +} + +test "big.int string set" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + try a.setString(10, "120317241209124781241290847124"); + testing.expect((try a.to(u128)) == 120317241209124781241290847124); +} + +test "big.int string negative" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + try a.setString(10, "-1023"); + testing.expect((try a.to(i32)) == -1023); +} + +test "big.int string set number with underscores" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + try a.setString(10, "__1_2_0_3_1_7_2_4_1_2_0_____9_1__2__4_7_8_1_2_4_1_2_9_0_8_4_7_1_2_4___"); + testing.expect((try a.to(u128)) == 120317241209124781241290847124); +} + +test "big.int string set case insensitive number" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + try a.setString(16, "aB_cD_eF"); + testing.expect((try a.to(u32)) == 0xabcdef); +} + +test "big.int string set bad char error" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + testing.expectError(error.InvalidCharacter, a.setString(10, "x")); +} + +test "big.int string set bad base error" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + testing.expectError(error.InvalidBase, a.setString(45, "10")); +} + +test "big.int string to" { + var a = try Managed.initSet(testing.allocator, 120317241209124781241290847124); + defer a.deinit(); + + const as = try a.toString(testing.allocator, 10, false); + defer testing.allocator.free(as); + const es = "120317241209124781241290847124"; + + testing.expect(mem.eql(u8, as, es)); +} + +test "big.int string to base base error" { + var a = try Managed.initSet(testing.allocator, 0xffffffff); + defer a.deinit(); + + testing.expectError(error.InvalidBase, a.toString(testing.allocator, 45, false)); +} + +test "big.int string to base 2" { + var a = try Managed.initSet(testing.allocator, -0b1011); + defer a.deinit(); + + const as = try a.toString(testing.allocator, 2, false); + defer testing.allocator.free(as); + const es = "-1011"; + + 
testing.expect(mem.eql(u8, as, es)); +} + +test "big.int string to base 16" { + var a = try Managed.initSet(testing.allocator, 0xefffffff00000001eeeeeeefaaaaaaab); + defer a.deinit(); + + const as = try a.toString(testing.allocator, 16, false); + defer testing.allocator.free(as); + const es = "efffffff00000001eeeeeeefaaaaaaab"; + + testing.expect(mem.eql(u8, as, es)); +} + +test "big.int neg string to" { + var a = try Managed.initSet(testing.allocator, -123907434); + defer a.deinit(); + + const as = try a.toString(testing.allocator, 10, false); + defer testing.allocator.free(as); + const es = "-123907434"; + + testing.expect(mem.eql(u8, as, es)); +} + +test "big.int zero string to" { + var a = try Managed.initSet(testing.allocator, 0); + defer a.deinit(); + + const as = try a.toString(testing.allocator, 10, false); + defer testing.allocator.free(as); + const es = "0"; + + testing.expect(mem.eql(u8, as, es)); +} + +test "big.int clone" { + var a = try Managed.initSet(testing.allocator, 1234); + defer a.deinit(); + var b = try a.clone(); + defer b.deinit(); + + testing.expect((try a.to(u32)) == 1234); + testing.expect((try b.to(u32)) == 1234); + + try a.set(77); + testing.expect((try a.to(u32)) == 77); + testing.expect((try b.to(u32)) == 1234); +} + +test "big.int swap" { + var a = try Managed.initSet(testing.allocator, 1234); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 5678); + defer b.deinit(); + + testing.expect((try a.to(u32)) == 1234); + testing.expect((try b.to(u32)) == 5678); + + a.swap(&b); + + testing.expect((try a.to(u32)) == 5678); + testing.expect((try b.to(u32)) == 1234); +} + +test "big.int to negative" { + var a = try Managed.initSet(testing.allocator, -10); + defer a.deinit(); + + testing.expect((try a.to(i32)) == -10); +} + +test "big.int compare" { + var a = try Managed.initSet(testing.allocator, -11); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 10); + defer b.deinit(); + + testing.expect(a.orderAbs(b) == .gt); + testing.expect(a.order(b) == .lt); +} + +test "big.int compare similar" { + var a = try Managed.initSet(testing.allocator, 0xffffffffeeeeeeeeffffffffeeeeeeee); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0xffffffffeeeeeeeeffffffffeeeeeeef); + defer b.deinit(); + + testing.expect(a.orderAbs(b) == .lt); + testing.expect(b.orderAbs(a) == .gt); +} + +test "big.int compare different limb size" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb) + 1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 1); + defer b.deinit(); + + testing.expect(a.orderAbs(b) == .gt); + testing.expect(b.orderAbs(a) == .lt); +} + +test "big.int compare multi-limb" { + var a = try Managed.initSet(testing.allocator, -0x7777777799999999ffffeeeeffffeeeeffffeeeef); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x7777777799999999ffffeeeeffffeeeeffffeeeee); + defer b.deinit(); + + testing.expect(a.orderAbs(b) == .gt); + testing.expect(a.order(b) == .lt); +} + +test "big.int equality" { + var a = try Managed.initSet(testing.allocator, 0xffffffff1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, -0xffffffff1); + defer b.deinit(); + + testing.expect(a.eqAbs(b)); + testing.expect(!a.eq(b)); +} + +test "big.int abs" { + var a = try Managed.initSet(testing.allocator, -5); + defer a.deinit(); + + a.abs(); + testing.expect((try a.to(u32)) == 5); + + a.abs(); + testing.expect((try a.to(u32)) == 5); +} + +test "big.int negate" { + var a = try 
Managed.initSet(testing.allocator, 5); + defer a.deinit(); + + a.negate(); + testing.expect((try a.to(i32)) == -5); + + a.negate(); + testing.expect((try a.to(i32)) == 5); +} + +test "big.int add single-single" { + var a = try Managed.initSet(testing.allocator, 50); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 5); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.add(a.toConst(), b.toConst()); + + testing.expect((try c.to(u32)) == 55); +} + +test "big.int add multi-single" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb) + 1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 1); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + + try c.add(a.toConst(), b.toConst()); + testing.expect((try c.to(DoubleLimb)) == maxInt(Limb) + 2); + + try c.add(b.toConst(), a.toConst()); + testing.expect((try c.to(DoubleLimb)) == maxInt(Limb) + 2); +} + +test "big.int add multi-multi" { + const op1 = 0xefefefef7f7f7f7f; + const op2 = 0xfefefefe9f9f9f9f; + var a = try Managed.initSet(testing.allocator, op1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, op2); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.add(a.toConst(), b.toConst()); + + testing.expect((try c.to(u128)) == op1 + op2); +} + +test "big.int add zero-zero" { + var a = try Managed.initSet(testing.allocator, 0); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.add(a.toConst(), b.toConst()); + + testing.expect((try c.to(u32)) == 0); +} + +test "big.int add alias multi-limb nonzero-zero" { + const op1 = 0xffffffff777777771; + var a = try Managed.initSet(testing.allocator, op1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0); + defer b.deinit(); + + try a.add(a.toConst(), b.toConst()); + + testing.expect((try a.to(u128)) == op1); +} + +test "big.int add sign" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + var one = try Managed.initSet(testing.allocator, 1); + defer one.deinit(); + var two = try Managed.initSet(testing.allocator, 2); + defer two.deinit(); + var neg_one = try Managed.initSet(testing.allocator, -1); + defer neg_one.deinit(); + var neg_two = try Managed.initSet(testing.allocator, -2); + defer neg_two.deinit(); + + try a.add(one.toConst(), two.toConst()); + testing.expect((try a.to(i32)) == 3); + + try a.add(neg_one.toConst(), two.toConst()); + testing.expect((try a.to(i32)) == 1); + + try a.add(one.toConst(), neg_two.toConst()); + testing.expect((try a.to(i32)) == -1); + + try a.add(neg_one.toConst(), neg_two.toConst()); + testing.expect((try a.to(i32)) == -3); +} + +test "big.int sub single-single" { + var a = try Managed.initSet(testing.allocator, 50); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 5); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.sub(a.toConst(), b.toConst()); + + testing.expect((try c.to(u32)) == 45); +} + +test "big.int sub multi-single" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb) + 1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 1); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.sub(a.toConst(), b.toConst()); + + testing.expect((try c.to(Limb)) == maxInt(Limb)); +} + 
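The tests in this new file all follow the reworked calling convention: arithmetic methods take read-only `toConst()` views of their operands and write the result into a `Managed` receiver that owns its allocation. A minimal usage sketch of that pattern, as an editor's illustration rather than part of the patch (the operand values are arbitrary):

```zig
const std = @import("std");
const Managed = std.math.big.int.Managed;

test "Managed/Const calling convention (illustrative)" {
    var a = try Managed.initSet(std.testing.allocator, 1234);
    defer a.deinit();
    var b = try Managed.initSet(std.testing.allocator, 5678);
    defer b.deinit();

    // Operands are passed as Const views; the result is written into `sum`,
    // which owns and grows its own limb buffer.
    var sum = try Managed.init(std.testing.allocator);
    defer sum.deinit();
    try sum.add(a.toConst(), b.toConst());

    std.testing.expect((try sum.to(u32)) == 6912);
}
```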
+test "big.int sub multi-multi" { + const op1 = 0xefefefefefefefefefefefef; + const op2 = 0xabababababababababababab; + + var a = try Managed.initSet(testing.allocator, op1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, op2); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.sub(a.toConst(), b.toConst()); + + testing.expect((try c.to(u128)) == op1 - op2); +} + +test "big.int sub equal" { + var a = try Managed.initSet(testing.allocator, 0x11efefefefefefefefefefefef); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x11efefefefefefefefefefefef); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.sub(a.toConst(), b.toConst()); + + testing.expect((try c.to(u32)) == 0); +} + +test "big.int sub sign" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + var one = try Managed.initSet(testing.allocator, 1); + defer one.deinit(); + var two = try Managed.initSet(testing.allocator, 2); + defer two.deinit(); + var neg_one = try Managed.initSet(testing.allocator, -1); + defer neg_one.deinit(); + var neg_two = try Managed.initSet(testing.allocator, -2); + defer neg_two.deinit(); + + try a.sub(one.toConst(), two.toConst()); + testing.expect((try a.to(i32)) == -1); + + try a.sub(neg_one.toConst(), two.toConst()); + testing.expect((try a.to(i32)) == -3); + + try a.sub(one.toConst(), neg_two.toConst()); + testing.expect((try a.to(i32)) == 3); + + try a.sub(neg_one.toConst(), neg_two.toConst()); + testing.expect((try a.to(i32)) == 1); + + try a.sub(neg_two.toConst(), neg_one.toConst()); + testing.expect((try a.to(i32)) == -1); +} + +test "big.int mul single-single" { + var a = try Managed.initSet(testing.allocator, 50); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 5); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.mul(a.toConst(), b.toConst()); + + testing.expect((try c.to(u64)) == 250); +} + +test "big.int mul multi-single" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb)); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 2); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.mul(a.toConst(), b.toConst()); + + testing.expect((try c.to(DoubleLimb)) == 2 * maxInt(Limb)); +} + +test "big.int mul multi-multi" { + const op1 = 0x998888efefefefefefefef; + const op2 = 0x333000abababababababab; + var a = try Managed.initSet(testing.allocator, op1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, op2); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.mul(a.toConst(), b.toConst()); + + testing.expect((try c.to(u256)) == op1 * op2); +} + +test "big.int mul alias r with a" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb)); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 2); + defer b.deinit(); + + try a.mul(a.toConst(), b.toConst()); + + testing.expect((try a.to(DoubleLimb)) == 2 * maxInt(Limb)); +} + +test "big.int mul alias r with b" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb)); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 2); + defer b.deinit(); + + try a.mul(b.toConst(), a.toConst()); + + testing.expect((try a.to(DoubleLimb)) == 2 * maxInt(Limb)); +} + +test "big.int mul alias r with a and b" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb)); + 
defer a.deinit(); + + try a.mul(a.toConst(), a.toConst()); + + testing.expect((try a.to(DoubleLimb)) == maxInt(Limb) * maxInt(Limb)); +} + +test "big.int mul a*0" { + var a = try Managed.initSet(testing.allocator, 0xefefefefefefefef); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.mul(a.toConst(), b.toConst()); + + testing.expect((try c.to(u32)) == 0); +} + +test "big.int mul 0*0" { + var a = try Managed.initSet(testing.allocator, 0); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0); + defer b.deinit(); + + var c = try Managed.init(testing.allocator); + defer c.deinit(); + try c.mul(a.toConst(), b.toConst()); + + testing.expect((try c.to(u32)) == 0); +} + +test "big.int div single-single no rem" { + var a = try Managed.initSet(testing.allocator, 50); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 5); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u32)) == 10); + testing.expect((try r.to(u32)) == 0); +} + +test "big.int div single-single with rem" { + var a = try Managed.initSet(testing.allocator, 49); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 5); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u32)) == 9); + testing.expect((try r.to(u32)) == 4); +} + +test "big.int div multi-single no rem" { + const op1 = 0xffffeeeeddddcccc; + const op2 = 34; + + var a = try Managed.initSet(testing.allocator, op1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, op2); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u64)) == op1 / op2); + testing.expect((try r.to(u64)) == 0); +} + +test "big.int div multi-single with rem" { + const op1 = 0xffffeeeeddddcccf; + const op2 = 34; + + var a = try Managed.initSet(testing.allocator, op1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, op2); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u64)) == op1 / op2); + testing.expect((try r.to(u64)) == 3); +} + +test "big.int div multi>2-single" { + const op1 = 0xfefefefefefefefefefefefefefefefe; + const op2 = 0xefab8; + + var a = try Managed.initSet(testing.allocator, op1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, op2); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u128)) == op1 / op2); + testing.expect((try r.to(u32)) == 0x3e4e); +} + +test "big.int div single-single q < r" { + var a = try Managed.initSet(testing.allocator, 0x0078f432); + defer a.deinit(); + var b = try 
Managed.initSet(testing.allocator, 0x01000000); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u64)) == 0); + testing.expect((try r.to(u64)) == 0x0078f432); +} + +test "big.int div single-single q == r" { + var a = try Managed.initSet(testing.allocator, 10); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 10); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u64)) == 1); + testing.expect((try r.to(u64)) == 0); +} + +test "big.int div q=0 alias" { + var a = try Managed.initSet(testing.allocator, 3); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 10); + defer b.deinit(); + + try Managed.divTrunc(&a, &b, a.toConst(), b.toConst()); + + testing.expect((try a.to(u64)) == 0); + testing.expect((try b.to(u64)) == 3); +} + +test "big.int div multi-multi q < r" { + const op1 = 0x1ffffffff0078f432; + const op2 = 0x1ffffffff01000000; + var a = try Managed.initSet(testing.allocator, op1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, op2); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u128)) == 0); + testing.expect((try r.to(u128)) == op1); +} + +test "big.int div trunc single-single +/+" { + const u: i32 = 5; + const v: i32 = 3; + + var a = try Managed.initSet(testing.allocator, u); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, v); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + // n = q * d + r + // 5 = 1 * 3 + 2 + const eq = @divTrunc(u, v); + const er = @mod(u, v); + + testing.expect((try q.to(i32)) == eq); + testing.expect((try r.to(i32)) == er); +} + +test "big.int div trunc single-single -/+" { + const u: i32 = -5; + const v: i32 = 3; + + var a = try Managed.initSet(testing.allocator, u); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, v); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + // n = q * d + r + // -5 = 1 * -3 - 2 + const eq = -1; + const er = -2; + + testing.expect((try q.to(i32)) == eq); + testing.expect((try r.to(i32)) == er); +} + +test "big.int div trunc single-single +/-" { + const u: i32 = 5; + const v: i32 = -3; + + var a = try Managed.initSet(testing.allocator, u); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, v); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + // n = q * d + r + // 5 = -1 * -3 + 2 + const eq = -1; + const er = 2; + + testing.expect((try q.to(i32)) == eq); + testing.expect((try r.to(i32)) == er); +} + +test "big.int div trunc single-single -/-" { + const 
u: i32 = -5; + const v: i32 = -3; + + var a = try Managed.initSet(testing.allocator, u); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, v); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + // n = q * d + r + // -5 = 1 * -3 - 2 + const eq = 1; + const er = -2; + + testing.expect((try q.to(i32)) == eq); + testing.expect((try r.to(i32)) == er); +} + +test "big.int div floor single-single +/+" { + const u: i32 = 5; + const v: i32 = 3; + + var a = try Managed.initSet(testing.allocator, u); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, v); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divFloor(&q, &r, a.toConst(), b.toConst()); + + // n = q * d + r + // 5 = 1 * 3 + 2 + const eq = 1; + const er = 2; + + testing.expect((try q.to(i32)) == eq); + testing.expect((try r.to(i32)) == er); +} + +test "big.int div floor single-single -/+" { + const u: i32 = -5; + const v: i32 = 3; + + var a = try Managed.initSet(testing.allocator, u); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, v); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divFloor(&q, &r, a.toConst(), b.toConst()); + + // n = q * d + r + // -5 = -2 * 3 + 1 + const eq = -2; + const er = 1; + + testing.expect((try q.to(i32)) == eq); + testing.expect((try r.to(i32)) == er); +} + +test "big.int div floor single-single +/-" { + const u: i32 = 5; + const v: i32 = -3; + + var a = try Managed.initSet(testing.allocator, u); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, v); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divFloor(&q, &r, a.toConst(), b.toConst()); + + // n = q * d + r + // 5 = -2 * -3 - 1 + const eq = -2; + const er = -1; + + testing.expect((try q.to(i32)) == eq); + testing.expect((try r.to(i32)) == er); +} + +test "big.int div floor single-single -/-" { + const u: i32 = -5; + const v: i32 = -3; + + var a = try Managed.initSet(testing.allocator, u); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, v); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divFloor(&q, &r, a.toConst(), b.toConst()); + + // n = q * d + r + // -5 = 2 * -3 + 1 + const eq = 1; + const er = -2; + + testing.expect((try q.to(i32)) == eq); + testing.expect((try r.to(i32)) == er); +} + +test "big.int div multi-multi with rem" { + var a = try Managed.initSet(testing.allocator, 0x8888999911110000ffffeeeeddddccccbbbbaaaa9999); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x99990000111122223333); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b); + testing.expect((try r.to(u128)) == 0x28de0acacd806823638); +} + +test "big.int div multi-multi no rem" { + var a = try 
Managed.initSet(testing.allocator, 0x8888999911110000ffffeeeedb4fec200ee3a4286361); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x99990000111122223333); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u128)) == 0xe38f38e39161aaabd03f0f1b); + testing.expect((try r.to(u128)) == 0); +} + +test "big.int div multi-multi (2 branch)" { + var a = try Managed.initSet(testing.allocator, 0x866666665555555588888887777777761111111111111111); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x86666666555555554444444433333333); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u128)) == 0x10000000000000000); + testing.expect((try r.to(u128)) == 0x44444443444444431111111111111111); +} + +test "big.int div multi-multi (3.1/3.3 branch)" { + var a = try Managed.initSet(testing.allocator, 0x11111111111111111111111111111111111111111111111111111111111111); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x1111111111111111111111111111111111111111171); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u128)) == 0xfffffffffffffffffff); + testing.expect((try r.to(u256)) == 0x1111111111111111111110b12222222222222222282); +} + +test "big.int div multi-single zero-limb trailing" { + var a = try Managed.initSet(testing.allocator, 0x60000000000000000000000000000000000000000000000000000000000000000); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x10000000000000000); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + var expected = try Managed.initSet(testing.allocator, 0x6000000000000000000000000000000000000000000000000); + defer expected.deinit(); + testing.expect(q.eq(expected)); + testing.expect(r.eqZero()); +} + +test "big.int div multi-multi zero-limb trailing (with rem)" { + var a = try Managed.initSet(testing.allocator, 0x86666666555555558888888777777776111111111111111100000000000000000000000000000000); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x8666666655555555444444443333333300000000000000000000000000000000); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u128)) == 0x10000000000000000); + + const rs = try r.toString(testing.allocator, 16, false); + defer testing.allocator.free(rs); + testing.expect(std.mem.eql(u8, rs, "4444444344444443111111111111111100000000000000000000000000000000")); +} + +test "big.int div multi-multi zero-limb trailing (with rem) and dividend zero-limb count > divisor zero-limb count" { + var a = try Managed.initSet(testing.allocator, 0x8666666655555555888888877777777611111111111111110000000000000000); + defer a.deinit(); + var b = try 
Managed.initSet(testing.allocator, 0x8666666655555555444444443333333300000000000000000000000000000000); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + testing.expect((try q.to(u128)) == 0x1); + + const rs = try r.toString(testing.allocator, 16, false); + defer testing.allocator.free(rs); + testing.expect(std.mem.eql(u8, rs, "444444434444444311111111111111110000000000000000")); +} + +test "big.int div multi-multi zero-limb trailing (with rem) and dividend zero-limb count < divisor zero-limb count" { + var a = try Managed.initSet(testing.allocator, 0x86666666555555558888888777777776111111111111111100000000000000000000000000000000); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0x866666665555555544444444333333330000000000000000); + defer b.deinit(); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + const qs = try q.toString(testing.allocator, 16, false); + defer testing.allocator.free(qs); + testing.expect(std.mem.eql(u8, qs, "10000000000000000820820803105186f")); + + const rs = try r.toString(testing.allocator, 16, false); + defer testing.allocator.free(rs); + testing.expect(std.mem.eql(u8, rs, "4e11f2baa5896a321d463b543d0104e30000000000000000")); +} + +test "big.int div multi-multi fuzz case #1" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + var b = try Managed.init(testing.allocator); + defer b.deinit(); + + try a.setString(16, "ffffffffffffffffffffffffffffc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); + try b.setString(16, "3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0000000000000000000000000000000000001ffffffffffffffffffffffffffffffffffffffffffffffffffc000000000000000000000000000000007fffffffffff"); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + const qs = try q.toString(testing.allocator, 16, false); + defer testing.allocator.free(qs); + testing.expect(std.mem.eql(u8, qs, 
"3ffffffffffffffffffffffffffff0000000000000000000000000000000000001ffffffffffffffffffffffffffff7fffffffe000000000000000000000000000180000000000000000000003fffffbfffffffdfffffffffffffeffff800000100101000000100000000020003fffffdfbfffffe3ffffffffffffeffff7fffc00800a100000017ffe000002000400007efbfff7fe9f00000037ffff3fff7fffa004006100000009ffe00000190038200bf7d2ff7fefe80400060000f7d7f8fbf9401fe38e0403ffc0bdffffa51102c300d7be5ef9df4e5060007b0127ad3fa69f97d0f820b6605ff617ddf7f32ad7a05c0d03f2e7bc78a6000e087a8bbcdc59e07a5a079128a7861f553ddebed7e8e56701756f9ead39b48cd1b0831889ea6ec1fddf643d0565b075ff07e6caea4e2854ec9227fd635ed60a2f5eef2893052ffd54718fa08604acbf6a15e78a467c4a3c53c0278af06c4416573f925491b195e8fd79302cb1aaf7caf4ecfc9aec1254cc969786363ac729f914c6ddcc26738d6b0facd54eba026580aba2eb6482a088b0d224a8852420b91ec1")); + + const rs = try r.toString(testing.allocator, 16, false); + defer testing.allocator.free(rs); + testing.expect(std.mem.eql(u8, rs, "310d1d4c414426b4836c2635bad1df3a424e50cbdd167ffccb4dfff57d36b4aae0d6ca0910698220171a0f3373c1060a046c2812f0027e321f72979daa5e7973214170d49e885de0c0ecc167837d44502430674a82522e5df6a0759548052420b91ec1")); +} + +test "big.int div multi-multi fuzz case #2" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + var b = try Managed.init(testing.allocator); + defer b.deinit(); + + try a.setString(16, "3ffffffffe00000000000000000000000000fffffffffffffffffffffffffffffffffffffffffffffffffffffffffe000000000000000000000000000000000000000000000000000000000000001fffffffffffffffff800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffffffffc000000000000000000000000000000000000000000000000000000000000000"); + try b.setString(16, "ffc0000000000000000000000000000000000000000000000000"); + + var q = try Managed.init(testing.allocator); + defer q.deinit(); + var r = try Managed.init(testing.allocator); + defer r.deinit(); + try Managed.divTrunc(&q, &r, a.toConst(), b.toConst()); + + const qs = try q.toString(testing.allocator, 16, false); + defer testing.allocator.free(qs); + testing.expect(std.mem.eql(u8, qs, "40100400fe3f8fe3f8fe3f8fe3f8fe3f8fe4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f93e4f91e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4992649926499264991e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4791e4792e4b92e4b92e4b92e4b92a4a92a4a92a4")); + + const rs = try r.toString(testing.allocator, 16, false); + defer testing.allocator.free(rs); + testing.expect(std.mem.eql(u8, rs, "a900000000000000000000000000000000000000000000000000")); +} + +test "big.int shift-right single" { + var a = try Managed.initSet(testing.allocator, 0xffff0000); + defer a.deinit(); + try a.shiftRight(a, 16); + + testing.expect((try a.to(u32)) == 0xffff); +} + +test "big.int shift-right multi" { + var a = try Managed.initSet(testing.allocator, 0xffff0000eeee1111dddd2222cccc3333); + defer a.deinit(); + try a.shiftRight(a, 67); + + testing.expect((try a.to(u64)) == 0x1fffe0001dddc222); +} + +test "big.int shift-left single" { + var a = try Managed.initSet(testing.allocator, 0xffff); + defer a.deinit(); + try a.shiftLeft(a, 16); + + testing.expect((try a.to(u64)) == 0xffff0000); +} + +test "big.int shift-left multi" { + var a = try Managed.initSet(testing.allocator, 0x1fffe0001dddc222); + defer a.deinit(); + try a.shiftLeft(a, 67); + + testing.expect((try a.to(u128)) == 0xffff0000eeee11100000000000000000); +} + 
+test "big.int shift-right negative" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + var arg = try Managed.initSet(testing.allocator, -20); + defer arg.deinit(); + try a.shiftRight(arg, 2); + testing.expect((try a.to(i32)) == -20 >> 2); + + var arg2 = try Managed.initSet(testing.allocator, -5); + defer arg2.deinit(); + try a.shiftRight(arg2, 10); + testing.expect((try a.to(i32)) == -5 >> 10); +} + +test "big.int shift-left negative" { + var a = try Managed.init(testing.allocator); + defer a.deinit(); + + var arg = try Managed.initSet(testing.allocator, -10); + defer arg.deinit(); + try a.shiftRight(arg, 1232); + testing.expect((try a.to(i32)) == -10 >> 1232); +} + +test "big.int bitwise and simple" { + var a = try Managed.initSet(testing.allocator, 0xffffffff11111111); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0xeeeeeeee22222222); + defer b.deinit(); + + try a.bitAnd(a, b); + + testing.expect((try a.to(u64)) == 0xeeeeeeee00000000); +} + +test "big.int bitwise and multi-limb" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb) + 1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, maxInt(Limb)); + defer b.deinit(); + + try a.bitAnd(a, b); + + testing.expect((try a.to(u128)) == 0); +} + +test "big.int bitwise xor simple" { + var a = try Managed.initSet(testing.allocator, 0xffffffff11111111); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0xeeeeeeee22222222); + defer b.deinit(); + + try a.bitXor(a, b); + + testing.expect((try a.to(u64)) == 0x1111111133333333); +} + +test "big.int bitwise xor multi-limb" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb) + 1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, maxInt(Limb)); + defer b.deinit(); + + try a.bitXor(a, b); + + testing.expect((try a.to(DoubleLimb)) == (maxInt(Limb) + 1) ^ maxInt(Limb)); +} + +test "big.int bitwise or simple" { + var a = try Managed.initSet(testing.allocator, 0xffffffff11111111); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, 0xeeeeeeee22222222); + defer b.deinit(); + + try a.bitOr(a, b); + + testing.expect((try a.to(u64)) == 0xffffffff33333333); +} + +test "big.int bitwise or multi-limb" { + var a = try Managed.initSet(testing.allocator, maxInt(Limb) + 1); + defer a.deinit(); + var b = try Managed.initSet(testing.allocator, maxInt(Limb)); + defer b.deinit(); + + try a.bitOr(a, b); + + // TODO: big.int.cpp or is wrong on multi-limb. 
+    testing.expect((try a.to(DoubleLimb)) == (maxInt(Limb) + 1) + maxInt(Limb));
+}
+
+test "big.int var args" {
+    var a = try Managed.initSet(testing.allocator, 5);
+    defer a.deinit();
+
+    var b = try Managed.initSet(testing.allocator, 6);
+    defer b.deinit();
+    try a.add(a.toConst(), b.toConst());
+    testing.expect((try a.to(u64)) == 11);
+
+    var c = try Managed.initSet(testing.allocator, 11);
+    defer c.deinit();
+    testing.expect(a.order(c) == .eq);
+
+    var d = try Managed.initSet(testing.allocator, 14);
+    defer d.deinit();
+    testing.expect(a.order(d) != .gt);
+}
+
+test "big.int gcd one small" {
+    var a = try Managed.initSet(testing.allocator, 17);
+    defer a.deinit();
+    var b = try Managed.initSet(testing.allocator, 97);
+    defer b.deinit();
+    var r = try Managed.init(testing.allocator);
+    defer r.deinit();
+
+    try r.gcd(a, b);
+
+    testing.expect((try r.to(u32)) == 1);
+}
+
+test "big.int gcd non-one small" {
+    var a = try Managed.initSet(testing.allocator, 4864);
+    defer a.deinit();
+    var b = try Managed.initSet(testing.allocator, 3458);
+    defer b.deinit();
+    var r = try Managed.init(testing.allocator);
+    defer r.deinit();
+
+    try r.gcd(a, b);
+
+    testing.expect((try r.to(u32)) == 38);
+}
+
+test "big.int gcd non-one large" {
+    var a = try Managed.initSet(testing.allocator, 0xffffffffffffffff);
+    defer a.deinit();
+    var b = try Managed.initSet(testing.allocator, 0xffffffffffffffff7777);
+    defer b.deinit();
+    var r = try Managed.init(testing.allocator);
+    defer r.deinit();
+
+    try r.gcd(a, b);
+
+    testing.expect((try r.to(u32)) == 4369);
+}
+
+test "big.int gcd large multi-limb result" {
+    var a = try Managed.initSet(testing.allocator, 0x12345678123456781234567812345678123456781234567812345678);
+    defer a.deinit();
+    var b = try Managed.initSet(testing.allocator, 0x12345671234567123456712345671234567123456712345671234567);
+    defer b.deinit();
+    var r = try Managed.init(testing.allocator);
+    defer r.deinit();
+
+    try r.gcd(a, b);
+
+    const answer = (try r.to(u256));
+    testing.expect(answer == 0xf000000ff00000fff0000ffff000fffff00ffffff1);
+}
+
+test "big.int gcd one large" {
+    var a = try Managed.initSet(testing.allocator, 1897056385327307);
+    defer a.deinit();
+    var b = try Managed.initSet(testing.allocator, 2251799813685248);
+    defer b.deinit();
+    var r = try Managed.init(testing.allocator);
+    defer r.deinit();
+
+    try r.gcd(a, b);
+
+    testing.expect((try r.to(u64)) == 1);
+}
diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig
index f5f2f53113..3624a16139 100644
--- a/lib/std/math/big/rational.zig
+++ b/lib/std/math/big/rational.zig
@@ -5,10 +5,10 @@ const mem = std.mem;
const testing = std.testing;
const Allocator = mem.Allocator;

-const bn = @import("int.zig");
-const Limb = bn.Limb;
-const DoubleLimb = bn.DoubleLimb;
-const Int = bn.Int;
+const Limb = std.math.big.Limb;
+const DoubleLimb = std.math.big.DoubleLimb;
+const Int = std.math.big.int.Managed;
+const IntConst = std.math.big.int.Const;

/// An arbitrary-precision rational number.
///
@@ -17,6 +17,9 @@ const Int = bn.Int;
///
/// Rational's are always normalized. That is, for a Rational r = p/q where p and q are integers,
/// gcd(p, q) = 1 always.
+///
+/// TODO rework this to store its own allocator and use a non-managed big int, to avoid double
+/// allocator storage.
pub const Rational = struct {
    /// Numerator. Determines the sign of the Rational.
p: Int, @@ -98,20 +101,20 @@ pub const Rational = struct { if (point) |i| { try self.p.setString(10, str[0..i]); - const base = Int.initFixed(([_]Limb{10})[0..]); + const base = IntConst{ .limbs = &[_]Limb{10}, .positive = true }; var j: usize = start; while (j < str.len - i - 1) : (j += 1) { - try self.p.mul(self.p, base); + try self.p.mul(self.p.toConst(), base); } try self.q.setString(10, str[i + 1 ..]); - try self.p.add(self.p, self.q); + try self.p.add(self.p.toConst(), self.q.toConst()); try self.q.set(1); var k: usize = i + 1; while (k < str.len) : (k += 1) { - try self.q.mul(self.q, base); + try self.q.mul(self.q.toConst(), base); } try self.reduce(); @@ -218,14 +221,14 @@ pub const Rational = struct { } // 2. compute quotient and remainder - var q = try Int.init(self.p.allocator.?); + var q = try Int.init(self.p.allocator); defer q.deinit(); // unused - var r = try Int.init(self.p.allocator.?); + var r = try Int.init(self.p.allocator); defer r.deinit(); - try Int.divTrunc(&q, &r, a2, b2); + try Int.divTrunc(&q, &r, a2.toConst(), b2.toConst()); var mantissa = extractLowBits(q, BitReprType); var have_rem = r.len() > 0; @@ -293,14 +296,14 @@ pub const Rational = struct { /// Set a Rational directly from an Int. pub fn copyInt(self: *Rational, a: Int) !void { - try self.p.copy(a); + try self.p.copy(a.toConst()); try self.q.set(1); } /// Set a Rational directly from a ratio of two Int's. pub fn copyRatio(self: *Rational, a: Int, b: Int) !void { - try self.p.copy(a); - try self.q.copy(b); + try self.p.copy(a.toConst()); + try self.q.copy(b.toConst()); self.p.setSign(@boolToInt(self.p.isPositive()) ^ @boolToInt(self.q.isPositive()) == 0); self.q.setSign(true); @@ -327,13 +330,13 @@ pub const Rational = struct { /// Returns math.Order.lt, math.Order.eq, math.Order.gt if a < b, a == b or a /// > b respectively. - pub fn cmp(a: Rational, b: Rational) !math.Order { + pub fn order(a: Rational, b: Rational) !math.Order { return cmpInternal(a, b, true); } /// Returns math.Order.lt, math.Order.eq, math.Order.gt if |a| < |b|, |a| == /// |b| or |a| > |b| respectively. - pub fn cmpAbs(a: Rational, b: Rational) !math.Order { + pub fn orderAbs(a: Rational, b: Rational) !math.Order { return cmpInternal(a, b, false); } @@ -341,16 +344,16 @@ pub const Rational = struct { fn cmpInternal(a: Rational, b: Rational, is_abs: bool) !math.Order { // TODO: Would a div compare algorithm of sorts be viable and quicker? Can we avoid // the memory allocations here? - var q = try Int.init(a.p.allocator.?); + var q = try Int.init(a.p.allocator); defer q.deinit(); - var p = try Int.init(b.p.allocator.?); + var p = try Int.init(b.p.allocator); defer p.deinit(); - try q.mul(a.p, b.q); - try p.mul(b.p, a.q); + try q.mul(a.p.toConst(), b.q.toConst()); + try p.mul(b.p.toConst(), a.q.toConst()); - return if (is_abs) q.cmpAbs(p) else q.cmp(p); + return if (is_abs) q.orderAbs(p) else q.order(p); } /// rma = a + b. 
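
The doc comment above states the Rational invariant: the stored p/q always satisfies gcd(p, q) = 1, and `reduce` re-establishes it after each operation. As a rough usage sketch of what that means for callers (not part of the diff; it assumes `Rational` is reachable as `std.math.big.Rational` and that `setRatio` reduces its arguments before storing them):

    const std = @import("std");
    const testing = std.testing;
    const Rational = std.math.big.Rational; // assumed re-export path

    test "rational stays normalized (sketch)" {
        var a = try Rational.init(testing.allocator);
        defer a.deinit();

        // 66/99 has the common factor 33, so it should be stored as 2/3.
        try a.setRatio(66, 99);
        testing.expect((try a.p.to(u32)) == 2);
        testing.expect((try a.q.to(u32)) == 3);
    }
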
@@ -364,7 +367,7 @@ pub const Rational = struct { var sr: Rational = undefined; if (aliased) { - sr = try Rational.init(rma.p.allocator.?); + sr = try Rational.init(rma.p.allocator); r = &sr; aliased = true; } @@ -373,11 +376,11 @@ pub const Rational = struct { r.deinit(); }; - try r.p.mul(a.p, b.q); - try r.q.mul(b.p, a.q); - try r.p.add(r.p, r.q); + try r.p.mul(a.p.toConst(), b.q.toConst()); + try r.q.mul(b.p.toConst(), a.q.toConst()); + try r.p.add(r.p.toConst(), r.q.toConst()); - try r.q.mul(a.q, b.q); + try r.q.mul(a.q.toConst(), b.q.toConst()); try r.reduce(); } @@ -392,7 +395,7 @@ pub const Rational = struct { var sr: Rational = undefined; if (aliased) { - sr = try Rational.init(rma.p.allocator.?); + sr = try Rational.init(rma.p.allocator); r = &sr; aliased = true; } @@ -401,11 +404,11 @@ pub const Rational = struct { r.deinit(); }; - try r.p.mul(a.p, b.q); - try r.q.mul(b.p, a.q); - try r.p.sub(r.p, r.q); + try r.p.mul(a.p.toConst(), b.q.toConst()); + try r.q.mul(b.p.toConst(), a.q.toConst()); + try r.p.sub(r.p.toConst(), r.q.toConst()); - try r.q.mul(a.q, b.q); + try r.q.mul(a.q.toConst(), b.q.toConst()); try r.reduce(); } @@ -415,8 +418,8 @@ pub const Rational = struct { /// /// Returns an error if memory could not be allocated. pub fn mul(r: *Rational, a: Rational, b: Rational) !void { - try r.p.mul(a.p, b.p); - try r.q.mul(a.q, b.q); + try r.p.mul(a.p.toConst(), b.p.toConst()); + try r.q.mul(a.q.toConst(), b.q.toConst()); try r.reduce(); } @@ -430,8 +433,8 @@ pub const Rational = struct { @panic("division by zero"); } - try r.p.mul(a.p, b.q); - try r.q.mul(b.p, a.q); + try r.p.mul(a.p.toConst(), b.q.toConst()); + try r.q.mul(b.p.toConst(), a.q.toConst()); try r.reduce(); } @@ -442,7 +445,7 @@ pub const Rational = struct { // reduce r/q such that gcd(r, q) = 1 fn reduce(r: *Rational) !void { - var a = try Int.init(r.p.allocator.?); + var a = try Int.init(r.p.allocator); defer a.deinit(); const sign = r.p.isPositive(); @@ -450,15 +453,15 @@ pub const Rational = struct { try a.gcd(r.p, r.q); r.p.setSign(sign); - const one = Int.initFixed(([_]Limb{1})[0..]); - if (a.cmp(one) != .eq) { - var unused = try Int.init(r.p.allocator.?); + const one = IntConst{ .limbs = &[_]Limb{1}, .positive = true }; + if (a.toConst().order(one) != .eq) { + var unused = try Int.init(r.p.allocator); defer unused.deinit(); // TODO: divexact would be useful here // TODO: don't copy r.q for div - try Int.divTrunc(&r.p, &unused, r.p, a); - try Int.divTrunc(&r.q, &unused, r.q, a); + try Int.divTrunc(&r.p, &unused, r.p.toConst(), a.toConst()); + try Int.divTrunc(&r.q, &unused, r.q.toConst(), a.toConst()); } } }; @@ -596,25 +599,25 @@ test "big.rational copy" { var a = try Rational.init(testing.allocator); defer a.deinit(); - const b = try Int.initSet(testing.allocator, 5); + var b = try Int.initSet(testing.allocator, 5); defer b.deinit(); try a.copyInt(b); testing.expect((try a.p.to(u32)) == 5); testing.expect((try a.q.to(u32)) == 1); - const c = try Int.initSet(testing.allocator, 7); + var c = try Int.initSet(testing.allocator, 7); defer c.deinit(); - const d = try Int.initSet(testing.allocator, 3); + var d = try Int.initSet(testing.allocator, 3); defer d.deinit(); try a.copyRatio(c, d); testing.expect((try a.p.to(u32)) == 7); testing.expect((try a.q.to(u32)) == 3); - const e = try Int.initSet(testing.allocator, 9); + var e = try Int.initSet(testing.allocator, 9); defer e.deinit(); - const f = try Int.initSet(testing.allocator, 3); + var f = try Int.initSet(testing.allocator, 3); defer f.deinit(); try 
a.copyRatio(e, f); @@ -680,7 +683,7 @@ test "big.rational swap" { testing.expect((try b.q.to(u32)) == 23); } -test "big.rational cmp" { +test "big.rational order" { var a = try Rational.init(testing.allocator); defer a.deinit(); var b = try Rational.init(testing.allocator); @@ -688,11 +691,11 @@ test "big.rational cmp" { try a.setRatio(500, 231); try b.setRatio(18903, 8584); - testing.expect((try a.cmp(b)) == .lt); + testing.expect((try a.order(b)) == .lt); try a.setRatio(890, 10); try b.setRatio(89, 1); - testing.expect((try a.cmp(b)) == .eq); + testing.expect((try a.order(b)) == .eq); } test "big.rational add single-limb" { @@ -703,11 +706,11 @@ test "big.rational add single-limb" { try a.setRatio(500, 231); try b.setRatio(18903, 8584); - testing.expect((try a.cmp(b)) == .lt); + testing.expect((try a.order(b)) == .lt); try a.setRatio(890, 10); try b.setRatio(89, 1); - testing.expect((try a.cmp(b)) == .eq); + testing.expect((try a.order(b)) == .eq); } test "big.rational add" { @@ -723,7 +726,7 @@ test "big.rational add" { try a.add(a, b); try r.setRatio(984786924199, 290395044174); - testing.expect((try a.cmp(r)) == .eq); + testing.expect((try a.order(r)) == .eq); } test "big.rational sub" { @@ -739,7 +742,7 @@ test "big.rational sub" { try a.sub(a, b); try r.setRatio(979040510045, 290395044174); - testing.expect((try a.cmp(r)) == .eq); + testing.expect((try a.order(r)) == .eq); } test "big.rational mul" { @@ -755,7 +758,7 @@ test "big.rational mul" { try a.mul(a, b); try r.setRatio(571481443, 17082061422); - testing.expect((try a.cmp(r)) == .eq); + testing.expect((try a.order(r)) == .eq); } test "big.rational div" { @@ -771,7 +774,7 @@ test "big.rational div" { try a.div(a, b); try r.setRatio(75531824394, 221015929); - testing.expect((try a.cmp(r)) == .eq); + testing.expect((try a.order(r)) == .eq); } test "big.rational div" { @@ -784,11 +787,11 @@ test "big.rational div" { a.invert(); try r.setRatio(23341, 78923); - testing.expect((try a.cmp(r)) == .eq); + testing.expect((try a.order(r)) == .eq); try a.setRatio(-78923, 23341); a.invert(); try r.setRatio(-23341, 78923); - testing.expect((try a.cmp(r)) == .eq); + testing.expect((try a.order(r)) == .eq); } diff --git a/lib/std/target.zig b/lib/std/target.zig index 6e383ba4c7..9df3e21e52 100644 --- a/lib/std/target.zig +++ b/lib/std/target.zig @@ -404,6 +404,7 @@ pub const Target = struct { }; pub const ObjectFormat = enum { + /// TODO Get rid of this one. unknown, coff, elf, diff --git a/lib/std/testing.zig b/lib/std/testing.zig index fed2b15bf5..0f6cefb787 100644 --- a/lib/std/testing.zig +++ b/lib/std/testing.zig @@ -12,7 +12,7 @@ pub const failing_allocator = &failing_allocator_instance.allocator; pub var failing_allocator_instance = FailingAllocator.init(&base_allocator_instance.allocator, 0); pub var base_allocator_instance = std.heap.ThreadSafeFixedBufferAllocator.init(allocator_mem[0..]); -var allocator_mem: [1024 * 1024]u8 = undefined; +var allocator_mem: [2 * 1024 * 1024]u8 = undefined; /// This function is intended to be used only in tests. It prints diagnostics to stderr /// and then aborts when actual_error_union is not expected_error. 
@@ -193,6 +193,44 @@ pub fn expect(ok: bool) void { if (!ok) @panic("test failure"); } +pub const TmpDir = struct { + dir: std.fs.Dir, + parent_dir: std.fs.Dir, + sub_path: [sub_path_len]u8, + + const random_bytes_count = 12; + const sub_path_len = std.base64.Base64Encoder.calcSize(random_bytes_count); + + pub fn cleanup(self: *TmpDir) void { + self.dir.close(); + self.parent_dir.deleteTree(&self.sub_path) catch {}; + self.parent_dir.close(); + self.* = undefined; + } +}; + +pub fn tmpDir(opts: std.fs.Dir.OpenDirOptions) TmpDir { + var random_bytes: [TmpDir.random_bytes_count]u8 = undefined; + std.crypto.randomBytes(&random_bytes) catch + @panic("unable to make tmp dir for testing: unable to get random bytes"); + var sub_path: [TmpDir.sub_path_len]u8 = undefined; + std.fs.base64_encoder.encode(&sub_path, &random_bytes); + + var cache_dir = std.fs.cwd().makeOpenPath("zig-cache", .{}) catch + @panic("unable to make tmp dir for testing: unable to make and open zig-cache dir"); + defer cache_dir.close(); + var parent_dir = cache_dir.makeOpenPath("tmp", .{}) catch + @panic("unable to make tmp dir for testing: unable to make and open zig-cache/tmp dir"); + var dir = parent_dir.makeOpenPath(&sub_path, opts) catch + @panic("unable to make tmp dir for testing: unable to make and open the tmp dir"); + + return .{ + .dir = dir, + .parent_dir = parent_dir, + .sub_path = sub_path, + }; +} + test "expectEqual nested array" { const a = [2][2]f32{ [_]f32{ 1.0, 0.0 }, diff --git a/lib/std/zig.zig b/lib/std/zig.zig index dcf7842a3c..bb4f955797 100644 --- a/lib/std/zig.zig +++ b/lib/std/zig.zig @@ -9,6 +9,23 @@ pub const ast = @import("zig/ast.zig"); pub const system = @import("zig/system.zig"); pub const CrossTarget = @import("zig/cross_target.zig").CrossTarget; +pub fn findLineColumn(source: []const u8, byte_offset: usize) struct { line: usize, column: usize } { + var line: usize = 0; + var column: usize = 0; + for (source[0..byte_offset]) |byte| { + switch (byte) { + '\n' => { + line += 1; + column = 0; + }, + else => { + column += 1; + }, + } + } + return .{ .line = line, .column = column }; +} + test "" { @import("std").meta.refAllDecls(@This()); } diff --git a/lib/std/zig/system.zig b/lib/std/zig/system.zig index 9585058c2a..7b05e3bcfb 100644 --- a/lib/std/zig/system.zig +++ b/lib/std/zig/system.zig @@ -415,7 +415,12 @@ pub const NativeTargetInfo = struct { // over our own shared objects and find a dynamic linker. self_exe: { const lib_paths = try std.process.getSelfExeSharedLibPaths(allocator); - defer allocator.free(lib_paths); + defer { + for (lib_paths) |lib_path| { + allocator.free(lib_path); + } + allocator.free(lib_paths); + } var found_ld_info: LdInfo = undefined; var found_ld_path: [:0]const u8 = undefined; diff --git a/src-self-hosted/codegen.zig b/src-self-hosted/codegen.zig index 3a8d0e1282..675b8faad2 100644 --- a/src-self-hosted/codegen.zig +++ b/src-self-hosted/codegen.zig @@ -39,7 +39,7 @@ pub fn generateSymbol(typed_value: ir.TypedValue, module: ir.Module, code: *std. 
defer function.inst_table.deinit(); defer function.errors.deinit(); - for (module_fn.body) |inst| { + for (module_fn.body.instructions) |inst| { const new_inst = function.genFuncInst(inst) catch |err| switch (err) { error.CodegenFail => { assert(function.errors.items.len != 0); @@ -77,32 +77,63 @@ const Function = struct { fn genFuncInst(self: *Function, inst: *ir.Inst) !MCValue { switch (inst.tag) { - .unreach => return self.genPanic(inst.src), + .breakpoint => return self.genBreakpoint(inst.src), + .unreach => return MCValue{ .unreach = {} }, .constant => unreachable, // excluded from function bodies .assembly => return self.genAsm(inst.cast(ir.Inst.Assembly).?), .ptrtoint => return self.genPtrToInt(inst.cast(ir.Inst.PtrToInt).?), .bitcast => return self.genBitCast(inst.cast(ir.Inst.BitCast).?), + .ret => return self.genRet(inst.cast(ir.Inst.Ret).?), + .cmp => return self.genCmp(inst.cast(ir.Inst.Cmp).?), + .condbr => return self.genCondBr(inst.cast(ir.Inst.CondBr).?), + .isnull => return self.genIsNull(inst.cast(ir.Inst.IsNull).?), + .isnonnull => return self.genIsNonNull(inst.cast(ir.Inst.IsNonNull).?), } } - fn genPanic(self: *Function, src: usize) !MCValue { - // TODO change this to call the panic function + fn genBreakpoint(self: *Function, src: usize) !MCValue { switch (self.module.target.cpu.arch) { .i386, .x86_64 => { try self.code.append(0xcc); // int3 }, - else => return self.fail(src, "TODO implement panic for {}", .{self.module.target.cpu.arch}), + else => return self.fail(src, "TODO implement @breakpoint() for {}", .{self.module.target.cpu.arch}), } return .unreach; } - fn genRet(self: *Function, src: usize) !void { - // TODO change this to call the panic function + fn genRet(self: *Function, inst: *ir.Inst.Ret) !MCValue { switch (self.module.target.cpu.arch) { .i386, .x86_64 => { try self.code.append(0xc3); // ret }, - else => return self.fail(src, "TODO implement ret for {}", .{self.module.target.cpu.arch}), + else => return self.fail(inst.base.src, "TODO implement return for {}", .{self.module.target.cpu.arch}), + } + return .unreach; + } + + fn genCmp(self: *Function, inst: *ir.Inst.Cmp) !MCValue { + switch (self.module.target.cpu.arch) { + else => return self.fail(inst.base.src, "TODO implement cmp for {}", .{self.module.target.cpu.arch}), + } + } + + fn genCondBr(self: *Function, inst: *ir.Inst.CondBr) !MCValue { + switch (self.module.target.cpu.arch) { + else => return self.fail(inst.base.src, "TODO implement condbr for {}", .{self.module.target.cpu.arch}), + } + } + + fn genIsNull(self: *Function, inst: *ir.Inst.IsNull) !MCValue { + switch (self.module.target.cpu.arch) { + else => return self.fail(inst.base.src, "TODO implement isnull for {}", .{self.module.target.cpu.arch}), + } + } + + fn genIsNonNull(self: *Function, inst: *ir.Inst.IsNonNull) !MCValue { + // Here you can specialize this instruction if it makes sense to, otherwise the default + // will call genIsNull and invert the result. 
+ switch (self.module.target.cpu.arch) { + else => return self.fail(inst.base.src, "TODO call genIsNull and invert the result ", .{}), } } @@ -501,11 +532,19 @@ fn Reg(comptime arch: Target.Cpu.Arch) type { bh, ch, dh, + bph, + sph, + sih, + dih, al, bl, cl, dl, + bpl, + spl, + sil, + dil, r8b, r9b, r10b, diff --git a/src-self-hosted/ir.zig b/src-self-hosted/ir.zig index 310591e629..6e58236ca8 100644 --- a/src-self-hosted/ir.zig +++ b/src-self-hosted/ir.zig @@ -4,10 +4,12 @@ const Allocator = std.mem.Allocator; const Value = @import("value.zig").Value; const Type = @import("type.zig").Type; const assert = std.debug.assert; -const text = @import("ir/text.zig"); -const BigInt = std.math.big.Int; +const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; const Target = std.Target; +pub const text = @import("ir/text.zig"); + /// These are in-memory, analyzed instructions. See `text.Inst` for the representation /// of instructions that correspond to the ZIR text format. /// This struct owns the `Value` and `Type` memory. When the struct is deallocated, @@ -20,11 +22,17 @@ pub const Inst = struct { src: usize, pub const Tag = enum { - unreach, - constant, assembly, - ptrtoint, bitcast, + breakpoint, + cmp, + condbr, + constant, + isnonnull, + isnull, + ptrtoint, + ret, + unreach, }; pub fn cast(base: *Inst, comptime T: type) ?*T { @@ -40,30 +48,13 @@ pub const Inst = struct { /// Returns `null` if runtime-known. pub fn value(base: *Inst) ?Value { - return switch (base.tag) { - .unreach => Value.initTag(.noreturn_value), - .constant => base.cast(Constant).?.val, + if (base.ty.onePossibleValue()) + return Value.initTag(.the_one_possible_value); - .assembly, - .ptrtoint, - .bitcast, - => null, - }; + const inst = base.cast(Constant) orelse return null; + return inst.val; } - pub const Unreach = struct { - pub const base_tag = Tag.unreach; - base: Inst, - args: void, - }; - - pub const Constant = struct { - pub const base_tag = Tag.constant; - base: Inst, - - val: Value, - }; - pub const Assembly = struct { pub const base_tag = Tag.assembly; base: Inst, @@ -78,6 +69,68 @@ pub const Inst = struct { }, }; + pub const BitCast = struct { + pub const base_tag = Tag.bitcast; + + base: Inst, + args: struct { + operand: *Inst, + }, + }; + + pub const Breakpoint = struct { + pub const base_tag = Tag.breakpoint; + base: Inst, + args: void, + }; + + pub const Cmp = struct { + pub const base_tag = Tag.cmp; + + base: Inst, + args: struct { + lhs: *Inst, + op: std.math.CompareOperator, + rhs: *Inst, + }, + }; + + pub const CondBr = struct { + pub const base_tag = Tag.condbr; + + base: Inst, + args: struct { + condition: *Inst, + true_body: Module.Body, + false_body: Module.Body, + }, + }; + + pub const Constant = struct { + pub const base_tag = Tag.constant; + base: Inst, + + val: Value, + }; + + pub const IsNonNull = struct { + pub const base_tag = Tag.isnonnull; + + base: Inst, + args: struct { + operand: *Inst, + }, + }; + + pub const IsNull = struct { + pub const base_tag = Tag.isnull; + + base: Inst, + args: struct { + operand: *Inst, + }, + }; + pub const PtrToInt = struct { pub const base_tag = Tag.ptrtoint; @@ -87,13 +140,16 @@ pub const Inst = struct { }, }; - pub const BitCast = struct { - pub const base_tag = Tag.bitcast; - + pub const Ret = struct { + pub const base_tag = Tag.ret; base: Inst, - args: struct { - operand: *Inst, - }, + args: void, + }; + + pub const Unreach = struct { + pub const base_tag = Tag.unreach; + base: Inst, + args: void, }; }; @@ -108,6 
+164,10 @@ pub const Module = struct { arena: std.heap.ArenaAllocator, fns: []Fn, target: Target, + link_mode: std.builtin.LinkMode, + output_mode: std.builtin.OutputMode, + object_format: std.Target.ObjectFormat, + optimize_mode: std.builtin.Mode, pub const Export = struct { name: []const u8, @@ -117,13 +177,21 @@ pub const Module = struct { pub const Fn = struct { analysis_status: enum { in_progress, failure, success }, - body: []*Inst, + body: Body, fn_type: Type, }; + pub const Body = struct { + instructions: []*Inst, + }; + pub fn deinit(self: *Module, allocator: *Allocator) void { allocator.free(self.exports); allocator.free(self.errors); + for (self.fns) |f| { + allocator.free(f.body.instructions); + } + allocator.free(self.fns); self.arena.deinit(); self.* = undefined; } @@ -134,7 +202,15 @@ pub const ErrorMsg = struct { msg: []const u8, }; -pub fn analyze(allocator: *Allocator, old_module: text.Module, target: Target) !Module { +pub const AnalyzeOptions = struct { + target: Target, + output_mode: std.builtin.OutputMode, + link_mode: std.builtin.LinkMode, + object_format: ?std.Target.ObjectFormat = null, + optimize_mode: std.builtin.Mode, +}; + +pub fn analyze(allocator: *Allocator, old_module: text.Module, options: AnalyzeOptions) !Module { var ctx = Analyze{ .allocator = allocator, .arena = std.heap.ArenaAllocator.init(allocator), @@ -143,7 +219,10 @@ pub fn analyze(allocator: *Allocator, old_module: text.Module, target: Target) ! .decl_table = std.AutoHashMap(*text.Inst, Analyze.NewDecl).init(allocator), .exports = std.ArrayList(Module.Export).init(allocator), .fns = std.ArrayList(Module.Fn).init(allocator), - .target = target, + .target = options.target, + .optimize_mode = options.optimize_mode, + .link_mode = options.link_mode, + .output_mode = options.output_mode, }; defer ctx.errors.deinit(); defer ctx.decl_table.deinit(); @@ -162,7 +241,11 @@ pub fn analyze(allocator: *Allocator, old_module: text.Module, target: Target) ! 
.errors = ctx.errors.toOwnedSlice(), .fns = ctx.fns.toOwnedSlice(), .arena = ctx.arena, - .target = target, + .target = ctx.target, + .link_mode = ctx.link_mode, + .output_mode = ctx.output_mode, + .object_format = options.object_format orelse ctx.target.getObjectFormat(), + .optimize_mode = ctx.optimize_mode, }; } @@ -175,6 +258,9 @@ const Analyze = struct { exports: std.ArrayList(Module.Export), fns: std.ArrayList(Module.Fn), target: Target, + link_mode: std.builtin.LinkMode, + optimize_mode: std.builtin.Mode, + output_mode: std.builtin.OutputMode, const NewDecl = struct { /// null means a semantic analysis error happened @@ -187,10 +273,15 @@ const Analyze = struct { }; const Fn = struct { - body: std.ArrayList(*Inst), - inst_table: std.AutoHashMap(*text.Inst, NewInst), /// Index into Module fns array fn_index: usize, + inner_block: Block, + inst_table: std.AutoHashMap(*text.Inst, NewInst), + }; + + const Block = struct { + func: *Fn, + instructions: std.ArrayList(*Inst), }; const InnerError = error{ OutOfMemory, AnalysisFail }; @@ -203,9 +294,9 @@ const Analyze = struct { } } - fn resolveInst(self: *Analyze, opt_func: ?*Fn, old_inst: *text.Inst) InnerError!*Inst { - if (opt_func) |func| { - if (func.inst_table.get(old_inst)) |kv| { + fn resolveInst(self: *Analyze, opt_block: ?*Block, old_inst: *text.Inst) InnerError!*Inst { + if (opt_block) |block| { + if (block.func.inst_table.get(old_inst)) |kv| { return kv.value.ptr orelse return error.AnalysisFail; } } @@ -225,12 +316,12 @@ const Analyze = struct { } } - fn requireFunctionBody(self: *Analyze, func: ?*Fn, src: usize) !*Fn { - return func orelse return self.fail(src, "instruction illegal outside function body", .{}); + fn requireRuntimeBlock(self: *Analyze, block: ?*Block, src: usize) !*Block { + return block orelse return self.fail(src, "instruction illegal outside function body", .{}); } - fn resolveInstConst(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) InnerError!TypedValue { - const new_inst = try self.resolveInst(func, old_inst); + fn resolveInstConst(self: *Analyze, block: ?*Block, old_inst: *text.Inst) InnerError!TypedValue { + const new_inst = try self.resolveInst(block, old_inst); const val = try self.resolveConstValue(new_inst); return TypedValue{ .ty = new_inst.ty, @@ -239,28 +330,39 @@ const Analyze = struct { } fn resolveConstValue(self: *Analyze, base: *Inst) !Value { - return base.value() orelse return self.fail(base.src, "unable to resolve comptime value", .{}); + return (try self.resolveDefinedValue(base)) orelse + return self.fail(base.src, "unable to resolve comptime value", .{}); } - fn resolveConstString(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) ![]u8 { - const new_inst = try self.resolveInst(func, old_inst); + fn resolveDefinedValue(self: *Analyze, base: *Inst) !?Value { + if (base.value()) |val| { + if (val.isUndef()) { + return self.fail(base.src, "use of undefined value here causes undefined behavior", .{}); + } + return val; + } + return null; + } + + fn resolveConstString(self: *Analyze, block: ?*Block, old_inst: *text.Inst) ![]u8 { + const new_inst = try self.resolveInst(block, old_inst); const wanted_type = Type.initTag(.const_slice_u8); - const coerced_inst = try self.coerce(func, wanted_type, new_inst); + const coerced_inst = try self.coerce(block, wanted_type, new_inst); const val = try self.resolveConstValue(coerced_inst); return val.toAllocatedBytes(&self.arena.allocator); } - fn resolveType(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) !Type { - const new_inst = try 
self.resolveInst(func, old_inst); + fn resolveType(self: *Analyze, block: ?*Block, old_inst: *text.Inst) !Type { + const new_inst = try self.resolveInst(block, old_inst); const wanted_type = Type.initTag(.@"type"); - const coerced_inst = try self.coerce(func, wanted_type, new_inst); + const coerced_inst = try self.coerce(block, wanted_type, new_inst); const val = try self.resolveConstValue(coerced_inst); return val.toType(); } - fn analyzeExport(self: *Analyze, func: ?*Fn, export_inst: *text.Inst.Export) !void { - const symbol_name = try self.resolveConstString(func, export_inst.positionals.symbol_name); - const typed_value = try self.resolveInstConst(func, export_inst.positionals.value); + fn analyzeExport(self: *Analyze, block: ?*Block, export_inst: *text.Inst.Export) !void { + const symbol_name = try self.resolveConstString(block, export_inst.positionals.symbol_name); + const typed_value = try self.resolveInstConst(block, export_inst.positionals.value); switch (typed_value.ty.zigTypeTag()) { .Fn => {}, @@ -280,18 +382,18 @@ const Analyze = struct { /// TODO should not need the cast on the last parameter at the callsites fn addNewInstArgs( self: *Analyze, - func: *Fn, + block: *Block, src: usize, ty: Type, comptime T: type, args: Inst.Args(T), ) !*Inst { - const inst = try self.addNewInst(func, src, ty, T); + const inst = try self.addNewInst(block, src, ty, T); inst.args = args; return &inst.base; } - fn addNewInst(self: *Analyze, func: *Fn, src: usize, ty: Type, comptime T: type) !*T { + fn addNewInst(self: *Analyze, block: *Block, src: usize, ty: Type, comptime T: type) !*T { const inst = try self.arena.allocator.create(T); inst.* = .{ .base = .{ @@ -301,7 +403,7 @@ const Analyze = struct { }, .args = undefined, }; - try func.body.append(&inst.base); + try block.instructions.append(&inst.base); return inst; } @@ -344,7 +446,21 @@ const Analyze = struct { fn constVoid(self: *Analyze, src: usize) !*Inst { return self.constInst(src, .{ .ty = Type.initTag(.void), - .val = Value.initTag(.void_value), + .val = Value.initTag(.the_one_possible_value), + }); + } + + fn constUndef(self: *Analyze, src: usize, ty: Type) !*Inst { + return self.constInst(src, .{ + .ty = ty, + .val = Value.initTag(.undef), + }); + } + + fn constBool(self: *Analyze, src: usize, v: bool) !*Inst { + return self.constInst(src, .{ + .ty = Type.initTag(.bool), + .val = ([2]Value{ Value.initTag(.bool_false), Value.initTag(.bool_true) })[@boolToInt(v)], }); } @@ -368,34 +484,38 @@ const Analyze = struct { }); } - fn constIntBig(self: *Analyze, src: usize, ty: Type, big_int: BigInt) !*Inst { - if (big_int.isPositive()) { + fn constIntBig(self: *Analyze, src: usize, ty: Type, big_int: BigIntConst) !*Inst { + const val_payload = if (big_int.positive) blk: { if (big_int.to(u64)) |x| { return self.constIntUnsigned(src, ty, x); } else |err| switch (err) { error.NegativeIntoUnsigned => unreachable, error.TargetTooSmall => {}, // handled below } - } else { + const big_int_payload = try self.arena.allocator.create(Value.Payload.IntBigPositive); + big_int_payload.* = .{ .limbs = big_int.limbs }; + break :blk &big_int_payload.base; + } else blk: { if (big_int.to(i64)) |x| { return self.constIntSigned(src, ty, x); } else |err| switch (err) { error.NegativeIntoUnsigned => unreachable, error.TargetTooSmall => {}, // handled below } - } - - const big_int_payload = try self.arena.allocator.create(Value.Payload.IntBig); - big_int_payload.* = .{ .big_int = big_int }; + const big_int_payload = try 
self.arena.allocator.create(Value.Payload.IntBigNegative); + big_int_payload.* = .{ .limbs = big_int.limbs }; + break :blk &big_int_payload.base; + }; return self.constInst(src, .{ .ty = ty, - .val = Value.initPayload(&big_int_payload.base), + .val = Value.initPayload(val_payload), }); } - fn analyzeInst(self: *Analyze, func: ?*Fn, old_inst: *text.Inst) InnerError!*Inst { + fn analyzeInst(self: *Analyze, block: ?*Block, old_inst: *text.Inst) InnerError!*Inst { switch (old_inst.tag) { + .breakpoint => return self.analyzeInstBreakpoint(block, old_inst.cast(text.Inst.Breakpoint).?), .str => { // We can use this reference because Inst.Const's Value is arena-allocated. // The value would get copied to a MemoryCell before the `text.Inst.Str` lifetime ends. @@ -406,35 +526,49 @@ const Analyze = struct { const big_int = old_inst.cast(text.Inst.Int).?.positionals.int; return self.constIntBig(old_inst.src, Type.initTag(.comptime_int), big_int); }, - .ptrtoint => return self.analyzeInstPtrToInt(func, old_inst.cast(text.Inst.PtrToInt).?), - .fieldptr => return self.analyzeInstFieldPtr(func, old_inst.cast(text.Inst.FieldPtr).?), - .deref => return self.analyzeInstDeref(func, old_inst.cast(text.Inst.Deref).?), - .as => return self.analyzeInstAs(func, old_inst.cast(text.Inst.As).?), - .@"asm" => return self.analyzeInstAsm(func, old_inst.cast(text.Inst.Asm).?), - .@"unreachable" => return self.analyzeInstUnreachable(func, old_inst.cast(text.Inst.Unreachable).?), - .@"fn" => return self.analyzeInstFn(func, old_inst.cast(text.Inst.Fn).?), + .ptrtoint => return self.analyzeInstPtrToInt(block, old_inst.cast(text.Inst.PtrToInt).?), + .fieldptr => return self.analyzeInstFieldPtr(block, old_inst.cast(text.Inst.FieldPtr).?), + .deref => return self.analyzeInstDeref(block, old_inst.cast(text.Inst.Deref).?), + .as => return self.analyzeInstAs(block, old_inst.cast(text.Inst.As).?), + .@"asm" => return self.analyzeInstAsm(block, old_inst.cast(text.Inst.Asm).?), + .@"unreachable" => return self.analyzeInstUnreachable(block, old_inst.cast(text.Inst.Unreachable).?), + .@"return" => return self.analyzeInstRet(block, old_inst.cast(text.Inst.Return).?), + .@"fn" => return self.analyzeInstFn(block, old_inst.cast(text.Inst.Fn).?), .@"export" => { - try self.analyzeExport(func, old_inst.cast(text.Inst.Export).?); + try self.analyzeExport(block, old_inst.cast(text.Inst.Export).?); return self.constVoid(old_inst.src); }, - .primitive => return self.analyzeInstPrimitive(func, old_inst.cast(text.Inst.Primitive).?), - .fntype => return self.analyzeInstFnType(func, old_inst.cast(text.Inst.FnType).?), - .intcast => return self.analyzeInstIntCast(func, old_inst.cast(text.Inst.IntCast).?), - .bitcast => return self.analyzeInstBitCast(func, old_inst.cast(text.Inst.BitCast).?), - .elemptr => return self.analyzeInstElemPtr(func, old_inst.cast(text.Inst.ElemPtr).?), - .add => return self.analyzeInstAdd(func, old_inst.cast(text.Inst.Add).?), + .primitive => return self.analyzeInstPrimitive(old_inst.cast(text.Inst.Primitive).?), + .fntype => return self.analyzeInstFnType(block, old_inst.cast(text.Inst.FnType).?), + .intcast => return self.analyzeInstIntCast(block, old_inst.cast(text.Inst.IntCast).?), + .bitcast => return self.analyzeInstBitCast(block, old_inst.cast(text.Inst.BitCast).?), + .elemptr => return self.analyzeInstElemPtr(block, old_inst.cast(text.Inst.ElemPtr).?), + .add => return self.analyzeInstAdd(block, old_inst.cast(text.Inst.Add).?), + .cmp => return self.analyzeInstCmp(block, old_inst.cast(text.Inst.Cmp).?), + .condbr => 
return self.analyzeInstCondBr(block, old_inst.cast(text.Inst.CondBr).?), + .isnull => return self.analyzeInstIsNull(block, old_inst.cast(text.Inst.IsNull).?), + .isnonnull => return self.analyzeInstIsNonNull(block, old_inst.cast(text.Inst.IsNonNull).?), } } - fn analyzeInstFn(self: *Analyze, opt_func: ?*Fn, fn_inst: *text.Inst.Fn) InnerError!*Inst { - const fn_type = try self.resolveType(opt_func, fn_inst.positionals.fn_type); + fn analyzeInstBreakpoint(self: *Analyze, block: ?*Block, inst: *text.Inst.Breakpoint) InnerError!*Inst { + const b = try self.requireRuntimeBlock(block, inst.base.src); + return self.addNewInstArgs(b, inst.base.src, Type.initTag(.void), Inst.Breakpoint, Inst.Args(Inst.Breakpoint){}); + } + + fn analyzeInstFn(self: *Analyze, block: ?*Block, fn_inst: *text.Inst.Fn) InnerError!*Inst { + const fn_type = try self.resolveType(block, fn_inst.positionals.fn_type); var new_func: Fn = .{ - .body = std.ArrayList(*Inst).init(self.allocator), - .inst_table = std.AutoHashMap(*text.Inst, NewInst).init(self.allocator), .fn_index = self.fns.items.len, + .inner_block = .{ + .func = undefined, + .instructions = std.ArrayList(*Inst).init(self.allocator), + }, + .inst_table = std.AutoHashMap(*text.Inst, NewInst).init(self.allocator), }; - defer new_func.body.deinit(); + new_func.inner_block.func = &new_func; + defer new_func.inner_block.instructions.deinit(); defer new_func.inst_table.deinit(); // Don't hang on to a reference to this when analyzing body instructions, since the memory // could become invalid. @@ -444,18 +578,11 @@ const Analyze = struct { .body = undefined, }; - for (fn_inst.positionals.body.instructions) |src_inst| { - const new_inst = self.analyzeInst(&new_func, src_inst) catch |err| { - self.fns.items[new_func.fn_index].analysis_status = .failure; - try new_func.inst_table.putNoClobber(src_inst, .{ .ptr = null }); - return err; - }; - try new_func.inst_table.putNoClobber(src_inst, .{ .ptr = new_inst }); - } + try self.analyzeBody(&new_func.inner_block, fn_inst.positionals.body); const f = &self.fns.items[new_func.fn_index]; f.analysis_status = .success; - f.body = new_func.body.toOwnedSlice(); + f.body = .{ .instructions = new_func.inner_block.instructions.toOwnedSlice() }; const fn_payload = try self.arena.allocator.create(Value.Payload.Function); fn_payload.* = .{ .index = new_func.fn_index }; @@ -466,8 +593,8 @@ const Analyze = struct { }); } - fn analyzeInstFnType(self: *Analyze, func: ?*Fn, fntype: *text.Inst.FnType) InnerError!*Inst { - const return_type = try self.resolveType(func, fntype.positionals.return_type); + fn analyzeInstFnType(self: *Analyze, block: ?*Block, fntype: *text.Inst.FnType) InnerError!*Inst { + const return_type = try self.resolveType(block, fntype.positionals.return_type); if (return_type.zigTypeTag() == .NoReturn and fntype.positionals.param_types.len == 0 and @@ -476,33 +603,40 @@ const Analyze = struct { return self.constType(fntype.base.src, Type.initTag(.fn_naked_noreturn_no_args)); } + if (return_type.zigTypeTag() == .Void and + fntype.positionals.param_types.len == 0 and + fntype.kw_args.cc == .C) + { + return self.constType(fntype.base.src, Type.initTag(.fn_ccc_void_no_args)); + } + return self.fail(fntype.base.src, "TODO implement fntype instruction more", .{}); } - fn analyzeInstPrimitive(self: *Analyze, func: ?*Fn, primitive: *text.Inst.Primitive) InnerError!*Inst { + fn analyzeInstPrimitive(self: *Analyze, primitive: *text.Inst.Primitive) InnerError!*Inst { return self.constType(primitive.base.src, 
primitive.positionals.tag.toType()); } - fn analyzeInstAs(self: *Analyze, func: ?*Fn, as: *text.Inst.As) InnerError!*Inst { - const dest_type = try self.resolveType(func, as.positionals.dest_type); - const new_inst = try self.resolveInst(func, as.positionals.value); - return self.coerce(func, dest_type, new_inst); + fn analyzeInstAs(self: *Analyze, block: ?*Block, as: *text.Inst.As) InnerError!*Inst { + const dest_type = try self.resolveType(block, as.positionals.dest_type); + const new_inst = try self.resolveInst(block, as.positionals.value); + return self.coerce(block, dest_type, new_inst); } - fn analyzeInstPtrToInt(self: *Analyze, func: ?*Fn, ptrtoint: *text.Inst.PtrToInt) InnerError!*Inst { - const ptr = try self.resolveInst(func, ptrtoint.positionals.ptr); + fn analyzeInstPtrToInt(self: *Analyze, block: ?*Block, ptrtoint: *text.Inst.PtrToInt) InnerError!*Inst { + const ptr = try self.resolveInst(block, ptrtoint.positionals.ptr); if (ptr.ty.zigTypeTag() != .Pointer) { return self.fail(ptrtoint.positionals.ptr.src, "expected pointer, found '{}'", .{ptr.ty}); } // TODO handle known-pointer-address - const f = try self.requireFunctionBody(func, ptrtoint.base.src); + const b = try self.requireRuntimeBlock(block, ptrtoint.base.src); const ty = Type.initTag(.usize); - return self.addNewInstArgs(f, ptrtoint.base.src, ty, Inst.PtrToInt, Inst.Args(Inst.PtrToInt){ .ptr = ptr }); + return self.addNewInstArgs(b, ptrtoint.base.src, ty, Inst.PtrToInt, Inst.Args(Inst.PtrToInt){ .ptr = ptr }); } - fn analyzeInstFieldPtr(self: *Analyze, func: ?*Fn, fieldptr: *text.Inst.FieldPtr) InnerError!*Inst { - const object_ptr = try self.resolveInst(func, fieldptr.positionals.object_ptr); - const field_name = try self.resolveConstString(func, fieldptr.positionals.field_name); + fn analyzeInstFieldPtr(self: *Analyze, block: ?*Block, fieldptr: *text.Inst.FieldPtr) InnerError!*Inst { + const object_ptr = try self.resolveInst(block, fieldptr.positionals.object_ptr); + const field_name = try self.resolveConstString(block, fieldptr.positionals.field_name); const elem_ty = switch (object_ptr.ty.zigTypeTag()) { .Pointer => object_ptr.ty.elemType(), @@ -533,9 +667,9 @@ const Analyze = struct { } } - fn analyzeInstIntCast(self: *Analyze, func: ?*Fn, intcast: *text.Inst.IntCast) InnerError!*Inst { - const dest_type = try self.resolveType(func, intcast.positionals.dest_type); - const new_inst = try self.resolveInst(func, intcast.positionals.value); + fn analyzeInstIntCast(self: *Analyze, block: ?*Block, intcast: *text.Inst.IntCast) InnerError!*Inst { + const dest_type = try self.resolveType(block, intcast.positionals.dest_type); + const new_inst = try self.resolveInst(block, intcast.positionals.value); const dest_is_comptime_int = switch (dest_type.zigTypeTag()) { .ComptimeInt => true, @@ -559,22 +693,22 @@ const Analyze = struct { } if (dest_is_comptime_int or new_inst.value() != null) { - return self.coerce(func, dest_type, new_inst); + return self.coerce(block, dest_type, new_inst); } return self.fail(intcast.base.src, "TODO implement analyze widen or shorten int", .{}); } - fn analyzeInstBitCast(self: *Analyze, func: ?*Fn, inst: *text.Inst.BitCast) InnerError!*Inst { - const dest_type = try self.resolveType(func, inst.positionals.dest_type); - const operand = try self.resolveInst(func, inst.positionals.operand); - return self.bitcast(func, dest_type, operand); + fn analyzeInstBitCast(self: *Analyze, block: ?*Block, inst: *text.Inst.BitCast) InnerError!*Inst { + const dest_type = try self.resolveType(block, 
inst.positionals.dest_type); + const operand = try self.resolveInst(block, inst.positionals.operand); + return self.bitcast(block, dest_type, operand); } - fn analyzeInstElemPtr(self: *Analyze, func: ?*Fn, inst: *text.Inst.ElemPtr) InnerError!*Inst { - const array_ptr = try self.resolveInst(func, inst.positionals.array_ptr); - const uncasted_index = try self.resolveInst(func, inst.positionals.index); - const elem_index = try self.coerce(func, Type.initTag(.usize), uncasted_index); + fn analyzeInstElemPtr(self: *Analyze, block: ?*Block, inst: *text.Inst.ElemPtr) InnerError!*Inst { + const array_ptr = try self.resolveInst(block, inst.positionals.array_ptr); + const uncasted_index = try self.resolveInst(block, inst.positionals.index); + const elem_index = try self.coerce(block, Type.initTag(.usize), uncasted_index); if (array_ptr.ty.isSinglePointer() and array_ptr.ty.elemType().zigTypeTag() == .Array) { if (array_ptr.value()) |array_ptr_val| { @@ -602,28 +736,44 @@ const Analyze = struct { return self.fail(inst.base.src, "TODO implement more analyze elemptr", .{}); } - fn analyzeInstAdd(self: *Analyze, func: ?*Fn, inst: *text.Inst.Add) InnerError!*Inst { - const lhs = try self.resolveInst(func, inst.positionals.lhs); - const rhs = try self.resolveInst(func, inst.positionals.rhs); + fn analyzeInstAdd(self: *Analyze, block: ?*Block, inst: *text.Inst.Add) InnerError!*Inst { + const lhs = try self.resolveInst(block, inst.positionals.lhs); + const rhs = try self.resolveInst(block, inst.positionals.rhs); if (lhs.ty.zigTypeTag() == .Int and rhs.ty.zigTypeTag() == .Int) { if (lhs.value()) |lhs_val| { if (rhs.value()) |rhs_val| { - const lhs_bigint = try lhs_val.toBigInt(&self.arena.allocator); - const rhs_bigint = try rhs_val.toBigInt(&self.arena.allocator); - var result_bigint = try BigInt.init(&self.arena.allocator); - try BigInt.add(&result_bigint, lhs_bigint, rhs_bigint); + // TODO is this a performance issue? maybe we should try the operation without + // resorting to BigInt first. 
+ var lhs_space: Value.BigIntSpace = undefined; + var rhs_space: Value.BigIntSpace = undefined; + const lhs_bigint = lhs_val.toBigInt(&lhs_space); + const rhs_bigint = rhs_val.toBigInt(&rhs_space); + const limbs = try self.arena.allocator.alloc( + std.math.big.Limb, + std.math.max(lhs_bigint.limbs.len, rhs_bigint.limbs.len) + 1, + ); + var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result_bigint.add(lhs_bigint, rhs_bigint); + const result_limbs = result_bigint.limbs[0..result_bigint.len]; if (!lhs.ty.eql(rhs.ty)) { return self.fail(inst.base.src, "TODO implement peer type resolution", .{}); } - const val_payload = try self.arena.allocator.create(Value.Payload.IntBig); - val_payload.* = .{ .big_int = result_bigint }; + const val_payload = if (result_bigint.positive) blk: { + const val_payload = try self.arena.allocator.create(Value.Payload.IntBigPositive); + val_payload.* = .{ .limbs = result_limbs }; + break :blk &val_payload.base; + } else blk: { + const val_payload = try self.arena.allocator.create(Value.Payload.IntBigNegative); + val_payload.* = .{ .limbs = result_limbs }; + break :blk &val_payload.base; + }; return self.constInst(inst.base.src, .{ .ty = lhs.ty, - .val = Value.initPayload(&val_payload.base), + .val = Value.initPayload(val_payload), }); } } @@ -632,8 +782,8 @@ const Analyze = struct { return self.fail(inst.base.src, "TODO implement more analyze add", .{}); } - fn analyzeInstDeref(self: *Analyze, func: ?*Fn, deref: *text.Inst.Deref) InnerError!*Inst { - const ptr = try self.resolveInst(func, deref.positionals.ptr); + fn analyzeInstDeref(self: *Analyze, block: ?*Block, deref: *text.Inst.Deref) InnerError!*Inst { + const ptr = try self.resolveInst(block, deref.positionals.ptr); const elem_ty = switch (ptr.ty.zigTypeTag()) { .Pointer => ptr.ty.elemType(), else => return self.fail(deref.positionals.ptr.src, "expected pointer, found '{}'", .{ptr.ty}), @@ -648,28 +798,28 @@ const Analyze = struct { return self.fail(deref.base.src, "TODO implement runtime deref", .{}); } - fn analyzeInstAsm(self: *Analyze, func: ?*Fn, assembly: *text.Inst.Asm) InnerError!*Inst { - const return_type = try self.resolveType(func, assembly.positionals.return_type); - const asm_source = try self.resolveConstString(func, assembly.positionals.asm_source); - const output = if (assembly.kw_args.output) |o| try self.resolveConstString(func, o) else null; + fn analyzeInstAsm(self: *Analyze, block: ?*Block, assembly: *text.Inst.Asm) InnerError!*Inst { + const return_type = try self.resolveType(block, assembly.positionals.return_type); + const asm_source = try self.resolveConstString(block, assembly.positionals.asm_source); + const output = if (assembly.kw_args.output) |o| try self.resolveConstString(block, o) else null; const inputs = try self.arena.allocator.alloc([]const u8, assembly.kw_args.inputs.len); const clobbers = try self.arena.allocator.alloc([]const u8, assembly.kw_args.clobbers.len); const args = try self.arena.allocator.alloc(*Inst, assembly.kw_args.args.len); for (inputs) |*elem, i| { - elem.* = try self.resolveConstString(func, assembly.kw_args.inputs[i]); + elem.* = try self.resolveConstString(block, assembly.kw_args.inputs[i]); } for (clobbers) |*elem, i| { - elem.* = try self.resolveConstString(func, assembly.kw_args.clobbers[i]); + elem.* = try self.resolveConstString(block, assembly.kw_args.clobbers[i]); } for (args) |*elem, i| { - const arg = try self.resolveInst(func, assembly.kw_args.args[i]); - elem.* = try self.coerce(func, 
Type.initTag(.usize), arg); + const arg = try self.resolveInst(block, assembly.kw_args.args[i]); + elem.* = try self.coerce(block, Type.initTag(.usize), arg); } - const f = try self.requireFunctionBody(func, assembly.base.src); - return self.addNewInstArgs(f, assembly.base.src, return_type, Inst.Assembly, Inst.Args(Inst.Assembly){ + const b = try self.requireRuntimeBlock(block, assembly.base.src); + return self.addNewInstArgs(b, assembly.base.src, return_type, Inst.Assembly, Inst.Args(Inst.Assembly){ .asm_source = asm_source, .is_volatile = assembly.kw_args.@"volatile", .output = output, @@ -679,19 +829,370 @@ const Analyze = struct { }); } - fn analyzeInstUnreachable(self: *Analyze, func: ?*Fn, unreach: *text.Inst.Unreachable) InnerError!*Inst { - const f = try self.requireFunctionBody(func, unreach.base.src); - return self.addNewInstArgs(f, unreach.base.src, Type.initTag(.noreturn), Inst.Unreach, {}); + fn analyzeInstCmp(self: *Analyze, block: ?*Block, inst: *text.Inst.Cmp) InnerError!*Inst { + const lhs = try self.resolveInst(block, inst.positionals.lhs); + const rhs = try self.resolveInst(block, inst.positionals.rhs); + const op = inst.positionals.op; + + const is_equality_cmp = switch (op) { + .eq, .neq => true, + else => false, + }; + const lhs_ty_tag = lhs.ty.zigTypeTag(); + const rhs_ty_tag = rhs.ty.zigTypeTag(); + if (is_equality_cmp and lhs_ty_tag == .Null and rhs_ty_tag == .Null) { + // null == null, null != null + return self.constBool(inst.base.src, op == .eq); + } else if (is_equality_cmp and + ((lhs_ty_tag == .Null and rhs_ty_tag == .Optional) or + rhs_ty_tag == .Null and lhs_ty_tag == .Optional)) + { + // comparing null with optionals + const opt_operand = if (lhs_ty_tag == .Optional) lhs else rhs; + if (opt_operand.value()) |opt_val| { + const is_null = opt_val.isNull(); + return self.constBool(inst.base.src, if (op == .eq) is_null else !is_null); + } + const b = try self.requireRuntimeBlock(block, inst.base.src); + switch (op) { + .eq => return self.addNewInstArgs( + b, + inst.base.src, + Type.initTag(.bool), + Inst.IsNull, + Inst.Args(Inst.IsNull){ .operand = opt_operand }, + ), + .neq => return self.addNewInstArgs( + b, + inst.base.src, + Type.initTag(.bool), + Inst.IsNonNull, + Inst.Args(Inst.IsNonNull){ .operand = opt_operand }, + ), + else => unreachable, + } + } else if (is_equality_cmp and + ((lhs_ty_tag == .Null and rhs.ty.isCPtr()) or (rhs_ty_tag == .Null and lhs.ty.isCPtr()))) + { + return self.fail(inst.base.src, "TODO implement C pointer cmp", .{}); + } else if (lhs_ty_tag == .Null or rhs_ty_tag == .Null) { + const non_null_type = if (lhs_ty_tag == .Null) rhs.ty else lhs.ty; + return self.fail(inst.base.src, "comparison of '{}' with null", .{non_null_type}); + } else if (is_equality_cmp and + ((lhs_ty_tag == .EnumLiteral and rhs_ty_tag == .Union) or + (rhs_ty_tag == .EnumLiteral and lhs_ty_tag == .Union))) + { + return self.fail(inst.base.src, "TODO implement equality comparison between a union's tag value and an enum literal", .{}); + } else if (lhs_ty_tag == .ErrorSet and rhs_ty_tag == .ErrorSet) { + if (!is_equality_cmp) { + return self.fail(inst.base.src, "{} operator not allowed for errors", .{@tagName(op)}); + } + return self.fail(inst.base.src, "TODO implement equality comparison between errors", .{}); + } else if (lhs.ty.isNumeric() and rhs.ty.isNumeric()) { + // This operation allows any combination of integer and float types, regardless of the + // signed-ness, comptime-ness, and bit-width. So peer type resolution is incorrect for + // numeric types. 
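+ // Instead, cmpNumeric picks a common comparison type itself: the wider float type when + // both operands are floats, or an integer wide enough for both operands, with an extra + // sign bit when signedness is mixed.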
+ return self.cmpNumeric(block, inst.base.src, lhs, rhs, op); + } + return self.fail(inst.base.src, "TODO implement more cmp analysis", .{}); } - fn coerce(self: *Analyze, func: ?*Fn, dest_type: Type, inst: *Inst) !*Inst { + fn analyzeInstIsNull(self: *Analyze, block: ?*Block, inst: *text.Inst.IsNull) InnerError!*Inst { + const operand = try self.resolveInst(block, inst.positionals.operand); + return self.analyzeIsNull(block, inst.base.src, operand, true); + } + + fn analyzeInstIsNonNull(self: *Analyze, block: ?*Block, inst: *text.Inst.IsNonNull) InnerError!*Inst { + const operand = try self.resolveInst(block, inst.positionals.operand); + return self.analyzeIsNull(block, inst.base.src, operand, false); + } + + fn analyzeInstCondBr(self: *Analyze, block: ?*Block, inst: *text.Inst.CondBr) InnerError!*Inst { + const uncasted_cond = try self.resolveInst(block, inst.positionals.condition); + const cond = try self.coerce(block, Type.initTag(.bool), uncasted_cond); + + if (try self.resolveDefinedValue(cond)) |cond_val| { + const body = if (cond_val.toBool()) &inst.positionals.true_body else &inst.positionals.false_body; + try self.analyzeBody(block, body.*); + return self.constVoid(inst.base.src); + } + + const parent_block = try self.requireRuntimeBlock(block, inst.base.src); + + var true_block: Block = .{ + .func = parent_block.func, + .instructions = std.ArrayList(*Inst).init(self.allocator), + }; + defer true_block.instructions.deinit(); + try self.analyzeBody(&true_block, inst.positionals.true_body); + + var false_block: Block = .{ + .func = parent_block.func, + .instructions = std.ArrayList(*Inst).init(self.allocator), + }; + defer false_block.instructions.deinit(); + try self.analyzeBody(&false_block, inst.positionals.false_body); + + // Copy the instruction pointers to the arena memory + const true_instructions = try self.arena.allocator.alloc(*Inst, true_block.instructions.items.len); + const false_instructions = try self.arena.allocator.alloc(*Inst, false_block.instructions.items.len); + + mem.copy(*Inst, true_instructions, true_block.instructions.items); + mem.copy(*Inst, false_instructions, false_block.instructions.items); + + return self.addNewInstArgs(parent_block, inst.base.src, Type.initTag(.void), Inst.CondBr, Inst.Args(Inst.CondBr){ + .condition = cond, + .true_body = .{ .instructions = true_instructions }, + .false_body = .{ .instructions = false_instructions }, + }); + } + + fn wantSafety(self: *Analyze, block: ?*Block) bool { + return switch (self.optimize_mode) { + .Debug => true, + .ReleaseSafe => true, + .ReleaseFast => false, + .ReleaseSmall => false, + }; + } + + fn analyzeInstUnreachable(self: *Analyze, block: ?*Block, unreach: *text.Inst.Unreachable) InnerError!*Inst { + const b = try self.requireRuntimeBlock(block, unreach.base.src); + if (self.wantSafety(block)) { + // TODO Once we have a panic function to call, call it here instead of this. 
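+ // For now, emit a breakpoint instruction ahead of the noreturn Unreach so that hitting + // an unreachable in a safety-checked build mode still traps.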
+ _ = try self.addNewInstArgs(b, unreach.base.src, Type.initTag(.void), Inst.Breakpoint, {}); + } + return self.addNewInstArgs(b, unreach.base.src, Type.initTag(.noreturn), Inst.Unreach, {}); + } + + fn analyzeInstRet(self: *Analyze, block: ?*Block, inst: *text.Inst.Return) InnerError!*Inst { + const b = try self.requireRuntimeBlock(block, inst.base.src); + return self.addNewInstArgs(b, inst.base.src, Type.initTag(.noreturn), Inst.Ret, {}); + } + + fn analyzeBody(self: *Analyze, block: ?*Block, body: text.Module.Body) !void { + for (body.instructions) |src_inst| { + const new_inst = self.analyzeInst(block, src_inst) catch |err| { + if (block) |b| { + self.fns.items[b.func.fn_index].analysis_status = .failure; + try b.func.inst_table.putNoClobber(src_inst, .{ .ptr = null }); + } + return err; + }; + if (block) |b| try b.func.inst_table.putNoClobber(src_inst, .{ .ptr = new_inst }); + } + } + + fn analyzeIsNull( + self: *Analyze, + block: ?*Block, + src: usize, + operand: *Inst, + invert_logic: bool, + ) InnerError!*Inst { + return self.fail(src, "TODO implement analysis of isnull and isnotnull", .{}); + } + + /// Asserts that lhs and rhs types are both numeric. + fn cmpNumeric( + self: *Analyze, + block: ?*Block, + src: usize, + lhs: *Inst, + rhs: *Inst, + op: std.math.CompareOperator, + ) !*Inst { + assert(lhs.ty.isNumeric()); + assert(rhs.ty.isNumeric()); + + const lhs_ty_tag = lhs.ty.zigTypeTag(); + const rhs_ty_tag = rhs.ty.zigTypeTag(); + + if (lhs_ty_tag == .Vector and rhs_ty_tag == .Vector) { + if (lhs.ty.arrayLen() != rhs.ty.arrayLen()) { + return self.fail(src, "vector length mismatch: {} and {}", .{ + lhs.ty.arrayLen(), + rhs.ty.arrayLen(), + }); + } + return self.fail(src, "TODO implement support for vectors in cmpNumeric", .{}); + } else if (lhs_ty_tag == .Vector or rhs_ty_tag == .Vector) { + return self.fail(src, "mixed scalar and vector operands to comparison operator: '{}' and '{}'", .{ + lhs.ty, + rhs.ty, + }); + } + + if (lhs.value()) |lhs_val| { + if (rhs.value()) |rhs_val| { + return self.constBool(src, Value.compare(lhs_val, op, rhs_val)); + } + } + + // TODO handle comparisons against lazy zero values + // Some values can be compared against zero without being runtime known or without forcing + // a full resolution of their value, for example `@sizeOf(@Frame(function))` is known to + // always be nonzero, and we benefit from not forcing the full evaluation and stack frame layout + // of this function if we don't need to. + + // It must be a runtime comparison. + const b = try self.requireRuntimeBlock(block, src); + // For floats, emit a float comparison instruction. + const lhs_is_float = switch (lhs_ty_tag) { + .Float, .ComptimeFloat => true, + else => false, + }; + const rhs_is_float = switch (rhs_ty_tag) { + .Float, .ComptimeFloat => true, + else => false, + }; + if (lhs_is_float and rhs_is_float) { + // Implicit cast the smaller one to the larger one. 
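+ // A comptime_float operand takes the type of the runtime float operand; otherwise the + // operand with more float bits provides the destination type.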
+ const dest_type = x: { + if (lhs_ty_tag == .ComptimeFloat) { + break :x rhs.ty; + } else if (rhs_ty_tag == .ComptimeFloat) { + break :x lhs.ty; + } + if (lhs.ty.floatBits(self.target) >= rhs.ty.floatBits(self.target)) { + break :x lhs.ty; + } else { + break :x rhs.ty; + } + }; + const casted_lhs = try self.coerce(block, dest_type, lhs); + const casted_rhs = try self.coerce(block, dest_type, rhs); + return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, Inst.Args(Inst.Cmp){ + .lhs = casted_lhs, + .rhs = casted_rhs, + .op = op, + }); + } + // For mixed unsigned integer sizes, implicit cast both operands to the larger integer. + // For mixed signed and unsigned integers, implicit cast both operands to a signed + // integer with + 1 bit. + // For mixed floats and integers, extract the integer part from the float, cast that to + // a signed integer with mantissa bits + 1, and if there was any non-integral part of the float, + // add/subtract 1. + const lhs_is_signed = if (lhs.value()) |lhs_val| + lhs_val.compareWithZero(.lt) + else + (lhs.ty.isFloat() or lhs.ty.isSignedInt()); + const rhs_is_signed = if (rhs.value()) |rhs_val| + rhs_val.compareWithZero(.lt) + else + (rhs.ty.isFloat() or rhs.ty.isSignedInt()); + const dest_int_is_signed = lhs_is_signed or rhs_is_signed; + + var dest_float_type: ?Type = null; + + var lhs_bits: usize = undefined; + if (lhs.value()) |lhs_val| { + if (lhs_val.isUndef()) + return self.constUndef(src, Type.initTag(.bool)); + const is_unsigned = if (lhs_is_float) x: { + var bigint_space: Value.BigIntSpace = undefined; + var bigint = try lhs_val.toBigInt(&bigint_space).toManaged(self.allocator); + defer bigint.deinit(); + const zcmp = lhs_val.orderAgainstZero(); + if (lhs_val.floatHasFraction()) { + switch (op) { + .eq => return self.constBool(src, false), + .neq => return self.constBool(src, true), + else => {}, + } + if (zcmp == .lt) { + try bigint.addScalar(bigint.toConst(), -1); + } else { + try bigint.addScalar(bigint.toConst(), 1); + } + } + lhs_bits = bigint.toConst().bitCountTwosComp(); + break :x (zcmp != .lt); + } else x: { + lhs_bits = lhs_val.intBitCountTwosComp(); + break :x (lhs_val.orderAgainstZero() != .lt); + }; + lhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); + } else if (lhs_is_float) { + dest_float_type = lhs.ty; + } else { + const int_info = lhs.ty.intInfo(self.target); + lhs_bits = int_info.bits + @boolToInt(!int_info.signed and dest_int_is_signed); + } + + var rhs_bits: usize = undefined; + if (rhs.value()) |rhs_val| { + if (rhs_val.isUndef()) + return self.constUndef(src, Type.initTag(.bool)); + const is_unsigned = if (rhs_is_float) x: { + var bigint_space: Value.BigIntSpace = undefined; + var bigint = try rhs_val.toBigInt(&bigint_space).toManaged(self.allocator); + defer bigint.deinit(); + const zcmp = rhs_val.orderAgainstZero(); + if (rhs_val.floatHasFraction()) { + switch (op) { + .eq => return self.constBool(src, false), + .neq => return self.constBool(src, true), + else => {}, + } + if (zcmp == .lt) { + try bigint.addScalar(bigint.toConst(), -1); + } else { + try bigint.addScalar(bigint.toConst(), 1); + } + } + rhs_bits = bigint.toConst().bitCountTwosComp(); + break :x (zcmp != .lt); + } else x: { + rhs_bits = rhs_val.intBitCountTwosComp(); + break :x (rhs_val.orderAgainstZero() != .lt); + }; + rhs_bits += @boolToInt(is_unsigned and dest_int_is_signed); + } else if (rhs_is_float) { + dest_float_type = rhs.ty; + } else { + const int_info = rhs.ty.intInfo(self.target); + rhs_bits = int_info.bits + @boolToInt(!int_info.signed 
and dest_int_is_signed); + } + + const dest_type = if (dest_float_type) |ft| ft else blk: { + const max_bits = std.math.max(lhs_bits, rhs_bits); + const casted_bits = std.math.cast(u16, max_bits) catch |err| switch (err) { + error.Overflow => return self.fail(src, "{} exceeds maximum integer bit count", .{max_bits}), + }; + break :blk try self.makeIntType(dest_int_is_signed, casted_bits); + }; + const casted_lhs = try self.coerce(block, dest_type, lhs); + const casted_rhs = try self.coerce(block, dest_type, rhs); + + return self.addNewInstArgs(b, src, dest_type, Inst.Cmp, Inst.Args(Inst.Cmp){ + .lhs = casted_lhs, + .rhs = casted_rhs, + .op = op, + }); + } + + fn makeIntType(self: *Analyze, signed: bool, bits: u16) !Type { + if (signed) { + const int_payload = try self.arena.allocator.create(Type.Payload.IntSigned); + int_payload.* = .{ .bits = bits }; + return Type.initPayload(&int_payload.base); + } else { + const int_payload = try self.arena.allocator.create(Type.Payload.IntUnsigned); + int_payload.* = .{ .bits = bits }; + return Type.initPayload(&int_payload.base); + } + } + + fn coerce(self: *Analyze, block: ?*Block, dest_type: Type, inst: *Inst) !*Inst { // If the types are the same, we can return the operand. if (dest_type.eql(inst.ty)) return inst; const in_memory_result = coerceInMemoryAllowed(dest_type, inst.ty); if (in_memory_result == .ok) { - return self.bitcast(func, dest_type, inst); + return self.bitcast(block, dest_type, inst); } // *[N]T to []T @@ -735,14 +1236,14 @@ const Analyze = struct { return self.fail(inst.src, "TODO implement type coercion from {} to {}", .{ inst.ty, dest_type }); } - fn bitcast(self: *Analyze, func: ?*Fn, dest_type: Type, inst: *Inst) !*Inst { + fn bitcast(self: *Analyze, block: ?*Block, dest_type: Type, inst: *Inst) !*Inst { if (inst.value()) |val| { // Keep the comptime Value representation; take the new type.
return self.constInst(inst.src, .{ .ty = dest_type, .val = val }); } // TODO validate the type size and other compile errors - const f = try self.requireFunctionBody(func, inst.src); - return self.addNewInstArgs(f, inst.src, dest_type, Inst.BitCast, Inst.Args(Inst.BitCast){ .operand = inst }); + const b = try self.requireRuntimeBlock(block, inst.src); + return self.addNewInstArgs(b, inst.src, dest_type, Inst.BitCast, Inst.Args(Inst.BitCast){ .operand = inst }); } fn coerceArrayPtrToSlice(self: *Analyze, dest_type: Type, inst: *Inst) !*Inst { @@ -784,18 +1285,20 @@ pub fn main() anyerror!void { const allocator = if (std.builtin.link_libc) std.heap.c_allocator else &arena.allocator; const args = try std.process.argsAlloc(allocator); + defer std.process.argsFree(allocator, args); const src_path = args[1]; const debug_error_trace = true; const source = try std.fs.cwd().readFileAllocOptions(allocator, src_path, std.math.maxInt(u32), 1, 0); + defer allocator.free(source); var zir_module = try text.parse(allocator, source); defer zir_module.deinit(allocator); if (zir_module.errors.len != 0) { for (zir_module.errors) |err_msg| { - const loc = findLineColumn(source, err_msg.byte_offset); + const loc = std.zig.findLineColumn(source, err_msg.byte_offset); std.debug.warn("{}:{}:{}: error: {}\n", .{ src_path, loc.line + 1, loc.column + 1, err_msg.msg }); } if (debug_error_trace) return error.ParseFailure; @@ -804,15 +1307,20 @@ pub fn main() anyerror!void { const native_info = try std.zig.system.NativeTargetInfo.detect(allocator, .{}); - var analyzed_module = try analyze(allocator, zir_module, native_info.target); + var analyzed_module = try analyze(allocator, zir_module, .{ + .target = native_info.target, + .output_mode = .Obj, + .link_mode = .Static, + .optimize_mode = .Debug, + }); defer analyzed_module.deinit(allocator); if (analyzed_module.errors.len != 0) { for (analyzed_module.errors) |err_msg| { - const loc = findLineColumn(source, err_msg.byte_offset); + const loc = std.zig.findLineColumn(source, err_msg.byte_offset); std.debug.warn("{}:{}:{}: error: {}\n", .{ src_path, loc.line + 1, loc.column + 1, err_msg.msg }); } - if (debug_error_trace) return error.ParseFailure; + if (debug_error_trace) return error.AnalysisFail; std.process.exit(1); } @@ -827,34 +1335,17 @@ pub fn main() anyerror!void { } const link = @import("link.zig"); - var result = try link.updateExecutableFilePath(allocator, analyzed_module, std.fs.cwd(), "a.out"); + var result = try link.updateFilePath(allocator, analyzed_module, std.fs.cwd(), "zir.o"); defer result.deinit(allocator); if (result.errors.len != 0) { for (result.errors) |err_msg| { - const loc = findLineColumn(source, err_msg.byte_offset); + const loc = std.zig.findLineColumn(source, err_msg.byte_offset); std.debug.warn("{}:{}:{}: error: {}\n", .{ src_path, loc.line + 1, loc.column + 1, err_msg.msg }); } - if (debug_error_trace) return error.ParseFailure; + if (debug_error_trace) return error.LinkFailure; std.process.exit(1); } } -fn findLineColumn(source: []const u8, byte_offset: usize) struct { line: usize, column: usize } { - var line: usize = 0; - var column: usize = 0; - for (source[0..byte_offset]) |byte| { - switch (byte) { - '\n' => { - line += 1; - column = 0; - }, - else => { - column += 1; - }, - } - } - return .{ .line = line, .column = column }; -} - // Performance optimization ideas: // * when analyzing use a field in the Inst instead of HashMap to track corresponding instructions diff --git a/src-self-hosted/ir/text.zig b/src-self-hosted/ir/text.zig 
index 5d0b49b89e..e1efb40fe5 100644 --- a/src-self-hosted/ir/text.zig +++ b/src-self-hosted/ir/text.zig @@ -4,7 +4,8 @@ const std = @import("std"); const mem = std.mem; const Allocator = std.mem.Allocator; const assert = std.debug.assert; -const BigInt = std.math.big.Int; +const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; const Type = @import("../type.zig").Type; const Value = @import("../value.zig").Value; const ir = @import("../ir.zig"); @@ -18,6 +19,7 @@ pub const Inst = struct { /// These names are used directly as the instruction names in the text format. pub const Tag = enum { + breakpoint, str, int, ptrtoint, @@ -26,6 +28,7 @@ pub const Inst = struct { as, @"asm", @"unreachable", + @"return", @"fn", @"export", primitive, @@ -34,10 +37,15 @@ pub const Inst = struct { bitcast, elemptr, add, + cmp, + condbr, + isnull, + isnonnull, }; pub fn TagToType(tag: Tag) type { return switch (tag) { + .breakpoint => Breakpoint, .str => Str, .int => Int, .ptrtoint => PtrToInt, @@ -46,6 +54,7 @@ pub const Inst = struct { .as => As, .@"asm" => Asm, .@"unreachable" => Unreachable, + .@"return" => Return, .@"fn" => Fn, .@"export" => Export, .primitive => Primitive, @@ -54,6 +63,10 @@ pub const Inst = struct { .bitcast => BitCast, .elemptr => ElemPtr, .add => Add, + .cmp => Cmp, + .condbr => CondBr, + .isnull => IsNull, + .isnonnull => IsNonNull, }; } @@ -64,6 +77,14 @@ pub const Inst = struct { return @fieldParentPtr(T, "base", base); } + pub const Breakpoint = struct { + pub const base_tag = Tag.breakpoint; + base: Inst, + + positionals: struct {}, + kw_args: struct {}, + }; + pub const Str = struct { pub const base_tag = Tag.str; base: Inst, @@ -79,7 +100,7 @@ pub const Inst = struct { base: Inst, positionals: struct { - int: BigInt, + int: BigIntConst, }, kw_args: struct {}, }; @@ -151,19 +172,23 @@ pub const Inst = struct { kw_args: struct {}, }; + pub const Return = struct { + pub const base_tag = Tag.@"return"; + base: Inst, + + positionals: struct {}, + kw_args: struct {}, + }; + pub const Fn = struct { pub const base_tag = Tag.@"fn"; base: Inst, positionals: struct { fn_type: *Inst, - body: Body, + body: Module.Body, }, kw_args: struct {}, - - pub const Body = struct { - instructions: []*Inst, - }; }; pub const Export = struct { @@ -297,6 +322,50 @@ pub const Inst = struct { }, kw_args: struct {}, }; + + pub const Cmp = struct { + pub const base_tag = Tag.cmp; + base: Inst, + + positionals: struct { + lhs: *Inst, + op: std.math.CompareOperator, + rhs: *Inst, + }, + kw_args: struct {}, + }; + + pub const CondBr = struct { + pub const base_tag = Tag.condbr; + base: Inst, + + positionals: struct { + condition: *Inst, + true_body: Module.Body, + false_body: Module.Body, + }, + kw_args: struct {}, + }; + + pub const IsNull = struct { + pub const base_tag = Tag.isnull; + base: Inst, + + positionals: struct { + operand: *Inst, + }, + kw_args: struct {}, + }; + + pub const IsNonNull = struct { + pub const base_tag = Tag.isnonnull; + base: Inst, + + positionals: struct { + operand: *Inst, + }, + kw_args: struct {}, + }; }; pub const ErrorMsg = struct { @@ -309,6 +378,10 @@ pub const Module = struct { errors: []ErrorMsg, arena: std.heap.ArenaAllocator, + pub const Body = struct { + instructions: []*Inst, + }; + pub fn deinit(self: *Module, allocator: *Allocator) void { allocator.free(self.decls); allocator.free(self.errors); @@ -321,7 +394,7 @@ pub const Module = struct { self.writeToStream(std.heap.page_allocator, std.io.getStdErr().outStream()) catch {}; } - 
const InstPtrTable = std.AutoHashMap(*Inst, struct { index: usize, fn_body: ?*Inst.Fn.Body }); + const InstPtrTable = std.AutoHashMap(*Inst, struct { index: usize, fn_body: ?*Module.Body }); /// The allocator is used for temporary storage, but this function always returns /// with no resources allocated. @@ -357,6 +430,7 @@ pub const Module = struct { ) @TypeOf(stream).Error!void { // TODO I tried implementing this with an inline for loop and hit a compiler bug switch (decl.tag) { + .breakpoint => return self.writeInstToStreamGeneric(stream, .breakpoint, decl, inst_table), .str => return self.writeInstToStreamGeneric(stream, .str, decl, inst_table), .int => return self.writeInstToStreamGeneric(stream, .int, decl, inst_table), .ptrtoint => return self.writeInstToStreamGeneric(stream, .ptrtoint, decl, inst_table), @@ -365,6 +439,7 @@ pub const Module = struct { .as => return self.writeInstToStreamGeneric(stream, .as, decl, inst_table), .@"asm" => return self.writeInstToStreamGeneric(stream, .@"asm", decl, inst_table), .@"unreachable" => return self.writeInstToStreamGeneric(stream, .@"unreachable", decl, inst_table), + .@"return" => return self.writeInstToStreamGeneric(stream, .@"return", decl, inst_table), .@"fn" => return self.writeInstToStreamGeneric(stream, .@"fn", decl, inst_table), .@"export" => return self.writeInstToStreamGeneric(stream, .@"export", decl, inst_table), .primitive => return self.writeInstToStreamGeneric(stream, .primitive, decl, inst_table), @@ -373,6 +448,10 @@ pub const Module = struct { .bitcast => return self.writeInstToStreamGeneric(stream, .bitcast, decl, inst_table), .elemptr => return self.writeInstToStreamGeneric(stream, .elemptr, decl, inst_table), .add => return self.writeInstToStreamGeneric(stream, .add, decl, inst_table), + .cmp => return self.writeInstToStreamGeneric(stream, .cmp, decl, inst_table), + .condbr => return self.writeInstToStreamGeneric(stream, .condbr, decl, inst_table), + .isnull => return self.writeInstToStreamGeneric(stream, .isnull, decl, inst_table), + .isnonnull => return self.writeInstToStreamGeneric(stream, .isnonnull, decl, inst_table), } } @@ -432,7 +511,7 @@ pub const Module = struct { } try stream.writeByte(']'); }, - Inst.Fn.Body => { + Module.Body => { try stream.writeAll("{\n"); for (param.instructions) |inst, i| { try stream.print(" %{} ", .{i}); @@ -443,7 +522,7 @@ pub const Module = struct { }, bool => return stream.writeByte("01"[@boolToInt(param)]), []u8, []const u8 => return std.zig.renderStringLiteral(param, stream), - BigInt => return stream.print("{}", .{param}), + BigIntConst => return stream.print("{}", .{param}), else => |T| @compileError("unimplemented: rendering parameter of type " ++ @typeName(T)), } } @@ -497,7 +576,7 @@ const Parser = struct { name_map: std.StringHashMap(usize), }; - fn parseBody(self: *Parser) !Inst.Fn.Body { + fn parseBody(self: *Parser) !Module.Body { var body_context = Body{ .instructions = std.ArrayList(*Inst).init(self.allocator), .name_map = std.StringHashMap(usize).init(self.allocator), @@ -532,9 +611,10 @@ const Parser = struct { else => |byte| return self.failByte(byte), }; - return Inst.Fn.Body{ - .instructions = body_context.instructions.toOwnedSlice(), - }; + // Move the instructions to the arena + const instrs = try self.arena.allocator.alloc(*Inst, body_context.instructions.items.len); + mem.copy(*Inst, instrs, body_context.instructions.items); + return Module.Body{ .instructions = instrs }; } fn parseStringLiteral(self: *Parser) ![]u8 { @@ -565,7 +645,7 @@ const Parser = struct { 
}; } - fn parseIntegerLiteral(self: *Parser) !BigInt { + fn parseIntegerLiteral(self: *Parser) !BigIntConst { const start = self.i; if (self.source[self.i] == '-') self.i += 1; while (true) : (self.i += 1) switch (self.source[self.i]) { @@ -573,41 +653,46 @@ const Parser = struct { else => break, }; const number_text = self.source[start..self.i]; - var result = try BigInt.init(&self.arena.allocator); - result.setString(10, number_text) catch |err| { - self.i = start; - switch (err) { - error.InvalidBase => unreachable, - error.InvalidCharForDigit => return self.fail("invalid digit in integer literal", .{}), - error.DigitTooLargeForBase => return self.fail("digit too large in integer literal", .{}), - else => |e| return e, - } + const base = 10; + // TODO reuse the same array list for this + const limbs_buffer_len = std.math.big.int.calcSetStringLimbsBufferLen(base, number_text.len); + const limbs_buffer = try self.allocator.alloc(std.math.big.Limb, limbs_buffer_len); + defer self.allocator.free(limbs_buffer); + const limb_len = std.math.big.int.calcSetStringLimbCount(base, number_text.len); + const limbs = try self.arena.allocator.alloc(std.math.big.Limb, limb_len); + var result = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined }; + result.setString(base, number_text, limbs_buffer, self.allocator) catch |err| switch (err) { + error.InvalidCharacter => { + self.i = start; + return self.fail("invalid digit in integer literal", .{}); + }, }; - return result; + return result.toConst(); } fn parseRoot(self: *Parser) !void { // The IR format is designed so that it can be tokenized and parsed at the same time. - while (true) : (self.i += 1) switch (self.source[self.i]) { - ';' => _ = try skipToAndOver(self, '\n'), - '@' => { - self.i += 1; - const ident = try skipToAndOver(self, ' '); - skipSpace(self); - try requireEatBytes(self, "="); - skipSpace(self); - const inst = try parseInstruction(self, null); - const ident_index = self.decls.items.len; - if (try self.global_name_map.put(ident, ident_index)) |_| { - return self.fail("redefinition of identifier '{}'", .{ident}); - } - try self.decls.append(inst); - continue; - }, - ' ', '\n' => continue, - 0 => break, - else => |byte| return self.fail("unexpected byte: '{c}'", .{byte}), - }; + while (true) { + switch (self.source[self.i]) { + ';' => _ = try skipToAndOver(self, '\n'), + '@' => { + self.i += 1; + const ident = try skipToAndOver(self, ' '); + skipSpace(self); + try requireEatBytes(self, "="); + skipSpace(self); + const inst = try parseInstruction(self, null); + const ident_index = self.decls.items.len; + if (try self.global_name_map.put(ident, ident_index)) |_| { + return self.fail("redefinition of identifier '{}'", .{ident}); + } + try self.decls.append(inst); + }, + ' ', '\n' => self.i += 1, + 0 => break, + else => |byte| return self.fail("unexpected byte: '{c}'", .{byte}), + } + } } fn eatByte(self: *Parser, byte: u8) bool { @@ -752,7 +837,7 @@ const Parser = struct { }; } switch (T) { - Inst.Fn.Body => return parseBody(self), + Module.Body => return parseBody(self), bool => { const bool_value = switch (self.source[self.i]) { '0' => false, @@ -779,7 +864,7 @@ const Parser = struct { }, *Inst => return parseParameterInst(self, body_ctx), []u8, []const u8 => return self.parseStringLiteral(), - BigInt => return self.parseIntegerLiteral(), + BigIntConst => return self.parseIntegerLiteral(), else => @compileError("Unimplemented: ir parseParameterGeneric for type " ++ @typeName(T)), } return self.fail("TODO parse 
parameter {}", .{@typeName(T)}); @@ -878,11 +963,12 @@ const EmitZIR = struct { } fn emitComptimeIntVal(self: *EmitZIR, src: usize, val: Value) !*Inst { + const big_int_space = try self.arena.allocator.create(Value.BigIntSpace); const int_inst = try self.arena.allocator.create(Inst.Int); int_inst.* = .{ .base = .{ .src = src, .tag = Inst.Int.base_tag }, .positionals = .{ - .int = try val.toBigInt(&self.arena.allocator), + .int = val.toBigInt(big_int_space), }, .kw_args = .{}, }; @@ -937,96 +1023,19 @@ const EmitZIR = struct { var instructions = std.ArrayList(*Inst).init(self.allocator); defer instructions.deinit(); - for (module_fn.body) |inst| { - const new_inst = switch (inst.tag) { - .unreach => blk: { - const unreach_inst = try self.arena.allocator.create(Inst.Unreachable); - unreach_inst.* = .{ - .base = .{ .src = inst.src, .tag = Inst.Unreachable.base_tag }, - .positionals = .{}, - .kw_args = .{}, - }; - break :blk &unreach_inst.base; - }, - .constant => unreachable, // excluded from function bodies - .assembly => blk: { - const old_inst = inst.cast(ir.Inst.Assembly).?; - const new_inst = try self.arena.allocator.create(Inst.Asm); - - const inputs = try self.arena.allocator.alloc(*Inst, old_inst.args.inputs.len); - for (inputs) |*elem, i| { - elem.* = try self.emitStringLiteral(inst.src, old_inst.args.inputs[i]); - } - - const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.args.clobbers.len); - for (clobbers) |*elem, i| { - elem.* = try self.emitStringLiteral(inst.src, old_inst.args.clobbers[i]); - } - - const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len); - for (args) |*elem, i| { - elem.* = try self.resolveInst(&inst_table, old_inst.args.args[i]); - } - - new_inst.* = .{ - .base = .{ .src = inst.src, .tag = Inst.Asm.base_tag }, - .positionals = .{ - .asm_source = try self.emitStringLiteral(inst.src, old_inst.args.asm_source), - .return_type = try self.emitType(inst.src, inst.ty), - }, - .kw_args = .{ - .@"volatile" = old_inst.args.is_volatile, - .output = if (old_inst.args.output) |o| - try self.emitStringLiteral(inst.src, o) - else - null, - .inputs = inputs, - .clobbers = clobbers, - .args = args, - }, - }; - break :blk &new_inst.base; - }, - .ptrtoint => blk: { - const old_inst = inst.cast(ir.Inst.PtrToInt).?; - const new_inst = try self.arena.allocator.create(Inst.PtrToInt); - new_inst.* = .{ - .base = .{ .src = inst.src, .tag = Inst.PtrToInt.base_tag }, - .positionals = .{ - .ptr = try self.resolveInst(&inst_table, old_inst.args.ptr), - }, - .kw_args = .{}, - }; - break :blk &new_inst.base; - }, - .bitcast => blk: { - const old_inst = inst.cast(ir.Inst.BitCast).?; - const new_inst = try self.arena.allocator.create(Inst.BitCast); - new_inst.* = .{ - .base = .{ .src = inst.src, .tag = Inst.BitCast.base_tag }, - .positionals = .{ - .dest_type = try self.emitType(inst.src, inst.ty), - .operand = try self.resolveInst(&inst_table, old_inst.args.operand), - }, - .kw_args = .{}, - }; - break :blk &new_inst.base; - }, - }; - try instructions.append(new_inst); - try inst_table.putNoClobber(inst, new_inst); - } + try self.emitBody(module_fn.body, &inst_table, &instructions); const fn_type = try self.emitType(src, module_fn.fn_type); + const arena_instrs = try self.arena.allocator.alloc(*Inst, instructions.items.len); + mem.copy(*Inst, arena_instrs, instructions.items); + const fn_inst = try self.arena.allocator.create(Inst.Fn); fn_inst.* = .{ .base = .{ .src = src, .tag = Inst.Fn.base_tag }, .positionals = .{ .fn_type = fn_type, - .body = .{ - 
.instructions = instructions.toOwnedSlice(), - }, + .body = .{ .instructions = arena_instrs }, }, .kw_args = .{}, }; @@ -1037,6 +1046,159 @@ const EmitZIR = struct { } } + fn emitTrivial(self: *EmitZIR, src: usize, comptime T: type) Allocator.Error!*Inst { + const new_inst = try self.arena.allocator.create(T); + new_inst.* = .{ + .base = .{ .src = src, .tag = T.base_tag }, + .positionals = .{}, + .kw_args = .{}, + }; + return &new_inst.base; + } + + fn emitBody( + self: *EmitZIR, + body: ir.Module.Body, + inst_table: *std.AutoHashMap(*ir.Inst, *Inst), + instructions: *std.ArrayList(*Inst), + ) Allocator.Error!void { + for (body.instructions) |inst| { + const new_inst = switch (inst.tag) { + .breakpoint => try self.emitTrivial(inst.src, Inst.Breakpoint), + .unreach => try self.emitTrivial(inst.src, Inst.Unreachable), + .ret => try self.emitTrivial(inst.src, Inst.Return), + .constant => unreachable, // excluded from function bodies + .assembly => blk: { + const old_inst = inst.cast(ir.Inst.Assembly).?; + const new_inst = try self.arena.allocator.create(Inst.Asm); + + const inputs = try self.arena.allocator.alloc(*Inst, old_inst.args.inputs.len); + for (inputs) |*elem, i| { + elem.* = try self.emitStringLiteral(inst.src, old_inst.args.inputs[i]); + } + + const clobbers = try self.arena.allocator.alloc(*Inst, old_inst.args.clobbers.len); + for (clobbers) |*elem, i| { + elem.* = try self.emitStringLiteral(inst.src, old_inst.args.clobbers[i]); + } + + const args = try self.arena.allocator.alloc(*Inst, old_inst.args.args.len); + for (args) |*elem, i| { + elem.* = try self.resolveInst(inst_table, old_inst.args.args[i]); + } + + new_inst.* = .{ + .base = .{ .src = inst.src, .tag = Inst.Asm.base_tag }, + .positionals = .{ + .asm_source = try self.emitStringLiteral(inst.src, old_inst.args.asm_source), + .return_type = try self.emitType(inst.src, inst.ty), + }, + .kw_args = .{ + .@"volatile" = old_inst.args.is_volatile, + .output = if (old_inst.args.output) |o| + try self.emitStringLiteral(inst.src, o) + else + null, + .inputs = inputs, + .clobbers = clobbers, + .args = args, + }, + }; + break :blk &new_inst.base; + }, + .ptrtoint => blk: { + const old_inst = inst.cast(ir.Inst.PtrToInt).?; + const new_inst = try self.arena.allocator.create(Inst.PtrToInt); + new_inst.* = .{ + .base = .{ .src = inst.src, .tag = Inst.PtrToInt.base_tag }, + .positionals = .{ + .ptr = try self.resolveInst(inst_table, old_inst.args.ptr), + }, + .kw_args = .{}, + }; + break :blk &new_inst.base; + }, + .bitcast => blk: { + const old_inst = inst.cast(ir.Inst.BitCast).?; + const new_inst = try self.arena.allocator.create(Inst.BitCast); + new_inst.* = .{ + .base = .{ .src = inst.src, .tag = Inst.BitCast.base_tag }, + .positionals = .{ + .dest_type = try self.emitType(inst.src, inst.ty), + .operand = try self.resolveInst(inst_table, old_inst.args.operand), + }, + .kw_args = .{}, + }; + break :blk &new_inst.base; + }, + .cmp => blk: { + const old_inst = inst.cast(ir.Inst.Cmp).?; + const new_inst = try self.arena.allocator.create(Inst.Cmp); + new_inst.* = .{ + .base = .{ .src = inst.src, .tag = Inst.Cmp.base_tag }, + .positionals = .{ + .lhs = try self.resolveInst(inst_table, old_inst.args.lhs), + .rhs = try self.resolveInst(inst_table, old_inst.args.rhs), + .op = old_inst.args.op, + }, + .kw_args = .{}, + }; + break :blk &new_inst.base; + }, + .condbr => blk: { + const old_inst = inst.cast(ir.Inst.CondBr).?; + + var true_body = std.ArrayList(*Inst).init(self.allocator); + var false_body = std.ArrayList(*Inst).init(self.allocator); 
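+ // Build each branch body into a temporary list; toOwnedSlice below transfers the + // instruction buffers into the new CondBr instruction.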
+ + defer true_body.deinit(); + defer false_body.deinit(); + + try self.emitBody(old_inst.args.true_body, inst_table, &true_body); + try self.emitBody(old_inst.args.false_body, inst_table, &false_body); + + const new_inst = try self.arena.allocator.create(Inst.CondBr); + new_inst.* = .{ + .base = .{ .src = inst.src, .tag = Inst.CondBr.base_tag }, + .positionals = .{ + .condition = try self.resolveInst(inst_table, old_inst.args.condition), + .true_body = .{ .instructions = true_body.toOwnedSlice() }, + .false_body = .{ .instructions = false_body.toOwnedSlice() }, + }, + .kw_args = .{}, + }; + break :blk &new_inst.base; + }, + .isnull => blk: { + const old_inst = inst.cast(ir.Inst.IsNull).?; + const new_inst = try self.arena.allocator.create(Inst.IsNull); + new_inst.* = .{ + .base = .{ .src = inst.src, .tag = Inst.IsNull.base_tag }, + .positionals = .{ + .operand = try self.resolveInst(inst_table, old_inst.args.operand), + }, + .kw_args = .{}, + }; + break :blk &new_inst.base; + }, + .isnonnull => blk: { + const old_inst = inst.cast(ir.Inst.IsNonNull).?; + const new_inst = try self.arena.allocator.create(Inst.IsNonNull); + new_inst.* = .{ + .base = .{ .src = inst.src, .tag = Inst.IsNonNull.base_tag }, + .positionals = .{ + .operand = try self.resolveInst(inst_table, old_inst.args.operand), + }, + .kw_args = .{}, + }; + break :blk &new_inst.base; + }, + }; + try instructions.append(new_inst); + try inst_table.putNoClobber(inst, new_inst); + } + } + fn emitType(self: *EmitZIR, src: usize, ty: Type) Allocator.Error!*Inst { switch (ty.tag()) { .isize => return self.emitPrimitiveType(src, .isize), diff --git a/src-self-hosted/link.zig b/src-self-hosted/link.zig index cb6aa40afe..504c374ca7 100644 --- a/src-self-hosted/link.zig +++ b/src-self-hosted/link.zig @@ -7,11 +7,6 @@ const fs = std.fs; const elf = std.elf; const codegen = @import("codegen.zig"); -/// On common systems with a 0o022 umask, 0o777 will still result in a file created -/// with 0o755 permissions, but it works appropriately if the system is configured -/// more leniently. As another data point, C's fopen seems to open files with the -/// 666 mode. -const executable_mode = 0o777; const default_entry_addr = 0x8000000; pub const ErrorMsg = struct { @@ -35,29 +30,29 @@ pub const Result = struct { /// If incremental linking fails, falls back to truncating the file and rewriting it. /// A malicious file is detected as incremental link failure and does not cause Illegal Behavior. /// This operation is not atomic. -pub fn updateExecutableFilePath( +pub fn updateFilePath( allocator: *Allocator, module: ir.Module, dir: fs.Dir, sub_path: []const u8, ) !Result { - const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = executable_mode }); + const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = determineMode(module) }); defer file.close(); - return updateExecutableFile(allocator, module, file); + return updateFile(allocator, module, file); } /// Atomically overwrites the old file, if present. 
-pub fn writeExecutableFilePath( +pub fn writeFilePath( allocator: *Allocator, module: ir.Module, dir: fs.Dir, sub_path: []const u8, ) !Result { - const af = try dir.atomicFile(sub_path, .{ .mode = executable_mode }); + const af = try dir.atomicFile(sub_path, .{ .mode = determineMode(module) }); defer af.deinit(); - const result = try writeExecutableFile(allocator, module, af.file); + const result = try writeFile(allocator, module, af.file); try af.finish(); return result; } @@ -67,10 +62,10 @@ pub fn writeExecutableFilePath( /// Returns an error if `file` is not already open with +read +write +seek abilities. /// A malicious file is detected as incremental link failure and does not cause Illegal Behavior. /// This operation is not atomic. -pub fn updateExecutableFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Result { - return updateExecutableFileInner(allocator, module, file) catch |err| switch (err) { +pub fn updateFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Result { + return updateFileInner(allocator, module, file) catch |err| switch (err) { error.IncrFailed => { - return writeExecutableFile(allocator, module, file); + return writeFile(allocator, module, file); }, else => |e| return e, }; @@ -436,7 +431,7 @@ const Update = struct { }, } } - if (self.entry_addr == null) { + if (self.entry_addr == null and self.module.output_mode == .Exe) { const msg = try std.fmt.allocPrint(self.errors.allocator, "no entry point found", .{}); errdefer self.errors.allocator.free(msg); try self.errors.append(.{ @@ -485,7 +480,15 @@ const Update = struct { assert(index == 16); - mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(elf.ET.EXEC), endian); + const elf_type = switch (self.module.output_mode) { + .Exe => elf.ET.EXEC, + .Obj => elf.ET.REL, + .Lib => switch (self.module.link_mode) { + .Static => elf.ET.REL, + .Dynamic => elf.ET.DYN, + }, + }; + mem.writeInt(u16, hdr_buf[index..][0..2], @enumToInt(elf_type), endian); index += 2; const machine = self.module.target.cpu.arch.toElfMachine(); @@ -496,10 +499,11 @@ const Update = struct { mem.writeInt(u32, hdr_buf[index..][0..4], 1, endian); index += 4; + const e_entry = if (elf_type == .REL) 0 else self.entry_addr.?; + switch (ptr_width) { .p32 => { - // e_entry - mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, self.entry_addr.?), endian); + mem.writeInt(u32, hdr_buf[index..][0..4], @intCast(u32, e_entry), endian); index += 4; // e_phoff @@ -512,7 +516,7 @@ const Update = struct { }, .p64 => { // e_entry - mem.writeInt(u64, hdr_buf[index..][0..8], self.entry_addr.?, endian); + mem.writeInt(u64, hdr_buf[index..][0..8], e_entry, endian); index += 8; // e_phoff @@ -750,7 +754,20 @@ const Update = struct { /// Truncates the existing file contents and overwrites the contents. /// Returns an error if `file` is not already open with +read +write +seek abilities. 
-pub fn writeExecutableFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Result { +pub fn writeFile(allocator: *Allocator, module: ir.Module, file: fs.File) !Result { + switch (module.output_mode) { + .Exe => {}, + .Obj => {}, + .Lib => return error.TODOImplementWritingLibFiles, + } + switch (module.object_format) { + .unknown => unreachable, // TODO remove this tag from the enum + .coff => return error.TODOImplementWritingCOFF, + .elf => {}, + .macho => return error.TODOImplementWritingMachO, + .wasm => return error.TODOImplementWritingWasmObjects, + } + var update = Update{ .file = file, .module = &module, @@ -778,7 +795,7 @@ pub fn writeExecutableFile(allocator: *Allocator, module: ir.Module, file: fs.Fi } /// Returns error.IncrFailed if incremental update could not be performed. -fn updateExecutableFileInner(allocator: *Allocator, module: ir.Module, file: fs.File) !Result { +fn updateFileInner(allocator: *Allocator, module: ir.Module, file: fs.File) !Result { //var ehdr_buf: [@sizeOf(elf.Elf64_Ehdr)]u8 = undefined; // TODO implement incremental linking @@ -822,3 +839,19 @@ fn sectHeaderTo32(shdr: elf.Elf64_Shdr) elf.Elf32_Shdr { .sh_entsize = @intCast(u32, shdr.sh_entsize), }; } + +fn determineMode(module: ir.Module) fs.File.Mode { + // On common systems with a 0o022 umask, 0o777 will still result in a file created + // with 0o755 permissions, but it works appropriately if the system is configured + // more leniently. As another data point, C's fopen seems to open files with the + // 666 mode. + const executable_mode = if (std.Target.current.os.tag == .windows) 0 else 0o777; + switch (module.output_mode) { + .Lib => return switch (module.link_mode) { + .Dynamic => executable_mode, + .Static => fs.File.default_mode, + }, + .Exe => return executable_mode, + .Obj => return fs.File.default_mode, + } +} diff --git a/src-self-hosted/test.zig b/src-self-hosted/test.zig index 9038b8128d..5868f9383d 100644 --- a/src-self-hosted/test.zig +++ b/src-self-hosted/test.zig @@ -1,237 +1,248 @@ const std = @import("std"); -const mem = std.mem; -const Target = std.Target; -const Compilation = @import("compilation.zig").Compilation; -const introspect = @import("introspect.zig"); -const testing = std.testing; -const errmsg = @import("errmsg.zig"); -const ZigCompiler = @import("compilation.zig").ZigCompiler; +const link = @import("link.zig"); +const ir = @import("ir.zig"); +const Allocator = std.mem.Allocator; -var ctx: TestContext = undefined; +var global_ctx: TestContext = undefined; -test "stage2" { - // TODO provide a way to run tests in evented I/O mode - if (!std.io.is_async) return error.SkipZigTest; +test "self-hosted" { + try global_ctx.init(); + defer global_ctx.deinit(); - // TODO https://github.com/ziglang/zig/issues/1364 - // TODO https://github.com/ziglang/zig/issues/3117 - if (true) return error.SkipZigTest; + try @import("stage2_tests").addCases(&global_ctx); - try ctx.init(); - defer ctx.deinit(); - - try @import("stage2_tests").addCases(&ctx); - - try ctx.run(); + try global_ctx.run(); } -const file1 = "1.zig"; -// TODO https://github.com/ziglang/zig/issues/3783 -const allocator = std.heap.page_allocator; - pub const TestContext = struct { - zig_compiler: ZigCompiler, - zig_lib_dir: []u8, - file_index: std.atomic.Int(usize), - group: std.event.Group(anyerror!void), - any_err: anyerror!void, + zir_cmp_output_cases: std.ArrayList(ZIRCompareOutputCase), + zir_transform_cases: std.ArrayList(ZIRTransformCase), - const tmp_dir_name = "stage2_test_tmp"; + pub const 
ZIRCompareOutputCase = struct { + name: []const u8, + src: [:0]const u8, + expected_stdout: []const u8, + }; + + pub const ZIRTransformCase = struct { + name: []const u8, + src: [:0]const u8, + expected_zir: []const u8, + }; + + pub fn addZIRCompareOutput( + ctx: *TestContext, + name: []const u8, + src: [:0]const u8, + expected_stdout: []const u8, + ) void { + ctx.zir_cmp_output_cases.append(.{ + .name = name, + .src = src, + .expected_stdout = expected_stdout, + }) catch unreachable; + } + + pub fn addZIRTransform( + ctx: *TestContext, + name: []const u8, + src: [:0]const u8, + expected_zir: []const u8, + ) void { + ctx.zir_transform_cases.append(.{ + .name = name, + .src = src, + .expected_zir = expected_zir, + }) catch unreachable; + } fn init(self: *TestContext) !void { - self.* = TestContext{ - .any_err = {}, - .zig_compiler = undefined, - .zig_lib_dir = undefined, - .group = undefined, - .file_index = std.atomic.Int(usize).init(0), + self.* = .{ + .zir_cmp_output_cases = std.ArrayList(ZIRCompareOutputCase).init(std.heap.page_allocator), + .zir_transform_cases = std.ArrayList(ZIRTransformCase).init(std.heap.page_allocator), }; - - self.zig_compiler = try ZigCompiler.init(allocator); - errdefer self.zig_compiler.deinit(); - - self.group = std.event.Group(anyerror!void).init(allocator); - errdefer self.group.wait() catch {}; - - self.zig_lib_dir = try introspect.resolveZigLibDir(allocator); - errdefer allocator.free(self.zig_lib_dir); - - try std.fs.cwd().makePath(tmp_dir_name); - errdefer std.fs.cwd().deleteTree(tmp_dir_name) catch {}; } fn deinit(self: *TestContext) void { - std.fs.cwd().deleteTree(tmp_dir_name) catch {}; - allocator.free(self.zig_lib_dir); - self.zig_compiler.deinit(); + self.zir_cmp_output_cases.deinit(); + self.zir_transform_cases.deinit(); + self.* = undefined; } fn run(self: *TestContext) !void { - std.event.Loop.startCpuBoundOperation(); - self.any_err = self.group.wait(); - return self.any_err; + var progress = std.Progress{}; + const root_node = try progress.start("zir", self.zir_cmp_output_cases.items.len + + self.zir_transform_cases.items.len); + defer root_node.end(); + + const native_info = try std.zig.system.NativeTargetInfo.detect(std.heap.page_allocator, .{}); + + for (self.zir_cmp_output_cases.items) |case| { + std.testing.base_allocator_instance.reset(); + try self.runOneZIRCmpOutputCase(std.testing.allocator, root_node, case, native_info.target); + try std.testing.allocator_instance.validate(); + } + for (self.zir_transform_cases.items) |case| { + std.testing.base_allocator_instance.reset(); + try self.runOneZIRTransformCase(std.testing.allocator, root_node, case, native_info.target); + try std.testing.allocator_instance.validate(); + } } - fn testCompileError( + fn runOneZIRCmpOutputCase( self: *TestContext, - source: []const u8, - path: []const u8, - line: usize, - column: usize, - msg: []const u8, + allocator: *Allocator, + root_node: *std.Progress.Node, + case: ZIRCompareOutputCase, + target: std.Target, ) !void { - var file_index_buf: [20]u8 = undefined; - const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", .{self.file_index.incr()}); - const file1_path = try std.fs.path.join(allocator, [_][]const u8{ tmp_dir_name, file_index, file1 }); + var tmp = std.testing.tmpDir(.{ .share_with_child_process = true }); + defer tmp.cleanup(); - if (std.fs.path.dirname(file1_path)) |dirname| { - try std.fs.cwd().makePath(dirname); + var prg_node = root_node.start(case.name, 4); + prg_node.activate(); + defer prg_node.end(); + + var zir_module = 
x: { + var parse_node = prg_node.start("parse", null); + parse_node.activate(); + defer parse_node.end(); + + break :x try ir.text.parse(allocator, case.src); + }; + defer zir_module.deinit(allocator); + if (zir_module.errors.len != 0) { + debugPrintErrors(case.src, zir_module.errors); + return error.ParseFailure; } - try std.fs.cwd().writeFile(file1_path, source); + var analyzed_module = x: { + var analyze_node = prg_node.start("analyze", null); + analyze_node.activate(); + defer analyze_node.end(); - var comp = try Compilation.create( - &self.zig_compiler, - "test", - file1_path, - .Native, - .Obj, - .Debug, - true, // is_static - self.zig_lib_dir, - ); - errdefer comp.destroy(); + break :x try ir.analyze(allocator, zir_module, .{ + .target = target, + .output_mode = .Exe, + .link_mode = .Static, + .optimize_mode = .Debug, + }); + }; + defer analyzed_module.deinit(allocator); + if (analyzed_module.errors.len != 0) { + debugPrintErrors(case.src, analyzed_module.errors); + return error.ParseFailure; + } - comp.start(); + var link_result = x: { + var link_node = prg_node.start("link", null); + link_node.activate(); + defer link_node.end(); - try self.group.call(getModuleEvent, comp, source, path, line, column, msg); + break :x try link.updateFilePath(allocator, analyzed_module, tmp.dir, "a.out"); + }; + defer link_result.deinit(allocator); + if (link_result.errors.len != 0) { + debugPrintErrors(case.src, link_result.errors); + return error.LinkFailure; + } + + var exec_result = x: { + var exec_node = prg_node.start("execute", null); + exec_node.activate(); + defer exec_node.end(); + + break :x try std.ChildProcess.exec(.{ + .allocator = allocator, + .argv = &[_][]const u8{"./a.out"}, + .cwd_dir = tmp.dir, + }); + }; + defer allocator.free(exec_result.stdout); + defer allocator.free(exec_result.stderr); + switch (exec_result.term) { + .Exited => |code| { + if (code != 0) { + std.debug.warn("elf file exited with code {}\n", .{code}); + return error.BinaryBadExitCode; + } + }, + else => return error.BinaryCrashed, + } + std.testing.expectEqualSlices(u8, case.expected_stdout, exec_result.stdout); } - fn testCompareOutputLibC( + fn runOneZIRTransformCase( self: *TestContext, - source: []const u8, - expected_output: []const u8, + allocator: *Allocator, + root_node: *std.Progress.Node, + case: ZIRTransformCase, + target: std.Target, ) !void { - var file_index_buf: [20]u8 = undefined; - const file_index = try std.fmt.bufPrint(file_index_buf[0..], "{}", .{self.file_index.incr()}); - const file1_path = try std.fs.path.join(allocator, [_][]const u8{ tmp_dir_name, file_index, file1 }); + var prg_node = root_node.start(case.name, 4); + prg_node.activate(); + defer prg_node.end(); - const output_file = try std.fmt.allocPrint(allocator, "{}-out{}", .{ file1_path, (Target{ .Native = {} }).exeFileExt() }); - if (std.fs.path.dirname(file1_path)) |dirname| { - try std.fs.cwd().makePath(dirname); + var parse_node = prg_node.start("parse", null); + parse_node.activate(); + var zir_module = try ir.text.parse(allocator, case.src); + defer zir_module.deinit(allocator); + if (zir_module.errors.len != 0) { + debugPrintErrors(case.src, zir_module.errors); + return error.ParseFailure; } + parse_node.end(); - try std.fs.cwd().writeFile(file1_path, source); - - var comp = try Compilation.create( - &self.zig_compiler, - "test", - file1_path, - .Native, - .Exe, - .Debug, - false, - self.zig_lib_dir, - ); - errdefer comp.destroy(); - - _ = try comp.addLinkLib("c", true); - comp.link_out_file = output_file; - comp.start(); 
- - try self.group.call(getModuleEventSuccess, comp, output_file, expected_output); - } - - async fn getModuleEventSuccess( - comp: *Compilation, - exe_file: []const u8, - expected_output: []const u8, - ) anyerror!void { - defer comp.destroy(); - const build_event = comp.events.get(); - - switch (build_event) { - .Ok => { - const argv = [_][]const u8{exe_file}; - // TODO use event loop - const child = try std.ChildProcess.exec(.{ - .allocator = allocator, - .argv = argv, - .max_output_bytes = 1024 * 1024, - }); - switch (child.term) { - .Exited => |code| { - if (code != 0) { - return error.BadReturnCode; - } - }, - else => { - return error.Crashed; - }, - } - if (!mem.eql(u8, child.stdout, expected_output)) { - return error.OutputMismatch; - } - }, - .Error => @panic("Cannot return error: https://github.com/ziglang/zig/issues/3190"), // |err| return err, - .Fail => |msgs| { - const stderr = std.io.getStdErr(); - try stderr.write("build incorrectly failed:\n"); - for (msgs) |msg| { - defer msg.destroy(); - try msg.printToFile(stderr, .Auto); - } - }, + var analyze_node = prg_node.start("analyze", null); + analyze_node.activate(); + var analyzed_module = try ir.analyze(allocator, zir_module, .{ + .target = target, + .output_mode = .Obj, + .link_mode = .Static, + .optimize_mode = .Debug, + }); + defer analyzed_module.deinit(allocator); + if (analyzed_module.errors.len != 0) { + debugPrintErrors(case.src, analyzed_module.errors); + return error.ParseFailure; } - } + analyze_node.end(); - async fn getModuleEvent( - comp: *Compilation, - source: []const u8, - path: []const u8, - line: usize, - column: usize, - text: []const u8, - ) anyerror!void { - defer comp.destroy(); - const build_event = comp.events.get(); + var emit_node = prg_node.start("emit", null); + emit_node.activate(); + var new_zir_module = try ir.text.emit_zir(allocator, analyzed_module); + defer new_zir_module.deinit(allocator); + emit_node.end(); - switch (build_event) { - .Ok => { - @panic("build incorrectly succeeded"); - }, - .Error => |err| { - @panic("build incorrectly failed"); - }, - .Fail => |msgs| { - testing.expect(msgs.len != 0); - for (msgs) |msg| { - if (mem.endsWith(u8, msg.realpath, path) and mem.eql(u8, msg.text, text)) { - const span = msg.getSpan(); - const first_token = msg.getTree().tokens.at(span.first); - const last_token = msg.getTree().tokens.at(span.first); - const start_loc = msg.getTree().tokenLocationPtr(0, first_token); - if (start_loc.line + 1 == line and start_loc.column + 1 == column) { - return; - } - } - } - std.debug.warn("\n=====source:=======\n{}\n====expected:========\n{}:{}:{}: error: {}\n", .{ - source, - path, - line, - column, - text, - }); - std.debug.warn("\n====found:========\n", .{}); - const stderr = std.io.getStdErr(); - for (msgs) |msg| { - defer msg.destroy(); - try msg.printToFile(stderr, errmsg.Color.Auto); - } - std.debug.warn("============\n", .{}); - return error.TestFailed; - }, - } + var write_node = prg_node.start("write", null); + write_node.activate(); + var out_zir = std.ArrayList(u8).init(allocator); + defer out_zir.deinit(); + try new_zir_module.writeToStream(allocator, out_zir.outStream()); + write_node.end(); + + std.testing.expectEqualSlices(u8, case.expected_zir, out_zir.items); } }; + +fn debugPrintErrors(src: []const u8, errors: var) void { + std.debug.warn("\n", .{}); + var nl = true; + var line: usize = 1; + for (src) |byte| { + if (nl) { + std.debug.warn("{: >3}| ", .{line}); + nl = false; + } + if (byte == '\n') { + nl = true; + line += 1; + } + 
std.debug.warn("{c}", .{byte}); + } + std.debug.warn("\n", .{}); + for (errors) |err_msg| { + const loc = std.zig.findLineColumn(src, err_msg.byte_offset); + std.debug.warn("{}:{}: error: {}\n", .{ loc.line + 1, loc.column + 1, err_msg.msg }); + } +} diff --git a/src-self-hosted/translate_c.zig b/src-self-hosted/translate_c.zig index bda152f134..1c689e9f76 100644 --- a/src-self-hosted/translate_c.zig +++ b/src-self-hosted/translate_c.zig @@ -3913,18 +3913,20 @@ fn transCreateNodeAPInt(c: *Context, int: *const ZigClangAPSInt) !*ast.Node { }; var aps_int = int; const is_negative = ZigClangAPSInt_isSigned(int) and ZigClangAPSInt_isNegative(int); - if (is_negative) - aps_int = ZigClangAPSInt_negate(aps_int); - var big = try math.big.Int.initCapacity(c.a(), num_limbs); - if (is_negative) - big.negate(); - defer big.deinit(); + if (is_negative) aps_int = ZigClangAPSInt_negate(aps_int); + defer if (is_negative) { + ZigClangAPSInt_free(aps_int); + }; + + const limbs = try c.a().alloc(math.big.Limb, num_limbs); + defer c.a().free(limbs); + const data = ZigClangAPSInt_getRawData(aps_int); - switch (@sizeOf(std.math.big.Limb)) { + switch (@sizeOf(math.big.Limb)) { 8 => { var i: usize = 0; while (i < num_limbs) : (i += 1) { - big.limbs[i] = data[i]; + limbs[i] = data[i]; } }, 4 => { @@ -3934,23 +3936,23 @@ fn transCreateNodeAPInt(c: *Context, int: *const ZigClangAPSInt) !*ast.Node { limb_i += 2; data_i += 1; }) { - big.limbs[limb_i] = @truncate(u32, data[data_i]); - big.limbs[limb_i + 1] = @truncate(u32, data[data_i] >> 32); + limbs[limb_i] = @truncate(u32, data[data_i]); + limbs[limb_i + 1] = @truncate(u32, data[data_i] >> 32); } }, else => @compileError("unimplemented"), } - const str = big.toString(c.a(), 10, false) catch |err| switch (err) { + + const big: math.big.int.Const = .{ .limbs = limbs, .positive = !is_negative }; + const str = big.toStringAlloc(c.a(), 10, false) catch |err| switch (err) { error.OutOfMemory => return error.OutOfMemory, - else => unreachable, }; + defer c.a().free(str); const token = try appendToken(c, .IntegerLiteral, str); const node = try c.a().create(ast.Node.IntegerLiteral); node.* = .{ .token = token, }; - if (is_negative) - ZigClangAPSInt_free(aps_int); return &node.base; } diff --git a/src-self-hosted/type.zig b/src-self-hosted/type.zig index 51fe8cc685..25f726a680 100644 --- a/src-self-hosted/type.zig +++ b/src-self-hosted/type.zig @@ -20,37 +20,40 @@ pub const Type = extern union { pub fn zigTypeTag(self: Type) std.builtin.TypeId { switch (self.tag()) { - .@"u8", - .@"i8", - .@"isize", - .@"usize", - .@"c_short", - .@"c_ushort", - .@"c_int", - .@"c_uint", - .@"c_long", - .@"c_ulong", - .@"c_longlong", - .@"c_ulonglong", - .@"c_longdouble", + .u8, + .i8, + .isize, + .usize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .int_signed, + .int_unsigned, => return .Int, - .@"f16", - .@"f32", - .@"f64", - .@"f128", + .f16, + .f32, + .f64, + .f128, => return .Float, - .@"c_void" => return .Opaque, - .@"bool" => return .Bool, - .@"void" => return .Void, - .@"type" => return .Type, - .@"anyerror" => return .ErrorSet, - .@"comptime_int" => return .ComptimeInt, - .@"comptime_float" => return .ComptimeFloat, - .@"noreturn" => return .NoReturn, + .c_void => return .Opaque, + .bool => return .Bool, + .void => return .Void, + .type => return .Type, + .anyerror => return .ErrorSet, + .comptime_int => return .ComptimeInt, + .comptime_float => return .ComptimeFloat, + .noreturn => return .NoReturn, 
.fn_naked_noreturn_no_args => return .Fn, + .fn_ccc_void_no_args => return .Fn, .array, .array_u8_sentinel_0 => return .Array, .single_const_pointer => return .Pointer, @@ -153,35 +156,36 @@ pub const Type = extern union { while (true) { const t = ty.tag(); switch (t) { - .@"u8", - .@"i8", - .@"isize", - .@"usize", - .@"c_short", - .@"c_ushort", - .@"c_int", - .@"c_uint", - .@"c_long", - .@"c_ulong", - .@"c_longlong", - .@"c_ulonglong", - .@"c_longdouble", - .@"c_void", - .@"f16", - .@"f32", - .@"f64", - .@"f128", - .@"bool", - .@"void", - .@"type", - .@"anyerror", - .@"comptime_int", - .@"comptime_float", - .@"noreturn", + .u8, + .i8, + .isize, + .usize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .c_void, + .f16, + .f32, + .f64, + .f128, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, => return out_stream.writeAll(@tagName(t)), .const_slice_u8 => return out_stream.writeAll("[]const u8"), .fn_naked_noreturn_no_args => return out_stream.writeAll("fn() callconv(.Naked) noreturn"), + .fn_ccc_void_no_args => return out_stream.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int => return out_stream.writeAll("*const comptime_int"), .array_u8_sentinel_0 => { @@ -200,6 +204,14 @@ pub const Type = extern union { ty = payload.pointee_type; continue; }, + .int_signed => { + const payload = @fieldParentPtr(Payload.IntSigned, "base", ty.ptr_otherwise); + return out_stream.print("i{}", .{payload.bits}); + }, + .int_unsigned => { + const payload = @fieldParentPtr(Payload.IntUnsigned, "base", ty.ptr_otherwise); + return out_stream.print("u{}", .{payload.bits}); + }, } unreachable; } @@ -207,32 +219,33 @@ pub const Type = extern union { pub fn toValue(self: Type, allocator: *Allocator) Allocator.Error!Value { switch (self.tag()) { - .@"u8" => return Value.initTag(.u8_type), - .@"i8" => return Value.initTag(.i8_type), - .@"isize" => return Value.initTag(.isize_type), - .@"usize" => return Value.initTag(.usize_type), - .@"c_short" => return Value.initTag(.c_short_type), - .@"c_ushort" => return Value.initTag(.c_ushort_type), - .@"c_int" => return Value.initTag(.c_int_type), - .@"c_uint" => return Value.initTag(.c_uint_type), - .@"c_long" => return Value.initTag(.c_long_type), - .@"c_ulong" => return Value.initTag(.c_ulong_type), - .@"c_longlong" => return Value.initTag(.c_longlong_type), - .@"c_ulonglong" => return Value.initTag(.c_ulonglong_type), - .@"c_longdouble" => return Value.initTag(.c_longdouble_type), - .@"c_void" => return Value.initTag(.c_void_type), - .@"f16" => return Value.initTag(.f16_type), - .@"f32" => return Value.initTag(.f32_type), - .@"f64" => return Value.initTag(.f64_type), - .@"f128" => return Value.initTag(.f128_type), - .@"bool" => return Value.initTag(.bool_type), - .@"void" => return Value.initTag(.void_type), - .@"type" => return Value.initTag(.type_type), - .@"anyerror" => return Value.initTag(.anyerror_type), - .@"comptime_int" => return Value.initTag(.comptime_int_type), - .@"comptime_float" => return Value.initTag(.comptime_float_type), - .@"noreturn" => return Value.initTag(.noreturn_type), + .u8 => return Value.initTag(.u8_type), + .i8 => return Value.initTag(.i8_type), + .isize => return Value.initTag(.isize_type), + .usize => return Value.initTag(.usize_type), + .c_short => return Value.initTag(.c_short_type), + .c_ushort => return Value.initTag(.c_ushort_type), + .c_int => return Value.initTag(.c_int_type), + .c_uint => return 
Value.initTag(.c_uint_type), + .c_long => return Value.initTag(.c_long_type), + .c_ulong => return Value.initTag(.c_ulong_type), + .c_longlong => return Value.initTag(.c_longlong_type), + .c_ulonglong => return Value.initTag(.c_ulonglong_type), + .c_longdouble => return Value.initTag(.c_longdouble_type), + .c_void => return Value.initTag(.c_void_type), + .f16 => return Value.initTag(.f16_type), + .f32 => return Value.initTag(.f32_type), + .f64 => return Value.initTag(.f64_type), + .f128 => return Value.initTag(.f128_type), + .bool => return Value.initTag(.bool_type), + .void => return Value.initTag(.void_type), + .type => return Value.initTag(.type_type), + .anyerror => return Value.initTag(.anyerror_type), + .comptime_int => return Value.initTag(.comptime_int_type), + .comptime_float => return Value.initTag(.comptime_float_type), + .noreturn => return Value.initTag(.noreturn_type), .fn_naked_noreturn_no_args => return Value.initTag(.fn_naked_noreturn_no_args_type), + .fn_ccc_void_no_args => return Value.initTag(.fn_ccc_void_no_args_type), .single_const_pointer_to_comptime_int => return Value.initTag(.single_const_pointer_to_comptime_int_type), .const_slice_u8 => return Value.initTag(.const_slice_u8_type), else => { @@ -245,35 +258,38 @@ pub const Type = extern union { pub fn isSinglePointer(self: Type) bool { return switch (self.tag()) { - .@"u8", - .@"i8", - .@"isize", - .@"usize", - .@"c_short", - .@"c_ushort", - .@"c_int", - .@"c_uint", - .@"c_long", - .@"c_ulong", - .@"c_longlong", - .@"c_ulonglong", - .@"c_longdouble", - .@"f16", - .@"f32", - .@"f64", - .@"f128", - .@"c_void", - .@"bool", - .@"void", - .@"type", - .@"anyerror", - .@"comptime_int", - .@"comptime_float", - .@"noreturn", + .u8, + .i8, + .isize, + .usize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f128, + .c_void, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, .array, .array_u8_sentinel_0, .const_slice_u8, .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .int_unsigned, + .int_signed, => false, .single_const_pointer, @@ -284,36 +300,39 @@ pub const Type = extern union { pub fn isSlice(self: Type) bool { return switch (self.tag()) { - .@"u8", - .@"i8", - .@"isize", - .@"usize", - .@"c_short", - .@"c_ushort", - .@"c_int", - .@"c_uint", - .@"c_long", - .@"c_ulong", - .@"c_longlong", - .@"c_ulonglong", - .@"c_longdouble", - .@"f16", - .@"f32", - .@"f64", - .@"f128", - .@"c_void", - .@"bool", - .@"void", - .@"type", - .@"anyerror", - .@"comptime_int", - .@"comptime_float", - .@"noreturn", + .u8, + .i8, + .isize, + .usize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f128, + .c_void, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, .array, .array_u8_sentinel_0, .single_const_pointer, .single_const_pointer_to_comptime_int, .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .int_unsigned, + .int_signed, => false, .const_slice_u8 => true, @@ -323,34 +342,37 @@ pub const Type = extern union { /// Asserts the type is a pointer type. 
pub fn pointerIsConst(self: Type) bool { return switch (self.tag()) { - .@"u8", - .@"i8", - .@"isize", - .@"usize", - .@"c_short", - .@"c_ushort", - .@"c_int", - .@"c_uint", - .@"c_long", - .@"c_ulong", - .@"c_longlong", - .@"c_ulonglong", - .@"c_longdouble", - .@"f16", - .@"f32", - .@"f64", - .@"f128", - .@"c_void", - .@"bool", - .@"void", - .@"type", - .@"anyerror", - .@"comptime_int", - .@"comptime_float", - .@"noreturn", + .u8, + .i8, + .isize, + .usize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f128, + .c_void, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, .array, .array_u8_sentinel_0, .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .int_unsigned, + .int_signed, => unreachable, .single_const_pointer, @@ -363,32 +385,35 @@ pub const Type = extern union { /// Asserts the type is a pointer or array type. pub fn elemType(self: Type) Type { return switch (self.tag()) { - .@"u8", - .@"i8", - .@"isize", - .@"usize", - .@"c_short", - .@"c_ushort", - .@"c_int", - .@"c_uint", - .@"c_long", - .@"c_ulong", - .@"c_longlong", - .@"c_ulonglong", - .@"c_longdouble", - .@"f16", - .@"f32", - .@"f64", - .@"f128", - .@"c_void", - .@"bool", - .@"void", - .@"type", - .@"anyerror", - .@"comptime_int", - .@"comptime_float", - .@"noreturn", + .u8, + .i8, + .isize, + .usize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .c_longdouble, + .f16, + .f32, + .f64, + .f128, + .c_void, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .int_unsigned, + .int_signed, => unreachable, .array => self.cast(Payload.Array).?.elem_type, @@ -398,7 +423,7 @@ pub const Type = extern union { }; } - /// Asserts the type is an array. + /// Asserts the type is an array or vector. pub fn arrayLen(self: Type) u64 { return switch (self.tag()) { .u8, @@ -427,9 +452,12 @@ pub const Type = extern union { .comptime_float, .noreturn, .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, .single_const_pointer, .single_const_pointer_to_comptime_int, .const_slice_u8, + .int_unsigned, + .int_signed, => unreachable, .array => self.cast(Payload.Array).?.len, @@ -437,23 +465,67 @@ pub const Type = extern union { }; } + /// Returns true if and only if the type is a fixed-width, signed integer. + pub fn isSignedInt(self: Type) bool { + return switch (self.tag()) { + .f16, + .f32, + .f64, + .f128, + .c_longdouble, + .c_void, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .array, + .single_const_pointer, + .single_const_pointer_to_comptime_int, + .array_u8_sentinel_0, + .const_slice_u8, + .int_unsigned, + .u8, + .usize, + .c_ushort, + .c_uint, + .c_ulong, + .c_ulonglong, + => false, + + .int_signed, + .i8, + .isize, + .c_short, + .c_int, + .c_long, + .c_longlong, + => true, + }; + } + /// Asserts the type is a fixed-width integer. 
pub fn intInfo(self: Type, target: Target) struct { signed: bool, bits: u16 } { return switch (self.tag()) { - .@"f16", - .@"f32", - .@"f64", - .@"f128", - .@"c_longdouble", - .@"c_void", - .@"bool", - .@"void", - .@"type", - .@"anyerror", - .@"comptime_int", - .@"comptime_float", - .@"noreturn", + .f16, + .f32, + .f64, + .f128, + .c_longdouble, + .c_void, + .bool, + .void, + .type, + .anyerror, + .comptime_int, + .comptime_float, + .noreturn, .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, .array, .single_const_pointer, .single_const_pointer_to_comptime_int, @@ -461,18 +533,46 @@ pub const Type = extern union { .const_slice_u8, => unreachable, - .@"u8" => .{ .signed = false, .bits = 8 }, - .@"i8" => .{ .signed = true, .bits = 8 }, - .@"usize" => .{ .signed = false, .bits = target.cpu.arch.ptrBitWidth() }, - .@"isize" => .{ .signed = true, .bits = target.cpu.arch.ptrBitWidth() }, - .@"c_short" => .{ .signed = true, .bits = CInteger.short.sizeInBits(target) }, - .@"c_ushort" => .{ .signed = false, .bits = CInteger.ushort.sizeInBits(target) }, - .@"c_int" => .{ .signed = true, .bits = CInteger.int.sizeInBits(target) }, - .@"c_uint" => .{ .signed = false, .bits = CInteger.uint.sizeInBits(target) }, - .@"c_long" => .{ .signed = true, .bits = CInteger.long.sizeInBits(target) }, - .@"c_ulong" => .{ .signed = false, .bits = CInteger.ulong.sizeInBits(target) }, - .@"c_longlong" => .{ .signed = true, .bits = CInteger.longlong.sizeInBits(target) }, - .@"c_ulonglong" => .{ .signed = false, .bits = CInteger.ulonglong.sizeInBits(target) }, + .int_unsigned => .{ .signed = false, .bits = self.cast(Payload.IntUnsigned).?.bits }, + .int_signed => .{ .signed = true, .bits = self.cast(Payload.IntSigned).?.bits }, + .u8 => .{ .signed = false, .bits = 8 }, + .i8 => .{ .signed = true, .bits = 8 }, + .usize => .{ .signed = false, .bits = target.cpu.arch.ptrBitWidth() }, + .isize => .{ .signed = true, .bits = target.cpu.arch.ptrBitWidth() }, + .c_short => .{ .signed = true, .bits = CType.short.sizeInBits(target) }, + .c_ushort => .{ .signed = false, .bits = CType.ushort.sizeInBits(target) }, + .c_int => .{ .signed = true, .bits = CType.int.sizeInBits(target) }, + .c_uint => .{ .signed = false, .bits = CType.uint.sizeInBits(target) }, + .c_long => .{ .signed = true, .bits = CType.long.sizeInBits(target) }, + .c_ulong => .{ .signed = false, .bits = CType.ulong.sizeInBits(target) }, + .c_longlong => .{ .signed = true, .bits = CType.longlong.sizeInBits(target) }, + .c_ulonglong => .{ .signed = false, .bits = CType.ulonglong.sizeInBits(target) }, + }; + } + + pub fn isFloat(self: Type) bool { + return switch (self.tag()) { + .f16, + .f32, + .f64, + .f128, + .c_longdouble, + => true, + + else => false, + }; + } + + /// Asserts the type is a fixed-size float. 
+ pub fn floatBits(self: Type, target: Target) u16 { + return switch (self.tag()) { + .f16 => 16, + .f32 => 32, + .f64 => 64, + .f128 => 128, + .c_longdouble => CType.longdouble.sizeInBits(target), + + else => unreachable, }; } @@ -480,6 +580,7 @@ pub const Type = extern union { pub fn fnParamLen(self: Type) usize { return switch (self.tag()) { .fn_naked_noreturn_no_args => 0, + .fn_ccc_void_no_args => 0, .f16, .f32, @@ -511,6 +612,8 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, + .int_unsigned, + .int_signed, => unreachable, }; } @@ -520,6 +623,7 @@ pub const Type = extern union { pub fn fnParamTypes(self: Type, types: []Type) void { switch (self.tag()) { .fn_naked_noreturn_no_args => return, + .fn_ccc_void_no_args => return, .f16, .f32, @@ -551,6 +655,8 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, + .int_unsigned, + .int_signed, => unreachable, } } @@ -559,6 +665,7 @@ pub const Type = extern union { pub fn fnReturnType(self: Type) Type { return switch (self.tag()) { .fn_naked_noreturn_no_args => Type.initTag(.noreturn), + .fn_ccc_void_no_args => Type.initTag(.void), .f16, .f32, @@ -590,6 +697,8 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, + .int_unsigned, + .int_signed, => unreachable, }; } @@ -598,6 +707,7 @@ pub const Type = extern union { pub fn fnCallingConvention(self: Type) std.builtin.CallingConvention { return switch (self.tag()) { .fn_naked_noreturn_no_args => .Naked, + .fn_ccc_void_no_args => .C, .f16, .f32, @@ -629,10 +739,148 @@ pub const Type = extern union { .c_ulong, .c_longlong, .c_ulonglong, + .int_unsigned, + .int_signed, => unreachable, }; } + pub fn isNumeric(self: Type) bool { + return switch (self.tag()) { + .f16, + .f32, + .f64, + .f128, + .c_longdouble, + .comptime_int, + .comptime_float, + .u8, + .i8, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .int_unsigned, + .int_signed, + => true, + + .c_void, + .bool, + .void, + .type, + .anyerror, + .noreturn, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .array, + .single_const_pointer, + .single_const_pointer_to_comptime_int, + .array_u8_sentinel_0, + .const_slice_u8, + => false, + }; + } + + pub fn onePossibleValue(self: Type) bool { + var ty = self; + while (true) switch (ty.tag()) { + .f16, + .f32, + .f64, + .f128, + .c_longdouble, + .comptime_int, + .comptime_float, + .u8, + .i8, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .bool, + .type, + .anyerror, + .fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .single_const_pointer_to_comptime_int, + .array_u8_sentinel_0, + .const_slice_u8, + => return false, + + .c_void, + .void, + .noreturn, + => return true, + + .int_unsigned => return ty.cast(Payload.IntUnsigned).?.bits == 0, + .int_signed => return ty.cast(Payload.IntSigned).?.bits == 0, + .array => { + const array = ty.cast(Payload.Array).?; + if (array.len == 0) + return true; + ty = array.elem_type; + continue; + }, + .single_const_pointer => { + const ptr = ty.cast(Payload.SingleConstPointer).?; + ty = ptr.pointee_type; + continue; + }, + }; + } + + pub fn isCPtr(self: Type) bool { + return switch (self.tag()) { + .f16, + .f32, + .f64, + .f128, + .c_longdouble, + .comptime_int, + .comptime_float, + .u8, + .i8, + .usize, + .isize, + .c_short, + .c_ushort, + .c_int, + .c_uint, + .c_long, + .c_ulong, + .c_longlong, + .c_ulonglong, + .bool, + .type, + .anyerror, + 
.fn_naked_noreturn_no_args, + .fn_ccc_void_no_args, + .single_const_pointer_to_comptime_int, + .array_u8_sentinel_0, + .const_slice_u8, + .c_void, + .void, + .noreturn, + .int_unsigned, + .int_signed, + .array, + .single_const_pointer, + => return false, + }; + } + /// This enum does not directly correspond to `std.builtin.TypeId` because /// it has extra enum tags in it, as a way of using less memory. For example, /// even though Zig recognizes `*align(10) i32` and `*i32` both as Pointer types @@ -667,6 +915,7 @@ pub const Type = extern union { comptime_float, noreturn, fn_naked_noreturn_no_args, + fn_ccc_void_no_args, single_const_pointer_to_comptime_int, const_slice_u8, // See last_no_payload_tag below. // After this, the tag requires a payload. @@ -674,6 +923,8 @@ pub const Type = extern union { array_u8_sentinel_0, array, single_const_pointer, + int_signed, + int_unsigned, pub const last_no_payload_tag = Tag.const_slice_u8; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; @@ -700,10 +951,22 @@ pub const Type = extern union { pointee_type: Type, }; + + pub const IntSigned = struct { + base: Payload = Payload{ .tag = .int_signed }, + + bits: u16, + }; + + pub const IntUnsigned = struct { + base: Payload = Payload{ .tag = .int_unsigned }, + + bits: u16, + }; }; }; -pub const CInteger = enum { +pub const CType = enum { short, ushort, int, @@ -712,8 +975,9 @@ pub const CInteger = enum { ulong, longlong, ulonglong, + longdouble, - pub fn sizeInBits(self: CInteger, target: Target) u16 { + pub fn sizeInBits(self: CType, target: Target) u16 { const arch = target.cpu.arch; switch (target.os.tag) { .freestanding, .other => switch (target.cpu.arch) { @@ -729,6 +993,7 @@ pub const CInteger = enum { .longlong, .ulonglong, => return 64, + .longdouble => @panic("TODO figure out what kind of float `long double` is on this target"), }, else => switch (self) { .short, @@ -743,6 +1008,7 @@ pub const CInteger = enum { .longlong, .ulonglong, => return 64, + .longdouble => @panic("TODO figure out what kind of float `long double` is on this target"), }, }, @@ -767,6 +1033,7 @@ pub const CInteger = enum { .longlong, .ulonglong, => return 64, + .longdouble => @panic("TODO figure out what kind of float `long double` is on this target"), }, .windows, .uefi => switch (self) { @@ -781,6 +1048,7 @@ pub const CInteger = enum { .longlong, .ulonglong, => return 64, + .longdouble => @panic("TODO figure out what kind of float `long double` is on this target"), }, .ios => switch (self) { @@ -795,6 +1063,7 @@ pub const CInteger = enum { .longlong, .ulonglong, => return 64, + .longdouble => @panic("TODO figure out what kind of float `long double` is on this target"), }, .ananas, @@ -821,7 +1090,7 @@ pub const CInteger = enum { .amdpal, .hermit, .hurd, - => @panic("TODO specify the C integer type sizes for this OS"), + => @panic("TODO specify the C integer and float type sizes for this OS"), } } }; diff --git a/src-self-hosted/value.zig b/src-self-hosted/value.zig index 2adbb7807e..3d04e6e813 100644 --- a/src-self-hosted/value.zig +++ b/src-self-hosted/value.zig @@ -2,7 +2,8 @@ const std = @import("std"); const Type = @import("type.zig").Type; const log2 = std.math.log2; const assert = std.debug.assert; -const BigInt = std.math.big.Int; +const BigIntConst = std.math.big.int.Const; +const BigIntMutable = std.math.big.int.Mutable; const Target = std.Target; const Allocator = std.mem.Allocator; @@ -45,12 +46,14 @@ pub const Value = extern union { comptime_float_type, noreturn_type, 
fn_naked_noreturn_no_args_type, + fn_ccc_void_no_args_type, single_const_pointer_to_comptime_int_type, const_slice_u8_type, + undef, zero, - void_value, - noreturn_value, + the_one_possible_value, // when the type only has one possible value + null_value, bool_true, bool_false, // See last_no_payload_tag below. // After this, the tag requires a payload. @@ -58,11 +61,13 @@ pub const Value = extern union { ty, int_u64, int_i64, - int_big, + int_big_positive, + int_big_negative, function, ref, ref_val, bytes, + repeated, // the value is a value repeated some number of times pub const last_no_payload_tag = Tag.bool_false; pub const no_payload_count = @enumToInt(last_no_payload_tag) + 1; @@ -132,18 +137,21 @@ pub const Value = extern union { .comptime_float_type => return out_stream.writeAll("comptime_float"), .noreturn_type => return out_stream.writeAll("noreturn"), .fn_naked_noreturn_no_args_type => return out_stream.writeAll("fn() callconv(.Naked) noreturn"), + .fn_ccc_void_no_args_type => return out_stream.writeAll("fn() callconv(.C) void"), .single_const_pointer_to_comptime_int_type => return out_stream.writeAll("*const comptime_int"), .const_slice_u8_type => return out_stream.writeAll("[]const u8"), + .null_value => return out_stream.writeAll("null"), + .undef => return out_stream.writeAll("undefined"), .zero => return out_stream.writeAll("0"), - .void_value => return out_stream.writeAll("{}"), - .noreturn_value => return out_stream.writeAll("unreachable"), + .the_one_possible_value => return out_stream.writeAll("(one possible value)"), .bool_true => return out_stream.writeAll("true"), .bool_false => return out_stream.writeAll("false"), .ty => return val.cast(Payload.Ty).?.ty.format("", options, out_stream), .int_u64 => return std.fmt.formatIntValue(val.cast(Payload.Int_u64).?.int, "", options, out_stream), .int_i64 => return std.fmt.formatIntValue(val.cast(Payload.Int_i64).?.int, "", options, out_stream), - .int_big => return out_stream.print("{}", .{val.cast(Payload.IntBig).?.big_int}), + .int_big_positive => return out_stream.print("{}", .{val.cast(Payload.IntBigPositive).?.asBigInt()}), + .int_big_negative => return out_stream.print("{}", .{val.cast(Payload.IntBigNegative).?.asBigInt()}), .function => return out_stream.writeAll("(function)"), .ref => return out_stream.writeAll("(ref)"), .ref_val => { @@ -152,6 +160,10 @@ pub const Value = extern union { continue; }, .bytes => return std.zig.renderStringLiteral(self.cast(Payload.Bytes).?.data, out_stream), + .repeated => { + try out_stream.writeAll("(repeated) "); + val = val.cast(Payload.Repeated).?.val; + }, }; } @@ -195,27 +207,31 @@ pub const Value = extern union { .comptime_float_type => Type.initTag(.@"comptime_float"), .noreturn_type => Type.initTag(.@"noreturn"), .fn_naked_noreturn_no_args_type => Type.initTag(.fn_naked_noreturn_no_args), + .fn_ccc_void_no_args_type => Type.initTag(.fn_ccc_void_no_args), .single_const_pointer_to_comptime_int_type => Type.initTag(.single_const_pointer_to_comptime_int), .const_slice_u8_type => Type.initTag(.const_slice_u8), + .undef, .zero, - .void_value, - .noreturn_value, + .the_one_possible_value, .bool_true, .bool_false, + .null_value, .int_u64, .int_i64, - .int_big, + .int_big_positive, + .int_big_negative, .function, .ref, .ref_val, .bytes, + .repeated, => unreachable, }; } /// Asserts the value is an integer. 
-    pub fn toBigInt(self: Value, allocator: *Allocator) Allocator.Error!BigInt {
+    pub fn toBigInt(self: Value, space: *BigIntSpace) BigIntConst {
         switch (self.tag()) {
             .ty,
             .u8_type,
@@ -244,23 +260,28 @@ pub const Value = extern union {
             .comptime_float_type,
             .noreturn_type,
             .fn_naked_noreturn_no_args_type,
+            .fn_ccc_void_no_args_type,
             .single_const_pointer_to_comptime_int_type,
             .const_slice_u8_type,
-            .void_value,
-            .noreturn_value,
             .bool_true,
             .bool_false,
+            .null_value,
             .function,
             .ref,
             .ref_val,
             .bytes,
+            .undef,
+            .repeated,
             => unreachable,

-            .zero => return BigInt.initSet(allocator, 0),
+            .the_one_possible_value, // An integer with one possible value is always zero.
+            .zero,
+            => return BigIntMutable.init(&space.limbs, 0).toConst(),

-            .int_u64 => return BigInt.initSet(allocator, self.cast(Payload.Int_u64).?.int),
-            .int_i64 => return BigInt.initSet(allocator, self.cast(Payload.Int_i64).?.int),
-            .int_big => return self.cast(Payload.IntBig).?.big_int,
+            .int_u64 => return BigIntMutable.init(&space.limbs, self.cast(Payload.Int_u64).?.int).toConst(),
+            .int_i64 => return BigIntMutable.init(&space.limbs, self.cast(Payload.Int_i64).?.int).toConst(),
+            .int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt(),
+            .int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt(),
         }
     }

@@ -294,23 +315,90 @@ pub const Value = extern union {
             .comptime_float_type,
             .noreturn_type,
             .fn_naked_noreturn_no_args_type,
+            .fn_ccc_void_no_args_type,
             .single_const_pointer_to_comptime_int_type,
             .const_slice_u8_type,
-            .void_value,
-            .noreturn_value,
             .bool_true,
             .bool_false,
+            .null_value,
             .function,
             .ref,
             .ref_val,
             .bytes,
+            .undef,
+            .repeated,
             => unreachable,

-            .zero => return 0,
+            .zero,
+            .the_one_possible_value, // an integer with one possible value is always zero
+            => return 0,

             .int_u64 => return self.cast(Payload.Int_u64).?.int,
             .int_i64 => return @intCast(u64, self.cast(Payload.Int_u64).?.int),
-            .int_big => return self.cast(Payload.IntBig).?.big_int.to(u64) catch unreachable,
+            .int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt().to(u64) catch unreachable,
+            .int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt().to(u64) catch unreachable,
+        }
+    }
+
+    /// Asserts the value is an integer and not undefined.
+    /// Returns the number of bits the value requires to represent stored in twos complement form.
+ pub fn intBitCountTwosComp(self: Value) usize { + switch (self.tag()) { + .ty, + .u8_type, + .i8_type, + .isize_type, + .usize_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f128_type, + .c_void_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .fn_naked_noreturn_no_args_type, + .fn_ccc_void_no_args_type, + .single_const_pointer_to_comptime_int_type, + .const_slice_u8_type, + .bool_true, + .bool_false, + .null_value, + .function, + .ref, + .ref_val, + .bytes, + .undef, + .repeated, + => unreachable, + + .the_one_possible_value, // an integer with one possible value is always zero + .zero, + => return 0, + + .int_u64 => { + const x = self.cast(Payload.Int_u64).?.int; + if (x == 0) return 0; + return std.math.log2(x) + 1; + }, + .int_i64 => { + @panic("TODO implement i64 intBitCountTwosComp"); + }, + .int_big_positive => return self.cast(Payload.IntBigPositive).?.asBigInt().bitCountTwosComp(), + .int_big_negative => return self.cast(Payload.IntBigNegative).?.asBigInt().bitCountTwosComp(), } } @@ -344,19 +432,23 @@ pub const Value = extern union { .comptime_float_type, .noreturn_type, .fn_naked_noreturn_no_args_type, + .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int_type, .const_slice_u8_type, - .void_value, - .noreturn_value, .bool_true, .bool_false, + .null_value, .function, .ref, .ref_val, .bytes, + .repeated, => unreachable, - .zero => return true, + .zero, + .undef, + .the_one_possible_value, // an integer with one possible value is always zero + => return true, .int_u64 => switch (ty.zigTypeTag()) { .Int => { @@ -381,10 +473,18 @@ pub const Value = extern union { .ComptimeInt => return true, else => unreachable, }, - .int_big => switch (ty.zigTypeTag()) { + .int_big_positive => switch (ty.zigTypeTag()) { .Int => { const info = ty.intInfo(target); - return self.cast(Payload.IntBig).?.big_int.fitsInTwosComp(info.signed, info.bits); + return self.cast(Payload.IntBigPositive).?.asBigInt().fitsInTwosComp(info.signed, info.bits); + }, + .ComptimeInt => return true, + else => unreachable, + }, + .int_big_negative => switch (ty.zigTypeTag()) { + .Int => { + const info = ty.intInfo(target); + return self.cast(Payload.IntBigNegative).?.asBigInt().fitsInTwosComp(info.signed, info.bits); }, .ComptimeInt => return true, else => unreachable, @@ -392,9 +492,9 @@ pub const Value = extern union { } } - /// Asserts the value is a pointer and dereferences it. 
-    pub fn pointerDeref(self: Value) Value {
-        switch (self.tag()) {
+    /// Asserts the value is a float
+    pub fn floatHasFraction(self: Value) bool {
+        return switch (self.tag()) {
             .ty,
             .u8_type,
             .i8_type,
@@ -422,23 +522,170 @@ pub const Value = extern union {
             .comptime_float_type,
             .noreturn_type,
             .fn_naked_noreturn_no_args_type,
+            .fn_ccc_void_no_args_type,
+            .single_const_pointer_to_comptime_int_type,
+            .const_slice_u8_type,
+            .bool_true,
+            .bool_false,
+            .null_value,
+            .function,
+            .ref,
+            .ref_val,
+            .bytes,
+            .repeated,
+            .undef,
+            .int_u64,
+            .int_i64,
+            .int_big_positive,
+            .int_big_negative,
+            .the_one_possible_value,
+            => unreachable,
+
+            .zero => false,
+        };
+    }
+
+    pub fn orderAgainstZero(lhs: Value) std.math.Order {
+        switch (lhs.tag()) {
+            .ty,
+            .u8_type,
+            .i8_type,
+            .isize_type,
+            .usize_type,
+            .c_short_type,
+            .c_ushort_type,
+            .c_int_type,
+            .c_uint_type,
+            .c_long_type,
+            .c_ulong_type,
+            .c_longlong_type,
+            .c_ulonglong_type,
+            .c_longdouble_type,
+            .f16_type,
+            .f32_type,
+            .f64_type,
+            .f128_type,
+            .c_void_type,
+            .bool_type,
+            .void_type,
+            .type_type,
+            .anyerror_type,
+            .comptime_int_type,
+            .comptime_float_type,
+            .noreturn_type,
+            .fn_naked_noreturn_no_args_type,
+            .fn_ccc_void_no_args_type,
+            .single_const_pointer_to_comptime_int_type,
+            .const_slice_u8_type,
+            .bool_true,
+            .bool_false,
+            .null_value,
+            .function,
+            .ref,
+            .ref_val,
+            .bytes,
+            .repeated,
+            .undef,
+            => unreachable,
+
+            .zero,
+            .the_one_possible_value, // an integer with one possible value is always zero
+            => return .eq,
+
+            .int_u64 => return std.math.order(lhs.cast(Payload.Int_u64).?.int, 0),
+            .int_i64 => return std.math.order(lhs.cast(Payload.Int_i64).?.int, 0),
+            .int_big_positive => return lhs.cast(Payload.IntBigPositive).?.asBigInt().orderAgainstScalar(0),
+            .int_big_negative => return lhs.cast(Payload.IntBigNegative).?.asBigInt().orderAgainstScalar(0),
+        }
+    }
+
+    /// Asserts the value is comparable.
+    pub fn order(lhs: Value, rhs: Value) std.math.Order {
+        const lhs_tag = lhs.tag();
+        const rhs_tag = rhs.tag();
+        const lhs_is_zero = lhs_tag == .zero or lhs_tag == .the_one_possible_value;
+        const rhs_is_zero = rhs_tag == .zero or rhs_tag == .the_one_possible_value;
+        if (lhs_is_zero) return rhs.orderAgainstZero().invert();
+        if (rhs_is_zero) return lhs.orderAgainstZero();
+
+        // TODO floats
+
+        var lhs_bigint_space: BigIntSpace = undefined;
+        var rhs_bigint_space: BigIntSpace = undefined;
+        const lhs_bigint = lhs.toBigInt(&lhs_bigint_space);
+        const rhs_bigint = rhs.toBigInt(&rhs_bigint_space);
+        return lhs_bigint.order(rhs_bigint);
+    }
+
+    /// Asserts the value is comparable.
+    pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value) bool {
+        return order(lhs, rhs).compare(op);
+    }
+
+    /// Asserts the value is comparable.
+    pub fn compareWithZero(lhs: Value, op: std.math.CompareOperator) bool {
+        return orderAgainstZero(lhs).compare(op);
+    }
+
+    pub fn toBool(self: Value) bool {
+        return switch (self.tag()) {
+            .bool_true => true,
+            .bool_false => false,
+            else => unreachable,
+        };
+    }
+
+    /// Asserts the value is a pointer and dereferences it.
+ pub fn pointerDeref(self: Value) Value { + return switch (self.tag()) { + .ty, + .u8_type, + .i8_type, + .isize_type, + .usize_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f128_type, + .c_void_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .fn_naked_noreturn_no_args_type, + .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int_type, .const_slice_u8_type, .zero, - .void_value, - .noreturn_value, .bool_true, .bool_false, + .null_value, .function, .int_u64, .int_i64, - .int_big, + .int_big_positive, + .int_big_negative, .bytes, + .undef, + .repeated, => unreachable, - .ref => return self.cast(Payload.Ref).?.cell.contents, - .ref_val => return self.cast(Payload.RefVal).?.val, - } + .the_one_possible_value => Value.initTag(.the_one_possible_value), + .ref => self.cast(Payload.Ref).?.cell.contents, + .ref_val => self.cast(Payload.RefVal).?.val, + }; } /// Asserts the value is a single-item pointer to an array, or an array, @@ -472,17 +719,20 @@ pub const Value = extern union { .comptime_float_type, .noreturn_type, .fn_naked_noreturn_no_args_type, + .fn_ccc_void_no_args_type, .single_const_pointer_to_comptime_int_type, .const_slice_u8_type, .zero, - .void_value, - .noreturn_value, + .the_one_possible_value, .bool_true, .bool_false, + .null_value, .function, .int_u64, .int_i64, - .int_big, + .int_big_positive, + .int_big_negative, + .undef, => unreachable, .ref => @panic("TODO figure out how MemoryCell works"), @@ -493,9 +743,70 @@ pub const Value = extern union { int_payload.* = .{ .int = self.cast(Payload.Bytes).?.data[index] }; return Value.initPayload(&int_payload.base); }, + + // No matter the index; all the elements are the same! + .repeated => return self.cast(Payload.Repeated).?.val, } } + pub fn isUndef(self: Value) bool { + return self.tag() == .undef; + } + + /// Valid for all types. Asserts the value is not undefined. + /// `.the_one_possible_value` is reported as not null. + pub fn isNull(self: Value) bool { + return switch (self.tag()) { + .ty, + .u8_type, + .i8_type, + .isize_type, + .usize_type, + .c_short_type, + .c_ushort_type, + .c_int_type, + .c_uint_type, + .c_long_type, + .c_ulong_type, + .c_longlong_type, + .c_ulonglong_type, + .c_longdouble_type, + .f16_type, + .f32_type, + .f64_type, + .f128_type, + .c_void_type, + .bool_type, + .void_type, + .type_type, + .anyerror_type, + .comptime_int_type, + .comptime_float_type, + .noreturn_type, + .fn_naked_noreturn_no_args_type, + .fn_ccc_void_no_args_type, + .single_const_pointer_to_comptime_int_type, + .const_slice_u8_type, + .zero, + .the_one_possible_value, + .bool_true, + .bool_false, + .function, + .int_u64, + .int_i64, + .int_big_positive, + .int_big_negative, + .ref, + .ref_val, + .bytes, + .repeated, + => false, + + .undef => unreachable, + .null_value => true, + }; + } + /// This type is not copyable since it may contain pointers to its inner data. 
     pub const Payload = struct {
         tag: Tag,
@@ -510,9 +821,22 @@ pub const Value = extern union {
             int: i64,
         };

-        pub const IntBig = struct {
-            base: Payload = Payload{ .tag = .int_big },
-            big_int: BigInt,
+        pub const IntBigPositive = struct {
+            base: Payload = Payload{ .tag = .int_big_positive },
+            limbs: []const std.math.big.Limb,
+
+            pub fn asBigInt(self: IntBigPositive) BigIntConst {
+                return BigIntConst{ .limbs = self.limbs, .positive = true };
+            }
+        };
+
+        pub const IntBigNegative = struct {
+            base: Payload = Payload{ .tag = .int_big_negative },
+            limbs: []const std.math.big.Limb,
+
+            pub fn asBigInt(self: IntBigNegative) BigIntConst {
+                return BigIntConst{ .limbs = self.limbs, .positive = false };
+            }
         };

         pub const Function = struct {
@@ -550,6 +874,20 @@ pub const Value = extern union {
             base: Payload = Payload{ .tag = .ty },
             ty: Type,
         };
+
+        pub const Repeated = struct {
+            base: Payload = Payload{ .tag = .repeated },
+            /// This value is repeated some number of times. The amount of times to repeat
+            /// is stored externally.
+            val: Value,
+        };
+    };
+
+    /// Big enough to fit any non-BigInt value
+    pub const BigIntSpace = struct {
+        /// The +1 is headroom so that operations such as incrementing once or decrementing once
+        /// are possible without using an allocator.
+        limbs: [(@sizeOf(u64) / @sizeOf(std.math.big.Limb)) + 1]std.math.big.Limb,
+    };
 };

diff --git a/src/all_types.hpp b/src/all_types.hpp
index 9304a215dc..750cfdab6b 100644
--- a/src/all_types.hpp
+++ b/src/all_types.hpp
@@ -3453,7 +3453,6 @@ struct IrInstSrcOptionalUnwrapPtr {
     IrInstSrc *base_ptr;
     bool safety_check_on;
-    bool initializing;
 };

 struct IrInstGenOptionalUnwrapPtr {
diff --git a/src/ir.cpp b/src/ir.cpp
index 4795645544..f37f91088a 100644
--- a/src/ir.cpp
+++ b/src/ir.cpp
@@ -2967,12 +2967,11 @@ static IrInstGen *ir_build_test_non_null_gen(IrAnalyze *ira, IrInst *source_inst
 }

 static IrInstSrc *ir_build_optional_unwrap_ptr(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
-        IrInstSrc *base_ptr, bool safety_check_on, bool initializing)
+        IrInstSrc *base_ptr, bool safety_check_on)
 {
     IrInstSrcOptionalUnwrapPtr *instruction = ir_build_instruction<IrInstSrcOptionalUnwrapPtr>(irb, scope, source_node);
     instruction->base_ptr = base_ptr;
     instruction->safety_check_on = safety_check_on;
-    instruction->initializing = initializing;

     ir_ref_instruction(base_ptr, irb->current_basic_block);

@@ -5721,7 +5720,7 @@ static IrInstSrc *ir_gen_orelse(IrBuilderSrc *irb, Scope *parent_scope, AstNode
     ir_mark_gen(ir_build_br(irb, parent_scope, node, end_block, is_comptime));

     ir_set_cursor_at_end_and_append_block(irb, ok_block);
-    IrInstSrc *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, parent_scope, node, maybe_ptr, false, false);
+    IrInstSrc *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, parent_scope, node, maybe_ptr, false);
     IrInstSrc *unwrapped_payload = ir_build_load_ptr(irb, parent_scope, node, unwrapped_ptr);
     ir_build_end_expr(irb, parent_scope, node, unwrapped_payload, &peer_parent->peers.at(1)->base);
     IrBasicBlockSrc *after_ok_block = irb->current_basic_block;
@@ -8080,7 +8079,7 @@ static IrInstSrc *ir_gen_while_expr(IrBuilderSrc *irb, Scope *scope, AstNode *no
             is_comptime);
     ir_set_cursor_at_end_and_append_block(irb, body_block);
-    IrInstSrc *payload_ptr = ir_build_optional_unwrap_ptr(irb, &spill_scope->base, symbol_node, maybe_val_ptr, false, false);
+    IrInstSrc *payload_ptr = ir_build_optional_unwrap_ptr(irb, &spill_scope->base, symbol_node, maybe_val_ptr, false);
     IrInstSrc *var_value = node->data.while_expr.var_is_ptr ?
payload_ptr : ir_build_load_ptr(irb, &spill_scope->base, symbol_node, payload_ptr); build_decl_var_and_init(irb, child_scope, symbol_node, payload_var, var_value, buf_ptr(var_symbol), is_comptime); @@ -8743,7 +8742,7 @@ static IrInstSrc *ir_gen_if_optional_expr(IrBuilderSrc *irb, Scope *scope, AstNo ZigVar *var = ir_create_var(irb, node, subexpr_scope, var_symbol, is_const, is_const, is_shadowable, is_comptime); - IrInstSrc *payload_ptr = ir_build_optional_unwrap_ptr(irb, subexpr_scope, node, maybe_val_ptr, false, false); + IrInstSrc *payload_ptr = ir_build_optional_unwrap_ptr(irb, subexpr_scope, node, maybe_val_ptr, false); IrInstSrc *var_value = var_is_ptr ? payload_ptr : ir_build_load_ptr(irb, &spill_scope->base, node, payload_ptr); build_decl_var_and_init(irb, subexpr_scope, node, var, var_value, buf_ptr(var_symbol), is_comptime); @@ -9987,7 +9986,7 @@ static IrInstSrc *ir_gen_node_raw(IrBuilderSrc *irb, AstNode *node, Scope *scope if (maybe_ptr == irb->codegen->invalid_inst_src) return irb->codegen->invalid_inst_src; - IrInstSrc *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, scope, node, maybe_ptr, true, false); + IrInstSrc *unwrapped_ptr = ir_build_optional_unwrap_ptr(irb, scope, node, maybe_ptr, true ); if (lval == LValPtr || lval == LValAssign) return unwrapped_ptr; diff --git a/test/stage2/compare_output.zig b/test/stage2/compare_output.zig index 443ed7a0ee..1f289c2762 100644 --- a/test/stage2/compare_output.zig +++ b/test/stage2/compare_output.zig @@ -2,24 +2,27 @@ const std = @import("std"); const TestContext = @import("../../src-self-hosted/test.zig").TestContext; pub fn addCases(ctx: *TestContext) !void { - // hello world - try ctx.testCompareOutputLibC( - \\extern fn puts([*]const u8) void; - \\pub export fn main() c_int { - \\ puts("Hello, world!"); - \\ return 0; - \\} - , "Hello, world!" ++ std.cstr.line_sep); + // TODO: re-enable these tests. + // https://github.com/ziglang/zig/issues/1364 - // function calling another function - try ctx.testCompareOutputLibC( - \\extern fn puts(s: [*]const u8) void; - \\pub export fn main() c_int { - \\ return foo("OK"); - \\} - \\fn foo(s: [*]const u8) c_int { - \\ puts(s); - \\ return 0; - \\} - , "OK" ++ std.cstr.line_sep); + //// hello world + //try ctx.testCompareOutputLibC( + // \\extern fn puts([*]const u8) void; + // \\pub export fn main() c_int { + // \\ puts("Hello, world!"); + // \\ return 0; + // \\} + //, "Hello, world!" ++ std.cstr.line_sep); + + //// function calling another function + //try ctx.testCompareOutputLibC( + // \\extern fn puts(s: [*]const u8) void; + // \\pub export fn main() c_int { + // \\ return foo("OK"); + // \\} + // \\fn foo(s: [*]const u8) c_int { + // \\ puts(s); + // \\ return 0; + // \\} + //, "OK" ++ std.cstr.line_sep); } diff --git a/test/stage2/compile_errors.zig b/test/stage2/compile_errors.zig index 377d060056..9b8dcd91c4 100644 --- a/test/stage2/compile_errors.zig +++ b/test/stage2/compile_errors.zig @@ -1,54 +1,57 @@ const TestContext = @import("../../src-self-hosted/test.zig").TestContext; pub fn addCases(ctx: *TestContext) !void { - try ctx.testCompileError( - \\export fn entry() void {} - \\export fn entry() void {} - , "1.zig", 2, 8, "exported symbol collision: 'entry'"); + // TODO: re-enable these tests. 
+ // https://github.com/ziglang/zig/issues/1364 - try ctx.testCompileError( - \\fn() void {} - , "1.zig", 1, 1, "missing function name"); + //try ctx.testCompileError( + // \\export fn entry() void {} + // \\export fn entry() void {} + //, "1.zig", 2, 8, "exported symbol collision: 'entry'"); - try ctx.testCompileError( - \\comptime { - \\ return; - \\} - , "1.zig", 2, 5, "return expression outside function definition"); + //try ctx.testCompileError( + // \\fn() void {} + //, "1.zig", 1, 1, "missing function name"); - try ctx.testCompileError( - \\export fn entry() void { - \\ defer return; - \\} - , "1.zig", 2, 11, "cannot return from defer expression"); + //try ctx.testCompileError( + // \\comptime { + // \\ return; + // \\} + //, "1.zig", 2, 5, "return expression outside function definition"); - try ctx.testCompileError( - \\export fn entry() c_int { - \\ return 36893488147419103232; - \\} - , "1.zig", 2, 12, "integer value '36893488147419103232' cannot be stored in type 'c_int'"); + //try ctx.testCompileError( + // \\export fn entry() void { + // \\ defer return; + // \\} + //, "1.zig", 2, 11, "cannot return from defer expression"); - try ctx.testCompileError( - \\comptime { - \\ var a: *align(4) align(4) i32 = 0; - \\} - , "1.zig", 2, 22, "Extra align qualifier"); + //try ctx.testCompileError( + // \\export fn entry() c_int { + // \\ return 36893488147419103232; + // \\} + //, "1.zig", 2, 12, "integer value '36893488147419103232' cannot be stored in type 'c_int'"); - try ctx.testCompileError( - \\comptime { - \\ var b: *const const i32 = 0; - \\} - , "1.zig", 2, 19, "Extra align qualifier"); + //try ctx.testCompileError( + // \\comptime { + // \\ var a: *align(4) align(4) i32 = 0; + // \\} + //, "1.zig", 2, 22, "Extra align qualifier"); - try ctx.testCompileError( - \\comptime { - \\ var c: *volatile volatile i32 = 0; - \\} - , "1.zig", 2, 22, "Extra align qualifier"); + //try ctx.testCompileError( + // \\comptime { + // \\ var b: *const const i32 = 0; + // \\} + //, "1.zig", 2, 19, "Extra align qualifier"); - try ctx.testCompileError( - \\comptime { - \\ var d: *allowzero allowzero i32 = 0; - \\} - , "1.zig", 2, 23, "Extra align qualifier"); + //try ctx.testCompileError( + // \\comptime { + // \\ var c: *volatile volatile i32 = 0; + // \\} + //, "1.zig", 2, 22, "Extra align qualifier"); + + //try ctx.testCompileError( + // \\comptime { + // \\ var d: *allowzero allowzero i32 = 0; + // \\} + //, "1.zig", 2, 23, "Extra align qualifier"); } diff --git a/test/stage2/ir.zig b/test/stage2/ir.zig deleted file mode 100644 index 450d8fa102..0000000000 --- a/test/stage2/ir.zig +++ /dev/null @@ -1,54 +0,0 @@ -test "hello world IR" { - exeCmp( - \\@0 = str("Hello, world!\n") - \\@1 = primitive(void) - \\@2 = primitive(usize) - \\@3 = fntype([], @1, cc=Naked) - \\@4 = int(0) - \\@5 = int(1) - \\@6 = int(231) - \\@7 = str("len") - \\ - \\@8 = fn(@3, { - \\ %0 = as(@2, @5) ; SYS_write - \\ %1 = as(@2, @5) ; STDOUT_FILENO - \\ %2 = ptrtoint(@0) ; msg ptr - \\ %3 = fieldptr(@0, @7) ; msg len ptr - \\ %4 = deref(%3) ; msg len - \\ %sysoutreg = str("={rax}") - \\ %rax = str("{rax}") - \\ %rdi = str("{rdi}") - \\ %rsi = str("{rsi}") - \\ %rdx = str("{rdx}") - \\ %rcx = str("rcx") - \\ %r11 = str("r11") - \\ %memory = str("memory") - \\ %syscall = str("syscall") - \\ %5 = asm(%syscall, @2, - \\ volatile=1, - \\ output=%sysoutreg, - \\ inputs=[%rax, %rdi, %rsi, %rdx], - \\ clobbers=[%rcx, %r11, %memory], - \\ args=[%0, %1, %2, %4]) - \\ - \\ %6 = as(@2, @6) ;SYS_exit_group - \\ %7 = as(@2, @4) ;exit code 
- \\ %8 = asm(%syscall, @2, - \\ volatile=1, - \\ output=%sysoutreg, - \\ inputs=[%rax, %rdi], - \\ clobbers=[%rcx, %r11, %memory], - \\ args=[%6, %7]) - \\ - \\ %9 = unreachable() - \\}) - \\ - \\@9 = str("_start") - \\@10 = export(@9, @8) - , - \\Hello, world! - \\ - ); -} - -fn exeCmp(src: []const u8, expected_stdout: []const u8) void {} diff --git a/test/stage2/test.zig b/test/stage2/test.zig index f4768cd39a..dc92f99506 100644 --- a/test/stage2/test.zig +++ b/test/stage2/test.zig @@ -3,4 +3,5 @@ const TestContext = @import("../../src-self-hosted/test.zig").TestContext; pub fn addCases(ctx: *TestContext) !void { try @import("compile_errors.zig").addCases(ctx); try @import("compare_output.zig").addCases(ctx); + @import("zir.zig").addCases(ctx); } diff --git a/test/stage2/zir.zig b/test/stage2/zir.zig new file mode 100644 index 0000000000..9a65e9ab96 --- /dev/null +++ b/test/stage2/zir.zig @@ -0,0 +1,107 @@ +const TestContext = @import("../../src-self-hosted/test.zig").TestContext; + +pub fn addCases(ctx: *TestContext) void { + ctx.addZIRTransform("elemptr, add, cmp, condbr, return, breakpoint", + \\@void = primitive(void) + \\@usize = primitive(usize) + \\@fnty = fntype([], @void, cc=C) + \\@0 = int(0) + \\@1 = int(1) + \\@2 = int(2) + \\@3 = int(3) + \\ + \\@entry = fn(@fnty, { + \\ %a = str("\x32\x08\x01\x0a") + \\ %eptr0 = elemptr(%a, @0) + \\ %eptr1 = elemptr(%a, @1) + \\ %eptr2 = elemptr(%a, @2) + \\ %eptr3 = elemptr(%a, @3) + \\ %v0 = deref(%eptr0) + \\ %v1 = deref(%eptr1) + \\ %v2 = deref(%eptr2) + \\ %v3 = deref(%eptr3) + \\ %x0 = add(%v0, %v1) + \\ %x1 = add(%v2, %v3) + \\ %result = add(%x0, %x1) + \\ + \\ %expected = int(69) + \\ %ok = cmp(%result, eq, %expected) + \\ %10 = condbr(%ok, { + \\ %11 = return() + \\ }, { + \\ %12 = breakpoint() + \\ }) + \\}) + \\ + \\@9 = str("entry") + \\@10 = export(@9, @entry) + , + \\@0 = primitive(void) + \\@1 = fntype([], @0, cc=C) + \\@2 = fn(@1, { + \\ %0 = return() + \\}) + \\@3 = str("entry") + \\@4 = export(@3, @2) + \\ + ); + + if (@import("std").Target.current.os.tag != .linux or + @import("std").Target.current.cpu.arch != .x86_64) + { + // TODO implement self-hosted PE (.exe file) linking + // TODO implement more ZIR so we don't depend on x86_64-linux + return; + } + + ctx.addZIRCompareOutput("hello world ZIR", + \\@0 = str("Hello, world!\n") + \\@1 = primitive(noreturn) + \\@2 = primitive(usize) + \\@3 = fntype([], @1, cc=Naked) + \\@4 = int(0) + \\@5 = int(1) + \\@6 = int(231) + \\@7 = str("len") + \\ + \\@8 = fn(@3, { + \\ %0 = as(@2, @5) ; SYS_write + \\ %1 = as(@2, @5) ; STDOUT_FILENO + \\ %2 = ptrtoint(@0) ; msg ptr + \\ %3 = fieldptr(@0, @7) ; msg len ptr + \\ %4 = deref(%3) ; msg len + \\ %sysoutreg = str("={rax}") + \\ %rax = str("{rax}") + \\ %rdi = str("{rdi}") + \\ %rsi = str("{rsi}") + \\ %rdx = str("{rdx}") + \\ %rcx = str("rcx") + \\ %r11 = str("r11") + \\ %memory = str("memory") + \\ %syscall = str("syscall") + \\ %5 = asm(%syscall, @2, + \\ volatile=1, + \\ output=%sysoutreg, + \\ inputs=[%rax, %rdi, %rsi, %rdx], + \\ clobbers=[%rcx, %r11, %memory], + \\ args=[%0, %1, %2, %4]) + \\ + \\ %6 = as(@2, @6) ;SYS_exit_group + \\ %7 = as(@2, @4) ;exit code + \\ %8 = asm(%syscall, @2, + \\ volatile=1, + \\ output=%sysoutreg, + \\ inputs=[%rax, %rdi], + \\ clobbers=[%rcx, %r11, %memory], + \\ args=[%6, %7]) + \\ + \\ %9 = unreachable() + \\}) + \\ + \\@9 = str("_start") + \\@10 = export(@9, @8) + , + \\Hello, world! + \\ + ); +}