Mirror of https://github.com/ziglang/zig.git

Commit 4971400bdc: Merge remote-tracking branch 'origin/master' into llvm11
@@ -402,41 +402,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
            }
        }

-        fn freeSlot(
-            self: *Self,
-            bucket: *BucketHeader,
-            bucket_index: usize,
-            size_class: usize,
-            slot_index: SlotIndex,
-            used_byte: *u8,
-            used_bit_index: u3,
-            trace_addr: usize,
-        ) void {
-            // Capture stack trace to be the "first free", in case a double free happens.
-            bucket.captureStackTrace(trace_addr, size_class, slot_index, .free);
-
-            used_byte.* &= ~(@as(u8, 1) << used_bit_index);
-            bucket.used_count -= 1;
-            if (bucket.used_count == 0) {
-                if (bucket.next == bucket) {
-                    // it's the only bucket and therefore the current one
-                    self.buckets[bucket_index] = null;
-                } else {
-                    bucket.next.prev = bucket.prev;
-                    bucket.prev.next = bucket.next;
-                    self.buckets[bucket_index] = bucket.prev;
-                }
-                if (!config.never_unmap) {
-                    self.backing_allocator.free(bucket.page[0..page_size]);
-                }
-                const bucket_size = bucketSize(size_class);
-                const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
-                self.backing_allocator.free(bucket_slice);
-            } else {
-                @memset(bucket.page + slot_index * size_class, undefined, size_class);
-            }
-        }
-
        /// This function assumes the object is in the large object storage regardless
        /// of the parameters.
        fn resizeLarge(

@@ -560,7 +525,29 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
                }
            }
            if (new_size == 0) {
-                self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, ret_addr);
+                // Capture stack trace to be the "first free", in case a double free happens.
+                bucket.captureStackTrace(ret_addr, size_class, slot_index, .free);
+
+                used_byte.* &= ~(@as(u8, 1) << used_bit_index);
+                bucket.used_count -= 1;
+                if (bucket.used_count == 0) {
+                    if (bucket.next == bucket) {
+                        // it's the only bucket and therefore the current one
+                        self.buckets[bucket_index] = null;
+                    } else {
+                        bucket.next.prev = bucket.prev;
+                        bucket.prev.next = bucket.next;
+                        self.buckets[bucket_index] = bucket.prev;
+                    }
+                    if (!config.never_unmap) {
+                        self.backing_allocator.free(bucket.page[0..page_size]);
+                    }
+                    const bucket_size = bucketSize(size_class);
+                    const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size];
+                    self.backing_allocator.free(bucket_slice);
+                } else {
+                    @memset(old_mem.ptr, undefined, old_mem.len);
+                }
                return @as(usize, 0);
            }
            const new_aligned_size = math.max(new_size, old_align);
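The two hunks above inline the old freeSlot helper directly into the small-allocation path of resize; the only behavioral difference is the @memset over old_mem instead of recomputing the slot pointer. For orientation, a minimal usage sketch of the allocator as its API stood around this commit (the &gpa.allocator field access and the bool returned by deinit follow that era's conventions and are assumptions here, not part of the diff):

const std = @import("std");

pub fn main() !void {
    // Default configuration; leak detection and double-free stack traces are
    // governed by the comptime Config argument.
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer {
        // As of this era, deinit() reports whether any allocations leaked.
        const leaked = gpa.deinit();
        if (leaked) @panic("leak detected");
    }
    const allocator = &gpa.allocator;

    const buf = try allocator.alloc(u8, 64);
    defer allocator.free(buf);
}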
@@ -15,6 +15,8 @@ const maxInt = std.math.maxInt;
 const minInt = std.math.minInt;
 const assert = std.debug.assert;

+const debug_safety = false;
+
 /// Returns the number of limbs needed to store `scalar`, which must be a
 /// primitive integer value.
 pub fn calcLimbLen(scalar: anytype) usize {

@@ -57,7 +59,7 @@ pub fn calcSetStringLimbCount(base: u8, string_len: usize) usize {

 /// a + b * c + *carry, sets carry to the overflow bits
 pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     var r1: Limb = undefined;

     // r1 = a + *carry
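addMulLimbWithCarry above is the fused inner step of the schoolbook multiply: it returns the low limb of a + b * c + carry and writes the overflow back through carry. A self-contained illustration of the same arithmetic with a fixed 64-bit limb and a 128-bit intermediate (the name addMulWithCarry64 and the wide-multiply formulation are invented for this sketch; the real routine stays on the configurable Limb type and avoids the wide multiply):

const std = @import("std");

// Illustration only: computes a + b * c + carry.*, returns the low 64 bits
// and stores the overflow bits back into carry.*.
fn addMulWithCarry64(a: u64, b: u64, c: u64, carry: *u64) u64 {
    const wide: u128 = @as(u128, a) + @as(u128, b) * @as(u128, c) + carry.*;
    carry.* = @intCast(u64, wide >> 64);
    return @truncate(u64, wide);
}

test "addMulWithCarry64" {
    var carry: u64 = 0;
    const lo = addMulWithCarry64(1, std.math.maxInt(u64), 2, &carry);
    // 1 + (2^64 - 1) * 2 = 2^65 - 1: low limb is all ones, carry is 1.
    std.debug.assert(lo == std.math.maxInt(u64));
    std.debug.assert(carry == 1);
}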
@@ -1529,8 +1531,7 @@ pub const Managed = struct {
     /// self's allocator is used for temporary storage to boost multiplication performance.
     pub fn setString(self: *Managed, base: u8, value: []const u8) !void {
         if (base < 2 or base > 16) return error.InvalidBase;
-        const den = (@sizeOf(Limb) * 8 / base);
-        try self.ensureCapacity((value.len + (den - 1)) / den);
+        try self.ensureCapacity(calcSetStringLimbCount(base, value.len));
         const limbs_buffer = try self.allocator.alloc(Limb, calcSetStringLimbsBufferLen(base, value.len));
         defer self.allocator.free(limbs_buffer);
         var m = self.toMutable();
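The setString hunk swaps the hand-rolled capacity formula for the calcSetStringLimbCount helper declared earlier in the file, so the estimate lives in one place. A minimal sketch of the call path through the Managed API as it stood at this commit (allocator setup via std.testing.allocator is an assumption for the example):

const std = @import("std");
const Managed = std.math.big.int.Managed;

test "setString sketch" {
    var a = try Managed.init(std.testing.allocator);
    defer a.deinit();

    // Base must be in [2, 16]; capacity is reserved internally via
    // calcSetStringLimbCount before parsing the digits.
    try a.setString(16, "ffffffffffffffff");
}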
@@ -1646,17 +1647,19 @@ pub const Managed = struct {
     /// rma = a * b
     ///
     /// rma, a and b may be aliases. However, it is more efficient if rma does not alias a or b.
+    /// If rma aliases a or b, then caller must call `rma.ensureMulCapacity` prior to calling `mul`.
     ///
     /// Returns an error if memory could not be allocated.
     ///
     /// rma's allocator is used for temporary storage to speed up the multiplication.
     pub fn mul(rma: *Managed, a: Const, b: Const) !void {
-        try rma.ensureCapacity(a.limbs.len + b.limbs.len + 1);
         var alias_count: usize = 0;
         if (rma.limbs.ptr == a.limbs.ptr)
             alias_count += 1;
         if (rma.limbs.ptr == b.limbs.ptr)
             alias_count += 1;
+        assert(alias_count == 0 or rma.limbs.len >= a.limbs.len + b.limbs.len + 1);
+        try rma.ensureMulCapacity(a, b);
         var m = rma.toMutable();
         if (alias_count == 0) {
             m.mulNoAlias(a, b, rma.allocator);

@@ -1669,6 +1672,10 @@ pub const Managed = struct {
         rma.setMetadata(m.positive, m.len);
     }

+    pub fn ensureMulCapacity(rma: *Managed, a: Const, b: Const) !void {
+        try rma.ensureCapacity(a.limbs.len + b.limbs.len + 1);
+    }
+
     /// q = a / b (rem r)
     ///
     /// a / b are floored (rounded towards 0).
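The mul and ensureMulCapacity hunks together move the capacity reservation out of the aliasing case: when rma aliases a or b the caller is now expected to reserve space first, and the new assert enforces it. A hedged sketch of the calling pattern the updated doc comment describes, using in-place squaring as the simplest aliased case (set and toConst are assumed from this era's Managed API):

const std = @import("std");
const Managed = std.math.big.int.Managed;

test "aliased square via ensureMulCapacity" {
    var a = try Managed.init(std.testing.allocator);
    defer a.deinit();
    try a.set(123456789);

    // rma aliases both operands here (a = a * a), so reserve the result
    // capacity up front, as the updated doc comment requires.
    try a.ensureMulCapacity(a.toConst(), a.toConst());
    try a.mul(a.toConst(), a.toConst());
}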
@@ -1773,7 +1780,7 @@ pub const Managed = struct {
 ///
 /// r MUST NOT alias any of a or b.
 fn llmulacc(opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const Limb) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);

     const a_norm = a[0..llnormalize(a)];
     const b_norm = b[0..llnormalize(b)];

@@ -1806,7 +1813,7 @@ fn llmulacc(opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const L
 ///
 /// r MUST NOT alias any of a or b.
 fn llmulacc_karatsuba(allocator: *Allocator, r: []Limb, x: []const Limb, y: []const Limb) error{OutOfMemory}!void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);

     assert(r.len >= x.len + y.len + 1);

@@ -1873,7 +1880,7 @@ fn llmulacc_karatsuba(allocator: *Allocator, r: []Limb, x: []const Limb, y: []co

 // r = r + a
 fn llaccum(r: []Limb, a: []const Limb) Limb {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     assert(r.len != 0 and a.len != 0);
     assert(r.len >= a.len);

@@ -1896,7 +1903,7 @@ fn llaccum(r: []Limb, a: []const Limb) Limb {

 /// Returns -1, 0, 1 if |a| < |b|, |a| == |b| or |a| > |b| respectively for limbs.
 pub fn llcmp(a: []const Limb, b: []const Limb) i8 {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     const a_len = llnormalize(a);
     const b_len = llnormalize(b);
     if (a_len < b_len) {

@@ -1923,12 +1930,12 @@ pub fn llcmp(a: []const Limb, b: []const Limb) i8 {
 }

 fn llmulDigit(acc: []Limb, y: []const Limb, xi: Limb) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     if (xi == 0) {
         return;
     }

-    var carry: usize = 0;
+    var carry: Limb = 0;
     var a_lo = acc[0..y.len];
     var a_hi = acc[y.len..];

@@ -1945,7 +1952,7 @@ fn llmulDigit(acc: []Limb, y: []const Limb, xi: Limb) void {

 /// returns the min length the limb could be.
 fn llnormalize(a: []const Limb) usize {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     var j = a.len;
     while (j > 0) : (j -= 1) {
         if (a[j - 1] != 0) {

@@ -1959,7 +1966,7 @@ fn llnormalize(a: []const Limb) usize {

 /// Knuth 4.3.1, Algorithm S.
 fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     assert(a.len != 0 and b.len != 0);
     assert(a.len > b.len or (a.len == b.len and a[a.len - 1] >= b[b.len - 1]));
     assert(r.len >= a.len);

@@ -1983,7 +1990,7 @@ fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void {

 /// Knuth 4.3.1, Algorithm A.
 fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     assert(a.len != 0 and b.len != 0);
     assert(a.len >= b.len);
     assert(r.len >= a.len + 1);

@@ -2007,7 +2014,7 @@ fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void {

 /// Knuth 4.3.1, Exercise 16.
 fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     assert(a.len > 1 or a[0] >= b);
     assert(quo.len >= a.len);

@@ -2033,7 +2040,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
 }

 fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     assert(a.len >= 1);
     assert(r.len >= a.len + (shift / Limb.bit_count) + 1);

@@ -2060,7 +2067,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
 }

 fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     assert(a.len >= 1);
     assert(r.len >= a.len - (shift / Limb.bit_count));

@@ -2084,7 +2091,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
 }

 fn llor(r: []Limb, a: []const Limb, b: []const Limb) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     assert(r.len >= a.len);
     assert(a.len >= b.len);

@@ -2098,7 +2105,7 @@ fn llor(r: []Limb, a: []const Limb, b: []const Limb) void {
 }

 fn lland(r: []Limb, a: []const Limb, b: []const Limb) void {
-    @setRuntimeSafety(false);
+    @setRuntimeSafety(debug_safety);
     assert(r.len >= b.len);
     assert(a.len >= b.len);

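Every remaining hunk in this file routes the hard-coded @setRuntimeSafety(false) through the new debug_safety constant, so overflow and bounds checks in the limb kernels can be re-enabled by flipping a single line; the llmulDigit hunk also tightens the carry variable from usize to Limb. A small standalone sketch of the toggle pattern (the function body here is illustrative, not taken from the file):

const std = @import("std");

// Flip to true while debugging to restore integer overflow / bounds checks
// in the hot limb routines.
const debug_safety = false;

fn llexample(r: []u64, a: []const u64) void {
    @setRuntimeSafety(debug_safety);
    std.debug.assert(r.len >= a.len);
    for (a) |limb, i| {
        r[i] = limb;
    }
}

test "llexample" {
    var dst = [_]u64{ 0, 0 };
    const src = [_]u64{ 1, 2 };
    llexample(dst[0..], src[0..]);
    std.debug.assert(dst[0] == 1 and dst[1] == 2);
}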
@@ -110,6 +110,7 @@ pub const Rational = struct {

             var j: usize = start;
             while (j < str.len - i - 1) : (j += 1) {
+                try self.p.ensureMulCapacity(self.p.toConst(), base);
                 try self.p.mul(self.p.toConst(), base);
             }

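The Rational hunk applies the new rule at its one aliased call site: inside what appears to be the float-string parsing loop, the numerator p is multiplied in place by the base, so capacity is reserved immediately before the mul. A hedged usage sketch of the surrounding API (the exact setFloatString entry point is assumed from this era's std):

const std = @import("std");
const Rational = std.math.big.Rational;

test "rational from float string" {
    var r = try Rational.init(std.testing.allocator);
    defer r.deinit();

    // Internally this repeatedly multiplies the numerator by the base,
    // which is the aliased-mul loop patched above.
    try r.setFloatString("72.14");
}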
@@ -3,21 +3,22 @@
 // This file is part of [zig](https://ziglang.org/), which is MIT licensed.
 // The MIT license requires this copyright notice to be included in all copies
 // and substantial portions of the software.
-// The engines provided here should be initialized from an external source. For now, randomBytes
-// from the crypto package is the most suitable. Be sure to use a CSPRNG when required, otherwise using
-// a normal PRNG will be faster and use substantially less stack space.
-//
-// ```
-// var buf: [8]u8 = undefined;
-// try std.crypto.randomBytes(buf[0..]);
-// const seed = mem.readIntLittle(u64, buf[0..8]);
-//
-// var r = DefaultPrng.init(seed);
-//
-// const s = r.random.int(u64);
-// ```
-//
-// TODO(tiehuis): Benchmark these against other reference implementations.
+
+//! The engines provided here should be initialized from an external source. For now, randomBytes
+//! from the crypto package is the most suitable. Be sure to use a CSPRNG when required, otherwise using
+//! a normal PRNG will be faster and use substantially less stack space.
+//!
+//! ```
+//! var buf: [8]u8 = undefined;
+//! try std.crypto.randomBytes(buf[0..]);
+//! const seed = mem.readIntLittle(u64, buf[0..8]);
+//!
+//! var r = DefaultPrng.init(seed);
+//!
+//! const s = r.random.int(u64);
+//! ```
+//!
+//! TODO(tiehuis): Benchmark these against other reference implementations.

 const std = @import("std.zig");
 const builtin = @import("builtin");

@@ -29,10 +30,10 @@ const math = std.math;
 const ziggurat = @import("rand/ziggurat.zig");
 const maxInt = std.math.maxInt;

-// When you need fast unbiased random numbers
+/// Fast unbiased random numbers.
 pub const DefaultPrng = Xoroshiro128;

-// When you need cryptographically secure random numbers
+/// Cryptographically secure random numbers.
 pub const DefaultCsprng = Isaac64;

 pub const Random = struct {
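The rand.zig hunks promote the header comment to //! module doc comments and give DefaultPrng and DefaultCsprng proper doc strings. The seeding example in that comment expands to something like the following sketch (intRangeAtMost and float are assumed to be available on the Random interface of this era):

const std = @import("std");

test "seeded prng sketch" {
    // Seed from a CSPRNG source, as the module doc comment recommends.
    var buf: [8]u8 = undefined;
    try std.crypto.randomBytes(buf[0..]);
    const seed = std.mem.readIntLittle(u64, buf[0..8]);

    var r = std.rand.DefaultPrng.init(seed);
    const die = r.random.intRangeAtMost(u8, 1, 6); // uniform in [1, 6]
    const x = r.random.float(f64); // uniform in [0, 1)
    std.debug.assert(die >= 1 and die <= 6);
    std.debug.assert(x >= 0.0 and x < 1.0);
}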