From 973e6c978cedacc9aab86999ab67e5066fc1db1d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 25 Aug 2020 19:48:39 -0700 Subject: [PATCH 1/3] std: clean up GeneralPurposeAllocator memset code The freeSlot function was only called once so I inlined the logic and utilized some of the other locals that were in scope. --- lib/std/heap/general_purpose_allocator.zig | 59 +++++++++------------- 1 file changed, 23 insertions(+), 36 deletions(-) diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig index ba710059aa..34bd50bc13 100644 --- a/lib/std/heap/general_purpose_allocator.zig +++ b/lib/std/heap/general_purpose_allocator.zig @@ -402,41 +402,6 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } } - fn freeSlot( - self: *Self, - bucket: *BucketHeader, - bucket_index: usize, - size_class: usize, - slot_index: SlotIndex, - used_byte: *u8, - used_bit_index: u3, - trace_addr: usize, - ) void { - // Capture stack trace to be the "first free", in case a double free happens. - bucket.captureStackTrace(trace_addr, size_class, slot_index, .free); - - used_byte.* &= ~(@as(u8, 1) << used_bit_index); - bucket.used_count -= 1; - if (bucket.used_count == 0) { - if (bucket.next == bucket) { - // it's the only bucket and therefore the current one - self.buckets[bucket_index] = null; - } else { - bucket.next.prev = bucket.prev; - bucket.prev.next = bucket.next; - self.buckets[bucket_index] = bucket.prev; - } - if (!config.never_unmap) { - self.backing_allocator.free(bucket.page[0..page_size]); - } - const bucket_size = bucketSize(size_class); - const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size]; - self.backing_allocator.free(bucket_slice); - } else { - @memset(bucket.page + slot_index * size_class, undefined, size_class); - } - } - /// This function assumes the object is in the large object storage regardless /// of the parameters. fn resizeLarge( @@ -560,7 +525,29 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type { } } if (new_size == 0) { - self.freeSlot(bucket, bucket_index, size_class, slot_index, used_byte, used_bit_index, ret_addr); + // Capture stack trace to be the "first free", in case a double free happens. + bucket.captureStackTrace(ret_addr, size_class, slot_index, .free); + + used_byte.* &= ~(@as(u8, 1) << used_bit_index); + bucket.used_count -= 1; + if (bucket.used_count == 0) { + if (bucket.next == bucket) { + // it's the only bucket and therefore the current one + self.buckets[bucket_index] = null; + } else { + bucket.next.prev = bucket.prev; + bucket.prev.next = bucket.next; + self.buckets[bucket_index] = bucket.prev; + } + if (!config.never_unmap) { + self.backing_allocator.free(bucket.page[0..page_size]); + } + const bucket_size = bucketSize(size_class); + const bucket_slice = @ptrCast([*]align(@alignOf(BucketHeader)) u8, bucket)[0..bucket_size]; + self.backing_allocator.free(bucket_slice); + } else { + @memset(old_mem.ptr, undefined, old_mem.len); + } return @as(usize, 0); } const new_aligned_size = math.max(new_size, old_align); From b498eebfd450e7579dc8652d5827a23cc7070d9d Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 25 Aug 2020 19:49:40 -0700 Subject: [PATCH 2/3] std.math.big: fix use-after-free When there is parameter aliasing, the ensureCapacity calls can cause the Const parameters to become dangling pointers. 
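
For example (an illustrative sketch, not code from this change; q and b are hypothetical Managed values): previously mul called ensureCapacity on the result internally, so in an aliased call such as

    const lhs = q.toConst();
    try q.mul(lhs, b.toConst());

a reallocation of q's limbs inside mul could leave lhs pointing at freed memory. With this change, a caller that aliases the result reserves capacity up front with the new ensureMulCapacity and takes the Const operands afterwards, mirroring the rational.zig fix below:

    try q.ensureMulCapacity(q.toConst(), b.toConst());
    try q.mul(q.toConst(), b.toConst());
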
See #6167 --- lib/std/math/big/int.zig | 43 ++++++++++++++++++++--------------- lib/std/math/big/rational.zig | 1 + 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/lib/std/math/big/int.zig b/lib/std/math/big/int.zig index adb5848931..28da1064c9 100644 --- a/lib/std/math/big/int.zig +++ b/lib/std/math/big/int.zig @@ -15,6 +15,8 @@ const maxInt = std.math.maxInt; const minInt = std.math.minInt; const assert = std.debug.assert; +const debug_safety = false; + /// Returns the number of limbs needed to store `scalar`, which must be a /// primitive integer value. pub fn calcLimbLen(scalar: anytype) usize { @@ -57,7 +59,7 @@ pub fn calcSetStringLimbCount(base: u8, string_len: usize) usize { /// a + b * c + *carry, sets carry to the overflow bits pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); var r1: Limb = undefined; // r1 = a + *carry @@ -1529,8 +1531,7 @@ pub const Managed = struct { /// self's allocator is used for temporary storage to boost multiplication performance. pub fn setString(self: *Managed, base: u8, value: []const u8) !void { if (base < 2 or base > 16) return error.InvalidBase; - const den = (@sizeOf(Limb) * 8 / base); - try self.ensureCapacity((value.len + (den - 1)) / den); + try self.ensureCapacity(calcSetStringLimbCount(base, value.len)); const limbs_buffer = try self.allocator.alloc(Limb, calcSetStringLimbsBufferLen(base, value.len)); defer self.allocator.free(limbs_buffer); var m = self.toMutable(); @@ -1646,17 +1647,19 @@ pub const Managed = struct { /// rma = a * b /// /// rma, a and b may be aliases. However, it is more efficient if rma does not alias a or b. + /// If rma aliases a or b, then caller must call `rma.ensureMulCapacity` prior to calling `mul`. /// /// Returns an error if memory could not be allocated. /// /// rma's allocator is used for temporary storage to speed up the multiplication. pub fn mul(rma: *Managed, a: Const, b: Const) !void { - try rma.ensureCapacity(a.limbs.len + b.limbs.len + 1); var alias_count: usize = 0; if (rma.limbs.ptr == a.limbs.ptr) alias_count += 1; if (rma.limbs.ptr == b.limbs.ptr) alias_count += 1; + assert(alias_count == 0 or rma.limbs.len >= a.limbs.len + b.limbs.len + 1); + try rma.ensureMulCapacity(a, b); var m = rma.toMutable(); if (alias_count == 0) { m.mulNoAlias(a, b, rma.allocator); @@ -1669,6 +1672,10 @@ pub const Managed = struct { rma.setMetadata(m.positive, m.len); } + pub fn ensureMulCapacity(rma: *Managed, a: Const, b: Const) !void { + try rma.ensureCapacity(a.limbs.len + b.limbs.len + 1); + } + /// q = a / b (rem r) /// /// a / b are floored (rounded towards 0). @@ -1773,7 +1780,7 @@ pub const Managed = struct { /// /// r MUST NOT alias any of a or b. fn llmulacc(opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); const a_norm = a[0..llnormalize(a)]; const b_norm = b[0..llnormalize(b)]; @@ -1806,7 +1813,7 @@ fn llmulacc(opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const L /// /// r MUST NOT alias any of a or b. 
fn llmulacc_karatsuba(allocator: *Allocator, r: []Limb, x: []const Limb, y: []const Limb) error{OutOfMemory}!void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(r.len >= x.len + y.len + 1); @@ -1873,7 +1880,7 @@ fn llmulacc_karatsuba(allocator: *Allocator, r: []Limb, x: []const Limb, y: []co // r = r + a fn llaccum(r: []Limb, a: []const Limb) Limb { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(r.len != 0 and a.len != 0); assert(r.len >= a.len); @@ -1896,7 +1903,7 @@ fn llaccum(r: []Limb, a: []const Limb) Limb { /// Returns -1, 0, 1 if |a| < |b|, |a| == |b| or |a| > |b| respectively for limbs. pub fn llcmp(a: []const Limb, b: []const Limb) i8 { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); const a_len = llnormalize(a); const b_len = llnormalize(b); if (a_len < b_len) { @@ -1923,12 +1930,12 @@ pub fn llcmp(a: []const Limb, b: []const Limb) i8 { } fn llmulDigit(acc: []Limb, y: []const Limb, xi: Limb) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); if (xi == 0) { return; } - var carry: usize = 0; + var carry: Limb = 0; var a_lo = acc[0..y.len]; var a_hi = acc[y.len..]; @@ -1945,7 +1952,7 @@ fn llmulDigit(acc: []Limb, y: []const Limb, xi: Limb) void { /// returns the min length the limb could be. fn llnormalize(a: []const Limb) usize { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); var j = a.len; while (j > 0) : (j -= 1) { if (a[j - 1] != 0) { @@ -1959,7 +1966,7 @@ fn llnormalize(a: []const Limb) usize { /// Knuth 4.3.1, Algorithm S. fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(a.len != 0 and b.len != 0); assert(a.len > b.len or (a.len == b.len and a[a.len - 1] >= b[b.len - 1])); assert(r.len >= a.len); @@ -1983,7 +1990,7 @@ fn llsub(r: []Limb, a: []const Limb, b: []const Limb) void { /// Knuth 4.3.1, Algorithm A. fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(a.len != 0 and b.len != 0); assert(a.len >= b.len); assert(r.len >= a.len + 1); @@ -2007,7 +2014,7 @@ fn lladd(r: []Limb, a: []const Limb, b: []const Limb) void { /// Knuth 4.3.1, Exercise 16. 
fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(a.len > 1 or a[0] >= b); assert(quo.len >= a.len); @@ -2033,7 +2040,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void { } fn llshl(r: []Limb, a: []const Limb, shift: usize) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(a.len >= 1); assert(r.len >= a.len + (shift / Limb.bit_count) + 1); @@ -2060,7 +2067,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void { } fn llshr(r: []Limb, a: []const Limb, shift: usize) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(a.len >= 1); assert(r.len >= a.len - (shift / Limb.bit_count)); @@ -2084,7 +2091,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void { } fn llor(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(r.len >= a.len); assert(a.len >= b.len); @@ -2098,7 +2105,7 @@ fn llor(r: []Limb, a: []const Limb, b: []const Limb) void { } fn lland(r: []Limb, a: []const Limb, b: []const Limb) void { - @setRuntimeSafety(false); + @setRuntimeSafety(debug_safety); assert(r.len >= b.len); assert(a.len >= b.len); diff --git a/lib/std/math/big/rational.zig b/lib/std/math/big/rational.zig index e83139cf37..5b3c105718 100644 --- a/lib/std/math/big/rational.zig +++ b/lib/std/math/big/rational.zig @@ -110,6 +110,7 @@ pub const Rational = struct { var j: usize = start; while (j < str.len - i - 1) : (j += 1) { + try self.p.ensureMulCapacity(self.p.toConst(), base); try self.p.mul(self.p.toConst(), base); } From 3e24e958922806379f9f609e36954a489eb20831 Mon Sep 17 00:00:00 2001 From: Andrew Kelley Date: Tue, 25 Aug 2020 19:51:40 -0700 Subject: [PATCH 3/3] std.rand: promote normal comments to doc comments --- lib/std/rand.zig | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/lib/std/rand.zig b/lib/std/rand.zig index 42726472bf..7988efffc9 100644 --- a/lib/std/rand.zig +++ b/lib/std/rand.zig @@ -3,21 +3,22 @@ // This file is part of [zig](https://ziglang.org/), which is MIT licensed. // The MIT license requires this copyright notice to be included in all copies // and substantial portions of the software. -// The engines provided here should be initialized from an external source. For now, randomBytes -// from the crypto package is the most suitable. Be sure to use a CSPRNG when required, otherwise using -// a normal PRNG will be faster and use substantially less stack space. -// -// ``` -// var buf: [8]u8 = undefined; -// try std.crypto.randomBytes(buf[0..]); -// const seed = mem.readIntLittle(u64, buf[0..8]); -// -// var r = DefaultPrng.init(seed); -// -// const s = r.random.int(u64); -// ``` -// -// TODO(tiehuis): Benchmark these against other reference implementations. + +//! The engines provided here should be initialized from an external source. For now, randomBytes +//! from the crypto package is the most suitable. Be sure to use a CSPRNG when required, otherwise using +//! a normal PRNG will be faster and use substantially less stack space. +//! +//! ``` +//! var buf: [8]u8 = undefined; +//! try std.crypto.randomBytes(buf[0..]); +//! const seed = mem.readIntLittle(u64, buf[0..8]); +//! +//! var r = DefaultPrng.init(seed); +//! +//! const s = r.random.int(u64); +//! ``` +//! +//! TODO(tiehuis): Benchmark these against other reference implementations. 
const std = @import("std.zig"); const builtin = @import("builtin"); @@ -29,10 +30,10 @@ const math = std.math; const ziggurat = @import("rand/ziggurat.zig"); const maxInt = std.math.maxInt; -// When you need fast unbiased random numbers +/// Fast unbiased random numbers. pub const DefaultPrng = Xoroshiro128; -// When you need cryptographically secure random numbers +/// Cryptographically secure random numbers. pub const DefaultCsprng = Isaac64; pub const Random = struct {