From 819e0e83d338a898f06d1e39c21da812ac58238b Mon Sep 17 00:00:00 2001
From: Ryan Liptak
Date: Thu, 23 Jun 2022 17:01:56 -0700
Subject: [PATCH 1/5] Add stack trace capturing to FailingAllocator

---
 lib/std/testing/failing_allocator.zig | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 677ca6f51b..16f517732c 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -19,6 +19,8 @@ pub const FailingAllocator = struct {
     freed_bytes: usize,
     allocations: usize,
     deallocations: usize,
+    stack_addresses: [16]usize,
+    has_induced_failure: bool,
 
     /// `fail_index` is the number of successful allocations you can
     /// expect from this allocator. The next allocation will fail.
@@ -37,6 +39,8 @@ pub const FailingAllocator = struct {
             .freed_bytes = 0,
             .allocations = 0,
             .deallocations = 0,
+            .stack_addresses = undefined,
+            .has_induced_failure = false,
         };
     }
 
@@ -52,6 +56,13 @@ pub const FailingAllocator = struct {
         return_address: usize,
     ) error{OutOfMemory}![]u8 {
         if (self.index == self.fail_index) {
+            mem.set(usize, &self.stack_addresses, 0);
+            var stack_trace = std.builtin.StackTrace{
+                .instruction_addresses = &self.stack_addresses,
+                .index = 0,
+            };
+            std.debug.captureStackTrace(return_address, &stack_trace);
+            self.has_induced_failure = true;
             return error.OutOfMemory;
         }
         const result = try self.internal_allocator.rawAlloc(len, ptr_align, len_align, return_address);
@@ -88,4 +99,17 @@ pub const FailingAllocator = struct {
         self.deallocations += 1;
         self.freed_bytes += old_mem.len;
     }
+
+    /// Only valid once `has_induced_failure == true`
+    pub fn getStackTrace(self: *FailingAllocator) std.builtin.StackTrace {
+        std.debug.assert(self.has_induced_failure);
+        var len: usize = 0;
+        while (len < self.stack_addresses.len and self.stack_addresses[len] != 0) {
+            len += 1;
+        }
+        return .{
+            .instruction_addresses = &self.stack_addresses,
+            .index = len,
+        };
+    }
 };

From def304a9a5b9d001e37c19509a9d2b2450088033 Mon Sep 17 00:00:00 2001
From: Ryan Liptak
Date: Thu, 23 Jun 2022 17:02:29 -0700
Subject: [PATCH 2/5] Integrate FailingAllocator stack trace with testing.checkAllAllocationFailures

---
 lib/std/testing.zig | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 4e43413d28..46d41affdf 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -650,7 +650,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
             error.OutOfMemory => {
                 if (failing_allocator_inst.allocated_bytes != failing_allocator_inst.freed_bytes) {
                     print(
-                        "\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\n",
+                        "\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\nallocation that was made to fail: {s}",
                         .{
                             fail_index,
                             needed_alloc_count,
@@ -658,6 +658,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
                             failing_allocator_inst.freed_bytes,
                             failing_allocator_inst.allocations,
                             failing_allocator_inst.deallocations,
+                            failing_allocator_inst.getStackTrace(),
                         },
                     );
                     return error.MemoryLeakDetected;

From c321b2f2a0257fa45262f1ece0a263b22e8c70f3 Mon Sep 17 00:00:00 2001
From: Ryan Liptak
Date: Thu, 23 Jun 2022 17:20:24 -0700
Subject: [PATCH 3/5] checkAllAllocationFailures: add possibility of SwallowedOutOfMemoryError (split from NondeterministicMemoryUsage)

Inducing failure but not getting
OutOfMemory back is not as much of a problem as never inducing failure
when it was expected to be induced, so treating them differently and
allowing them to be handled differently by the caller is useful.

For example, the current implementation of
`std.HashMapUnmanaged.getOrPutContextAdapted` always tries to grow and then
recovers from OutOfMemory by attempting a lookup of an existing key. If this
function is used (e.g. from `std.BufMap.putMove`) with
`checkAllAllocationFailures`, then we'd have previously triggered
`error.NondeterministicMemoryUsage`, but the real cause is that `OutOfMemory`
is being recovered from and so the error is being swallowed.

The new error allows us both to understand what's happening more easily and
to catch and ignore it if we're okay with the code we're testing handling
`error.OutOfMemory` without always bubbling it up.
---
 lib/std/testing.zig | 23 ++++++++++++++++++-----
 1 file changed, 18 insertions(+), 5 deletions(-)

diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 46d41affdf..8fa5fb68f8 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -534,18 +534,27 @@ test {
 ///
 /// Any relevant state shared between runs of `test_fn` *must* be reset within `test_fn`.
 ///
-/// Expects that the `test_fn` has a deterministic number of memory allocations
-/// (an error will be returned if non-deterministic allocations are detected).
-///
 /// The strategy employed is to:
 /// - Run the test function once to get the total number of allocations.
 /// - Then, iterate and run the function X more times, incrementing
 ///   the failing index each iteration (where X is the total number of
 ///   allocations determined previously)
 ///
+/// Expects that `test_fn` has a deterministic number of memory allocations:
+/// - If an allocation was made to fail during a run of `test_fn`, but `test_fn`
+///   didn't return `error.OutOfMemory`, then `error.SwallowedOutOfMemoryError`
+///   is returned from `checkAllAllocationFailures`. You may want to ignore this
+///   depending on whether or not the code you're testing includes some strategies
+///   for recovering from `error.OutOfMemory`.
+/// - If a run of `test_fn` with an expected allocation failure executes without
+///   an allocation failure being induced, then `error.NondeterministicMemoryUsage`
+///   is returned. This error means that there are allocation points that won't be
+///   tested by the strategy this function employs (that is, there are sometimes more
+///   points of allocation than the initial run of `test_fn` detects).
+///
 /// ---
 ///
-/// Here's an example of using a simple test case that will cause a leak when the
+/// Here's an example using a simple test case that will cause a leak when the
 /// allocation of `bar` fails (but will pass normally):
 ///
 /// ```zig
@@ -645,7 +654,11 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
         args.@"0" = failing_allocator_inst.allocator();
 
         if (@call(.{}, test_fn, args)) |_| {
-            return error.NondeterministicMemoryUsage;
+            if (failing_allocator_inst.has_induced_failure) {
+                return error.SwallowedOutOfMemoryError;
+            } else {
+                return error.NondeterministicMemoryUsage;
+            }
         } else |err| switch (err) {
             error.OutOfMemory => {
                 if (failing_allocator_inst.allocated_bytes != failing_allocator_inst.freed_bytes) {

From 19d7f4dd8221fcd36b7fb71067bf676bb294ca1c Mon Sep 17 00:00:00 2001
From: Ryan Liptak
Date: Thu, 23 Jun 2022 17:27:55 -0700
Subject: [PATCH 4/5] FailingAllocator: Only capture the stack trace of the first induced allocation failure

This is a precaution to avoid confusing stack traces on the off chance that
FailingAllocator continues to try to allocate after the first failure.
---
 lib/std/testing/failing_allocator.zig | 16 +++++++++-------
 1 file changed, 9 insertions(+), 7 deletions(-)

diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 16f517732c..c5d1d3df30 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -56,13 +56,15 @@ pub const FailingAllocator = struct {
         return_address: usize,
     ) error{OutOfMemory}![]u8 {
         if (self.index == self.fail_index) {
-            mem.set(usize, &self.stack_addresses, 0);
-            var stack_trace = std.builtin.StackTrace{
-                .instruction_addresses = &self.stack_addresses,
-                .index = 0,
-            };
-            std.debug.captureStackTrace(return_address, &stack_trace);
-            self.has_induced_failure = true;
+            if (!self.has_induced_failure) {
+                mem.set(usize, &self.stack_addresses, 0);
+                var stack_trace = std.builtin.StackTrace{
+                    .instruction_addresses = &self.stack_addresses,
+                    .index = 0,
+                };
+                std.debug.captureStackTrace(return_address, &stack_trace);
+                self.has_induced_failure = true;
+            }
             return error.OutOfMemory;
         }
         const result = try self.internal_allocator.rawAlloc(len, ptr_align, len_align, return_address);

From 22720981ea50b1cee38d8309e0cd32df401b8156 Mon Sep 17 00:00:00 2001
From: Ryan Liptak
Date: Sat, 25 Jun 2022 21:27:56 -0700
Subject: [PATCH 5/5] Move sys_can_stack_trace from GPA to std.debug so that it can be re-used as needed

---
 lib/std/debug.zig                          | 21 +++++++++++++++++++++
 lib/std/heap/general_purpose_allocator.zig | 22 +---------------------
 lib/std/testing/failing_allocator.zig      |  4 +++-
 3 files changed, 25 insertions(+), 22 deletions(-)

diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index ba1f509e6c..9401bca257 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -26,6 +26,27 @@ pub const runtime_safety = switch (builtin.mode) {
     .ReleaseFast, .ReleaseSmall => false,
 };
 
+pub const sys_can_stack_trace = switch (builtin.cpu.arch) {
+    // Observed to go into an infinite loop.
+    // TODO: Make this work.
+    .mips,
+    .mipsel,
+    => false,
+
+    // `@returnAddress()` in LLVM 10 gives
+    // "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address".
+    .wasm32,
+    .wasm64,
+    => builtin.os.tag == .emscripten,
+
+    // `@returnAddress()` is unsupported in LLVM 13.
+    .bpfel,
+    .bpfeb,
+    => false,
+
+    else => true,
+};
+
 pub const LineInfo = struct {
     line: u64,
     column: u64,

diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 11d897ac1b..abffad751c 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -105,28 +105,8 @@ const StackTrace = std.builtin.StackTrace;
 /// Integer type for pointing to slots in a small allocation
 const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
 
-const sys_can_stack_trace = switch (builtin.cpu.arch) {
-    // Observed to go into an infinite loop.
-    // TODO: Make this work.
-    .mips,
-    .mipsel,
-    => false,
-
-    // `@returnAddress()` in LLVM 10 gives
-    // "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address".
-    .wasm32,
-    .wasm64,
-    => builtin.os.tag == .emscripten,
-
-    // `@returnAddress()` is unsupported in LLVM 13.
-    .bpfel,
-    .bpfeb,
-    => false,
-
-    else => true,
-};
 const default_test_stack_trace_frames: usize = if (builtin.is_test) 8 else 4;
-const default_sys_stack_trace_frames: usize = if (sys_can_stack_trace) default_test_stack_trace_frames else 0;
+const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
 const default_stack_trace_frames: usize = switch (builtin.mode) {
     .Debug => default_sys_stack_trace_frames,
     else => 0,

diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index c5d1d3df30..03e345986b 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -19,9 +19,11 @@ pub const FailingAllocator = struct {
     freed_bytes: usize,
     allocations: usize,
     deallocations: usize,
-    stack_addresses: [16]usize,
+    stack_addresses: [num_stack_frames]usize,
     has_induced_failure: bool,
 
+    const num_stack_frames = if (std.debug.sys_can_stack_trace) 16 else 0;
+
    /// `fail_index` is the number of successful allocations you can
    /// expect from this allocator. The next allocation will fail.
    /// For example, if this is called with `fail_index` equal to 2,
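
Not part of the patches above, but for context, here is a minimal usage sketch of the API these patches extend. It assumes the `checkAllAllocationFailures(backing_allocator, test_fn, extra_args)` shape visible in the hunk headers, where `test_fn` takes the allocator as its first parameter; the name `testImpl` and the `.{32}` extra-args tuple are illustrative only, not taken from the patch series.

```zig
const std = @import("std");

// Illustrative test function: two allocation points, each cleaned up by a
// defer, so no induced failure should leak or swallow OutOfMemory.
fn testImpl(allocator: std.mem.Allocator, count: usize) !void {
    const foo = try allocator.alloc(u8, count);
    defer allocator.free(foo);

    const bar = try allocator.alloc(u8, count);
    defer allocator.free(bar);
}

test "exercise every allocation failure point" {
    // Runs testImpl once to count allocations, then once per allocation
    // with that allocation forced to fail, as described in the doc comment
    // added by patch 3.
    try std.testing.checkAllAllocationFailures(std.testing.allocator, testImpl, .{32});
}
```

If the code under test intentionally recovers from `OutOfMemory` (as in the `std.BufMap.putMove` case from patch 3's commit message), the caller can `catch` and ignore `error.SwallowedOutOfMemoryError` while still treating `error.NondeterministicMemoryUsage` and `error.MemoryLeakDetected` as test failures.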