diff --git a/lib/std/debug.zig b/lib/std/debug.zig
index 54c614f270..b406fed471 100644
--- a/lib/std/debug.zig
+++ b/lib/std/debug.zig
@@ -26,6 +26,27 @@ pub const runtime_safety = switch (builtin.mode) {
     .ReleaseFast, .ReleaseSmall => false,
 };
 
+pub const sys_can_stack_trace = switch (builtin.cpu.arch) {
+    // Observed to go into an infinite loop.
+    // TODO: Make this work.
+    .mips,
+    .mipsel,
+    => false,
+
+    // `@returnAddress()` in LLVM 10 gives
+    // "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address".
+    .wasm32,
+    .wasm64,
+    => builtin.os.tag == .emscripten,
+
+    // `@returnAddress()` is unsupported in LLVM 13.
+    .bpfel,
+    .bpfeb,
+    => false,
+
+    else => true,
+};
+
 pub const LineInfo = struct {
     line: u64,
     column: u64,
diff --git a/lib/std/heap/general_purpose_allocator.zig b/lib/std/heap/general_purpose_allocator.zig
index 11d897ac1b..abffad751c 100644
--- a/lib/std/heap/general_purpose_allocator.zig
+++ b/lib/std/heap/general_purpose_allocator.zig
@@ -105,28 +105,8 @@ const StackTrace = std.builtin.StackTrace;
 /// Integer type for pointing to slots in a small allocation
 const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size) + 1);
 
-const sys_can_stack_trace = switch (builtin.cpu.arch) {
-    // Observed to go into an infinite loop.
-    // TODO: Make this work.
-    .mips,
-    .mipsel,
-    => false,
-
-    // `@returnAddress()` in LLVM 10 gives
-    // "Non-Emscripten WebAssembly hasn't implemented __builtin_return_address".
-    .wasm32,
-    .wasm64,
-    => builtin.os.tag == .emscripten,
-
-    // `@returnAddress()` is unsupported in LLVM 13.
-    .bpfel,
-    .bpfeb,
-    => false,
-
-    else => true,
-};
 const default_test_stack_trace_frames: usize = if (builtin.is_test) 8 else 4;
-const default_sys_stack_trace_frames: usize = if (sys_can_stack_trace) default_test_stack_trace_frames else 0;
+const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
 const default_stack_trace_frames: usize = switch (builtin.mode) {
     .Debug => default_sys_stack_trace_frames,
     else => 0,
diff --git a/lib/std/testing.zig b/lib/std/testing.zig
index 4e43413d28..8fa5fb68f8 100644
--- a/lib/std/testing.zig
+++ b/lib/std/testing.zig
@@ -534,18 +534,27 @@ test {
 ///
 /// Any relevant state shared between runs of `test_fn` *must* be reset within `test_fn`.
 ///
-/// Expects that the `test_fn` has a deterministic number of memory allocations
-/// (an error will be returned if non-deterministic allocations are detected).
-///
 /// The strategy employed is to:
 /// - Run the test function once to get the total number of allocations.
 /// - Then, iterate and run the function X more times, incrementing
 ///   the failing index each iteration (where X is the total number of
 ///   allocations determined previously)
 ///
+/// Expects that `test_fn` has a deterministic number of memory allocations:
+/// - If an allocation was made to fail during a run of `test_fn`, but `test_fn`
+///   didn't return `error.OutOfMemory`, then `error.SwallowedOutOfMemoryError`
+///   is returned from `checkAllAllocationFailures`. You may want to ignore this
+///   depending on whether or not the code you're testing includes some strategies
+///   for recovering from `error.OutOfMemory`.
+/// - If a run of `test_fn` with an expected allocation failure executes without
+///   an allocation failure being induced, then `error.NondeterministicMemoryUsage`
+///   is returned. This error means that there are allocation points that won't be
+///   tested by the strategy this function employs (that is, there are sometimes more
+///   points of allocation than the initial run of `test_fn` detects).
+///
 /// ---
 ///
-/// Here's an example of using a simple test case that will cause a leak when the
+/// Here's an example using a simple test case that will cause a leak when the
 /// allocation of `bar` fails (but will pass normally):
 ///
 /// ```zig
@@ -645,12 +654,16 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
         args.@"0" = failing_allocator_inst.allocator();
 
         if (@call(.{}, test_fn, args)) |_| {
-            return error.NondeterministicMemoryUsage;
+            if (failing_allocator_inst.has_induced_failure) {
+                return error.SwallowedOutOfMemoryError;
+            } else {
+                return error.NondeterministicMemoryUsage;
+            }
         } else |err| switch (err) {
             error.OutOfMemory => {
                 if (failing_allocator_inst.allocated_bytes != failing_allocator_inst.freed_bytes) {
                     print(
-                        "\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\n",
+                        "\nfail_index: {d}/{d}\nallocated bytes: {d}\nfreed bytes: {d}\nallocations: {d}\ndeallocations: {d}\nallocation that was made to fail: {s}",
                         .{
                             fail_index,
                             needed_alloc_count,
@@ -658,6 +671,7 @@ pub fn checkAllAllocationFailures(backing_allocator: std.mem.Allocator, comptime
                             failing_allocator_inst.freed_bytes,
                             failing_allocator_inst.allocations,
                             failing_allocator_inst.deallocations,
+                            failing_allocator_inst.getStackTrace(),
                         },
                     );
                     return error.MemoryLeakDetected;
diff --git a/lib/std/testing/failing_allocator.zig b/lib/std/testing/failing_allocator.zig
index 677ca6f51b..03e345986b 100644
--- a/lib/std/testing/failing_allocator.zig
+++ b/lib/std/testing/failing_allocator.zig
@@ -19,6 +19,10 @@ pub const FailingAllocator = struct {
     freed_bytes: usize,
     allocations: usize,
     deallocations: usize,
+    stack_addresses: [num_stack_frames]usize,
+    has_induced_failure: bool,
+
+    const num_stack_frames = if (std.debug.sys_can_stack_trace) 16 else 0;
 
     /// `fail_index` is the number of successful allocations you can
     /// expect from this allocator. The next allocation will fail.
@@ -37,6 +41,8 @@ pub const FailingAllocator = struct {
             .freed_bytes = 0,
             .allocations = 0,
             .deallocations = 0,
+            .stack_addresses = undefined,
+            .has_induced_failure = false,
         };
     }
 
@@ -52,6 +58,15 @@ pub const FailingAllocator = struct {
         return_address: usize,
     ) error{OutOfMemory}![]u8 {
         if (self.index == self.fail_index) {
+            if (!self.has_induced_failure) {
+                mem.set(usize, &self.stack_addresses, 0);
+                var stack_trace = std.builtin.StackTrace{
+                    .instruction_addresses = &self.stack_addresses,
+                    .index = 0,
+                };
+                std.debug.captureStackTrace(return_address, &stack_trace);
+                self.has_induced_failure = true;
+            }
             return error.OutOfMemory;
         }
         const result = try self.internal_allocator.rawAlloc(len, ptr_align, len_align, return_address);
@@ -88,4 +103,17 @@ pub const FailingAllocator = struct {
         self.deallocations += 1;
         self.freed_bytes += old_mem.len;
     }
+
+    /// Only valid once `has_induced_failure == true`
+    pub fn getStackTrace(self: *FailingAllocator) std.builtin.StackTrace {
+        std.debug.assert(self.has_induced_failure);
+        var len: usize = 0;
+        while (len < self.stack_addresses.len and self.stack_addresses[len] != 0) {
+            len += 1;
+        }
+        return .{
+            .instruction_addresses = &self.stack_addresses,
+            .index = len,
+        };
+    }
 };
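For illustration, here is a minimal sketch (hypothetical test code, not part of this diff) of when the new `error.SwallowedOutOfMemoryError` can come back from `checkAllAllocationFailures`, and how a caller whose code deliberately recovers from `error.OutOfMemory` might choose to tolerate it:

```zig
const std = @import("std");

// Hypothetical code under test: it recovers from a failed allocation by
// skipping the optional work, so an induced failure never propagates as
// `error.OutOfMemory`.
fn appendGreeting(allocator: std.mem.Allocator, name: []const u8) !void {
    var list = std.ArrayList(u8).init(allocator);
    defer list.deinit();

    // Deliberate recovery: if this allocation fails, give up silently.
    list.appendSlice(name) catch return;
    try list.append('!');
}

test "recovered allocation failure reported as SwallowedOutOfMemoryError" {
    std.testing.checkAllAllocationFailures(
        std.testing.allocator,
        appendGreeting,
        .{"hello"},
    ) catch |err| switch (err) {
        // Expected here: an allocation was made to fail, yet `appendGreeting`
        // returned successfully because of the recovery path above.
        error.SwallowedOutOfMemoryError => {},
        else => |e| return e,
    };
}
```

If the code under test is not supposed to swallow `error.OutOfMemory`, the `catch` can be dropped so the error fails the test, and the leak report now includes the captured stack trace of the allocation that was made to fail.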
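A second sketch (again hypothetical, assuming only the fields and method added in this diff) shows the captured failure site being inspected directly on a `FailingAllocator`:

```zig
const std = @import("std");

test "dump the stack trace of the induced allocation failure" {
    // Fail the very first allocation routed through this allocator.
    var failing = std.testing.FailingAllocator.init(std.testing.allocator, 0);
    const allocator = failing.allocator();

    try std.testing.expectError(error.OutOfMemory, allocator.alloc(u8, 16));

    // `has_induced_failure` flips on the first induced failure, and
    // `getStackTrace()` returns the return addresses captured at that point
    // (empty on targets where `std.debug.sys_can_stack_trace` is false).
    try std.testing.expect(failing.has_induced_failure);
    std.debug.dumpStackTrace(failing.getStackTrace());
}
```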