Mirror of https://github.com/ziglang/zig.git
adjust runtime page size APIs
* fix merge conflicts
* rename the declarations
* reword documentation
* extract FixedBufferAllocator to separate file
* take advantage of locals
* remove the assertion about max alignment in Allocator API, leaving it Allocator implementation defined
* fix non-inline function call in start logic

The GeneralPurposeAllocator implementation is totally broken because it uses global state, but I didn't address that in this commit.
Parent: 439667be04
Commit: 284de7d957
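The rename moves the comptime page size bounds to `std.heap.page_size_min` and `std.heap.page_size_max`, with the actual page size queried at runtime through `std.heap.pageSize()`. A minimal sketch of how the three relate (illustrative, not part of the commit):

    const std = @import("std");

    pub fn main() void {
        // Comptime lower and upper bounds on the page size for the target.
        const min = std.heap.page_size_min;
        const max = std.heap.page_size_max;
        // The real page size, resolved at runtime; always within [min, max].
        const actual = std.heap.pageSize();
        std.debug.assert(actual >= min and actual <= max);
        std.debug.print("page size: {d} (bounds: {d}..{d})\n", .{ actual, min, max });
    }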
@@ -480,7 +480,7 @@ pub const MemoryMappedList = struct {
 /// of this ArrayList in accordance with the respective documentation. In
 /// all cases, "invalidated" means that the memory has been passed to this
 /// allocator's resize or free function.
-items: []align(std.heap.min_page_size) volatile u8,
+items: []align(std.heap.page_size_min) volatile u8,
 /// How many bytes this list can hold without allocating additional memory.
 capacity: usize,

@@ -41,7 +41,7 @@ const fuzzer_arch_os_abi = "wasm32-freestanding";
 const fuzzer_cpu_features = "baseline+atomics+bulk_memory+multivalue+mutable_globals+nontrapping_fptoint+reference_types+sign_ext";

 const CoverageMap = struct {
-mapped_memory: []align(std.heap.min_page_size) const u8,
+mapped_memory: []align(std.heap.page_size_min) const u8,
 coverage: Coverage,
 source_locations: []Coverage.SourceLocation,
 /// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
@@ -1155,7 +1155,7 @@ const LinuxThreadImpl = struct {
 completion: Completion = Completion.init(.running),
 child_tid: std.atomic.Value(i32) = std.atomic.Value(i32).init(1),
 parent_tid: i32 = undefined,
-mapped: []align(std.heap.min_page_size) u8,
+mapped: []align(std.heap.page_size_min) u8,

 /// Calls `munmap(mapped.ptr, mapped.len)` then `exit(1)` without touching the stack (which lives in `mapped.ptr`).
 /// Ported over from musl libc's pthread detached implementation:
@@ -3,7 +3,7 @@ const builtin = @import("builtin");
 const c = @This();
 const maxInt = std.math.maxInt;
 const assert = std.debug.assert;
-const min_page_size = std.heap.min_page_size;
+const page_size = std.heap.page_size_min;
 const native_abi = builtin.abi;
 const native_arch = builtin.cpu.arch;
 const native_os = builtin.os.tag;
@@ -2229,7 +2229,7 @@ pub const SC = switch (native_os) {
 };

 pub const _SC = switch (native_os) {
-.bridgeos, .driverkit, .ios, .macos, .tvos, .visionos, .watchos => enum(c_int) {
+.driverkit, .ios, .macos, .tvos, .visionos, .watchos => enum(c_int) {
 PAGESIZE = 29,
 },
 .dragonfly => enum(c_int) {
@@ -9265,7 +9265,7 @@ pub extern "c" fn getpwnam(name: [*:0]const u8) ?*passwd;
 pub extern "c" fn getpwuid(uid: uid_t) ?*passwd;
 pub extern "c" fn getrlimit64(resource: rlimit_resource, rlim: *rlimit) c_int;
 pub extern "c" fn lseek64(fd: fd_t, offset: i64, whence: c_int) i64;
-pub extern "c" fn mmap64(addr: ?*align(min_page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
+pub extern "c" fn mmap64(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: c_uint, fd: fd_t, offset: i64) *anyopaque;
 pub extern "c" fn open64(path: [*:0]const u8, oflag: O, ...) c_int;
 pub extern "c" fn openat64(fd: c_int, path: [*:0]const u8, oflag: O, ...) c_int;
 pub extern "c" fn pread64(fd: fd_t, buf: [*]u8, nbyte: usize, offset: i64) isize;
@@ -9357,13 +9357,13 @@ pub extern "c" fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) c_int;

 pub extern "c" fn prlimit(pid: pid_t, resource: rlimit_resource, new_limit: *const rlimit, old_limit: *rlimit) c_int;
 pub extern "c" fn mincore(
-addr: *align(min_page_size) anyopaque,
+addr: *align(page_size) anyopaque,
 length: usize,
 vec: [*]u8,
 ) c_int;

 pub extern "c" fn madvise(
-addr: *align(min_page_size) anyopaque,
+addr: *align(page_size) anyopaque,
 length: usize,
 advice: u32,
 ) c_int;
@@ -9506,9 +9506,9 @@ pub extern "c" fn writev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint) i
 pub extern "c" fn pwritev(fd: c_int, iov: [*]const iovec_const, iovcnt: c_uint, offset: off_t) isize;
 pub extern "c" fn write(fd: fd_t, buf: [*]const u8, nbyte: usize) isize;
 pub extern "c" fn pwrite(fd: fd_t, buf: [*]const u8, nbyte: usize, offset: off_t) isize;
-pub extern "c" fn mmap(addr: ?*align(min_page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
-pub extern "c" fn munmap(addr: *align(min_page_size) const anyopaque, len: usize) c_int;
-pub extern "c" fn mprotect(addr: *align(min_page_size) anyopaque, len: usize, prot: c_uint) c_int;
+pub extern "c" fn mmap(addr: ?*align(page_size) anyopaque, len: usize, prot: c_uint, flags: MAP, fd: fd_t, offset: off_t) *anyopaque;
+pub extern "c" fn munmap(addr: *align(page_size) const anyopaque, len: usize) c_int;
+pub extern "c" fn mprotect(addr: *align(page_size) anyopaque, len: usize, prot: c_uint) c_int;
 pub extern "c" fn link(oldpath: [*:0]const u8, newpath: [*:0]const u8) c_int;
 pub extern "c" fn linkat(oldfd: fd_t, oldpath: [*:0]const u8, newfd: fd_t, newpath: [*:0]const u8, flags: c_int) c_int;
 pub extern "c" fn unlink(path: [*:0]const u8) c_int;
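Pointer alignment in Zig is a comptime property, while the true page size is only known at runtime, which is why these extern prototypes are annotated with the comptime lower bound (`page_size`, aliased above to `std.heap.page_size_min`) rather than the runtime value. A hedged sketch of the idea; `assumePageAligned` is a hypothetical helper, not part of the diff:

    const std = @import("std");

    // Check the runtime page alignment, then vouch for the comptime
    // minimum alignment that the extern prototypes require.
    fn assumePageAligned(ptr: *anyopaque) *align(std.heap.page_size_min) anyopaque {
        std.debug.assert(std.mem.isAligned(@intFromPtr(ptr), std.heap.pageSize()));
        return @alignCast(ptr);
    }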
@@ -10191,7 +10191,7 @@ const private = struct {
 };
 extern "c" fn getrusage(who: c_int, usage: *rusage) c_int;
 extern "c" fn gettimeofday(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
-extern "c" fn msync(addr: *align(min_page_size) const anyopaque, len: usize, flags: c_int) c_int;
+extern "c" fn msync(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
 extern "c" fn nanosleep(rqtp: *const timespec, rmtp: ?*timespec) c_int;
 extern "c" fn pipe2(fds: *[2]fd_t, flags: O) c_int;
 extern "c" fn readdir(dir: *DIR) ?*dirent;
@@ -10239,7 +10239,7 @@ const private = struct {
 extern "c" fn __getrusage50(who: c_int, usage: *rusage) c_int;
 extern "c" fn __gettimeofday50(noalias tv: ?*timeval, noalias tz: ?*timezone) c_int;
 extern "c" fn __libc_thr_yield() c_int;
-extern "c" fn __msync13(addr: *align(min_page_size) const anyopaque, len: usize, flags: c_int) c_int;
+extern "c" fn __msync13(addr: *align(page_size) const anyopaque, len: usize, flags: c_int) c_int;
 extern "c" fn __nanosleep50(rqtp: *const timespec, rmtp: ?*timespec) c_int;
 extern "c" fn __sigaction14(sig: c_int, noalias act: ?*const Sigaction, noalias oact: ?*Sigaction) c_int;
 extern "c" fn __sigfillset14(set: ?*sigset_t) void;
@@ -6,7 +6,6 @@
 const std = @import("std");
 const builtin = @import("builtin");
 const mem = std.mem;
-const heap = std.heap;
 const native_os = builtin.os.tag;
 const posix = std.posix;

@@ -43,7 +42,7 @@ var install_atfork_handler = std.once(struct {
 }
 }.do);

-threadlocal var wipe_mem: []align(heap.min_page_size) u8 = &[_]u8{};
+threadlocal var wipe_mem: []align(std.heap.page_size_min) u8 = &[_]u8{};

 fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
 if (os_has_arc4random) {
@@ -78,7 +77,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
 } else {
 // Use a static thread-local buffer.
 const S = struct {
-threadlocal var buf: Context align(heap.min_page_size) = .{
+threadlocal var buf: Context align(std.heap.page_size_min) = .{
 .init_state = .uninitialized,
 .rng = undefined,
 };
@@ -86,7 +85,7 @@ fn tlsCsprngFill(_: *anyopaque, buffer: []u8) void {
 wipe_mem = mem.asBytes(&S.buf);
 }
 }
-const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
+const ctx: *Context = @ptrCast(wipe_mem.ptr);

 switch (ctx.init_state) {
 .uninitialized => {
@@ -142,7 +141,7 @@ fn childAtForkHandler() callconv(.c) void {
 }

 fn fillWithCsprng(buffer: []u8) void {
-const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
+const ctx: *Context = @ptrCast(wipe_mem.ptr);
 return ctx.rng.fill(buffer);
 }

@@ -158,7 +157,7 @@ fn initAndFill(buffer: []u8) void {
 // the `std.options.cryptoRandomSeed` function is provided.
 std.options.cryptoRandomSeed(&seed);

-const ctx = @as(*Context, @ptrCast(wipe_mem.ptr));
+const ctx: *Context = @ptrCast(wipe_mem.ptr);
 ctx.rng = Rng.init(seed);
 std.crypto.secureZero(u8, &seed);

@@ -2,7 +2,6 @@ const builtin = @import("builtin");
 const std = @import("std.zig");
 const math = std.math;
 const mem = std.mem;
-const heap = std.heap;
 const io = std.io;
 const posix = std.posix;
 const fs = std.fs;
@@ -1238,7 +1237,7 @@ test printLineFromFileAnyOs {

 const overlap = 10;
 var writer = file.writer();
-try writer.writeByteNTimes('a', heap.min_page_size - overlap);
+try writer.writeByteNTimes('a', std.heap.page_size_min - overlap);
 try writer.writeByte('\n');
 try writer.writeByteNTimes('a', overlap);

@@ -1253,10 +1252,10 @@ test printLineFromFileAnyOs {
 defer allocator.free(path);

 var writer = file.writer();
-try writer.writeByteNTimes('a', heap.max_page_size);
+try writer.writeByteNTimes('a', std.heap.page_size_max);

 try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
-try expectEqualStrings(("a" ** heap.max_page_size) ++ "\n", output.items);
+try expectEqualStrings(("a" ** std.heap.page_size_max) ++ "\n", output.items);
 output.clearRetainingCapacity();
 }
 {
@@ -1266,18 +1265,18 @@ test printLineFromFileAnyOs {
 defer allocator.free(path);

 var writer = file.writer();
-try writer.writeByteNTimes('a', 3 * heap.max_page_size);
+try writer.writeByteNTimes('a', 3 * std.heap.page_size_max);

 try expectError(error.EndOfFile, printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 }));

 try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
-try expectEqualStrings(("a" ** (3 * heap.max_page_size)) ++ "\n", output.items);
+try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "\n", output.items);
 output.clearRetainingCapacity();

 try writer.writeAll("a\na");

 try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 1, .column = 0 });
-try expectEqualStrings(("a" ** (3 * heap.max_page_size)) ++ "a\n", output.items);
+try expectEqualStrings(("a" ** (3 * std.heap.page_size_max)) ++ "a\n", output.items);
 output.clearRetainingCapacity();

 try printLineFromFileAnyOs(output_stream, .{ .file_name = path, .line = 2, .column = 0 });
@@ -1291,7 +1290,7 @@ test printLineFromFileAnyOs {
 defer allocator.free(path);

 var writer = file.writer();
-const real_file_start = 3 * heap.min_page_size;
+const real_file_start = 3 * std.heap.page_size_min;
 try writer.writeByteNTimes('\n', real_file_start);
 try writer.writeAll("abc\ndef");

@@ -2120,8 +2120,8 @@ fn pcRelBase(field_ptr: usize, pc_rel_offset: i64) !usize {
 pub const ElfModule = struct {
 base_address: usize,
 dwarf: Dwarf,
-mapped_memory: []align(std.heap.min_page_size) const u8,
-external_mapped_memory: ?[]align(std.heap.min_page_size) const u8,
+mapped_memory: []align(std.heap.page_size_min) const u8,
+external_mapped_memory: ?[]align(std.heap.page_size_min) const u8,

 pub fn deinit(self: *@This(), allocator: Allocator) void {
 self.dwarf.deinit(allocator);
@@ -2167,11 +2167,11 @@ pub const ElfModule = struct {
 /// sections from an external file.
 pub fn load(
 gpa: Allocator,
-mapped_mem: []align(std.heap.min_page_size) const u8,
+mapped_mem: []align(std.heap.page_size_min) const u8,
 build_id: ?[]const u8,
 expected_crc: ?u32,
 parent_sections: *Dwarf.SectionArray,
-parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
+parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
 elf_filename: ?[]const u8,
 ) LoadError!Dwarf.ElfModule {
 if (expected_crc) |crc| if (crc != std.hash.crc.Crc32.hash(mapped_mem)) return error.InvalidDebugInfo;
@@ -2423,7 +2423,7 @@ pub const ElfModule = struct {
 build_id: ?[]const u8,
 expected_crc: ?u32,
 parent_sections: *Dwarf.SectionArray,
-parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
+parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
 ) LoadError!Dwarf.ElfModule {
 const elf_file = elf_file_path.root_dir.handle.openFile(elf_file_path.sub_path, .{}) catch |err| switch (err) {
 error.FileNotFound => return missing(),
@@ -7,7 +7,7 @@ const native_os = builtin.os.tag;
 const std = @import("../std.zig");
 const posix = std.posix;
 const File = std.fs.File;
-const min_page_size = std.heap.min_page_size;
+const page_size_min = std.heap.page_size_min;

 const MemoryAccessor = @This();

@@ -96,7 +96,7 @@ pub fn isValidMemory(address: usize) bool {
 const page_size = std.heap.pageSize();
 const aligned_address = address & ~(page_size - 1);
 if (aligned_address == 0) return false;
-const aligned_memory = @as([*]align(min_page_size) u8, @ptrFromInt(aligned_address))[0..page_size];
+const aligned_memory = @as([*]align(page_size_min) u8, @ptrFromInt(aligned_address))[0..page_size];

 if (native_os == .windows) {
 const windows = std.os.windows;
@@ -504,7 +504,7 @@ pub const Module = switch (native_os) {
 .macos, .ios, .watchos, .tvos, .visionos => struct {
 base_address: usize,
 vmaddr_slide: usize,
-mapped_memory: []align(std.heap.min_page_size) const u8,
+mapped_memory: []align(std.heap.page_size_min) const u8,
 symbols: []const MachoSymbol,
 strings: [:0]const u8,
 ofiles: OFileTable,
@@ -1046,7 +1046,7 @@ pub fn readElfDebugInfo(
 build_id: ?[]const u8,
 expected_crc: ?u32,
 parent_sections: *Dwarf.SectionArray,
-parent_mapped_mem: ?[]align(std.heap.min_page_size) const u8,
+parent_mapped_mem: ?[]align(std.heap.page_size_min) const u8,
 ) !Dwarf.ElfModule {
 nosuspend {
 const elf_file = (if (elf_filename) |filename| blk: {
@@ -1088,7 +1088,7 @@ const MachoSymbol = struct {

 /// Takes ownership of file, even on error.
 /// TODO it's weird to take ownership even on error, rework this code.
-fn mapWholeFile(file: File) ![]align(std.heap.min_page_size) const u8 {
+fn mapWholeFile(file: File) ![]align(std.heap.page_size_min) const u8 {
 nosuspend {
 defer file.close();

@@ -1,7 +1,6 @@
 const std = @import("std.zig");
 const builtin = @import("builtin");
 const mem = std.mem;
-const heap = std.heap;
 const testing = std.testing;
 const elf = std.elf;
 const windows = std.os.windows;
@@ -144,7 +143,7 @@ pub const ElfDynLib = struct {
 hashtab: [*]posix.Elf_Symndx,
 versym: ?[*]elf.Versym,
 verdef: ?*elf.Verdef,
-memory: []align(heap.min_page_size) u8,
+memory: []align(std.heap.page_size_min) u8,

 pub const Error = ElfDynLibError;

@@ -220,11 +219,13 @@ pub const ElfDynLib = struct {
 const stat = try file.stat();
 const size = std.math.cast(usize, stat.size) orelse return error.FileTooBig;

+const page_size = std.heap.pageSize();
+
 // This one is to read the ELF info. We do more mmapping later
 // corresponding to the actual LOAD sections.
 const file_bytes = try posix.mmap(
 null,
-mem.alignForward(usize, size, heap.pageSize()),
+mem.alignForward(usize, size, page_size),
 posix.PROT.READ,
 .{ .TYPE = .PRIVATE },
 fd,
@@ -285,10 +286,10 @@ pub const ElfDynLib = struct {
 elf.PT_LOAD => {
 // The VirtAddr may not be page-aligned; in such case there will be
 // extra nonsense mapped before/after the VirtAddr,MemSiz
-const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, heap.pageSize()) - 1);
+const aligned_addr = (base + ph.p_vaddr) & ~(@as(usize, page_size) - 1);
 const extra_bytes = (base + ph.p_vaddr) - aligned_addr;
-const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, heap.pageSize());
-const ptr = @as([*]align(heap.min_page_size) u8, @ptrFromInt(aligned_addr));
+const extended_memsz = mem.alignForward(usize, ph.p_memsz + extra_bytes, page_size);
+const ptr = @as([*]align(std.heap.page_size_min) u8, @ptrFromInt(aligned_addr));
 const prot = elfToMmapProt(ph.p_flags);
 if ((ph.p_flags & elf.PF_W) == 0) {
 // If it does not need write access, it can be mapped from the fd.
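The two hunks above are among the "take advantage of locals" changes from the commit message: `std.heap.pageSize()` can involve a runtime query, so it is fetched once into a `page_size` local instead of being re-evaluated at each use. A small illustrative sketch of the pattern (the helper name is hypothetical):

    const std = @import("std");
    const mem = std.mem;

    // Round a mapping length up to whole pages, querying the runtime
    // page size exactly once per call.
    fn pageAlignedLen(len: usize) usize {
        const page_size = std.heap.pageSize();
        return mem.alignForward(usize, len, page_size);
    }

    test pageAlignedLen {
        const page_size = std.heap.pageSize();
        try std.testing.expectEqual(page_size, pageAlignedLen(1));
        try std.testing.expectEqual(2 * page_size, pageAlignedLen(page_size + 1));
    }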
lib/std/heap.zig: 993 lines changed (diff suppressed because it is too large).
lib/std/heap/FixedBufferAllocator.zig: new file, 218 lines.
@@ -0,0 +1,218 @@
+const std = @import("../std.zig");
+const Allocator = std.mem.Allocator;
+const assert = std.debug.assert;
+const mem = std.mem;
+
+const FixedBufferAllocator = @This();
+
+end_index: usize,
+buffer: []u8,
+
+pub fn init(buffer: []u8) FixedBufferAllocator {
+return FixedBufferAllocator{
+.buffer = buffer,
+.end_index = 0,
+};
+}
+
+/// Using this at the same time as the interface returned by `threadSafeAllocator` is not thread safe.
+pub fn allocator(self: *FixedBufferAllocator) Allocator {
+return .{
+.ptr = self,
+.vtable = &.{
+.alloc = alloc,
+.resize = resize,
+.free = free,
+},
+};
+}
+
+/// Provides a lock free thread safe `Allocator` interface to the underlying `FixedBufferAllocator`
+///
+/// Using this at the same time as the interface returned by `allocator` is not thread safe.
+pub fn threadSafeAllocator(self: *FixedBufferAllocator) Allocator {
+return .{
+.ptr = self,
+.vtable = &.{
+.alloc = threadSafeAlloc,
+.resize = Allocator.noResize,
+.free = Allocator.noFree,
+},
+};
+}
+
+pub fn ownsPtr(self: *FixedBufferAllocator, ptr: [*]u8) bool {
+return sliceContainsPtr(self.buffer, ptr);
+}
+
+pub fn ownsSlice(self: *FixedBufferAllocator, slice: []u8) bool {
+return sliceContainsSlice(self.buffer, slice);
+}
+
+/// This has false negatives when the last allocation had an
+/// adjusted_index. In such case we won't be able to determine what the
+/// last allocation was because the alignForward operation done in alloc is
+/// not reversible.
+pub fn isLastAllocation(self: *FixedBufferAllocator, buf: []u8) bool {
+return buf.ptr + buf.len == self.buffer.ptr + self.end_index;
+}
+
+pub fn alloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+_ = ra;
+const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
+const adjust_off = mem.alignPointerOffset(self.buffer.ptr + self.end_index, ptr_align) orelse return null;
+const adjusted_index = self.end_index + adjust_off;
+const new_end_index = adjusted_index + n;
+if (new_end_index > self.buffer.len) return null;
+self.end_index = new_end_index;
+return self.buffer.ptr + adjusted_index;
+}
+
+pub fn resize(
+ctx: *anyopaque,
+buf: []u8,
+log2_buf_align: u8,
+new_size: usize,
+return_address: usize,
+) bool {
+const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+_ = log2_buf_align;
+_ = return_address;
+assert(@inComptime() or self.ownsSlice(buf));
+
+if (!self.isLastAllocation(buf)) {
+if (new_size > buf.len) return false;
+return true;
+}
+
+if (new_size <= buf.len) {
+const sub = buf.len - new_size;
+self.end_index -= sub;
+return true;
+}
+
+const add = new_size - buf.len;
+if (add + self.end_index > self.buffer.len) return false;
+
+self.end_index += add;
+return true;
+}
+
+pub fn free(
+ctx: *anyopaque,
+buf: []u8,
+log2_buf_align: u8,
+return_address: usize,
+) void {
+const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+_ = log2_buf_align;
+_ = return_address;
+assert(@inComptime() or self.ownsSlice(buf));
+
+if (self.isLastAllocation(buf)) {
+self.end_index -= buf.len;
+}
+}
+
+fn threadSafeAlloc(ctx: *anyopaque, n: usize, log2_ptr_align: u8, ra: usize) ?[*]u8 {
+const self: *FixedBufferAllocator = @ptrCast(@alignCast(ctx));
+_ = ra;
+const ptr_align = @as(usize, 1) << @as(Allocator.Log2Align, @intCast(log2_ptr_align));
+var end_index = @atomicLoad(usize, &self.end_index, .seq_cst);
+while (true) {
+const adjust_off = mem.alignPointerOffset(self.buffer.ptr + end_index, ptr_align) orelse return null;
+const adjusted_index = end_index + adjust_off;
+const new_end_index = adjusted_index + n;
+if (new_end_index > self.buffer.len) return null;
+end_index = @cmpxchgWeak(usize, &self.end_index, end_index, new_end_index, .seq_cst, .seq_cst) orelse
+return self.buffer[adjusted_index..new_end_index].ptr;
+}
+}
+
+pub fn reset(self: *FixedBufferAllocator) void {
+self.end_index = 0;
+}
+
+fn sliceContainsPtr(container: []u8, ptr: [*]u8) bool {
+return @intFromPtr(ptr) >= @intFromPtr(container.ptr) and
+@intFromPtr(ptr) < (@intFromPtr(container.ptr) + container.len);
+}
+
+fn sliceContainsSlice(container: []u8, slice: []u8) bool {
+return @intFromPtr(slice.ptr) >= @intFromPtr(container.ptr) and
+(@intFromPtr(slice.ptr) + slice.len) <= (@intFromPtr(container.ptr) + container.len);
+}
+
+var test_fixed_buffer_allocator_memory: [800000 * @sizeOf(u64)]u8 = undefined;
+
+test FixedBufferAllocator {
+var fixed_buffer_allocator = mem.validationWrap(FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]));
+const a = fixed_buffer_allocator.allocator();
+
+try std.heap.testAllocator(a);
+try std.heap.testAllocatorAligned(a);
+try std.heap.testAllocatorLargeAlignment(a);
+try std.heap.testAllocatorAlignedShrink(a);
+}
+
+test reset {
+var buf: [8]u8 align(@alignOf(u64)) = undefined;
+var fba = FixedBufferAllocator.init(buf[0..]);
+const a = fba.allocator();
+
+const X = 0xeeeeeeeeeeeeeeee;
+const Y = 0xffffffffffffffff;
+
+const x = try a.create(u64);
+x.* = X;
+try std.testing.expectError(error.OutOfMemory, a.create(u64));
+
+fba.reset();
+const y = try a.create(u64);
+y.* = Y;
+
+// we expect Y to have overwritten X.
+try std.testing.expect(x.* == y.*);
+try std.testing.expect(y.* == Y);
+}
+
+test "reuse memory on realloc" {
+var small_fixed_buffer: [10]u8 = undefined;
+// check if we re-use the memory
+{
+var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+const a = fixed_buffer_allocator.allocator();
+
+const slice0 = try a.alloc(u8, 5);
+try std.testing.expect(slice0.len == 5);
+const slice1 = try a.realloc(slice0, 10);
+try std.testing.expect(slice1.ptr == slice0.ptr);
+try std.testing.expect(slice1.len == 10);
+try std.testing.expectError(error.OutOfMemory, a.realloc(slice1, 11));
+}
+// check that we don't re-use the memory if it's not the most recent block
+{
+var fixed_buffer_allocator = FixedBufferAllocator.init(small_fixed_buffer[0..]);
+const a = fixed_buffer_allocator.allocator();
+
+var slice0 = try a.alloc(u8, 2);
+slice0[0] = 1;
+slice0[1] = 2;
+const slice1 = try a.alloc(u8, 2);
+const slice2 = try a.realloc(slice0, 4);
+try std.testing.expect(slice0.ptr != slice2.ptr);
+try std.testing.expect(slice1.ptr != slice2.ptr);
+try std.testing.expect(slice2[0] == 1);
+try std.testing.expect(slice2[1] == 2);
+}
+}
+
+test "thread safe version" {
+var fixed_buffer_allocator = FixedBufferAllocator.init(test_fixed_buffer_allocator_memory[0..]);
+
+try std.heap.testAllocator(fixed_buffer_allocator.threadSafeAllocator());
+try std.heap.testAllocatorAligned(fixed_buffer_allocator.threadSafeAllocator());
+try std.heap.testAllocatorLargeAlignment(fixed_buffer_allocator.threadSafeAllocator());
+try std.heap.testAllocatorAlignedShrink(fixed_buffer_allocator.threadSafeAllocator());
+}
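The extracted file keeps the allocator's public surface (`init`, `allocator`, `threadSafeAllocator`, `ownsSlice`, `reset`) unchanged. A brief usage sketch, assuming the file remains reachable as `std.heap.FixedBufferAllocator` as before:

    const std = @import("std");

    test "FixedBufferAllocator usage (illustrative)" {
        var buffer: [1024]u8 = undefined;
        var fba = std.heap.FixedBufferAllocator.init(&buffer);
        const a = fba.allocator();

        // Allocations come straight out of `buffer`; nothing touches the heap.
        const slice = try a.alloc(u32, 8);
        try std.testing.expect(fba.ownsSlice(std.mem.sliceAsBytes(slice)));
        a.free(slice);

        // Rewind the allocator; previously returned memory may be reused.
        fba.reset();
    }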
@@ -2,14 +2,14 @@ const std = @import("../std.zig");
 const builtin = @import("builtin");
 const Allocator = std.mem.Allocator;
 const mem = std.mem;
-const heap = std.heap;
 const maxInt = std.math.maxInt;
 const assert = std.debug.assert;
 const native_os = builtin.os.tag;
 const windows = std.os.windows;
 const posix = std.posix;
+const page_size_min = std.heap.page_size_min;

-pub const vtable = Allocator.VTable{
+pub const vtable: Allocator.VTable = .{
 .alloc = alloc,
 .resize = resize,
 .free = free,
@@ -19,7 +19,6 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
 _ = ra;
 _ = log2_align;
 assert(n > 0);
-if (n > maxInt(usize) - (heap.pageSize() - 1)) return null;

 if (native_os == .windows) {
 const addr = windows.VirtualAlloc(
@@ -35,7 +34,10 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
 return @ptrCast(addr);
 }

-const aligned_len = mem.alignForward(usize, n, heap.pageSize());
+const page_size = std.heap.pageSize();
+if (n >= maxInt(usize) - page_size) return null;
+
+const aligned_len = mem.alignForward(usize, n, page_size);
 const hint = @atomicLoad(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, .unordered);
 const slice = posix.mmap(
 hint,
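The hoisted local also relocates the overflow guard: the old check rejected `n` values where adding `pageSize() - 1` could wrap, and the new `n >= maxInt(usize) - page_size` check sits after the Windows path, next to the `alignForward` that needs it. A tiny illustration of why the guard exists (illustrative values):

    const std = @import("std");

    test "page rounding near maxInt would wrap (illustrative)" {
        const page_size: usize = 4096; // assumed page size
        const n = std.math.maxInt(usize) - 10;
        // `alignForward` computes n + (page_size - 1) before masking, which
        // would overflow here, so alloc must bail out first.
        try std.testing.expect(n >= std.math.maxInt(usize) - page_size);
    }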
@@ -45,8 +47,8 @@ fn alloc(_: *anyopaque, n: usize, log2_align: u8, ra: usize) ?[*]u8 {
 -1,
 0,
 ) catch return null;
-assert(mem.isAligned(@intFromPtr(slice.ptr), heap.pageSize()));
-const new_hint: [*]align(heap.min_page_size) u8 = @alignCast(slice.ptr + aligned_len);
+assert(mem.isAligned(@intFromPtr(slice.ptr), page_size_min));
+const new_hint: [*]align(std.heap.page_size_min) u8 = @alignCast(slice.ptr + aligned_len);
 _ = @cmpxchgStrong(@TypeOf(std.heap.next_mmap_addr_hint), &std.heap.next_mmap_addr_hint, hint, new_hint, .monotonic, .monotonic);
 return slice.ptr;
 }
@@ -60,13 +62,14 @@ fn resize(
 ) bool {
 _ = log2_buf_align;
 _ = return_address;
-const new_size_aligned = mem.alignForward(usize, new_size, heap.pageSize());
+const page_size = std.heap.pageSize();
+const new_size_aligned = mem.alignForward(usize, new_size, page_size);

 if (native_os == .windows) {
 if (new_size <= buf_unaligned.len) {
 const base_addr = @intFromPtr(buf_unaligned.ptr);
 const old_addr_end = base_addr + buf_unaligned.len;
-const new_addr_end = mem.alignForward(usize, base_addr + new_size, heap.pageSize());
+const new_addr_end = mem.alignForward(usize, base_addr + new_size, page_size);
 if (old_addr_end > new_addr_end) {
 // For shrinking that is not releasing, we will only
 // decommit the pages not needed anymore.
@@ -78,14 +81,14 @@ fn resize(
 }
 return true;
 }
-const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, heap.pageSize());
+const old_size_aligned = mem.alignForward(usize, buf_unaligned.len, page_size);
 if (new_size_aligned <= old_size_aligned) {
 return true;
 }
 return false;
 }

-const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, heap.pageSize());
+const buf_aligned_len = mem.alignForward(usize, buf_unaligned.len, page_size);
 if (new_size_aligned == buf_aligned_len)
 return true;

@@ -108,7 +111,7 @@ fn free(_: *anyopaque, slice: []u8, log2_buf_align: u8, return_address: usize) v
 if (native_os == .windows) {
 windows.VirtualFree(slice.ptr, 0, windows.MEM_RELEASE);
 } else {
-const buf_aligned_len = mem.alignForward(usize, slice.len, heap.pageSize());
+const buf_aligned_len = mem.alignForward(usize, slice.len, std.heap.pageSize());
 posix.munmap(@alignCast(slice.ptr[0..buf_aligned_len]));
 }
 }
@@ -75,7 +75,7 @@
 //! BucketHeader, followed by "used bits", and two stack traces for each slot
 //! (allocation trace and free trace).
 //!
-//! The buckets array contains buckets for every size class below `max_page_size`.
+//! The buckets array contains buckets for every size class below `page_size_max`.
 //! At runtime, only size classes below `pageSize()` will actually be used for allocations.
 //!
 //! The "used bits" are 1 bit per slot representing whether the slot is used.
@@ -102,13 +102,13 @@ const math = std.math;
 const assert = std.debug.assert;
 const mem = std.mem;
 const Allocator = std.mem.Allocator;
-const min_page_size = std.heap.min_page_size;
-const max_page_size = std.heap.max_page_size;
+const page_size_min = std.heap.page_size_min;
+const page_size_max = std.heap.page_size_max;
 const pageSize = std.heap.pageSize;
 const StackTrace = std.builtin.StackTrace;

 /// Integer type for pointing to slots in a small allocation
-const SlotIndex = std.meta.Int(.unsigned, math.log2(max_page_size) + 1);
+const SlotIndex = std.meta.Int(.unsigned, math.log2(page_size_max) + 1);

 const default_test_stack_trace_frames: usize = if (builtin.is_test) 10 else 6;
 const default_sys_stack_trace_frames: usize = if (std.debug.sys_can_stack_trace) default_test_stack_trace_frames else 0;
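Since `SlotIndex` and the bucket count are comptime values, they must be sized for the largest page the target might use. Worked numbers, assuming a `page_size_max` of 64 KiB: there are 16 small size classes (1, 2, 4, ... 32768 bytes):

    const std = @import("std");

    test "GPA size class arithmetic (illustrative)" {
        const page_size_max = 64 * 1024; // assumed comptime upper bound
        const small_bucket_count = std.math.log2(page_size_max); // 16
        const largest_bucket_object_size = 1 << (small_bucket_count - 1); // 32 KiB
        try std.testing.expectEqual(16, small_bucket_count);
        try std.testing.expectEqual(32768, largest_bucket_object_size);
    }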
@@ -214,7 +214,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {

 pub const Error = mem.Allocator.Error;

-const small_bucket_count = math.log2(max_page_size);
+const small_bucket_count = math.log2(page_size_max);
 const largest_bucket_object_size = 1 << (small_bucket_count - 1);
 const LargestSizeClassInt = std.math.IntFittingRange(0, largest_bucket_object_size);
 fn used_small_bucket_count() usize {
@@ -287,7 +287,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 // * stack_trace_addresses: [N]usize, // traces_per_slot for every allocation

 const BucketHeader = struct {
-page: [*]align(min_page_size) u8,
+page: [*]align(page_size_min) u8,
 alloc_cursor: SlotIndex,
 used_count: SlotIndex,

@@ -591,7 +591,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 addr: usize,
 current_bucket: ?*BucketHeader,
 ) ?*BucketHeader {
-const search_page: [*]align(min_page_size) u8 = @ptrFromInt(mem.alignBackward(usize, addr, pageSize()));
+const search_page: [*]align(page_size_min) u8 = @ptrFromInt(mem.alignBackward(usize, addr, pageSize()));
 if (current_bucket != null and current_bucket.?.page == search_page) {
 return current_bucket;
 }
@@ -1062,7 +1062,7 @@ pub fn GeneralPurposeAllocator(comptime config: Config) type {
 }

 fn createBucket(self: *Self, size_class: usize) Error!*BucketHeader {
-const page = try self.backing_allocator.alignedAlloc(u8, min_page_size, pageSize());
+const page = try self.backing_allocator.alignedAlloc(u8, page_size_min, pageSize());
 errdefer self.backing_allocator.free(page);

 const bucket_size = bucketSize(size_class);
@@ -1048,17 +1048,18 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
 // as we don't read into a new page. This should be the case for most architectures
 // which use paged memory, however should be confirmed before adding a new arch below.
 .aarch64, .x86, .x86_64 => if (std.simd.suggestVectorLength(T)) |block_len| {
+const page_size = std.heap.pageSize();
 const block_size = @sizeOf(T) * block_len;
 const Block = @Vector(block_len, T);
 const mask: Block = @splat(sentinel);

-comptime std.debug.assert(std.heap.max_page_size % @sizeOf(Block) == 0);
-std.debug.assert(std.heap.pageSize() % @sizeOf(Block) == 0);
+comptime assert(std.heap.page_size_max % @sizeOf(Block) == 0);
+assert(page_size % @sizeOf(Block) == 0);

 // First block may be unaligned
 const start_addr = @intFromPtr(&p[i]);
-const offset_in_page = start_addr & (std.heap.pageSize() - 1);
-if (offset_in_page <= std.heap.pageSize() - @sizeOf(Block)) {
+const offset_in_page = start_addr & (page_size - 1);
+if (offset_in_page <= page_size - @sizeOf(Block)) {
 // Will not read past the end of a page, full block.
 const block: Block = p[i..][0..block_len].*;
 const matches = block == mask;
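The hunk above threads the hoisted `page_size` local through the boundary check: a full vector block may be read only when it does not extend past the end of the current page. Worked numbers, assuming a 4096-byte page and a 32-byte block:

    const std = @import("std");

    test "vector block page-crossing check (illustrative)" {
        const page_size: usize = 4096; // assumed runtime page size
        const block_size: usize = 32; // e.g. a 32-byte SIMD block
        // Starting 8 bytes before the end of a page would cross into the next:
        try std.testing.expect(!((page_size - 8) <= page_size - block_size));
        // Starting exactly one block before the end stays within the page:
        const offset = page_size - block_size;
        try std.testing.expect(offset + block_size <= page_size);
    }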
@@ -1078,7 +1079,7 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
 }
 }

-std.debug.assert(std.mem.isAligned(@intFromPtr(&p[i]), block_size));
+assert(std.mem.isAligned(@intFromPtr(&p[i]), block_size));
 while (true) {
 const block: *const Block = @ptrCast(@alignCast(p[i..][0..block_len]));
 const matches = block.* == mask;
@@ -1101,23 +1102,24 @@ pub fn indexOfSentinel(comptime T: type, comptime sentinel: T, p: [*:sentinel]co
 test "indexOfSentinel vector paths" {
 const Types = [_]type{ u8, u16, u32, u64 };
 const allocator = std.testing.allocator;
+const page_size = std.heap.pageSize();

 inline for (Types) |T| {
 const block_len = std.simd.suggestVectorLength(T) orelse continue;

 // Allocate three pages so we guarantee a page-crossing address with a full page after
-const memory = try allocator.alloc(T, 3 * std.heap.pageSize() / @sizeOf(T));
+const memory = try allocator.alloc(T, 3 * page_size / @sizeOf(T));
 defer allocator.free(memory);
 @memset(memory, 0xaa);

 // Find starting page-alignment = 0
 var start: usize = 0;
 const start_addr = @intFromPtr(&memory);
-start += (std.mem.alignForward(usize, start_addr, std.heap.pageSize()) - start_addr) / @sizeOf(T);
-try testing.expect(start < std.heap.pageSize() / @sizeOf(T));
+start += (std.mem.alignForward(usize, start_addr, page_size) - start_addr) / @sizeOf(T);
+try testing.expect(start < page_size / @sizeOf(T));

 // Validate all sub-block alignments
-const search_len = std.heap.pageSize() / @sizeOf(T);
+const search_len = page_size / @sizeOf(T);
 memory[start + search_len] = 0;
 for (0..block_len) |offset| {
 try testing.expectEqual(search_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start + offset])));
@@ -1125,7 +1127,7 @@ test "indexOfSentinel vector paths" {
 memory[start + search_len] = 0xaa;

 // Validate page boundary crossing
-const start_page_boundary = start + (std.heap.pageSize() / @sizeOf(T));
+const start_page_boundary = start + (page_size / @sizeOf(T));
 memory[start_page_boundary + block_len] = 0;
 for (0..block_len) |offset| {
 try testing.expectEqual(2 * block_len - offset, indexOfSentinel(T, 0, @ptrCast(&memory[start_page_boundary - block_len + offset])));
@@ -18,11 +18,15 @@ ptr: *anyopaque,
 vtable: *const VTable,

 pub const VTable = struct {
-/// Attempt to allocate exactly `len` bytes aligned to `1 << ptr_align`.
+/// Allocate exactly `len` bytes aligned to `1 << ptr_align`, or return `null`
+/// indicating the allocation failed.
 ///
 /// `ret_addr` is optionally provided as the first return address of the
 /// allocation call stack. If the value is `0` it means no return address
 /// has been provided.
+///
+/// The returned slice of memory must have been `@memset` to `undefined`
+/// by the allocator implementation.
 alloc: *const fn (ctx: *anyopaque, len: usize, ptr_align: u8, ret_addr: usize) ?[*]u8,

 /// Attempt to expand or shrink memory in place. `buf.len` must equal the
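The reworded doc makes the contract explicit: `alloc` either returns exactly `len` bytes or `null`, and the implementation is responsible for the `@memset` to `undefined`. A minimal vtable-backed allocator that always fails, as a sketch of the interface shape shown in this diff (names are illustrative):

    const std = @import("std");
    const Allocator = std.mem.Allocator;

    // An allocator that refuses every request; handy for exercising
    // OutOfMemory paths in tests.
    fn failingAlloc(_: *anyopaque, _: usize, _: u8, _: usize) ?[*]u8 {
        return null; // never allocates, so there is nothing to memset
    }

    pub const failing_allocator: Allocator = .{
        .ptr = undefined,
        .vtable = &.{
            .alloc = failingAlloc,
            .resize = Allocator.noResize,
            .free = Allocator.noFree,
        },
    };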
@@ -215,11 +219,6 @@ fn allocWithSizeAndAlignment(self: Allocator, comptime size: usize, comptime ali
 }

 fn allocBytesWithAlignment(self: Allocator, comptime alignment: u29, byte_count: usize, return_address: usize) Error![*]align(alignment) u8 {
-// The Zig Allocator interface is not intended to solve alignments beyond
-// the minimum OS page size. For these use cases, the caller must use OS
-// APIs directly.
-if (!@inComptime() and alignment > std.heap.pageSize()) @panic("Alignment must be smaller than page size.");
-
 if (byte_count == 0) {
 const ptr = comptime std.mem.alignBackward(usize, math.maxInt(usize), alignment);
 return @as([*]align(alignment) u8, @ptrFromInt(ptr));
@@ -3,12 +3,12 @@ const std = @import("std");
 const builtin = @import("builtin");
 const assert = std.debug.assert;
 const mem = std.mem;
-const heap = std.heap;
 const net = std.net;
 const posix = std.posix;
 const linux = std.os.linux;
 const testing = std.testing;
 const is_linux = builtin.os.tag == .linux;
+const page_size_min = std.heap.page_size_min;

 fd: posix.fd_t = -1,
 sq: SubmissionQueue,
@@ -1342,8 +1342,8 @@ pub const SubmissionQueue = struct {
 dropped: *u32,
 array: []u32,
 sqes: []linux.io_uring_sqe,
-mmap: []align(heap.min_page_size) u8,
-mmap_sqes: []align(heap.min_page_size) u8,
+mmap: []align(page_size_min) u8,
+mmap_sqes: []align(page_size_min) u8,

 // We use `sqe_head` and `sqe_tail` in the same way as liburing:
 // We increment `sqe_tail` (but not `tail`) for each call to `get_sqe()`.
@@ -1461,7 +1461,7 @@ pub const BufferGroup = struct {
 /// Pointer to the memory shared by the kernel.
 /// `buffers_count` of `io_uring_buf` structures are shared by the kernel.
 /// First `io_uring_buf` is overlaid by `io_uring_buf_ring` struct.
-br: *align(heap.min_page_size) linux.io_uring_buf_ring,
+br: *align(page_size_min) linux.io_uring_buf_ring,
 /// Contiguous block of memory of size (buffers_count * buffer_size).
 buffers: []u8,
 /// Size of each buffer in buffers.
@@ -1556,7 +1556,7 @@ pub const BufferGroup = struct {
 /// `fd` is IO_Uring.fd for which the provided buffer ring is being registered.
 /// `entries` is the number of entries requested in the buffer ring, must be power of 2.
 /// `group_id` is the chosen buffer group ID, unique in IO_Uring.
-pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(heap.min_page_size) linux.io_uring_buf_ring {
+pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(page_size_min) linux.io_uring_buf_ring {
 if (entries == 0 or entries > 1 << 15) return error.EntriesNotInRange;
 if (!std.math.isPowerOfTwo(entries)) return error.EntriesNotPowerOfTwo;

@@ -1572,7 +1572,7 @@ pub fn setup_buf_ring(fd: posix.fd_t, entries: u16, group_id: u16) !*align(heap.
     errdefer posix.munmap(mmap);
     assert(mmap.len == mmap_size);

-    const br: *align(heap.min_page_size) linux.io_uring_buf_ring = @ptrCast(mmap.ptr);
+    const br: *align(page_size_min) linux.io_uring_buf_ring = @ptrCast(mmap.ptr);
     try register_buf_ring(fd, @intFromPtr(br), entries, group_id);
     return br;
 }

@@ -1614,9 +1614,9 @@ fn handle_register_buf_ring_result(res: usize) !void {
 }

 // Unregisters a previously registered shared buffer ring, returned from io_uring_setup_buf_ring.
-pub fn free_buf_ring(fd: posix.fd_t, br: *align(heap.min_page_size) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
+pub fn free_buf_ring(fd: posix.fd_t, br: *align(page_size_min) linux.io_uring_buf_ring, entries: u32, group_id: u16) void {
     unregister_buf_ring(fd, group_id) catch {};
-    var mmap: []align(heap.min_page_size) u8 = undefined;
+    var mmap: []align(page_size_min) u8 = undefined;
     mmap.ptr = @ptrCast(br);
     mmap.len = entries * @sizeOf(linux.io_uring_buf);
     posix.munmap(mmap);

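For orientation, a hedged usage sketch of the two helpers above; `ring` stands in for an already-initialized io_uring instance, and the entry/group values are arbitrary:

    // Sketch only: `ring` is assumed; entries must be a power of two in (0, 1 << 15].
    const entries: u16 = 16;
    const group_id: u16 = 1;
    const br = try setup_buf_ring(ring.fd, entries, group_id);
    defer free_buf_ring(ring.fd, br, entries, group_id);
    // `br` carries the new `align(page_size_min)` guarantee from the signatures above.
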
@@ -11,13 +11,13 @@

 const std = @import("std");
 const mem = std.mem;
-const heap = std.heap;
 const elf = std.elf;
 const math = std.math;
 const assert = std.debug.assert;
 const native_arch = @import("builtin").cpu.arch;
 const linux = std.os.linux;
 const posix = std.posix;
+const page_size_min = std.heap.page_size_min;

 /// Represents an ELF TLS variant.
 ///

@@ -485,13 +485,13 @@ pub fn prepareArea(area: []u8) usize {
     };
 }

-// The main motivation for the size chosen here is that this is how much ends up being requested for
-// the thread-local variables of the `std.crypto.random` implementation. I'm not sure why it ends up
-// being so much; the struct itself is only 64 bytes. I think it has to do with being page-aligned
-// and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I
-// think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is
-// equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead.
-var main_thread_area_buffer: [0x2100]u8 align(heap.min_page_size) = undefined;
+/// The main motivation for the size chosen here is that this is how much ends up being requested for
+/// the thread-local variables of the `std.crypto.random` implementation. I'm not sure why it ends up
+/// being so much; the struct itself is only 64 bytes. I think it has to do with being page-aligned
+/// and LLVM or LLD is not smart enough to lay out the TLS data in a space-conserving way. Anyway, I
+/// think it's fine because it's less than 3 pages of memory, and putting it in the ELF like this is
+/// equivalent to moving the `mmap` call below into the kernel, avoiding syscall overhead.
+var main_thread_area_buffer: [0x2100]u8 align(page_size_min) = undefined;

 /// Computes the layout of the static TLS area, allocates the area, initializes all of its fields,
 /// and assigns the architecture-specific value to the TP register.

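The rename also makes the constraint here explicit: `align(...)` on a global must be a comptime value, so the comptime lower bound `page_size_min` is used rather than the runtime `std.heap.pageSize()`. A minimal sketch of the pattern (names and size are illustrative):

    const std = @import("std");

    // Comptime-known lower bound on the OS page size; valid in align(...).
    var static_buffer: [0x2100]u8 align(std.heap.page_size_min) = undefined;

    // The real page size is only known at runtime and may be larger.
    fn pageCount(len: usize) usize {
        const size = std.heap.pageSize();
        return (len + size - 1) / size;
    }
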
@@ -504,7 +504,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void {
     const area = blk: {
         // Fast path for the common case where the TLS data is really small, avoid an allocation and
         // use our local buffer.
-        if (area_desc.alignment <= heap.min_page_size and area_desc.size <= main_thread_area_buffer.len) {
+        if (area_desc.alignment <= page_size_min and area_desc.size <= main_thread_area_buffer.len) {
             break :blk main_thread_area_buffer[0..area_desc.size];
         }

@@ -518,7 +518,7 @@ pub fn initStatic(phdrs: []elf.Phdr) void {
     );
     if (@as(isize, @bitCast(begin_addr)) < 0) @trap();

-    const area_ptr: [*]align(heap.min_page_size) u8 = @ptrFromInt(begin_addr);
+    const area_ptr: [*]align(page_size_min) u8 = @ptrFromInt(begin_addr);

     // Make sure the slice is correctly aligned.
     const begin_aligned_addr = alignForward(begin_addr, area_desc.alignment);

@@ -18,13 +18,13 @@ const builtin = @import("builtin");
 const root = @import("root");
 const std = @import("std.zig");
 const mem = std.mem;
-const heap = std.heap;
 const fs = std.fs;
 const max_path_bytes = fs.max_path_bytes;
 const maxInt = std.math.maxInt;
 const cast = std.math.cast;
 const assert = std.debug.assert;
 const native_os = builtin.os.tag;
+const page_size_min = std.heap.page_size_min;

 test {
     _ = @import("posix/test.zig");

@@ -4695,7 +4695,7 @@ pub const MProtectError = error{
     OutOfMemory,
 } || UnexpectedError;

-pub fn mprotect(memory: []align(heap.min_page_size) u8, protection: u32) MProtectError!void {
+pub fn mprotect(memory: []align(page_size_min) u8, protection: u32) MProtectError!void {
     if (native_os == .windows) {
         const win_prot: windows.DWORD = switch (@as(u3, @truncate(protection))) {
             0b000 => windows.PAGE_NOACCESS,

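A short usage sketch of the updated signature; `region` is assumed to be a mapping that is already suitably page-aligned, which the parameter type now encodes:

    // Make the mapped region read-only; protection bits as in POSIX.
    try posix.mprotect(region, posix.PROT.READ);
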
@@ -4760,21 +4760,21 @@ pub const MMapError = error{
 /// * SIGSEGV - Attempted write into a region mapped as read-only.
 /// * SIGBUS - Attempted access to a portion of the buffer that does not correspond to the file
 pub fn mmap(
-    ptr: ?[*]align(heap.min_page_size) u8,
+    ptr: ?[*]align(page_size_min) u8,
     length: usize,
     prot: u32,
     flags: system.MAP,
     fd: fd_t,
     offset: u64,
-) MMapError![]align(heap.min_page_size) u8 {
+) MMapError![]align(page_size_min) u8 {
     const mmap_sym = if (lfs64_abi) system.mmap64 else system.mmap;
     const rc = mmap_sym(ptr, length, prot, @bitCast(flags), fd, @bitCast(offset));
     const err: E = if (builtin.link_libc) blk: {
-        if (rc != std.c.MAP_FAILED) return @as([*]align(heap.min_page_size) u8, @ptrCast(@alignCast(rc)))[0..length];
+        if (rc != std.c.MAP_FAILED) return @as([*]align(page_size_min) u8, @ptrCast(@alignCast(rc)))[0..length];
         break :blk @enumFromInt(system._errno().*);
     } else blk: {
         const err = errno(rc);
-        if (err == .SUCCESS) return @as([*]align(heap.min_page_size) u8, @ptrFromInt(rc))[0..length];
+        if (err == .SUCCESS) return @as([*]align(page_size_min) u8, @ptrFromInt(rc))[0..length];
         break :blk err;
     };
     switch (err) {

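An end-to-end sketch of the updated mmap/munmap pair; the `flags` initializer follows the `system.MAP` struct form used on Linux here and may differ on other targets:

    const page_len = std.heap.pageSize(); // runtime page size
    const region = try posix.mmap(
        null,
        page_len,
        posix.PROT.READ | posix.PROT.WRITE,
        .{ .TYPE = .PRIVATE, .ANONYMOUS = true },
        -1,
        0,
    );
    defer posix.munmap(region);
    // `region` is `[]align(page_size_min) u8`, matching the new return type.
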
@@ -4800,7 +4800,7 @@ pub fn mmap(
 /// Zig's munmap function does not, for two reasons:
 /// * It violates the Zig principle that resource deallocation must succeed.
 /// * The Windows function, VirtualFree, has this restriction.
-pub fn munmap(memory: []align(heap.min_page_size) const u8) void {
+pub fn munmap(memory: []align(page_size_min) const u8) void {
     switch (errno(system.munmap(memory.ptr, memory.len))) {
         .SUCCESS => return,
         .INVAL => unreachable, // Invalid parameters.

@@ -4814,7 +4814,7 @@ pub const MSyncError = error{
     PermissionDenied,
 } || UnexpectedError;

-pub fn msync(memory: []align(heap.min_page_size) u8, flags: i32) MSyncError!void {
+pub fn msync(memory: []align(page_size_min) u8, flags: i32) MSyncError!void {
     switch (errno(system.msync(memory.ptr, memory.len, flags))) {
         .SUCCESS => return,
         .PERM => return error.PermissionDenied,

@@ -7136,7 +7136,7 @@ pub const MincoreError = error{
 } || UnexpectedError;

 /// Determine whether pages are resident in memory.
-pub fn mincore(ptr: [*]align(heap.min_page_size) u8, length: usize, vec: [*]u8) MincoreError!void {
+pub fn mincore(ptr: [*]align(page_size_min) u8, length: usize, vec: [*]u8) MincoreError!void {
     return switch (errno(system.mincore(ptr, length, vec))) {
         .SUCCESS => {},
         .AGAIN => error.SystemResources,

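For illustration, a sketch querying residency of a single page with the updated pointer type, reusing a `region` such as the one mapped above:

    // One status byte per page queried; the low bit indicates residency.
    var vec: [1]u8 = undefined;
    try posix.mincore(region.ptr, std.heap.pageSize(), &vec);
    const resident = (vec[0] & 1) != 0;
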
@@ -7182,7 +7182,7 @@ pub const MadviseError = error{

 /// Give advice about use of memory.
 /// This syscall is optional and is sometimes configured to be disabled.
-pub fn madvise(ptr: [*]align(heap.min_page_size) u8, length: usize, advice: u32) MadviseError!void {
+pub fn madvise(ptr: [*]align(page_size_min) u8, length: usize, advice: u32) MadviseError!void {
     switch (errno(system.madvise(ptr, length, advice))) {
         .SUCCESS => return,
         .PERM => return error.PermissionDenied,

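And a matching sketch for madvise, again assuming the `region` mapping; `MADV.DONTNEED` is one common advice value, and the error is swallowed here since the syscall may be disabled:

    posix.madvise(region.ptr, region.len, posix.MADV.DONTNEED) catch {};
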
@@ -1560,7 +1560,7 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
     ReadGroupId,
 };

-var buf: [std.heap.min_page_size]u8 = undefined;
+var buf: [std.heap.page_size_min]u8 = undefined;
 var name_index: usize = 0;
 var state = State.Start;
 var uid: posix.uid_t = 0;

@@ -576,7 +576,7 @@ fn expandStackSize(phdrs: []elf.Phdr) void {
     switch (phdr.p_type) {
         elf.PT_GNU_STACK => {
             if (phdr.p_memsz == 0) break;
-            assert(phdr.p_memsz % std.heap.pageSize() == 0);
+            assert(phdr.p_memsz % std.heap.page_size_min == 0);

             // Silently fail if we are unable to get limits.
             const limits = std.posix.getrlimit(.STACK) catch break;

@@ -119,9 +119,12 @@ pub const Options = struct {
         args: anytype,
     ) void = log.defaultLog,

-    min_page_size: ?usize = null,
-    max_page_size: ?usize = null,
-    queryPageSizeFn: fn () usize = heap.defaultQueryPageSize,
+    /// Overrides `std.heap.page_size_min`.
+    page_size_min: ?usize = null,
+    /// Overrides `std.heap.page_size_max`.
+    page_size_max: ?usize = null,
+    /// Overrides default implementation for determining OS page size at runtime.
+    queryPageSize: fn () usize = heap.defaultQueryPageSize,

     fmt_max_depth: usize = fmt.default_max_depth,

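A sketch of how a program might set the renamed override knobs from its root source file; the 16 KiB values are purely illustrative:

    const std = @import("std");

    pub const std_options: std.Options = .{
        // Overrides std.heap.page_size_min / page_size_max for this build.
        .page_size_min = 16 * 1024,
        .page_size_max = 16 * 1024,
    };
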
@@ -1249,7 +1249,7 @@ fn unzip(f: *Fetch, out_dir: fs.Dir, reader: anytype) RunError!UnpackResult {
         .{@errorName(err)},
     ));
     defer zip_file.close();
-    var buf: [std.heap.min_page_size]u8 = undefined;
+    var buf: [4096]u8 = undefined;
     while (true) {
         const len = reader.readAll(&buf) catch |err| return f.fail(f.location_tok, try eb.printString(
             "read zip stream failed: {s}",