Merge remote-tracking branch 'origin/master' into llvm11

Andrew Kelley 2020-10-12 17:57:35 -07:00
commit c19dcafa17
40 changed files with 964 additions and 178 deletions

CODE_OF_CONDUCT.md (new file, 75 lines)

@ -0,0 +1,75 @@
# Code of Conduct
Hello, and welcome! 👋
The Zig community is decentralized. Anyone is free to start and maintain their
own space for people to gather, and edit
[the Community wiki page](https://github.com/ziglang/zig/wiki/Community) to add
a link. There is no concept of "official" or "unofficial", however, each
gathering place has its own moderators and rules.
This is Andrew Kelley speaking. At least for now, I'm the moderator of the
ziglang organization GitHub repositories and the #zig IRC channel on Freenode.
**This document contains the rules that govern these two spaces only**.
The rules here are strict. This space is for focused, on topic, technical work
on the Zig project only. It is everyone's responsibility to maintain a positive
environment, especially when disagreements occur.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language.
* Being respectful of differing viewpoints and experiences.
* Gracefully accepting constructive criticism.
* Helping another person accomplish their own goals.
* Showing empathy towards others.
* Showing appreciation for others' work.
* Validating someone else's experience, skills, insight, and use cases.
Examples of unacceptable behavior by participants include:
* Unwelcome sexual attention or advances, or use of sexualized language or
imagery that causes discomfort.
* Trolling, insulting/derogatory comments, and personal attacks. Anything
antagonistic towards someone else.
* Off-topic discussion of any kind - especially offensive or sensitive issues.
* Publishing others' private information, such as a physical or electronic
address, without explicit permission.
* Discussing this Code of Conduct or publicly accusing someone of violating it.
* Making someone else feel like an outsider or implying a lack of technical
abilities.
* Destructive behavior. Anything that harms Zig or another open-source project.
## Enforcement
If you need to report an issue, you can contact me or Loris Cro; we are both
paid by the Zig Software Foundation, so moderation of this space is part of
our job. We will swiftly remove anyone who is antagonizing others or being
generally destructive.
This includes private harassment. If person A is directly harassed or
antagonized by person B, person B will be blocked from participating in this
space, even if the harassment didn't take place on one of the mediums directly
governed by this Code of Conduct.
As noted, discussing this Code of Conduct should not take place on GitHub or IRC
because these spaces are for directly working on code, not for meta-discussion.
If you have any issues with it, you can contact me directly, or you can join one
of the community spaces that has different rules.
* Andrew Kelley <andrew@ziglang.org>
* Loris Cro <loris@ziglang.org>
## Conclusion
Thanks for reading the rules. Together, we can make this space welcoming and
inclusive for everyone, regardless of age, body size, disability, ethnicity,
sex characteristics, gender identity and expression, level of experience,
education, socio-economic status, nationality, personal appearance, race,
religion, or sexual identity and orientation.
Sincerely,
Andrew ✌️


@ -7,8 +7,10 @@ A general-purpose programming language and toolchain for maintaining
* [Introduction](https://ziglang.org/#Introduction)
* [Download & Documentation](https://ziglang.org/download)
* [Chapter 0 - Getting Started | ZigLearn.org](https://ziglearn.org/)
* [Community](https://github.com/ziglang/zig/wiki/Community)
* [Contributing](https://github.com/ziglang/zig/blob/master/CONTRIBUTING.md)
* [Code of Conduct](https://github.com/ziglang/zig/blob/master/CODE_OF_CONDUCT.md)
* [Frequently Asked Questions](https://github.com/ziglang/zig/wiki/FAQ)
* [Community Projects](https://github.com/ziglang/zig/wiki/Community-Projects)


@ -9905,7 +9905,10 @@ const std = @import("std");
const PreopenList = std.fs.wasi.PreopenList;
pub fn main() !void {
var preopens = PreopenList.init(std.heap.page_allocator);
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
const gpa = &general_purpose_allocator.allocator;
var preopens = PreopenList.init(gpa);
defer preopens.deinit();
try preopens.populate();
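For readers of the langref snippet above: the GeneralPurposeAllocator it now uses is normally paired with a deinit call that checks for leaks. A minimal sketch under the 2020-era std API, where the Allocator interface is reached through the .allocator field; the deinit line reflects typical usage and is not part of this doc change:

var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
defer _ = general_purpose_allocator.deinit(); // checks for leaked allocations on exit
const gpa = &general_purpose_allocator.allocator;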


@ -22,7 +22,7 @@ pub usingnamespace @import("os/bits.zig");
pub usingnamespace switch (std.Target.current.os.tag) {
.linux => @import("c/linux.zig"),
.windows => @import("c/windows.zig"),
.macosx, .ios, .tvos, .watchos => @import("c/darwin.zig"),
.macos, .ios, .tvos, .watchos => @import("c/darwin.zig"),
.freebsd, .kfreebsd => @import("c/freebsd.zig"),
.netbsd => @import("c/netbsd.zig"),
.dragonfly => @import("c/dragonfly.zig"),
@ -122,7 +122,7 @@ pub extern "c" fn readlink(noalias path: [*:0]const u8, noalias buf: [*]u8, bufs
pub extern "c" fn readlinkat(dirfd: fd_t, noalias path: [*:0]const u8, noalias buf: [*]u8, bufsize: usize) isize;
pub usingnamespace switch (builtin.os.tag) {
.macosx, .ios, .watchos, .tvos => struct {
.macos, .ios, .watchos, .tvos => struct {
pub const realpath = @"realpath$DARWIN_EXTSN";
pub const fstatat = @"fstatat$INODE64";
},
@ -189,7 +189,7 @@ pub usingnamespace switch (builtin.os.tag) {
pub const sigprocmask = __sigprocmask14;
pub const stat = __stat50;
},
.macosx, .ios, .watchos, .tvos => struct {
.macos, .ios, .watchos, .tvos => struct {
// XXX: close -> close$NOCANCEL
// XXX: getdirentries -> _getdirentries64
pub extern "c" fn clock_getres(clk_id: c_int, tp: *timespec) c_int;
@ -252,7 +252,7 @@ pub usingnamespace switch (builtin.os.tag) {
.linux, .freebsd, .kfreebsd, .netbsd, .openbsd => struct {
pub extern "c" fn malloc_usable_size(?*const c_void) usize;
},
.macosx, .ios, .watchos, .tvos => struct {
.macos, .ios, .watchos, .tvos => struct {
pub extern "c" fn malloc_size(?*const c_void) usize;
},
else => struct {},


@ -18,6 +18,14 @@ pub extern "c" fn _dyld_get_image_header(image_index: u32) ?*mach_header;
pub extern "c" fn _dyld_get_image_vmaddr_slide(image_index: u32) usize;
pub extern "c" fn _dyld_get_image_name(image_index: u32) [*:0]const u8;
pub const COPYFILE_ACL = 1 << 0;
pub const COPYFILE_STAT = 1 << 1;
pub const COPYFILE_XATTR = 1 << 2;
pub const COPYFILE_DATA = 1 << 3;
pub const copyfile_state_t = *opaque {};
pub extern "c" fn fcopyfile(from: fd_t, to: fd_t, state: ?copyfile_state_t, flags: u32) c_int;
pub extern "c" fn @"realpath$DARWIN_EXTSN"(noalias file_name: [*:0]const u8, noalias resolved_name: [*]u8) ?[*:0]u8;
pub extern "c" fn __getdirentries64(fd: c_int, buf_ptr: [*]u8, buf_len: usize, basep: *i64) isize;


@ -654,7 +654,7 @@ pub fn openSelfDebugInfo(allocator: *mem.Allocator) anyerror!DebugInfo {
.freebsd,
.netbsd,
.dragonfly,
.macosx,
.macos,
.windows,
=> return DebugInfo.init(allocator),
else => @compileError("openSelfDebugInfo unsupported for this platform"),
@ -1320,7 +1320,7 @@ const SymbolInfo = struct {
};
pub const ModuleDebugInfo = switch (builtin.os.tag) {
.macosx, .ios, .watchos, .tvos => struct {
.macos, .ios, .watchos, .tvos => struct {
base_address: usize,
mapped_memory: []const u8,
symbols: []const MachoSymbol,


@ -19,7 +19,7 @@ const max = std.math.max;
pub const DynLib = switch (builtin.os.tag) {
.linux => if (builtin.link_libc) DlDynlib else ElfDynLib,
.windows => WindowsDynLib,
.macosx, .tvos, .watchos, .ios, .freebsd => DlDynlib,
.macos, .tvos, .watchos, .ios, .freebsd => DlDynlib,
else => void,
};
@ -404,7 +404,7 @@ test "dynamic_library" {
const libname = switch (builtin.os.tag) {
.linux, .freebsd => "invalid_so.so",
.windows => "invalid_dll.dll",
.macosx, .tvos, .watchos, .ios => "invalid_dylib.dylib",
.macos, .tvos, .watchos, .ios => "invalid_dylib.dylib",
else => return error.SkipZigTest,
};


@ -66,7 +66,7 @@ pub const Loop = struct {
};
pub const EventFd = switch (builtin.os.tag) {
.macosx, .freebsd, .netbsd, .dragonfly => KEventFd,
.macos, .freebsd, .netbsd, .dragonfly => KEventFd,
.linux => struct {
base: ResumeNode,
epoll_op: u32,
@ -85,7 +85,7 @@ pub const Loop = struct {
};
pub const Basic = switch (builtin.os.tag) {
.macosx, .freebsd, .netbsd, .dragonfly => KEventBasic,
.macos, .freebsd, .netbsd, .dragonfly => KEventBasic,
.linux => struct {
base: ResumeNode,
},
@ -259,7 +259,7 @@ pub const Loop = struct {
self.extra_threads[extra_thread_index] = try Thread.spawn(self, workerRun);
}
},
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
self.os_data.kqfd = try os.kqueue();
errdefer os.close(self.os_data.kqfd);
@ -384,7 +384,7 @@ pub const Loop = struct {
while (self.available_eventfd_resume_nodes.pop()) |node| os.close(node.data.eventfd);
os.close(self.os_data.epollfd);
},
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
os.close(self.os_data.kqfd);
},
.windows => {
@ -478,7 +478,7 @@ pub const Loop = struct {
.linux => {
self.linuxWaitFd(fd, os.EPOLLET | os.EPOLLONESHOT | os.EPOLLIN);
},
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
self.bsdWaitKev(@intCast(usize, fd), os.EVFILT_READ, os.EV_ONESHOT);
},
else => @compileError("Unsupported OS"),
@ -490,7 +490,7 @@ pub const Loop = struct {
.linux => {
self.linuxWaitFd(fd, os.EPOLLET | os.EPOLLONESHOT | os.EPOLLOUT);
},
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
self.bsdWaitKev(@intCast(usize, fd), os.EVFILT_WRITE, os.EV_ONESHOT);
},
else => @compileError("Unsupported OS"),
@ -502,7 +502,7 @@ pub const Loop = struct {
.linux => {
self.linuxWaitFd(fd, os.EPOLLET | os.EPOLLONESHOT | os.EPOLLOUT | os.EPOLLIN);
},
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
self.bsdWaitKev(@intCast(usize, fd), os.EVFILT_READ, os.EV_ONESHOT);
self.bsdWaitKev(@intCast(usize, fd), os.EVFILT_WRITE, os.EV_ONESHOT);
},
@ -571,7 +571,7 @@ pub const Loop = struct {
const eventfd_node = &resume_stack_node.data;
eventfd_node.base.handle = next_tick_node.data;
switch (builtin.os.tag) {
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
const kevent_array = @as(*const [1]os.Kevent, &eventfd_node.kevent);
const empty_kevs = &[0]os.Kevent{};
_ = os.kevent(self.os_data.kqfd, kevent_array, empty_kevs, null) catch {
@ -633,7 +633,7 @@ pub const Loop = struct {
if (!builtin.single_threaded) {
switch (builtin.os.tag) {
.linux,
.macosx,
.macos,
.freebsd,
.netbsd,
.dragonfly,
@ -725,7 +725,7 @@ pub const Loop = struct {
}
return;
},
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
const final_kevent = @as(*const [1]os.Kevent, &self.os_data.final_kevent);
const empty_kevs = &[0]os.Kevent{};
// cannot fail because we already added it and this just enables it
@ -1218,7 +1218,7 @@ pub const Loop = struct {
}
}
},
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
var eventlist: [1]os.Kevent = undefined;
const empty_kevs = &[0]os.Kevent{};
const count = os.kevent(self.os_data.kqfd, empty_kevs, eventlist[0..], null) catch unreachable;
@ -1344,7 +1344,7 @@ pub const Loop = struct {
const OsData = switch (builtin.os.tag) {
.linux => LinuxOsData,
.macosx, .freebsd, .netbsd, .dragonfly => KEventData,
.macos, .freebsd, .netbsd, .dragonfly => KEventData,
.windows => struct {
io_port: windows.HANDLE,
extra_thread_count: usize,


@ -39,7 +39,7 @@ pub const Watch = @import("fs/watch.zig").Watch;
/// fit into a UTF-8 encoded array of this length.
/// The byte count includes room for a null sentinel byte.
pub const MAX_PATH_BYTES = switch (builtin.os.tag) {
.linux, .macosx, .ios, .freebsd, .netbsd, .dragonfly => os.PATH_MAX,
.linux, .macos, .ios, .freebsd, .netbsd, .dragonfly => os.PATH_MAX,
// Each UTF-16LE character may be expanded to 3 UTF-8 bytes.
// If it would require 4 UTF-8 bytes, then there would be a surrogate
// pair in the UTF-16LE, and we (over)account 3 bytes for it that way.
@ -303,7 +303,7 @@ pub const Dir = struct {
const IteratorError = error{AccessDenied} || os.UnexpectedError;
pub const Iterator = switch (builtin.os.tag) {
.macosx, .ios, .freebsd, .netbsd, .dragonfly => struct {
.macos, .ios, .freebsd, .netbsd, .dragonfly => struct {
dir: Dir,
seek: i64,
buf: [8192]u8, // TODO align(@alignOf(os.dirent)),
@ -318,7 +318,7 @@ pub const Dir = struct {
/// with subsequent calls to `next`, as well as when this `Dir` is deinitialized.
pub fn next(self: *Self) Error!?Entry {
switch (builtin.os.tag) {
.macosx, .ios => return self.nextDarwin(),
.macos, .ios => return self.nextDarwin(),
.freebsd, .netbsd, .dragonfly => return self.nextBsd(),
else => @compileError("unimplemented"),
}
@ -615,7 +615,7 @@ pub const Dir = struct {
pub fn iterate(self: Dir) Iterator {
switch (builtin.os.tag) {
.macosx, .ios, .freebsd, .netbsd, .dragonfly => return Iterator{
.macos, .ios, .freebsd, .netbsd, .dragonfly => return Iterator{
.dir = self,
.seek = 0,
.index = 0,
@ -1302,7 +1302,7 @@ pub const Dir = struct {
error.AccessDenied => |e| switch (builtin.os.tag) {
// non-Linux POSIX systems return EPERM when trying to delete a directory, so
// we need to handle that case specifically and translate the error
.macosx, .ios, .freebsd, .netbsd, .dragonfly => {
.macos, .ios, .freebsd, .netbsd, .dragonfly => {
// Don't follow symlinks to match unlinkat (which acts on symlinks rather than follows them)
const fstat = os.fstatatZ(self.fd, sub_path_c, os.AT_SYMLINK_NOFOLLOW) catch return e;
const is_dir = fstat.mode & os.S_IFMT == os.S_IFDIR;
@ -1490,6 +1490,19 @@ pub const Dir = struct {
return os.windows.ReadLink(self.fd, sub_path_w, buffer);
}
/// Read all of file contents using a preallocated buffer.
/// The returned slice has the same pointer as `buffer`. If the length matches `buffer.len`
/// the situation is ambiguous. It could either mean that the entire file was read, and
/// it exactly fits the buffer, or it could mean the buffer was not big enough for the
/// entire file.
pub fn readFile(self: Dir, file_path: []const u8, buffer: []u8) ![]u8 {
var file = try self.openFile(file_path, .{});
defer file.close();
const end_index = try file.readAll(buffer);
return buffer[0..end_index];
}
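A sketch of how a caller might resolve the documented ambiguity, falling back to the allocating variant declared just below; the file name, buffer size, and size limit are illustrative, not part of this change:

fn readConfig(dir: std.fs.Dir, allocator: *std.mem.Allocator) ![]u8 {
    var buf: [4096]u8 = undefined;
    const contents = try dir.readFile("config.txt", &buf);
    // A partially filled buffer is unambiguous: the whole file was read.
    if (contents.len < buf.len) return allocator.dupe(u8, contents);
    // A full buffer may mean the file is exactly 4096 bytes or larger, so re-read with a limit.
    return dir.readFileAlloc(allocator, "config.txt", 1024 * 1024);
}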
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
@ -1823,7 +1836,7 @@ pub const Dir = struct {
var atomic_file = try dest_dir.atomicFile(dest_path, .{ .mode = mode });
defer atomic_file.deinit();
try atomic_file.file.writeFileAll(in_file, .{ .in_len = size });
try copy_file(in_file.handle, atomic_file.file.handle);
return atomic_file.finish();
}
@ -2271,6 +2284,53 @@ pub fn realpathAlloc(allocator: *Allocator, pathname: []const u8) ![]u8 {
return allocator.dupe(u8, try os.realpath(pathname, &buf));
}
const CopyFileError = error{SystemResources} || os.CopyFileRangeError || os.SendFileError;
// Transfer all the data between two file descriptors in the most efficient way.
// The copy starts at offset 0; the initial offsets are preserved.
// No metadata is transferred over.
fn copy_file(fd_in: os.fd_t, fd_out: os.fd_t) CopyFileError!void {
if (comptime std.Target.current.isDarwin()) {
const rc = os.system.fcopyfile(fd_in, fd_out, null, os.system.COPYFILE_DATA);
switch (os.errno(rc)) {
0 => return,
os.EINVAL => unreachable,
os.ENOMEM => return error.SystemResources,
// The source file is not a directory, symbolic link, or regular file.
// Try with the fallback path before giving up.
os.ENOTSUP => {},
else => |err| return os.unexpectedErrno(err),
}
}
if (std.Target.current.os.tag == .linux) {
// Try copy_file_range first as that works at the FS level and is the
// most efficient method (if available).
var offset: u64 = 0;
cfr_loop: while (true) {
// The kernel checks the u64 value `offset+count` for overflow, use
// a 32 bit value so that the syscall won't return EINVAL except for
// impossibly large files (> 2^64-1 - 2^32-1).
const amt = try os.copy_file_range(fd_in, offset, fd_out, offset, math.maxInt(u32), 0);
// Terminate when no data was copied
if (amt == 0) break :cfr_loop;
offset += amt;
}
return;
}
// Sendfile is a zero-copy mechanism iff the OS supports it, otherwise the
// fallback code will copy the contents chunk by chunk.
const empty_iovec = [0]os.iovec_const{};
var offset: u64 = 0;
sendfile_loop: while (true) {
const amt = try os.sendfile(fd_out, fd_in, offset, 0, &empty_iovec, &empty_iovec, 0);
// Terminate when no data was copied
if (amt == 0) break :sendfile_loop;
offset += amt;
}
}
test "" {
if (builtin.os.tag != .wasi) {
_ = makeDirAbsolute;


@ -42,7 +42,7 @@ pub fn getAppDataDir(allocator: *mem.Allocator, appname: []const u8) GetAppDataD
else => return error.AppDataDirUnavailable,
}
},
.macosx => {
.macos => {
const home_dir = os.getenv("HOME") orelse {
// TODO look in /etc/passwd
return error.AppDataDirUnavailable;


@ -143,7 +143,7 @@ fn contains(entries: *const std.ArrayList(Dir.Entry), el: Dir.Entry) bool {
test "Dir.realpath smoke test" {
switch (builtin.os.tag) {
.linux, .windows, .macosx, .ios, .watchos, .tvos => {},
.linux, .windows, .macos, .ios, .watchos, .tvos => {},
else => return error.SkipZigTest,
}


@ -49,7 +49,7 @@ pub fn Watch(comptime V: type) type {
const OsData = switch (builtin.os.tag) {
// TODO https://github.com/ziglang/zig/issues/3778
.macosx, .freebsd, .netbsd, .dragonfly => KqOsData,
.macos, .freebsd, .netbsd, .dragonfly => KqOsData,
.linux => LinuxOsData,
.windows => WindowsOsData,
@ -160,7 +160,7 @@ pub fn Watch(comptime V: type) type {
return self;
},
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
self.* = Self{
.allocator = allocator,
.channel = channel,
@ -178,7 +178,7 @@ pub fn Watch(comptime V: type) type {
/// All addFile calls and removeFile calls must have completed.
pub fn deinit(self: *Self) void {
switch (builtin.os.tag) {
.macosx, .freebsd, .netbsd, .dragonfly => {
.macos, .freebsd, .netbsd, .dragonfly => {
// TODO we need to cancel the frames before destroying the lock
self.os_data.table_lock.deinit();
var it = self.os_data.file_table.iterator();
@ -229,7 +229,7 @@ pub fn Watch(comptime V: type) type {
pub fn addFile(self: *Self, file_path: []const u8, value: V) !?V {
switch (builtin.os.tag) {
.macosx, .freebsd, .netbsd, .dragonfly => return addFileKEvent(self, file_path, value),
.macos, .freebsd, .netbsd, .dragonfly => return addFileKEvent(self, file_path, value),
.linux => return addFileLinux(self, file_path, value),
.windows => return addFileWindows(self, file_path, value),
else => @compileError("Unsupported OS"),


@ -446,6 +446,26 @@ pub const Mutable = struct {
rma.positive = (a.positive == b.positive);
}
/// rma = a * a
///
/// `rma` may not alias with `a`.
///
/// Asserts the result fits in `rma`. An upper bound on the number of limbs needed by
/// rma is given by `2 * a.limbs.len + 1`.
///
/// If `allocator` is provided, it will be used for temporary storage to improve
/// multiplication performance. `error.OutOfMemory` is handled with a fallback algorithm.
pub fn sqrNoAlias(rma: *Mutable, a: Const, opt_allocator: ?*Allocator) void {
assert(rma.limbs.ptr != a.limbs.ptr); // illegal aliasing
mem.set(Limb, rma.limbs, 0);
llsquare_basecase(rma.limbs, a.limbs);
rma.normalize(2 * a.limbs.len + 1);
rma.positive = true;
}
/// q = a / b (rem r)
///
/// a / b are floored (rounded towards 0).
@ -1827,7 +1847,28 @@ pub const Managed = struct {
rma.setMetadata(m.positive, m.len);
}
pub fn pow(rma: *Managed, a: Managed, b: u32) !void {
/// r = a * a
pub fn sqr(rma: *Managed, a: Const) !void {
const needed_limbs = 2 * a.limbs.len + 1;
if (rma.limbs.ptr == a.limbs.ptr) {
var m = try Managed.initCapacity(rma.allocator, needed_limbs);
errdefer m.deinit();
var m_mut = m.toMutable();
m_mut.sqrNoAlias(a, rma.allocator);
m.setMetadata(m_mut.positive, m_mut.len);
rma.deinit();
rma.swap(&m);
} else {
try rma.ensureCapacity(needed_limbs);
var rma_mut = rma.toMutable();
rma_mut.sqrNoAlias(a, rma.allocator);
rma.setMetadata(rma_mut.positive, rma_mut.len);
}
}
pub fn pow(rma: *Managed, a: Const, b: u32) !void {
const needed_limbs = calcPowLimbsBufferLen(a.bitCountAbs(), b);
const limbs_buffer = try rma.allocator.alloc(Limb, needed_limbs);
@ -1837,7 +1878,7 @@ pub const Managed = struct {
var m = try Managed.initCapacity(rma.allocator, needed_limbs);
errdefer m.deinit();
var m_mut = m.toMutable();
try m_mut.pow(a.toConst(), b, limbs_buffer);
try m_mut.pow(a, b, limbs_buffer);
m.setMetadata(m_mut.positive, m_mut.len);
rma.deinit();
@ -1845,7 +1886,7 @@ pub const Managed = struct {
} else {
try rma.ensureCapacity(needed_limbs);
var rma_mut = rma.toMutable();
try rma_mut.pow(a.toConst(), b, limbs_buffer);
try rma_mut.pow(a, b, limbs_buffer);
rma.setMetadata(rma_mut.positive, rma_mut.len);
}
}
@ -1869,11 +1910,14 @@ fn llmulacc(opt_allocator: ?*Allocator, r: []Limb, a: []const Limb, b: []const L
assert(r.len >= x.len + y.len + 1);
// 48 is a pretty arbitrary size chosen based on performance of a factorial program.
if (x.len > 48) {
if (opt_allocator) |allocator| {
llmulacc_karatsuba(allocator, r, x, y) catch |err| switch (err) {
error.OutOfMemory => {}, // handled below
};
k_mul: {
if (x.len > 48) {
if (opt_allocator) |allocator| {
llmulacc_karatsuba(allocator, r, x, y) catch |err| switch (err) {
error.OutOfMemory => break :k_mul, // handled below
};
return;
}
}
}
@ -2203,6 +2247,42 @@ fn llxor(r: []Limb, a: []const Limb, b: []const Limb) void {
}
}
/// r MUST NOT alias x.
fn llsquare_basecase(r: []Limb, x: []const Limb) void {
@setRuntimeSafety(debug_safety);
const x_norm = x;
assert(r.len >= 2 * x_norm.len + 1);
// Compute the square of an N-limb bigint with only (N^2 + N)/2
// multiplications by exploiting the symmetry of the coefficients around the
// diagonal:
//
// a b c *
// a b c =
// -------------------
// ca cb cc +
// ba bb bc +
// aa ab ac
//
// Note that:
// - Each mixed-product term appears twice for each column,
// - Squares are always in the 2k (0 <= k < N) column
for (x_norm) |v, i| {
// Accumulate all the x[i]*x[j] (with i != j) products
llmulDigit(r[2 * i + 1 ..], x_norm[i + 1 ..], v);
}
// Each product appears twice, multiply by 2
llshl(r, r[0 .. 2 * x_norm.len], 1);
for (x_norm) |v, i| {
// Compute and add the squares
llmulDigit(r[2 * i ..], x[i .. i + 1], v);
}
}
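As a concrete count of the savings, squaring the 2-limb value x = x0 + x1*B expands to x*x = x0*x0 + 2*x0*x1*B + x1*x1*B^2: the first loop produces the single cross product x0*x1, the llshl by one bit doubles it, and the second loop adds the two squares, so 3 limb multiplications do the work of the 4 a general multiply would need, matching (N^2 + N)/2 = 3 for N = 2.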
/// Knuth 4.6.3
fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void {
var tmp1: []Limb = undefined;
@ -2212,9 +2292,9 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void {
// variable, use the output limbs and another temporary set to overcome this
// limitation.
// The initial assignment makes the result end in `r` so an extra memory
// copy is saved, each 1 flips the index twice so it's a no-op so count the
// 0.
const b_leading_zeros = @intCast(u5, @clz(u32, b));
// copy is saved, each 1 flips the index twice so it's only the zeros that
// matter.
const b_leading_zeros = @clz(u32, b);
const exp_zeros = @popCount(u32, ~b) - b_leading_zeros;
if (exp_zeros & 1 != 0) {
tmp1 = tmp_limbs;
@ -2224,32 +2304,28 @@ fn llpow(r: []Limb, a: []const Limb, b: u32, tmp_limbs: []Limb) void {
tmp2 = tmp_limbs;
}
const a_norm = a[0..llnormalize(a)];
mem.copy(Limb, tmp1, a_norm);
mem.set(Limb, tmp1[a_norm.len..], 0);
mem.copy(Limb, tmp1, a);
mem.set(Limb, tmp1[a.len..], 0);
// Scan the exponent as a binary number, from left to right, dropping the
// most significant bit set.
const exp_bits = @intCast(u5, 31 - b_leading_zeros);
var exp = @bitReverse(u32, b) >> 1 + b_leading_zeros;
// Square the result if the current bit is zero, square and multiply by a if
// it is one.
var exp_bits = 32 - 1 - b_leading_zeros;
var exp = b << @intCast(u5, 1 + b_leading_zeros);
var i: u5 = 0;
var i: usize = 0;
while (i < exp_bits) : (i += 1) {
// Square
{
mem.set(Limb, tmp2, 0);
const op = tmp1[0..llnormalize(tmp1)];
llmulacc(null, tmp2, op, op);
mem.swap([]Limb, &tmp1, &tmp2);
}
mem.set(Limb, tmp2, 0);
llsquare_basecase(tmp2, tmp1[0..llnormalize(tmp1)]);
mem.swap([]Limb, &tmp1, &tmp2);
// Multiply by a
if (exp & 1 != 0) {
if (@shlWithOverflow(u32, exp, 1, &exp)) {
mem.set(Limb, tmp2, 0);
llmulacc(null, tmp2, tmp1[0..llnormalize(tmp1)], a_norm);
llmulacc(null, tmp2, tmp1[0..llnormalize(tmp1)], a);
mem.swap([]Limb, &tmp1, &tmp2);
}
exp >>= 1;
}
}
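A worked pass of the rewritten scan for b = 10 (binary 1010): b_leading_zeros is 28, so exp_bits is 3 and exp holds the bits 010 left-aligned. Starting from tmp1 = a, the three iterations perform square (a^2), square then multiply because the shifted-out bit is 1 (a^5), and square again (a^10): one squaring per remaining bit plus one extra multiply per 1 bit, with the squarings now routed through llsquare_basecase instead of a general llmulacc.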


@ -720,6 +720,27 @@ test "big.int mul 0*0" {
testing.expect((try c.to(u32)) == 0);
}
test "big.int mul large" {
var a = try Managed.initCapacity(testing.allocator, 50);
defer a.deinit();
var b = try Managed.initCapacity(testing.allocator, 100);
defer b.deinit();
var c = try Managed.initCapacity(testing.allocator, 100);
defer c.deinit();
// Generate a number that's large enough to cross the thresholds for the use
// of subquadratic algorithms
for (a.limbs) |*p| {
p.* = std.math.maxInt(Limb);
}
a.setMetadata(true, 50);
try b.mul(a.toConst(), a.toConst());
try c.sqr(a.toConst());
testing.expect(b.eq(c));
}
test "big.int div single-single no rem" {
var a = try Managed.initSet(testing.allocator, 50);
defer a.deinit();
@ -1483,11 +1504,14 @@ test "big.int const to managed" {
test "big.int pow" {
{
var a = try Managed.initSet(testing.allocator, 10);
var a = try Managed.initSet(testing.allocator, -3);
defer a.deinit();
try a.pow(a, 8);
testing.expectEqual(@as(u32, 100000000), try a.to(u32));
try a.pow(a.toConst(), 3);
testing.expectEqual(@as(i32, -27), try a.to(i32));
try a.pow(a.toConst(), 4);
testing.expectEqual(@as(i32, 531441), try a.to(i32));
}
{
var a = try Managed.initSet(testing.allocator, 10);
@ -1497,9 +1521,9 @@ test "big.int pow" {
defer y.deinit();
// y and a are not aliased
try y.pow(a, 123);
try y.pow(a.toConst(), 123);
// y and a are aliased
try a.pow(a, 123);
try a.pow(a.toConst(), 123);
testing.expect(a.eq(y));
@ -1517,18 +1541,18 @@ test "big.int pow" {
var a = try Managed.initSet(testing.allocator, 0);
defer a.deinit();
try a.pow(a, 100);
try a.pow(a.toConst(), 100);
testing.expectEqual(@as(i32, 0), try a.to(i32));
try a.set(1);
try a.pow(a, 0);
try a.pow(a.toConst(), 0);
testing.expectEqual(@as(i32, 1), try a.to(i32));
try a.pow(a, 100);
try a.pow(a.toConst(), 100);
testing.expectEqual(@as(i32, 1), try a.to(i32));
try a.set(-1);
try a.pow(a, 15);
try a.pow(a.toConst(), 15);
testing.expectEqual(@as(i32, -1), try a.to(i32));
try a.pow(a, 16);
try a.pow(a.toConst(), 16);
testing.expectEqual(@as(i32, 1), try a.to(i32));
}
}


@ -62,7 +62,7 @@ pub const system = if (@hasDecl(root, "os") and root.os != @This())
else if (builtin.link_libc)
std.c
else switch (builtin.os.tag) {
.macosx, .ios, .watchos, .tvos => darwin,
.macos, .ios, .watchos, .tvos => darwin,
.freebsd => freebsd,
.linux => linux,
.netbsd => netbsd,
@ -354,7 +354,7 @@ pub fn read(fd: fd_t, buf: []u8) ReadError!usize {
// Prevents EINVAL.
const max_count = switch (std.Target.current.os.tag) {
.linux => 0x7ffff000,
.macosx, .ios, .watchos, .tvos => math.maxInt(i32),
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
};
const adjusted_len = math.min(max_count, buf.len);
@ -582,7 +582,7 @@ pub fn ftruncate(fd: fd_t, length: u64) TruncateError!void {
/// On these systems, the read races with concurrent writes to the same file descriptor.
pub fn preadv(fd: fd_t, iov: []const iovec, offset: u64) PReadError!usize {
const have_pread_but_not_preadv = switch (std.Target.current.os.tag) {
.windows, .macosx, .ios, .watchos, .tvos => true,
.windows, .macos, .ios, .watchos, .tvos => true,
else => false,
};
if (have_pread_but_not_preadv) {
@ -709,7 +709,7 @@ pub fn write(fd: fd_t, bytes: []const u8) WriteError!usize {
const max_count = switch (std.Target.current.os.tag) {
.linux => 0x7ffff000,
.macosx, .ios, .watchos, .tvos => math.maxInt(i32),
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
};
const adjusted_len = math.min(max_count, bytes.len);
@ -863,7 +863,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
// Prevent EINVAL.
const max_count = switch (std.Target.current.os.tag) {
.linux => 0x7ffff000,
.macosx, .ios, .watchos, .tvos => math.maxInt(i32),
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(isize),
};
const adjusted_len = math.min(max_count, bytes.len);
@ -915,7 +915,7 @@ pub fn pwrite(fd: fd_t, bytes: []const u8, offset: u64) PWriteError!usize {
/// If `iov.len` is larger than will fit in a `u31`, a partial write will occur.
pub fn pwritev(fd: fd_t, iov: []const iovec_const, offset: u64) PWriteError!usize {
const have_pwrite_but_not_pwritev = switch (std.Target.current.os.tag) {
.windows, .macosx, .ios, .watchos, .tvos => true,
.windows, .macos, .ios, .watchos, .tvos => true,
else => false,
};
@ -4091,7 +4091,7 @@ pub fn getFdPath(fd: fd_t, out_buffer: *[MAX_PATH_BYTES]u8) RealPathError![]u8 {
const end_index = std.unicode.utf16leToUtf8(out_buffer, wide_slice) catch unreachable;
return out_buffer[0..end_index];
},
.macosx, .ios, .watchos, .tvos => {
.macos, .ios, .watchos, .tvos => {
// On macOS, we can use F_GETPATH fcntl command to query the OS for
// the path to the file descriptor.
@memset(out_buffer, 0, MAX_PATH_BYTES);
@ -4688,7 +4688,7 @@ pub fn sendfile(
});
const max_count = switch (std.Target.current.os.tag) {
.linux => 0x7ffff000,
.macosx, .ios, .watchos, .tvos => math.maxInt(i32),
.macos, .ios, .watchos, .tvos => math.maxInt(i32),
else => math.maxInt(size_t),
};
@ -4846,7 +4846,7 @@ pub fn sendfile(
}
}
},
.macosx, .ios, .tvos, .watchos => sf: {
.macos, .ios, .tvos, .watchos => sf: {
var hdtr_data: std.c.sf_hdtr = undefined;
var hdtr: ?*std.c.sf_hdtr = null;
if (headers.len != 0 or trailers.len != 0) {
@ -4945,6 +4945,9 @@ pub fn sendfile(
pub const CopyFileRangeError = error{
FileTooBig,
InputOutput,
/// `fd_in` is not open for reading; or `fd_out` is not open for writing;
/// or the `O_APPEND` flag is set for `fd_out`.
FilesOpenedWithWrongFlags,
IsDir,
OutOfMemory,
NoSpaceLeft,
@ -4953,6 +4956,11 @@ pub const CopyFileRangeError = error{
FileBusy,
} || PReadError || PWriteError || UnexpectedError;
var has_copy_file_range_syscall = init: {
const kernel_has_syscall = std.Target.current.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) orelse true;
break :init std.atomic.Int(bool).init(kernel_has_syscall);
};
/// Transfer data between file descriptors at specified offsets.
/// Returns the number of bytes written, which can be less than requested.
///
@ -4981,22 +4989,18 @@ pub const CopyFileRangeError = error{
pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len: usize, flags: u32) CopyFileRangeError!usize {
const use_c = std.c.versionCheck(.{ .major = 2, .minor = 27, .patch = 0 }).ok;
// TODO support for other systems than linux
const try_syscall = comptime std.Target.current.os.isAtLeast(.linux, .{ .major = 4, .minor = 5 }) != false;
if (use_c or try_syscall) {
if (std.Target.current.os.tag == .linux and
(use_c or has_copy_file_range_syscall.get()))
{
const sys = if (use_c) std.c else linux;
var off_in_copy = @bitCast(i64, off_in);
var off_out_copy = @bitCast(i64, off_out);
const rc = sys.copy_file_range(fd_in, &off_in_copy, fd_out, &off_out_copy, len, flags);
// TODO avoid wasting a syscall every time if kernel is too old and returns ENOSYS https://github.com/ziglang/zig/issues/1018
switch (sys.getErrno(rc)) {
0 => return @intCast(usize, rc),
EBADF => unreachable,
EBADF => return error.FilesOpenedWithWrongFlags,
EFBIG => return error.FileTooBig,
EIO => return error.InputOutput,
EISDIR => return error.IsDir,
@ -5005,9 +5009,14 @@ pub fn copy_file_range(fd_in: fd_t, off_in: u64, fd_out: fd_t, off_out: u64, len
EOVERFLOW => return error.Unseekable,
EPERM => return error.PermissionDenied,
ETXTBSY => return error.FileBusy,
EINVAL => {}, // these may not be regular files, try fallback
EXDEV => {}, // support for cross-filesystem copy added in Linux 5.3, use fallback
ENOSYS => {}, // syscall added in Linux 4.5, use fallback
// these may not be regular files, try fallback
EINVAL => {},
// support for cross-filesystem copy added in Linux 5.3, use fallback
EXDEV => {},
// syscall added in Linux 4.5, use fallback
ENOSYS => {
has_copy_file_range_syscall.set(false);
},
else => |err| return unexpectedErrno(err),
}
}


@ -12,7 +12,7 @@ const std = @import("std");
const root = @import("root");
pub usingnamespace switch (std.Target.current.os.tag) {
.macosx, .ios, .tvos, .watchos => @import("bits/darwin.zig"),
.macos, .ios, .tvos, .watchos => @import("bits/darwin.zig"),
.dragonfly => @import("bits/dragonfly.zig"),
.freebsd => @import("bits/freebsd.zig"),
.linux => @import("bits/linux.zig"),


@ -360,7 +360,7 @@ fn iter_fn(info: *dl_phdr_info, size: usize, counter: *usize) IterFnError!void {
}
test "dl_iterate_phdr" {
if (builtin.os.tag == .windows or builtin.os.tag == .wasi or builtin.os.tag == .macosx)
if (builtin.os.tag == .windows or builtin.os.tag == .wasi or builtin.os.tag == .macos)
return error.SkipZigTest;
var counter: usize = 0;


@ -618,7 +618,7 @@ test "PackedIntArray at end of available memory" {
if (we_are_testing_this_with_stage1_which_leaks_comptime_memory) return error.SkipZigTest;
switch (builtin.os.tag) {
.linux, .macosx, .ios, .freebsd, .netbsd, .windows => {},
.linux, .macos, .ios, .freebsd, .netbsd, .windows => {},
else => return,
}
const PackedArray = PackedIntArray(u3, 8);
@ -639,7 +639,7 @@ test "PackedIntSlice at end of available memory" {
if (we_are_testing_this_with_stage1_which_leaks_comptime_memory) return error.SkipZigTest;
switch (builtin.os.tag) {
.linux, .macosx, .ios, .freebsd, .netbsd, .windows => {},
.linux, .macos, .ios, .freebsd, .netbsd, .windows => {},
else => return,
}
const PackedSlice = PackedIntSlice(u11);


@ -585,7 +585,7 @@ pub const UserInfo = struct {
/// POSIX function which gets a uid from username.
pub fn getUserInfo(name: []const u8) !UserInfo {
return switch (builtin.os.tag) {
.linux, .macosx, .watchos, .tvos, .ios, .freebsd, .netbsd => posixGetUserInfo(name),
.linux, .macos, .watchos, .tvos, .ios, .freebsd, .netbsd => posixGetUserInfo(name),
else => @compileError("Unsupported OS"),
};
}
@ -688,7 +688,7 @@ pub fn getBaseAddress() usize {
const phdr = os.system.getauxval(std.elf.AT_PHDR);
return phdr - @sizeOf(std.elf.Ehdr);
},
.macosx, .freebsd, .netbsd => {
.macos, .freebsd, .netbsd => {
return @ptrToInt(&std.c._mh_execute_header);
},
.windows => return @ptrToInt(os.windows.kernel32.GetModuleHandleW(null)),
@ -733,7 +733,7 @@ pub fn getSelfExeSharedLibPaths(allocator: *Allocator) error{OutOfMemory}![][:0]
}.callback);
return paths.toOwnedSlice();
},
.macosx, .ios, .watchos, .tvos => {
.macos, .ios, .watchos, .tvos => {
var paths = List.init(allocator);
errdefer {
const slice = paths.toOwnedSlice();


@ -44,7 +44,7 @@ pub fn clear_cache(start: usize, end: usize) callconv(.C) void {
else => false,
};
const apple = switch (os) {
.ios, .macosx, .watchos, .tvos => true,
.ios, .macos, .watchos, .tvos => true,
else => false,
};
if (x86) {


@ -31,7 +31,7 @@ pub const Target = struct {
kfreebsd,
linux,
lv2,
macosx,
macos,
netbsd,
openbsd,
solaris,
@ -61,7 +61,7 @@ pub const Target = struct {
pub fn isDarwin(tag: Tag) bool {
return switch (tag) {
.ios, .macosx, .watchos, .tvos => true,
.ios, .macos, .watchos, .tvos => true,
else => false,
};
}
@ -234,7 +234,7 @@ pub const Target = struct {
.max = .{ .major = 12, .minor = 1 },
},
},
.macosx => return .{
.macos => return .{
.semver = .{
.min = .{ .major = 10, .minor = 13 },
.max = .{ .major = 10, .minor = 15, .patch = 3 },
@ -312,7 +312,7 @@ pub const Target = struct {
.windows => return TaggedVersionRange{ .windows = self.version_range.windows },
.freebsd,
.macosx,
.macos,
.ios,
.tvos,
.watchos,
@ -341,7 +341,7 @@ pub const Target = struct {
return switch (os.tag) {
.freebsd,
.netbsd,
.macosx,
.macos,
.ios,
.tvos,
.watchos,
@ -450,7 +450,7 @@ pub const Target = struct {
.other,
=> return .eabi,
.openbsd,
.macosx,
.macos,
.freebsd,
.ios,
.tvos,
@ -1277,7 +1277,7 @@ pub const Target = struct {
.ios,
.tvos,
.watchos,
.macosx,
.macos,
.uefi,
.windows,
.emscripten,
@ -1450,7 +1450,7 @@ pub const Target = struct {
.ios,
.tvos,
.watchos,
.macosx,
.macos,
.uefi,
.windows,
.emscripten,


@ -143,7 +143,7 @@ pub const Timer = struct {
/// be less precise
frequency: switch (builtin.os.tag) {
.windows => u64,
.macosx, .ios, .tvos, .watchos => os.darwin.mach_timebase_info_data,
.macos, .ios, .tvos, .watchos => os.darwin.mach_timebase_info_data,
else => void,
},
resolution: u64,


@ -137,7 +137,7 @@ pub const CrossTarget = struct {
},
.freebsd,
.macosx,
.macos,
.ios,
.tvos,
.watchos,
@ -578,7 +578,7 @@ pub const CrossTarget = struct {
const os = switch (self.getOsTag()) {
.windows => "windows",
.linux => "linux",
.macosx => "macos",
.macos => "macos",
else => return error.UnsupportedVcpkgOperatingSystem,
};
@ -718,7 +718,7 @@ pub const CrossTarget = struct {
=> return error.InvalidOperatingSystemVersion,
.freebsd,
.macosx,
.macos,
.ios,
.tvos,
.watchos,


@ -267,7 +267,7 @@ pub const NativeTargetInfo = struct {
os.version_range.windows.max = @intToEnum(Target.Os.WindowsVersion, version);
os.version_range.windows.min = @intToEnum(Target.Os.WindowsVersion, version);
},
.macosx => {
.macos => {
var scbuf: [32]u8 = undefined;
var size: usize = undefined;


@ -576,6 +576,30 @@ pub const Manifest = struct {
}
};
/// On operating systems that support symlinks, does a readlink. On other operating systems,
/// uses the file contents. Windows supports symlinks but only with elevated privileges, so
/// it is treated as not supporting symlinks.
pub fn readSmallFile(dir: fs.Dir, sub_path: []const u8, buffer: []u8) ![]u8 {
if (std.Target.current.os.tag == .windows) {
return dir.readFile(sub_path, buffer);
} else {
return dir.readLink(sub_path, buffer);
}
}
/// On operating systems that support symlinks, does a symlink. On other operating systems,
/// uses the file contents. Windows supports symlinks but only with elevated privileges, so
/// it is treated as not supporting symlinks.
/// `data` must be a valid UTF-8 encoded file path and 255 bytes or fewer.
pub fn writeSmallFile(dir: fs.Dir, sub_path: []const u8, data: []const u8) !void {
assert(data.len <= 255);
if (std.Target.current.os.tag == .windows) {
return dir.writeFile(sub_path, data);
} else {
return dir.symLink(data, sub_path, .{});
}
}
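A sketch of the round trip these two helpers give the cache code; the helper name, basename, and buffer size are illustrative, not part of this change, and the real call sites appear later in this commit:

fn checkDigest(cache_dir: fs.Dir, new_digest: []const u8) !bool {
    var buf: [64]u8 = undefined;
    const prev = readSmallFile(cache_dir, "stage1.id", &buf) catch buf[0..0];
    if (std.mem.eql(u8, prev, new_digest)) return true; // cache hit
    try writeSmallFile(cache_dir, "stage1.id", new_digest); // persist for the next invocation
    return false;
}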
fn hashFile(file: fs.File, bin_digest: []u8) !void {
var buf: [1024]u8 = undefined;


@ -922,7 +922,10 @@ pub fn create(gpa: *Allocator, options: InitOptions) !*Compilation {
try comp.work_queue.writeItem(.libcxx);
try comp.work_queue.writeItem(.libcxxabi);
}
if (is_exe_or_dyn_lib and build_options.is_stage1) {
const needs_compiler_rt_and_c = is_exe_or_dyn_lib or
(comp.getTarget().isWasm() and comp.bin_file.options.output_mode != .Obj);
if (needs_compiler_rt_and_c and build_options.is_stage1) {
try comp.work_queue.writeItem(.{ .libcompiler_rt = {} });
if (!comp.bin_file.options.link_libc) {
try comp.work_queue.writeItem(.{ .zig_libc = {} });
@ -2602,8 +2605,12 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
// We use an extra hex-encoded byte here to store some flags.
var prev_digest_buf: [digest.len + 2]u8 = undefined;
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
log.debug("stage1 {} new_digest={} readlink error: {}", .{ mod.root_pkg.root_src_path, digest, @errorName(err) });
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("stage1 {} new_digest={} error: {}", .{ mod.root_pkg.root_src_path, digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
@ -2774,7 +2781,7 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
const digest = man.final();
// Update the dangling symlink with the digest. If it fails we can continue; it only
// Update the small file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
const stage1_flags_byte = @bitCast(u8, mod.stage1_flags);
log.debug("stage1 {} final digest={} flags={x}", .{
@ -2789,10 +2796,10 @@ fn updateStage1Module(comp: *Compilation, main_progress_node: *std.Progress.Node
log.debug("saved digest + flags: '{s}' (byte = {}) have_winmain_crt_startup={}", .{
digest_plus_flags, stage1_flags_byte, mod.stage1_flags.have_winmain_crt_startup,
});
directory.handle.symLink(&digest_plus_flags, id_symlink_basename, .{}) catch |err| {
log.warn("failed to save stage1 hash digest symlink: {}", .{@errorName(err)});
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest_plus_flags) catch |err| {
log.warn("failed to save stage1 hash digest file: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
// Failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
log.warn("failed to write cache manifest when linking: {}", .{@errorName(err)});
};


@ -93,7 +93,7 @@ pub const Export = struct {
/// Byte offset into the file that contains the export directive.
src: usize,
/// Represents the position of the export, if any, in the output file.
link: link.File.Elf.Export,
link: link.File.Export,
/// The Decl that performs the export. Note that this is *not* the Decl being exported.
owner_decl: *Decl,
/// The Decl being exported. Note this is *not* the Decl performing the export.
@ -1712,7 +1712,10 @@ fn deleteDeclExports(self: *Module, decl: *Decl) void {
}
}
if (self.comp.bin_file.cast(link.File.Elf)) |elf| {
elf.deleteExport(exp.link);
elf.deleteExport(exp.link.elf);
}
if (self.comp.bin_file.cast(link.File.MachO)) |macho| {
macho.deleteExport(exp.link.macho);
}
if (self.failed_exports.remove(exp)) |entry| {
entry.value.destroy(self.gpa);
@ -1875,7 +1878,13 @@ pub fn analyzeExport(self: *Module, scope: *Scope, src: usize, borrowed_symbol_n
new_export.* = .{
.options = .{ .name = symbol_name },
.src = src,
.link = .{},
.link = switch (self.comp.bin_file.tag) {
.coff => .{ .coff = {} },
.elf => .{ .elf = link.File.Elf.Export{} },
.macho => .{ .macho = link.File.MachO.Export{} },
.c => .{ .c = {} },
.wasm => .{ .wasm = {} },
},
.owner_decl = owner_decl,
.exported_decl = exported_decl,
.status = .in_progress,


@ -69,7 +69,7 @@ pub fn targetTriple(allocator: *Allocator, target: std.Target) ![]u8 {
.kfreebsd => "kfreebsd",
.linux => "linux",
.lv2 => "lv2",
.macosx => "macosx",
.macos => "macosx",
.netbsd => "netbsd",
.openbsd => "openbsd",
.solaris => "solaris",


@ -133,6 +133,14 @@ pub const File = struct {
wasm: ?Wasm.FnData,
};
pub const Export = union {
elf: Elf.Export,
coff: void,
macho: MachO.Export,
c: void,
wasm: void,
};
/// For DWARF .debug_info.
pub const DbgInfoTypeRelocsTable = std.HashMapUnmanaged(Type, DbgInfoTypeReloc, Type.hash, Type.eql, std.hash_map.DefaultMaxLoadPercentage);
@ -458,8 +466,12 @@ pub const File = struct {
const digest = ch.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| b: {
log.debug("archive new_digest={} readlink error: {}", .{ digest, @errorName(err) });
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| b: {
log.debug("archive new_digest={} readFile error: {}", .{ digest, @errorName(err) });
break :b prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
@ -504,8 +516,8 @@ pub const File = struct {
const bad = llvm.WriteArchive(full_out_path_z, object_files.items.ptr, object_files.items.len, os_type);
if (bad) return error.UnableToWriteArchive;
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
std.log.warn("failed to save archive hash digest symlink: {}", .{@errorName(err)});
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
std.log.warn("failed to save archive hash digest file: {}", .{@errorName(err)});
};
ch.writeManifest() catch |err| {


@ -854,8 +854,12 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
_ = try man.hit();
digest = man.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
log.debug("COFF LLD new_digest={} readlink error: {}", .{ digest, @errorName(err) });
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("COFF LLD new_digest={} error: {}", .{ digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
@ -1180,10 +1184,10 @@ fn linkWithLLD(self: *Coff, comp: *Compilation) !void {
}
if (!self.base.options.disable_lld_caching) {
// Update the dangling symlink with the digest. If it fails we can continue; it only
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
std.log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {


@ -1326,8 +1326,12 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
digest = man.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
log.debug("ELF LLD new_digest={} readlink error: {}", .{ digest, @errorName(err) });
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("ELF LLD new_digest={} error: {}", .{ digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
@ -1647,10 +1651,10 @@ fn linkWithLLD(self: *Elf, comp: *Compilation) !void {
}
if (!self.base.options.disable_lld_caching) {
// Update the dangling symlink with the digest. If it fails we can continue; it only
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
std.log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
@ -2588,7 +2592,7 @@ pub fn updateDeclExports(
},
};
const stt_bits: u8 = @truncate(u4, decl_sym.st_info);
if (exp.link.sym_index) |i| {
if (exp.link.elf.sym_index) |i| {
const sym = &self.global_symbols.items[i];
sym.* = .{
.st_name = try self.updateString(sym.st_name, exp.options.name),
@ -2613,7 +2617,7 @@ pub fn updateDeclExports(
.st_size = decl_sym.st_size,
};
exp.link.sym_index = @intCast(u32, i);
exp.link.elf.sym_index = @intCast(u32, i);
}
}
}


@ -20,6 +20,8 @@ const File = link.File;
const Cache = @import("../Cache.zig");
const target_util = @import("../target.zig");
const Trie = @import("MachO/Trie.zig");
pub const base_tag: File.Tag = File.Tag.macho;
const LoadCommand = union(enum) {
@ -113,6 +115,9 @@ local_symbols: std.ArrayListUnmanaged(macho.nlist_64) = .{},
global_symbols: std.ArrayListUnmanaged(macho.nlist_64) = .{},
/// Table of all undefined symbols
undef_symbols: std.ArrayListUnmanaged(macho.nlist_64) = .{},
global_symbol_free_list: std.ArrayListUnmanaged(u32) = .{},
dyld_stub_binder_index: ?u16 = null,
/// Table of symbol names aka the string table.
@ -176,6 +181,10 @@ pub const TextBlock = struct {
};
};
pub const Export = struct {
sym_index: ?u32 = null,
};
pub const SrcFn = struct {
pub const empty = SrcFn{};
};
@ -256,10 +265,10 @@ pub fn flushModule(self: *MachO, comp: *Compilation) !void {
switch (self.base.options.output_mode) {
.Exe => {
if (self.entry_addr) |addr| {
// Write export trie.
try self.writeExportTrie();
// Write export trie.
try self.writeExportTrie();
if (self.entry_addr) |addr| {
// Update LC_MAIN with entry offset
const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
const main_cmd = &self.load_commands.items[self.main_cmd_index.?].EntryPoint;
@ -410,8 +419,12 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
digest = man.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
log.debug("MachO LLD new_digest={} readlink error: {}", .{ digest, @errorName(err) });
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("MachO LLD new_digest={} error: {}", .{ digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
@ -512,7 +525,7 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
try argv.append(darwinArchString(target.cpu.arch));
switch (target.os.tag) {
.macosx => {
.macos => {
try argv.append("-macosx_version_min");
},
.ios, .tvos, .watchos => switch (target.cpu.arch) {
@ -665,10 +678,10 @@ fn linkWithLLD(self: *MachO, comp: *Compilation) !void {
}
if (!self.base.options.disable_lld_caching) {
// Update the dangling symlink with the digest. If it fails we can continue; it only
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
std.log.warn("failed to save linking hash digest file: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
@ -711,6 +724,7 @@ pub fn deinit(self: *MachO) void {
self.string_table.deinit(self.base.allocator);
self.undef_symbols.deinit(self.base.allocator);
self.global_symbols.deinit(self.base.allocator);
self.global_symbol_free_list.deinit(self.base.allocator);
self.local_symbols.deinit(self.base.allocator);
self.sections.deinit(self.base.allocator);
self.load_commands.deinit(self.base.allocator);
@ -835,7 +849,7 @@ pub fn updateDeclExports(
},
};
const n_type = decl_sym.n_type | macho.N_EXT;
if (exp.link.sym_index) |i| {
if (exp.link.macho.sym_index) |i| {
const sym = &self.global_symbols.items[i];
sym.* = .{
.n_strx = try self.updateString(sym.n_strx, exp.options.name),
@ -846,8 +860,10 @@ pub fn updateDeclExports(
};
} else {
const name_str_index = try self.makeString(exp.options.name);
_ = self.global_symbols.addOneAssumeCapacity();
const i = self.global_symbols.items.len - 1;
const i = if (self.global_symbol_free_list.popOrNull()) |i| i else blk: {
_ = self.global_symbols.addOneAssumeCapacity();
break :blk self.global_symbols.items.len - 1;
};
self.global_symbols.items[i] = .{
.n_strx = name_str_index,
.n_type = n_type,
@ -856,11 +872,17 @@ pub fn updateDeclExports(
.n_value = decl_sym.n_value,
};
exp.link.sym_index = @intCast(u32, i);
exp.link.macho.sym_index = @intCast(u32, i);
}
}
}
pub fn deleteExport(self: *MachO, exp: Export) void {
const sym_index = exp.sym_index orelse return;
self.global_symbol_free_list.append(self.base.allocator, sym_index) catch {};
self.global_symbols.items[sym_index].n_type = 0;
}
pub fn freeDecl(self: *MachO, decl: *Module.Decl) void {}
pub fn getDeclVAddr(self: *MachO, decl: *const Module.Decl) u64 {
@ -1383,25 +1405,30 @@ fn writeAllUndefSymbols(self: *MachO) !void {
}
fn writeExportTrie(self: *MachO) !void {
assert(self.entry_addr != null);
if (self.global_symbols.items.len == 0) return; // No exports, nothing to do.
// TODO implement mechanism for generating a prefix tree of the exported symbols
// single branch export trie
var buf = [_]u8{0} ** 24;
buf[0] = 0; // root node
buf[1] = 1; // 1 branch from root
mem.copy(u8, buf[2..], "_start");
buf[8] = 0;
buf[9] = 9 + 1;
var trie: Trie = .{};
defer trie.deinit(self.base.allocator);
const text_segment = self.load_commands.items[self.text_segment_cmd_index.?].Segment;
const addr = self.entry_addr.? - text_segment.vmaddr;
const written = try std.debug.leb.writeULEB128Mem(buf[12..], addr);
buf[10] = @intCast(u8, written) + 1;
buf[11] = 0;
for (self.global_symbols.items) |symbol| {
// TODO figure out if we should put all global symbols into the export trie
const name = self.getString(symbol.n_strx);
assert(symbol.n_value >= text_segment.vmaddr);
try trie.put(self.base.allocator, .{
.name = name,
.vmaddr_offset = symbol.n_value - text_segment.vmaddr,
.export_flags = 0, // TODO work out creation of export flags
});
}
var buffer: std.ArrayListUnmanaged(u8) = .{};
defer buffer.deinit(self.base.allocator);
try trie.writeULEB128Mem(self.base.allocator, &buffer);
const dyld_info = &self.load_commands.items[self.dyld_info_cmd_index.?].DyldInfo;
try self.base.file.?.pwriteAll(buf[0..], dyld_info.export_off);
try self.base.file.?.pwriteAll(buffer.items, dyld_info.export_off);
}
fn writeStringTable(self: *MachO) !void {

src/link/MachO/Trie.zig (new file, 436 lines)

@ -0,0 +1,436 @@
//! Represents export trie used in MachO executables and dynamic libraries.
//! The purpose of an export trie is to encode as compactly as possible all
//! export symbols for the loader `dyld`.
//! The export trie encodes offset and other information using ULEB128
//! encoding, and is part of the __LINKEDIT segment.
//!
//! Description from loader.h:
//!
//! The symbols exported by a dylib are encoded in a trie. This is a compact
//! representation that factors out common prefixes. It also reduces LINKEDIT pages
//! in RAM because it encodes all information (name, address, flags) in one small,
//! contiguous range. The export area is a stream of nodes. The first node sequentially
//! is the start node for the trie.
//!
//! Nodes for a symbol start with a uleb128 that is the length of the exported symbol
//! information for the string so far. If there is no exported symbol, the node starts
//! with a zero byte. If there is exported info, it follows the length.
//!
//! First is a uleb128 containing flags. Normally, it is followed by a uleb128 encoded
//! offset which is location of the content named by the symbol from the mach_header
//! for the image. If the flags is EXPORT_SYMBOL_FLAGS_REEXPORT, then following the flags
//! is a uleb128 encoded library ordinal, then a zero terminated UTF8 string. If the string
//! is zero length, then the symbol is re-export from the specified dylib with the same name.
//! If the flags is EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER, then following the flags is two
//! uleb128s: the stub offset and the resolver offset. The stub is used by non-lazy pointers.
//! The resolver is used by lazy pointers and must be called to get the actual address to use.
//!
//! After the optional exported symbol information is a byte of how many edges (0-255) that
//! this node has leaving it, followed by each edge. Each edge is a zero terminated UTF8 of
//! the addition chars in the symbol, followed by a uleb128 offset for the node that edge points to.
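For a concrete instance of the layout just described: a terminal node carrying flags 0, an offset of 0x1000 from the mach_header, and no outgoing edges is emitted as the five bytes 03 00 80 20 00, i.e. the ULEB128 length of the exported-symbol info (3), the flags (0), the offset 0x1000 as the ULEB128 pair 80 20, and an edge count of 0.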
const Trie = @This();
const std = @import("std");
const mem = std.mem;
const leb = std.debug.leb;
const log = std.log.scoped(.link);
const testing = std.testing;
const assert = std.debug.assert;
const Allocator = mem.Allocator;
pub const Symbol = struct {
name: []const u8,
vmaddr_offset: u64,
export_flags: u64,
};
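A minimal usage sketch mirroring how writeExportTrie in MachO.zig drives this API; the symbol values are illustrative and the test itself is hypothetical, not part of this change:

test "Trie usage sketch" {
    var trie: Trie = .{};
    defer trie.deinit(testing.allocator);
    try trie.put(testing.allocator, .{
        .name = "_start",
        .vmaddr_offset = 0x1000,
        .export_flags = 0,
    });
    var buffer: std.ArrayListUnmanaged(u8) = .{};
    defer buffer.deinit(testing.allocator);
    try trie.writeULEB128Mem(testing.allocator, &buffer);
    testing.expect(buffer.items.len != 0); // ULEB128-encoded trie destined for __LINKEDIT
}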
const Edge = struct {
from: *Node,
to: *Node,
label: []const u8,
fn deinit(self: *Edge, alloc: *Allocator) void {
self.to.deinit(alloc);
alloc.destroy(self.to);
self.from = undefined;
self.to = undefined;
}
};
const Node = struct {
/// Export flags associated with this exported symbol (if any).
export_flags: ?u64 = null,
/// VM address offset wrt the section this symbol is defined against (if any).
vmaddr_offset: ?u64 = null,
/// Offset of this node in the trie output byte stream.
trie_offset: ?usize = null,
/// List of all edges originating from this node.
edges: std.ArrayListUnmanaged(Edge) = .{},
fn deinit(self: *Node, alloc: *Allocator) void {
for (self.edges.items) |*edge| {
edge.deinit(alloc);
}
self.edges.deinit(alloc);
}
const PutResult = struct {
/// Node reached at this stage of `put` op.
node: *Node,
/// Count of newly inserted nodes at this stage of `put` op.
node_count: usize,
};
/// Inserts a new node starting from `self`.
fn put(self: *Node, alloc: *Allocator, label: []const u8, node_count: usize) !PutResult {
var curr_node_count = node_count;
// Check for match with edges from this node.
for (self.edges.items) |*edge| {
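// `indexOfDiff` returns null when both labels are identical, i.e. the
// symbol already has a node in the trie, so we simply return it.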
const match = mem.indexOfDiff(u8, edge.label, label) orelse return PutResult{
.node = edge.to,
.node_count = curr_node_count,
};
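// match == 0 means no shared prefix with this edge; try the next one.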
if (match == 0) continue;
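// The entire edge label is a prefix of the new label; descend into the
// child node with the remaining suffix.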
if (match == edge.label.len) return edge.to.put(alloc, label[match..], curr_node_count);
// Found a match, need to splice up nodes.
// From: A -> B
// To: A -> C -> B
const mid = try alloc.create(Node);
mid.* = .{};
const to_label = edge.label;
const to_node = edge.to;
edge.to = mid;
edge.label = label[0..match];
curr_node_count += 1;
try mid.edges.append(alloc, .{
.from = mid,
.to = to_node,
.label = to_label[match..],
});
if (match == label.len) {
return PutResult{ .node = to_node, .node_count = curr_node_count };
} else {
return mid.put(alloc, label[match..], curr_node_count);
}
}
// Add a new node.
const node = try alloc.create(Node);
node.* = .{};
curr_node_count += 1;
try self.edges.append(alloc, .{
.from = self,
.to = node,
.label = label,
});
return PutResult{ .node = node, .node_count = curr_node_count };
}
/// This method should only be called *after* `updateOffset` has been called;
/// if this is not upheld, this method will panic.
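/// Layout written per node: either a single 0x0 byte (non-terminal), or the
/// ULEB128 size of the terminal info followed by the ULEB128 export flags and
/// ULEB128 vmaddr offset; then one byte with the edge count, and for each edge
/// its label, a terminating 0 byte, and the ULEB128 offset of the child node.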
fn writeULEB128Mem(self: Node, buffer: *std.ArrayListUnmanaged(u8)) !void {
assert(self.trie_offset != null); // You need to call updateOffset first.
if (self.vmaddr_offset) |offset| {
// Terminal node info: encode export flags and vmaddr offset of this symbol.
var info_buf_len: usize = 0;
var info_buf: [@sizeOf(u64) * 2]u8 = undefined;
info_buf_len += try leb.writeULEB128Mem(info_buf[0..], self.export_flags.?);
info_buf_len += try leb.writeULEB128Mem(info_buf[info_buf_len..], offset);
// Encode the size of the terminal node info.
var size_buf: [@sizeOf(u64)]u8 = undefined;
const size_buf_len = try leb.writeULEB128Mem(size_buf[0..], info_buf_len);
// Now, write them to the output buffer.
buffer.appendSliceAssumeCapacity(size_buf[0..size_buf_len]);
buffer.appendSliceAssumeCapacity(info_buf[0..info_buf_len]);
} else {
// Non-terminal node is delimited by 0 byte.
buffer.appendAssumeCapacity(0);
}
// Write number of edges (max legal number of edges is 255).
buffer.appendAssumeCapacity(@intCast(u8, self.edges.items.len));
for (self.edges.items) |edge| {
// Write edges labels.
buffer.appendSliceAssumeCapacity(edge.label);
buffer.appendAssumeCapacity(0);
var buf: [@sizeOf(u64)]u8 = undefined;
const buf_len = try leb.writeULEB128Mem(buf[0..], edge.to.trie_offset.?);
buffer.appendSliceAssumeCapacity(buf[0..buf_len]);
}
}
const UpdateResult = struct {
/// Current size of this node in bytes.
node_size: usize,
/// True if the trie offset of this node in the output byte stream
/// changed as a result of this update (meaning the layout needs
/// another pass); false otherwise.
};
/// Updates offset of this node in the output byte stream.
fn updateOffset(self: *Node, offset: usize) UpdateResult {
var node_size: usize = 0;
if (self.vmaddr_offset) |vmaddr| {
node_size += sizeULEB128Mem(self.export_flags.?);
node_size += sizeULEB128Mem(vmaddr);
node_size += sizeULEB128Mem(node_size);
} else {
node_size += 1; // 0x0 for non-terminal nodes
}
node_size += 1; // 1 byte for edge count
for (self.edges.items) |edge| {
const next_node_offset = edge.to.trie_offset orelse 0;
node_size += edge.label.len + 1 + sizeULEB128Mem(next_node_offset);
}
const trie_offset = self.trie_offset orelse 0;
const updated = offset != trie_offset;
self.trie_offset = offset;
return .{ .node_size = node_size, .updated = updated };
}
/// Calculates number of bytes in ULEB128 encoding of value.
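/// For example, values in 0...0x7f take one byte, 0x80...0x3fff take two.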
fn sizeULEB128Mem(value: u64) usize {
var res: usize = 0;
var v = value;
while (true) {
v = v >> 7;
res += 1;
if (v == 0) break;
}
return res;
}
};
/// Count of nodes in the trie.
/// The count is updated at every `put` call.
/// The trie always consists of at least a root node, hence
/// the count always starts at 1.
node_count: usize = 1,
/// The root node of the trie.
root: Node = .{},
/// Insert a symbol into the trie, updating the prefixes in the process.
/// This operation may change the layout of the trie by splicing edges in
/// certain circumstances.
pub fn put(self: *Trie, alloc: *Allocator, symbol: Symbol) !void {
const res = try self.root.put(alloc, symbol.name, 0);
self.node_count += res.node_count;
res.node.vmaddr_offset = symbol.vmaddr_offset;
res.node.export_flags = symbol.export_flags;
}
/// Write the trie to a buffer ULEB128 encoded.
pub fn writeULEB128Mem(self: *Trie, alloc: *Allocator, buffer: *std.ArrayListUnmanaged(u8)) !void {
var ordered_nodes: std.ArrayListUnmanaged(*Node) = .{};
defer ordered_nodes.deinit(alloc);
try ordered_nodes.ensureCapacity(alloc, self.node_count);
walkInOrder(&self.root, &ordered_nodes);
var offset: usize = 0;
var more: bool = true;
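// Node sizes depend on the ULEB128-encoded offsets of their children, and
// those offsets depend on the sizes of the nodes laid out before them, so
// keep recomputing the layout until no offset changes in a full pass.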
while (more) {
offset = 0;
more = false;
for (ordered_nodes.items) |node| {
const res = node.updateOffset(offset);
offset += res.node_size;
if (res.updated) more = true;
}
}
try buffer.ensureCapacity(alloc, buffer.items.len + offset);
for (ordered_nodes.items) |node| {
try node.writeULEB128Mem(buffer);
}
}
/// Walks the trie in DFS order, gathering all nodes into a linear stream.
fn walkInOrder(node: *Node, list: *std.ArrayListUnmanaged(*Node)) void {
list.appendAssumeCapacity(node);
for (node.edges.items) |*edge| {
walkInOrder(edge.to, list);
}
}
pub fn deinit(self: *Trie, alloc: *Allocator) void {
self.root.deinit(alloc);
}
test "Trie node count" {
var gpa = testing.allocator;
var trie: Trie = .{};
defer trie.deinit(gpa);
testing.expectEqual(trie.node_count, 1);
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 2);
// Inserting the same node shouldn't update the trie.
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 2);
try trie.put(gpa, .{
.name = "__mh_execute_header",
.vmaddr_offset = 0x1000,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 4);
// Inserting the same node shouldn't update the trie.
try trie.put(gpa, .{
.name = "__mh_execute_header",
.vmaddr_offset = 0x1000,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 4);
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expectEqual(trie.node_count, 4);
}
test "Trie basic" {
var gpa = testing.allocator;
var trie: Trie = .{};
defer trie.deinit(gpa);
// root
testing.expect(trie.root.edges.items.len == 0);
// root --- _st ---> node
try trie.put(gpa, .{
.name = "_st",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expect(trie.root.edges.items.len == 1);
testing.expect(mem.eql(u8, trie.root.edges.items[0].label, "_st"));
{
// root --- _st ---> node --- art ---> node
try trie.put(gpa, .{
.name = "_start",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expect(trie.root.edges.items.len == 1);
const nextEdge = &trie.root.edges.items[0];
testing.expect(mem.eql(u8, nextEdge.label, "_st"));
testing.expect(nextEdge.to.edges.items.len == 1);
testing.expect(mem.eql(u8, nextEdge.to.edges.items[0].label, "art"));
}
{
// root --- _ ---> node --- st ---> node --- art ---> node
// |
// | --- main ---> node
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0,
.export_flags = 0,
});
testing.expect(trie.root.edges.items.len == 1);
const nextEdge = &trie.root.edges.items[0];
testing.expect(mem.eql(u8, nextEdge.label, "_"));
testing.expect(nextEdge.to.edges.items.len == 2);
testing.expect(mem.eql(u8, nextEdge.to.edges.items[0].label, "st"));
testing.expect(mem.eql(u8, nextEdge.to.edges.items[1].label, "main"));
const nextNextEdge = &nextEdge.to.edges.items[0];
testing.expect(mem.eql(u8, nextNextEdge.to.edges.items[0].label, "art"));
}
}
test "Trie.writeULEB128Mem" {
var gpa = testing.allocator;
var trie: Trie = .{};
defer trie.deinit(gpa);
try trie.put(gpa, .{
.name = "__mh_execute_header",
.vmaddr_offset = 0,
.export_flags = 0,
});
try trie.put(gpa, .{
.name = "_main",
.vmaddr_offset = 0x1000,
.export_flags = 0,
});
var buffer: std.ArrayListUnmanaged(u8) = .{};
defer buffer.deinit(gpa);
try trie.writeULEB128Mem(gpa, &buffer);
const exp_buffer = [_]u8{
0x0,
0x1,
0x5f,
0x0,
0x5,
0x0,
0x2,
0x5f,
0x6d,
0x68,
0x5f,
0x65,
0x78,
0x65,
0x63,
0x75,
0x74,
0x65,
0x5f,
0x68,
0x65,
0x61,
0x64,
0x65,
0x72,
0x0,
0x21,
0x6d,
0x61,
0x69,
0x6e,
0x0,
0x25,
0x2,
0x0,
0x0,
0x0,
0x3,
0x0,
0x80,
0x20,
0x0,
};
testing.expect(buffer.items.len == exp_buffer.len);
testing.expect(mem.eql(u8, buffer.items, exp_buffer[0..]));
}

View File

@ -310,8 +310,12 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
digest = man.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = directory.handle.readLink(id_symlink_basename, &prev_digest_buf) catch |err| blk: {
log.debug("WASM LLD new_digest={} readlink error: {}", .{ digest, @errorName(err) });
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("WASM LLD new_digest={} error: {}", .{ digest, @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
@ -374,7 +378,7 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
try argv.append(p);
}
if (self.base.options.output_mode == .Exe and !self.base.options.is_compiler_rt_or_libc) {
if (self.base.options.output_mode != .Obj and !self.base.options.is_compiler_rt_or_libc) {
if (!self.base.options.link_libc) {
try argv.append(comp.libc_static_lib.?.full_object_path);
}
@ -424,9 +428,9 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation) !void {
}
if (!self.base.options.disable_lld_caching) {
// Update the dangling symlink with the digest. If it fails we can continue; it only
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
directory.handle.symLink(&digest, id_symlink_basename, .{}) catch |err| {
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
std.log.warn("failed to save linking hash digest symlink: {}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.

View File

@ -123,14 +123,14 @@ pub fn cannotDynamicLink(target: std.Target) bool {
/// since this is the stable syscall interface.
pub fn osRequiresLibC(target: std.Target) bool {
return switch (target.os.tag) {
.freebsd, .netbsd, .dragonfly, .macosx, .ios, .watchos, .tvos => true,
.freebsd, .netbsd, .dragonfly, .macos, .ios, .watchos, .tvos => true,
else => false,
};
}
pub fn libcNeedsLibUnwind(target: std.Target) bool {
return switch (target.os.tag) {
.macosx,
.macos,
.ios,
.watchos,
.tvos,
@ -197,7 +197,7 @@ pub fn osToLLVM(os_tag: std.Target.Os.Tag) llvm.OSType {
.kfreebsd => .KFreeBSD,
.linux => .Linux,
.lv2 => .Lv2,
.macosx => .MacOSX,
.macos => .MacOSX,
.netbsd => .NetBSD,
.openbsd => .OpenBSD,
.solaris => .Solaris,

View File

@ -3104,7 +3104,7 @@ pub const CType = enum {
},
.linux,
.macosx,
.macos,
.freebsd,
.netbsd,
.dragonfly,

View File

@ -820,7 +820,9 @@ const char *ZigLLVMGetVendorTypeName(ZigLLVM_VendorType vendor) {
}
const char *ZigLLVMGetOSTypeName(ZigLLVM_OSType os) {
return (const char*)Triple::getOSTypeName((Triple::OSType)os).bytes_begin();
const char* name = (const char*)Triple::getOSTypeName((Triple::OSType)os).bytes_begin();
if (strcmp(name, "macosx") == 0) return "macos";
return name;
}
const char *ZigLLVMGetEnvironmentTypeName(ZigLLVM_EnvironmentType env_type) {

View File

@ -308,7 +308,7 @@ pub fn addCases(cases: *tests.StackTracesContext) void {
},
);
},
.macosx => {
.macos => {
cases.addCase(
"return",
source_return,

View File

@ -13,7 +13,7 @@ const linux_x64 = std.zig.CrossTarget{
const macosx_x64 = std.zig.CrossTarget{
.cpu_arch = .x86_64,
.os_tag = .macosx,
.os_tag = .macos,
};
const linux_riscv64 = std.zig.CrossTarget{

View File

@ -234,7 +234,7 @@ const test_targets = blk: {
TestTarget{
.target = .{
.cpu_arch = .x86_64,
.os_tag = .macosx,
.os_tag = .macos,
.abi = .gnu,
},
// https://github.com/ziglang/zig/issues/3295