Merge branch 'master' into improve-windows-networking

Bas 2020-09-08 11:56:59 +02:00 committed by GitHub
commit 4a6ca735d9
119 changed files with 6467 additions and 2121 deletions


@ -123,7 +123,13 @@ pub fn build(b: *Builder) !void {
.source_dir = "lib",
.install_dir = .Lib,
.install_subdir = "zig",
.exclude_extensions = &[_][]const u8{ "test.zig", "README.md" },
.exclude_extensions = &[_][]const u8{
"test.zig",
"README.md",
".z.0",
".z.9",
"rfc1951.txt",
},
});
const test_filter = b.option([]const u8, "test-filter", "Skip tests that do not match filter");


@ -2156,7 +2156,7 @@ test "pointer casting" {
test "pointer child type" {
// pointer types have a `child` field which tells you the type they point to.
assert((*u32).Child == u32);
assert(@typeInfo(*u32).Pointer.child == u32);
}
{#code_end#}
{#header_open|Alignment#}
@ -2184,7 +2184,7 @@ test "variable alignment" {
assert(@TypeOf(&x) == *i32);
assert(*i32 == *align(align_of_i32) i32);
if (std.Target.current.cpu.arch == .x86_64) {
assert((*i32).alignment == 4);
assert(@typeInfo(*i32).Pointer.alignment == 4);
}
}
{#code_end#}
@ -2202,7 +2202,7 @@ const assert = @import("std").debug.assert;
var foo: u8 align(4) = 100;
test "global variable alignment" {
assert(@TypeOf(&foo).alignment == 4);
assert(@typeInfo(@TypeOf(&foo)).Pointer.alignment == 4);
assert(@TypeOf(&foo) == *align(4) u8);
const as_pointer_to_array: *[1]u8 = &foo;
const as_slice: []u8 = as_pointer_to_array;
@ -4310,8 +4310,8 @@ test "fn type inference" {
const assert = @import("std").debug.assert;
test "fn reflection" {
assert(@TypeOf(assert).ReturnType == void);
assert(@TypeOf(assert).is_var_args == false);
assert(@typeInfo(@TypeOf(assert)).Fn.return_type.? == void);
assert(@typeInfo(@TypeOf(assert)).Fn.is_var_args == false);
}
{#code_end#}
{#header_close#}
@ -4611,10 +4611,10 @@ test "error union" {
foo = error.SomeError;
// Use compile-time reflection to access the payload type of an error union:
comptime assert(@TypeOf(foo).Payload == i32);
comptime assert(@typeInfo(@TypeOf(foo)).ErrorUnion.payload == i32);
// Use compile-time reflection to access the error set type of an error union:
comptime assert(@TypeOf(foo).ErrorSet == anyerror);
comptime assert(@typeInfo(@TypeOf(foo)).ErrorUnion.error_set == anyerror);
}
{#code_end#}
{#header_open|Merging Error Sets#}
@ -4991,7 +4991,7 @@ test "optional type" {
foo = 1234;
// Use compile-time reflection to access the child type of the optional:
comptime assert(@TypeOf(foo).Child == i32);
comptime assert(@typeInfo(@TypeOf(foo)).Optional.child == i32);
}
{#code_end#}
{#header_close#}
@ -6889,7 +6889,7 @@ fn func(y: *i32) void {
This builtin function atomically dereferences a pointer and returns the value.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
{#header_close#}
@ -6899,7 +6899,7 @@ fn func(y: *i32) void {
This builtin function atomically modifies memory and then returns the previous value.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>
@ -6925,7 +6925,7 @@ fn func(y: *i32) void {
This builtin function atomically stores a value.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
{#header_close#}
@ -7208,10 +7208,10 @@ fn cmpxchgStrongButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_v
more efficiently in machine instructions.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgWeak#}
{#header_close#}
{#header_open|@cmpxchgWeak#}
@ -7237,10 +7237,10 @@ fn cmpxchgWeakButNotAtomic(comptime T: type, ptr: *T, expected_value: T, new_val
However if you need a stronger guarantee, use {#link|@cmpxchgStrong#}.
</p>
<p>
{#syntax#}T{#endsyntax#} must be a {#syntax#}bool{#endsyntax#}, a float,
{#syntax#}T{#endsyntax#} must be a pointer, a {#syntax#}bool{#endsyntax#}, a float,
an integer or an enum.
</p>
<p>{#syntax#}@TypeOf(ptr).alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
<p>{#syntax#}@typeInfo(@TypeOf(ptr)).Pointer.alignment{#endsyntax#} must be {#syntax#}>= @sizeOf(T).{#endsyntax#}</p>
{#see_also|Compile Variables|cmpxchgStrong#}
{#header_close#}
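As a concrete illustration of the weak compare-and-swap retry loop described above, here is a minimal sketch; the atomicAdd helper and the .Monotonic orderings are illustrative choices, not taken from the documentation:

const std = @import("std");

// Hypothetical helper: atomically add `delta` to `counter` using a
// @cmpxchgWeak retry loop, as suggested by the section above.
fn atomicAdd(counter: *u32, delta: u32) void {
    var old = @atomicLoad(u32, counter, .Monotonic);
    while (true) {
        const new = old +% delta;
        // On failure @cmpxchgWeak returns the current value; retry with it.
        old = @cmpxchgWeak(u32, counter, old, new, .Monotonic, .Monotonic) orelse return;
    }
}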


@ -46,7 +46,11 @@ pub fn ArrayListAligned(comptime T: type, comptime alignment: ?u29) type {
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
var self = Self.init(allocator);
try self.ensureCapacity(num);
const new_memory = try self.allocator.allocAdvanced(T, alignment, num, .at_least);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
return self;
}
@ -366,7 +370,11 @@ pub fn ArrayListAlignedUnmanaged(comptime T: type, comptime alignment: ?u29) typ
/// Deinitialize with `deinit` or use `toOwnedSlice`.
pub fn initCapacity(allocator: *Allocator, num: usize) !Self {
var self = Self{};
try self.ensureCapacity(allocator, num);
const new_memory = try allocator.allocAdvanced(T, alignment, num, .at_least);
self.items.ptr = new_memory.ptr;
self.capacity = new_memory.len;
return self;
}


@ -330,3 +330,8 @@ pub const FILE = @Type(.Opaque);
pub extern "c" fn dlopen(path: [*:0]const u8, mode: c_int) ?*c_void;
pub extern "c" fn dlclose(handle: *c_void) c_int;
pub extern "c" fn dlsym(handle: ?*c_void, symbol: [*:0]const u8) ?*c_void;
pub extern "c" fn sync() void;
pub extern "c" fn syncfs(fd: c_int) c_int;
pub extern "c" fn fsync(fd: c_int) c_int;
pub extern "c" fn fdatasync(fd: c_int) c_int;


@ -44,10 +44,10 @@ pub const ChildProcess = struct {
stderr_behavior: StdIo,
/// Set to change the user id when spawning the child process.
uid: if (builtin.os.tag == .windows) void else ?u32,
uid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.uid_t,
/// Set to change the group id when spawning the child process.
gid: if (builtin.os.tag == .windows) void else ?u32,
gid: if (builtin.os.tag == .windows or builtin.os.tag == .wasi) void else ?os.gid_t,
/// Set to change the current working directory when spawning the child process.
cwd: ?[]const u8,
@ -275,9 +275,7 @@ pub const ChildProcess = struct {
}
fn handleWaitResult(self: *ChildProcess, status: u32) void {
// TODO https://github.com/ziglang/zig/issues/3190
var term = self.cleanupAfterWait(status);
self.term = term;
self.term = self.cleanupAfterWait(status);
}
fn cleanupStreams(self: *ChildProcess) void {
@ -487,8 +485,8 @@ pub const ChildProcess = struct {
const any_ignore = (self.stdin_behavior == StdIo.Ignore or self.stdout_behavior == StdIo.Ignore or self.stderr_behavior == StdIo.Ignore);
const nul_handle = if (any_ignore)
windows.OpenFile(&[_]u16{ 'N', 'U', 'L' }, .{
.dir = std.fs.cwd().fd,
// "\Device\Null" or "\??\NUL"
windows.OpenFile(&[_]u16{ '\\', 'D', 'e', 'v', 'i', 'c', 'e', '\\', 'N', 'u', 'l', 'l' }, .{
.access_mask = windows.GENERIC_READ | windows.SYNCHRONIZE,
.share_access = windows.FILE_SHARE_READ,
.creation = windows.OPEN_EXISTING,


@ -18,11 +18,77 @@ const IMAGE_FILE_MACHINE_I386 = 0x014c;
const IMAGE_FILE_MACHINE_IA64 = 0x0200;
const IMAGE_FILE_MACHINE_AMD64 = 0x8664;
pub const MachineType = enum(u16) {
Unknown = 0x0,
/// Matsushita AM33
AM33 = 0x1d3,
/// x64
X64 = 0x8664,
/// ARM little endian
ARM = 0x1c0,
/// ARM64 little endian
ARM64 = 0xaa64,
/// ARM Thumb-2 little endian
ARMNT = 0x1c4,
/// EFI byte code
EBC = 0xebc,
/// Intel 386 or later processors and compatible processors
I386 = 0x14c,
/// Intel Itanium processor family
IA64 = 0x200,
/// Mitsubishi M32R little endian
M32R = 0x9041,
/// MIPS16
MIPS16 = 0x266,
/// MIPS with FPU
MIPSFPU = 0x366,
/// MIPS16 with FPU
MIPSFPU16 = 0x466,
/// Power PC little endian
POWERPC = 0x1f0,
/// Power PC with floating point support
POWERPCFP = 0x1f1,
/// MIPS little endian
R4000 = 0x166,
/// RISC-V 32-bit address space
RISCV32 = 0x5032,
/// RISC-V 64-bit address space
RISCV64 = 0x5064,
/// RISC-V 128-bit address space
RISCV128 = 0x5128,
/// Hitachi SH3
SH3 = 0x1a2,
/// Hitachi SH3 DSP
SH3DSP = 0x1a3,
/// Hitachi SH4
SH4 = 0x1a6,
/// Hitachi SH5
SH5 = 0x1a8,
/// Thumb
Thumb = 0x1c2,
/// MIPS little-endian WCE v2
WCEMIPSV2 = 0x169,
};
// OptionalHeader.magic values
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms680339(v=vs.85).aspx
const IMAGE_NT_OPTIONAL_HDR32_MAGIC = 0x10b;
const IMAGE_NT_OPTIONAL_HDR64_MAGIC = 0x20b;
// Image Characteristics
pub const IMAGE_FILE_RELOCS_STRIPPED = 0x1;
pub const IMAGE_FILE_DEBUG_STRIPPED = 0x200;
pub const IMAGE_FILE_EXECUTABLE_IMAGE = 0x2;
pub const IMAGE_FILE_32BIT_MACHINE = 0x100;
pub const IMAGE_FILE_LARGE_ADDRESS_AWARE = 0x20;
// Section flags
pub const IMAGE_SCN_CNT_INITIALIZED_DATA = 0x40;
pub const IMAGE_SCN_MEM_READ = 0x40000000;
pub const IMAGE_SCN_CNT_CODE = 0x20;
pub const IMAGE_SCN_MEM_EXECUTE = 0x20000000;
pub const IMAGE_SCN_MEM_WRITE = 0x80000000;
const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
const IMAGE_DEBUG_TYPE_CODEVIEW = 2;
const DEBUG_DIRECTORY = 6;

lib/std/compress.zig Normal file

@ -0,0 +1,13 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
const std = @import("std.zig");
pub const deflate = @import("compress/deflate.zig");
pub const zlib = @import("compress/zlib.zig");
test "" {
_ = zlib;
}


@ -0,0 +1,521 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
//
// Decompressor for DEFLATE data streams (RFC1951)
//
// Heavily inspired by the simple decompressor puff.c by Mark Adler
const std = @import("std");
const io = std.io;
const math = std.math;
const mem = std.mem;
const assert = std.debug.assert;
const MAXBITS = 15;
const MAXLCODES = 286;
const MAXDCODES = 30;
const MAXCODES = MAXLCODES + MAXDCODES;
const FIXLCODES = 288;
const Huffman = struct {
count: [MAXBITS + 1]u16,
symbol: [MAXCODES]u16,
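// Build a canonical Huffman decoding table (RFC1951, section 3.2.2) from a
// list of code lengths, one per symbol (0 = unused). Afterwards `count[n]`
// holds the number of codes of length n and `symbol` holds the symbols
// sorted by code length, which is exactly what `decode` below walks.
// Returns error.InvalidTree if the lengths over-subscribe the code space.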
fn construct(self: *Huffman, length: []const u16) !void {
for (self.count) |*val| {
val.* = 0;
}
for (length) |val| {
self.count[val] += 1;
}
if (self.count[0] == length.len)
return;
var left: isize = 1;
for (self.count[1..]) |val| {
left *= 2;
left -= @as(isize, @bitCast(i16, val));
if (left < 0)
return error.InvalidTree;
}
var offs: [MAXBITS + 1]u16 = undefined;
{
var len: usize = 1;
offs[1] = 0;
while (len < MAXBITS) : (len += 1) {
offs[len + 1] = offs[len] + self.count[len];
}
}
for (length) |val, symbol| {
if (val != 0) {
self.symbol[offs[val]] = @truncate(u16, symbol);
offs[val] += 1;
}
}
}
};
pub fn InflateStream(comptime ReaderType: type) type {
return struct {
const Self = @This();
pub const Error = ReaderType.Error || error{
EndOfStream,
BadCounts,
InvalidBlockType,
InvalidDistance,
InvalidFixedCode,
InvalidLength,
InvalidStoredSize,
InvalidSymbol,
InvalidTree,
MissingEOBCode,
NoLastLength,
OutOfCodes,
};
pub const Reader = io.Reader(*Self, Error, read);
bit_reader: io.BitReader(.Little, ReaderType),
// True if the decoder has reached the end of the compressed stream and no
// further data can be decompressed
seen_eos: bool,
state: union(enum) {
// Parse a compressed block header and set up the internal state for
// decompressing its contents.
DecodeBlockHeader: void,
// Decode all the symbols in a compressed block.
DecodeBlockData: void,
// Copy N bytes of uncompressed data from the underlying stream into
// the window.
Copy: usize,
// Copy 1 byte into the window.
CopyLit: u8,
// Copy L bytes from the window itself, starting from D bytes
// behind.
CopyFrom: struct { distance: u16, length: u16 },
},
// Sliding window for the LZ77 algorithm
window: struct {
const WSelf = @This();
// invariant: buffer length is always a power of 2
buf: []u8,
// invariant: ri <= wi
wi: usize = 0, // Write index
ri: usize = 0, // Read index
el: usize = 0, // Number of readable elements
fn readable(self: *WSelf) usize {
return self.el;
}
fn writable(self: *WSelf) usize {
return self.buf.len - self.el;
}
// Insert a single byte into the window.
// Returns 1 if there's enough space for the new byte and 0
// otherwise.
fn append(self: *WSelf, value: u8) usize {
if (self.writable() < 1) return 0;
self.appendUnsafe(value);
return 1;
}
// Insert a single byte into the window.
// Assumes there's enough space.
fn appendUnsafe(self: *WSelf, value: u8) void {
self.buf[self.wi] = value;
self.wi = (self.wi + 1) & (self.buf.len - 1);
self.el += 1;
}
// Fill dest[] with data from the window, starting from the read
// position. This updates the read pointer.
// Returns the number of read bytes or 0 if there's nothing to read
// yet.
fn read(self: *WSelf, dest: []u8) usize {
const N = math.min(dest.len, self.readable());
if (N == 0) return 0;
if (self.ri + N < self.buf.len) {
// The data doesn't wrap around
mem.copy(u8, dest, self.buf[self.ri .. self.ri + N]);
} else {
// The data wraps around the buffer, split the copy
std.mem.copy(u8, dest, self.buf[self.ri..]);
// How much data we've copied from `ri` to the end
const r = self.buf.len - self.ri;
std.mem.copy(u8, dest[r..], self.buf[0 .. N - r]);
}
self.ri = (self.ri + N) & (self.buf.len - 1);
self.el -= N;
return N;
}
// Copy `length` bytes starting from `distance` bytes behind the
// write pointer.
// Be careful as the length may be greater than the distance, that's
// how the compressor encodes run-length encoded sequences.
fn copyFrom(self: *WSelf, distance: usize, length: usize) usize {
const N = math.min(length, self.writable());
if (N == 0) return 0;
// TODO: Profile and, if needed, replace with smarter juggling
// of the window memory for the non-overlapping case.
var i: usize = 0;
while (i < N) : (i += 1) {
const index = (self.wi -% distance) % self.buf.len;
self.appendUnsafe(self.buf[index]);
}
return N;
}
},
// Compressor-local Huffman tables used to decompress blocks with
// dynamic codes.
huffman_tables: [2]Huffman = undefined,
// Huffman tables used for decoding length/distance pairs.
hdist: *Huffman,
hlen: *Huffman,
fn stored(self: *Self) !void {
// Discard the remaining bits, the length field is always
// byte-aligned (and so is the data)
self.bit_reader.alignToByte();
const length = (try self.bit_reader.readBitsNoEof(u16, 16));
const length_cpl = (try self.bit_reader.readBitsNoEof(u16, 16));
if (length != ~length_cpl)
return error.InvalidStoredSize;
self.state = .{ .Copy = length };
}
fn fixed(self: *Self) !void {
comptime var lencode: Huffman = undefined;
comptime var distcode: Huffman = undefined;
// The Huffman codes are specified in the RFC1951, section 3.2.6
comptime {
@setEvalBranchQuota(100000);
const len_lengths = //
[_]u16{8} ** 144 ++
[_]u16{9} ** 112 ++
[_]u16{7} ** 24 ++
[_]u16{8} ** 8;
assert(len_lengths.len == FIXLCODES);
try lencode.construct(len_lengths[0..]);
const dist_lengths = [_]u16{5} ** MAXDCODES;
try distcode.construct(dist_lengths[0..]);
}
self.hlen = &lencode;
self.hdist = &distcode;
self.state = .DecodeBlockData;
}
fn dynamic(self: *Self) !void {
// Number of length codes
const nlen = (try self.bit_reader.readBitsNoEof(usize, 5)) + 257;
// Number of distance codes
const ndist = (try self.bit_reader.readBitsNoEof(usize, 5)) + 1;
// Number of code length codes
const ncode = (try self.bit_reader.readBitsNoEof(usize, 4)) + 4;
if (nlen > MAXLCODES or ndist > MAXDCODES)
return error.BadCounts;
// Permutation of code length codes
const ORDER = [19]u16{
16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4,
12, 3, 13, 2, 14, 1, 15,
};
// Build the Huffman table to decode the code length codes
var lencode: Huffman = undefined;
{
var lengths = std.mem.zeroes([19]u16);
// Read the code lengths, missing ones are left as zero
for (ORDER[0..ncode]) |val| {
lengths[val] = try self.bit_reader.readBitsNoEof(u16, 3);
}
try lencode.construct(lengths[0..]);
}
// Read the length/literal and distance code length tables.
// Zero the table by default so we can avoid explicitly writing out
// zeros for codes 17 and 18
var lengths = std.mem.zeroes([MAXCODES]u16);
var i: usize = 0;
while (i < nlen + ndist) {
const symbol = try self.decode(&lencode);
switch (symbol) {
0...15 => {
lengths[i] = symbol;
i += 1;
},
16 => {
// repeat last length 3..6 times
if (i == 0) return error.NoLastLength;
const last_length = lengths[i - 1];
const repeat = 3 + (try self.bit_reader.readBitsNoEof(usize, 2));
const last_index = i + repeat;
while (i < last_index) : (i += 1) {
lengths[i] = last_length;
}
},
17 => {
// repeat zero 3..10 times
i += 3 + (try self.bit_reader.readBitsNoEof(usize, 3));
},
18 => {
// repeat zero 11..138 times
i += 11 + (try self.bit_reader.readBitsNoEof(usize, 7));
},
else => return error.InvalidSymbol,
}
}
if (i > nlen + ndist)
return error.InvalidLength;
// Check if the end of block code is present
if (lengths[256] == 0)
return error.MissingEOBCode;
try self.huffman_tables[0].construct(lengths[0..nlen]);
try self.huffman_tables[1].construct(lengths[nlen .. nlen + ndist]);
self.hlen = &self.huffman_tables[0];
self.hdist = &self.huffman_tables[1];
self.state = .DecodeBlockData;
}
fn codes(self: *Self, lencode: *Huffman, distcode: *Huffman) !bool {
// Size base for length codes 257..285
const LENS = [29]u16{
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258,
};
// Extra bits for length codes 257..285
const LEXT = [29]u16{
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2,
3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0,
};
// Offset base for distance codes 0..29
const DISTS = [30]u16{
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577,
};
// Extra bits for distance codes 0..29
const DEXT = [30]u16{
0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6,
7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13,
};
while (true) {
const symbol = try self.decode(lencode);
switch (symbol) {
0...255 => {
// Literal value
const c = @truncate(u8, symbol);
if (self.window.append(c) == 0) {
self.state = .{ .CopyLit = c };
return false;
}
},
256 => {
// End of block symbol
return true;
},
257...285 => {
// Length/distance pair
const length_symbol = symbol - 257;
const length = LENS[length_symbol] +
try self.bit_reader.readBitsNoEof(u16, LEXT[length_symbol]);
const distance_symbol = try self.decode(distcode);
const distance = DISTS[distance_symbol] +
try self.bit_reader.readBitsNoEof(u16, DEXT[distance_symbol]);
if (distance > self.window.buf.len)
return error.InvalidDistance;
const written = self.window.copyFrom(distance, length);
if (written != length) {
self.state = .{
.CopyFrom = .{
.distance = distance,
.length = length - @truncate(u16, written),
},
};
return false;
}
},
else => return error.InvalidFixedCode,
}
}
}
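// Decode one symbol from the bit stream using the canonical code table `h`.
// Huffman codes are packed MSB-first, so read one bit at a time and, at each
// code length, check whether the accumulated code falls into the range
// [first, first + count) of codes of that length.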
fn decode(self: *Self, h: *Huffman) !u16 {
var len: usize = 1;
var code: usize = 0;
var first: usize = 0;
var index: usize = 0;
while (len <= MAXBITS) : (len += 1) {
code |= try self.bit_reader.readBitsNoEof(usize, 1);
const count = h.count[len];
if (code < first + count)
return h.symbol[index + (code - first)];
index += count;
first += count;
first <<= 1;
code <<= 1;
}
return error.OutOfCodes;
}
fn step(self: *Self) !void {
while (true) {
switch (self.state) {
.DecodeBlockHeader => {
// The compressed stream is done
if (self.seen_eos) return;
const last = try self.bit_reader.readBitsNoEof(u1, 1);
const kind = try self.bit_reader.readBitsNoEof(u2, 2);
self.seen_eos = last != 0;
// The next state depends on the block type
switch (kind) {
0 => try self.stored(),
1 => try self.fixed(),
2 => try self.dynamic(),
3 => return error.InvalidBlockType,
}
},
.DecodeBlockData => {
if (!try self.codes(self.hlen, self.hdist)) {
return;
}
self.state = .DecodeBlockHeader;
},
.Copy => |*length| {
const N = math.min(self.window.writable(), length.*);
// TODO: This loop can be more efficient. On the other
// hand uncompressed blocks are not that common so...
var i: usize = 0;
while (i < N) : (i += 1) {
var tmp: [1]u8 = undefined;
if ((try self.bit_reader.read(&tmp)) != 1) {
// Unexpected end of stream, keep this error
// consistent with the use of readBitsNoEof
return error.EndOfStream;
}
self.window.appendUnsafe(tmp[0]);
}
if (N != length.*) {
length.* -= N;
return;
}
self.state = .DecodeBlockHeader;
},
.CopyLit => |c| {
if (self.window.append(c) == 0) {
return;
}
self.state = .DecodeBlockData;
},
.CopyFrom => |*info| {
const written = self.window.copyFrom(info.distance, info.length);
if (written != info.length) {
info.length -= @truncate(u16, written);
return;
}
self.state = .DecodeBlockData;
},
}
}
}
fn init(source: ReaderType, window_slice: []u8) Self {
assert(math.isPowerOfTwo(window_slice.len));
return Self{
.bit_reader = io.bitReader(.Little, source),
.window = .{ .buf = window_slice },
.seen_eos = false,
.state = .DecodeBlockHeader,
.hdist = undefined,
.hlen = undefined,
};
}
// Implements the io.Reader interface
pub fn read(self: *Self, buffer: []u8) Error!usize {
if (buffer.len == 0)
return 0;
// Try reading as much as possible from the window
var read_amt: usize = self.window.read(buffer);
while (read_amt < buffer.len) {
// Run the state machine, we can detect the "effective" end of
// stream condition by checking if any progress was made.
// Why "effective"? Because even though `seen_eos` is true we
// may still have to finish processing other decoding steps.
try self.step();
// No progress was made
if (self.window.readable() == 0)
break;
read_amt += self.window.read(buffer[read_amt..]);
}
return read_amt;
}
pub fn reader(self: *Self) Reader {
return .{ .context = self };
}
};
}
pub fn inflateStream(reader: anytype, window_slice: []u8) InflateStream(@TypeOf(reader)) {
return InflateStream(@TypeOf(reader)).init(reader, window_slice);
}
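A minimal usage sketch for the raw DEFLATE reader added above; the inflateAll helper, the fixed 32 KiB window and the in-memory source are illustrative (and assume std.compress is wired up as in this commit), not part of the new API:

const std = @import("std");

fn inflateAll(allocator: *std.mem.Allocator, compressed: []const u8) ![]u8 {
    // A raw DEFLATE stream may reference up to 32K of history, so hand the
    // decompressor a 32 KiB power-of-two window to slide over.
    var window: [0x8000]u8 = undefined;
    var fbs = std.io.fixedBufferStream(compressed);
    var inflate = std.compress.deflate.inflateStream(fbs.reader(), &window);
    return inflate.reader().readAllAlloc(allocator, std.math.maxInt(usize));
}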


@ -0,0 +1,955 @@
Network Working Group P. Deutsch
Request for Comments: 1951 Aladdin Enterprises
Category: Informational May 1996
DEFLATE Compressed Data Format Specification version 1.3
Status of This Memo
This memo provides information for the Internet community. This memo
does not specify an Internet standard of any kind. Distribution of
this memo is unlimited.
IESG Note:
The IESG takes no position on the validity of any Intellectual
Property Rights statements contained in this document.
Notices
Copyright (c) 1996 L. Peter Deutsch
Permission is granted to copy and distribute this document for any
purpose and without charge, including translations into other
languages and incorporation into compilations, provided that the
copyright notice and this notice are preserved, and that any
substantive changes or deletions from the original are clearly
marked.
A pointer to the latest version of this and related documentation in
HTML format can be found at the URL
<ftp://ftp.uu.net/graphics/png/documents/zlib/zdoc-index.html>.
Abstract
This specification defines a lossless compressed data format that
compresses data using a combination of the LZ77 algorithm and Huffman
coding, with efficiency comparable to the best currently available
general-purpose compression methods. The data can be produced or
consumed, even for an arbitrarily long sequentially presented input
data stream, using only an a priori bounded amount of intermediate
storage. The format can be implemented readily in a manner not
covered by patents.
Deutsch Informational [Page 1]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
Table of Contents
1. Introduction ................................................... 2
1.1. Purpose ................................................... 2
1.2. Intended audience ......................................... 3
1.3. Scope ..................................................... 3
1.4. Compliance ................................................ 3
1.5. Definitions of terms and conventions used ................ 3
1.6. Changes from previous versions ............................ 4
2. Compressed representation overview ............................. 4
3. Detailed specification ......................................... 5
3.1. Overall conventions ....................................... 5
3.1.1. Packing into bytes .................................. 5
3.2. Compressed block format ................................... 6
3.2.1. Synopsis of prefix and Huffman coding ............... 6
3.2.2. Use of Huffman coding in the "deflate" format ....... 7
3.2.3. Details of block format ............................. 9
3.2.4. Non-compressed blocks (BTYPE=00) ................... 11
3.2.5. Compressed blocks (length and distance codes) ...... 11
3.2.6. Compression with fixed Huffman codes (BTYPE=01) .... 12
3.2.7. Compression with dynamic Huffman codes (BTYPE=10) .. 13
3.3. Compliance ............................................... 14
4. Compression algorithm details ................................. 14
5. References .................................................... 16
6. Security Considerations ....................................... 16
7. Source code ................................................... 16
8. Acknowledgements .............................................. 16
9. Author's Address .............................................. 17
1. Introduction
1.1. Purpose
The purpose of this specification is to define a lossless
compressed data format that:
* Is independent of CPU type, operating system, file system,
and character set, and hence can be used for interchange;
* Can be produced or consumed, even for an arbitrarily long
sequentially presented input data stream, using only an a
priori bounded amount of intermediate storage, and hence
can be used in data communications or similar structures
such as Unix filters;
* Compresses data with efficiency comparable to the best
currently available general-purpose compression methods,
and in particular considerably better than the "compress"
program;
* Can be implemented readily in a manner not covered by
patents, and hence can be practiced freely;
Deutsch Informational [Page 2]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
* Is compatible with the file format produced by the current
widely used gzip utility, in that conforming decompressors
will be able to read data produced by the existing gzip
compressor.
The data format defined by this specification does not attempt to:
* Allow random access to compressed data;
* Compress specialized data (e.g., raster graphics) as well
as the best currently available specialized algorithms.
A simple counting argument shows that no lossless compression
algorithm can compress every possible input data set. For the
format defined here, the worst case expansion is 5 bytes per 32K-
byte block, i.e., a size increase of 0.015% for large data sets.
English text usually compresses by a factor of 2.5 to 3;
executable files usually compress somewhat less; graphical data
such as raster images may compress much more.
1.2. Intended audience
This specification is intended for use by implementors of software
to compress data into "deflate" format and/or decompress data from
"deflate" format.
The text of the specification assumes a basic background in
programming at the level of bits and other primitive data
representations. Familiarity with the technique of Huffman coding
is helpful but not required.
1.3. Scope
The specification specifies a method for representing a sequence
of bytes as a (usually shorter) sequence of bits, and a method for
packing the latter bit sequence into bytes.
1.4. Compliance
Unless otherwise indicated below, a compliant decompressor must be
able to accept and decompress any data set that conforms to all
the specifications presented here; a compliant compressor must
produce data sets that conform to all the specifications presented
here.
1.5. Definitions of terms and conventions used
Byte: 8 bits stored or transmitted as a unit (same as an octet).
For this specification, a byte is exactly 8 bits, even on machines
Deutsch Informational [Page 3]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
which store a character on a number of bits different from eight.
See below, for the numbering of bits within a byte.
String: a sequence of arbitrary bytes.
1.6. Changes from previous versions
There have been no technical changes to the deflate format since
version 1.1 of this specification. In version 1.2, some
terminology was changed. Version 1.3 is a conversion of the
specification to RFC style.
2. Compressed representation overview
A compressed data set consists of a series of blocks, corresponding
to successive blocks of input data. The block sizes are arbitrary,
except that non-compressible blocks are limited to 65,535 bytes.
Each block is compressed using a combination of the LZ77 algorithm
and Huffman coding. The Huffman trees for each block are independent
of those for previous or subsequent blocks; the LZ77 algorithm may
use a reference to a duplicated string occurring in a previous block,
up to 32K input bytes before.
Each block consists of two parts: a pair of Huffman code trees that
describe the representation of the compressed data part, and a
compressed data part. (The Huffman trees themselves are compressed
using Huffman encoding.) The compressed data consists of a series of
elements of two types: literal bytes (of strings that have not been
detected as duplicated within the previous 32K input bytes), and
pointers to duplicated strings, where a pointer is represented as a
pair <length, backward distance>. The representation used in the
"deflate" format limits distances to 32K bytes and lengths to 258
bytes, but does not limit the size of a block, except for
uncompressible blocks, which are limited as noted above.
Each type of value (literals, distances, and lengths) in the
compressed data is represented using a Huffman code, using one code
tree for literals and lengths and a separate code tree for distances.
The code trees for each block appear in a compact form just before
the compressed data for that block.
Deutsch Informational [Page 4]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
3. Detailed specification
3.1. Overall conventions In the diagrams below, a box like this:
   +---+
   |   | <-- the vertical bars might be missing
   +---+
represents one byte; a box like this:
   +==============+
   |              |
   +==============+
represents a variable number of bytes.
Bytes stored within a computer do not have a "bit order", since
they are always treated as a unit. However, a byte considered as
an integer between 0 and 255 does have a most- and least-
significant bit, and since we write numbers with the most-
significant digit on the left, we also write bytes with the most-
significant bit on the left. In the diagrams below, we number the
bits of a byte so that bit 0 is the least-significant bit, i.e.,
the bits are numbered:
+--------+
|76543210|
+--------+
Within a computer, a number may occupy multiple bytes. All
multi-byte numbers in the format described here are stored with
the least-significant byte first (at the lower memory address).
For example, the decimal number 520 is stored as:
      0        1
  +--------+--------+
  |00001000|00000010|
  +--------+--------+
   ^        ^
   |        |
   |        + more significant byte = 2 x 256
   + less significant byte = 8
3.1.1. Packing into bytes
This document does not address the issue of the order in which
bits of a byte are transmitted on a bit-sequential medium,
since the final data format described here is byte- rather than
Deutsch Informational [Page 5]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
bit-oriented. However, we describe the compressed block format
in below, as a sequence of data elements of various bit
lengths, not a sequence of bytes. We must therefore specify
how to pack these data elements into bytes to form the final
compressed byte sequence:
* Data elements are packed into bytes in order of
increasing bit number within the byte, i.e., starting
with the least-significant bit of the byte.
* Data elements other than Huffman codes are packed
starting with the least-significant bit of the data
element.
* Huffman codes are packed starting with the most-
significant bit of the code.
In other words, if one were to print out the compressed data as
a sequence of bytes, starting with the first byte at the
*right* margin and proceeding to the *left*, with the most-
significant bit of each byte on the left as usual, one would be
able to parse the result from right to left, with fixed-width
elements in the correct MSB-to-LSB order and Huffman codes in
bit-reversed order (i.e., with the first bit of the code in the
relative LSB position).
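The new decompressor relies on std.io.BitReader(.Little, ...) for exactly this packing; a small sketch (the byte 0x03 is a made-up example) reading the three block-header bits described in section 3.2.3 below:

const std = @import("std");
const testing = std.testing;

test "LSB-first packing of the block header (illustrative)" {
    // 0b00000011: bit 0 is BFINAL = 1, bits 1-2 are BTYPE = 01 (fixed codes).
    var fbs = std.io.fixedBufferStream(&[_]u8{0x03});
    var bits = std.io.bitReader(.Little, fbs.reader());
    testing.expectEqual(@as(u1, 1), try bits.readBitsNoEof(u1, 1)); // BFINAL
    testing.expectEqual(@as(u2, 1), try bits.readBitsNoEof(u2, 2)); // BTYPE
}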
3.2. Compressed block format
3.2.1. Synopsis of prefix and Huffman coding
Prefix coding represents symbols from an a priori known
alphabet by bit sequences (codes), one code for each symbol, in
a manner such that different symbols may be represented by bit
sequences of different lengths, but a parser can always parse
an encoded string unambiguously symbol-by-symbol.
We define a prefix code in terms of a binary tree in which the
two edges descending from each non-leaf node are labeled 0 and
1 and in which the leaf nodes correspond one-for-one with (are
labeled with) the symbols of the alphabet; then the code for a
symbol is the sequence of 0's and 1's on the edges leading from
the root to the leaf labeled with that symbol. For example:
Deutsch Informational [Page 6]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
            /\              Symbol    Code
           0  1             ------    ----
          /    \                A      00
         /\     B               B      1
        0  1                    C      011
       /    \                   D      010
      A     /\
           0  1
          /    \
         D      C
A parser can decode the next symbol from an encoded input
stream by walking down the tree from the root, at each step
choosing the edge corresponding to the next input bit.
Given an alphabet with known symbol frequencies, the Huffman
algorithm allows the construction of an optimal prefix code
(one which represents strings with those symbol frequencies
using the fewest bits of any possible prefix codes for that
alphabet). Such a code is called a Huffman code. (See
reference [1] in Chapter 5, references for additional
information on Huffman codes.)
Note that in the "deflate" format, the Huffman codes for the
various alphabets must not exceed certain maximum code lengths.
This constraint complicates the algorithm for computing code
lengths from symbol frequencies. Again, see Chapter 5,
references for details.
3.2.2. Use of Huffman coding in the "deflate" format
The Huffman codes used for each alphabet in the "deflate"
format have two additional rules:
* All codes of a given bit length have lexicographically
consecutive values, in the same order as the symbols
they represent;
* Shorter codes lexicographically precede longer codes.
Deutsch Informational [Page 7]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
We could recode the example above to follow this rule as
follows, assuming that the order of the alphabet is ABCD:
Symbol Code
------ ----
A 10
B 0
C 110
D 111
I.e., 0 precedes 10 which precedes 11x, and 110 and 111 are
lexicographically consecutive.
Given this rule, we can define the Huffman code for an alphabet
just by giving the bit lengths of the codes for each symbol of
the alphabet in order; this is sufficient to determine the
actual codes. In our example, the code is completely defined
by the sequence of bit lengths (2, 1, 3, 3). The following
algorithm generates the codes as integers, intended to be read
from most- to least-significant bit. The code lengths are
initially in tree[I].Len; the codes are produced in
tree[I].Code.
1) Count the number of codes for each code length. Let
bl_count[N] be the number of codes of length N, N >= 1.
2) Find the numerical value of the smallest code for each
code length:
code = 0;
bl_count[0] = 0;
for (bits = 1; bits <= MAX_BITS; bits++) {
code = (code + bl_count[bits-1]) << 1;
next_code[bits] = code;
}
3) Assign numerical values to all codes, using consecutive
values for all codes of the same length with the base
values determined at step 2. Codes that are never used
(which have a bit length of zero) must not be assigned a
value.
for (n = 0; n <= max_code; n++) {
len = tree[n].Len;
if (len != 0) {
tree[n].Code = next_code[len];
next_code[len]++;
}
Deutsch Informational [Page 8]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
}
Example:
Consider the alphabet ABCDEFGH, with bit lengths (3, 3, 3, 3,
3, 2, 4, 4). After step 1, we have:
N bl_count[N]
- -----------
2 1
3 5
4 2
Step 2 computes the following next_code values:
N next_code[N]
- ------------
1 0
2 0
3 2
4 14
Step 3 produces the following code values:
Symbol Length Code
------ ------ ----
A 3 010
B 3 011
C 3 100
D 3 101
E 3 110
F 2 00
G 4 1110
H 4 1111
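A compact Zig sketch of steps 1 to 3 for this ABCDEFGH example (the max_bits constant and array sizes are specific to the example):

const std = @import("std");
const testing = std.testing;

test "canonical Huffman codes for ABCDEFGH (illustrative)" {
    const max_bits = 4;
    const lens = [_]u4{ 3, 3, 3, 3, 3, 2, 4, 4 }; // bit lengths for A..H
    // Step 1: count the number of codes of each length.
    var bl_count = [_]u16{0} ** (max_bits + 1);
    for (lens) |len| bl_count[len] += 1;
    // Step 2: smallest code value for each length.
    var next_code = [_]u16{0} ** (max_bits + 1);
    var code: u16 = 0;
    var bits: usize = 1;
    while (bits <= max_bits) : (bits += 1) {
        code = (code + bl_count[bits - 1]) << 1;
        next_code[bits] = code;
    }
    // Step 3: assign consecutive values per length, in symbol order.
    var codes: [lens.len]u16 = undefined;
    for (lens) |len, i| {
        codes[i] = next_code[len];
        next_code[len] += 1;
    }
    // Matches the table above: A=010, B=011, ..., F=00, G=1110, H=1111.
    testing.expectEqualSlices(u16, &[_]u16{ 2, 3, 4, 5, 6, 0, 14, 15 }, &codes);
}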
3.2.3. Details of block format
Each block of compressed data begins with 3 header bits
containing the following data:
first bit BFINAL
next 2 bits BTYPE
Note that the header bits do not necessarily begin on a byte
boundary, since a block does not necessarily occupy an integral
number of bytes.
Deutsch Informational [Page 9]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
BFINAL is set if and only if this is the last block of the data
set.
BTYPE specifies how the data are compressed, as follows:
00 - no compression
01 - compressed with fixed Huffman codes
10 - compressed with dynamic Huffman codes
11 - reserved (error)
The only difference between the two compressed cases is how the
Huffman codes for the literal/length and distance alphabets are
defined.
In all cases, the decoding algorithm for the actual data is as
follows:
         do
            read block header from input stream.
            if stored with no compression
               skip any remaining bits in current partially
                  processed byte
               read LEN and NLEN (see next section)
               copy LEN bytes of data to output
            otherwise
               if compressed with dynamic Huffman codes
                  read representation of code trees (see
                     subsection below)
               loop (until end of block code recognized)
                  decode literal/length value from input stream
                  if value < 256
                     copy value (literal byte) to output stream
                  otherwise
                     if value = end of block (256)
                        break from loop
                     otherwise (value = 257..285)
                        decode distance from input stream
                        move backwards distance bytes in the output
                        stream, and copy length bytes from this
                        position to the output stream.
               end loop
         while not last block
Note that a duplicated string reference may refer to a string
in a previous block; i.e., the backward distance may cross one
or more block boundaries. However a distance cannot refer past
the beginning of the output stream. (An application using a
Deutsch Informational [Page 10]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
preset dictionary might discard part of the output stream; a
distance can refer to that part of the output stream anyway)
Note also that the referenced string may overlap the current
position; for example, if the last 2 bytes decoded have values
X and Y, a string reference with <length = 5, distance = 2>
adds X,Y,X,Y,X to the output stream.
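This overlapping-copy rule is what the new window.copyFrom in lib/std/compress/deflate.zig implements one byte at a time; the same idea as a standalone sketch (the ArrayList-based test is purely illustrative):

const std = @import("std");
const testing = std.testing;

test "overlapping back-reference copy (illustrative)" {
    // Output so far ends in X, Y; apply <length = 5, distance = 2>.
    var out = std.ArrayList(u8).init(testing.allocator);
    defer out.deinit();
    try out.appendSlice("XY");
    const distance = 2;
    const length = 5;
    var i: usize = 0;
    while (i < length) : (i += 1) {
        // Copy one byte at a time so the copy may overlap its own output.
        try out.append(out.items[out.items.len - distance]);
    }
    testing.expectEqualSlices(u8, "XYXYXYX", out.items);
}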
We now specify each compression method in turn.
3.2.4. Non-compressed blocks (BTYPE=00)
Any bits of input up to the next byte boundary are ignored.
The rest of the block consists of the following information:
     0   1   2   3   4...
   +---+---+---+---+================================+
   |  LEN  | NLEN  |... LEN bytes of literal data...|
   +---+---+---+---+================================+
LEN is the number of data bytes in the block. NLEN is the
one's complement of LEN.
3.2.5. Compressed blocks (length and distance codes)
As noted above, encoded data blocks in the "deflate" format
consist of sequences of symbols drawn from three conceptually
distinct alphabets: either literal bytes, from the alphabet of
byte values (0..255), or <length, backward distance> pairs,
where the length is drawn from (3..258) and the distance is
drawn from (1..32,768). In fact, the literal and length
alphabets are merged into a single alphabet (0..285), where
values 0..255 represent literal bytes, the value 256 indicates
end-of-block, and values 257..285 represent length codes
(possibly in conjunction with extra bits following the symbol
code) as follows:
Deutsch Informational [Page 11]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
                Extra               Extra               Extra
           Code Bits Length(s) Code Bits Lengths   Code Bits Length(s)
           ---- ---- ------     ---- ---- -------   ---- ---- -------
            257   0     3       267   1   15,16     277   4   67-82
            258   0     4       268   1   17,18     278   4   83-98
            259   0     5       269   2   19-22     279   4   99-114
            260   0     6       270   2   23-26     280   4  115-130
            261   0     7       271   2   27-30     281   5  131-162
            262   0     8       272   2   31-34     282   5  163-194
            263   0     9       273   3   35-42     283   5  195-226
            264   0    10       274   3   43-50     284   5  227-257
            265   1  11,12      275   3   51-58     285   0   258
            266   1  13,14      276   3   59-66
The extra bits should be interpreted as a machine integer
stored with the most-significant bit first, e.g., bits 1110
represent the value 14.
                 Extra           Extra                Extra
            Code Bits Dist  Code Bits   Dist     Code Bits Distance
            ---- ---- ----  ---- ----  ------    ---- ---- --------
              0   0    1     10   4     33-48    20    9   1025-1536
              1   0    2     11   4     49-64    21    9   1537-2048
              2   0    3     12   5     65-96    22   10   2049-3072
              3   0    4     13   5     97-128   23   10   3073-4096
              4   1   5,6    14   6    129-192   24   11   4097-6144
              5   1   7,8    15   6    193-256   25   11   6145-8192
              6   2   9-12   16   7    257-384   26   12  8193-12288
              7   2  13-16   17   7    385-512   27   12 12289-16384
              8   3  17-24   18   8    513-768   28   13 16385-24576
              9   3  25-32   19   8   769-1024   29   13 24577-32768
3.2.6. Compression with fixed Huffman codes (BTYPE=01)
The Huffman codes for the two alphabets are fixed, and are not
represented explicitly in the data. The Huffman code lengths
for the literal/length alphabet are:
          Lit Value    Bits        Codes
          ---------    ----        -----
            0 - 143     8          00110000 through
                                   10111111
          144 - 255     9          110010000 through
                                   111111111
          256 - 279     7          0000000 through
                                   0010111
          280 - 287     8          11000000 through
                                   11000111
Deutsch Informational [Page 12]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
The code lengths are sufficient to generate the actual codes,
as described above; we show the codes in the table for added
clarity. Literal/length values 286-287 will never actually
occur in the compressed data, but participate in the code
construction.
Distance codes 0-31 are represented by (fixed-length) 5-bit
codes, with possible additional bits as shown in the table
shown in Paragraph 3.2.5, above. Note that distance codes 30-
31 will never actually occur in the compressed data.
3.2.7. Compression with dynamic Huffman codes (BTYPE=10)
The Huffman codes for the two alphabets appear in the block
immediately after the header bits and before the actual
compressed data, first the literal/length code and then the
distance code. Each code is defined by a sequence of code
lengths, as discussed in Paragraph 3.2.2, above. For even
greater compactness, the code length sequences themselves are
compressed using a Huffman code. The alphabet for code lengths
is as follows:
0 - 15: Represent code lengths of 0 - 15
16: Copy the previous code length 3 - 6 times.
The next 2 bits indicate repeat length
(0 = 3, ... , 3 = 6)
Example: Codes 8, 16 (+2 bits 11),
16 (+2 bits 10) will expand to
12 code lengths of 8 (1 + 6 + 5)
17: Repeat a code length of 0 for 3 - 10 times.
(3 bits of length)
18: Repeat a code length of 0 for 11 - 138 times
(7 bits of length)
A code length of 0 indicates that the corresponding symbol in
the literal/length or distance alphabet will not occur in the
block, and should not participate in the Huffman code
construction algorithm given earlier. If only one distance
code is used, it is encoded using one bit, not zero bits; in
this case there is a single code length of one, with one unused
code. One distance code of zero bits means that there are no
distance codes used at all (the data is all literals).
We can now define the format of the block:
5 Bits: HLIT, # of Literal/Length codes - 257 (257 - 286)
5 Bits: HDIST, # of Distance codes - 1 (1 - 32)
4 Bits: HCLEN, # of Code Length codes - 4 (4 - 19)
Deutsch Informational [Page 13]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
(HCLEN + 4) x 3 bits: code lengths for the code length
alphabet given just above, in the order: 16, 17, 18,
0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15
These code lengths are interpreted as 3-bit integers
(0-7); as above, a code length of 0 means the
corresponding symbol (literal/length or distance code
length) is not used.
HLIT + 257 code lengths for the literal/length alphabet,
encoded using the code length Huffman code
HDIST + 1 code lengths for the distance alphabet,
encoded using the code length Huffman code
The actual compressed data of the block,
encoded using the literal/length and distance Huffman
codes
The literal/length symbol 256 (end of data),
encoded using the literal/length Huffman code
The code length repeat codes can cross from HLIT + 257 to the
HDIST + 1 code lengths. In other words, all code lengths form
a single sequence of HLIT + HDIST + 258 values.
3.3. Compliance
A compressor may limit further the ranges of values specified in
the previous section and still be compliant; for example, it may
limit the range of backward pointers to some value smaller than
32K. Similarly, a compressor may limit the size of blocks so that
a compressible block fits in memory.
A compliant decompressor must accept the full range of possible
values defined in the previous section, and must accept blocks of
arbitrary size.
4. Compression algorithm details
While it is the intent of this document to define the "deflate"
compressed data format without reference to any particular
compression algorithm, the format is related to the compressed
formats produced by LZ77 (Lempel-Ziv 1977, see reference [2] below);
since many variations of LZ77 are patented, it is strongly
recommended that the implementor of a compressor follow the general
algorithm presented here, which is known not to be patented per se.
The material in this section is not part of the definition of the
Deutsch Informational [Page 14]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
specification per se, and a compressor need not follow it in order to
be compliant.
The compressor terminates a block when it determines that starting a
new block with fresh trees would be useful, or when the block size
fills up the compressor's block buffer.
The compressor uses a chained hash table to find duplicated strings,
using a hash function that operates on 3-byte sequences. At any
given point during compression, let XYZ be the next 3 input bytes to
be examined (not necessarily all different, of course). First, the
compressor examines the hash chain for XYZ. If the chain is empty,
the compressor simply writes out X as a literal byte and advances one
byte in the input. If the hash chain is not empty, indicating that
the sequence XYZ (or, if we are unlucky, some other 3 bytes with the
same hash function value) has occurred recently, the compressor
compares all strings on the XYZ hash chain with the actual input data
sequence starting at the current point, and selects the longest
match.
The compressor searches the hash chains starting with the most recent
strings, to favor small distances and thus take advantage of the
Huffman encoding. The hash chains are singly linked. There are no
deletions from the hash chains; the algorithm simply discards matches
that are too old. To avoid a worst-case situation, very long hash
chains are arbitrarily truncated at a certain length, determined by a
run-time parameter.
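None of this compressor machinery is part of the new decompressor in this commit, but the chained-hash match search described above can be sketched roughly as follows; hash3, longestMatch, head, and prev are illustrative names, and details such as the hash function and the window limit are assumptions:

const std = @import("std");

const Match = struct { distance: usize, length: usize };

// Illustrative 3-byte hash; real compressors use carefully tuned hashes.
fn hash3(input: []const u8, pos: usize) usize {
    return (@as(usize, input[pos]) * 251 + input[pos + 1]) * 251 + input[pos + 2];
}

// Walk the singly linked hash chain for the 3 bytes at `pos` and return the
// longest earlier match. `head[h]` is the most recent position whose 3-byte
// hash was `h` (or null) and `prev[p]` is the previous position on the same
// chain; chains only ever contain positions earlier than `pos`.
fn longestMatch(input: []const u8, pos: usize, head: []const ?usize, prev: []const ?usize) Match {
    var best = Match{ .distance = 0, .length = 0 };
    var candidate = head[hash3(input, pos) % head.len];
    while (candidate) |cand| : (candidate = prev[cand]) {
        if (pos - cand > 32768) break; // beyond the 32K window, stop searching
        // Compare the candidate with the input at `pos`, capped at 258 bytes.
        var len: usize = 0;
        while (pos + len < input.len and len < 258 and input[cand + len] == input[pos + len]) : (len += 1) {}
        if (len >= 3 and len > best.length) {
            best = Match{ .distance = pos - cand, .length = len };
        }
    }
    return best;
}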
To improve overall compression, the compressor optionally defers the
selection of matches ("lazy matching"): after a match of length N has
been found, the compressor searches for a longer match starting at
the next input byte. If it finds a longer match, it truncates the
previous match to a length of one (thus producing a single literal
byte) and then emits the longer match. Otherwise, it emits the
original match, and, as described above, advances N bytes before
continuing.
Run-time parameters also control this "lazy match" procedure. If
compression ratio is most important, the compressor attempts a
complete second search regardless of the length of the first match.
In the normal case, if the current match is "long enough", the
compressor reduces the search for a longer match, thus speeding up
the process. If speed is most important, the compressor inserts new
strings in the hash table only when no match was found, or when the
match is not "too long". This degrades the compression ratio but
saves time since there are both fewer insertions and fewer searches.
Deutsch Informational [Page 15]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
5. References
[1] Huffman, D. A., "A Method for the Construction of Minimum
Redundancy Codes", Proceedings of the Institute of Radio
Engineers, September 1952, Volume 40, Number 9, pp. 1098-1101.
[2] Ziv J., Lempel A., "A Universal Algorithm for Sequential Data
Compression", IEEE Transactions on Information Theory, Vol. 23,
No. 3, pp. 337-343.
[3] Gailly, J.-L., and Adler, M., ZLIB documentation and sources,
available in ftp://ftp.uu.net/pub/archiving/zip/doc/
[4] Gailly, J.-L., and Adler, M., GZIP documentation and sources,
available as gzip-*.tar in ftp://prep.ai.mit.edu/pub/gnu/
[5] Schwartz, E. S., and Kallick, B. "Generating a canonical prefix
encoding." Comm. ACM, 7,3 (Mar. 1964), pp. 166-169.
[6] Hirschberg and Lelewer, "Efficient decoding of prefix codes,"
Comm. ACM, 33,4, April 1990, pp. 449-459.
6. Security Considerations
Any data compression method involves the reduction of redundancy in
the data. Consequently, any corruption of the data is likely to have
severe effects and be difficult to correct. Uncompressed text, on
the other hand, will probably still be readable despite the presence
of some corrupted bytes.
It is recommended that systems using this data format provide some
means of validating the integrity of the compressed data. See
reference [3], for example.
7. Source code
Source code for a C language implementation of a "deflate" compliant
compressor and decompressor is available within the zlib package at
ftp://ftp.uu.net/pub/archiving/zip/zlib/.
8. Acknowledgements
Trademarks cited in this document are the property of their
respective owners.
Phil Katz designed the deflate format. Jean-Loup Gailly and Mark
Adler wrote the related software described in this specification.
Glenn Randers-Pehrson converted this document to RFC and HTML format.
Deutsch Informational [Page 16]
RFC 1951 DEFLATE Compressed Data Format Specification May 1996
9. Author's Address
L. Peter Deutsch
Aladdin Enterprises
203 Santa Margarita Ave.
Menlo Park, CA 94025
Phone: (415) 322-0103 (AM only)
FAX: (415) 322-1734
EMail: <ghost@aladdin.com>
Questions about the technical content of this specification can be
sent by email to:
Jean-Loup Gailly <gzip@prep.ai.mit.edu> and
Mark Adler <madler@alumni.caltech.edu>
Editorial comments on this specification can be sent by email to:
L. Peter Deutsch <ghost@aladdin.com> and
Glenn Randers-Pehrson <randeg@alumni.rpi.edu>
Deutsch Informational [Page 17]

Binary file not shown.

Binary file not shown.

Binary file not shown.

lib/std/compress/zlib.zig Normal file

@ -0,0 +1,178 @@
// SPDX-License-Identifier: MIT
// Copyright (c) 2015-2020 Zig Contributors
// This file is part of [zig](https://ziglang.org/), which is MIT licensed.
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.
//
// Decompressor for ZLIB data streams (RFC1950)
const std = @import("std");
const io = std.io;
const fs = std.fs;
const testing = std.testing;
const mem = std.mem;
const deflate = std.compress.deflate;
pub fn ZlibStream(comptime ReaderType: type) type {
return struct {
const Self = @This();
pub const Error = ReaderType.Error ||
deflate.InflateStream(ReaderType).Error ||
error{ WrongChecksum, Unsupported };
pub const Reader = io.Reader(*Self, Error, read);
allocator: *mem.Allocator,
inflater: deflate.InflateStream(ReaderType),
in_reader: ReaderType,
hasher: std.hash.Adler32,
window_slice: []u8,
fn init(allocator: *mem.Allocator, source: ReaderType) !Self {
// Zlib header format is specified in RFC1950
const header = try source.readBytesNoEof(2);
const CM = @truncate(u4, header[0]);
const CINFO = @truncate(u4, header[0] >> 4);
const FCHECK = @truncate(u5, header[1]);
const FDICT = @truncate(u1, header[1] >> 5);
if ((@as(u16, header[0]) << 8 | header[1]) % 31 != 0)
return error.BadHeader;
// The CM field must be 8 to indicate the use of DEFLATE
if (CM != 8) return error.InvalidCompression;
// CINFO is the base-2 logarithm of the window size, minus 8.
// Values above 7 are unspecified and therefore rejected.
if (CINFO > 7) return error.InvalidWindowSize;
const window_size: u16 = @as(u16, 1) << (CINFO + 8);
// TODO: Support this case
if (FDICT != 0)
return error.Unsupported;
var window_slice = try allocator.alloc(u8, window_size);
return Self{
.allocator = allocator,
.inflater = deflate.inflateStream(source, window_slice),
.in_reader = source,
.hasher = std.hash.Adler32.init(),
.window_slice = window_slice,
};
}
fn deinit(self: *Self) void {
self.allocator.free(self.window_slice);
}
// Implements the io.Reader interface
pub fn read(self: *Self, buffer: []u8) Error!usize {
if (buffer.len == 0)
return 0;
// Read from the compressed stream and update the computed checksum
const r = try self.inflater.read(buffer);
if (r != 0) {
self.hasher.update(buffer[0..r]);
return r;
}
// We've reached the end of stream, check if the checksum matches
const hash = try self.in_reader.readIntBig(u32);
if (hash != self.hasher.final())
return error.WrongChecksum;
return 0;
}
pub fn reader(self: *Self) Reader {
return .{ .context = self };
}
};
}
pub fn zlibStream(allocator: *mem.Allocator, reader: anytype) !ZlibStream(@TypeOf(reader)) {
return ZlibStream(@TypeOf(reader)).init(allocator, reader);
}
fn testReader(data: []const u8, comptime expected: []const u8) !void {
var in_stream = io.fixedBufferStream(data);
var zlib_stream = try zlibStream(testing.allocator, in_stream.reader());
defer zlib_stream.deinit();
// Read and decompress the whole file
const buf = try zlib_stream.reader().readAllAlloc(testing.allocator, std.math.maxInt(usize));
defer testing.allocator.free(buf);
// Calculate its SHA256 hash and check it against the reference
var hash: [32]u8 = undefined;
std.crypto.hash.sha2.Sha256.hash(buf, hash[0..], .{});
assertEqual(expected, &hash);
}
// Assert `expected` == `input` where `input` is a bytestring.
pub fn assertEqual(comptime expected: []const u8, input: []const u8) void {
var expected_bytes: [expected.len / 2]u8 = undefined;
for (expected_bytes) |*r, i| {
r.* = std.fmt.parseInt(u8, expected[2 * i .. 2 * i + 2], 16) catch unreachable;
}
testing.expectEqualSlices(u8, &expected_bytes, input);
}
// All the test cases are obtained by compressing the RFC1950 text
//
// https://tools.ietf.org/rfc/rfc1950.txt length=36944 bytes
// SHA256=5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009
test "compressed data" {
// Compressed with compression level = 0
try testReader(
@embedFile("rfc1951.txt.z.0"),
"5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
);
// Compressed with compression level = 9
try testReader(
@embedFile("rfc1951.txt.z.9"),
"5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
);
// Compressed with compression level = 9 and fixed Huffman codes
try testReader(
@embedFile("rfc1951.txt.fixed.z.9"),
"5ebf4b5b7fe1c3a0c0ab9aa3ac8c0f3853a7dc484905e76e03b0b0f301350009",
);
}
test "sanity checks" {
// Truncated header
testing.expectError(
error.EndOfStream,
testReader(&[_]u8{0x78}, ""),
);
// Failed FCHECK check
testing.expectError(
error.BadHeader,
testReader(&[_]u8{ 0x78, 0x9D }, ""),
);
// Wrong CM
testing.expectError(
error.InvalidCompression,
testReader(&[_]u8{ 0x79, 0x94 }, ""),
);
// Wrong CINFO
testing.expectError(
error.InvalidWindowSize,
testReader(&[_]u8{ 0x88, 0x98 }, ""),
);
// Wrong checksum
testing.expectError(
error.WrongChecksum,
testReader(&[_]u8{ 0x78, 0xda, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00 }, ""),
);
// Truncated checksum
testing.expectError(
error.EndOfStream,
testReader(&[_]u8{ 0x78, 0xda, 0x03, 0x00, 0x00 }, ""),
);
}
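// Hedged addition, not part of the original commit: a small worked example of
// the header arithmetic the sanity checks above rely on. A zlib stream starts
// with the CMF and FLG bytes, and (CMF * 256 + FLG) must be divisible by 31.
test "zlib header arithmetic sketch" {
    const cmf: u16 = 0x78; // CM = 8 (deflate), CINFO = 7 (32 KiB window)
    const flg: u16 = 0x9C; // FLEVEL = 2, FDICT = 0, FCHECK = 28
    testing.expect((cmf * 256 + flg) % 31 == 0);
    // 0x78 0x9D fails the same check, which is why the test above expects error.BadHeader.
    testing.expect((cmf * 256 + 0x9D) % 31 != 0);
}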

View File

@ -9,10 +9,10 @@ const testing = std.testing;
/// Read a single unsigned LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readULEB128(comptime T: type, reader: anytype) !T {
const U = if (T.bit_count < 8) u8 else T;
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
const ShiftT = std.math.Log2Int(U);
const max_group = (U.bit_count + 6) / 7;
const max_group = (@typeInfo(U).Int.bits + 6) / 7;
var value = @as(U, 0);
var group = @as(ShiftT, 0);
@ -40,7 +40,7 @@ pub fn readULEB128(comptime T: type, reader: anytype) !T {
/// Write a single unsigned integer as unsigned LEB128 to the given writer.
pub fn writeULEB128(writer: anytype, uint_value: anytype) !void {
const T = @TypeOf(uint_value);
const U = if (T.bit_count < 8) u8 else T;
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value = @intCast(U, uint_value);
while (true) {
@ -68,7 +68,7 @@ pub fn readULEB128Mem(comptime T: type, ptr: *[]const u8) !T {
/// returning the number of bytes written.
pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
const T = @TypeOf(uint_value);
const max_group = (T.bit_count + 6) / 7;
const max_group = (@typeInfo(T).Int.bits + 6) / 7;
var buf = std.io.fixedBufferStream(ptr);
try writeULEB128(buf.writer(), uint_value);
return buf.pos;
@ -77,11 +77,11 @@ pub fn writeULEB128Mem(ptr: []u8, uint_value: anytype) !usize {
/// Read a single signed LEB128 value from the given reader as type T,
/// or error.Overflow if the value cannot fit.
pub fn readILEB128(comptime T: type, reader: anytype) !T {
const S = if (T.bit_count < 8) i8 else T;
const U = std.meta.Int(false, S.bit_count);
const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
const U = std.meta.Int(false, @typeInfo(S).Int.bits);
const ShiftU = std.math.Log2Int(U);
const max_group = (U.bit_count + 6) / 7;
const max_group = (@typeInfo(U).Int.bits + 6) / 7;
var value = @as(U, 0);
var group = @as(ShiftU, 0);
@ -97,7 +97,7 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
if (@bitCast(S, temp) >= 0) return error.Overflow;
// and all the overflowed bits are 1
const remaining_shift = @intCast(u3, U.bit_count - @as(u16, shift));
const remaining_shift = @intCast(u3, @typeInfo(U).Int.bits - @as(u16, shift));
const remaining_bits = @bitCast(i8, byte | 0x80) >> remaining_shift;
if (remaining_bits != -1) return error.Overflow;
}
@ -127,8 +127,8 @@ pub fn readILEB128(comptime T: type, reader: anytype) !T {
/// Write a single signed integer as signed LEB128 to the given writer.
pub fn writeILEB128(writer: anytype, int_value: anytype) !void {
const T = @TypeOf(int_value);
const S = if (T.bit_count < 8) i8 else T;
const U = std.meta.Int(false, S.bit_count);
const S = if (@typeInfo(T).Int.bits < 8) i8 else T;
const U = std.meta.Int(false, @typeInfo(S).Int.bits);
var value = @intCast(S, int_value);
@ -173,7 +173,7 @@ pub fn writeILEB128Mem(ptr: []u8, int_value: anytype) !usize {
/// different value without shifting all the following code.
pub fn writeUnsignedFixed(comptime l: usize, ptr: *[l]u8, int: std.meta.Int(false, l * 7)) void {
const T = @TypeOf(int);
const U = if (T.bit_count < 8) u8 else T;
const U = if (@typeInfo(T).Int.bits < 8) u8 else T;
var value = @intCast(U, int);
comptime var i = 0;
@ -346,28 +346,29 @@ test "deserialize unsigned LEB128" {
fn test_write_leb128(value: anytype) !void {
const T = @TypeOf(value);
const t_signed = @typeInfo(T).Int.is_signed;
const writeStream = if (T.is_signed) writeILEB128 else writeULEB128;
const writeMem = if (T.is_signed) writeILEB128Mem else writeULEB128Mem;
const readStream = if (T.is_signed) readILEB128 else readULEB128;
const readMem = if (T.is_signed) readILEB128Mem else readULEB128Mem;
const writeStream = if (t_signed) writeILEB128 else writeULEB128;
const writeMem = if (t_signed) writeILEB128Mem else writeULEB128Mem;
const readStream = if (t_signed) readILEB128 else readULEB128;
const readMem = if (t_signed) readILEB128Mem else readULEB128Mem;
// decode to a larger bit size too, to ensure sign extension
// is working as expected
const larger_type_bits = ((T.bit_count + 8) / 8) * 8;
const B = std.meta.Int(T.is_signed, larger_type_bits);
const larger_type_bits = ((@typeInfo(T).Int.bits + 8) / 8) * 8;
const B = std.meta.Int(t_signed, larger_type_bits);
const bytes_needed = bn: {
const S = std.meta.Int(T.is_signed, @sizeOf(T) * 8);
if (T.bit_count <= 7) break :bn @as(u16, 1);
const S = std.meta.Int(t_signed, @sizeOf(T) * 8);
if (@typeInfo(T).Int.bits <= 7) break :bn @as(u16, 1);
const unused_bits = if (value < 0) @clz(T, ~value) else @clz(T, value);
const used_bits: u16 = (T.bit_count - unused_bits) + @boolToInt(T.is_signed);
const used_bits: u16 = (@typeInfo(T).Int.bits - unused_bits) + @boolToInt(t_signed);
if (used_bits <= 7) break :bn @as(u16, 1);
break :bn ((used_bits + 6) / 7);
};
const max_groups = if (T.bit_count == 0) 1 else (T.bit_count + 6) / 7;
const max_groups = if (@typeInfo(T).Int.bits == 0) 1 else (@typeInfo(T).Int.bits + 6) / 7;
var buf: [max_groups]u8 = undefined;
var fbs = std.io.fixedBufferStream(&buf);
@ -414,7 +415,7 @@ test "serialize unsigned LEB128" {
const T = std.meta.Int(false, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
var i = @as(std.meta.Int(false, T.bit_count + 1), min);
var i = @as(std.meta.Int(false, @typeInfo(T).Int.bits + 1), min);
while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
@ -432,7 +433,7 @@ test "serialize signed LEB128" {
const T = std.meta.Int(true, t);
const min = std.math.minInt(T);
const max = std.math.maxInt(T);
var i = @as(std.meta.Int(true, T.bit_count + 1), min);
var i = @as(std.meta.Int(true, @typeInfo(T).Int.bits + 1), min);
while (i <= max) : (i += 1) try test_write_leb128(@intCast(T, i));
}
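// Hedged addition, not part of the original commit: a minimal round trip
// through writeULEB128/readULEB128 using a fixed buffer. 300 encodes as the
// two bytes 0xAC 0x02 (low seven bits first, continuation bit set).
test "ULEB128 round trip sketch" {
    var buf: [8]u8 = undefined;
    var fbs = std.io.fixedBufferStream(&buf);

    try writeULEB128(fbs.writer(), @as(u32, 300));
    testing.expectEqualSlices(u8, &[_]u8{ 0xAC, 0x02 }, fbs.getWritten());

    try fbs.seekTo(0);
    testing.expect((try readULEB128(u32, fbs.reader())) == 300);
}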

View File

@ -66,6 +66,7 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
/// - output numeric value in hexadecimal notation
/// - `s`: print a pointer-to-many as a c-string, use zero-termination
/// - `B` and `Bi`: output a memory size in either metric (1000) or power-of-two (1024) based notation. works for both float and integer values.
/// - `e` and `E`: if printing a string, escape non-printable characters
/// - `e`: output floating point value in scientific notation
/// - `d`: output numeric value in decimal notation
/// - `b`: output integer value in binary notation
@ -81,6 +82,8 @@ fn peekIsAlign(comptime fmt: []const u8) bool {
/// This allows user types to be formatted in a logical manner instead of dumping all fields of the type.
///
/// A user type may be a `struct`, `vector`, `union` or `enum` type.
///
/// To print literal curly braces, escape them by writing them twice, e.g. `{{` or `}}`.
pub fn format(
writer: anytype,
comptime fmt: []const u8,
@ -90,7 +93,7 @@ pub fn format(
if (@typeInfo(@TypeOf(args)) != .Struct) {
@compileError("Expected tuple or struct argument, found " ++ @typeName(@TypeOf(args)));
}
if (args.len > ArgSetType.bit_count) {
if (args.len > @typeInfo(ArgSetType).Int.bits) {
@compileError("32 arguments max are supported per format call");
}
@ -324,7 +327,7 @@ pub fn formatType(
max_depth: usize,
) @TypeOf(writer).Error!void {
if (comptime std.mem.eql(u8, fmt, "*")) {
try writer.writeAll(@typeName(@TypeOf(value).Child));
try writer.writeAll(@typeName(@typeInfo(@TypeOf(value)).Pointer.child));
try writer.writeAll("@");
try formatInt(@ptrToInt(value), 16, false, FormatOptions{}, writer);
return;
@ -429,12 +432,12 @@ pub fn formatType(
if (info.child == u8) {
return formatText(value, fmt, options, writer);
}
return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) });
},
.Enum, .Union, .Struct => {
return formatType(value.*, fmt, options, writer, max_depth);
},
else => return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) }),
else => return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) }),
},
.Many, .C => {
if (ptr_info.sentinel) |sentinel| {
@ -445,7 +448,7 @@ pub fn formatType(
return formatText(mem.span(value), fmt, options, writer);
}
}
return format(writer, "{}@{x}", .{ @typeName(T.Child), @ptrToInt(value) });
return format(writer, "{}@{x}", .{ @typeName(@typeInfo(T).Pointer.child), @ptrToInt(value) });
},
.Slice => {
if (fmt.len > 0 and ((fmt[0] == 'x') or (fmt[0] == 'X'))) {
@ -535,7 +538,7 @@ pub fn formatIntValue(
radix = 10;
uppercase = false;
} else if (comptime std.mem.eql(u8, fmt, "c")) {
if (@TypeOf(int_value).bit_count <= 8) {
if (@typeInfo(@TypeOf(int_value)).Int.bits <= 8) {
return formatAsciiChar(@as(u8, int_value), options, writer);
} else {
@compileError("Cannot print integer that is larger than 8 bits as a ascii");
@ -599,6 +602,16 @@ pub fn formatText(
try formatInt(c, 16, fmt[0] == 'X', FormatOptions{ .width = 2, .fill = '0' }, writer);
}
return;
} else if (comptime (std.mem.eql(u8, fmt, "e") or std.mem.eql(u8, fmt, "E"))) {
for (bytes) |c| {
if (std.ascii.isPrint(c)) {
try writer.writeByte(c);
} else {
try writer.writeAll("\\x");
try formatInt(c, 16, fmt[0] == 'E', FormatOptions{ .width = 2, .fill = '0' }, writer);
}
}
return;
} else {
@compileError("Unknown format string: '" ++ fmt ++ "'");
}
@ -934,7 +947,7 @@ pub fn formatInt(
} else
value;
if (@TypeOf(int_value).is_signed) {
if (@typeInfo(@TypeOf(int_value)).Int.is_signed) {
return formatIntSigned(int_value, base, uppercase, options, writer);
} else {
return formatIntUnsigned(int_value, base, uppercase, options, writer);
@ -976,9 +989,10 @@ fn formatIntUnsigned(
writer: anytype,
) !void {
assert(base >= 2);
var buf: [math.max(@TypeOf(value).bit_count, 1)]u8 = undefined;
const min_int_bits = comptime math.max(@TypeOf(value).bit_count, @TypeOf(base).bit_count);
const MinInt = std.meta.Int(@TypeOf(value).is_signed, min_int_bits);
const value_info = @typeInfo(@TypeOf(value)).Int;
var buf: [math.max(value_info.bits, 1)]u8 = undefined;
const min_int_bits = comptime math.max(value_info.bits, @typeInfo(@TypeOf(base)).Int.bits);
const MinInt = std.meta.Int(value_info.is_signed, min_int_bits);
var a: MinInt = value;
var index: usize = buf.len;
@ -1319,6 +1333,12 @@ test "slice" {
try testFmt("buf: Test\n Other text", "buf: {s}\n Other text", .{"Test"});
}
test "escape non-printable" {
try testFmt("abc", "{e}", .{"abc"});
try testFmt("ab\\xffc", "{e}", .{"ab\xffc"});
try testFmt("ab\\xFFc", "{E}", .{"ab\xffc"});
}
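// Hedged addition, not part of the original commit: the doubled-brace escape
// documented above, exercised the same way as the surrounding tests.
test "literal curly braces" {
    try testFmt("{ 42 }", "{{ {} }}", .{42});
}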
test "pointer" {
{
const value = @intToPtr(*align(1) i32, 0xdeadbeef);

View File

@ -37,7 +37,9 @@
const std = @import("../std.zig");
const ascii = std.ascii;
const max_digits = 25;
// The mantissa field in FloatRepr is 64 bits wide and can hold at most 19
// decimal digits without overflowing
const max_digits = 19;
const f64_plus_zero: u64 = 0x0000000000000000;
const f64_minus_zero: u64 = 0x8000000000000000;
@ -372,7 +374,7 @@ test "fmt.parseFloat" {
const epsilon = 1e-7;
inline for ([_]type{ f16, f32, f64, f128 }) |T| {
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
testing.expectError(error.InvalidCharacter, parseFloat(T, ""));
testing.expectError(error.InvalidCharacter, parseFloat(T, " 1"));
@ -409,6 +411,7 @@ test "fmt.parseFloat" {
expect(approxEq(T, try parseFloat(T, "123142.1"), 123142.1, epsilon));
expect(approxEq(T, try parseFloat(T, "-123142.1124"), @as(T, -123142.1124), epsilon));
expect(approxEq(T, try parseFloat(T, "0.7062146892655368"), @as(T, 0.7062146892655368), epsilon));
expect(approxEq(T, try parseFloat(T, "2.71828182845904523536"), @as(T, 2.718281828459045), epsilon));
}
}
}

View File

@ -1437,26 +1437,32 @@ pub const Dir = struct {
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
pub fn readFileAlloc(self: Dir, allocator: *mem.Allocator, file_path: []const u8, max_bytes: usize) ![]u8 {
return self.readFileAllocOptions(allocator, file_path, max_bytes, @alignOf(u8), null);
return self.readFileAllocOptions(allocator, file_path, max_bytes, null, @alignOf(u8), null);
}
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
/// If `size_hint` is specified, the initial buffer size is calculated using
/// that value; otherwise the effective file size is used instead.
/// Allows specifying alignment and a sentinel value.
pub fn readFileAllocOptions(
self: Dir,
allocator: *mem.Allocator,
file_path: []const u8,
max_bytes: usize,
size_hint: ?usize,
comptime alignment: u29,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
var file = try self.openFile(file_path, .{});
defer file.close();
const stat_size = try file.getEndPos();
// If the file size doesn't fit in a usize it is certainly greater than
// `max_bytes`
const stat_size = size_hint orelse math.cast(usize, try file.getEndPos()) catch
return error.FileTooBig;
return file.readAllAllocOptions(allocator, stat_size, max_bytes, alignment, optional_sentinel);
return file.readToEndAllocOptions(allocator, max_bytes, stat_size, alignment, optional_sentinel);
}
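// Hedged usage sketch, not part of the original commit: read a hypothetical
// "config.txt" as a null-terminated, 8-byte-aligned buffer, capping the size
// at 64 KiB and passing no size hint.
fn readFileAllocSketch(self: Dir, allocator: *mem.Allocator) ![:0]align(8) u8 {
    return self.readFileAllocOptions(allocator, "config.txt", 64 * 1024, null, 8, 0);
}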
pub const DeleteTreeError = error{

View File

@ -363,31 +363,49 @@ pub const File = struct {
try os.futimens(self.handle, &times);
}
/// Reads all the bytes from the current position to the end of the file.
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
pub fn readAllAlloc(self: File, allocator: *mem.Allocator, stat_size: u64, max_bytes: usize) ![]u8 {
return self.readAllAllocOptions(allocator, stat_size, max_bytes, @alignOf(u8), null);
pub fn readToEndAlloc(self: File, allocator: *mem.Allocator, max_bytes: usize) ![]u8 {
return self.readToEndAllocOptions(allocator, max_bytes, null, @alignOf(u8), null);
}
/// Reads all the bytes from the current position to the end of the file.
/// On success, caller owns returned buffer.
/// If the file is larger than `max_bytes`, returns `error.FileTooBig`.
/// If `size_hint` is specified, the initial buffer size is calculated using
/// that value; otherwise an arbitrary value is used instead.
/// Allows specifying alignment and a sentinel value.
pub fn readAllAllocOptions(
pub fn readToEndAllocOptions(
self: File,
allocator: *mem.Allocator,
stat_size: u64,
max_bytes: usize,
size_hint: ?usize,
comptime alignment: u29,
comptime optional_sentinel: ?u8,
) !(if (optional_sentinel) |s| [:s]align(alignment) u8 else []align(alignment) u8) {
const size = math.cast(usize, stat_size) catch math.maxInt(usize);
if (size > max_bytes) return error.FileTooBig;
// If no size hint is provided, fall back to the size=0 code path
const size = size_hint orelse 0;
const buf = try allocator.allocWithOptions(u8, size, alignment, optional_sentinel);
errdefer allocator.free(buf);
// The file size returned by stat is used as a hint to set the buffer
// size. If the reported size is zero, as happens on Linux for files
// in /proc, a small buffer is allocated instead.
const initial_cap = (if (size > 0) size else 1024) + @boolToInt(optional_sentinel != null);
var array_list = try std.ArrayListAligned(u8, alignment).initCapacity(allocator, initial_cap);
defer array_list.deinit();
try self.reader().readNoEof(buf);
return buf;
self.reader().readAllArrayList(&array_list, max_bytes) catch |err| switch (err) {
error.StreamTooLong => return error.FileTooBig,
else => |e| return e,
};
if (optional_sentinel) |sentinel| {
try array_list.append(sentinel);
const buf = array_list.toOwnedSlice();
return buf[0 .. buf.len - 1 :sentinel];
} else {
return array_list.toOwnedSlice();
}
}
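// Hedged usage sketch, not part of the original commit: read the remainder of
// an already opened file into a null-terminated buffer, letting the
// implementation above fall back to its 1024-byte initial capacity.
fn readToEndAllocSketch(self: File, allocator: *mem.Allocator) ![:0]u8 {
    return self.readToEndAllocOptions(allocator, 1024 * 1024, null, @alignOf(u8), 0);
}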
pub const ReadError = os.ReadError;

View File

@ -188,30 +188,30 @@ test "readAllAlloc" {
var file = try tmp_dir.dir.createFile("test_file", .{ .read = true });
defer file.close();
const buf1 = try file.readAllAlloc(testing.allocator, 0, 1024);
const buf1 = try file.readToEndAlloc(testing.allocator, 1024);
defer testing.allocator.free(buf1);
testing.expect(buf1.len == 0);
const write_buf: []const u8 = "this is a test.\nthis is a test.\nthis is a test.\nthis is a test.\n";
try file.writeAll(write_buf);
try file.seekTo(0);
const file_size = try file.getEndPos();
// max_bytes > file_size
const buf2 = try file.readAllAlloc(testing.allocator, file_size, 1024);
const buf2 = try file.readToEndAlloc(testing.allocator, 1024);
defer testing.allocator.free(buf2);
testing.expectEqual(write_buf.len, buf2.len);
testing.expect(std.mem.eql(u8, write_buf, buf2));
try file.seekTo(0);
// max_bytes == file_size
const buf3 = try file.readAllAlloc(testing.allocator, file_size, write_buf.len);
const buf3 = try file.readToEndAlloc(testing.allocator, write_buf.len);
defer testing.allocator.free(buf3);
testing.expectEqual(write_buf.len, buf3.len);
testing.expect(std.mem.eql(u8, write_buf, buf3));
try file.seekTo(0);
// max_bytes < file_size
testing.expectError(error.FileTooBig, file.readAllAlloc(testing.allocator, file_size, write_buf.len - 1));
testing.expectError(error.FileTooBig, file.readToEndAlloc(testing.allocator, write_buf.len - 1));
}
test "directory operations on files" {

View File

@ -113,7 +113,7 @@ pub fn hash(hasher: anytype, key: anytype, comptime strat: HashStrategy) void {
.Array => hashArray(hasher, key, strat),
.Vector => |info| {
if (info.child.bit_count % 8 == 0) {
if (std.meta.bitCount(info.child) % 8 == 0) {
// If there are no unused bits in the child type, we can just hash
// this as an array of bytes.
hasher.update(mem.asBytes(&key));

View File

@ -915,6 +915,10 @@ pub fn testAllocator(base_allocator: *mem.Allocator) !void {
testing.expect(slice.len == 10);
allocator.free(slice);
const zero_bit_ptr = try allocator.create(u0);
zero_bit_ptr.* = 0;
allocator.destroy(zero_bit_ptr);
}
pub fn testAllocatorAligned(base_allocator: *mem.Allocator, comptime alignment: u29) !void {
@ -952,7 +956,7 @@ pub fn testAllocatorLargeAlignment(base_allocator: *mem.Allocator) mem.Allocator
// very near usize?
if (mem.page_size << 2 > maxInt(usize)) return;
const USizeShift = std.meta.Int(false, std.math.log2(usize.bit_count));
const USizeShift = std.meta.Int(false, std.math.log2(std.meta.bitCount(usize)));
const large_align = @as(u29, mem.page_size << 2);
var align_mask: usize = undefined;

View File

@ -169,6 +169,15 @@ pub const BitOutStream = BitWriter;
/// Deprecated: use `bitWriter`
pub const bitOutStream = bitWriter;
pub const AutoIndentingStream = @import("io/auto_indenting_stream.zig").AutoIndentingStream;
pub const autoIndentingStream = @import("io/auto_indenting_stream.zig").autoIndentingStream;
pub const ChangeDetectionStream = @import("io/change_detection_stream.zig").ChangeDetectionStream;
pub const changeDetectionStream = @import("io/change_detection_stream.zig").changeDetectionStream;
pub const FindByteOutStream = @import("io/find_byte_out_stream.zig").FindByteOutStream;
pub const findByteOutStream = @import("io/find_byte_out_stream.zig").findByteOutStream;
pub const Packing = @import("io/serialization.zig").Packing;
pub const Serializer = @import("io/serialization.zig").Serializer;

View File

@ -0,0 +1,148 @@
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
const assert = std.debug.assert;
/// Automatically inserts indentation of written data by keeping
/// track of the current indentation level
pub fn AutoIndentingStream(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
pub const Error = UnderlyingWriter.Error;
pub const Writer = io.Writer(*Self, Error, write);
underlying_writer: UnderlyingWriter,
indent_count: usize = 0,
indent_delta: usize,
current_line_empty: bool = true,
indent_one_shot_count: usize = 0, // automatically popped when applied
applied_indent: usize = 0, // the most recently applied indent
indent_next_line: usize = 0, // not used until the next line
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
pub fn write(self: *Self, bytes: []const u8) Error!usize {
if (bytes.len == 0)
return @as(usize, 0);
try self.applyIndent();
return self.writeNoIndent(bytes);
}
// Change the indent delta without changing the final indentation level
pub fn setIndentDelta(self: *Self, indent_delta: usize) void {
if (self.indent_delta == indent_delta) {
return;
} else if (self.indent_delta > indent_delta) {
assert(self.indent_delta % indent_delta == 0);
self.indent_count = self.indent_count * (self.indent_delta / indent_delta);
} else {
// assert that the current indentation (in spaces) is a multiple of the new delta
assert((self.indent_count * self.indent_delta) % indent_delta == 0);
self.indent_count = self.indent_count / (indent_delta / self.indent_delta);
}
self.indent_delta = indent_delta;
}
fn writeNoIndent(self: *Self, bytes: []const u8) Error!usize {
if (bytes.len == 0)
return @as(usize, 0);
try self.underlying_writer.writeAll(bytes);
if (bytes[bytes.len - 1] == '\n')
self.resetLine();
return bytes.len;
}
pub fn insertNewline(self: *Self) Error!void {
_ = try self.writeNoIndent("\n");
}
fn resetLine(self: *Self) void {
self.current_line_empty = true;
self.indent_next_line = 0;
}
/// Insert a newline unless the current line is blank
pub fn maybeInsertNewline(self: *Self) Error!void {
if (!self.current_line_empty)
try self.insertNewline();
}
/// Push default indentation
pub fn pushIndent(self: *Self) void {
// Doesn't actually write any indentation.
// Just primes the stream to be able to write the correct indentation if it needs to.
self.indent_count += 1;
}
/// Push an indent that is automatically popped after being applied
pub fn pushIndentOneShot(self: *Self) void {
self.indent_one_shot_count += 1;
self.pushIndent();
}
/// Turns all one-shot indents into regular indents
/// Returns the number of indents that must now be manually popped
pub fn lockOneShotIndent(self: *Self) usize {
var locked_count = self.indent_one_shot_count;
self.indent_one_shot_count = 0;
return locked_count;
}
/// Push an indent that should not take effect until the next line
pub fn pushIndentNextLine(self: *Self) void {
self.indent_next_line += 1;
self.pushIndent();
}
pub fn popIndent(self: *Self) void {
assert(self.indent_count != 0);
self.indent_count -= 1;
if (self.indent_next_line > 0)
self.indent_next_line -= 1;
}
/// Writes ' ' bytes if the current line is empty
fn applyIndent(self: *Self) Error!void {
const current_indent = self.currentIndent();
if (self.current_line_empty and current_indent > 0) {
try self.underlying_writer.writeByteNTimes(' ', current_indent);
self.applied_indent = current_indent;
}
self.indent_count -= self.indent_one_shot_count;
self.indent_one_shot_count = 0;
self.current_line_empty = false;
}
/// Checks to see if the most recent indentation exceeds the currently pushed indents
pub fn isLineOverIndented(self: *Self) bool {
if (self.current_line_empty) return false;
return self.applied_indent > self.currentIndent();
}
fn currentIndent(self: *Self) usize {
var indent_current: usize = 0;
if (self.indent_count > 0) {
const indent_count = self.indent_count - self.indent_next_line;
indent_current = indent_count * self.indent_delta;
}
return indent_current;
}
};
}
pub fn autoIndentingStream(
indent_delta: usize,
underlying_writer: anytype,
) AutoIndentingStream(@TypeOf(underlying_writer)) {
return AutoIndentingStream(@TypeOf(underlying_writer)){
.underlying_writer = underlying_writer,
.indent_delta = indent_delta,
};
}
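// Hedged usage sketch, not part of the original commit: indent the body of a
// block by four spaces using pushIndent/popIndent around the writes.
test "AutoIndentingStream sketch" {
    var buf: [64]u8 = undefined;
    var fbs = io.fixedBufferStream(&buf);
    var ais = autoIndentingStream(4, fbs.writer());

    try ais.writer().writeAll("fn main() void {\n");
    ais.pushIndent();
    try ais.writer().writeAll("return;\n");
    ais.popIndent();
    try ais.writer().writeAll("}\n");

    std.testing.expectEqualSlices(u8, "fn main() void {\n    return;\n}\n", fbs.getWritten());
}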

View File

@ -0,0 +1,55 @@
const std = @import("../std.zig");
const io = std.io;
const mem = std.mem;
const assert = std.debug.assert;
/// Used to detect if the data written to a stream differs from a source buffer
pub fn ChangeDetectionStream(comptime WriterType: type) type {
return struct {
const Self = @This();
pub const Error = WriterType.Error;
pub const Writer = io.Writer(*Self, Error, write);
anything_changed: bool,
underlying_writer: WriterType,
source_index: usize,
source: []const u8,
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
fn write(self: *Self, bytes: []const u8) Error!usize {
if (!self.anything_changed) {
const end = self.source_index + bytes.len;
if (end > self.source.len) {
self.anything_changed = true;
} else {
const src_slice = self.source[self.source_index..end];
self.source_index += bytes.len;
if (!mem.eql(u8, bytes, src_slice)) {
self.anything_changed = true;
}
}
}
return self.underlying_writer.write(bytes);
}
pub fn changeDetected(self: *Self) bool {
return self.anything_changed or (self.source_index != self.source.len);
}
};
}
pub fn changeDetectionStream(
source: []const u8,
underlying_writer: anytype,
) ChangeDetectionStream(@TypeOf(underlying_writer)) {
return ChangeDetectionStream(@TypeOf(underlying_writer)){
.anything_changed = false,
.underlying_writer = underlying_writer,
.source_index = 0,
.source = source,
};
}
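// Hedged usage sketch, not part of the original commit: render output twice
// and check whether it differs from an existing source buffer.
test "ChangeDetectionStream sketch" {
    const source = "const x = 1;\n";
    var buf: [source.len]u8 = undefined;

    var same_fbs = io.fixedBufferStream(&buf);
    var same = changeDetectionStream(source, same_fbs.writer());
    try same.writer().writeAll("const x = 1;\n");
    assert(!same.changeDetected());

    var changed_fbs = io.fixedBufferStream(&buf);
    var changed = changeDetectionStream(source, changed_fbs.writer());
    try changed.writer().writeAll("const x = 2;\n");
    assert(changed.changeDetected());
}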

View File

@ -0,0 +1,40 @@
const std = @import("../std.zig");
const io = std.io;
const assert = std.debug.assert;
/// An OutStream that tracks whether a given byte has been written to it.
/// All writes are forwarded to the underlying writer.
pub fn FindByteOutStream(comptime UnderlyingWriter: type) type {
return struct {
const Self = @This();
pub const Error = UnderlyingWriter.Error;
pub const Writer = io.Writer(*Self, Error, write);
underlying_writer: UnderlyingWriter,
byte_found: bool,
byte: u8,
pub fn writer(self: *Self) Writer {
return .{ .context = self };
}
fn write(self: *Self, bytes: []const u8) Error!usize {
if (!self.byte_found) {
self.byte_found = blk: {
for (bytes) |b|
if (b == self.byte) break :blk true;
break :blk false;
};
}
return self.underlying_writer.write(bytes);
}
};
}
pub fn findByteOutStream(byte: u8, underlying_writer: anytype) FindByteOutStream(@TypeOf(underlying_writer)) {
return FindByteOutStream(@TypeOf(underlying_writer)){
.underlying_writer = underlying_writer,
.byte = byte,
.byte_found = false,
};
}
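// Hedged usage sketch, not part of the original commit: detect whether a
// newline was written while every byte still reaches the underlying writer.
test "FindByteOutStream sketch" {
    var buf: [32]u8 = undefined;
    var fbs = io.fixedBufferStream(&buf);
    var finder = findByteOutStream('\n', fbs.writer());

    try finder.writer().writeAll("no newline yet");
    assert(!finder.byte_found);

    try finder.writer().writeAll(" ...\n");
    assert(finder.byte_found);
}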

View File

@ -198,28 +198,28 @@ pub fn Reader(
/// Reads a native-endian integer
pub fn readIntNative(self: Self, comptime T: type) !T {
const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntNative(T, &bytes);
}
/// Reads a foreign-endian integer
pub fn readIntForeign(self: Self, comptime T: type) !T {
const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntForeign(T, &bytes);
}
pub fn readIntLittle(self: Self, comptime T: type) !T {
const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntLittle(T, &bytes);
}
pub fn readIntBig(self: Self, comptime T: type) !T {
const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readIntBig(T, &bytes);
}
pub fn readInt(self: Self, comptime T: type, endian: builtin.Endian) !T {
const bytes = try self.readBytesNoEof((T.bit_count + 7) / 8);
const bytes = try self.readBytesNoEof((@typeInfo(T).Int.bits + 7) / 8);
return mem.readInt(T, &bytes, endian);
}

View File

@ -60,7 +60,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
const U = std.meta.Int(false, t_bit_count);
const Log2U = math.Log2Int(U);
const int_size = (U.bit_count + 7) / 8;
const int_size = (t_bit_count + 7) / 8;
if (packing == .Bit) {
const result = try self.in_stream.readBitsNoEof(U, t_bit_count);
@ -73,7 +73,7 @@ pub fn Deserializer(comptime endian: builtin.Endian, comptime packing: Packing,
if (int_size == 1) {
if (t_bit_count == 8) return @bitCast(T, buffer[0]);
const PossiblySignedByte = std.meta.Int(T.is_signed, 8);
const PossiblySignedByte = std.meta.Int(@typeInfo(T).Int.is_signed, 8);
return @truncate(T, @bitCast(PossiblySignedByte, buffer[0]));
}
@ -247,7 +247,7 @@ pub fn Serializer(comptime endian: builtin.Endian, comptime packing: Packing, co
const U = std.meta.Int(false, t_bit_count);
const Log2U = math.Log2Int(U);
const int_size = (U.bit_count + 7) / 8;
const int_size = (t_bit_count + 7) / 8;
const u_value = @bitCast(U, value);

View File

@ -53,7 +53,7 @@ pub fn Writer(
/// Write a native-endian integer.
/// TODO audit non-power-of-two int sizes
pub fn writeIntNative(self: Self, comptime T: type, value: T) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntNative(T, &bytes, value);
return self.writeAll(&bytes);
}
@ -61,28 +61,28 @@ pub fn Writer(
/// Write a foreign-endian integer.
/// TODO audit non-power-of-two int sizes
pub fn writeIntForeign(self: Self, comptime T: type, value: T) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntForeign(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeIntLittle(self: Self, comptime T: type, value: T) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntLittle(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeIntBig(self: Self, comptime T: type, value: T) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeIntBig(T, &bytes, value);
return self.writeAll(&bytes);
}
/// TODO audit non-power-of-two int sizes
pub fn writeInt(self: Self, comptime T: type, value: T, endian: builtin.Endian) Error!void {
var bytes: [(T.bit_count + 7) / 8]u8 = undefined;
var bytes: [(@typeInfo(T).Int.bits + 7) / 8]u8 = undefined;
mem.writeInt(T, &bytes, value, endian);
return self.writeAll(&bytes);
}

View File

@ -127,6 +127,10 @@ fn log(
if (@enumToInt(message_level) <= @enumToInt(level)) {
if (@hasDecl(root, "log")) {
root.log(message_level, scope, format, args);
} else if (std.Target.current.os.tag == .freestanding) {
// On freestanding one must provide a log function; we do not have
// any I/O configured.
return;
} else if (builtin.mode != .ReleaseSmall) {
const held = std.debug.getStderrMutex().acquire();
defer held.release();

View File

@ -195,7 +195,7 @@ test "" {
pub fn floatMantissaBits(comptime T: type) comptime_int {
assert(@typeInfo(T) == .Float);
return switch (T.bit_count) {
return switch (@typeInfo(T).Float.bits) {
16 => 10,
32 => 23,
64 => 52,
@ -208,7 +208,7 @@ pub fn floatMantissaBits(comptime T: type) comptime_int {
pub fn floatExponentBits(comptime T: type) comptime_int {
assert(@typeInfo(T) == .Float);
return switch (T.bit_count) {
return switch (@typeInfo(T).Float.bits) {
16 => 5,
32 => 8,
64 => 11,
@ -347,9 +347,9 @@ pub fn shlExact(comptime T: type, a: T, shift_amt: Log2Int(T)) !T {
/// A negative shift amount results in a right shift.
pub fn shl(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt);
if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) {
if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) {
if (shift_amt < 0) {
return a >> casted_shift_amt;
}
@ -373,9 +373,9 @@ test "math.shl" {
/// A negative shift amount results in a left shift.
pub fn shr(comptime T: type, a: T, shift_amt: anytype) T {
const abs_shift_amt = absCast(shift_amt);
const casted_shift_amt = if (abs_shift_amt >= T.bit_count) return 0 else @intCast(Log2Int(T), abs_shift_amt);
const casted_shift_amt = if (abs_shift_amt >= @typeInfo(T).Int.bits) return 0 else @intCast(Log2Int(T), abs_shift_amt);
if (@TypeOf(shift_amt) == comptime_int or @TypeOf(shift_amt).is_signed) {
if (@TypeOf(shift_amt) == comptime_int or @typeInfo(@TypeOf(shift_amt)).Int.is_signed) {
if (shift_amt >= 0) {
return a >> casted_shift_amt;
} else {
@ -400,11 +400,11 @@ test "math.shr" {
/// Rotates right. Only unsigned values can be rotated.
/// Negative shift values result in a shift modulo the bit count.
pub fn rotr(comptime T: type, x: T, r: anytype) T {
if (T.is_signed) {
if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
const ar = @mod(r, T.bit_count);
return shr(T, x, ar) | shl(T, x, T.bit_count - ar);
const ar = @mod(r, @typeInfo(T).Int.bits);
return shr(T, x, ar) | shl(T, x, @typeInfo(T).Int.bits - ar);
}
}
@ -419,11 +419,11 @@ test "math.rotr" {
/// Rotates left. Only unsigned values can be rotated.
/// Negative shift values result in a shift modulo the bit count.
pub fn rotl(comptime T: type, x: T, r: anytype) T {
if (T.is_signed) {
if (@typeInfo(T).Int.is_signed) {
@compileError("cannot rotate signed integer");
} else {
const ar = @mod(r, T.bit_count);
return shl(T, x, ar) | shr(T, x, T.bit_count - ar);
const ar = @mod(r, @typeInfo(T).Int.bits);
return shl(T, x, ar) | shr(T, x, @typeInfo(T).Int.bits - ar);
}
}
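// Hedged example, not part of the original commit: rotating an 8-bit value by
// one position in each direction.
test "math.rot sketch" {
    testing.expect(rotl(u8, 0b1000_0001, @as(usize, 1)) == 0b0000_0011);
    testing.expect(rotr(u8, 0b1000_0001, @as(usize, 1)) == 0b1100_0000);
}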
@ -438,7 +438,7 @@ test "math.rotl" {
pub fn Log2Int(comptime T: type) type {
// comptime ceil log2
comptime var count = 0;
comptime var s = T.bit_count - 1;
comptime var s = @typeInfo(T).Int.bits - 1;
inline while (s != 0) : (s >>= 1) {
count += 1;
}
@ -524,7 +524,7 @@ fn testOverflow() void {
pub fn absInt(x: anytype) !@TypeOf(x) {
const T = @TypeOf(x);
comptime assert(@typeInfo(T) == .Int); // must pass an integer to absInt
comptime assert(T.is_signed); // must pass a signed integer to absInt
comptime assert(@typeInfo(T).Int.is_signed); // must pass a signed integer to absInt
if (x == minInt(@TypeOf(x))) {
return error.Overflow;
@ -557,7 +557,7 @@ fn testAbsFloat() void {
pub fn divTrunc(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divTrunc(numerator, denominator);
}
@ -578,7 +578,7 @@ fn testDivTrunc() void {
pub fn divFloor(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
return @divFloor(numerator, denominator);
}
@ -652,7 +652,7 @@ fn testDivCeil() void {
pub fn divExact(comptime T: type, numerator: T, denominator: T) !T {
@setRuntimeSafety(false);
if (denominator == 0) return error.DivisionByZero;
if (@typeInfo(T) == .Int and T.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
if (@typeInfo(T) == .Int and @typeInfo(T).Int.is_signed and numerator == minInt(T) and denominator == -1) return error.Overflow;
const result = @divTrunc(numerator, denominator);
if (result * denominator != numerator) return error.UnexpectedRemainder;
return result;
@ -757,10 +757,10 @@ test "math.absCast" {
/// Returns the negation of the integer parameter.
/// Result is a signed integer.
pub fn negateCast(x: anytype) !std.meta.Int(true, @TypeOf(x).bit_count) {
if (@TypeOf(x).is_signed) return negate(x);
pub fn negateCast(x: anytype) !std.meta.Int(true, std.meta.bitCount(@TypeOf(x))) {
if (@typeInfo(@TypeOf(x)).Int.is_signed) return negate(x);
const int = std.meta.Int(true, @TypeOf(x).bit_count);
const int = std.meta.Int(true, std.meta.bitCount(@TypeOf(x)));
if (x > -minInt(int)) return error.Overflow;
if (x == -minInt(int)) return minInt(int);
@ -823,7 +823,7 @@ pub fn floorPowerOfTwo(comptime T: type, value: T) T {
var x = value;
comptime var i = 1;
inline while (T.bit_count > i) : (i *= 2) {
inline while (@typeInfo(T).Int.bits > i) : (i *= 2) {
x |= (x >> i);
}
@ -847,13 +847,13 @@ fn testFloorPowerOfTwo() void {
/// Returns the next power of two (if the value is not already a power of two).
/// Only unsigned integers can be used. Zero is not an allowed input.
/// Result is a type with 1 more bit than the input type.
pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signed, T.bit_count + 1) {
pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1) {
comptime assert(@typeInfo(T) == .Int);
comptime assert(!T.is_signed);
comptime assert(!@typeInfo(T).Int.is_signed);
assert(value != 0);
comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1);
comptime const PromotedType = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits + 1);
comptime const shiftType = std.math.Log2Int(PromotedType);
return @as(PromotedType, 1) << @intCast(shiftType, T.bit_count - @clz(T, value - 1));
return @as(PromotedType, 1) << @intCast(shiftType, @typeInfo(T).Int.bits - @clz(T, value - 1));
}
/// Returns the next power of two (if the value is not already a power of two).
@ -861,9 +861,10 @@ pub fn ceilPowerOfTwoPromote(comptime T: type, value: T) std.meta.Int(T.is_signe
/// If the value doesn't fit, returns an error.
pub fn ceilPowerOfTwo(comptime T: type, value: T) (error{Overflow}!T) {
comptime assert(@typeInfo(T) == .Int);
comptime assert(!T.is_signed);
comptime const PromotedType = std.meta.Int(T.is_signed, T.bit_count + 1);
comptime const overflowBit = @as(PromotedType, 1) << T.bit_count;
const info = @typeInfo(T).Int;
comptime assert(!info.is_signed);
comptime const PromotedType = std.meta.Int(info.is_signed, info.bits + 1);
comptime const overflowBit = @as(PromotedType, 1) << info.bits;
var x = ceilPowerOfTwoPromote(T, value);
if (overflowBit & x != 0) {
return error.Overflow;
@ -911,7 +912,7 @@ fn testCeilPowerOfTwo() !void {
pub fn log2_int(comptime T: type, x: T) Log2Int(T) {
assert(x != 0);
return @intCast(Log2Int(T), T.bit_count - 1 - @clz(T, x));
return @intCast(Log2Int(T), @typeInfo(T).Int.bits - 1 - @clz(T, x));
}
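// Hedged example, not part of the original commit: log2_int truncates, so
// both 8 and 9 map to 3.
test "math.log2_int sketch" {
    testing.expect(log2_int(u32, 8) == 3);
    testing.expect(log2_int(u32, 9) == 3);
}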
pub fn log2_int_ceil(comptime T: type, x: T) Log2Int(T) {
@ -1008,8 +1009,8 @@ test "max value type" {
testing.expect(x == 2147483647);
}
pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(T.is_signed, T.bit_count * 2) {
const ResultInt = std.meta.Int(T.is_signed, T.bit_count * 2);
pub fn mulWide(comptime T: type, a: T, b: T) std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2) {
const ResultInt = std.meta.Int(@typeInfo(T).Int.is_signed, @typeInfo(T).Int.bits * 2);
return @as(ResultInt, a) * @as(ResultInt, b);
}
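// Hedged example, not part of the original commit: mulWide doubles the bit
// width of the result, so two u8 operands multiply without overflow.
test "math.mulWide sketch" {
    testing.expect(mulWide(u8, 200, 200) == 40000);
    testing.expect(@TypeOf(mulWide(u8, 200, 200)) == u16);
}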

View File

@ -9,14 +9,15 @@ const assert = std.debug.assert;
pub const Rational = @import("big/rational.zig").Rational;
pub const int = @import("big/int.zig");
pub const Limb = usize;
pub const DoubleLimb = std.meta.IntType(false, 2 * Limb.bit_count);
pub const SignedDoubleLimb = std.meta.IntType(true, DoubleLimb.bit_count);
const limb_info = @typeInfo(Limb).Int;
pub const DoubleLimb = std.meta.IntType(false, 2 * limb_info.bits);
pub const SignedDoubleLimb = std.meta.IntType(true, 2 * limb_info.bits);
pub const Log2Limb = std.math.Log2Int(Limb);
comptime {
assert(std.math.floorPowerOfTwo(usize, Limb.bit_count) == Limb.bit_count);
assert(Limb.bit_count <= 64); // u128 set is unsupported
assert(Limb.is_signed == false);
assert(std.math.floorPowerOfTwo(usize, limb_info.bits) == limb_info.bits);
assert(limb_info.bits <= 64); // u128 set is unsupported
assert(limb_info.is_signed == false);
}
test "" {

View File

@ -6,6 +6,7 @@
const std = @import("../../std.zig");
const math = std.math;
const Limb = std.math.big.Limb;
const limb_bits = @typeInfo(Limb).Int.bits;
const DoubleLimb = std.math.big.DoubleLimb;
const SignedDoubleLimb = std.math.big.SignedDoubleLimb;
const Log2Limb = std.math.big.Log2Limb;
@ -28,7 +29,7 @@ pub fn calcLimbLen(scalar: anytype) usize {
},
.ComptimeInt => {
const w_value = if (scalar < 0) -scalar else scalar;
return @divFloor(math.log2(w_value), Limb.bit_count) + 1;
return @divFloor(math.log2(w_value), limb_bits) + 1;
},
else => @compileError("parameter must be a primitive integer type"),
}
@ -54,7 +55,7 @@ pub fn calcSetStringLimbsBufferLen(base: u8, string_len: usize) usize {
}
pub fn calcSetStringLimbCount(base: u8, string_len: usize) usize {
return (string_len + (Limb.bit_count / base - 1)) / (Limb.bit_count / base);
return (string_len + (limb_bits / base - 1)) / (limb_bits / base);
}
/// a + b * c + *carry, sets carry to the overflow bits
@ -68,7 +69,7 @@ pub fn addMulLimbWithCarry(a: Limb, b: Limb, c: Limb, carry: *Limb) Limb {
// r2 = b * c
const bc = @as(DoubleLimb, math.mulWide(Limb, b, c));
const r2 = @truncate(Limb, bc);
const c2 = @truncate(Limb, bc >> Limb.bit_count);
const c2 = @truncate(Limb, bc >> limb_bits);
// r1 = r1 + r2
const c3: Limb = @boolToInt(@addWithOverflow(Limb, r1, r2, &r1));
@ -181,7 +182,7 @@ pub const Mutable = struct {
switch (@typeInfo(T)) {
.Int => |info| {
const UT = if (T.is_signed) std.meta.Int(false, T.bit_count - 1) else T;
const UT = if (info.is_signed) std.meta.Int(false, info.bits - 1) else T;
const needed_limbs = @sizeOf(UT) / @sizeOf(Limb);
assert(needed_limbs <= self.limbs.len); // value too big
@ -190,7 +191,7 @@ pub const Mutable = struct {
var w_value: UT = if (value < 0) @intCast(UT, -value) else @intCast(UT, value);
if (info.bits <= Limb.bit_count) {
if (info.bits <= limb_bits) {
self.limbs[0] = @as(Limb, w_value);
self.len += 1;
} else {
@ -200,15 +201,15 @@ pub const Mutable = struct {
self.len += 1;
// TODO: shift == 64 at compile-time fails. Fails on u128 limbs.
w_value >>= Limb.bit_count / 2;
w_value >>= Limb.bit_count / 2;
w_value >>= limb_bits / 2;
w_value >>= limb_bits / 2;
}
}
},
.ComptimeInt => {
comptime var w_value = if (value < 0) -value else value;
const req_limbs = @divFloor(math.log2(w_value), Limb.bit_count) + 1;
const req_limbs = @divFloor(math.log2(w_value), limb_bits) + 1;
assert(req_limbs <= self.limbs.len); // value too big
self.len = req_limbs;
@ -217,14 +218,14 @@ pub const Mutable = struct {
if (w_value <= maxInt(Limb)) {
self.limbs[0] = w_value;
} else {
const mask = (1 << Limb.bit_count) - 1;
const mask = (1 << limb_bits) - 1;
comptime var i = 0;
inline while (w_value != 0) : (i += 1) {
self.limbs[i] = w_value & mask;
w_value >>= Limb.bit_count / 2;
w_value >>= Limb.bit_count / 2;
w_value >>= limb_bits / 2;
w_value >>= limb_bits / 2;
}
}
},
@ -506,7 +507,7 @@ pub const Mutable = struct {
/// `a.limbs.len + (shift / (@sizeOf(Limb) * 8))`.
pub fn shiftLeft(r: *Mutable, a: Const, shift: usize) void {
llshl(r.limbs[0..], a.limbs[0..a.limbs.len], shift);
r.normalize(a.limbs.len + (shift / Limb.bit_count) + 1);
r.normalize(a.limbs.len + (shift / limb_bits) + 1);
r.positive = a.positive;
}
@ -516,7 +517,7 @@ pub const Mutable = struct {
/// Asserts there is enough memory to fit the result. The upper bound Limb count is
/// `a.limbs.len - (shift / (@sizeOf(Limb) * 8))`.
pub fn shiftRight(r: *Mutable, a: Const, shift: usize) void {
if (a.limbs.len <= shift / Limb.bit_count) {
if (a.limbs.len <= shift / limb_bits) {
r.len = 1;
r.positive = true;
r.limbs[0] = 0;
@ -524,7 +525,7 @@ pub const Mutable = struct {
}
const r_len = llshr(r.limbs[0..], a.limbs[0..a.limbs.len], shift);
r.len = a.limbs.len - (shift / Limb.bit_count);
r.len = a.limbs.len - (shift / limb_bits);
r.positive = a.positive;
}
@ -772,7 +773,7 @@ pub const Mutable = struct {
}
if (ab_zero_limb_count != 0) {
rem.shiftLeft(rem.toConst(), ab_zero_limb_count * Limb.bit_count);
rem.shiftLeft(rem.toConst(), ab_zero_limb_count * limb_bits);
}
}
@ -803,10 +804,10 @@ pub const Mutable = struct {
};
tmp.limbs[0] = 0;
// Normalize so y > Limb.bit_count / 2 (i.e. leading bit is set) and even
// Normalize so y > limb_bits / 2 (i.e. leading bit is set) and even
var norm_shift = @clz(Limb, y.limbs[y.len - 1]);
if (norm_shift == 0 and y.toConst().isOdd()) {
norm_shift = Limb.bit_count;
norm_shift = limb_bits;
}
x.shiftLeft(x.toConst(), norm_shift);
y.shiftLeft(y.toConst(), norm_shift);
@ -820,7 +821,7 @@ pub const Mutable = struct {
mem.set(Limb, q.limbs[0..q.len], 0);
// 2.
tmp.shiftLeft(y.toConst(), Limb.bit_count * (n - t));
tmp.shiftLeft(y.toConst(), limb_bits * (n - t));
while (x.toConst().order(tmp.toConst()) != .lt) {
q.limbs[n - t] += 1;
x.sub(x.toConst(), tmp.toConst());
@ -833,7 +834,7 @@ pub const Mutable = struct {
if (x.limbs[i] == y.limbs[t]) {
q.limbs[i - t - 1] = maxInt(Limb);
} else {
const num = (@as(DoubleLimb, x.limbs[i]) << Limb.bit_count) | @as(DoubleLimb, x.limbs[i - 1]);
const num = (@as(DoubleLimb, x.limbs[i]) << limb_bits) | @as(DoubleLimb, x.limbs[i - 1]);
const z = @intCast(Limb, num / @as(DoubleLimb, y.limbs[t]));
q.limbs[i - t - 1] = if (z > maxInt(Limb)) maxInt(Limb) else @as(Limb, z);
}
@ -862,11 +863,11 @@ pub const Mutable = struct {
// 3.3
tmp.set(q.limbs[i - t - 1]);
tmp.mul(tmp.toConst(), y.toConst(), mul_limb_buf, allocator);
tmp.shiftLeft(tmp.toConst(), Limb.bit_count * (i - t - 1));
tmp.shiftLeft(tmp.toConst(), limb_bits * (i - t - 1));
x.sub(x.toConst(), tmp.toConst());
if (!x.positive) {
tmp.shiftLeft(y.toConst(), Limb.bit_count * (i - t - 1));
tmp.shiftLeft(y.toConst(), limb_bits * (i - t - 1));
x.add(x.toConst(), tmp.toConst());
q.limbs[i - t - 1] -= 1;
}
@ -949,7 +950,7 @@ pub const Const = struct {
/// Returns the number of bits required to represent the absolute value of an integer.
pub fn bitCountAbs(self: Const) usize {
return (self.limbs.len - 1) * Limb.bit_count + (Limb.bit_count - @clz(Limb, self.limbs[self.limbs.len - 1]));
return (self.limbs.len - 1) * limb_bits + (limb_bits - @clz(Limb, self.limbs[self.limbs.len - 1]));
}
/// Returns the number of bits required to represent the integer in twos-complement form.
@ -1019,10 +1020,10 @@ pub const Const = struct {
/// Returns an error if self cannot be narrowed into the requested type without truncation.
pub fn to(self: Const, comptime T: type) ConvertError!T {
switch (@typeInfo(T)) {
.Int => {
const UT = std.meta.Int(false, T.bit_count);
.Int => |info| {
const UT = std.meta.Int(false, info.bits);
if (self.bitCountTwosComp() > T.bit_count) {
if (self.bitCountTwosComp() > info.bits) {
return error.TargetTooSmall;
}
@ -1033,12 +1034,12 @@ pub const Const = struct {
} else {
for (self.limbs[0..self.limbs.len]) |_, ri| {
const limb = self.limbs[self.limbs.len - ri - 1];
r <<= Limb.bit_count;
r <<= limb_bits;
r |= limb;
}
}
if (!T.is_signed) {
if (!info.is_signed) {
return if (self.positive) @intCast(T, r) else error.NegativeIntoUnsigned;
} else {
if (self.positive) {
@ -1149,7 +1150,7 @@ pub const Const = struct {
outer: for (self.limbs[0..self.limbs.len]) |limb| {
var shift: usize = 0;
while (shift < Limb.bit_count) : (shift += base_shift) {
while (shift < limb_bits) : (shift += base_shift) {
const r = @intCast(u8, (limb >> @intCast(Log2Limb, shift)) & @as(Limb, base - 1));
const ch = std.fmt.digitToChar(r, uppercase);
string[digits_len] = ch;
@ -1295,7 +1296,7 @@ pub const Const = struct {
/// Memory is allocated as needed to ensure operations never overflow. The range
/// is bounded only by available memory.
pub const Managed = struct {
pub const sign_bit: usize = 1 << (usize.bit_count - 1);
pub const sign_bit: usize = 1 << (@typeInfo(usize).Int.bits - 1);
/// Default number of limbs to allocate on creation of a `Managed`.
pub const default_capacity = 4;
@ -1448,7 +1449,7 @@ pub const Managed = struct {
for (self.limbs[0..self.len()]) |limb| {
std.debug.warn("{x} ", .{limb});
}
std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.positive });
std.debug.warn("capacity={} positive={}\n", .{ self.limbs.len, self.isPositive() });
}
/// Negate the sign.
@ -1716,7 +1717,7 @@ pub const Managed = struct {
/// r = a << shift, in other words, r = a * 2^shift
pub fn shiftLeft(r: *Managed, a: Managed, shift: usize) !void {
try r.ensureCapacity(a.len() + (shift / Limb.bit_count) + 1);
try r.ensureCapacity(a.len() + (shift / limb_bits) + 1);
var m = r.toMutable();
m.shiftLeft(a.toConst(), shift);
r.setMetadata(m.positive, m.len);
@ -1724,13 +1725,13 @@ pub const Managed = struct {
/// r = a >> shift
pub fn shiftRight(r: *Managed, a: Managed, shift: usize) !void {
if (a.len() <= shift / Limb.bit_count) {
if (a.len() <= shift / limb_bits) {
r.metadata = 1;
r.limbs[0] = 0;
return;
}
try r.ensureCapacity(a.len() - (shift / Limb.bit_count));
try r.ensureCapacity(a.len() - (shift / limb_bits));
var m = r.toMutable();
m.shiftRight(a.toConst(), shift);
r.setMetadata(m.positive, m.len);
@ -2021,7 +2022,7 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
rem.* = 0;
for (a) |_, ri| {
const i = a.len - ri - 1;
const pdiv = ((@as(DoubleLimb, rem.*) << Limb.bit_count) | a[i]);
const pdiv = ((@as(DoubleLimb, rem.*) << limb_bits) | a[i]);
if (pdiv == 0) {
quo[i] = 0;
@ -2042,10 +2043,10 @@ fn lldiv1(quo: []Limb, rem: *Limb, a: []const Limb, b: Limb) void {
fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
assert(r.len >= a.len + (shift / Limb.bit_count) + 1);
assert(r.len >= a.len + (shift / limb_bits) + 1);
const limb_shift = shift / Limb.bit_count + 1;
const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
const limb_shift = shift / limb_bits + 1;
const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits);
var carry: Limb = 0;
var i: usize = 0;
@ -2057,7 +2058,7 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
r[dst_i] = carry | @call(.{ .modifier = .always_inline }, math.shr, .{
Limb,
src_digit,
Limb.bit_count - @intCast(Limb, interior_limb_shift),
limb_bits - @intCast(Limb, interior_limb_shift),
});
carry = (src_digit << interior_limb_shift);
}
@ -2069,10 +2070,10 @@ fn llshl(r: []Limb, a: []const Limb, shift: usize) void {
fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
@setRuntimeSafety(debug_safety);
assert(a.len >= 1);
assert(r.len >= a.len - (shift / Limb.bit_count));
assert(r.len >= a.len - (shift / limb_bits));
const limb_shift = shift / Limb.bit_count;
const interior_limb_shift = @intCast(Log2Limb, shift % Limb.bit_count);
const limb_shift = shift / limb_bits;
const interior_limb_shift = @intCast(Log2Limb, shift % limb_bits);
var carry: Limb = 0;
var i: usize = 0;
@ -2085,7 +2086,7 @@ fn llshr(r: []Limb, a: []const Limb, shift: usize) void {
carry = @call(.{ .modifier = .always_inline }, math.shl, .{
Limb,
src_digit,
Limb.bit_count - @intCast(Limb, interior_limb_shift),
limb_bits - @intCast(Limb, interior_limb_shift),
});
}
}
@ -2135,7 +2136,7 @@ fn fixedIntFromSignedDoubleLimb(A: SignedDoubleLimb, storage: []Limb) Mutable {
const A_is_positive = A >= 0;
const Au = @intCast(DoubleLimb, if (A < 0) -A else A);
storage[0] = @truncate(Limb, Au);
storage[1] = @truncate(Limb, Au >> Limb.bit_count);
storage[1] = @truncate(Limb, Au >> limb_bits);
return .{
.limbs = storage[0..2],
.positive = A_is_positive,

View File

@ -23,13 +23,13 @@ test "big.int comptime_int set" {
var a = try Managed.initSet(testing.allocator, s);
defer a.deinit();
const s_limb_count = 128 / Limb.bit_count;
const s_limb_count = 128 / @typeInfo(Limb).Int.bits;
comptime var i: usize = 0;
inline while (i < s_limb_count) : (i += 1) {
const result = @as(Limb, s & maxInt(Limb));
s >>= Limb.bit_count / 2;
s >>= Limb.bit_count / 2;
s >>= @typeInfo(Limb).Int.bits / 2;
s >>= @typeInfo(Limb).Int.bits / 2;
testing.expect(a.limbs[i] == result);
}
}

View File

@ -136,7 +136,7 @@ pub const Rational = struct {
// Translated from golang.go/src/math/big/rat.go.
debug.assert(@typeInfo(T) == .Float);
const UnsignedInt = std.meta.Int(false, T.bit_count);
const UnsignedInt = std.meta.Int(false, @typeInfo(T).Float.bits);
const f_bits = @bitCast(UnsignedInt, f);
const exponent_bits = math.floatExponentBits(T);
@ -194,8 +194,8 @@ pub const Rational = struct {
// TODO: Indicate whether the result is not exact.
debug.assert(@typeInfo(T) == .Float);
const fsize = T.bit_count;
const BitReprType = std.meta.Int(false, T.bit_count);
const fsize = @typeInfo(T).Float.bits;
const BitReprType = std.meta.Int(false, fsize);
const msize = math.floatMantissaBits(T);
const msize1 = msize + 1;
@ -475,16 +475,18 @@ pub const Rational = struct {
fn extractLowBits(a: Int, comptime T: type) T {
testing.expect(@typeInfo(T) == .Int);
if (T.bit_count <= Limb.bit_count) {
const t_bits = @typeInfo(T).Int.bits;
const limb_bits = @typeInfo(Limb).Int.bits;
if (t_bits <= limb_bits) {
return @truncate(T, a.limbs[0]);
} else {
var r: T = 0;
comptime var i: usize = 0;
// Remainder is always 0 since if T.bit_count >= Limb.bit_count -> Limb | T and both
// Remainder is always 0 since if t_bits >= limb_bits -> Limb | T and both
// are powers of two.
inline while (i < T.bit_count / Limb.bit_count) : (i += 1) {
r |= math.shl(T, a.limbs[i], i * Limb.bit_count);
inline while (i < t_bits / limb_bits) : (i += 1) {
r |= math.shl(T, a.limbs[i], i * limb_bits);
}
return r;

View File

@ -49,7 +49,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn cos_(comptime T: type, x_: T) T {
const I = std.meta.Int(true, T.bit_count);
const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (math.isNan(x) or math.isInf(x)) {

View File

@ -128,7 +128,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
if (yf != 0 and x < 0) {
return math.nan(T);
}
if (yi >= 1 << (T.bit_count - 1)) {
if (yi >= 1 << (@typeInfo(T).Float.bits - 1)) {
return math.exp(y * math.ln(x));
}
@ -150,7 +150,7 @@ pub fn pow(comptime T: type, x: T, y: T) T {
var xe = r2.exponent;
var x1 = r2.significand;
var i = @floatToInt(std.meta.Int(true, T.bit_count), yi);
var i = @floatToInt(std.meta.Int(true, @typeInfo(T).Float.bits), yi);
while (i != 0) : (i >>= 1) {
const overflow_shift = math.floatExponentBits(T) + 1;
if (xe < -(1 << overflow_shift) or (1 << overflow_shift) < xe) {

View File

@ -50,7 +50,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn sin_(comptime T: type, x_: T) T {
const I = std.meta.Int(true, T.bit_count);
const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (x == 0 or math.isNan(x)) {

View File

@ -36,10 +36,10 @@ pub fn sqrt(x: anytype) Sqrt(@TypeOf(x)) {
}
}
fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) {
fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, @typeInfo(T).Int.bits / 2) {
var op = value;
var res: T = 0;
var one: T = 1 << (T.bit_count - 2);
var one: T = 1 << (@typeInfo(T).Int.bits - 2);
// "one" starts at the highest power of four <= than the argument.
while (one > op) {
@ -55,7 +55,7 @@ fn sqrt_int(comptime T: type, value: T) std.meta.Int(false, T.bit_count / 2) {
one >>= 2;
}
const ResultType = std.meta.Int(false, T.bit_count / 2);
const ResultType = std.meta.Int(false, @typeInfo(T).Int.bits / 2);
return @intCast(ResultType, res);
}
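// Hedged example, not part of the original commit: the integer square root
// truncates and the result type narrows to half the bit width.
test "sqrt_int sketch" {
    std.testing.expect(sqrt_int(u32, 27) == 5);
    std.testing.expect(@TypeOf(sqrt_int(u32, 27)) == u16);
}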

View File

@ -43,7 +43,7 @@ const pi4c = 2.69515142907905952645E-15;
const m4pi = 1.273239544735162542821171882678754627704620361328125;
fn tan_(comptime T: type, x_: T) T {
const I = std.meta.Int(true, T.bit_count);
const I = std.meta.Int(true, @typeInfo(T).Float.bits);
var x = x_;
if (x == 0 or math.isNan(x)) {

View File

@ -949,7 +949,7 @@ pub fn readVarInt(comptime ReturnType: type, bytes: []const u8, endian: builtin.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
pub fn readIntNative(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
return @ptrCast(*align(1) const T, bytes).*;
}
@ -957,7 +957,7 @@ pub fn readIntNative(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8) T {
pub fn readIntForeign(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8) T {
return @byteSwap(T, readIntNative(T, bytes));
}
@ -971,18 +971,18 @@ pub const readIntBig = switch (builtin.endian) {
.Big => readIntNative,
};
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is native. This means the function can
/// simply pointer cast memory.
pub fn readIntSliceNative(comptime T: type, bytes: []const u8) T {
const n = @divExact(T.bit_count, 8);
const n = @divExact(@typeInfo(T).Int.bits, 8);
assert(bytes.len >= n);
return readIntNative(T, bytes[0..n]);
}
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
/// Assumes the endianness of memory is foreign, so it must byte-swap.
@ -1003,7 +1003,7 @@ pub const readIntSliceBig = switch (builtin.endian) {
/// Reads an integer from memory with bit count specified by T.
/// The bit count of T must be evenly divisible by 8.
/// This function cannot fail and cannot cause undefined behavior.
pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, endian: builtin.Endian) T {
pub fn readInt(comptime T: type, bytes: *const [@divExact(@typeInfo(T).Int.bits, 8)]u8, endian: builtin.Endian) T {
if (endian == builtin.endian) {
return readIntNative(T, bytes);
} else {
@ -1011,11 +1011,11 @@ pub fn readInt(comptime T: type, bytes: *const [@divExact(T.bit_count, 8)]u8, en
}
}
/// Asserts that bytes.len >= T.bit_count / 8. Reads the integer starting from index 0
/// Asserts that bytes.len >= @typeInfo(T).Int.bits / 8. Reads the integer starting from index 0
/// and ignores extra bytes.
/// The bit count of T must be evenly divisible by 8.
pub fn readIntSlice(comptime T: type, bytes: []const u8, endian: builtin.Endian) T {
const n = @divExact(T.bit_count, 8);
const n = @divExact(@typeInfo(T).Int.bits, 8);
assert(bytes.len >= n);
return readInt(T, bytes[0..n], endian);
}
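A brief round-trip sketch of the readers and writers above: writeInt stores in the requested endianness, readInt recovers the value, and reading with the opposite endianness yields the byte-swapped bits.

const std = @import("std");

test "readInt/writeInt round trip (sketch)" {
    var buf: [4]u8 = undefined;
    std.mem.writeInt(u32, &buf, 0x12345678, .Big);
    std.debug.assert(std.mem.readInt(u32, &buf, .Big) == 0x12345678);
    std.debug.assert(std.mem.readInt(u32, &buf, .Little) == 0x78563412);
}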
@ -1060,7 +1060,7 @@ test "readIntBig and readIntLittle" {
/// accepts any integer bit width.
/// This function stores in native endian, which means it is implemented as a simple
/// memory store.
pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value: T) void {
pub fn writeIntNative(comptime T: type, buf: *[(@typeInfo(T).Int.bits + 7) / 8]u8, value: T) void {
@ptrCast(*align(1) T, buf).* = value;
}
@ -1068,7 +1068,7 @@ pub fn writeIntNative(comptime T: type, buf: *[(T.bit_count + 7) / 8]u8, value:
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
/// This function stores in foreign endian, which means it does a @byteSwap first.
pub fn writeIntForeign(comptime T: type, buf: *[@divExact(T.bit_count, 8)]u8, value: T) void {
pub fn writeIntForeign(comptime T: type, buf: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T) void {
writeIntNative(T, buf, @byteSwap(T, value));
}
@ -1085,7 +1085,7 @@ pub const writeIntBig = switch (builtin.endian) {
/// Writes an integer to memory, storing it in twos-complement.
/// This function always succeeds, has defined behavior for all inputs, but
/// the integer bit width must be divisible by 8.
pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value: T, endian: builtin.Endian) void {
pub fn writeInt(comptime T: type, buffer: *[@divExact(@typeInfo(T).Int.bits, 8)]u8, value: T, endian: builtin.Endian) void {
if (endian == builtin.endian) {
return writeIntNative(T, buffer, value);
} else {
@ -1094,19 +1094,19 @@ pub fn writeInt(comptime T: type, buffer: *[@divExact(T.bit_count, 8)]u8, value:
}
/// Writes a twos-complement little-endian integer to memory.
/// Asserts that buf.len >= T.bit_count / 8.
/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer after writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntLittle
/// instead.
pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
assert(buffer.len >= @divExact(T.bit_count, 8));
assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8));
if (T.bit_count == 0)
if (@typeInfo(T).Int.bits == 0)
return set(u8, buffer, 0);
// TODO I want to call writeIntLittle here but comptime eval facilities aren't good enough
const uint = std.meta.Int(false, T.bit_count);
const uint = std.meta.Int(false, @typeInfo(T).Int.bits);
var bits = @truncate(uint, value);
for (buffer) |*b| {
b.* = @truncate(u8, bits);
@ -1115,18 +1115,18 @@ pub fn writeIntSliceLittle(comptime T: type, buffer: []u8, value: T) void {
}
/// Writes a twos-complement big-endian integer to memory.
/// Asserts that buffer.len >= T.bit_count / 8.
/// Asserts that buffer.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be divisible by 8.
/// Any extra bytes in buffer before writing the integer are set to zero. To
/// avoid the branch to check for extra buffer bytes, use writeIntBig instead.
pub fn writeIntSliceBig(comptime T: type, buffer: []u8, value: T) void {
assert(buffer.len >= @divExact(T.bit_count, 8));
assert(buffer.len >= @divExact(@typeInfo(T).Int.bits, 8));
if (T.bit_count == 0)
if (@typeInfo(T).Int.bits == 0)
return set(u8, buffer, 0);
// TODO I want to call writeIntBig here but comptime eval facilities aren't good enough
const uint = std.meta.Int(false, T.bit_count);
const uint = std.meta.Int(false, @typeInfo(T).Int.bits);
var bits = @truncate(uint, value);
var index: usize = buffer.len;
while (index != 0) {
@ -1147,13 +1147,13 @@ pub const writeIntSliceForeign = switch (builtin.endian) {
};
/// Writes a twos-complement integer to memory, with the specified endianness.
/// Asserts that buf.len >= T.bit_count / 8.
/// Asserts that buf.len >= @typeInfo(T).Int.bits / 8.
/// The bit count of T must be evenly divisible by 8.
/// Any extra bytes in buffer not part of the integer are set to zero, with
/// respect to endianness. To avoid the branch to check for extra buffer bytes,
/// use writeInt instead.
pub fn writeIntSlice(comptime T: type, buffer: []u8, value: T, endian: builtin.Endian) void {
comptime assert(T.bit_count % 8 == 0);
comptime assert(@typeInfo(T).Int.bits % 8 == 0);
return switch (endian) {
.Little => writeIntSliceLittle(T, buffer, value),
.Big => writeIntSliceBig(T, buffer, value),

View File

@ -159,7 +159,7 @@ fn moveBytes(
/// Returns a pointer to undefined memory.
/// Call `destroy` with the result to free the memory.
pub fn create(self: *Allocator, comptime T: type) Error!*T {
if (@sizeOf(T) == 0) return &(T{});
if (@sizeOf(T) == 0) return @as(*T, undefined);
const slice = try self.allocAdvancedWithRetAddr(T, null, 1, .exact, @returnAddress());
return &slice[0];
}
@ -167,11 +167,11 @@ pub fn create(self: *Allocator, comptime T: type) Error!*T {
/// `ptr` should be the return value of `create`, or otherwise
/// have the same address and alignment property.
pub fn destroy(self: *Allocator, ptr: anytype) void {
const T = @TypeOf(ptr).Child;
const info = @typeInfo(@TypeOf(ptr)).Pointer;
const T = info.child;
if (@sizeOf(T) == 0) return;
const non_const_ptr = @intToPtr([*]u8, @ptrToInt(ptr));
const ptr_align = @typeInfo(@TypeOf(ptr)).Pointer.alignment;
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], ptr_align, 0, 0, @returnAddress());
_ = self.shrinkBytes(non_const_ptr[0..@sizeOf(T)], info.alignment, 0, 0, @returnAddress());
}
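A minimal usage sketch of the create/destroy pair documented above, assuming std.testing.allocator as in the rest of the standard library tests:

const std = @import("std");

test "create and destroy (sketch)" {
    const allocator = std.testing.allocator;
    const ptr = try allocator.create(u32);
    defer allocator.destroy(ptr);
    ptr.* = 42;
    std.debug.assert(ptr.* == 42);
}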
/// Allocates an array of `n` items of type `T` and sets all the

View File

@ -705,34 +705,34 @@ pub fn Vector(comptime len: u32, comptime child: type) type {
pub fn cast(comptime DestType: type, target: anytype) DestType {
const TargetType = @TypeOf(target);
switch (@typeInfo(DestType)) {
.Pointer => {
.Pointer => |dest_ptr| {
switch (@typeInfo(TargetType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
.Pointer => |ptr| {
return @ptrCast(DestType, @alignCast(ptr.alignment, target));
return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
},
.Optional => |opt| {
if (@typeInfo(opt.child) == .Pointer) {
return @ptrCast(DestType, @alignCast(@alignOf(opt.child.Child), target));
return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
}
},
else => {},
}
},
.Optional => |opt| {
if (@typeInfo(opt.child) == .Pointer) {
.Optional => |dest_opt| {
if (@typeInfo(dest_opt.child) == .Pointer) {
switch (@typeInfo(TargetType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
.Pointer => |ptr| {
return @ptrCast(DestType, @alignCast(ptr.alignment, target));
.Pointer => {
return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target));
},
.Optional => |target_opt| {
if (@typeInfo(target_opt.child) == .Pointer) {
return @ptrCast(DestType, @alignCast(@alignOf(target_opt.child.Child), target));
return @ptrCast(DestType, @alignCast(@alignOf(dest_opt.child.Child), target));
}
},
else => {},

View File

@ -1164,7 +1164,7 @@ fn linuxLookupNameFromDnsSearch(
}
const search = if (rc.search.isNull() or dots >= rc.ndots or mem.endsWith(u8, name, "."))
&[_]u8{}
""
else
rc.search.span();
@ -1641,6 +1641,9 @@ pub const StreamServer = struct {
/// by the socket buffer limits, not by the system memory.
SystemResources,
/// Socket is not listening for new connections.
SocketNotListening,
ProtocolFailure,
/// Firewall rules forbid connection.

View File

@ -2512,13 +2512,14 @@ pub fn readlinkatZ(dirfd: fd_t, file_path: [*:0]const u8, out_buffer: []u8) Read
}
}
pub const SetIdError = error{
ResourceLimitReached,
pub const SetEidError = error{
InvalidUserId,
PermissionDenied,
} || UnexpectedError;
};
pub fn setuid(uid: u32) SetIdError!void {
pub const SetIdError = error{ResourceLimitReached} || SetEidError || UnexpectedError;
pub fn setuid(uid: uid_t) SetIdError!void {
switch (errno(system.setuid(uid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@ -2528,7 +2529,16 @@ pub fn setuid(uid: u32) SetIdError!void {
}
}
pub fn setreuid(ruid: u32, euid: u32) SetIdError!void {
pub fn seteuid(uid: uid_t) SetEidError!void {
switch (errno(system.seteuid(uid))) {
0 => return,
EINVAL => return error.InvalidUserId,
EPERM => return error.PermissionDenied,
else => |err| return unexpectedErrno(err),
}
}
pub fn setreuid(ruid: uid_t, euid: uid_t) SetIdError!void {
switch (errno(system.setreuid(ruid, euid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@ -2538,7 +2548,7 @@ pub fn setreuid(ruid: u32, euid: u32) SetIdError!void {
}
}
pub fn setgid(gid: u32) SetIdError!void {
pub fn setgid(gid: gid_t) SetIdError!void {
switch (errno(system.setgid(gid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@ -2548,7 +2558,16 @@ pub fn setgid(gid: u32) SetIdError!void {
}
}
pub fn setregid(rgid: u32, egid: u32) SetIdError!void {
pub fn setegid(gid: gid_t) SetEidError!void {
switch (errno(system.setegid(gid))) {
0 => return,
EINVAL => return error.InvalidUserId,
EPERM => return error.PermissionDenied,
else => |err| return unexpectedErrno(err),
}
}
pub fn setregid(rgid: gid_t, egid: gid_t) SetIdError!void {
switch (errno(system.setregid(rgid, egid))) {
0 => return,
EAGAIN => return error.ResourceLimitReached,
@ -2815,6 +2834,9 @@ pub const AcceptError = error{
/// by the socket buffer limits, not by the system memory.
SystemResources,
/// Socket is not listening for new connections.
SocketNotListening,
ProtocolFailure,
/// Firewall rules forbid connection.
@ -2884,21 +2906,21 @@ pub fn accept(
loop.waitUntilFdReadable(sock);
continue;
} else {
return error.WouldBlock;
},
EBADF => unreachable, // always a race condition
ECONNABORTED => return error.ConnectionAborted,
EFAULT => unreachable,
EINVAL => unreachable,
ENOTSOCK => unreachable,
EMFILE => return error.ProcessFdQuotaExceeded,
ENFILE => return error.SystemFdQuotaExceeded,
ENOBUFS => return error.SystemResources,
ENOMEM => return error.SystemResources,
EOPNOTSUPP => unreachable,
EPROTO => return error.ProtocolFailure,
EPERM => return error.BlockedByFirewall,
else => |err| return unexpectedErrno(err),
return error.WouldBlock;
},
EBADF => unreachable, // always a race condition
ECONNABORTED => return error.ConnectionAborted,
EFAULT => unreachable,
EINVAL => return error.SocketNotListening,
ENOTSOCK => unreachable,
EMFILE => return error.ProcessFdQuotaExceeded,
ENFILE => return error.SystemFdQuotaExceeded,
ENOBUFS => return error.SystemResources,
ENOMEM => return error.SystemResources,
EOPNOTSUPP => unreachable,
EPROTO => return error.ProtocolFailure,
EPERM => return error.BlockedByFirewall,
else => |err| return unexpectedErrno(err),
}
}
} else unreachable;
@ -4554,7 +4576,7 @@ pub fn res_mkquery(
// Make a reasonably unpredictable id
var ts: timespec = undefined;
clock_gettime(CLOCK_REALTIME, &ts) catch {};
const UInt = std.meta.Int(false, @TypeOf(ts.tv_nsec).bit_count);
const UInt = std.meta.Int(false, std.meta.bitCount(@TypeOf(ts.tv_nsec)));
const unsec = @bitCast(UInt, ts.tv_nsec);
const id = @truncate(u32, unsec + unsec / 65536);
q[0] = @truncate(u8, id / 256);
@ -5404,3 +5426,71 @@ pub fn signalfd(fd: fd_t, mask: *const sigset_t, flags: u32) !fd_t {
else => |err| return std.os.unexpectedErrno(err),
}
}
pub const SyncError = error{
InputOutput,
NoSpaceLeft,
DiskQuota,
AccessDenied,
} || UnexpectedError;
/// Write all pending file contents and metadata modifications to all filesystems.
pub fn sync() void {
system.sync();
}
/// Write all pending file contents and metadata modifications to the filesystem which contains the specified file.
pub fn syncfs(fd: fd_t) SyncError!void {
const rc = system.syncfs(fd);
switch (errno(rc)) {
0 => return,
EBADF, EINVAL, EROFS => unreachable,
EIO => return error.InputOutput,
ENOSPC => return error.NoSpaceLeft,
EDQUOT => return error.DiskQuota,
else => |err| return std.os.unexpectedErrno(err),
}
}
/// Write all pending file contents and metadata modifications for the specified file descriptor to the underlying filesystem.
pub fn fsync(fd: fd_t) SyncError!void {
if (std.Target.current.os.tag == .windows) {
if (windows.kernel32.FlushFileBuffers(fd) != 0)
return;
switch (windows.kernel32.GetLastError()) {
.SUCCESS => return,
.INVALID_HANDLE => unreachable,
.ACCESS_DENIED => return error.AccessDenied, // a sync was performed but the system couldn't update the access time
.UNEXP_NET_ERR => return error.InputOutput,
else => return error.InputOutput,
}
}
const rc = system.fsync(fd);
switch (errno(rc)) {
0 => return,
EBADF, EINVAL, EROFS => unreachable,
EIO => return error.InputOutput,
ENOSPC => return error.NoSpaceLeft,
EDQUOT => return error.DiskQuota,
else => |err| return std.os.unexpectedErrno(err),
}
}
/// Write all pending file contents for the specified file descriptor to the underlying filesystem, but not necessarily the metadata.
pub fn fdatasync(fd: fd_t) SyncError!void {
if (std.Target.current.os.tag == .windows) {
return fsync(fd) catch |err| switch (err) {
SyncError.AccessDenied => return, // fdatasync doesn't promise that the access time was synced
else => return err,
};
}
const rc = system.fdatasync(fd);
switch (errno(rc)) {
0 => return,
EBADF, EINVAL, EROFS => unreachable,
EIO => return error.InputOutput,
ENOSPC => return error.NoSpaceLeft,
EDQUOT => return error.DiskQuota,
else => |err| return std.os.unexpectedErrno(err),
}
}
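A hedged sketch of how the new sync helpers are intended to be used after writing a file (the function and path names here are illustrative, not part of the standard library):

const std = @import("std");

fn saveDurably(path: []const u8, data: []const u8) !void {
    const file = try std.fs.cwd().createFile(path, .{});
    defer file.close();
    try file.writeAll(data);
    // Flush the file's contents (and metadata) to the underlying filesystem.
    try std.os.fsync(file.handle);
}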

View File

@ -7,9 +7,13 @@ const std = @import("../../std.zig");
const assert = std.debug.assert;
const maxInt = std.math.maxInt;
// See: https://opensource.apple.com/source/xnu/xnu-6153.141.1/bsd/sys/_types.h.auto.html
// TODO: audit mode_t/pid_t, should likely be u16/i32
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const mode_t = c_uint;
pub const uid_t = u32;
pub const gid_t = u32;
pub const in_port_t = u16;
pub const sa_family_t = u8;
@ -79,8 +83,8 @@ pub const Stat = extern struct {
mode: u16,
nlink: u16,
ino: ino_t,
uid: u32,
gid: u32,
uid: uid_t,
gid: gid_t,
rdev: i32,
atimesec: isize,
atimensec: isize,

View File

@ -9,10 +9,17 @@ const maxInt = std.math.maxInt;
pub fn S_ISCHR(m: u32) bool {
return m & S_IFMT == S_IFCHR;
}
// See:
// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/include/unistd.h
// - https://gitweb.dragonflybsd.org/dragonfly.git/blob/HEAD:/sys/sys/types.h
// TODO: mode_t should probably be changed to a u16, audit pid_t/off_t as well
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const off_t = c_long;
pub const mode_t = c_uint;
pub const uid_t = u32;
pub const gid_t = u32;
pub const ENOTSUP = EOPNOTSUPP;
pub const EWOULDBLOCK = EAGAIN;
@ -151,8 +158,8 @@ pub const Stat = extern struct {
dev: c_uint,
mode: c_ushort,
padding1: u16,
uid: c_uint,
gid: c_uint,
uid: uid_t,
gid: gid_t,
rdev: c_uint,
atim: timespec,
mtim: timespec,
@ -511,7 +518,7 @@ pub const siginfo_t = extern struct {
si_errno: c_int,
si_code: c_int,
si_pid: c_int,
si_uid: c_uint,
si_uid: uid_t,
si_status: c_int,
si_addr: ?*c_void,
si_value: union_sigval,

View File

@ -6,8 +6,12 @@
const std = @import("../../std.zig");
const maxInt = std.math.maxInt;
// See https://svnweb.freebsd.org/base/head/sys/sys/_types.h?view=co
// TODO: audit pid_t/mode_t. They should likely be i32 and u16, respectively
pub const fd_t = c_int;
pub const pid_t = c_int;
pub const uid_t = u32;
pub const gid_t = u32;
pub const mode_t = c_uint;
pub const socklen_t = u32;
@ -128,8 +132,8 @@ pub const Stat = extern struct {
mode: u16,
__pad0: u16,
uid: u32,
gid: u32,
uid: uid_t,
gid: gid_t,
__pad1: u32,
rdev: u64,

View File

@ -29,7 +29,7 @@ const is_mips = builtin.arch.isMIPS();
pub const pid_t = i32;
pub const fd_t = i32;
pub const uid_t = i32;
pub const uid_t = u32;
pub const gid_t = u32;
pub const clock_t = isize;
@ -846,14 +846,14 @@ pub const SIG_ERR = @intToPtr(?Sigaction.sigaction_fn, maxInt(usize));
pub const SIG_DFL = @intToPtr(?Sigaction.sigaction_fn, 0);
pub const SIG_IGN = @intToPtr(?Sigaction.sigaction_fn, 1);
pub const empty_sigset = [_]u32{0} ** sigset_t.len;
pub const empty_sigset = [_]u32{0} ** @typeInfo(sigset_t).Array.len;
pub const signalfd_siginfo = extern struct {
signo: u32,
errno: i32,
code: i32,
pid: u32,
uid: u32,
uid: uid_t,
fd: i32,
tid: u32,
band: u32,
@ -1491,10 +1491,10 @@ pub const Statx = extern struct {
nlink: u32,
/// User ID of owner
uid: u32,
uid: uid_t,
/// Group ID of owner
gid: u32,
gid: gid_t,
/// File type and mode
mode: u16,

View File

@ -7,6 +7,7 @@
const std = @import("../../../std.zig");
const pid_t = linux.pid_t;
const uid_t = linux.uid_t;
const gid_t = linux.gid_t;
const clock_t = linux.clock_t;
const stack_t = linux.stack_t;
const sigset_t = linux.sigset_t;
@ -523,8 +524,8 @@ pub const Stat = extern struct {
nlink: usize,
mode: u32,
uid: u32,
gid: u32,
uid: uid_t,
gid: gid_t,
__pad0: u32,
rdev: u64,
size: off_t,

View File

@ -655,7 +655,7 @@ pub fn nanosleep(req: *const timespec, rem: ?*timespec) usize {
return syscall2(.nanosleep, @ptrToInt(req), @ptrToInt(rem));
}
pub fn setuid(uid: u32) usize {
pub fn setuid(uid: uid_t) usize {
if (@hasField(SYS, "setuid32")) {
return syscall1(.setuid32, uid);
} else {
@ -663,7 +663,7 @@ pub fn setuid(uid: u32) usize {
}
}
pub fn setgid(gid: u32) usize {
pub fn setgid(gid: gid_t) usize {
if (@hasField(SYS, "setgid32")) {
return syscall1(.setgid32, gid);
} else {
@ -671,7 +671,7 @@ pub fn setgid(gid: u32) usize {
}
}
pub fn setreuid(ruid: u32, euid: u32) usize {
pub fn setreuid(ruid: uid_t, euid: uid_t) usize {
if (@hasField(SYS, "setreuid32")) {
return syscall2(.setreuid32, ruid, euid);
} else {
@ -679,7 +679,7 @@ pub fn setreuid(ruid: u32, euid: u32) usize {
}
}
pub fn setregid(rgid: u32, egid: u32) usize {
pub fn setregid(rgid: gid_t, egid: gid_t) usize {
if (@hasField(SYS, "setregid32")) {
return syscall2(.setregid32, rgid, egid);
} else {
@ -687,47 +687,61 @@ pub fn setregid(rgid: u32, egid: u32) usize {
}
}
pub fn getuid() u32 {
pub fn getuid() uid_t {
if (@hasField(SYS, "getuid32")) {
return @as(u32, syscall0(.getuid32));
return @as(uid_t, syscall0(.getuid32));
} else {
return @as(u32, syscall0(.getuid));
return @as(uid_t, syscall0(.getuid));
}
}
pub fn getgid() u32 {
pub fn getgid() gid_t {
if (@hasField(SYS, "getgid32")) {
return @as(u32, syscall0(.getgid32));
return @as(gid_t, syscall0(.getgid32));
} else {
return @as(u32, syscall0(.getgid));
return @as(gid_t, syscall0(.getgid));
}
}
pub fn geteuid() u32 {
pub fn geteuid() uid_t {
if (@hasField(SYS, "geteuid32")) {
return @as(u32, syscall0(.geteuid32));
return @as(uid_t, syscall0(.geteuid32));
} else {
return @as(u32, syscall0(.geteuid));
return @as(uid_t, syscall0(.geteuid));
}
}
pub fn getegid() u32 {
pub fn getegid() gid_t {
if (@hasField(SYS, "getegid32")) {
return @as(u32, syscall0(.getegid32));
return @as(gid_t, syscall0(.getegid32));
} else {
return @as(u32, syscall0(.getegid));
return @as(gid_t, syscall0(.getegid));
}
}
pub fn seteuid(euid: u32) usize {
return setreuid(std.math.maxInt(u32), euid);
pub fn seteuid(euid: uid_t) usize {
// We use setresuid here instead of setreuid to ensure that the saved uid
// is not changed. This is what musl and recent glibc versions do as well.
//
// The setresuid(2) man page says that if -1 is passed the corresponding
// id will not be changed. Since uid_t is unsigned, this wraps around to the
// max value in C.
comptime assert(@typeInfo(uid_t) == .Int and !@typeInfo(uid_t).Int.is_signed);
return setresuid(std.math.maxInt(uid_t), euid, std.math.maxInt(uid_t));
}
pub fn setegid(egid: u32) usize {
return setregid(std.math.maxInt(u32), egid);
pub fn setegid(egid: gid_t) usize {
// We use setresgid here instead of setregid to ensure that the saved gid
// is not changed. This is what musl and recent glibc versions do as well.
//
// The setresgid(2) man page says that if -1 is passed the corresponding
// id will not be changed. Since gid_t is unsigned, this wraps around to the
// max value in C.
comptime assert(@typeInfo(gid_t) == .Int and !@typeInfo(gid_t).Int.is_signed);
return setresgid(std.math.maxInt(gid_t), egid, std.math.maxInt(gid_t));
}
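A tiny illustration of the wrap-around these helpers rely on: maxInt of the unsigned id type has the same bit pattern as C's -1 (a sketch; uid_t is the Linux definition from this file).

const std = @import("std");

test "maxInt(uid_t) is the bit pattern of -1 (sketch)" {
    const uid_t = std.os.linux.uid_t;
    std.debug.assert(std.math.maxInt(uid_t) == @bitCast(uid_t, @as(i32, -1)));
}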
pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
pub fn getresuid(ruid: *uid_t, euid: *uid_t, suid: *uid_t) usize {
if (@hasField(SYS, "getresuid32")) {
return syscall3(.getresuid32, @ptrToInt(ruid), @ptrToInt(euid), @ptrToInt(suid));
} else {
@ -735,7 +749,7 @@ pub fn getresuid(ruid: *u32, euid: *u32, suid: *u32) usize {
}
}
pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
pub fn getresgid(rgid: *gid_t, egid: *gid_t, sgid: *gid_t) usize {
if (@hasField(SYS, "getresgid32")) {
return syscall3(.getresgid32, @ptrToInt(rgid), @ptrToInt(egid), @ptrToInt(sgid));
} else {
@ -743,7 +757,7 @@ pub fn getresgid(rgid: *u32, egid: *u32, sgid: *u32) usize {
}
}
pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
pub fn setresuid(ruid: uid_t, euid: uid_t, suid: uid_t) usize {
if (@hasField(SYS, "setresuid32")) {
return syscall3(.setresuid32, ruid, euid, suid);
} else {
@ -751,7 +765,7 @@ pub fn setresuid(ruid: u32, euid: u32, suid: u32) usize {
}
}
pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
pub fn setresgid(rgid: gid_t, egid: gid_t, sgid: gid_t) usize {
if (@hasField(SYS, "setresgid32")) {
return syscall3(.setresgid32, rgid, egid, sgid);
} else {
@ -759,7 +773,7 @@ pub fn setresgid(rgid: u32, egid: u32, sgid: u32) usize {
}
}
pub fn getgroups(size: usize, list: *u32) usize {
pub fn getgroups(size: usize, list: *gid_t) usize {
if (@hasField(SYS, "getgroups32")) {
return syscall2(.getgroups32, size, @ptrToInt(list));
} else {
@ -767,7 +781,7 @@ pub fn getgroups(size: usize, list: *u32) usize {
}
}
pub fn setgroups(size: usize, list: *const u32) usize {
pub fn setgroups(size: usize, list: *const gid_t) usize {
if (@hasField(SYS, "setgroups32")) {
return syscall2(.setgroups32, size, @ptrToInt(list));
} else {
@ -815,17 +829,19 @@ pub fn sigaction(sig: u6, noalias act: *const Sigaction, noalias oact: ?*Sigacti
return 0;
}
const usize_bits = @typeInfo(usize).Int.bits;
pub fn sigaddset(set: *sigset_t, sig: u6) void {
const s = sig - 1;
// shift in musl: s&8*sizeof *set->__bits-1
const shift = @intCast(u5, s & (usize.bit_count - 1));
const shift = @intCast(u5, s & (usize_bits - 1));
const val = @intCast(u32, 1) << shift;
(set.*)[@intCast(usize, s) / usize.bit_count] |= val;
(set.*)[@intCast(usize, s) / usize_bits] |= val;
}
pub fn sigismember(set: *const sigset_t, sig: u6) bool {
const s = sig - 1;
return ((set.*)[@intCast(usize, s) / usize.bit_count] & (@intCast(usize, 1) << (s & (usize.bit_count - 1)))) != 0;
return ((set.*)[@intCast(usize, s) / usize_bits] & (@intCast(usize, 1) << (s & (usize_bits - 1)))) != 0;
}
pub fn getsockname(fd: i32, noalias addr: *sockaddr, noalias len: *socklen_t) usize {
@ -1226,6 +1242,22 @@ pub fn bpf(cmd: BPF.Cmd, attr: *BPF.Attr, size: u32) usize {
return syscall3(.bpf, @enumToInt(cmd), @ptrToInt(attr), size);
}
pub fn sync() void {
_ = syscall0(.sync);
}
pub fn syncfs(fd: fd_t) usize {
return syscall1(.syncfs, @bitCast(usize, @as(isize, fd)));
}
pub fn fsync(fd: fd_t) usize {
return syscall1(.fsync, @bitCast(usize, @as(isize, fd)));
}
pub fn fdatasync(fd: fd_t) usize {
return syscall1(.fdatasync, @bitCast(usize, @as(isize, fd)));
}
test "" {
if (builtin.os.tag == .linux) {
_ = @import("linux/test.zig");

File diff suppressed because it is too large

View File

@ -555,3 +555,39 @@ test "signalfd" {
return error.SkipZigTest;
_ = std.os.signalfd;
}
test "sync" {
if (builtin.os.tag != .linux)
return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
const test_out_file = "os_tmp_test";
const file = try tmp.dir.createFile(test_out_file, .{});
defer {
file.close();
tmp.dir.deleteFile(test_out_file) catch {};
}
os.sync();
try os.syncfs(file.handle);
}
test "fsync" {
if (builtin.os.tag != .linux and builtin.os.tag != .windows)
return error.SkipZigTest;
var tmp = tmpDir(.{});
defer tmp.cleanup();
const test_out_file = "os_tmp_test";
const file = try tmp.dir.createFile(test_out_file, .{});
defer {
file.close();
tmp.dir.deleteFile(test_out_file) catch {};
}
try os.fsync(file.handle);
try os.fdatasync(file.handle);
}

View File

@ -287,3 +287,5 @@ pub extern "kernel32" fn K32GetWsChangesEx(hProcess: HANDLE, lpWatchInfoEx: PPSA
pub extern "kernel32" fn K32InitializeProcessForWsWatch(hProcess: HANDLE) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSet(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn K32QueryWorkingSetEx(hProcess: HANDLE, pv: PVOID, cb: DWORD) callconv(.Stdcall) BOOL;
pub extern "kernel32" fn FlushFileBuffers(hFile: HANDLE) callconv(.Stdcall) BOOL;

View File

@ -12,7 +12,7 @@ pub const SOCKET_ERROR = -1;
pub const WSADESCRIPTION_LEN = 256;
pub const WSASYS_STATUS_LEN = 128;
pub const WSADATA = if (usize.bit_count == u64.bit_count)
pub const WSADATA = if (@sizeOf(usize) == @sizeOf(u64))
extern struct {
wVersion: WORD,
wHighVersion: WORD,

View File

@ -636,7 +636,7 @@ const MsfStream = struct {
blocks: []u32 = undefined,
block_size: u32 = undefined,
pub const Error = @TypeOf(read).ReturnType.ErrorSet;
pub const Error = @typeInfo(@typeInfo(@TypeOf(read)).Fn.return_type.?).ErrorUnion.error_set;
fn init(block_size: u32, file: File, blocks: []u32) MsfStream {
const stream = MsfStream{

View File

@ -578,8 +578,8 @@ fn testWindowsCmdLine(input_cmd_line: [*]const u8, expected_args: []const []cons
}
pub const UserInfo = struct {
uid: u32,
gid: u32,
uid: os.uid_t,
gid: os.gid_t,
};
/// POSIX function which gets a uid from username.
@ -607,8 +607,8 @@ pub fn posixGetUserInfo(name: []const u8) !UserInfo {
var buf: [std.mem.page_size]u8 = undefined;
var name_index: usize = 0;
var state = State.Start;
var uid: u32 = 0;
var gid: u32 = 0;
var uid: os.uid_t = 0;
var gid: os.gid_t = 0;
while (true) {
const amt_read = try reader.read(buf[0..]);

View File

@ -197,7 +197,7 @@ pub const Progress = struct {
var maybe_node: ?*Node = &self.root;
while (maybe_node) |node| {
if (need_ellipse) {
self.bufWrite(&end, "...", .{});
self.bufWrite(&end, "... ", .{});
}
need_ellipse = false;
if (node.name.len != 0 or node.estimated_total_items != null) {
@ -218,7 +218,7 @@ pub const Progress = struct {
maybe_node = node.recently_updated_child;
}
if (need_ellipse) {
self.bufWrite(&end, "...", .{});
self.bufWrite(&end, "... ", .{});
}
}
@ -253,7 +253,7 @@ pub const Progress = struct {
const bytes_needed_for_esc_codes_at_end = if (std.builtin.os.tag == .windows) 0 else 11;
const max_end = self.output_buffer.len - bytes_needed_for_esc_codes_at_end;
if (end.* > max_end) {
const suffix = "...";
const suffix = "... ";
self.columns_written = self.columns_written - (end.* - max_end) + suffix.len;
std.mem.copy(u8, self.output_buffer[max_end..], suffix);
end.* = max_end + suffix.len;

View File

@ -51,8 +51,9 @@ pub const Random = struct {
/// Returns a random int `i` such that `0 <= i <= maxInt(T)`.
/// `i` is evenly distributed.
pub fn int(r: *Random, comptime T: type) T {
const UnsignedT = std.meta.Int(false, T.bit_count);
const ByteAlignedT = std.meta.Int(false, @divTrunc(T.bit_count + 7, 8) * 8);
const bits = @typeInfo(T).Int.bits;
const UnsignedT = std.meta.Int(false, bits);
const ByteAlignedT = std.meta.Int(false, @divTrunc(bits + 7, 8) * 8);
var rand_bytes: [@sizeOf(ByteAlignedT)]u8 = undefined;
r.bytes(rand_bytes[0..]);
@ -68,10 +69,11 @@ pub const Random = struct {
/// Constant-time implementation of `uintLessThan`.
/// The results of this function may be biased.
pub fn uintLessThanBiased(r: *Random, comptime T: type, less_than: T) T {
comptime assert(T.is_signed == false);
comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
comptime assert(@typeInfo(T).Int.is_signed == false);
const bits = @typeInfo(T).Int.bits;
comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
if (T.bit_count <= 32) {
if (bits <= 32) {
return @intCast(T, limitRangeBiased(u32, r.int(u32), less_than));
} else {
return @intCast(T, limitRangeBiased(u64, r.int(u64), less_than));
@ -87,13 +89,15 @@ pub const Random = struct {
/// this function is guaranteed to return.
/// If you need deterministic runtime bounds, use `uintLessThanBiased`.
pub fn uintLessThan(r: *Random, comptime T: type, less_than: T) T {
comptime assert(T.is_signed == false);
comptime assert(T.bit_count <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
comptime assert(@typeInfo(T).Int.is_signed == false);
const bits = @typeInfo(T).Int.bits;
comptime assert(bits <= 64); // TODO: workaround: LLVM ERROR: Unsupported library call operation!
assert(0 < less_than);
// Small is typically u32
const Small = std.meta.Int(false, @divTrunc(T.bit_count + 31, 32) * 32);
const small_bits = @divTrunc(bits + 31, 32) * 32;
const Small = std.meta.Int(false, small_bits);
// Large is typically u64
const Large = std.meta.Int(false, Small.bit_count * 2);
const Large = std.meta.Int(false, small_bits * 2);
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
@ -105,7 +109,7 @@ pub const Random = struct {
// TODO: workaround for https://github.com/ziglang/zig/issues/1770
// should be:
// var t: Small = -%less_than;
var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, Small.bit_count), @as(Small, less_than)));
var t: Small = @bitCast(Small, -%@bitCast(std.meta.Int(true, small_bits), @as(Small, less_than)));
if (t >= less_than) {
t -= less_than;
@ -119,13 +123,13 @@ pub const Random = struct {
l = @truncate(Small, m);
}
}
return @intCast(T, m >> Small.bit_count);
return @intCast(T, m >> small_bits);
}
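A small usage sketch of the unbiased range function above (DefaultPrng is the standard library's default generator; the seed is arbitrary):

const std = @import("std");

test "uintLessThan stays in range (sketch)" {
    var prng = std.rand.DefaultPrng.init(42);
    var i: usize = 0;
    while (i < 100) : (i += 1) {
        std.debug.assert(prng.random.uintLessThan(u8, 10) < 10);
    }
}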
/// Constant-time implementation of `uintAtMost`.
/// The results of this function may be biased.
pub fn uintAtMostBiased(r: *Random, comptime T: type, at_most: T) T {
assert(T.is_signed == false);
assert(@typeInfo(T).Int.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);
@ -137,7 +141,7 @@ pub const Random = struct {
/// See `uintLessThan`, which this function uses in most cases,
/// for commentary on the runtime of this function.
pub fn uintAtMost(r: *Random, comptime T: type, at_most: T) T {
assert(T.is_signed == false);
assert(@typeInfo(T).Int.is_signed == false);
if (at_most == maxInt(T)) {
// have the full range
return r.int(T);
@ -149,9 +153,10 @@ pub const Random = struct {
/// The results of this function may be biased.
pub fn intRangeLessThanBiased(r: *Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
if (T.is_signed) {
const info = @typeInfo(T).Int;
if (info.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(false, T.bit_count);
const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThanBiased(UnsignedT, hi -% lo);
@ -167,9 +172,10 @@ pub const Random = struct {
/// for commentary on the runtime of this function.
pub fn intRangeLessThan(r: *Random, comptime T: type, at_least: T, less_than: T) T {
assert(at_least < less_than);
if (T.is_signed) {
const info = @typeInfo(T).Int;
if (info.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(false, T.bit_count);
const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, less_than);
const result = lo +% r.uintLessThan(UnsignedT, hi -% lo);
@ -184,9 +190,10 @@ pub const Random = struct {
/// The results of this function may be biased.
pub fn intRangeAtMostBiased(r: *Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
if (T.is_signed) {
const info = @typeInfo(T).Int;
if (info.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(false, T.bit_count);
const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMostBiased(UnsignedT, hi -% lo);
@ -202,9 +209,10 @@ pub const Random = struct {
/// for commentary on the runtime of this function.
pub fn intRangeAtMost(r: *Random, comptime T: type, at_least: T, at_most: T) T {
assert(at_least <= at_most);
if (T.is_signed) {
const info = @typeInfo(T).Int;
if (info.is_signed) {
// Two's complement makes this math pretty easy.
const UnsignedT = std.meta.Int(false, T.bit_count);
const UnsignedT = std.meta.Int(false, info.bits);
const lo = @bitCast(UnsignedT, at_least);
const hi = @bitCast(UnsignedT, at_most);
const result = lo +% r.uintAtMost(UnsignedT, hi -% lo);
@ -280,14 +288,15 @@ pub const Random = struct {
/// into an integer 0 <= result < less_than.
/// This function introduces a minor bias.
pub fn limitRangeBiased(comptime T: type, random_int: T, less_than: T) T {
comptime assert(T.is_signed == false);
const T2 = std.meta.Int(false, T.bit_count * 2);
comptime assert(@typeInfo(T).Int.is_signed == false);
const bits = @typeInfo(T).Int.bits;
const T2 = std.meta.Int(false, bits * 2);
// adapted from:
// http://www.pcg-random.org/posts/bounded-rands.html
// "Integer Multiplication (Biased)"
var m: T2 = @as(T2, random_int) * @as(T2, less_than);
return @intCast(T, m >> T.bit_count);
return @intCast(T, m >> bits);
}
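A worked instance of the biased multiply above: for T = u8, random_int = 200 and less_than = 10, m = 200 * 10 = 2000, and the result is 2000 >> 8 = 7 (a sketch, assuming the function remains public at the top level of the file):

const std = @import("std");

test "limitRangeBiased worked example (sketch)" {
    std.debug.assert(std.rand.limitRangeBiased(u8, 200, 10) == 7);
}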
const SequentialPrng = struct {

View File

@ -133,7 +133,7 @@ pub fn main() !void {
}
fn runBuild(builder: *Builder) anyerror!void {
switch (@typeInfo(@TypeOf(root.build).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(root.build)).Fn.return_type.?)) {
.Void => root.build(builder),
.ErrorUnion => try root.build(builder),
else => @compileError("expected return type of build to be 'void' or '!void'"),

View File

@ -516,11 +516,12 @@ export fn roundf(a: f32) f32 {
fn generic_fmod(comptime T: type, x: T, y: T) T {
@setRuntimeSafety(false);
const uint = std.meta.Int(false, T.bit_count);
const bits = @typeInfo(T).Float.bits;
const uint = std.meta.Int(false, bits);
const log2uint = math.Log2Int(uint);
const digits = if (T == f32) 23 else 52;
const exp_bits = if (T == f32) 9 else 12;
const bits_minus_1 = T.bit_count - 1;
const bits_minus_1 = bits - 1;
const mask = if (T == f32) 0xff else 0x7ff;
var ux = @bitCast(uint, x);
var uy = @bitCast(uint, y);

View File

@ -59,23 +59,25 @@ pub fn __aeabi_dsub(a: f64, b: f64) callconv(.AAPCS) f64 {
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
const Z = std.meta.Int(false, T.bit_count);
const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(false, bits);
const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
const shift = @clz(std.meta.Int(false, T.bit_count), significand.*) - @clz(Z, implicitBit);
const shift = @clz(std.meta.Int(false, bits), significand.*) - @clz(Z, implicitBit);
significand.* <<= @intCast(S, shift);
return 1 - shift;
}
// TODO: restore inline keyword, see: https://github.com/ziglang/zig/issues/2154
fn addXf3(comptime T: type, a: T, b: T) T {
const Z = std.meta.Int(false, T.bit_count);
const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(false, bits);
const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
const typeWidth = T.bit_count;
const typeWidth = bits;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@ -187,7 +189,7 @@ fn addXf3(comptime T: type, a: T, b: T) T {
// If partial cancellation occurred, we need to left-shift the result
// and adjust the exponent:
if (aSignificand < implicitBit << 3) {
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, T.bit_count), implicitBit << 3));
const shift = @intCast(i32, @clz(Z, aSignificand)) - @intCast(i32, @clz(std.meta.Int(false, bits), implicitBit << 3));
aSignificand <<= @intCast(S, shift);
aExponent -= shift;
}

View File

@ -7,8 +7,8 @@ const builtin = @import("builtin");
pub fn _alldiv(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
const s_a = a >> (i64.bit_count - 1);
const s_b = b >> (i64.bit_count - 1);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;

View File

@ -7,8 +7,8 @@ const builtin = @import("builtin");
pub fn _allrem(a: i64, b: i64) callconv(.Stdcall) i64 {
@setRuntimeSafety(builtin.is_test);
const s_a = a >> (i64.bit_count - 1);
const s_b = b >> (i64.bit_count - 1);
const s_a = a >> (64 - 1);
const s_b = b >> (64 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;

View File

@ -27,8 +27,9 @@ const GE = extern enum(i32) {
pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
@setRuntimeSafety(builtin.is_test);
const srep_t = std.meta.Int(true, T.bit_count);
const rep_t = std.meta.Int(false, T.bit_count);
const bits = @typeInfo(T).Float.bits;
const srep_t = std.meta.Int(true, bits);
const rep_t = std.meta.Int(false, bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@ -73,7 +74,7 @@ pub fn cmp(comptime T: type, comptime RT: type, a: T, b: T) RT {
pub fn unordcmp(comptime T: type, a: T, b: T) i32 {
@setRuntimeSafety(builtin.is_test);
const rep_t = std.meta.Int(false, T.bit_count);
const rep_t = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);

View File

@ -12,10 +12,9 @@ const builtin = @import("builtin");
pub fn __divdf3(a: f64, b: f64) callconv(.C) f64 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, f64.bit_count);
const SignedZ = std.meta.Int(true, f64.bit_count);
const Z = std.meta.Int(false, 64);
const SignedZ = std.meta.Int(true, 64);
const typeWidth = f64.bit_count;
const significandBits = std.math.floatMantissaBits(f64);
const exponentBits = std.math.floatExponentBits(f64);
@ -317,9 +316,9 @@ pub fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
pub fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
pub fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

View File

@ -12,9 +12,8 @@ const builtin = @import("builtin");
pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, f32.bit_count);
const Z = std.meta.Int(false, 32);
const typeWidth = f32.bit_count;
const significandBits = std.math.floatMantissaBits(f32);
const exponentBits = std.math.floatExponentBits(f32);
@ -190,9 +189,9 @@ pub fn __divsf3(a: f32, b: f32) callconv(.C) f32 {
}
}
fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;

View File

@ -11,10 +11,9 @@ const wideMultiply = @import("divdf3.zig").wideMultiply;
pub fn __divtf3(a: f128, b: f128) callconv(.C) f128 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, f128.bit_count);
const SignedZ = std.meta.Int(true, f128.bit_count);
const Z = std.meta.Int(false, 128);
const SignedZ = std.meta.Int(true, 128);
const typeWidth = f128.bit_count;
const significandBits = std.math.floatMantissaBits(f128);
const exponentBits = std.math.floatExponentBits(f128);

View File

@ -9,8 +9,8 @@ const builtin = @import("builtin");
pub fn __divti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
const s_a = a >> (i128.bit_count - 1);
const s_b = b >> (i128.bit_count - 1);
const s_a = a >> (128 - 1);
const s_b = b >> (128 - 1);
const an = (a ^ s_a) -% s_a;
const bn = (b ^ s_b) -% s_b;

View File

@ -28,7 +28,7 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
else => unreachable,
};
const typeWidth = rep_t.bit_count;
const typeWidth = @typeInfo(rep_t).Int.bits;
const exponentBits = (typeWidth - significandBits - 1);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
@ -50,12 +50,13 @@ pub fn fixint(comptime fp_t: type, comptime fixint_t: type, a: fp_t) fixint_t {
if (exponent < 0) return 0;
// The unsigned result needs to be large enough to handle an fixint_t or rep_t
const fixuint_t = std.meta.Int(false, fixint_t.bit_count);
const UintResultType = if (fixint_t.bit_count > rep_t.bit_count) fixuint_t else rep_t;
const fixint_bits = @typeInfo(fixint_t).Int.bits;
const fixuint_t = std.meta.Int(false, fixint_bits);
const UintResultType = if (fixint_bits > typeWidth) fixuint_t else rep_t;
var uint_result: UintResultType = undefined;
// If the value is too large for the integer type, saturate.
if (@intCast(usize, exponent) >= fixint_t.bit_count) {
if (@intCast(usize, exponent) >= fixint_bits) {
return if (negative) @as(fixint_t, minInt(fixint_t)) else @as(fixint_t, maxInt(fixint_t));
}

View File

@ -15,14 +15,14 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
f128 => u128,
else => unreachable,
};
const srep_t = @import("std").meta.Int(true, rep_t.bit_count);
const typeWidth = @typeInfo(rep_t).Int.bits;
const srep_t = @import("std").meta.Int(true, typeWidth);
const significandBits = switch (fp_t) {
f32 => 23,
f64 => 52,
f128 => 112,
else => unreachable,
};
const typeWidth = rep_t.bit_count;
const exponentBits = (typeWidth - significandBits - 1);
const signBit = (@as(rep_t, 1) << (significandBits + exponentBits));
const maxExponent = ((1 << exponentBits) - 1);
@ -44,7 +44,7 @@ pub fn fixuint(comptime fp_t: type, comptime fixuint_t: type, a: fp_t) fixuint_t
if (sign == -1 or exponent < 0) return 0;
// If the value is too large for the integer type, saturate.
if (@intCast(c_uint, exponent) >= fixuint_t.bit_count) return ~@as(fixuint_t, 0);
if (@intCast(c_uint, exponent) >= @typeInfo(fixuint_t).Int.bits) return ~@as(fixuint_t, 0);
// If 0 <= exponent < significandBits, right shift to get the result.
// Otherwise, shift left.

View File

@ -12,15 +12,16 @@ const FLT_MANT_DIG = 24;
fn __floatXisf(comptime T: type, arg: T) f32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const bits = @typeInfo(T).Int.bits;
const Z = std.meta.Int(false, bits);
const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
if (arg == 0) {
return @as(f32, 0.0);
}
var ai = arg;
const N: u32 = T.bit_count;
const N: u32 = bits;
const si = ai >> @intCast(S, (N - 1));
ai = ((ai ^ si) -% si);
var a = @bitCast(Z, ai);
@ -66,7 +67,7 @@ fn __floatXisf(comptime T: type, arg: T) f32 {
// a is now rounded to FLT_MANT_DIG bits
}
const s = @bitCast(Z, arg) >> (T.bit_count - 32);
const s = @bitCast(Z, arg) >> (@typeInfo(T).Int.bits - 32);
const r = (@intCast(u32, s) & 0x80000000) | // sign
(@intCast(u32, (e + 127)) << 23) | // exponent
(@truncate(u32, a) & 0x007fffff); // mantissa-high

View File

@ -10,8 +10,9 @@ const maxInt = std.math.maxInt;
fn floatsiXf(comptime T: type, a: i32) T {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const S = std.meta.Int(false, T.bit_count - @clz(Z, @as(Z, T.bit_count) - 1));
const bits = @typeInfo(T).Float.bits;
const Z = std.meta.Int(false, bits);
const S = std.meta.Int(false, bits - @clz(Z, @as(Z, bits) - 1));
if (a == 0) {
return @as(T, 0.0);
@ -22,7 +23,7 @@ fn floatsiXf(comptime T: type, a: i32) T {
const exponentBias = ((1 << exponentBits - 1) - 1);
const implicitBit = @as(Z, 1) << significandBits;
const signBit = @as(Z, 1 << Z.bit_count - 1);
const signBit = @as(Z, 1 << bits - 1);
const sign = a >> 31;
// Take absolute value of a via abs(x) = (x^(x >> 31)) - (x >> 31).

View File

@ -15,7 +15,7 @@ pub fn __floatundisf(arg: u64) callconv(.C) f32 {
if (arg == 0) return 0;
var a = arg;
const N: usize = @TypeOf(a).bit_count;
const N: usize = @typeInfo(@TypeOf(a)).Int.bits;
// Number of significant digits
const sd = N - @clz(u64, a);
// 8 exponent

View File

@ -19,7 +19,7 @@ pub fn __floatunditf(a: u64) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;
const exp: u128 = (u64.bit_count - 1) - @clz(u64, a);
const exp: u128 = (64 - 1) - @clz(u64, a);
const shift: u7 = mantissa_bits - @intCast(u7, exp);
var result: u128 = (@intCast(u128, a) << shift) ^ implicit_bit;

View File

@ -19,7 +19,7 @@ pub fn __floatunsitf(a: u64) callconv(.C) f128 {
const exponent_bias = (1 << (exponent_bits - 1)) - 1;
const implicit_bit = 1 << mantissa_bits;
const exp = (u64.bit_count - 1) - @clz(u64, a);
const exp = (64 - 1) - @clz(u64, a);
const shift = mantissa_bits - @intCast(u7, exp);
// TODO(#1148): @bitCast alignment error

View File

@ -219,7 +219,7 @@ fn test_one_divsi3(a: i32, b: i32, expected_q: i32) void {
pub fn __udivsi3(n: u32, d: u32) callconv(.C) u32 {
@setRuntimeSafety(builtin.is_test);
const n_uword_bits: c_uint = u32.bit_count;
const n_uword_bits: c_uint = 32;
// special cases
if (d == 0) return 0; // ?!
if (n == 0) return 0;

View File

@ -14,8 +14,8 @@ const compiler_rt = @import("../compiler_rt.zig");
pub fn __modti3(a: i128, b: i128) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
const s_a = a >> (i128.bit_count - 1); // s = a < 0 ? -1 : 0
const s_b = b >> (i128.bit_count - 1); // s = b < 0 ? -1 : 0
const s_a = a >> (128 - 1); // s = a < 0 ? -1 : 0
const s_b = b >> (128 - 1); // s = b < 0 ? -1 : 0
const an = (a ^ s_a) -% s_a; // negate if s == -1
const bn = (b ^ s_b) -% s_b; // negate if s == -1

View File

@ -33,9 +33,9 @@ pub fn __aeabi_dmul(a: f64, b: f64) callconv(.C) f64 {
fn mulXf3(comptime T: type, a: T, b: T) T {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const typeWidth = @typeInfo(T).Float.bits;
const Z = std.meta.Int(false, typeWidth);
const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);
@ -269,9 +269,9 @@ fn wideMultiply(comptime Z: type, a: Z, b: Z, hi: *Z, lo: *Z) void {
}
}
fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i32 {
fn normalize(comptime T: type, significand: *std.meta.Int(false, @typeInfo(T).Float.bits)) i32 {
@setRuntimeSafety(builtin.is_test);
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const significandBits = std.math.floatMantissaBits(T);
const implicitBit = @as(Z, 1) << significandBits;
@ -282,7 +282,7 @@ fn normalize(comptime T: type, significand: *std.meta.Int(false, T.bit_count)) i
fn wideRightShiftWithSticky(comptime Z: type, hi: *Z, lo: *Z, count: u32) void {
@setRuntimeSafety(builtin.is_test);
const typeWidth = Z.bit_count;
const typeWidth = @typeInfo(Z).Int.bits;
const S = std.math.Log2Int(Z);
if (count < typeWidth) {
const sticky = @truncate(u8, lo.* << @intCast(S, typeWidth -% count));

View File

@ -11,7 +11,7 @@ const minInt = std.math.minInt;
pub fn __mulodi4(a: i64, b: i64, overflow: *c_int) callconv(.C) i64 {
@setRuntimeSafety(builtin.is_test);
const min = @bitCast(i64, @as(u64, 1 << (i64.bit_count - 1)));
const min = @bitCast(i64, @as(u64, 1 << (64 - 1)));
const max = ~min;
overflow.* = 0;

View File

@ -9,7 +9,7 @@ const compiler_rt = @import("../compiler_rt.zig");
pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
@setRuntimeSafety(builtin.is_test);
const min = @bitCast(i128, @as(u128, 1 << (i128.bit_count - 1)));
const min = @bitCast(i128, @as(u128, 1 << (128 - 1)));
const max = ~min;
overflow.* = 0;
@ -27,9 +27,9 @@ pub fn __muloti4(a: i128, b: i128, overflow: *c_int) callconv(.C) i128 {
return r;
}
const sa = a >> (i128.bit_count - 1);
const sa = a >> (128 - 1);
const abs_a = (a ^ sa) -% sa;
const sb = b >> (i128.bit_count - 1);
const sb = b >> (128 - 1);
const abs_b = (b ^ sb) -% sb;
if (abs_a < 2 or abs_b < 2) {

View File

@ -24,9 +24,8 @@ pub fn __aeabi_dneg(arg: f64) callconv(.AAPCS) f64 {
}
fn negXf2(comptime T: type, a: T) T {
const Z = std.meta.Int(false, T.bit_count);
const Z = std.meta.Int(false, @typeInfo(T).Float.bits);
const typeWidth = T.bit_count;
const significandBits = std.math.floatMantissaBits(T);
const exponentBits = std.math.floatExponentBits(T);

View File

@ -9,8 +9,9 @@ const Log2Int = std.math.Log2Int;
fn Dwords(comptime T: type, comptime signed_half: bool) type {
return extern union {
pub const HalfTU = std.meta.Int(false, @divExact(T.bit_count, 2));
pub const HalfTS = std.meta.Int(true, @divExact(T.bit_count, 2));
pub const bits = @divExact(@typeInfo(T).Int.bits, 2);
pub const HalfTU = std.meta.Int(false, bits);
pub const HalfTS = std.meta.Int(true, bits);
pub const HalfT = if (signed_half) HalfTS else HalfTU;
all: T,
@ -30,15 +31,15 @@ pub fn ashlXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
if (b >= dwords.HalfT.bit_count) {
if (b >= dwords.bits) {
output.s.low = 0;
output.s.high = input.s.low << @intCast(S, b - dwords.HalfT.bit_count);
output.s.high = input.s.low << @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.low = input.s.low << @intCast(S, b);
output.s.high = input.s.high << @intCast(S, b);
output.s.high |= input.s.low >> @intCast(S, dwords.HalfT.bit_count - b);
output.s.high |= input.s.low >> @intCast(S, dwords.bits - b);
}
return output.all;
@ -53,14 +54,14 @@ pub fn ashrXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
if (b >= dwords.HalfT.bit_count) {
output.s.high = input.s.high >> (dwords.HalfT.bit_count - 1);
output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count);
if (b >= dwords.bits) {
output.s.high = input.s.high >> (dwords.bits - 1);
output.s.low = input.s.high >> @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @intCast(S, b);
output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b);
output.s.low = input.s.high << @intCast(S, dwords.bits - b);
// Avoid sign-extension here
output.s.low |= @bitCast(
dwords.HalfT,
@ -80,14 +81,14 @@ pub fn lshrXi3(comptime T: type, a: T, b: i32) T {
const input = dwords{ .all = a };
var output: dwords = undefined;
if (b >= dwords.HalfT.bit_count) {
if (b >= dwords.bits) {
output.s.high = 0;
output.s.low = input.s.high >> @intCast(S, b - dwords.HalfT.bit_count);
output.s.low = input.s.high >> @intCast(S, b - dwords.bits);
} else if (b == 0) {
return a;
} else {
output.s.high = input.s.high >> @intCast(S, b);
output.s.low = input.s.high << @intCast(S, dwords.HalfT.bit_count - b);
output.s.low = input.s.high << @intCast(S, dwords.bits - b);
output.s.low |= input.s.low >> @intCast(S, b);
}

View File

@ -50,7 +50,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
// Various constants whose values follow from the type parameters.
// Any reasonable optimizer will fold and propagate all of these.
const srcBits = src_t.bit_count;
const srcBits = @typeInfo(src_t).Float.bits;
const srcExpBits = srcBits - srcSigBits - 1;
const srcInfExp = (1 << srcExpBits) - 1;
const srcExpBias = srcInfExp >> 1;
@ -65,7 +65,7 @@ fn truncXfYf2(comptime dst_t: type, comptime src_t: type, a: src_t) dst_t {
const srcQNaN = 1 << (srcSigBits - 1);
const srcNaNCode = srcQNaN - 1;
const dstBits = dst_t.bit_count;
const dstBits = @typeInfo(dst_t).Float.bits;
const dstExpBits = dstBits - dstSigBits - 1;
const dstInfExp = (1 << dstExpBits) - 1;
const dstExpBias = dstInfExp >> 1;

View File

@ -15,8 +15,10 @@ const high = 1 - low;
pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem: ?*DoubleInt) DoubleInt {
@setRuntimeSafety(is_test);
const SingleInt = @import("std").meta.Int(false, @divExact(DoubleInt.bit_count, 2));
const SignedDoubleInt = @import("std").meta.Int(true, DoubleInt.bit_count);
const double_int_bits = @typeInfo(DoubleInt).Int.bits;
const single_int_bits = @divExact(double_int_bits, 2);
const SingleInt = @import("std").meta.Int(false, single_int_bits);
const SignedDoubleInt = @import("std").meta.Int(true, double_int_bits);
const Log2SingleInt = @import("std").math.Log2Int(SingleInt);
const n = @ptrCast(*const [2]SingleInt, &a).*; // TODO issue #421
@ -82,21 +84,21 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// ---
// K 0
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
// 0 <= sr <= SingleInt.bit_count - 2 or sr large
if (sr > SingleInt.bit_count - 2) {
// 0 <= sr <= single_int_bits - 2 or sr large
if (sr > single_int_bits - 2) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
// 1 <= sr <= SingleInt.bit_count - 1
// q.all = a << (DoubleInt.bit_count - sr);
// 1 <= sr <= single_int_bits - 1
// q.all = a << (double_int_bits - sr);
q[low] = 0;
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
// r.all = a >> sr;
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// d[low] != 0
if (d[high] == 0) {
@ -113,74 +115,74 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
}
sr = @ctz(SingleInt, d[low]);
q[high] = n[high] >> @intCast(Log2SingleInt, sr);
q[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
q[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
return @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &q[0]).*; // TODO issue #421
}
// K X
// ---
// 0 K
sr = 1 + SingleInt.bit_count + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
// 2 <= sr <= DoubleInt.bit_count - 1
// q.all = a << (DoubleInt.bit_count - sr);
sr = 1 + single_int_bits + @as(c_uint, @clz(SingleInt, d[low])) - @as(c_uint, @clz(SingleInt, n[high]));
// 2 <= sr <= double_int_bits - 1
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
if (sr == SingleInt.bit_count) {
if (sr == single_int_bits) {
q[low] = 0;
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else if (sr < SingleInt.bit_count) {
// 2 <= sr <= SingleInt.bit_count - 1
} else if (sr < single_int_bits) {
// 2 <= sr <= single_int_bits - 1
q[low] = 0;
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
} else {
// SingleInt.bit_count + 1 <= sr <= DoubleInt.bit_count - 1
q[low] = n[low] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr);
q[high] = (n[high] << @intCast(Log2SingleInt, DoubleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count));
// single_int_bits + 1 <= sr <= double_int_bits - 1
q[low] = n[low] << @intCast(Log2SingleInt, double_int_bits - sr);
q[high] = (n[high] << @intCast(Log2SingleInt, double_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr - single_int_bits));
r[high] = 0;
r[low] = n[high] >> @intCast(Log2SingleInt, sr - SingleInt.bit_count);
r[low] = n[high] >> @intCast(Log2SingleInt, sr - single_int_bits);
}
} else {
// K X
// ---
// K K
sr = @bitCast(c_uint, @as(c_int, @clz(SingleInt, d[high])) - @as(c_int, @clz(SingleInt, n[high])));
// 0 <= sr <= SingleInt.bit_count - 1 or sr large
if (sr > SingleInt.bit_count - 1) {
// 0 <= sr <= single_int_bits - 1 or sr large
if (sr > single_int_bits - 1) {
if (maybe_rem) |rem| {
rem.* = a;
}
return 0;
}
sr += 1;
// 1 <= sr <= SingleInt.bit_count
// q.all = a << (DoubleInt.bit_count - sr);
// 1 <= sr <= single_int_bits
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
q[low] = 0;
if (sr == SingleInt.bit_count) {
if (sr == single_int_bits) {
q[high] = n[low];
r[high] = 0;
r[low] = n[high];
} else {
r[high] = n[high] >> @intCast(Log2SingleInt, sr);
r[low] = (n[high] << @intCast(Log2SingleInt, SingleInt.bit_count - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
q[high] = n[low] << @intCast(Log2SingleInt, SingleInt.bit_count - sr);
r[low] = (n[high] << @intCast(Log2SingleInt, single_int_bits - sr)) | (n[low] >> @intCast(Log2SingleInt, sr));
q[high] = n[low] << @intCast(Log2SingleInt, single_int_bits - sr);
}
}
}
// Not a special case
// q and r are initialized with:
// q.all = a << (DoubleInt.bit_count - sr);
// q.all = a << (double_int_bits - sr);
// r.all = a >> sr;
// 1 <= sr <= DoubleInt.bit_count - 1
// 1 <= sr <= double_int_bits - 1
var carry: u32 = 0;
var r_all: DoubleInt = undefined;
while (sr > 0) : (sr -= 1) {
// r:q = ((r:q) << 1) | carry
r[high] = (r[high] << 1) | (r[low] >> (SingleInt.bit_count - 1));
r[low] = (r[low] << 1) | (q[high] >> (SingleInt.bit_count - 1));
q[high] = (q[high] << 1) | (q[low] >> (SingleInt.bit_count - 1));
r[high] = (r[high] << 1) | (r[low] >> (single_int_bits - 1));
r[low] = (r[low] << 1) | (q[high] >> (single_int_bits - 1));
q[high] = (q[high] << 1) | (q[low] >> (single_int_bits - 1));
q[low] = (q[low] << 1) | carry;
// carry = 0;
// if (r.all >= b)
@ -189,7 +191,7 @@ pub fn udivmod(comptime DoubleInt: type, a: DoubleInt, b: DoubleInt, maybe_rem:
// carry = 1;
// }
r_all = @ptrCast(*align(@alignOf(SingleInt)) DoubleInt, &r[0]).*; // TODO issue #421
const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (DoubleInt.bit_count - 1);
const s: SignedDoubleInt = @bitCast(SignedDoubleInt, b -% r_all -% 1) >> (double_int_bits - 1);
carry = @intCast(u32, s & 1);
r_all -= b & @bitCast(DoubleInt, s);
r = @ptrCast(*[2]SingleInt, &r_all).*; // TODO issue #421
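A minimal sketch of the type plumbing behind the renamed locals, using the same std.meta.Int and @typeInfo calls as the function above; the Halves wrapper is hypothetical, the real code keeps these values as locals.

// Sketch only, not part of the commit.
const std = @import("std");

fn Halves(comptime DoubleInt: type) type {
    // Half-width unsigned and same-width signed counterparts of DoubleInt.
    const double_int_bits = @typeInfo(DoubleInt).Int.bits;
    return struct {
        pub const Single = std.meta.Int(false, @divExact(double_int_bits, 2));
        pub const Signed = std.meta.Int(true, double_int_bits);
    };
}

test "udivmod helper types" {
    std.debug.assert(Halves(u128).Single == u64);
    std.debug.assert(Halves(u128).Signed == i128);
}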

View File

@ -40,7 +40,7 @@ pub fn main() anyerror!void {
test_node.activate();
progress.refresh();
if (progress.terminal == null) {
std.debug.print("{}/{} {}...", .{ i + 1, test_fn_list.len, test_fn.name });
std.debug.print("{}/{} {}... ", .{ i + 1, test_fn_list.len, test_fn.name });
}
const result = if (test_fn.async_frame_size) |size| switch (io_mode) {
.evented => blk: {

View File

@ -67,7 +67,7 @@ fn EfiMain(handle: uefi.Handle, system_table: *uefi.tables.SystemTable) callconv
uefi.handle = handle;
uefi.system_table = system_table;
switch (@TypeOf(root.main).ReturnType) {
switch (@typeInfo(@TypeOf(root.main)).Fn.return_type.?) {
noreturn => {
root.main();
},
@ -239,7 +239,7 @@ fn callMainAsync(loop: *std.event.Loop) callconv(.Async) u8 {
// This is not marked inline because it is called with @asyncCall when
// there is an event loop.
pub fn callMain() u8 {
switch (@typeInfo(@TypeOf(root.main).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(root.main)).Fn.return_type.?)) {
.NoReturn => {
root.main();
},
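A minimal sketch of the return-type reflection these call sites migrate to; return_type is an optional (null for generic functions), hence the .? unwrap.

// Sketch only, not part of the commit; sample is a stand-in function.
const std = @import("std");

fn sample() u8 {
    return 42;
}

test "function return type via @typeInfo" {
    const Ret = @typeInfo(@TypeOf(sample)).Fn.return_type.?;
    std.debug.assert(Ret == u8);
}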

View File

@ -50,6 +50,7 @@ pub const builtin = @import("builtin.zig");
pub const c = @import("c.zig");
pub const cache_hash = @import("cache_hash.zig");
pub const coff = @import("coff.zig");
pub const compress = @import("compress.zig");
pub const crypto = @import("crypto.zig");
pub const cstr = @import("cstr.zig");
pub const debug = @import("debug.zig");

View File

@ -101,7 +101,7 @@ pub const Target = struct {
/// Latest Windows version that the Zig Standard Library is aware of
pub const latest = WindowsVersion.win10_20h1;
pub const Range = struct {
min: WindowsVersion,
max: WindowsVersion,
@ -468,6 +468,7 @@ pub const Target = struct {
/// TODO Get rid of this one.
unknown,
coff,
pe,
elf,
macho,
wasm,
@ -771,6 +772,63 @@ pub const Target = struct {
};
}
pub fn toCoffMachine(arch: Arch) std.coff.MachineType {
return switch (arch) {
.avr => .Unknown,
.msp430 => .Unknown,
.arc => .Unknown,
.arm => .ARM,
.armeb => .Unknown,
.hexagon => .Unknown,
.le32 => .Unknown,
.mips => .Unknown,
.mipsel => .Unknown,
.powerpc => .POWERPC,
.r600 => .Unknown,
.riscv32 => .RISCV32,
.sparc => .Unknown,
.sparcel => .Unknown,
.tce => .Unknown,
.tcele => .Unknown,
.thumb => .Thumb,
.thumbeb => .Thumb,
.i386 => .I386,
.xcore => .Unknown,
.nvptx => .Unknown,
.amdil => .Unknown,
.hsail => .Unknown,
.spir => .Unknown,
.kalimba => .Unknown,
.shave => .Unknown,
.lanai => .Unknown,
.wasm32 => .Unknown,
.renderscript32 => .Unknown,
.aarch64_32 => .ARM64,
.aarch64 => .ARM64,
.aarch64_be => .Unknown,
.mips64 => .Unknown,
.mips64el => .Unknown,
.powerpc64 => .Unknown,
.powerpc64le => .Unknown,
.riscv64 => .RISCV64,
.x86_64 => .X64,
.nvptx64 => .Unknown,
.le64 => .Unknown,
.amdil64 => .Unknown,
.hsail64 => .Unknown,
.spir64 => .Unknown,
.wasm64 => .Unknown,
.renderscript64 => .Unknown,
.amdgcn => .Unknown,
.bpfel => .Unknown,
.bpfeb => .Unknown,
.sparcv9 => .Unknown,
.s390x => .Unknown,
.ve => .Unknown,
.spu_2 => .Unknown,
};
}
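// Illustrative usage (hypothetical call site, not part of this commit): a COFF
// linker backend can map the target architecture to the PE machine field, e.g.
//     const machine = target.cpu.arch.toCoffMachine();
// with .x86_64 mapping to .X64 and unsupported architectures to .Unknown.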
pub fn endian(arch: Arch) builtin.Endian {
return switch (arch) {
.avr,

View File

@ -166,7 +166,7 @@ pub const Thread = struct {
fn threadMain(raw_arg: windows.LPVOID) callconv(.C) windows.DWORD {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), raw_arg)).*;
switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},
@ -227,7 +227,7 @@ pub const Thread = struct {
fn linuxThreadMain(ctx_addr: usize) callconv(.C) u8 {
const arg = if (@sizeOf(Context) == 0) {} else @intToPtr(*const Context, ctx_addr).*;
switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},
@ -259,7 +259,7 @@ pub const Thread = struct {
fn posixThreadMain(ctx: ?*c_void) callconv(.C) ?*c_void {
const arg = if (@sizeOf(Context) == 0) {} else @ptrCast(*Context, @alignCast(@alignOf(Context), ctx)).*;
switch (@typeInfo(@TypeOf(startFn).ReturnType)) {
switch (@typeInfo(@typeInfo(@TypeOf(startFn)).Fn.return_type.?)) {
.NoReturn => {
startFn(arg);
},

View File

@ -22,7 +22,7 @@ pub const SrcHash = [16]u8;
/// If it is long, blake3 hash is computed.
pub fn hashSrc(src: []const u8) SrcHash {
var out: SrcHash = undefined;
if (src.len <= SrcHash.len) {
if (src.len <= @typeInfo(SrcHash).Array.len) {
std.mem.copy(u8, &out, src);
std.mem.set(u8, out[src.len..], 0);
} else {

View File

@ -615,6 +615,17 @@ test "zig fmt: infix operator and then multiline string literal" {
);
}
test "zig fmt: infix operator and then multiline string literal" {
try testCanonical(
\\const x = "" ++
\\ \\ hi0
\\ \\ hi1
\\ \\ hi2
\\;
\\
);
}
test "zig fmt: C pointers" {
try testCanonical(
\\const Ptr = [*c]i32;
@ -885,6 +896,28 @@ test "zig fmt: 2nd arg multiline string" {
);
}
test "zig fmt: 2nd arg multiline string many args" {
try testCanonical(
\\comptime {
\\ cases.addAsm("hello world linux x86_64",
\\ \\.text
\\ , "Hello, world!\n", "Hello, world!\n");
\\}
\\
);
}
test "zig fmt: final arg multiline string" {
try testCanonical(
\\comptime {
\\ cases.addAsm("hello world linux x86_64", "Hello, world!\n",
\\ \\.text
\\ );
\\}
\\
);
}
test "zig fmt: if condition wraps" {
try testTransform(
\\comptime {
@ -915,6 +948,11 @@ test "zig fmt: if condition wraps" {
\\ var a = if (a) |*f| x: {
\\ break :x &a.b;
\\ } else |err| err;
\\ var a = if (cond and
\\ cond) |*f|
\\ x: {
\\ break :x &a.b;
\\ } else |err| err;
\\}
,
\\comptime {
@ -951,6 +989,35 @@ test "zig fmt: if condition wraps" {
\\ var a = if (a) |*f| x: {
\\ break :x &a.b;
\\ } else |err| err;
\\ var a = if (cond and
\\ cond) |*f|
\\ x: {
\\ break :x &a.b;
\\ } else |err| err;
\\}
\\
);
}
test "zig fmt: if condition has line break but must not wrap" {
try testCanonical(
\\comptime {
\\ if (self.user_input_options.put(
\\ name,
\\ UserInputOption{
\\ .name = name,
\\ .used = false,
\\ },
\\ ) catch unreachable) |*prev_value| {
\\ foo();
\\ bar();
\\ }
\\ if (put(
\\ a,
\\ b,
\\ )) {
\\ foo();
\\ }
\\}
\\
);
@ -977,6 +1044,18 @@ test "zig fmt: if condition has line break but must not wrap" {
);
}
test "zig fmt: function call with multiline argument" {
try testCanonical(
\\comptime {
\\ self.user_input_options.put(name, UserInputOption{
\\ .name = name,
\\ .used = false,
\\ });
\\}
\\
);
}
test "zig fmt: same-line doc comment on variable declaration" {
try testTransform(
\\pub const MAP_ANONYMOUS = 0x1000; /// allocated from memory, swap space
@ -1228,7 +1307,7 @@ test "zig fmt: array literal with hint" {
\\const a = []u8{
\\ 1, 2,
\\ 3, //
\\ 4,
\\ 4,
\\ 5, 6,
\\ 7,
\\};
@ -1293,7 +1372,7 @@ test "zig fmt: multiline string parameter in fn call with trailing comma" {
\\ \\ZIG_C_HEADER_FILES {}
\\ \\ZIG_DIA_GUIDS_LIB {}
\\ \\
\\ ,
\\ ,
\\ std.cstr.toSliceConst(c.ZIG_CMAKE_BINARY_DIR),
\\ std.cstr.toSliceConst(c.ZIG_CXX_COMPILER),
\\ std.cstr.toSliceConst(c.ZIG_DIA_GUIDS_LIB),
@ -2885,20 +2964,20 @@ test "zig fmt: multiline string in array" {
try testCanonical(
\\const Foo = [][]const u8{
\\ \\aaa
\\,
\\ ,
\\ \\bbb
\\};
\\
\\fn bar() void {
\\ const Foo = [][]const u8{
\\ \\aaa
\\ ,
\\ ,
\\ \\bbb
\\ };
\\ const Bar = [][]const u8{ // comment here
\\ \\aaa
\\ \\
\\ , // and another comment can go here
\\ , // and another comment can go here
\\ \\bbb
\\ };
\\}
@ -3214,6 +3293,34 @@ test "zig fmt: C var args" {
);
}
test "zig fmt: Only indent multiline string literals in function calls" {
try testCanonical(
\\test "zig fmt:" {
\\ try testTransform(
\\ \\const X = struct {
\\ \\ foo: i32, bar: i8 };
\\ ,
\\ \\const X = struct {
\\ \\ foo: i32, bar: i8
\\ \\};
\\ \\
\\ );
\\}
\\
);
}
test "zig fmt: Don't add extra newline after if" {
try testCanonical(
\\pub fn atomicSymLink(allocator: *Allocator, existing_path: []const u8, new_path: []const u8) !void {
\\ if (cwd().symLink(existing_path, new_path, .{})) {
\\ return;
\\ }
\\}
\\
);
}
const std = @import("std");
const mem = std.mem;
const warn = std.debug.warn;
@ -3256,7 +3363,8 @@ fn testParse(source: []const u8, allocator: *mem.Allocator, anything_changed: *b
var buffer = std.ArrayList(u8).init(allocator);
errdefer buffer.deinit();
anything_changed.* = try std.zig.render(allocator, buffer.outStream(), tree);
const outStream = buffer.outStream();
anything_changed.* = try std.zig.render(allocator, outStream, tree);
return buffer.toOwnedSlice();
}
fn testTransform(source: []const u8, expected_source: []const u8) !void {

File diff suppressed because it is too large.

View File

@ -1175,6 +1175,7 @@ pub const Tokenizer = struct {
},
.num_dot_dec => switch (c) {
'.' => {
result.id = .IntegerLiteral;
self.index -= 1;
state = .start;
break;
@ -1183,7 +1184,6 @@ pub const Tokenizer = struct {
state = .float_exponent_unsigned;
},
'0'...'9' => {
result.id = .FloatLiteral;
state = .float_fraction_dec;
},
else => {
@ -1769,6 +1769,7 @@ test "tokenizer - number literals decimal" {
testTokenize("7", &[_]Token.Id{.IntegerLiteral});
testTokenize("8", &[_]Token.Id{.IntegerLiteral});
testTokenize("9", &[_]Token.Id{.IntegerLiteral});
testTokenize("1..", &[_]Token.Id{ .IntegerLiteral, .Ellipsis2 });
testTokenize("0a", &[_]Token.Id{ .Invalid, .Identifier });
testTokenize("9b", &[_]Token.Id{ .Invalid, .Identifier });
testTokenize("1z", &[_]Token.Id{ .Invalid, .Identifier });

View File

@ -125,7 +125,7 @@ pub const Decl = struct {
/// mapping them to an address in the output file.
/// Memory owned by this decl, using Module's allocator.
name: [*:0]const u8,
/// The direct parent container of the Decl. This is either a `Scope.File` or `Scope.ZIRModule`.
/// The direct parent container of the Decl. This is either a `Scope.Container` or `Scope.ZIRModule`.
/// Reference to externally owned memory.
scope: *Scope,
/// The AST Node decl index or ZIR Inst index that contains this declaration.
@ -217,9 +217,10 @@ pub const Decl = struct {
pub fn src(self: Decl) usize {
switch (self.scope.tag) {
.file => {
const file = @fieldParentPtr(Scope.File, "base", self.scope);
const tree = file.contents.tree;
.container => {
const container = @fieldParentPtr(Scope.Container, "base", self.scope);
const tree = container.file_scope.contents.tree;
// TODO Container should have its own decls()
const decl_node = tree.root_node.decls()[self.src_index];
return tree.token_locs[decl_node.firstToken()].start;
},
@ -229,7 +230,7 @@ pub const Decl = struct {
const src_decl = module.decls[self.src_index];
return src_decl.inst.src;
},
.block => unreachable,
.file, .block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
.local_ptr => unreachable,
@ -359,6 +360,7 @@ pub const Scope = struct {
.local_ptr => return self.cast(LocalPtr).?.gen_zir.arena,
.zir_module => return &self.cast(ZIRModule).?.contents.module.arena.allocator,
.file => unreachable,
.container => unreachable,
}
}
@ -368,15 +370,16 @@ pub const Scope = struct {
return switch (self.tag) {
.block => self.cast(Block).?.decl,
.gen_zir => self.cast(GenZIR).?.decl,
.local_val => return self.cast(LocalVal).?.gen_zir.decl,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl,
.local_val => self.cast(LocalVal).?.gen_zir.decl,
.local_ptr => self.cast(LocalPtr).?.gen_zir.decl,
.decl => self.cast(DeclAnalysis).?.decl,
.zir_module => null,
.file => null,
.container => null,
};
}
/// Asserts the scope has a parent which is a ZIRModule or File and
/// Asserts the scope has a parent which is a ZIRModule or Container and
/// returns it.
pub fn namespace(self: *Scope) *Scope {
switch (self.tag) {
@ -385,7 +388,8 @@ pub const Scope = struct {
.local_val => return self.cast(LocalVal).?.gen_zir.decl.scope,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope,
.decl => return self.cast(DeclAnalysis).?.decl.scope,
.zir_module, .file => return self,
.file => return &self.cast(File).?.root_container.base,
.zir_module, .container => return self,
}
}
@ -399,8 +403,9 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
.file => unreachable,
.zir_module => return self.cast(ZIRModule).?.fullyQualifiedNameHash(name),
.file => return self.cast(File).?.fullyQualifiedNameHash(name),
.container => return self.cast(Container).?.fullyQualifiedNameHash(name),
}
}
@ -409,11 +414,12 @@ pub const Scope = struct {
switch (self.tag) {
.file => return self.cast(File).?.contents.tree,
.zir_module => unreachable,
.decl => return self.cast(DeclAnalysis).?.decl.scope.cast(File).?.contents.tree,
.block => return self.cast(Block).?.decl.scope.cast(File).?.contents.tree,
.gen_zir => return self.cast(GenZIR).?.decl.scope.cast(File).?.contents.tree,
.local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(File).?.contents.tree,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(File).?.contents.tree,
.decl => return self.cast(DeclAnalysis).?.decl.scope.cast(Container).?.file_scope.contents.tree,
.block => return self.cast(Block).?.decl.scope.cast(Container).?.file_scope.contents.tree,
.gen_zir => return self.cast(GenZIR).?.decl.scope.cast(Container).?.file_scope.contents.tree,
.local_val => return self.cast(LocalVal).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
.local_ptr => return self.cast(LocalPtr).?.gen_zir.decl.scope.cast(Container).?.file_scope.contents.tree,
.container => return self.cast(Container).?.file_scope.contents.tree,
}
}
@ -427,13 +433,15 @@ pub const Scope = struct {
.decl => unreachable,
.zir_module => unreachable,
.file => unreachable,
.container => unreachable,
};
}
/// Asserts the scope has a parent which is a ZIRModule or File and
/// Asserts the scope has a parent which is a ZIRModule, Container, or File and
/// returns the sub_file_path field.
pub fn subFilePath(base: *Scope) []const u8 {
switch (base.tag) {
.container => return @fieldParentPtr(Container, "base", base).file_scope.sub_file_path,
.file => return @fieldParentPtr(File, "base", base).sub_file_path,
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).sub_file_path,
.block => unreachable,
@ -453,11 +461,13 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
.container => unreachable,
}
}
pub fn getSource(base: *Scope, module: *Module) ![:0]const u8 {
switch (base.tag) {
.container => return @fieldParentPtr(Container, "base", base).file_scope.getSource(module),
.file => return @fieldParentPtr(File, "base", base).getSource(module),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).getSource(module),
.gen_zir => unreachable,
@ -471,8 +481,9 @@ pub const Scope = struct {
/// Asserts the scope is a namespace Scope and removes the Decl from the namespace.
pub fn removeDecl(base: *Scope, child: *Decl) void {
switch (base.tag) {
.file => return @fieldParentPtr(File, "base", base).removeDecl(child),
.container => return @fieldParentPtr(Container, "base", base).removeDecl(child),
.zir_module => return @fieldParentPtr(ZIRModule, "base", base).removeDecl(child),
.file => unreachable,
.block => unreachable,
.gen_zir => unreachable,
.local_val => unreachable,
@ -499,6 +510,7 @@ pub const Scope = struct {
.local_val => unreachable,
.local_ptr => unreachable,
.decl => unreachable,
.container => unreachable,
}
}
@ -515,6 +527,8 @@ pub const Scope = struct {
zir_module,
/// .zig source code.
file,
/// A struct, enum, or union; every .file contains one of these.
container,
block,
decl,
gen_zir,
@ -522,6 +536,33 @@ pub const Scope = struct {
local_ptr,
};
pub const Container = struct {
pub const base_tag: Tag = .container;
base: Scope = Scope{ .tag = base_tag },
file_scope: *Scope.File,
/// Direct children of the file.
decls: std.AutoArrayHashMapUnmanaged(*Decl, void),
// TODO implement container types and put this in a status union
// ty: Type
pub fn deinit(self: *Container, gpa: *Allocator) void {
self.decls.deinit(gpa);
self.* = undefined;
}
pub fn removeDecl(self: *Container, child: *Decl) void {
_ = self.decls.remove(child);
}
pub fn fullyQualifiedNameHash(self: *Container, name: []const u8) NameHash {
// TODO container scope qualified names.
return std.zig.hashSrc(name);
}
};
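// Illustrative note, not part of the commit: the new ownership chain is
// Decl.scope -> Scope.Container -> Container.file_scope -> Scope.File, so
// code that previously cast a Decl's scope to File now goes through
// Container.file_scope (see tree() and subFilePath() above).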
pub const File = struct {
pub const base_tag: Tag = .file;
base: Scope = Scope{ .tag = base_tag },
@ -544,8 +585,7 @@ pub const Scope = struct {
loaded_success,
},
/// Direct children of the file.
decls: ArrayListUnmanaged(*Decl),
root_container: Container,
pub fn unload(self: *File, gpa: *Allocator) void {
switch (self.status) {
@ -569,20 +609,11 @@ pub const Scope = struct {
}
pub fn deinit(self: *File, gpa: *Allocator) void {
self.decls.deinit(gpa);
self.root_container.deinit(gpa);
self.unload(gpa);
self.* = undefined;
}
pub fn removeDecl(self: *File, child: *Decl) void {
for (self.decls.items) |item, i| {
if (item == child) {
_ = self.decls.swapRemove(i);
return;
}
}
}
pub fn dumpSrc(self: *File, src: usize) void {
const loc = std.zig.findLineColumn(self.source.bytes, src);
std.debug.print("{}:{}:{}\n", .{ self.sub_file_path, loc.line + 1, loc.column + 1 });
@ -595,6 +626,7 @@ pub const Scope = struct {
module.gpa,
self.sub_file_path,
std.math.maxInt(u32),
null,
1,
0,
);
@ -604,11 +636,6 @@ pub const Scope = struct {
.bytes => |bytes| return bytes,
}
}
pub fn fullyQualifiedNameHash(self: *File, name: []const u8) NameHash {
// We don't have struct scopes yet so this is currently just a simple name hash.
return std.zig.hashSrc(name);
}
};
pub const ZIRModule = struct {
@ -697,6 +724,7 @@ pub const Scope = struct {
module.gpa,
self.sub_file_path,
std.math.maxInt(u32),
null,
1,
0,
);
@ -861,7 +889,10 @@ pub fn init(gpa: *Allocator, options: InitOptions) !Module {
.source = .{ .unloaded = {} },
.contents = .{ .not_available = {} },
.status = .never_loaded,
.decls = .{},
.root_container = .{
.file_scope = root_scope,
.decls = .{},
},
};
break :blk &root_scope.base;
} else if (mem.endsWith(u8, options.root_pkg.root_src_path, ".zir")) {
@ -969,7 +1000,7 @@ pub fn update(self: *Module) !void {
// to force a refresh we unload now.
if (self.root_scope.cast(Scope.File)) |zig_file| {
zig_file.unload(self.gpa);
self.analyzeRootSrcFile(zig_file) catch |err| switch (err) {
self.analyzeContainer(&zig_file.root_container) catch |err| switch (err) {
error.AnalysisFail => {
assert(self.totalErrorCount() != 0);
},
@ -1237,8 +1268,8 @@ fn astGenAndAnalyzeDecl(self: *Module, decl: *Decl) !bool {
const tracy = trace(@src());
defer tracy.end();
const file_scope = decl.scope.cast(Scope.File).?;
const tree = try self.getAstTree(file_scope);
const container_scope = decl.scope.cast(Scope.Container).?;
const tree = try self.getAstTree(container_scope);
const ast_node = tree.root_node.decls()[decl.src_index];
switch (ast_node.tag) {
.FnProto => {
@ -1698,10 +1729,12 @@ fn getSrcModule(self: *Module, root_scope: *Scope.ZIRModule) !*zir.Module {
}
}
fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
fn getAstTree(self: *Module, container_scope: *Scope.Container) !*ast.Tree {
const tracy = trace(@src());
defer tracy.end();
const root_scope = container_scope.file_scope;
switch (root_scope.status) {
.never_loaded, .unloaded_success => {
try self.failed_files.ensureCapacity(self.gpa, self.failed_files.items().len + 1);
@ -1743,25 +1776,25 @@ fn getAstTree(self: *Module, root_scope: *Scope.File) !*ast.Tree {
}
}
fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
fn analyzeContainer(self: *Module, container_scope: *Scope.Container) !void {
const tracy = trace(@src());
defer tracy.end();
// We may be analyzing it for the first time, or this may be
// an incremental update. This code handles both cases.
const tree = try self.getAstTree(root_scope);
const tree = try self.getAstTree(container_scope);
const decls = tree.root_node.decls();
try self.work_queue.ensureUnusedCapacity(decls.len);
try root_scope.decls.ensureCapacity(self.gpa, decls.len);
try container_scope.decls.ensureCapacity(self.gpa, decls.len);
// Keep track of the decls that we expect to see in this file so that
// we know which ones have been deleted.
var deleted_decls = std.AutoArrayHashMap(*Decl, void).init(self.gpa);
defer deleted_decls.deinit();
try deleted_decls.ensureCapacity(root_scope.decls.items.len);
for (root_scope.decls.items) |file_decl| {
deleted_decls.putAssumeCapacityNoClobber(file_decl, {});
try deleted_decls.ensureCapacity(container_scope.decls.items().len);
for (container_scope.decls.items()) |entry| {
deleted_decls.putAssumeCapacityNoClobber(entry.key, {});
}
for (decls) |src_decl, decl_i| {
@ -1773,7 +1806,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
const name_loc = tree.token_locs[name_tok];
const name = tree.tokenSliceLoc(name_loc);
const name_hash = root_scope.fullyQualifiedNameHash(name);
const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
if (self.decl_table.get(name_hash)) |decl| {
// Update the AST Node index of the decl, even if its contents are unchanged, it may
@ -1789,6 +1822,9 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
try self.markOutdatedDecl(decl);
decl.contents_hash = contents_hash;
} else switch (self.bin_file.tag) {
.coff => {
// TODO Implement for COFF
},
.elf => if (decl.fn_link.elf.len != 0) {
// TODO Look into detecting when this would be unnecessary by storing enough state
// in `Decl` to notice that the line number did not change.
@ -1801,8 +1837,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
}
}
} else {
const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
root_scope.decls.appendAssumeCapacity(new_decl);
const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
container_scope.decls.putAssumeCapacity(new_decl, {});
if (fn_proto.getExternExportInlineToken()) |maybe_export_token| {
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
@ -1812,7 +1848,7 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
} else if (src_decl.castTag(.VarDecl)) |var_decl| {
const name_loc = tree.token_locs[var_decl.name_token];
const name = tree.tokenSliceLoc(name_loc);
const name_hash = root_scope.fullyQualifiedNameHash(name);
const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
if (self.decl_table.get(name_hash)) |decl| {
// Update the AST Node index of the decl, even if its contents are unchanged, it may
@ -1828,8 +1864,8 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
decl.contents_hash = contents_hash;
}
} else {
const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
root_scope.decls.appendAssumeCapacity(new_decl);
const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
container_scope.decls.putAssumeCapacity(new_decl, {});
if (var_decl.getExternExportToken()) |maybe_export_token| {
if (tree.token_ids[maybe_export_token] == .Keyword_export) {
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
@ -1841,11 +1877,11 @@ fn analyzeRootSrcFile(self: *Module, root_scope: *Scope.File) !void {
const name = try std.fmt.allocPrint(self.gpa, "__comptime_{}", .{name_index});
defer self.gpa.free(name);
const name_hash = root_scope.fullyQualifiedNameHash(name);
const name_hash = container_scope.fullyQualifiedNameHash(name);
const contents_hash = std.zig.hashSrc(tree.getNodeSource(src_decl));
const new_decl = try self.createNewDecl(&root_scope.base, name, decl_i, name_hash, contents_hash);
root_scope.decls.appendAssumeCapacity(new_decl);
const new_decl = try self.createNewDecl(&container_scope.base, name, decl_i, name_hash, contents_hash);
container_scope.decls.putAssumeCapacity(new_decl, {});
self.work_queue.writeItemAssumeCapacity(.{ .analyze_decl = new_decl });
} else if (src_decl.castTag(.ContainerField)) |container_field| {
log.err("TODO: analyze container field", .{});
@ -2047,12 +2083,14 @@ fn allocateNewDecl(
.deletion_flag = false,
.contents_hash = contents_hash,
.link = switch (self.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.TextBlock.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = {} },
},
.fn_link = switch (self.bin_file.tag) {
.coff => .{ .coff = {} },
.elf => .{ .elf = link.File.Elf.SrcFn.empty },
.macho => .{ .macho = link.File.MachO.SrcFn.empty },
.c => .{ .c = {} },
@ -2591,6 +2629,72 @@ pub fn analyzeIsErr(self: *Module, scope: *Scope, src: usize, operand: *Inst) In
return self.fail(scope, src, "TODO implement analysis of iserr", .{});
}
pub fn analyzeSlice(self: *Module, scope: *Scope, src: usize, array_ptr: *Inst, start: *Inst, end_opt: ?*Inst, sentinel_opt: ?*Inst) InnerError!*Inst {
const ptr_child = switch (array_ptr.ty.zigTypeTag()) {
.Pointer => array_ptr.ty.elemType(),
else => return self.fail(scope, src, "expected pointer, found '{}'", .{array_ptr.ty}),
};
var array_type = ptr_child;
const elem_type = switch (ptr_child.zigTypeTag()) {
.Array => ptr_child.elemType(),
.Pointer => blk: {
if (ptr_child.isSinglePointer()) {
if (ptr_child.elemType().zigTypeTag() == .Array) {
array_type = ptr_child.elemType();
break :blk ptr_child.elemType().elemType();
}
return self.fail(scope, src, "slice of single-item pointer", .{});
}
break :blk ptr_child.elemType();
},
else => return self.fail(scope, src, "slice of non-array type '{}'", .{ptr_child}),
};
const slice_sentinel = if (sentinel_opt) |sentinel| blk: {
const casted = try self.coerce(scope, elem_type, sentinel);
break :blk try self.resolveConstValue(scope, casted);
} else null;
var return_ptr_size: std.builtin.TypeInfo.Pointer.Size = .Slice;
var return_elem_type = elem_type;
if (end_opt) |end| {
if (end.value()) |end_val| {
if (start.value()) |start_val| {
const start_u64 = start_val.toUnsignedInt();
const end_u64 = end_val.toUnsignedInt();
if (start_u64 > end_u64) {
return self.fail(scope, src, "out of bounds slice", .{});
}
const len = end_u64 - start_u64;
const array_sentinel = if (array_type.zigTypeTag() == .Array and end_u64 == array_type.arrayLen())
array_type.sentinel()
else
slice_sentinel;
return_elem_type = try self.arrayType(scope, len, array_sentinel, elem_type);
return_ptr_size = .One;
}
}
}
const return_type = try self.ptrType(
scope,
src,
return_elem_type,
if (end_opt == null) slice_sentinel else null,
0, // TODO alignment
0,
0,
!ptr_child.isConstPtr(),
ptr_child.isAllowzeroPtr(),
ptr_child.isVolatilePtr(),
return_ptr_size,
);
return self.fail(scope, src, "TODO implement analysis of slice", .{});
}
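// Illustrative summary, not part of the commit, of the result types computed
// above before the trailing TODO failure:
//   comptime-known start and end -> single-item pointer to an array
//                                   (e.g. arr[1..3] on a [4]u8 is *[2]u8),
//   runtime or omitted end       -> a slice (return_ptr_size stays .Slice),
//   sentinel, if given           -> coerced to the element type and carried
//                                   into the result type.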
/// Asserts that lhs and rhs types are both numeric.
pub fn cmpNumeric(
self: *Module,
@ -2801,6 +2905,12 @@ pub fn resolvePeerTypes(self: *Module, scope: *Scope, instructions: []*Inst) !Ty
prev_inst = next_inst;
continue;
}
if (next_inst.ty.zigTypeTag() == .Undefined)
continue;
if (prev_inst.ty.zigTypeTag() == .Undefined) {
prev_inst = next_inst;
continue;
}
if (prev_inst.ty.isInt() and
next_inst.ty.isInt() and
prev_inst.ty.isSignedInt() == next_inst.ty.isSignedInt())
@ -3052,6 +3162,7 @@ fn failWithOwnedErrorMsg(self: *Module, scope: *Scope, src: usize, err_msg: *Err
self.failed_files.putAssumeCapacityNoClobber(scope, err_msg);
},
.file => unreachable,
.container => unreachable,
}
return error.AnalysisFail;
}

View File

@ -275,16 +275,16 @@ pub fn expr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node) InnerEr
.ErrorType => return rlWrap(mod, scope, rl, try errorType(mod, scope, node.castTag(.ErrorType).?)),
.For => return forExpr(mod, scope, rl, node.castTag(.For).?),
.ArrayAccess => return arrayAccess(mod, scope, rl, node.castTag(.ArrayAccess).?),
.Slice => return rlWrap(mod, scope, rl, try sliceExpr(mod, scope, node.castTag(.Slice).?)),
.Catch => return catchExpr(mod, scope, rl, node.castTag(.Catch).?),
.Comptime => return comptimeKeyword(mod, scope, rl, node.castTag(.Comptime).?),
.OrElse => return orelseExpr(mod, scope, rl, node.castTag(.OrElse).?),
.Defer => return mod.failNode(scope, node, "TODO implement astgen.expr for .Defer", .{}),
.Range => return mod.failNode(scope, node, "TODO implement astgen.expr for .Range", .{}),
.OrElse => return mod.failNode(scope, node, "TODO implement astgen.expr for .OrElse", .{}),
.Await => return mod.failNode(scope, node, "TODO implement astgen.expr for .Await", .{}),
.Resume => return mod.failNode(scope, node, "TODO implement astgen.expr for .Resume", .{}),
.Try => return mod.failNode(scope, node, "TODO implement astgen.expr for .Try", .{}),
.Slice => return mod.failNode(scope, node, "TODO implement astgen.expr for .Slice", .{}),
.ArrayInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializer", .{}),
.ArrayInitializerDot => return mod.failNode(scope, node, "TODO implement astgen.expr for .ArrayInitializerDot", .{}),
.StructInitializer => return mod.failNode(scope, node, "TODO implement astgen.expr for .StructInitializer", .{}),
@ -790,13 +790,31 @@ fn errorType(mod: *Module, scope: *Scope, node: *ast.Node.OneToken) InnerError!*
}
fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.op_token].start;
return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .iserr, .unwrap_err_unsafe, node.rhs, node.payload);
}
const err_union_ptr = try expr(mod, scope, .ref, node.lhs);
// TODO we could avoid an unnecessary copy if .iserr took a pointer
const err_union = try addZIRUnOp(mod, scope, src, .deref, err_union_ptr);
const cond = try addZIRUnOp(mod, scope, src, .iserr, err_union);
fn orelseExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.SimpleInfixOp) InnerError!*zir.Inst {
return orelseCatchExpr(mod, scope, rl, node.lhs, node.op_token, .isnull, .unwrap_optional_unsafe, node.rhs, null);
}
fn orelseCatchExpr(
mod: *Module,
scope: *Scope,
rl: ResultLoc,
lhs: *ast.Node,
op_token: ast.TokenIndex,
cond_op: zir.Inst.Tag,
unwrap_op: zir.Inst.Tag,
rhs: *ast.Node,
payload_node: ?*ast.Node,
) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[op_token].start;
const operand_ptr = try expr(mod, scope, .ref, lhs);
// TODO we could avoid an unnecessary copy if .iserr, .isnull took a pointer
const err_union = try addZIRUnOp(mod, scope, src, .deref, operand_ptr);
const cond = try addZIRUnOp(mod, scope, src, cond_op, err_union);
var block_scope: Scope.GenZIR = .{
.parent = scope,
@ -825,55 +843,55 @@ fn catchExpr(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Catch)
.inferred_ptr, .bitcasted_ptr, .block_ptr => .{ .block_ptr = block },
};
var err_scope: Scope.GenZIR = .{
var then_scope: Scope.GenZIR = .{
.parent = scope,
.decl = block_scope.decl,
.arena = block_scope.arena,
.instructions = .{},
};
defer err_scope.instructions.deinit(mod.gpa);
defer then_scope.instructions.deinit(mod.gpa);
var err_val_scope: Scope.LocalVal = undefined;
const err_sub_scope = blk: {
const payload = node.payload orelse
break :blk &err_scope.base;
const then_sub_scope = blk: {
const payload = payload_node orelse
break :blk &then_scope.base;
const err_name = tree.tokenSlice(payload.castTag(.Payload).?.error_symbol.firstToken());
if (mem.eql(u8, err_name, "_"))
break :blk &err_scope.base;
break :blk &then_scope.base;
const unwrapped_err_ptr = try addZIRUnOp(mod, &err_scope.base, src, .unwrap_err_code, err_union_ptr);
const unwrapped_err_ptr = try addZIRUnOp(mod, &then_scope.base, src, .unwrap_err_code, operand_ptr);
err_val_scope = .{
.parent = &err_scope.base,
.gen_zir = &err_scope,
.parent = &then_scope.base,
.gen_zir = &then_scope,
.name = err_name,
.inst = try addZIRUnOp(mod, &err_scope.base, src, .deref, unwrapped_err_ptr),
.inst = try addZIRUnOp(mod, &then_scope.base, src, .deref, unwrapped_err_ptr),
};
break :blk &err_val_scope.base;
};
_ = try addZIRInst(mod, &err_scope.base, src, zir.Inst.Break, .{
_ = try addZIRInst(mod, &then_scope.base, src, zir.Inst.Break, .{
.block = block,
.operand = try expr(mod, err_sub_scope, branch_rl, node.rhs),
.operand = try expr(mod, then_sub_scope, branch_rl, rhs),
}, .{});
var not_err_scope: Scope.GenZIR = .{
var else_scope: Scope.GenZIR = .{
.parent = scope,
.decl = block_scope.decl,
.arena = block_scope.arena,
.instructions = .{},
};
defer not_err_scope.instructions.deinit(mod.gpa);
defer else_scope.instructions.deinit(mod.gpa);
const unwrapped_payload = try addZIRUnOp(mod, &not_err_scope.base, src, .unwrap_err_unsafe, err_union_ptr);
_ = try addZIRInst(mod, &not_err_scope.base, src, zir.Inst.Break, .{
const unwrapped_payload = try addZIRUnOp(mod, &else_scope.base, src, unwrap_op, operand_ptr);
_ = try addZIRInst(mod, &else_scope.base, src, zir.Inst.Break, .{
.block = block,
.operand = unwrapped_payload,
}, .{});
condbr.positionals.then_body = .{ .instructions = try err_scope.arena.dupe(*zir.Inst, err_scope.instructions.items) };
condbr.positionals.else_body = .{ .instructions = try not_err_scope.arena.dupe(*zir.Inst, not_err_scope.instructions.items) };
return rlWrap(mod, scope, rl, &block.base);
condbr.positionals.then_body = .{ .instructions = try then_scope.arena.dupe(*zir.Inst, then_scope.instructions.items) };
condbr.positionals.else_body = .{ .instructions = try else_scope.arena.dupe(*zir.Inst, else_scope.instructions.items) };
return rlWrapPtr(mod, scope, rl, &block.base);
}
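// Illustrative note, not part of the commit: the two surface forms lowered
// through this one helper are
//   opt orelse fallback       (cond_op = .isnull, unwrap_op = .unwrap_optional_unsafe)
//   eu catch |err| fallback   (cond_op = .iserr,  unwrap_op = .unwrap_err_unsafe)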
/// Return whether the identifier names of two tokens are equal. Resolves @"" tokens without allocating.
@ -933,6 +951,36 @@ fn arrayAccess(mod: *Module, scope: *Scope, rl: ResultLoc, node: *ast.Node.Array
return rlWrapPtr(mod, scope, rl, try addZIRInst(mod, scope, src, zir.Inst.ElemPtr, .{ .array_ptr = array_ptr, .index = index }, .{}));
}
fn sliceExpr(mod: *Module, scope: *Scope, node: *ast.Node.Slice) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.rtoken].start;
const usize_type = try addZIRInstConst(mod, scope, src, .{
.ty = Type.initTag(.type),
.val = Value.initTag(.usize_type),
});
const array_ptr = try expr(mod, scope, .ref, node.lhs);
const start = try expr(mod, scope, .{ .ty = usize_type }, node.start);
if (node.end == null and node.sentinel == null) {
return try addZIRBinOp(mod, scope, src, .slice_start, array_ptr, start);
}
const end = if (node.end) |end| try expr(mod, scope, .{ .ty = usize_type }, end) else null;
// We could get the child type here, but it is easier to do it in semantic analysis.
const sentinel = if (node.sentinel) |sentinel| try expr(mod, scope, .none, sentinel) else null;
return try addZIRInst(
mod,
scope,
src,
zir.Inst.Slice,
.{ .array_ptr = array_ptr, .start = start },
.{ .end = end, .sentinel = sentinel },
);
}
fn deref(mod: *Module, scope: *Scope, node: *ast.Node.SimpleSuffixOp) InnerError!*zir.Inst {
const tree = scope.tree();
const src = tree.token_locs[node.rtoken].start;

View File

@ -59,14 +59,21 @@ pub const GenerateSymbolError = error{
AnalysisFail,
};
pub const DebugInfoOutput = union(enum) {
dwarf: struct {
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
},
none,
};
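// Illustrative call-site shape, assumed rather than shown in this hunk: a
// DWARF-producing backend passes the three buffers, other backends pass .none.
//   const debug_output: DebugInfoOutput = .{ .dwarf = .{
//       .dbg_line = &dbg_line_buffer,
//       .dbg_info = &dbg_info_buffer,
//       .dbg_info_type_relocs = &type_relocs,
//   } };
//   const no_debug: DebugInfoOutput = .none;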
pub fn generateSymbol(
bin_file: *link.File,
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
const tracy = trace(@src());
defer tracy.end();
@ -76,70 +83,70 @@ pub fn generateSymbol(
switch (bin_file.options.target.cpu.arch) {
.wasm32 => unreachable, // has its own code path
.wasm64 => unreachable, // has its own code path
.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
//.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, dbg_line, dbg_info, dbg_info_type_relocs),
.arm => return Function(.arm).generateSymbol(bin_file, src, typed_value, code, debug_output),
.armeb => return Function(.armeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.aarch64 => return Function(.aarch64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.aarch64_be => return Function(.aarch64_be).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.aarch64_32 => return Function(.aarch64_32).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.arc => return Function(.arc).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.avr => return Function(.avr).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.bpfel => return Function(.bpfel).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.bpfeb => return Function(.bpfeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.hexagon => return Function(.hexagon).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.mips => return Function(.mips).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.mipsel => return Function(.mipsel).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.mips64 => return Function(.mips64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.mips64el => return Function(.mips64el).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.msp430 => return Function(.msp430).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.powerpc => return Function(.powerpc).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.powerpc64 => return Function(.powerpc64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.powerpc64le => return Function(.powerpc64le).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.r600 => return Function(.r600).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.amdgcn => return Function(.amdgcn).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.riscv32 => return Function(.riscv32).generateSymbol(bin_file, src, typed_value, code, debug_output),
.riscv64 => return Function(.riscv64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.sparc => return Function(.sparc).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.sparcv9 => return Function(.sparcv9).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.sparcel => return Function(.sparcel).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.s390x => return Function(.s390x).generateSymbol(bin_file, src, typed_value, code, debug_output),
.spu_2 => return Function(.spu_2).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.tce => return Function(.tce).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.tcele => return Function(.tcele).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.thumb => return Function(.thumb).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.thumbeb => return Function(.thumbeb).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.i386 => return Function(.i386).generateSymbol(bin_file, src, typed_value, code, debug_output),
.x86_64 => return Function(.x86_64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.xcore => return Function(.xcore).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.nvptx => return Function(.nvptx).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.nvptx64 => return Function(.nvptx64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.le32 => return Function(.le32).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.le64 => return Function(.le64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.amdil => return Function(.amdil).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.amdil64 => return Function(.amdil64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.hsail => return Function(.hsail).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.hsail64 => return Function(.hsail64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.spir => return Function(.spir).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.spir64 => return Function(.spir64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.kalimba => return Function(.kalimba).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.shave => return Function(.shave).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.lanai => return Function(.lanai).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.renderscript32 => return Function(.renderscript32).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.renderscript64 => return Function(.renderscript64).generateSymbol(bin_file, src, typed_value, code, debug_output),
//.ve => return Function(.ve).generateSymbol(bin_file, src, typed_value, code, debug_output),
else => @panic("Backend architectures that don't have good support yet are commented out, to improve compilation performance. If you are interested in one of these other backends feel free to uncomment them. Eventually these will be completed, but stage1 is slow and a memory hog."),
}
},
.Array => {
// TODO populate .debug_info for the array
if (typed_value.val.cast(Value.Payload.Bytes)) |payload| {
if (typed_value.ty.arraySentinel()) |sentinel| {
if (typed_value.ty.sentinel()) |sentinel| {
try code.ensureCapacity(code.items.len + payload.data.len + 1);
code.appendSliceAssumeCapacity(payload.data);
const prev_len = code.items.len;
switch (try generateSymbol(bin_file, src, .{
.ty = typed_value.ty.elemType(),
.val = sentinel,
}, code, dbg_line, dbg_info, dbg_info_type_relocs)) {
}, code, debug_output)) {
.appended => return Result{ .appended = {} },
.externally_managed => |slice| {
code.appendSliceAssumeCapacity(slice);
@ -239,9 +246,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
target: *const std.Target,
mod_fn: *const Module.Fn,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
debug_output: DebugInfoOutput,
err_msg: ?*ErrorMsg,
args: []MCValue,
ret_mcv: MCValue,
@ -419,9 +424,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
src: usize,
typed_value: TypedValue,
code: *std.ArrayList(u8),
dbg_line: *std.ArrayList(u8),
dbg_info: *std.ArrayList(u8),
dbg_info_type_relocs: *link.File.DbgInfoTypeRelocsTable,
debug_output: DebugInfoOutput,
) GenerateSymbolError!Result {
const module_fn = typed_value.val.cast(Value.Payload.Function).?.func;
@ -436,8 +439,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
try branch_stack.append(.{});
const src_data: struct {lbrace_src: usize, rbrace_src: usize, source: []const u8} = blk: {
if (module_fn.owner_decl.scope.cast(Module.Scope.File)) |scope_file| {
const tree = scope_file.contents.tree;
if (module_fn.owner_decl.scope.cast(Module.Scope.Container)) |container_scope| {
const tree = container_scope.file_scope.contents.tree;
const fn_proto = tree.root_node.decls()[module_fn.owner_decl.src_index].castTag(.FnProto).?;
const block = fn_proto.getBodyNode().?.castTag(.Block).?;
const lbrace_src = tree.token_locs[block.lbrace].start;
@ -457,9 +460,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
.bin_file = bin_file,
.mod_fn = module_fn,
.code = code,
.dbg_line = dbg_line,
.dbg_info = dbg_info,
.dbg_info_type_relocs = dbg_info_type_relocs,
.debug_output = debug_output,
.err_msg = null,
.args = undefined, // populated after `resolveCallingConventionValues`
.ret_mcv = undefined, // populated after `resolveCallingConventionValues`
@ -598,35 +599,50 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
fn dbgSetPrologueEnd(self: *Self) InnerError!void {
try self.dbg_line.append(DW.LNS_set_prologue_end);
try self.dbgAdvancePCAndLine(self.prev_di_src);
switch (self.debug_output) {
.dwarf => |dbg_out| {
try dbg_out.dbg_line.append(DW.LNS_set_prologue_end);
try self.dbgAdvancePCAndLine(self.prev_di_src);
},
.none => {},
}
}
fn dbgSetEpilogueBegin(self: *Self) InnerError!void {
try self.dbg_line.append(DW.LNS_set_epilogue_begin);
try self.dbgAdvancePCAndLine(self.prev_di_src);
switch (self.debug_output) {
.dwarf => |dbg_out| {
try dbg_out.dbg_line.append(DW.LNS_set_epilogue_begin);
try self.dbgAdvancePCAndLine(self.prev_di_src);
},
.none => {},
}
}
fn dbgAdvancePCAndLine(self: *Self, src: usize) InnerError!void {
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table, and changing ir.Inst from storing byte offset to token. Currently
// this involves scanning over the source code for newlines
// (but only from the previous byte offset to the new one).
const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
const delta_pc = self.code.items.len - self.prev_di_pc;
self.prev_di_src = src;
self.prev_di_pc = self.code.items.len;
// TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
// single-byte opcodes that add different numbers to both the PC and the line number
// at the same time.
try self.dbg_line.ensureCapacity(self.dbg_line.items.len + 11);
self.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
leb128.writeULEB128(self.dbg_line.writer(), delta_pc) catch unreachable;
if (delta_line != 0) {
self.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
leb128.writeILEB128(self.dbg_line.writer(), delta_line) catch unreachable;
switch (self.debug_output) {
.dwarf => |dbg_out| {
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table, and changing ir.Inst from storing byte offset to token. Currently
// this involves scanning over the source code for newlines
// (but only from the previous byte offset to the new one).
const delta_line = std.zig.lineDelta(self.source, self.prev_di_src, src);
const delta_pc = self.code.items.len - self.prev_di_pc;
// TODO Look into using the DWARF special opcodes to compress this data. It lets you emit
// single-byte opcodes that add different numbers to both the PC and the line number
// at the same time.
try dbg_out.dbg_line.ensureCapacity(dbg_out.dbg_line.items.len + 11);
dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_pc);
leb128.writeULEB128(dbg_out.dbg_line.writer(), delta_pc) catch unreachable;
if (delta_line != 0) {
dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_advance_line);
leb128.writeILEB128(dbg_out.dbg_line.writer(), delta_line) catch unreachable;
}
dbg_out.dbg_line.appendAssumeCapacity(DW.LNS_copy);
},
.none => {},
}
self.dbg_line.appendAssumeCapacity(DW.LNS_copy);
}
/// Asserts there is already capacity to insert into top branch inst_table.
@ -654,18 +670,23 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
/// Adds a Type to the .debug_info at the current position. The bytes will be populated later,
/// after codegen for this symbol is done.
fn addDbgInfoTypeReloc(self: *Self, ty: Type) !void {
assert(ty.hasCodeGenBits());
const index = self.dbg_info.items.len;
try self.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
switch (self.debug_output) {
.dwarf => |dbg_out| {
assert(ty.hasCodeGenBits());
const index = dbg_out.dbg_info.items.len;
try dbg_out.dbg_info.resize(index + 4); // DW.AT_type, DW.FORM_ref4
const gop = try self.dbg_info_type_relocs.getOrPut(self.gpa, ty);
if (!gop.found_existing) {
gop.entry.value = .{
.off = undefined,
.relocs = .{},
};
const gop = try dbg_out.dbg_info_type_relocs.getOrPut(self.gpa, ty);
if (!gop.found_existing) {
gop.entry.value = .{
.off = undefined,
.relocs = .{},
};
}
try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
},
.none => {},
}
try gop.entry.value.relocs.append(self.gpa, @intCast(u32, index));
}
fn genFuncInst(self: *Self, inst: *ir.Inst) !MCValue {
@ -1258,14 +1279,19 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
self.registers.putAssumeCapacityNoClobber(toCanonicalReg(reg), &inst.base);
self.markRegUsed(reg);
try self.dbg_info.ensureCapacity(self.dbg_info.items.len + 8 + name_with_null.len);
self.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
self.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
1, // ULEB128 dwarf expression length
reg.dwarfLocOp(),
});
try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
self.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
switch (self.debug_output) {
.dwarf => |dbg_out| {
try dbg_out.dbg_info.ensureCapacity(dbg_out.dbg_info.items.len + 8 + name_with_null.len);
dbg_out.dbg_info.appendAssumeCapacity(link.File.Elf.abbrev_parameter);
dbg_out.dbg_info.appendSliceAssumeCapacity(&[2]u8{ // DW.AT_location, DW.FORM_exprloc
1, // ULEB128 dwarf expression length
reg.dwarfLocOp(),
});
try self.addDbgInfoTypeReloc(inst.base.ty); // DW.AT_type, DW.FORM_ref4
dbg_out.dbg_info.appendSliceAssumeCapacity(name_with_null); // DW.AT_name, DW.FORM_string
},
.none => {},
}
},
else => {},
}
@ -1302,7 +1328,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
switch (arch) {
.x86_64 => {
for (info.args) |mc_arg, arg_i| {
@ -1341,10 +1367,17 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
@intCast(u32, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes)
else
unreachable;
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
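// Sketch of the 7-byte sequence emitted above: opcode FF /2 with a ModRM/SIB
// pair selecting a 32-bit absolute displacement, i.e. `call [imm32]`. The
// address used below is an assumed, illustrative GOT entry address.
test "call [addr] encoding sketch" {
    const std = @import("std");
    var code: [7]u8 = undefined;
    code[0..3].* = [3]u8{ 0xff, 0x14, 0x25 };
    std.mem.writeIntLittle(u32, code[3..7], 0x402018);
    std.debug.assert(code[3] == 0x18 and code[6] == 0x00);
}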
@ -1362,10 +1395,16 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
try self.genSetReg(inst.base.src, .ra, .{ .memory = got_addr });
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.jalr(.ra, 0, .ra).toU32());
@ -1383,8 +1422,14 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u16, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * 2);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
@intCast(u16, coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * 2)
else
unreachable;
const return_type = func.owner_decl.typed_value.most_recent.typed_value.ty.fnReturnType();
// First, push the return address, then jump; if noreturn, don't bother with the first step
// TODO: implement packed struct -> u16 at comptime and move the bitcast here
@ -1420,10 +1465,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const got_addr = @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + func.owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + func.owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
// TODO only works with leaf functions
// at the moment, which works fine for
@ -1443,7 +1493,57 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
switch (arch) {
.x86_64 => return self.fail(inst.base.src, "TODO implement codegen for call when linking with MachO for x86_64 arch", .{}),
.x86_64 => {
for (info.args) |mc_arg, arg_i| {
const arg = inst.args[arg_i];
const arg_mcv = try self.resolveInst(inst.args[arg_i]);
// Here we do not use setRegOrMem even though the logic is similar, because
// the function call will move the stack pointer, so the offsets are different.
switch (mc_arg) {
.none => continue,
.register => |reg| {
try self.genSetReg(arg.src, reg, arg_mcv);
// TODO interact with the register allocator to mark the instruction as moved.
},
.stack_offset => {
// Here we need to emit instructions like this:
// mov qword ptr [rsp + stack_offset], x
return self.fail(inst.base.src, "TODO implement calling with parameters in memory", .{});
},
.ptr_stack_offset => {
return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_stack_offset arg", .{});
},
.ptr_embedded_in_code => {
return self.fail(inst.base.src, "TODO implement calling with MCValue.ptr_embedded_in_code arg", .{});
},
.undef => unreachable,
.immediate => unreachable,
.unreach => unreachable,
.dead => unreachable,
.embedded_in_code => unreachable,
.memory => unreachable,
.compare_flags_signed => unreachable,
.compare_flags_unsigned => unreachable,
}
}
if (inst.func.cast(ir.Inst.Constant)) |func_inst| {
if (func_inst.val.cast(Value.Payload.Function)) |func_val| {
const func = func_val.func;
const got = &macho_file.sections.items[macho_file.got_section_index.?];
const ptr_bytes = 8;
const got_addr = @intCast(u32, got.addr + func.owner_decl.link.macho.offset_table_index.? * ptr_bytes);
// ff 14 25 xx xx xx xx call [addr]
try self.code.ensureCapacity(self.code.items.len + 7);
self.code.appendSliceAssumeCapacity(&[3]u8{ 0xff, 0x14, 0x25 });
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), got_addr);
} else {
return self.fail(inst.base.src, "TODO implement calling bitcasted functions", .{});
}
} else {
return self.fail(inst.base.src, "TODO implement calling runtime known function pointer", .{});
}
},
.aarch64 => return self.fail(inst.base.src, "TODO implement codegen for call when linking with MachO for aarch64 arch", .{}),
else => unreachable,
}
@ -1933,7 +2033,7 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (mem.eql(u8, inst.asm_source, "syscall")) {
try self.code.appendSlice(&[_]u8{ 0x0f, 0x05 });
} else {
} else if (inst.asm_source.len != 0) {
return self.fail(inst.base.src, "TODO implement support for more x86 assembly instructions", .{});
}
@ -2486,6 +2586,15 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const decl = payload.decl;
const got = &macho_file.sections.items[macho_file.got_section_index.?];
const got_addr = got.addr + decl.link.macho.offset_table_index.? * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const decl = payload.decl;
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else {
return self.fail(src, "TODO codegen non-ELF const Decl pointer", .{});
}
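// Sketch of the shared pattern in the three branches above: each Decl owns one
// pointer-sized slot in the GOT/offset table, so its address is the table base
// plus offset_table_index * ptr_bytes. The numbers below are illustrative
// assumptions, not values produced by this commit.
test "offset table slot address sketch" {
    const table_base: u64 = 0x402000;
    const ptr_bytes: u64 = 8;
    const offset_table_index: u64 = 3;
    @import("std").debug.assert(table_base + offset_table_index * ptr_bytes == 0x402018);
}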

View File

@ -85,7 +85,7 @@ fn genArray(file: *C, decl: *Decl) !void {
const name = try map(file.base.allocator, mem.span(decl.name));
defer file.base.allocator.free(name);
if (tv.val.cast(Value.Payload.Bytes)) |payload|
if (tv.ty.arraySentinel()) |sentinel|
if (tv.ty.sentinel()) |sentinel|
if (sentinel.toUnsignedInt() == 0)
try file.constants.writer().print("const char *const {} = \"{}\";\n", .{ name, payload.data })
else

View File

@ -34,6 +34,7 @@ pub const File = struct {
pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.TextBlock,
macho: MachO.TextBlock,
c: void,
wasm: void,
@ -41,6 +42,7 @@ pub const File = struct {
pub const LinkFn = union {
elf: Elf.SrcFn,
coff: Coff.SrcFn,
macho: MachO.SrcFn,
c: void,
wasm: ?Wasm.FnData,
@ -66,7 +68,7 @@ pub const File = struct {
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: Options) !*File {
switch (options.object_format) {
.unknown => unreachable,
.coff => return error.TODOImplementCoff,
.coff, .pe => return Coff.openPath(allocator, dir, sub_path, options),
.elf => return Elf.openPath(allocator, dir, sub_path, options),
.macho => return MachO.openPath(allocator, dir, sub_path, options),
.wasm => return Wasm.openPath(allocator, dir, sub_path, options),
@ -85,7 +87,7 @@ pub const File = struct {
pub fn makeWritable(base: *File, dir: fs.Dir, sub_path: []const u8) !void {
switch (base.tag) {
.elf, .macho => {
.coff, .elf, .macho => {
if (base.file != null) return;
base.file = try dir.createFile(sub_path, .{
.truncate = false,
@ -112,6 +114,7 @@ pub const File = struct {
/// after allocateDeclIndexes for any given Decl.
pub fn updateDecl(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDecl(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDecl(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDecl(module, decl),
.c => return @fieldParentPtr(C, "base", base).updateDecl(module, decl),
@ -121,6 +124,7 @@ pub const File = struct {
pub fn updateDeclLineNumber(base: *File, module: *Module, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclLineNumber(module, decl),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclLineNumber(module, decl),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclLineNumber(module, decl),
.c, .wasm => {},
@ -131,6 +135,7 @@ pub const File = struct {
/// any given Decl.
pub fn allocateDeclIndexes(base: *File, decl: *Module.Decl) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).allocateDeclIndexes(decl),
.elf => return @fieldParentPtr(Elf, "base", base).allocateDeclIndexes(decl),
.macho => return @fieldParentPtr(MachO, "base", base).allocateDeclIndexes(decl),
.c, .wasm => {},
@ -140,6 +145,7 @@ pub const File = struct {
pub fn deinit(base: *File) void {
if (base.file) |f| f.close();
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).deinit(),
.elf => @fieldParentPtr(Elf, "base", base).deinit(),
.macho => @fieldParentPtr(MachO, "base", base).deinit(),
.c => @fieldParentPtr(C, "base", base).deinit(),
@ -149,6 +155,11 @@ pub const File = struct {
pub fn destroy(base: *File) void {
switch (base.tag) {
.coff => {
const parent = @fieldParentPtr(Coff, "base", base);
parent.deinit();
base.allocator.destroy(parent);
},
.elf => {
const parent = @fieldParentPtr(Elf, "base", base);
parent.deinit();
@ -177,6 +188,7 @@ pub const File = struct {
defer tracy.end();
try switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).flush(module),
.elf => @fieldParentPtr(Elf, "base", base).flush(module),
.macho => @fieldParentPtr(MachO, "base", base).flush(module),
.c => @fieldParentPtr(C, "base", base).flush(module),
@ -186,6 +198,7 @@ pub const File = struct {
pub fn freeDecl(base: *File, decl: *Module.Decl) void {
switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).freeDecl(decl),
.elf => @fieldParentPtr(Elf, "base", base).freeDecl(decl),
.macho => @fieldParentPtr(MachO, "base", base).freeDecl(decl),
.c => unreachable,
@ -195,6 +208,7 @@ pub const File = struct {
pub fn errorFlags(base: *File) ErrorFlags {
return switch (base.tag) {
.coff => @fieldParentPtr(Coff, "base", base).error_flags,
.elf => @fieldParentPtr(Elf, "base", base).error_flags,
.macho => @fieldParentPtr(MachO, "base", base).error_flags,
.c => return .{ .no_entry_point_found = false },
@ -211,6 +225,7 @@ pub const File = struct {
exports: []const *Module.Export,
) !void {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).updateDeclExports(module, decl, exports),
.elf => return @fieldParentPtr(Elf, "base", base).updateDeclExports(module, decl, exports),
.macho => return @fieldParentPtr(MachO, "base", base).updateDeclExports(module, decl, exports),
.c => return {},
@ -220,6 +235,7 @@ pub const File = struct {
pub fn getDeclVAddr(base: *File, decl: *const Module.Decl) u64 {
switch (base.tag) {
.coff => return @fieldParentPtr(Coff, "base", base).getDeclVAddr(decl),
.elf => return @fieldParentPtr(Elf, "base", base).getDeclVAddr(decl),
.macho => return @fieldParentPtr(MachO, "base", base).getDeclVAddr(decl),
.c => unreachable,
@ -228,6 +244,7 @@ pub const File = struct {
}
pub const Tag = enum {
coff,
elf,
macho,
c,
@ -239,6 +256,7 @@ pub const File = struct {
};
pub const C = @import("link/C.zig");
pub const Coff = @import("link/Coff.zig");
pub const Elf = @import("link/Elf.zig");
pub const MachO = @import("link/MachO.zig");
pub const Wasm = @import("link/Wasm.zig");
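// A self-contained sketch of the tag-dispatch pattern used throughout this file:
// each backend embeds a `base` field, and `@fieldParentPtr` recovers the concrete
// linker type from the base pointer. The types below are stand-ins, not the real
// link.File definitions.
test "fieldParentPtr dispatch sketch" {
    const std = @import("std");
    const Base = struct { tag: enum { coff, elf } };
    const FakeCoff = struct { base: Base, entry_addr: u32 = 0 };
    var coff = FakeCoff{ .base = .{ .tag = .coff } };
    const base_ptr: *Base = &coff.base;
    const recovered = @fieldParentPtr(FakeCoff, "base", base_ptr);
    std.debug.assert(recovered == &coff);
}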

View File

@ -0,0 +1,792 @@
const Coff = @This();
const std = @import("std");
const log = std.log.scoped(.link);
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const fs = std.fs;
const trace = @import("../tracy.zig").trace;
const Module = @import("../Module.zig");
const codegen = @import("../codegen.zig");
const link = @import("../link.zig");
const allocation_padding = 4 / 3;
const minimum_text_block_size = 64 * allocation_padding;
const section_alignment = 4096;
const file_alignment = 512;
const image_base = 0x400_000;
const section_table_size = 2 * 40;
comptime {
std.debug.assert(std.mem.isAligned(image_base, section_alignment));
}
pub const base_tag: link.File.Tag = .coff;
const msdos_stub = @embedFile("msdos-stub.bin");
base: link.File,
ptr_width: enum { p32, p64 },
error_flags: link.File.ErrorFlags = .{},
text_block_free_list: std.ArrayListUnmanaged(*TextBlock) = .{},
last_text_block: ?*TextBlock = null,
/// Section table file pointer.
section_table_offset: u32 = 0,
/// Section data file pointer.
section_data_offset: u32 = 0,
/// Optional header file pointer.
optional_header_offset: u32 = 0,
/// Absolute virtual address of the offset table when the executable is loaded in memory.
offset_table_virtual_address: u32 = 0,
/// Current size of the offset table on disk, must be a multiple of `file_alignment`
offset_table_size: u32 = 0,
/// Contains absolute virtual addresses
offset_table: std.ArrayListUnmanaged(u64) = .{},
/// Free list of offset table indices
offset_table_free_list: std.ArrayListUnmanaged(u32) = .{},
/// Virtual address of the entry point procedure relative to `image_base`
entry_addr: ?u32 = null,
/// Absolute virtual address of the text section when the executable is loaded in memory.
text_section_virtual_address: u32 = 0,
/// Current size of the `.text` section on disk, must be a multiple of `file_alignment`
text_section_size: u32 = 0,
offset_table_size_dirty: bool = false,
text_section_size_dirty: bool = false,
/// This flag is set when the virtual size of the whole image, as loaded in memory, has changed
/// and needs to be updated in the optional header.
size_of_image_dirty: bool = false,
pub const TextBlock = struct {
/// Offset of the code relative to the start of the text section
text_offset: u32,
/// Used size of the text block
size: u32,
/// This field is undefined for symbols with size = 0.
offset_table_index: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `TextBlock`.
prev: ?*TextBlock,
next: ?*TextBlock,
pub const empty = TextBlock{
.text_offset = 0,
.size = 0,
.offset_table_index = undefined,
.prev = null,
.next = null,
};
/// Returns how much room there is to grow in virtual address space.
fn capacity(self: TextBlock) u64 {
if (self.next) |next| {
return next.text_offset - self.text_offset;
}
// This is the last block, the capacity is only limited by the address space.
return std.math.maxInt(u32) - self.text_offset;
}
fn freeListEligible(self: TextBlock) bool {
// No need to keep a free list node for the last block.
const next = self.next orelse return false;
const cap = next.text_offset - self.text_offset;
const ideal_cap = self.size * allocation_padding;
if (cap <= ideal_cap) return false;
const surplus = cap - ideal_cap;
return surplus >= minimum_text_block_size;
}
/// Absolute virtual address of the text block when the file is loaded in memory.
fn getVAddr(self: TextBlock, coff: Coff) u32 {
return coff.text_section_virtual_address + self.text_offset;
}
};
pub const SrcFn = void;
pub fn openPath(allocator: *Allocator, dir: fs.Dir, sub_path: []const u8, options: link.Options) !*link.File {
assert(options.object_format == .coff);
const file = try dir.createFile(sub_path, .{ .truncate = false, .read = true, .mode = link.determineMode(options) });
errdefer file.close();
var coff_file = try allocator.create(Coff);
errdefer allocator.destroy(coff_file);
coff_file.* = openFile(allocator, file, options) catch |err| switch (err) {
error.IncrFailed => try createFile(allocator, file, options),
else => |e| return e,
};
return &coff_file.base;
}
/// Returns error.IncrFailed if incremental update could not be performed.
fn openFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
switch (options.output_mode) {
.Exe => {},
.Obj => return error.IncrFailed,
.Lib => return error.IncrFailed,
}
var self: Coff = .{
.base = .{
.file = file,
.tag = .coff,
.options = options,
.allocator = allocator,
},
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedCOFFArchitecture,
},
};
errdefer self.deinit();
// TODO implement reading the PE/COFF file
return error.IncrFailed;
}
/// Truncates the existing file contents and overwrites the contents.
/// Returns an error if `file` is not already open with +read +write +seek abilities.
fn createFile(allocator: *Allocator, file: fs.File, options: link.Options) !Coff {
// TODO Write object specific relocations, COFF symbol table, then enable object file output.
switch (options.output_mode) {
.Exe => {},
.Obj => return error.TODOImplementWritingObjFiles,
.Lib => return error.TODOImplementWritingLibFiles,
}
var self: Coff = .{
.base = .{
.tag = .coff,
.options = options,
.allocator = allocator,
.file = file,
},
.ptr_width = switch (options.target.cpu.arch.ptrBitWidth()) {
32 => .p32,
64 => .p64,
else => return error.UnsupportedCOFFArchitecture,
},
};
errdefer self.deinit();
var coff_file_header_offset: u32 = 0;
if (options.output_mode == .Exe) {
// Write the MS-DOS stub and the PE signature
try self.base.file.?.pwriteAll(msdos_stub ++ "PE\x00\x00", 0);
coff_file_header_offset = msdos_stub.len + 4;
}
// COFF file header
const data_directory_count = 0;
var hdr_data: [112 + data_directory_count * 8 + section_table_size]u8 = undefined;
var index: usize = 0;
const machine = self.base.options.target.cpu.arch.toCoffMachine();
if (machine == .Unknown) {
return error.UnsupportedCOFFArchitecture;
}
std.mem.writeIntLittle(u16, hdr_data[0..2], @enumToInt(machine));
index += 2;
// Number of sections (we only use .got, .text)
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 2);
index += 2;
// TimeDateStamp (u32), PointerToSymbolTable (u32), NumberOfSymbols (u32)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
const optional_header_size = switch (options.output_mode) {
.Exe => data_directory_count * 8 + switch (self.ptr_width) {
.p32 => @as(u16, 96),
.p64 => 112,
},
else => 0,
};
const section_table_offset = coff_file_header_offset + 20 + optional_header_size;
const default_offset_table_size = file_alignment;
const default_size_of_code = 0;
self.section_data_offset = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, file_alignment);
const section_data_relative_virtual_address = std.mem.alignForwardGeneric(u32, self.section_table_offset + section_table_size, section_alignment);
self.offset_table_virtual_address = image_base + section_data_relative_virtual_address;
self.offset_table_size = default_offset_table_size;
self.section_table_offset = section_table_offset;
self.text_section_virtual_address = image_base + section_data_relative_virtual_address + section_alignment;
self.text_section_size = default_size_of_code;
// Size of file when loaded in memory
const size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + default_size_of_code, section_alignment);
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], optional_header_size);
index += 2;
// Characteristics
var characteristics: u16 = std.coff.IMAGE_FILE_DEBUG_STRIPPED | std.coff.IMAGE_FILE_RELOCS_STRIPPED; // TODO Remove debug info stripped flag when necessary
if (options.output_mode == .Exe) {
characteristics |= std.coff.IMAGE_FILE_EXECUTABLE_IMAGE;
}
switch (self.ptr_width) {
.p32 => characteristics |= std.coff.IMAGE_FILE_32BIT_MACHINE,
.p64 => characteristics |= std.coff.IMAGE_FILE_LARGE_ADDRESS_AWARE,
}
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], characteristics);
index += 2;
assert(index == 20);
try self.base.file.?.pwriteAll(hdr_data[0..index], coff_file_header_offset);
if (options.output_mode == .Exe) {
self.optional_header_offset = coff_file_header_offset + 20;
// Optional header
index = 0;
std.mem.writeIntLittle(u16, hdr_data[0..2], switch (self.ptr_width) {
.p32 => @as(u16, 0x10b),
.p64 => 0x20b,
});
index += 2;
// Linker version (u8 + u8)
std.mem.set(u8, hdr_data[index..][0..2], 0);
index += 2;
// SizeOfCode (UNUSED, u32), SizeOfInitializedData (u32), SizeOfUninitializedData (u32), AddressOfEntryPoint (u32), BaseOfCode (UNUSED, u32)
std.mem.set(u8, hdr_data[index..][0..20], 0);
index += 20;
if (self.ptr_width == .p32) {
// Base of data relative to the image base (UNUSED)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Image base address
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], image_base);
index += 4;
} else {
// Image base address
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], image_base);
index += 8;
}
// Section alignment
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], section_alignment);
index += 4;
// File alignment
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], file_alignment);
index += 4;
// Required OS version, 6.0 is vista
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
index += 2;
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
index += 2;
// Image version
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Required subsystem version, same as OS version
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 6);
index += 2;
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0);
index += 2;
// Reserved zeroes (u32)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], size_of_image);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
index += 4;
// CheckSum (u32)
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Subsystem, TODO: Let users specify the subsystem, always CUI for now
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 3);
index += 2;
// DLL characteristics
std.mem.writeIntLittle(u16, hdr_data[index..][0..2], 0x0);
index += 2;
switch (self.ptr_width) {
.p32 => {
// Size of stack reserve + commit
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000_000);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
index += 4;
// Size of heap reserve + commit
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x100_000);
index += 4;
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], 0x1_000);
index += 4;
},
.p64 => {
// Size of stack reserve + commit
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000_000);
index += 8;
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
index += 8;
// Size of heap reserve + commit
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x100_000);
index += 8;
std.mem.writeIntLittle(u64, hdr_data[index..][0..8], 0x1_000);
index += 8;
},
}
// Reserved zeroes
std.mem.set(u8, hdr_data[index..][0..4], 0);
index += 4;
// Number of data directories
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], data_directory_count);
index += 4;
// Initialize data directories to zero
std.mem.set(u8, hdr_data[index..][0 .. data_directory_count * 8], 0);
index += data_directory_count * 8;
assert(index == optional_header_size);
}
// Write section table.
// First, the .got section
hdr_data[index..][0..8].* = ".got\x00\x00\x00\x00".*;
index += 8;
if (options.output_mode == .Exe) {
// Virtual size (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
index += 4;
// Virtual address (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.offset_table_virtual_address - image_base);
index += 4;
} else {
std.mem.set(u8, hdr_data[index..][0..8], 0);
index += 8;
}
// Size of raw data (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_offset_table_size);
index += 4;
// File pointer to the start of the section
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset);
index += 4;
// Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
// Section flags
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], std.coff.IMAGE_SCN_CNT_INITIALIZED_DATA | std.coff.IMAGE_SCN_MEM_READ);
index += 4;
// Then, the .text section
hdr_data[index..][0..8].* = ".text\x00\x00\x00".*;
index += 8;
if (options.output_mode == .Exe) {
// Virtual size (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
index += 4;
// Virtual address (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.text_section_virtual_address - image_base);
index += 4;
} else {
std.mem.set(u8, hdr_data[index..][0..8], 0);
index += 8;
}
// Size of raw data (u32)
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], default_size_of_code);
index += 4;
// File pointer to the start of the section
std.mem.writeIntLittle(u32, hdr_data[index..][0..4], self.section_data_offset + default_offset_table_size);
index += 4;
// Pointer to relocations (u32), PointerToLinenumbers (u32), NumberOfRelocations (u16), NumberOfLinenumbers (u16)
std.mem.set(u8, hdr_data[index..][0..12], 0);
index += 12;
// Section flags
std.mem.writeIntLittle(
u32,
hdr_data[index..][0..4],
std.coff.IMAGE_SCN_CNT_CODE | std.coff.IMAGE_SCN_MEM_EXECUTE | std.coff.IMAGE_SCN_MEM_READ | std.coff.IMAGE_SCN_MEM_WRITE,
);
index += 4;
assert(index == optional_header_size + section_table_size);
try self.base.file.?.pwriteAll(hdr_data[0..index], self.optional_header_offset);
try self.base.file.?.setEndPos(self.section_data_offset + default_offset_table_size + default_size_of_code);
return self;
}
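// Sketch of the on-disk layout produced by createFile above for a 64-bit
// executable: MS-DOS stub, "PE\0\0", 20-byte COFF header, optional header,
// section table, then section data at the next file_alignment boundary.
// The stub length below is an assumed value for illustration only.
test "coff layout arithmetic sketch" {
    const std = @import("std");
    const assumed_stub_len: u32 = 128;
    const coff_file_header_offset = assumed_stub_len + 4; // after "PE\x00\x00"
    const optional_header_size: u32 = 112; // PE32+ with zero data directories
    const table_size: u32 = 2 * 40; // .got and .text section headers
    const alignment: u32 = 512;
    const section_table_offset = coff_file_header_offset + 20 + optional_header_size;
    const section_data_offset = std.mem.alignForwardGeneric(u32, section_table_offset + table_size, alignment);
    std.debug.assert(section_data_offset == 512);
}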
pub fn allocateDeclIndexes(self: *Coff, decl: *Module.Decl) !void {
try self.offset_table.ensureCapacity(self.base.allocator, self.offset_table.items.len + 1);
if (self.offset_table_free_list.popOrNull()) |i| {
decl.link.coff.offset_table_index = i;
} else {
decl.link.coff.offset_table_index = @intCast(u32, self.offset_table.items.len);
_ = self.offset_table.addOneAssumeCapacity();
const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
if (self.offset_table.items.len > self.offset_table_size / entry_size) {
self.offset_table_size_dirty = true;
}
}
self.offset_table.items[decl.link.coff.offset_table_index] = 0;
}
fn allocateTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const new_block_min_capacity = new_block_size * allocation_padding;
// We use these to indicate our intention to update metadata, placing the new block,
// and possibly removing a free list node.
// It would be simpler to do it inside the for loop below, but that would cause a
// problem if an error was returned later in the function. So this action
// is actually carried out at the end of the function, when errors are no longer possible.
var block_placement: ?*TextBlock = null;
var free_list_removal: ?usize = null;
const vaddr = blk: {
var i: usize = 0;
while (i < self.text_block_free_list.items.len) {
const free_block = self.text_block_free_list.items[i];
const next_block_text_offset = free_block.text_offset + free_block.capacity();
const new_block_text_offset = std.mem.alignForwardGeneric(u64, free_block.getVAddr(self.*) + free_block.size, alignment) - self.text_section_virtual_address;
if (new_block_text_offset < next_block_text_offset and next_block_text_offset - new_block_text_offset >= new_block_min_capacity) {
block_placement = free_block;
const remaining_capacity = next_block_text_offset - new_block_text_offset - new_block_min_capacity;
if (remaining_capacity < minimum_text_block_size) {
free_list_removal = i;
}
break :blk new_block_text_offset + self.text_section_virtual_address;
} else {
if (!free_block.freeListEligible()) {
_ = self.text_block_free_list.swapRemove(i);
} else {
i += 1;
}
continue;
}
} else if (self.last_text_block) |last| {
const new_block_vaddr = std.mem.alignForwardGeneric(u64, last.getVAddr(self.*) + last.size, alignment);
block_placement = last;
break :blk new_block_vaddr;
} else {
break :blk self.text_section_virtual_address;
}
};
const expand_text_section = block_placement == null or block_placement.?.next == null;
if (expand_text_section) {
const needed_size = @intCast(u32, std.mem.alignForwardGeneric(u64, vaddr + new_block_size - self.text_section_virtual_address, file_alignment));
if (needed_size > self.text_section_size) {
const current_text_section_virtual_size = std.mem.alignForwardGeneric(u32, self.text_section_size, section_alignment);
const new_text_section_virtual_size = std.mem.alignForwardGeneric(u32, needed_size, section_alignment);
if (current_text_section_virtual_size != new_text_section_virtual_size) {
self.size_of_image_dirty = true;
// Write new virtual size
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, new_text_section_virtual_size);
try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 8);
}
self.text_section_size = needed_size;
self.text_section_size_dirty = true;
}
self.last_text_block = text_block;
}
text_block.text_offset = @intCast(u32, vaddr - self.text_section_virtual_address);
text_block.size = @intCast(u32, new_block_size);
// This function can also reallocate a text block.
// In this case we need to "unplug" it from its previous location before
// plugging it in to its new location.
if (text_block.prev) |prev| {
prev.next = text_block.next;
}
if (text_block.next) |next| {
next.prev = text_block.prev;
}
if (block_placement) |big_block| {
text_block.prev = big_block;
text_block.next = big_block.next;
big_block.next = text_block;
} else {
text_block.prev = null;
text_block.next = null;
}
if (free_list_removal) |i| {
_ = self.text_block_free_list.swapRemove(i);
}
return vaddr;
}
fn growTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64, alignment: u64) !u64 {
const block_vaddr = text_block.getVAddr(self.*);
const align_ok = std.mem.alignBackwardGeneric(u64, block_vaddr, alignment) == block_vaddr;
const need_realloc = !align_ok or new_block_size > text_block.capacity();
if (!need_realloc) return @as(u64, block_vaddr);
return self.allocateTextBlock(text_block, new_block_size, alignment);
}
fn shrinkTextBlock(self: *Coff, text_block: *TextBlock, new_block_size: u64) void {
text_block.size = @intCast(u32, new_block_size);
if (text_block.capacity() - text_block.size >= minimum_text_block_size) {
self.text_block_free_list.append(self.base.allocator, text_block) catch {};
}
}
fn freeTextBlock(self: *Coff, text_block: *TextBlock) void {
var already_have_free_list_node = false;
{
var i: usize = 0;
// TODO turn text_block_free_list into a hash map
while (i < self.text_block_free_list.items.len) {
if (self.text_block_free_list.items[i] == text_block) {
_ = self.text_block_free_list.swapRemove(i);
continue;
}
if (self.text_block_free_list.items[i] == text_block.prev) {
already_have_free_list_node = true;
}
i += 1;
}
}
if (self.last_text_block == text_block) {
self.last_text_block = text_block.prev;
}
if (text_block.prev) |prev| {
prev.next = text_block.next;
if (!already_have_free_list_node and prev.freeListEligible()) {
// The free list is heuristics, it doesn't have to be perfect, so we can
// ignore the OOM here.
self.text_block_free_list.append(self.base.allocator, prev) catch {};
}
}
if (text_block.next) |next| {
next.prev = text_block.prev;
}
}
fn writeOffsetTableEntry(self: *Coff, index: usize) !void {
const entry_size = self.base.options.target.cpu.arch.ptrBitWidth() / 8;
const endian = self.base.options.target.cpu.arch.endian();
const offset_table_start = self.section_data_offset;
if (self.offset_table_size_dirty) {
const current_raw_size = self.offset_table_size;
const new_raw_size = self.offset_table_size * 2;
log.debug("growing offset table from raw size {} to {}\n", .{ current_raw_size, new_raw_size });
// Move the text section to a new place in the executable
const current_text_section_start = self.section_data_offset + current_raw_size;
const new_text_section_start = self.section_data_offset + new_raw_size;
const amt = try self.base.file.?.copyRangeAll(current_text_section_start, self.base.file.?, new_text_section_start, self.text_section_size);
if (amt != self.text_section_size) return error.InputOutput;
// Write the new raw size in the .got header
var buf: [8]u8 = undefined;
std.mem.writeIntLittle(u32, buf[0..4], new_raw_size);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 16);
// Write the new .text section file offset in the .text section header
std.mem.writeIntLittle(u32, buf[0..4], new_text_section_start);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 20);
const current_virtual_size = std.mem.alignForwardGeneric(u32, self.offset_table_size, section_alignment);
const new_virtual_size = std.mem.alignForwardGeneric(u32, new_raw_size, section_alignment);
// If we had to move in the virtual address space, we need to fix the VAs in the offset table, as well as the virtual address of the `.text` section
// and the virtual size of the `.got` section
if (new_virtual_size != current_virtual_size) {
log.debug("growing offset table from virtual size {} to {}\n", .{ current_virtual_size, new_virtual_size });
self.size_of_image_dirty = true;
const va_offset = new_virtual_size - current_virtual_size;
// Write .got virtual size
std.mem.writeIntLittle(u32, buf[0..4], new_virtual_size);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 8);
// Write .text new virtual address
self.text_section_virtual_address = self.text_section_virtual_address + va_offset;
std.mem.writeIntLittle(u32, buf[0..4], self.text_section_virtual_address - image_base);
try self.base.file.?.pwriteAll(buf[0..4], self.section_table_offset + 40 + 12);
// Fix the VAs in the offset table
for (self.offset_table.items) |*va, idx| {
if (va.* != 0) {
va.* += va_offset;
switch (entry_size) {
4 => {
std.mem.writeInt(u32, buf[0..4], @intCast(u32, va.*), endian);
try self.base.file.?.pwriteAll(buf[0..4], offset_table_start + idx * entry_size);
},
8 => {
std.mem.writeInt(u64, &buf, va.*, endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + idx * entry_size);
},
else => unreachable,
}
}
}
}
self.offset_table_size = new_raw_size;
self.offset_table_size_dirty = false;
}
// Write the new entry
switch (entry_size) {
4 => {
var buf: [4]u8 = undefined;
std.mem.writeInt(u32, &buf, @intCast(u32, self.offset_table.items[index]), endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
},
8 => {
var buf: [8]u8 = undefined;
std.mem.writeInt(u64, &buf, self.offset_table.items[index], endian);
try self.base.file.?.pwriteAll(&buf, offset_table_start + index * entry_size);
},
else => unreachable,
}
}
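// Sketch of the growth step above: when the .got raw size doubles, the .text
// section data slides to start right after the enlarged table. Illustrative,
// assumed sizes only.
test "offset table growth sketch" {
    const section_data_offset: u32 = 1024;
    const current_raw_size: u32 = 512;
    const new_raw_size = current_raw_size * 2;
    @import("std").debug.assert(section_data_offset + new_raw_size == 2048);
}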
pub fn updateDecl(self: *Coff, module: *Module, decl: *Module.Decl) !void {
// TODO COFF/PE debug information
// TODO Implement exports
const tracy = trace(@src());
defer tracy.end();
var code_buffer = std.ArrayList(u8).init(self.base.allocator);
defer code_buffer.deinit();
const typed_value = decl.typed_value.most_recent.typed_value;
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .none);
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
.fail => |em| {
decl.analysis = .codegen_failure;
try module.failed_decls.put(module.gpa, decl, em);
return;
},
};
const required_alignment = typed_value.ty.abiAlignment(self.base.options.target);
const curr_size = decl.link.coff.size;
if (curr_size != 0) {
const capacity = decl.link.coff.capacity();
const need_realloc = code.len > capacity or
!std.mem.isAlignedGeneric(u32, decl.link.coff.text_offset, required_alignment);
if (need_realloc) {
const curr_vaddr = self.getDeclVAddr(decl);
const vaddr = try self.growTextBlock(&decl.link.coff, code.len, required_alignment);
log.debug("growing {} from 0x{x} to 0x{x}\n", .{ decl.name, curr_vaddr, vaddr });
if (vaddr != curr_vaddr) {
log.debug(" (writing new offset table entry)\n", .{});
self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
}
} else if (code.len < curr_size) {
self.shrinkTextBlock(&decl.link.coff, code.len);
}
} else {
const vaddr = try self.allocateTextBlock(&decl.link.coff, code.len, required_alignment);
log.debug("allocated text block for {} at 0x{x} (size: {Bi})\n", .{ std.mem.spanZ(decl.name), vaddr, code.len });
errdefer self.freeTextBlock(&decl.link.coff);
self.offset_table.items[decl.link.coff.offset_table_index] = vaddr;
try self.writeOffsetTableEntry(decl.link.coff.offset_table_index);
}
// Write the code into the file
try self.base.file.?.pwriteAll(code, self.section_data_offset + self.offset_table_size + decl.link.coff.text_offset);
// Since we updated the vaddr and the size, each corresponding export symbol also needs to be updated.
const decl_exports = module.decl_exports.get(decl) orelse &[0]*Module.Export{};
return self.updateDeclExports(module, decl, decl_exports);
}
pub fn freeDecl(self: *Coff, decl: *Module.Decl) void {
// Appending to free lists is allowed to fail because the free lists are heuristics based anyway.
self.freeTextBlock(&decl.link.coff);
self.offset_table_free_list.append(self.base.allocator, decl.link.coff.offset_table_index) catch {};
}
pub fn updateDeclExports(self: *Coff, module: *Module, decl: *const Module.Decl, exports: []const *Module.Export) !void {
for (exports) |exp| {
if (exp.options.section) |section_name| {
if (!std.mem.eql(u8, section_name, ".text")) {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: ExportOptions.section", .{}),
);
continue;
}
}
if (std.mem.eql(u8, exp.options.name, "_start")) {
self.entry_addr = decl.link.coff.getVAddr(self.*) - image_base;
} else {
try module.failed_exports.ensureCapacity(module.gpa, module.failed_exports.items().len + 1);
module.failed_exports.putAssumeCapacityNoClobber(
exp,
try Module.ErrorMsg.create(self.base.allocator, 0, "Unimplemented: Exports other than '_start'", .{}),
);
continue;
}
}
}
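// Sketch of the `_start` handling above: AddressOfEntryPoint in the optional
// header is an RVA, so the Decl's virtual address has image_base subtracted.
// The addresses below are assumed, illustrative values.
test "entry point rva sketch" {
    const base: u32 = 0x400_000;
    const start_vaddr: u32 = 0x402_000;
    @import("std").debug.assert(start_vaddr - base == 0x2000);
}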
pub fn flush(self: *Coff, module: *Module) !void {
if (self.text_section_size_dirty) {
// Write the new raw size in the .text header
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, self.text_section_size);
try self.base.file.?.pwriteAll(&buf, self.section_table_offset + 40 + 16);
try self.base.file.?.setEndPos(self.section_data_offset + self.offset_table_size + self.text_section_size);
self.text_section_size_dirty = false;
}
if (self.base.options.output_mode == .Exe and self.size_of_image_dirty) {
const new_size_of_image = std.mem.alignForwardGeneric(u32, self.text_section_virtual_address - image_base + self.text_section_size, section_alignment);
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, new_size_of_image);
try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 56);
self.size_of_image_dirty = false;
}
if (self.entry_addr == null and self.base.options.output_mode == .Exe) {
log.debug("flushing. no_entry_point_found = true\n", .{});
self.error_flags.no_entry_point_found = true;
} else {
log.debug("flushing. no_entry_point_found = false\n", .{});
self.error_flags.no_entry_point_found = false;
if (self.base.options.output_mode == .Exe) {
// Write AddressOfEntryPoint
var buf: [4]u8 = undefined;
std.mem.writeIntLittle(u32, &buf, self.entry_addr.?);
try self.base.file.?.pwriteAll(&buf, self.optional_header_offset + 16);
}
}
}
pub fn getDeclVAddr(self: *Coff, decl: *const Module.Decl) u64 {
return self.text_section_virtual_address + decl.link.coff.text_offset;
}
pub fn updateDeclLineNumber(self: *Coff, module: *Module, decl: *Module.Decl) !void {
// TODO Implement this
}
pub fn deinit(self: *Coff) void {
self.text_block_free_list.deinit(self.base.allocator);
self.offset_table.deinit(self.base.allocator);
self.offset_table_free_list.deinit(self.base.allocator);
}

View File

@ -1656,8 +1656,8 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
try dbg_line_buffer.ensureCapacity(26);
const line_off: u28 = blk: {
if (decl.scope.cast(Module.Scope.File)) |scope_file| {
const tree = scope_file.contents.tree;
if (decl.scope.cast(Module.Scope.Container)) |container_scope| {
const tree = container_scope.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.
@ -1735,7 +1735,13 @@ pub fn updateDecl(self: *Elf, module: *Module, decl: *Module.Decl) !void {
} else {
// TODO implement .debug_info for global variables
}
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, &dbg_line_buffer, &dbg_info_buffer, &dbg_info_type_relocs);
const res = try codegen.generateSymbol(&self.base, decl.src(), typed_value, &code_buffer, .{
.dwarf = .{
.dbg_line = &dbg_line_buffer,
.dbg_info = &dbg_info_buffer,
.dbg_info_type_relocs = &dbg_info_type_relocs,
},
});
const code = switch (res) {
.externally_managed => |x| x,
.appended => code_buffer.items,
@ -2157,8 +2163,8 @@ pub fn updateDeclLineNumber(self: *Elf, module: *Module, decl: *const Module.Dec
const tracy = trace(@src());
defer tracy.end();
const scope_file = decl.scope.cast(Module.Scope.File).?;
const tree = scope_file.contents.tree;
const container_scope = decl.scope.cast(Module.Scope.Container).?;
const tree = container_scope.file_scope.contents.tree;
const file_ast_decls = tree.root_node.decls();
// TODO Look into improving the performance here by adding a token-index-to-line
// lookup table. Currently this involves scanning over the source code for newlines.

Some files were not shown because too many files have changed in this diff