Mirror of https://github.com/ziglang/zig.git (synced 2026-02-13 12:59:04 +00:00)
Merge remote-tracking branch 'origin/master' into zir-memory-layout
I need the enum arrays that were just merged into master.
This commit is contained in: commit f5aca4a6a1
@@ -564,7 +564,14 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link/Coff.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Archive.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/CodeSignature.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/DebugSymbols.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Object.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Trie.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Zld.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/bind.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/commands.zig"
"${CMAKE_SOURCE_DIR}/src/link/Wasm.zig"
"${CMAKE_SOURCE_DIR}/src/link/C/zig.h"
"${CMAKE_SOURCE_DIR}/src/link/msdos-stub.bin"
@@ -176,7 +176,7 @@ pub fn IntegerBitSet(comptime size: u16) type {
/// The default options (.{}) will iterate indices of set bits in
/// ascending order. Modifications to the underlying bit set may
/// or may not be observed by the iterator.
pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options.direction) {
pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) {
return .{
.bits_remain = switch (options.kind) {
.set => self.mask,
@@ -185,7 +185,11 @@ pub fn IntegerBitSet(comptime size: u16) type {
};
}

fn Iterator(comptime direction: IteratorOptions.Direction) type {
pub fn Iterator(comptime options: IteratorOptions) type {
return SingleWordIterator(options.direction);
}

fn SingleWordIterator(comptime direction: IteratorOptions.Direction) type {
return struct {
const IterSelf = @This();
// all bits which have not yet been iterated over
@@ -425,8 +429,12 @@ pub fn ArrayBitSet(comptime MaskIntType: type, comptime size: usize) type {
/// The default options (.{}) will iterate indices of set bits in
/// ascending order. Modifications to the underlying bit set may
/// or may not be observed by the iterator.
pub fn iterator(self: *const Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
return BitSetIterator(MaskInt, options).init(&self.masks, last_item_mask);
pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) {
return Iterator(options).init(&self.masks, last_item_mask);
}

pub fn Iterator(comptime options: IteratorOptions) type {
return BitSetIterator(MaskInt, options);
}

fn maskBit(index: usize) MaskInt {
@@ -700,11 +708,15 @@ pub const DynamicBitSetUnmanaged = struct {
/// ascending order. Modifications to the underlying bit set may
/// or may not be observed by the iterator. Resizing the underlying
/// bit set invalidates the iterator.
pub fn iterator(self: *const Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
pub fn iterator(self: *const Self, comptime options: IteratorOptions) Iterator(options) {
const num_masks = numMasks(self.bit_length);
const padding_bits = num_masks * @bitSizeOf(MaskInt) - self.bit_length;
const last_item_mask = (~@as(MaskInt, 0)) >> @intCast(ShiftInt, padding_bits);
return BitSetIterator(MaskInt, options).init(self.masks[0..num_masks], last_item_mask);
return Iterator(options).init(self.masks[0..num_masks], last_item_mask);
}

pub fn Iterator(comptime options: IteratorOptions) type {
return BitSetIterator(MaskInt, options);
}

fn maskBit(index: usize) MaskInt {
@@ -858,9 +870,11 @@ pub const DynamicBitSet = struct {
/// ascending order. Modifications to the underlying bit set may
/// or may not be observed by the iterator. Resizing the underlying
/// bit set invalidates the iterator.
pub fn iterator(self: *Self, comptime options: IteratorOptions) BitSetIterator(MaskInt, options) {
pub fn iterator(self: *Self, comptime options: IteratorOptions) Iterator(options) {
return self.unmanaged.iterator(options);
}

pub const Iterator = DynamicBitSetUnmanaged.Iterator;
};

/// Options for configuring an iterator over a bit set
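For orientation, a minimal usage sketch of the iterator API touched by the hunks above, written against the std.bit_set declarations shown in the diff; the test itself is not part of this commit:

const std = @import("std");

test "iterate set bits (illustrative)" {
    var bits = std.bit_set.IntegerBitSet(8).initEmpty();
    bits.set(1);
    bits.set(5);
    // The default options (.{}) iterate indices of set bits in ascending order,
    // and Iterator(options) is now the public return type of iterator().
    var it = bits.iterator(.{});
    std.debug.assert(it.next().? == 1);
    std.debug.assert(it.next().? == 5);
    std.debug.assert(it.next() == null);
}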
@@ -140,7 +140,7 @@ pub fn __builtin_object_size(ptr: ?*const c_void, ty: c_int) callconv(.Inline) u
// If it is not possible to determine which objects ptr points to at compile time,
// __builtin_object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0
// for type 2 or 3.
if (ty == 0 or ty == 1) return @bitCast(usize, -@as(c_long, 1));
if (ty == 0 or ty == 1) return @bitCast(usize, -@as(isize, 1));
if (ty == 2 or ty == 3) return 0;
unreachable;
}
@@ -188,3 +188,9 @@ pub fn __builtin_memcpy(
pub fn __builtin_expect(expr: c_long, c: c_long) callconv(.Inline) c_long {
return expr;
}

// __builtin_alloca_with_align is not currently implemented.
// It is used in a run-translated-c test and a test-translate-c test to ensure that non-implemented
// builtins are correctly demoted. If you implement __builtin_alloca_with_align, please update the
// run-translated-c test and the test-translate-c test to use a different non-implemented builtin.
// pub fn __builtin_alloca_with_align(size: usize, alignment: usize) callconv(.Inline) *c_void {}
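The comment above describes the fallback behaviour of __builtin_object_size when nothing is known about the pointer at compile time. A small illustrative check, written as if added next to the definitions in this file; not part of the commit:

test "__builtin_object_size fallback values (illustrative)" {
    const std = @import("std");
    // Types 0 and 1 report "unknown, assume maximum": (size_t) -1.
    std.debug.assert(__builtin_object_size(null, 0) == @bitCast(usize, -@as(isize, 1)));
    // Types 2 and 3 report "unknown, assume minimum": 0.
    std.debug.assert(__builtin_object_size(null, 2) == 0);
}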
@@ -24,8 +24,12 @@ pub const aead = struct {
pub const Gimli = @import("crypto/gimli.zig").Aead;

pub const chacha_poly = struct {
pub const ChaCha20Poly1305 = @import("crypto/chacha20.zig").Chacha20Poly1305;
pub const XChaCha20Poly1305 = @import("crypto/chacha20.zig").XChacha20Poly1305;
pub const ChaCha20Poly1305 = @import("crypto/chacha20.zig").ChaCha20Poly1305;
pub const ChaCha12Poly1305 = @import("crypto/chacha20.zig").ChaCha12Poly1305;
pub const ChaCha8Poly1305 = @import("crypto/chacha20.zig").ChaCha8Poly1305;
pub const XChaCha20Poly1305 = @import("crypto/chacha20.zig").XChaCha20Poly1305;
pub const XChaCha12Poly1305 = @import("crypto/chacha20.zig").XChaCha12Poly1305;
pub const XChaCha8Poly1305 = @import("crypto/chacha20.zig").XChaCha8Poly1305;
};

pub const isap = @import("crypto/isap.zig");
@@ -119,8 +123,14 @@ pub const sign = struct {
pub const stream = struct {
pub const chacha = struct {
pub const ChaCha20IETF = @import("crypto/chacha20.zig").ChaCha20IETF;
pub const ChaCha12IETF = @import("crypto/chacha20.zig").ChaCha12IETF;
pub const ChaCha8IETF = @import("crypto/chacha20.zig").ChaCha8IETF;
pub const ChaCha20With64BitNonce = @import("crypto/chacha20.zig").ChaCha20With64BitNonce;
pub const ChaCha12With64BitNonce = @import("crypto/chacha20.zig").ChaCha12With64BitNonce;
pub const ChaCha8With64BitNonce = @import("crypto/chacha20.zig").ChaCha8With64BitNonce;
pub const XChaCha20IETF = @import("crypto/chacha20.zig").XChaCha20IETF;
pub const XChaCha12IETF = @import("crypto/chacha20.zig").XChaCha12IETF;
pub const XChaCha8IETF = @import("crypto/chacha20.zig").XChaCha8IETF;
};

pub const salsa = struct {

@@ -202,6 +202,7 @@ pub fn benchmarkBatchSignatureVerification(comptime Signature: anytype, comptime
const aeads = [_]Crypto{
Crypto{ .ty = crypto.aead.chacha_poly.ChaCha20Poly1305, .name = "chacha20Poly1305" },
Crypto{ .ty = crypto.aead.chacha_poly.XChaCha20Poly1305, .name = "xchacha20Poly1305" },
Crypto{ .ty = crypto.aead.chacha_poly.XChaCha8Poly1305, .name = "xchacha8Poly1305" },
Crypto{ .ty = crypto.aead.salsa_poly.XSalsa20Poly1305, .name = "xsalsa20Poly1305" },
Crypto{ .ty = crypto.aead.Gimli, .name = "gimli-aead" },
Crypto{ .ty = crypto.aead.aegis.Aegis128L, .name = "aegis-128l" },
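The renames above move the ChaCha-Poly1305 constructions under crypto.aead.chacha_poly and fix the inconsistent casing. A quick sketch of how the new names are reached; the length constants are the usual IETF parameters and are assumed rather than taken from this diff:

const std = @import("std");

test "chacha_poly namespace (illustrative)" {
    const C = std.crypto.aead.chacha_poly.ChaCha20Poly1305;
    // IETF ChaCha20-Poly1305: 256-bit key, 96-bit nonce, 128-bit tag.
    std.debug.assert(C.key_length == 32);
    std.debug.assert(C.nonce_length == 12);
    std.debug.assert(C.tag_length == 16);
}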
File diff suppressed because it is too large
@@ -20,20 +20,20 @@ const Error = std.crypto.Error;
// pseudorandom function. See Appendix B.1 for further discussion.)
// PBKDF2 is recommended for new applications.
//
// PBKDF2 (P, S, c, dkLen)
// PBKDF2 (P, S, c, dk_len)
//
// Options: PRF underlying pseudorandom function (hLen
// Options: PRF underlying pseudorandom function (h_len
// denotes the length in octets of the
// pseudorandom function output)
//
// Input: P password, an octet string
// S salt, an octet string
// c iteration count, a positive integer
// dkLen intended length in octets of the derived
// dk_len intended length in octets of the derived
// key, a positive integer, at most
// (2^32 - 1) * hLen
// (2^32 - 1) * h_len
//
// Output: DK derived key, a dkLen-octet string
// Output: DK derived key, a dk_len-octet string

// Based on Apple's CommonKeyDerivation, based originally on code by Damien Bergamini.

@@ -41,7 +41,7 @@ const Error = std.crypto.Error;
///
/// PBKDF2 is defined in RFC 2898, and is a recommendation of NIST SP 800-132.
///
/// derivedKey: Slice of appropriate size for generated key. Generally 16 or 32 bytes in length.
/// dk: Slice of appropriate size for generated key. Generally 16 or 32 bytes in length.
/// May be uninitialized. All bytes will be overwritten.
/// Maximum size is `maxInt(u32) * Hash.digest_length`
/// It is a programming error to pass buffer longer than the maximum size.
@@ -52,43 +52,38 @@ const Error = std.crypto.Error;
///
/// rounds: Iteration count. Must be greater than 0. Common values range from 1,000 to 100,000.
/// Larger iteration counts improve security by increasing the time required to compute
/// the derivedKey. It is common to tune this parameter to achieve approximately 100ms.
/// the dk. It is common to tune this parameter to achieve approximately 100ms.
///
/// Prf: Pseudo-random function to use. A common choice is `std.crypto.auth.hmac.HmacSha256`.
pub fn pbkdf2(derivedKey: []u8, password: []const u8, salt: []const u8, rounds: u32, comptime Prf: type) Error!void {
pub fn pbkdf2(dk: []u8, password: []const u8, salt: []const u8, rounds: u32, comptime Prf: type) Error!void {
if (rounds < 1) return error.WeakParameters;

const dkLen = derivedKey.len;
const hLen = Prf.mac_length;
comptime std.debug.assert(hLen >= 1);
const dk_len = dk.len;
const h_len = Prf.mac_length;
comptime std.debug.assert(h_len >= 1);

// FromSpec:
//
// 1. If dkLen > maxInt(u32) * hLen, output "derived key too long" and
// 1. If dk_len > maxInt(u32) * h_len, output "derived key too long" and
// stop.
//
if (comptime (maxInt(usize) > maxInt(u32) * hLen) and (dkLen > @as(usize, maxInt(u32) * hLen))) {
// If maxInt(usize) is less than `maxInt(u32) * hLen` then dkLen is always inbounds
if (dk_len / h_len >= maxInt(u32)) {
// Counter starts at 1 and is 32 bit, so if we have to return more blocks, we would overflow
return error.OutputTooLong;
}

// FromSpec:
//
// 2. Let l be the number of hLen-long blocks of bytes in the derived key,
// 2. Let l be the number of h_len-long blocks of bytes in the derived key,
// rounding up, and let r be the number of bytes in the last
// block
//

// l will not overflow, proof:
// let `L(dkLen, hLen) = (dkLen + hLen - 1) / hLen`
// then `L^-1(l, hLen) = l*hLen - hLen + 1`
// 1) L^-1(maxInt(u32), hLen) <= maxInt(u32)*hLen
// 2) maxInt(u32)*hLen - hLen + 1 <= maxInt(u32)*hLen // subtract maxInt(u32)*hLen + 1
// 3) -hLen <= -1 // multiply by -1
// 4) hLen >= 1
const r_ = dkLen % hLen;
const l = @intCast(u32, (dkLen / hLen) + @as(u1, if (r_ == 0) 0 else 1)); // original: (dkLen + hLen - 1) / hLen
const r = if (r_ == 0) hLen else r_;
const blocks_count = @intCast(u32, std.math.divCeil(usize, dk_len, h_len) catch unreachable);
var r = dk_len % h_len;
if (r == 0) {
r = h_len;
}

// FromSpec:
//
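In the rewritten block computation above, std.math.divCeil expresses the spec's quantities directly; stated compactly (notation mine, not from the diff):

\[
\text{blocks\_count} = \left\lceil \frac{\text{dk\_len}}{\text{h\_len}} \right\rceil,
\qquad
r = \begin{cases} \text{h\_len} & \text{if } \text{dk\_len} \bmod \text{h\_len} = 0,\\ \text{dk\_len} \bmod \text{h\_len} & \text{otherwise,} \end{cases}
\]

and the `dk_len / h_len >= maxInt(u32)` guard conservatively rejects any request whose 32-bit block counter INT(i), which starts at 1, could overflow.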
@@ -118,37 +113,38 @@ pub fn pbkdf2(derivedKey: []u8, password: []const u8, salt: []const u8, rounds:
// Here, INT (i) is a four-octet encoding of the integer i, most
// significant octet first.
//
// 4. Concatenate the blocks and extract the first dkLen octets to
// 4. Concatenate the blocks and extract the first dk_len octets to
// produce a derived key DK:
//
// DK = T_1 || T_2 || ... || T_l<0..r-1>
var block: u32 = 0; // Spec limits to u32
while (block < l) : (block += 1) {
var prevBlock: [hLen]u8 = undefined;
var newBlock: [hLen]u8 = undefined;

var block: u32 = 0;
while (block < blocks_count) : (block += 1) {
var prev_block: [h_len]u8 = undefined;
var new_block: [h_len]u8 = undefined;

// U_1 = PRF (P, S || INT (i))
const blockIndex = mem.toBytes(mem.nativeToBig(u32, block + 1)); // Block index starts at 0001
const block_index = mem.toBytes(mem.nativeToBig(u32, block + 1)); // Block index starts at 0001
var ctx = Prf.init(password);
ctx.update(salt);
ctx.update(blockIndex[0..]);
ctx.final(prevBlock[0..]);
ctx.update(block_index[0..]);
ctx.final(prev_block[0..]);

// Choose portion of DK to write into (T_n) and initialize
const offset = block * hLen;
const blockLen = if (block != l - 1) hLen else r;
const dkBlock: []u8 = derivedKey[offset..][0..blockLen];
mem.copy(u8, dkBlock, prevBlock[0..dkBlock.len]);
const offset = block * h_len;
const block_len = if (block != blocks_count - 1) h_len else r;
const dk_block: []u8 = dk[offset..][0..block_len];
mem.copy(u8, dk_block, prev_block[0..dk_block.len]);

var i: u32 = 1;
while (i < rounds) : (i += 1) {
// U_c = PRF (P, U_{c-1})
Prf.create(&newBlock, prevBlock[0..], password);
mem.copy(u8, prevBlock[0..], newBlock[0..]);
Prf.create(&new_block, prev_block[0..], password);
mem.copy(u8, prev_block[0..], new_block[0..]);

// F (P, S, c, i) = U_1 \xor U_2 \xor ... \xor U_c
for (dkBlock) |_, j| {
dkBlock[j] ^= newBlock[j];
for (dk_block) |_, j| {
dk_block[j] ^= new_block[j];
}
}
}
@@ -158,49 +154,50 @@ const htest = @import("test.zig");
const HmacSha1 = std.crypto.auth.hmac.HmacSha1;

// RFC 6070 PBKDF2 HMAC-SHA1 Test Vectors

test "RFC 6070 one iteration" {
const p = "password";
const s = "salt";
const c = 1;
const dkLen = 20;
const dk_len = 20;

var derivedKey: [dkLen]u8 = undefined;
var dk: [dk_len]u8 = undefined;

try pbkdf2(&derivedKey, p, s, c, HmacSha1);
try pbkdf2(&dk, p, s, c, HmacSha1);

const expected = "0c60c80f961f0e71f3a9b524af6012062fe037a6";

htest.assertEqual(expected, derivedKey[0..]);
htest.assertEqual(expected, dk[0..]);
}

test "RFC 6070 two iterations" {
const p = "password";
const s = "salt";
const c = 2;
const dkLen = 20;
const dk_len = 20;

var derivedKey: [dkLen]u8 = undefined;
var dk: [dk_len]u8 = undefined;

try pbkdf2(&derivedKey, p, s, c, HmacSha1);
try pbkdf2(&dk, p, s, c, HmacSha1);

const expected = "ea6c014dc72d6f8ccd1ed92ace1d41f0d8de8957";

htest.assertEqual(expected, derivedKey[0..]);
htest.assertEqual(expected, dk[0..]);
}

test "RFC 6070 4096 iterations" {
const p = "password";
const s = "salt";
const c = 4096;
const dkLen = 20;
const dk_len = 20;

var derivedKey: [dkLen]u8 = undefined;
var dk: [dk_len]u8 = undefined;

try pbkdf2(&derivedKey, p, s, c, HmacSha1);
try pbkdf2(&dk, p, s, c, HmacSha1);

const expected = "4b007901b765489abead49d926f721d065a429c1";

htest.assertEqual(expected, derivedKey[0..]);
htest.assertEqual(expected, dk[0..]);
}

test "RFC 6070 16,777,216 iterations" {
@@ -212,48 +209,48 @@ test "RFC 6070 16,777,216 iterations" {
const p = "password";
const s = "salt";
const c = 16777216;
const dkLen = 20;
const dk_len = 20;

var derivedKey = [_]u8{0} ** dkLen;
var dk = [_]u8{0} ** dk_len;

try pbkdf2(&derivedKey, p, s, c, HmacSha1);
try pbkdf2(&dk, p, s, c, HmacSha1);

const expected = "eefe3d61cd4da4e4e9945b3d6ba2158c2634e984";

htest.assertEqual(expected, derivedKey[0..]);
htest.assertEqual(expected, dk[0..]);
}

test "RFC 6070 multi-block salt and password" {
const p = "passwordPASSWORDpassword";
const s = "saltSALTsaltSALTsaltSALTsaltSALTsalt";
const c = 4096;
const dkLen = 25;
const dk_len = 25;

var derivedKey: [dkLen]u8 = undefined;
var dk: [dk_len]u8 = undefined;

try pbkdf2(&derivedKey, p, s, c, HmacSha1);
try pbkdf2(&dk, p, s, c, HmacSha1);

const expected = "3d2eec4fe41c849b80c8d83662c0e44a8b291a964cf2f07038";

htest.assertEqual(expected, derivedKey[0..]);
htest.assertEqual(expected, dk[0..]);
}

test "RFC 6070 embedded NUL" {
const p = "pass\x00word";
const s = "sa\x00lt";
const c = 4096;
const dkLen = 16;
const dk_len = 16;

var derivedKey: [dkLen]u8 = undefined;
var dk: [dk_len]u8 = undefined;

try pbkdf2(&derivedKey, p, s, c, HmacSha1);
try pbkdf2(&dk, p, s, c, HmacSha1);

const expected = "56fa6aa75548099dcc37d7f03425e0c3";

htest.assertEqual(expected, derivedKey[0..]);
htest.assertEqual(expected, dk[0..]);
}

test "Very large dkLen" {
test "Very large dk_len" {
// This test allocates 8GB of memory and is expected to take several hours to run.
if (true) {
return error.SkipZigTest;
@@ -261,13 +258,13 @@ test "Very large dkLen" {
const p = "password";
const s = "salt";
const c = 1;
const dkLen = 1 << 33;
const dk_len = 1 << 33;

var derivedKey = try std.testing.allocator.alloc(u8, dkLen);
var dk = try std.testing.allocator.alloc(u8, dk_len);
defer {
std.testing.allocator.free(derivedKey);
std.testing.allocator.free(dk);
}

try pbkdf2(derivedKey, p, s, c, HmacSha1);
// Just verify this doesn't crash with an overflow
try pbkdf2(dk, p, s, c, HmacSha1);
}
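Beyond the RFC 6070 vectors, typical use is just to size the output slice for the key you need; a sketch in the same style as the tests above (parameters illustrative, not from the commit):

test "derive a 32-byte key (illustrative)" {
    var dk: [32]u8 = undefined;
    // Tune rounds so derivation takes roughly 100ms on the target hardware, per the doc comment.
    try pbkdf2(&dk, "correct horse battery staple", "application-salt", 10000, HmacSha1);
}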
@@ -250,24 +250,6 @@ pub fn panicExtra(trace: ?*const builtin.StackTrace, first_trace_addr: ?usize, c
resetSegfaultHandler();
}

if (comptime std.Target.current.isDarwin() and std.Target.current.cpu.arch == .aarch64)
nosuspend {
// As a workaround for not having threadlocal variable support in LLD for this target,
// we have a simpler panic implementation that does not use threadlocal variables.
// TODO https://github.com/ziglang/zig/issues/7527
const stderr = io.getStdErr().writer();
if (@atomicRmw(u8, &panicking, .Add, 1, .SeqCst) == 0) {
stderr.print("panic: " ++ format ++ "\n", args) catch os.abort();
if (trace) |t| {
dumpStackTrace(t.*);
}
dumpCurrentStackTrace(first_trace_addr);
} else {
stderr.print("Panicked during a panic. Aborting.\n", .{}) catch os.abort();
}
os.abort();
};

nosuspend switch (panic_stage) {
0 => {
panic_stage = 1;
lib/std/enums.zig: new file, 1281 lines (diff suppressed because it is too large)
@@ -92,7 +92,7 @@ pub fn join(allocator: *Allocator, paths: []const []const u8) ![]u8 {
/// Naively combines a series of paths with the native path seperator and null terminator.
/// Allocates memory for the result, which must be freed by the caller.
pub fn joinZ(allocator: *Allocator, paths: []const []const u8) ![:0]u8 {
const out = joinSepMaybeZ(allocator, sep, isSep, paths, true);
const out = try joinSepMaybeZ(allocator, sep, isSep, paths, true);
return out[0 .. out.len - 1 :0];
}

@@ -119,6 +119,16 @@ fn testJoinMaybeZPosix(paths: []const []const u8, expected: []const u8, zero: bo
}

test "join" {
{
const actual: []u8 = try join(testing.allocator, &[_][]const u8{});
defer testing.allocator.free(actual);
testing.expectEqualSlices(u8, "", actual);
}
{
const actual: [:0]u8 = try joinZ(testing.allocator, &[_][]const u8{});
defer testing.allocator.free(actual);
testing.expectEqualSlices(u8, "", actual);
}
for (&[_]bool{ false, true }) |zero| {
testJoinMaybeZWindows(&[_][]const u8{}, "", zero);
testJoinMaybeZWindows(&[_][]const u8{ "c:\\a\\b", "c" }, "c:\\a\\b\\c", zero);
@@ -1227,6 +1227,24 @@ pub const S_ATTR_EXT_RELOC = 0x200;
/// section has local relocation entries
pub const S_ATTR_LOC_RELOC = 0x100;

/// template of initial values for TLVs
pub const S_THREAD_LOCAL_REGULAR = 0x11;

/// template of initial values for TLVs
pub const S_THREAD_LOCAL_ZEROFILL = 0x12;

/// TLV descriptors
pub const S_THREAD_LOCAL_VARIABLES = 0x13;

/// pointers to TLV descriptors
pub const S_THREAD_LOCAL_VARIABLE_POINTERS = 0x14;

/// functions to call to initialize TLV values
pub const S_THREAD_LOCAL_INIT_FUNCTION_POINTERS = 0x15;

/// 32-bit offsets to initializers
pub const S_INIT_FUNC_OFFSETS = 0x16;

pub const cpu_type_t = integer_t;
pub const cpu_subtype_t = integer_t;
pub const integer_t = c_int;
@@ -1422,6 +1440,14 @@ pub const EXPORT_SYMBOL_FLAGS_KIND_WEAK_DEFINITION: u8 = 0x04;
pub const EXPORT_SYMBOL_FLAGS_REEXPORT: u8 = 0x08;
pub const EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER: u8 = 0x10;

// An indirect symbol table entry is simply a 32bit index into the symbol table
// to the symbol that the pointer or stub is refering to. Unless it is for a
// non-lazy symbol pointer section for a defined symbol which strip(1) as
// removed. In which case it has the value INDIRECT_SYMBOL_LOCAL. If the
// symbol was also absolute INDIRECT_SYMBOL_ABS is or'ed with that.
pub const INDIRECT_SYMBOL_LOCAL: u32 = 0x80000000;
pub const INDIRECT_SYMBOL_ABS: u32 = 0x40000000;

// Codesign consts and structs taken from:
// https://opensource.apple.com/source/xnu/xnu-6153.81.5/osfmk/kern/cs_blobs.h.auto.html

@@ -1589,3 +1615,17 @@ pub const GenericBlob = extern struct {
/// Total length of blob
length: u32,
};

/// The LC_DATA_IN_CODE load commands uses a linkedit_data_command
/// to point to an array of data_in_code_entry entries. Each entry
/// describes a range of data in a code section.
pub const data_in_code_entry = extern struct {
/// From mach_header to start of data range.
offset: u32,

/// Number of bytes in data range.
length: u16,

/// A DICE_KIND value.
kind: u16,
};
@@ -888,19 +888,20 @@ pub fn Vector(comptime len: u32, comptime child: type) type {
/// Given a type and value, cast the value to the type as c would.
/// This is for translate-c and is not intended for general use.
pub fn cast(comptime DestType: type, target: anytype) DestType {
const TargetType = @TypeOf(target);
// this function should behave like transCCast in translate-c, except it's for macros
const SourceType = @TypeOf(target);
switch (@typeInfo(DestType)) {
.Pointer => |dest_ptr| {
switch (@typeInfo(TargetType)) {
.Pointer => {
switch (@typeInfo(SourceType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
.Pointer => |ptr| {
return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
.Pointer => {
return castPtr(DestType, target);
},
.Optional => |opt| {
if (@typeInfo(opt.child) == .Pointer) {
return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
return castPtr(DestType, target);
}
},
else => {},
@@ -908,17 +909,16 @@ pub fn cast(comptime DestType: type, target: anytype) DestType {
},
.Optional => |dest_opt| {
if (@typeInfo(dest_opt.child) == .Pointer) {
const dest_ptr = @typeInfo(dest_opt.child).Pointer;
switch (@typeInfo(TargetType)) {
switch (@typeInfo(SourceType)) {
.Int, .ComptimeInt => {
return @intToPtr(DestType, target);
},
.Pointer => {
return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
return castPtr(DestType, target);
},
.Optional => |target_opt| {
if (@typeInfo(target_opt.child) == .Pointer) {
return @ptrCast(DestType, @alignCast(dest_ptr.alignment, target));
return castPtr(DestType, target);
}
},
else => {},
@@ -926,25 +926,25 @@ pub fn cast(comptime DestType: type, target: anytype) DestType {
}
},
.Enum => {
if (@typeInfo(TargetType) == .Int or @typeInfo(TargetType) == .ComptimeInt) {
if (@typeInfo(SourceType) == .Int or @typeInfo(SourceType) == .ComptimeInt) {
return @intToEnum(DestType, target);
}
},
.Int, .ComptimeInt => {
switch (@typeInfo(TargetType)) {
.Int => {
switch (@typeInfo(SourceType)) {
.Pointer => {
return @intCast(DestType, @ptrToInt(target));
return castInt(DestType, @ptrToInt(target));
},
.Optional => |opt| {
if (@typeInfo(opt.child) == .Pointer) {
return @intCast(DestType, @ptrToInt(target));
return castInt(DestType, @ptrToInt(target));
}
},
.Enum => {
return @intCast(DestType, @enumToInt(target));
return castInt(DestType, @enumToInt(target));
},
.Int, .ComptimeInt => {
return @intCast(DestType, target);
.Int => {
return castInt(DestType, target);
},
else => {},
}
@@ -954,6 +954,34 @@ pub fn cast(comptime DestType: type, target: anytype) DestType {
return @as(DestType, target);
}

fn castInt(comptime DestType: type, target: anytype) DestType {
const dest = @typeInfo(DestType).Int;
const source = @typeInfo(@TypeOf(target)).Int;

if (dest.bits < source.bits)
return @bitCast(DestType, @truncate(Int(source.signedness, dest.bits), target))
else
return @bitCast(DestType, @as(Int(source.signedness, dest.bits), target));
}

fn castPtr(comptime DestType: type, target: anytype) DestType {
const dest = ptrInfo(DestType);
const source = ptrInfo(@TypeOf(target));

if (source.is_const and !dest.is_const or source.is_volatile and !dest.is_volatile)
return @intToPtr(DestType, @ptrToInt(target))
else
return @ptrCast(DestType, @alignCast(dest.alignment, target));
}

fn ptrInfo(comptime PtrType: type) TypeInfo.Pointer {
return switch(@typeInfo(PtrType)){
.Optional => |opt_info| @typeInfo(opt_info.child).Pointer,
.Pointer => |ptr_info| ptr_info,
else => unreachable,
};
}

test "std.meta.cast" {
const E = enum(u2) {
Zero,
@@ -977,6 +1005,11 @@ test "std.meta.cast" {
testing.expectEqual(@as(u32, 4), cast(u32, @intToPtr(?*u32, 4)));
testing.expectEqual(@as(u32, 10), cast(u32, @as(u64, 10)));
testing.expectEqual(@as(u8, 2), cast(u8, E.Two));

testing.expectEqual(@bitCast(i32, @as(u32, 0x8000_0000)), cast(i32, @as(u32, 0x8000_0000)));

testing.expectEqual(@intToPtr(*u8, 2), cast(*u8, @intToPtr(*const u8, 2)));
testing.expectEqual(@intToPtr(*u8, 2), cast(*u8, @intToPtr(*volatile u8, 2)));
}

/// Given a value returns its size as C's sizeof operator would.
@@ -408,6 +408,84 @@ test "std.meta.trait.isTuple" {
testing.expect(isTuple(@TypeOf(t3)));
}

/// Returns true if the passed type will coerce to []const u8.
/// Any of the following are considered strings:
/// ```
/// []const u8, [:S]const u8, *const [N]u8, *const [N:S]u8,
/// []u8, [:S]u8, *[:S]u8, *[N:S]u8.
/// ```
/// These types are not considered strings:
/// ```
/// u8, [N]u8, [*]const u8, [*:0]const u8,
/// [*]const [N]u8, []const u16, []const i8,
/// *const u8, ?[]const u8, ?*const [N]u8.
/// ```
pub fn isZigString(comptime T: type) bool {
comptime {
// Only pointer types can be strings, no optionals
const info = @typeInfo(T);
if (info != .Pointer) return false;

const ptr = &info.Pointer;
// Check for CV qualifiers that would prevent coerction to []const u8
if (ptr.is_volatile or ptr.is_allowzero) return false;

// If it's already a slice, simple check.
if (ptr.size == .Slice) {
return ptr.child == u8;
}

// Otherwise check if it's an array type that coerces to slice.
if (ptr.size == .One) {
const child = @typeInfo(ptr.child);
if (child == .Array) {
const arr = &child.Array;
return arr.child == u8;
}
}

return false;
}
}

test "std.meta.trait.isZigString" {
testing.expect(isZigString([]const u8));
testing.expect(isZigString([]u8));
testing.expect(isZigString([:0]const u8));
testing.expect(isZigString([:0]u8));
testing.expect(isZigString([:5]const u8));
testing.expect(isZigString([:5]u8));
testing.expect(isZigString(*const [0]u8));
testing.expect(isZigString(*[0]u8));
testing.expect(isZigString(*const [0:0]u8));
testing.expect(isZigString(*[0:0]u8));
testing.expect(isZigString(*const [0:5]u8));
testing.expect(isZigString(*[0:5]u8));
testing.expect(isZigString(*const [10]u8));
testing.expect(isZigString(*[10]u8));
testing.expect(isZigString(*const [10:0]u8));
testing.expect(isZigString(*[10:0]u8));
testing.expect(isZigString(*const [10:5]u8));
testing.expect(isZigString(*[10:5]u8));

testing.expect(!isZigString(u8));
testing.expect(!isZigString([4]u8));
testing.expect(!isZigString([4:0]u8));
testing.expect(!isZigString([*]const u8));
testing.expect(!isZigString([*]const [4]u8));
testing.expect(!isZigString([*c]const u8));
testing.expect(!isZigString([*c]const [4]u8));
testing.expect(!isZigString([*:0]const u8));
testing.expect(!isZigString([*:0]const u8));
testing.expect(!isZigString(*[]const u8));
testing.expect(!isZigString(?[]const u8));
testing.expect(!isZigString(?*const [4]u8));
testing.expect(!isZigString([]allowzero u8));
testing.expect(!isZigString([]volatile u8));
testing.expect(!isZigString(*allowzero [4]u8));
testing.expect(!isZigString(*volatile [4]u8));
}

pub fn hasDecls(comptime T: type, comptime names: anytype) bool {
inline for (names) |name| {
if (!@hasDecl(T, name))
@@ -2879,7 +2879,7 @@ pub fn bind(sock: socket_t, addr: *const sockaddr, len: socklen_t) BindError!voi
unreachable;
}

const ListenError = error{
pub const ListenError = error{
/// Another socket is already listening on the same port.
/// For Internet domain sockets, the socket referred to by sockfd had not previously
/// been bound to an address and, upon attempting to bind it to an ephemeral port, it
@@ -5827,7 +5827,7 @@ pub fn tcsetattr(handle: fd_t, optional_action: TCSA, termios_p: termios) Termio
}
}

const IoCtl_SIOCGIFINDEX_Error = error{
pub const IoCtl_SIOCGIFINDEX_Error = error{
FileSystem,
InterfaceNotFound,
} || UnexpectedError;
@@ -20,6 +20,9 @@ pub const ComptimeStringMap = @import("comptime_string_map.zig").ComptimeStringM
pub const DynLib = @import("dynamic_library.zig").DynLib;
pub const DynamicBitSet = bit_set.DynamicBitSet;
pub const DynamicBitSetUnmanaged = bit_set.DynamicBitSetUnmanaged;
pub const EnumArray = enums.EnumArray;
pub const EnumMap = enums.EnumMap;
pub const EnumSet = enums.EnumSet;
pub const HashMap = hash_map.HashMap;
pub const HashMapUnmanaged = hash_map.HashMapUnmanaged;
pub const MultiArrayList = @import("multi_array_list.zig").MultiArrayList;
@@ -54,6 +57,7 @@ pub const cstr = @import("cstr.zig");
pub const debug = @import("debug.zig");
pub const dwarf = @import("dwarf.zig");
pub const elf = @import("elf.zig");
pub const enums = @import("enums.zig");
pub const event = @import("event.zig");
pub const fifo = @import("fifo.zig");
pub const fmt = @import("fmt.zig");
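This is where the enum arrays mentioned in the commit message enter std: EnumArray, EnumMap and EnumSet are re-exported from the new lib/std/enums.zig, whose diff is suppressed above. A minimal sketch of the kind of API this provides, assuming the EnumArray surface (initFill/get/set) found in std; not taken from this diff:

const std = @import("std");

test "EnumArray (illustrative)" {
    const Suit = enum { clubs, diamonds, hearts, spades };
    var counts = std.EnumArray(Suit, u32).initFill(0);
    counts.set(.hearts, 3);
    std.debug.assert(counts.get(.hearts) == 3);
    std.debug.assert(counts.get(.clubs) == 0);
}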
@@ -4,6 +4,31 @@
// The MIT license requires this copyright notice to be included in all copies
// and substantial portions of the software.

test "zig fmt: respect line breaks in struct field value declaration" {
try testCanonical(
\\const Foo = struct {
\\ bar: u32 =
\\ 42,
\\ bar: u32 =
\\ // a comment
\\ 42,
\\ bar: u32 =
\\ 42,
\\ // a comment
\\ bar: []const u8 =
\\ \\ foo
\\ \\ bar
\\ \\ baz
\\ ,
\\ bar: u32 =
\\ blk: {
\\ break :blk 42;
\\ },
\\};
\\
);
}

// TODO Remove this after zig 0.9.0 is released.
test "zig fmt: rewrite inline functions as callconv(.Inline)" {
try testTransform(
@@ -3038,6 +3063,54 @@ test "zig fmt: switch" {
\\}
\\
);

try testTransform(
\\test {
\\ switch (x) {
\\ foo =>
\\ "bar",
\\ }
\\}
\\
,
\\test {
\\ switch (x) {
\\ foo => "bar",
\\ }
\\}
\\
);
}

test "zig fmt: switch multiline string" {
try testCanonical(
\\test "switch multiline string" {
\\ const x: u32 = 0;
\\ const str = switch (x) {
\\ 1 => "one",
\\ 2 =>
\\ \\ Comma after the multiline string
\\ \\ is needed
\\ ,
\\ 3 => "three",
\\ else => "else",
\\ };
\\
\\ const Union = union(enum) {
\\ Int: i64,
\\ Float: f64,
\\ };
\\
\\ const str = switch (u) {
\\ Union.Int => |int|
\\ \\ Comma after the multiline string
\\ \\ is needed
\\ ,
\\ Union.Float => |*float| unreachable,
\\ };
\\}
\\
);
}

test "zig fmt: while" {
@@ -1159,8 +1159,29 @@ fn renderContainerField(
try renderToken(ais, tree, rparen_token, .space); // )
}
const eq_token = tree.firstToken(field.ast.value_expr) - 1;
try renderToken(ais, tree, eq_token, .space); // =
return renderExpressionComma(gpa, ais, tree, field.ast.value_expr, space); // value
const eq_space: Space = if (tree.tokensOnSameLine(eq_token, eq_token + 1)) .space else .newline;
{
ais.pushIndent();
try renderToken(ais, tree, eq_token, eq_space); // =
ais.popIndent();
}

if (eq_space == .space)
return renderExpressionComma(gpa, ais, tree, field.ast.value_expr, space); // value

const token_tags = tree.tokens.items(.tag);
const maybe_comma = tree.lastToken(field.ast.value_expr) + 1;

if (token_tags[maybe_comma] == .comma) {
ais.pushIndent();
try renderExpression(gpa, ais, tree, field.ast.value_expr, .none); // value
ais.popIndent();
try renderToken(ais, tree, maybe_comma, space);
} else {
ais.pushIndent();
try renderExpression(gpa, ais, tree, field.ast.value_expr, space); // value
ais.popIndent();
}
}

fn renderBuiltinCall(

@@ -1423,6 +1444,7 @@ fn renderSwitchCase(
switch_case: ast.full.SwitchCase,
space: Space,
) Error!void {
const node_tags = tree.nodes.items(.tag);
const token_tags = tree.tokens.items(.tag);
const trailing_comma = token_tags[switch_case.ast.arrow_token - 1] == .comma;

@@ -1445,17 +1467,23 @@ fn renderSwitchCase(
}

// Render the arrow and everything after it
try renderToken(ais, tree, switch_case.ast.arrow_token, .space);
const pre_target_space = if (node_tags[switch_case.ast.target_expr] == .multiline_string_literal)
// Newline gets inserted when rendering the target expr.
Space.none
else
Space.space;
const after_arrow_space: Space = if (switch_case.payload_token == null) pre_target_space else .space;
try renderToken(ais, tree, switch_case.ast.arrow_token, after_arrow_space);

if (switch_case.payload_token) |payload_token| {
try renderToken(ais, tree, payload_token - 1, .none); // pipe
if (token_tags[payload_token] == .asterisk) {
try renderToken(ais, tree, payload_token, .none); // asterisk
try renderToken(ais, tree, payload_token + 1, .none); // identifier
try renderToken(ais, tree, payload_token + 2, .space); // pipe
try renderToken(ais, tree, payload_token + 2, pre_target_space); // pipe
} else {
try renderToken(ais, tree, payload_token, .none); // identifier
try renderToken(ais, tree, payload_token + 1, .space); // pipe
try renderToken(ais, tree, payload_token + 1, pre_target_space); // pipe
}
}
@@ -477,7 +477,7 @@ pub const list = list: {
"@intCast",
.{
.tag = .int_cast,
.param_count = 1,
.param_count = 2,
},
},
.{
@@ -537,6 +537,11 @@ pub const FunctionType = opaque {
extern fn ZigClangFunctionType_getReturnType(*const FunctionType) QualType;
};

pub const GenericSelectionExpr = opaque {
pub const getResultExpr = ZigClangGenericSelectionExpr_getResultExpr;
extern fn ZigClangGenericSelectionExpr_getResultExpr(*const GenericSelectionExpr) *const Expr;
};

pub const IfStmt = opaque {
pub const getThen = ZigClangIfStmt_getThen;
extern fn ZigClangIfStmt_getThen(*const IfStmt) *const Stmt;
src/codegen.zig (192 changed lines)
@@ -2133,9 +2133,12 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
if (inst.func.value()) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const text_segment = &macho_file.load_commands.items[macho_file.text_segment_cmd_index.?].Segment;
const got = &text_segment.sections.items[macho_file.got_section_index.?];
const got_addr = got.addr + func.owner_decl.link.macho.offset_table_index * @sizeOf(u64);
const got_addr = blk: {
const seg = macho_file.load_commands.items[macho_file.data_const_segment_cmd_index.?].Segment;
const got = seg.sections.items[macho_file.got_section_index.?];
break :blk got.addr + func.owner_decl.link.macho.offset_table_index * @sizeOf(u64);
};
log.debug("got_addr = 0x{x}", .{got_addr});
switch (arch) {
.x86_64 => {
try self.genSetReg(inst.base.src, Type.initTag(.u32), .rax, .{ .memory = got_addr });
@@ -2153,8 +2156,8 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
const decl = func_payload.data;
const decl_name = try std.fmt.allocPrint(self.bin_file.allocator, "_{s}", .{decl.name});
defer self.bin_file.allocator.free(decl_name);
const already_defined = macho_file.extern_lazy_symbols.contains(decl_name);
const symbol: u32 = if (macho_file.extern_lazy_symbols.getIndex(decl_name)) |index|
const already_defined = macho_file.lazy_imports.contains(decl_name);
const symbol: u32 = if (macho_file.lazy_imports.getIndex(decl_name)) |index|
@intCast(u32, index)
else
try macho_file.addExternSymbol(decl_name);
@@ -3304,80 +3307,32 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
.memory => |addr| {
if (self.bin_file.options.pie) {
// For MachO, the binary, with the exception of object files, has to be a PIE.
// Therefore we cannot load an absolute address.
// Instead, we need to make use of PC-relative addressing.
if (reg.id() == 0) { // x0 is special-cased
// TODO This needs to be optimised in the stack usage (perhaps use a shadow stack
// like described here:
// https://community.arm.com/developer/ip-products/processors/b/processors-ip-blog/posts/using-the-stack-in-aarch64-implementing-push-and-pop)
// str x28, [sp, #-16]
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.str(.x28, Register.sp, .{
.offset = Instruction.LoadStoreOffset.imm_pre_index(-16),
}).toU32());
// adr x28, #8
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.adr(.x28, 8).toU32());
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try macho_file.pie_fixups.append(self.bin_file.allocator, .{
.address = addr,
.start = self.code.items.len,
.len = 4,
});
} else {
return self.fail(src, "TODO implement genSetReg for PIE on this platform", .{});
}
// b [label]
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.b(0).toU32());
// mov r, x0
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(
reg,
.xzr,
.x0,
Instruction.Shift.none,
).toU32());
// ldr x28, [sp], #16
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(.x28, .{
.register = .{
.rn = Register.sp,
.offset = Instruction.LoadStoreOffset.imm_post_index(16),
},
}).toU32());
// PC-relative displacement to the entry in the GOT table.
// TODO we should come up with our own, backend independent relocation types
// which each backend (Elf, MachO, etc.) would then translate into an actual
// fixup when linking.
// adrp reg, pages
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try macho_file.pie_fixups.append(self.bin_file.allocator, .{
.target_addr = addr,
.offset = self.code.items.len,
.size = 4,
});
} else {
// stp x0, x28, [sp, #-16]
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.stp(
.x0,
.x28,
Register.sp,
Instruction.LoadStorePairOffset.pre_index(-16),
).toU32());
// adr x28, #8
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.adr(.x28, 8).toU32());
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try macho_file.pie_fixups.append(self.bin_file.allocator, .{
.address = addr,
.start = self.code.items.len,
.len = 4,
});
} else {
return self.fail(src, "TODO implement genSetReg for PIE on this platform", .{});
}
// b [label]
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.b(0).toU32());
// mov r, x0
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.orr(
reg,
.xzr,
.x0,
Instruction.Shift.none,
).toU32());
// ldp x0, x28, [sp, #16]
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldp(
.x0,
.x28,
Register.sp,
Instruction.LoadStorePairOffset.post_index(16),
).toU32());
return self.fail(src, "TODO implement genSetReg for PIE GOT indirection on this platform", .{});
}
mem.writeIntLittle(
u32,
try self.code.addManyAsArray(4),
Instruction.adrp(reg, 0).toU32(),
);
// ldr reg, reg, offset
mem.writeIntLittle(u32, try self.code.addManyAsArray(4), Instruction.ldr(reg, .{
.register = .{
.rn = reg,
.offset = Instruction.LoadStoreOffset.imm(0),
},
}).toU32());
} else {
// The value is in memory at a hard-coded address.
// If the type is a pointer, it means the pointer address is at this memory location.
@@ -3561,62 +3516,31 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
},
.memory => |x| {
if (self.bin_file.options.pie) {
// For MachO, the binary, with the exception of object files, has to be a PIE.
// Therefore, we cannot load an absolute address.
assert(x > math.maxInt(u32)); // 32bit direct addressing is not supported by MachO.
// The plan here is to use unconditional relative jump to GOT entry, where we store
// pre-calculated and stored effective address to load into the target register.
// We leave the actual displacement information empty (0-padded) and fixing it up
// later in the linker.
if (reg.id() == 0) { // %rax is special-cased
try self.code.ensureCapacity(self.code.items.len + 5);
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try macho_file.pie_fixups.append(self.bin_file.allocator, .{
.address = x,
.start = self.code.items.len,
.len = 5,
});
} else {
return self.fail(src, "TODO implement genSetReg for PIE on this platform", .{});
}
// call [label]
self.code.appendSliceAssumeCapacity(&[_]u8{
0xE8,
0x0,
0x0,
0x0,
0x0,
// RIP-relative displacement to the entry in the GOT table.
// TODO we should come up with our own, backend independent relocation types
// which each backend (Elf, MachO, etc.) would then translate into an actual
// fixup when linking.
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try macho_file.pie_fixups.append(self.bin_file.allocator, .{
.target_addr = x,
.offset = self.code.items.len + 3,
.size = 4,
});
} else {
try self.code.ensureCapacity(self.code.items.len + 10);
// push %rax
self.code.appendSliceAssumeCapacity(&[_]u8{0x50});
if (self.bin_file.cast(link.File.MachO)) |macho_file| {
try macho_file.pie_fixups.append(self.bin_file.allocator, .{
.address = x,
.start = self.code.items.len,
.len = 5,
});
} else {
return self.fail(src, "TODO implement genSetReg for PIE on this platform", .{});
}
// call [label]
self.code.appendSliceAssumeCapacity(&[_]u8{
0xE8,
0x0,
0x0,
0x0,
0x0,
});
// mov %r, %rax
self.code.appendSliceAssumeCapacity(&[_]u8{
0x48,
0x89,
0xC0 | @as(u8, reg.id()),
});
// pop %rax
self.code.appendSliceAssumeCapacity(&[_]u8{0x58});
return self.fail(src, "TODO implement genSetReg for PIE GOT indirection on this platform", .{});
}
try self.code.ensureCapacity(self.code.items.len + 7);
self.rex(.{ .w = reg.size() == 64, .r = reg.isExtended() });
self.code.appendSliceAssumeCapacity(&[_]u8{
0x8D,
0x05 | (@as(u8, reg.id() & 0b111) << 3),
});
mem.writeIntLittle(u32, self.code.addManyAsArrayAssumeCapacity(4), 0);

try self.code.ensureCapacity(self.code.items.len + 3);
self.rex(.{ .w = reg.size() == 64, .b = reg.isExtended(), .r = reg.isExtended() });
const RM = (@as(u8, reg.id() & 0b111) << 3) | @truncate(u3, reg.id());
self.code.appendSliceAssumeCapacity(&[_]u8{ 0x8B, RM });
} else if (x <= math.maxInt(u32)) {
// Moving from memory to a register is a variant of `8B /r`.
// Since we're using 64-bit moves, we require a REX.
@@ -3779,9 +3703,11 @@ fn Function(comptime arch: std.Target.Cpu.Arch) type {
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
const decl = payload.data;
const text_segment = &macho_file.load_commands.items[macho_file.text_segment_cmd_index.?].Segment;
const got = &text_segment.sections.items[macho_file.got_section_index.?];
const got_addr = got.addr + decl.link.macho.offset_table_index * ptr_bytes;
const got_addr = blk: {
const seg = macho_file.load_commands.items[macho_file.data_const_segment_cmd_index.?].Segment;
const got = seg.sections.items[macho_file.got_section_index.?];
break :blk got.addr + decl.link.macho.offset_table_index * ptr_bytes;
};
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const decl = payload.data;
@@ -221,7 +221,8 @@ pub const Instruction = union(enum) {
offset: u12,
opc: u2,
op1: u2,
fixed: u4 = 0b111_0,
v: u1,
fixed: u3 = 0b111,
size: u2,
},
LoadStorePairOfRegisters: packed struct {
@@ -505,6 +506,7 @@ pub const Instruction = union(enum) {
.offset = offset.toU12(),
.opc = opc,
.op1 = op1,
.v = 0,
.size = 0b10,
},
};
@@ -517,6 +519,7 @@ pub const Instruction = union(enum) {
.offset = offset.toU12(),
.opc = opc,
.op1 = op1,
.v = 0,
.size = 0b11,
},
};
@@ -219,7 +219,7 @@ pub const LLVMIRModule = struct {

var error_message: [*:0]const u8 = undefined;
var target: *const llvm.Target = undefined;
if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message)) {
if (llvm.Target.getFromTriple(llvm_target_triple.ptr, &target, &error_message).toBool()) {
defer llvm.disposeMessage(error_message);

const stderr = std.io.getStdErr().writer();
@@ -303,7 +303,7 @@ pub const LLVMIRModule = struct {
// verifyModule always allocs the error_message even if there is no error
defer llvm.disposeMessage(error_message);

if (self.llvm_module.verify(.ReturnStatus, &error_message)) {
if (self.llvm_module.verify(.ReturnStatus, &error_message).toBool()) {
const stderr = std.io.getStdErr().writer();
try stderr.print("broken LLVM module found: {s}\nThis is a bug in the Zig compiler.", .{error_message});
return error.BrokenLLVMModule;
@@ -319,7 +319,7 @@ pub const LLVMIRModule = struct {
object_pathZ.ptr,
.ObjectFile,
&error_message,
)) {
).toBool()) {
defer llvm.disposeMessage(error_message);

const stderr = std.io.getStdErr().writer();
@@ -614,7 +614,7 @@ pub const LLVMIRModule = struct {

var indices: [2]*const llvm.Value = .{
index_type.constNull(),
index_type.constInt(1, false),
index_type.constInt(1, .False),
};

return self.builder.buildLoad(self.builder.buildInBoundsGEP(operand, &indices, 2, ""), "");
@@ -676,7 +676,7 @@ pub const LLVMIRModule = struct {
const signed = inst.base.ty.isSignedInt();
// TODO: Should we use intcast here or just a simple bitcast?
// LLVM does truncation vs bitcast (+signed extension) in the intcast depending on the sizes
return self.builder.buildIntCast2(val, try self.getLLVMType(inst.base.ty, inst.base.src), signed, "");
return self.builder.buildIntCast2(val, try self.getLLVMType(inst.base.ty, inst.base.src), llvm.Bool.fromBool(signed), "");
}

fn genBitCast(self: *LLVMIRModule, inst: *Inst.UnOp) !?*const llvm.Value {
@@ -782,7 +782,7 @@ pub const LLVMIRModule = struct {
if (bigint.limbs.len != 1) {
return self.fail(src, "TODO implement bigger bigint", .{});
}
const llvm_int = llvm_type.constInt(bigint.limbs[0], false);
const llvm_int = llvm_type.constInt(bigint.limbs[0], .False);
if (!bigint.positive) {
return llvm.constNeg(llvm_int);
}
@@ -820,7 +820,7 @@ pub const LLVMIRModule = struct {
return self.fail(src, "TODO handle other sentinel values", .{});
} else false;

return self.context.constString(payload.data.ptr, @intCast(c_uint, payload.data.len), !zero_sentinel);
return self.context.constString(payload.data.ptr, @intCast(c_uint, payload.data.len), llvm.Bool.fromBool(!zero_sentinel));
} else {
return self.fail(src, "TODO handle more array values", .{});
}
@@ -836,13 +836,13 @@ pub const LLVMIRModule = struct {
llvm_child_type.constNull(),
self.context.intType(1).constNull(),
};
return self.context.constStruct(&optional_values, 2, false);
return self.context.constStruct(&optional_values, 2, .False);
} else {
var optional_values: [2]*const llvm.Value = .{
try self.genTypedValue(src, .{ .ty = child_type, .val = tv.val }),
self.context.intType(1).constAllOnes(),
};
return self.context.constStruct(&optional_values, 2, false);
return self.context.constStruct(&optional_values, 2, .False);
}
} else {
return self.fail(src, "TODO implement const of optional pointer", .{});
@@ -882,7 +882,7 @@ pub const LLVMIRModule = struct {
try self.getLLVMType(child_type, src),
self.context.intType(1),
};
return self.context.structType(&optional_types, 2, false);
return self.context.structType(&optional_types, 2, .False);
} else {
return self.fail(src, "TODO implement optional pointers as actual pointers", .{});
}
@@ -934,7 +934,7 @@ pub const LLVMIRModule = struct {
try self.getLLVMType(return_type, src),
if (fn_param_len == 0) null else llvm_param.ptr,
@intCast(c_uint, fn_param_len),
false,
.False,
);
const llvm_fn = self.llvm_module.addFunction(func.name, fn_type);
@@ -1,7 +1,20 @@
//! We do this instead of @cImport because the self-hosted compiler is easier
//! to bootstrap if it does not depend on translate-c.

const LLVMBool = bool;
/// Do not compare directly to .True, use toBool() instead.
pub const Bool = enum(c_int) {
False,
True,
_,

pub fn fromBool(b: bool) Bool {
return @intToEnum(Bool, @boolToInt(b));
}

pub fn toBool(b: Bool) bool {
return b != .False;
}
};
pub const AttributeIndex = c_uint;

/// Make sure to use the *InContext functions instead of the global ones.
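Because the C API's LLVMBool is an int that may carry any non-zero value for "true", the wrapper above is deliberately non-exhaustive and call sites convert with fromBool/toBool instead of comparing against .True. A small illustrative check, written as if placed in this bindings file; not part of the commit:

test "Bool helpers (illustrative)" {
    const std = @import("std");
    std.debug.assert(Bool.fromBool(true).toBool());
    std.debug.assert(!Bool.fromBool(false).toBool());
    // A nonzero value other than 1 coming back from LLVM still reads as true.
    std.debug.assert(@intToEnum(Bool, 2).toBool());
}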
@@ -22,13 +35,13 @@ pub const Context = opaque {
extern fn LLVMVoidTypeInContext(C: *const Context) *const Type;

pub const structType = LLVMStructTypeInContext;
extern fn LLVMStructTypeInContext(C: *const Context, ElementTypes: [*]*const Type, ElementCount: c_uint, Packed: LLVMBool) *const Type;
extern fn LLVMStructTypeInContext(C: *const Context, ElementTypes: [*]*const Type, ElementCount: c_uint, Packed: Bool) *const Type;

pub const constString = LLVMConstStringInContext;
extern fn LLVMConstStringInContext(C: *const Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: LLVMBool) *const Value;
extern fn LLVMConstStringInContext(C: *const Context, Str: [*]const u8, Length: c_uint, DontNullTerminate: Bool) *const Value;

pub const constStruct = LLVMConstStructInContext;
extern fn LLVMConstStructInContext(C: *const Context, ConstantVals: [*]*const Value, Count: c_uint, Packed: LLVMBool) *const Value;
extern fn LLVMConstStructInContext(C: *const Context, ConstantVals: [*]*const Value, Count: c_uint, Packed: Bool) *const Value;

pub const createBasicBlock = LLVMCreateBasicBlockInContext;
extern fn LLVMCreateBasicBlockInContext(C: *const Context, Name: [*:0]const u8) *const BasicBlock;
@@ -59,7 +72,7 @@ pub const Value = opaque {

pub const Type = opaque {
pub const functionType = LLVMFunctionType;
extern fn LLVMFunctionType(ReturnType: *const Type, ParamTypes: ?[*]*const Type, ParamCount: c_uint, IsVarArg: LLVMBool) *const Type;
extern fn LLVMFunctionType(ReturnType: *const Type, ParamTypes: ?[*]*const Type, ParamCount: c_uint, IsVarArg: Bool) *const Type;

pub const constNull = LLVMConstNull;
extern fn LLVMConstNull(Ty: *const Type) *const Value;
@@ -68,7 +81,7 @@ pub const Type = opaque {
extern fn LLVMConstAllOnes(Ty: *const Type) *const Value;

pub const constInt = LLVMConstInt;
extern fn LLVMConstInt(IntTy: *const Type, N: c_ulonglong, SignExtend: LLVMBool) *const Value;
extern fn LLVMConstInt(IntTy: *const Type, N: c_ulonglong, SignExtend: Bool) *const Value;

pub const constArray = LLVMConstArray;
extern fn LLVMConstArray(ElementTy: *const Type, ConstantVals: ?[*]*const Value, Length: c_uint) *const Value;
@@ -91,7 +104,7 @@ pub const Module = opaque {
extern fn LLVMDisposeModule(*const Module) void;

pub const verify = LLVMVerifyModule;
extern fn LLVMVerifyModule(*const Module, Action: VerifierFailureAction, OutMessage: *[*:0]const u8) LLVMBool;
extern fn LLVMVerifyModule(*const Module, Action: VerifierFailureAction, OutMessage: *[*:0]const u8) Bool;

pub const addFunction = LLVMAddFunction;
extern fn LLVMAddFunction(*const Module, Name: [*:0]const u8, FunctionTy: *const Type) *const Value;
@@ -191,7 +204,7 @@ pub const Builder = opaque {
extern fn LLVMBuildNUWSub(*const Builder, LHS: *const Value, RHS: *const Value, Name: [*:0]const u8) *const Value;

pub const buildIntCast2 = LLVMBuildIntCast2;
extern fn LLVMBuildIntCast2(*const Builder, Val: *const Value, DestTy: *const Type, IsSigned: LLVMBool, Name: [*:0]const u8) *const Value;
extern fn LLVMBuildIntCast2(*const Builder, Val: *const Value, DestTy: *const Type, IsSigned: Bool, Name: [*:0]const u8) *const Value;

pub const buildBitCast = LLVMBuildBitCast;
extern fn LLVMBuildBitCast(*const Builder, Val: *const Value, DestTy: *const Type, Name: [*:0]const u8) *const Value;
@@ -258,7 +271,7 @@ pub const TargetMachine = opaque {
Filename: [*:0]const u8,
codegen: CodeGenFileType,
ErrorMessage: *[*:0]const u8,
) LLVMBool;
) Bool;
};

pub const CodeMode = extern enum {
|
||||
@ -295,7 +308,7 @@ pub const CodeGenFileType = extern enum {
|
||||
|
||||
pub const Target = opaque {
|
||||
pub const getFromTriple = LLVMGetTargetFromTriple;
|
||||
extern fn LLVMGetTargetFromTriple(Triple: [*:0]const u8, T: **const Target, ErrorMessage: *[*:0]const u8) LLVMBool;
|
||||
extern fn LLVMGetTargetFromTriple(Triple: [*:0]const u8, T: **const Target, ErrorMessage: *[*:0]const u8) Bool;
|
||||
};
|
||||
|
||||
extern fn LLVMInitializeAArch64TargetInfo() void;
|
||||
|
||||
1085  src/link/MachO.zig  (file diff suppressed because it is too large)
256   src/link/MachO/Archive.zig  (new file)
@ -0,0 +1,256 @@
|
||||
const Archive = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const fs = std.fs;
|
||||
const log = std.log.scoped(.archive);
|
||||
const macho = std.macho;
|
||||
const mem = std.mem;
|
||||
|
||||
const Allocator = mem.Allocator;
|
||||
const Object = @import("Object.zig");
|
||||
const parseName = @import("Zld.zig").parseName;
|
||||
|
||||
usingnamespace @import("commands.zig");
|
||||
|
||||
allocator: *Allocator,
|
||||
file: fs.File,
|
||||
header: ar_hdr,
|
||||
name: []u8,
|
||||
|
||||
objects: std.ArrayListUnmanaged(Object) = .{},
|
||||
|
||||
// Archive files start with the ARMAG identifying string. Then follows a
|
||||
// `struct ar_hdr', and as many bytes of member file data as its `ar_size'
|
||||
// member indicates, for each member file.
|
||||
/// String that begins an archive file.
|
||||
const ARMAG: *const [SARMAG:0]u8 = "!<arch>\n";
|
||||
/// Size of that string.
|
||||
const SARMAG: u4 = 8;
|
||||
|
||||
/// String in ar_fmag at the end of each header.
|
||||
const ARFMAG: *const [2:0]u8 = "`\n";
|
||||
|
||||
const ar_hdr = extern struct {
|
||||
/// Member file name, sometimes / terminated.
|
||||
ar_name: [16]u8,
|
||||
|
||||
/// File date, decimal seconds since Epoch.
|
||||
ar_date: [12]u8,
|
||||
|
||||
/// User ID, in ASCII format.
|
||||
ar_uid: [6]u8,
|
||||
|
||||
/// Group ID, in ASCII format.
|
||||
ar_gid: [6]u8,
|
||||
|
||||
/// File mode, in ASCII octal.
|
||||
ar_mode: [8]u8,
|
||||
|
||||
/// File size, in ASCII decimal.
|
||||
ar_size: [10]u8,
|
||||
|
||||
/// Always contains ARFMAG.
|
||||
ar_fmag: [2]u8,
|
||||
|
||||
const NameOrLength = union(enum) {
|
||||
Name: []const u8,
|
||||
Length: u64,
|
||||
};
|
||||
pub fn nameOrLength(self: ar_hdr) !NameOrLength {
|
||||
const value = getValue(&self.ar_name);
|
||||
const slash_index = mem.indexOf(u8, value, "/") orelse return error.MalformedArchive;
|
||||
const len = value.len;
|
||||
if (slash_index == len - 1) {
|
||||
// Name stored directly
|
||||
return NameOrLength{ .Name = value };
|
||||
} else {
|
||||
// Name follows the header directly and its length is encoded in
|
||||
// the name field.
|
||||
const length = try std.fmt.parseInt(u64, value[slash_index + 1 ..], 10);
|
||||
return NameOrLength{ .Length = length };
|
||||
}
|
||||
}
|
||||
|
||||
pub fn size(self: ar_hdr) !u64 {
|
||||
const value = getValue(&self.ar_size);
|
||||
return std.fmt.parseInt(u64, value, 10);
|
||||
}
|
||||
|
||||
fn getValue(raw: []const u8) []const u8 {
|
||||
return mem.trimRight(u8, raw, &[_]u8{@as(u8, 0x20)});
|
||||
}
|
||||
};
|
||||
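To illustrate the two naming schemes `nameOrLength` distinguishes, a test along these lines could sit next to the struct; it assumes the file's existing `mem` and `assert` imports, and the header bytes are made up for the example:

test "ar_hdr.nameOrLength: inline vs. extended names" {
    var hdr: ar_hdr = undefined;

    // Extended name: "#1/20" means a 20-byte name follows the header.
    mem.set(u8, &hdr.ar_name, 0x20); // ar header fields are space padded
    mem.copy(u8, &hdr.ar_name, "#1/20");
    assert((try hdr.nameOrLength()).Length == 20);

    // Inline name: the '/' is the last non-blank character, so the name is
    // stored directly inside the 16-byte field.
    mem.set(u8, &hdr.ar_name, 0x20);
    mem.copy(u8, &hdr.ar_name, "main.o/");
    assert(mem.eql(u8, (try hdr.nameOrLength()).Name, "main.o/"));
}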
|
||||
pub fn deinit(self: *Archive) void {
|
||||
self.allocator.free(self.name);
|
||||
for (self.objects.items) |*object| {
|
||||
object.deinit();
|
||||
}
|
||||
self.objects.deinit(self.allocator);
|
||||
self.file.close();
|
||||
}
|
||||
|
||||
/// Caller owns the returned Archive instance and is responsible for calling
|
||||
/// `deinit` to free allocated memory.
|
||||
pub fn initFromFile(allocator: *Allocator, arch: std.Target.Cpu.Arch, ar_name: []const u8, file: fs.File) !Archive {
|
||||
var reader = file.reader();
|
||||
var magic = try readMagic(allocator, reader);
|
||||
defer allocator.free(magic);
|
||||
|
||||
if (!mem.eql(u8, magic, ARMAG)) {
|
||||
// Reset file cursor.
|
||||
try file.seekTo(0);
|
||||
return error.NotArchive;
|
||||
}
|
||||
|
||||
const header = try reader.readStruct(ar_hdr);
|
||||
|
||||
if (!mem.eql(u8, &header.ar_fmag, ARFMAG))
|
||||
return error.MalformedArchive;
|
||||
|
||||
var embedded_name = try getName(allocator, header, reader);
|
||||
log.debug("parsing archive '{s}' at '{s}'", .{ embedded_name, ar_name });
|
||||
defer allocator.free(embedded_name);
|
||||
|
||||
var name = try allocator.dupe(u8, ar_name);
|
||||
var self = Archive{
|
||||
.allocator = allocator,
|
||||
.file = file,
|
||||
.header = header,
|
||||
.name = name,
|
||||
};
|
||||
|
||||
var object_offsets = try self.readTableOfContents(reader);
|
||||
defer self.allocator.free(object_offsets);
|
||||
|
||||
var i: usize = 1;
|
||||
while (i < object_offsets.len) : (i += 1) {
|
||||
const offset = object_offsets[i];
|
||||
try reader.context.seekTo(offset);
|
||||
try self.readObject(arch, ar_name, reader);
|
||||
}
|
||||
|
||||
return self;
|
||||
}
|
||||
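Because initFromFile rewinds the file and reports error.NotArchive when the !<arch> magic is missing, a caller can probe an input and fall back to the plain object parser with the same handle. A minimal sketch, assuming it sits next to this file so `Archive`, `Object`, `Allocator`, `std` and `fs` resolve as above; `InputFile` and `parseInputFile` are invented names:

const InputFile = union(enum) {
    archive: Archive,
    object: Object,
};

fn parseInputFile(allocator: *Allocator, arch: std.Target.Cpu.Arch, path: []const u8) !InputFile {
    const file = try fs.cwd().openFile(path, .{});
    errdefer file.close();

    if (Archive.initFromFile(allocator, arch, path, file)) |archive| {
        return InputFile{ .archive = archive };
    } else |err| switch (err) {
        // The file cursor was reset, so the same handle can be re-parsed.
        error.NotArchive => return InputFile{ .object = try Object.initFromFile(allocator, arch, path, file) },
        else => |e| return e,
    }
}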
|
||||
fn readTableOfContents(self: *Archive, reader: anytype) ![]u32 {
|
||||
const symtab_size = try reader.readIntLittle(u32);
|
||||
var symtab = try self.allocator.alloc(u8, symtab_size);
|
||||
defer self.allocator.free(symtab);
|
||||
try reader.readNoEof(symtab);
|
||||
|
||||
const strtab_size = try reader.readIntLittle(u32);
|
||||
var strtab = try self.allocator.alloc(u8, strtab_size);
|
||||
defer self.allocator.free(strtab);
|
||||
try reader.readNoEof(strtab);
|
||||
|
||||
var symtab_stream = std.io.fixedBufferStream(symtab);
|
||||
var symtab_reader = symtab_stream.reader();
|
||||
|
||||
var object_offsets = std.ArrayList(u32).init(self.allocator);
|
||||
try object_offsets.append(0);
|
||||
var last: usize = 0;
|
||||
|
||||
while (true) {
|
||||
const n_strx = symtab_reader.readIntLittle(u32) catch |err| switch (err) {
|
||||
error.EndOfStream => break,
|
||||
else => |e| return e,
|
||||
};
|
||||
const object_offset = try symtab_reader.readIntLittle(u32);
|
||||
|
||||
// TODO Store the table of contents for later reuse.
|
||||
|
||||
// Here, we assume that symbols are NOT sorted in any way, and
|
||||
// they point to objects in sequence.
|
||||
if (object_offsets.items[last] != object_offset) {
|
||||
try object_offsets.append(object_offset);
|
||||
last += 1;
|
||||
}
|
||||
}
|
||||
|
||||
return object_offsets.toOwnedSlice();
|
||||
}
|
||||
|
||||
fn readObject(self: *Archive, arch: std.Target.Cpu.Arch, ar_name: []const u8, reader: anytype) !void {
|
||||
const object_header = try reader.readStruct(ar_hdr);
|
||||
|
||||
if (!mem.eql(u8, &object_header.ar_fmag, ARFMAG))
|
||||
return error.MalformedArchive;
|
||||
|
||||
var object_name = try getName(self.allocator, object_header, reader);
|
||||
log.debug("extracting object '{s}' from archive '{s}'", .{ object_name, self.name });
|
||||
|
||||
const offset = @intCast(u32, try reader.context.getPos());
|
||||
const header = try reader.readStruct(macho.mach_header_64);
|
||||
|
||||
const this_arch: std.Target.Cpu.Arch = switch (header.cputype) {
|
||||
macho.CPU_TYPE_ARM64 => .aarch64,
|
||||
macho.CPU_TYPE_X86_64 => .x86_64,
|
||||
else => |value| {
|
||||
log.err("unsupported cpu architecture 0x{x}", .{value});
|
||||
return error.UnsupportedCpuArchitecture;
|
||||
},
|
||||
};
|
||||
if (this_arch != arch) {
|
||||
log.err("mismatched cpu architecture: found {s}, expected {s}", .{ this_arch, arch });
|
||||
return error.MismatchedCpuArchitecture;
|
||||
}
|
||||
|
||||
// TODO Implement std.fs.File.clone() or similar.
|
||||
var new_file = try fs.cwd().openFile(ar_name, .{});
|
||||
var object = Object{
|
||||
.allocator = self.allocator,
|
||||
.name = object_name,
|
||||
.ar_name = try mem.dupe(self.allocator, u8, ar_name),
|
||||
.file = new_file,
|
||||
.header = header,
|
||||
};
|
||||
|
||||
try object.readLoadCommands(reader, .{ .offset = offset });
|
||||
|
||||
if (object.symtab_cmd_index != null) {
|
||||
try object.readSymtab();
|
||||
try object.readStrtab();
|
||||
}
|
||||
|
||||
if (object.data_in_code_cmd_index != null) try object.readDataInCode();
|
||||
|
||||
log.debug("\n\n", .{});
|
||||
log.debug("{s} defines symbols", .{object.name});
|
||||
for (object.symtab.items) |sym| {
|
||||
const symname = object.getString(sym.n_strx);
|
||||
log.debug("'{s}': {}", .{ symname, sym });
|
||||
}
|
||||
|
||||
try self.objects.append(self.allocator, object);
|
||||
}
|
||||
|
||||
fn readMagic(allocator: *Allocator, reader: anytype) ![]u8 {
|
||||
var magic = std.ArrayList(u8).init(allocator);
|
||||
try magic.ensureCapacity(SARMAG);
|
||||
var i: usize = 0;
|
||||
while (i < SARMAG) : (i += 1) {
|
||||
const next = try reader.readByte();
|
||||
magic.appendAssumeCapacity(next);
|
||||
}
|
||||
return magic.toOwnedSlice();
|
||||
}
|
||||
|
||||
fn getName(allocator: *Allocator, header: ar_hdr, reader: anytype) ![]u8 {
|
||||
const name_or_length = try header.nameOrLength();
|
||||
var name: []u8 = undefined;
|
||||
switch (name_or_length) {
|
||||
.Name => |n| {
|
||||
name = try allocator.dupe(u8, n);
|
||||
},
|
||||
.Length => |len| {
|
||||
var n = try allocator.alloc(u8, len);
|
||||
defer allocator.free(n);
|
||||
try reader.readNoEof(n);
|
||||
const actual_len = mem.indexOfScalar(u8, n, @as(u8, 0));
|
||||
name = try allocator.dupe(u8, n[0..actual_len.?]);
|
||||
},
|
||||
}
|
||||
return name;
|
||||
}
|
||||
@ -839,8 +839,8 @@ fn findFreeSpaceLinkedit(self: *DebugSymbols, object_size: u64, min_alignment: u
|
||||
|
||||
fn relocateSymbolTable(self: *DebugSymbols) !void {
|
||||
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
const nlocals = self.base.local_symbols.items.len;
|
||||
const nglobals = self.base.global_symbols.items.len;
|
||||
const nlocals = self.base.locals.items.len;
|
||||
const nglobals = self.base.globals.items.len;
|
||||
const nsyms = nlocals + nglobals;
|
||||
|
||||
if (symtab.nsyms < nsyms) {
|
||||
@ -875,7 +875,7 @@ pub fn writeLocalSymbol(self: *DebugSymbols, index: usize) !void {
|
||||
const symtab = &self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
const off = symtab.symoff + @sizeOf(macho.nlist_64) * index;
|
||||
log.debug("writing dSym local symbol {} at 0x{x}", .{ index, off });
|
||||
try self.file.pwriteAll(mem.asBytes(&self.base.local_symbols.items[index]), off);
|
||||
try self.file.pwriteAll(mem.asBytes(&self.base.locals.items[index]), off);
|
||||
}
|
||||
|
||||
fn writeStringTable(self: *DebugSymbols) !void {
|
||||
@ -1057,7 +1057,7 @@ pub fn commitDeclDebugInfo(
|
||||
var dbg_info_buffer = &debug_buffers.dbg_info_buffer;
|
||||
var dbg_info_type_relocs = &debug_buffers.dbg_info_type_relocs;
|
||||
|
||||
const symbol = self.base.local_symbols.items[decl.link.macho.local_sym_index];
|
||||
const symbol = self.base.locals.items[decl.link.macho.local_sym_index];
|
||||
const text_block = &decl.link.macho;
|
||||
// If the Decl is a function, we need to update the __debug_line program.
|
||||
const typed_value = decl.typed_value.most_recent.typed_value;
|
||||
|
||||
228   src/link/MachO/Object.zig  (new file)
@ -0,0 +1,228 @@
|
||||
const Object = @This();
|
||||
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const fs = std.fs;
|
||||
const io = std.io;
|
||||
const log = std.log.scoped(.object);
|
||||
const macho = std.macho;
|
||||
const mem = std.mem;
|
||||
|
||||
const Allocator = mem.Allocator;
|
||||
const parseName = @import("Zld.zig").parseName;
|
||||
|
||||
usingnamespace @import("commands.zig");
|
||||
|
||||
allocator: *Allocator,
|
||||
file: fs.File,
|
||||
name: []u8,
|
||||
ar_name: ?[]u8 = null,
|
||||
|
||||
header: macho.mach_header_64,
|
||||
|
||||
load_commands: std.ArrayListUnmanaged(LoadCommand) = .{},
|
||||
|
||||
segment_cmd_index: ?u16 = null,
|
||||
symtab_cmd_index: ?u16 = null,
|
||||
dysymtab_cmd_index: ?u16 = null,
|
||||
build_version_cmd_index: ?u16 = null,
|
||||
data_in_code_cmd_index: ?u16 = null,
|
||||
text_section_index: ?u16 = null,
|
||||
|
||||
// __DWARF segment sections
|
||||
dwarf_debug_info_index: ?u16 = null,
|
||||
dwarf_debug_abbrev_index: ?u16 = null,
|
||||
dwarf_debug_str_index: ?u16 = null,
|
||||
dwarf_debug_line_index: ?u16 = null,
|
||||
dwarf_debug_ranges_index: ?u16 = null,
|
||||
|
||||
symtab: std.ArrayListUnmanaged(macho.nlist_64) = .{},
|
||||
strtab: std.ArrayListUnmanaged(u8) = .{},
|
||||
|
||||
data_in_code_entries: std.ArrayListUnmanaged(macho.data_in_code_entry) = .{},
|
||||
|
||||
pub fn deinit(self: *Object) void {
|
||||
for (self.load_commands.items) |*lc| {
|
||||
lc.deinit(self.allocator);
|
||||
}
|
||||
self.load_commands.deinit(self.allocator);
|
||||
self.symtab.deinit(self.allocator);
|
||||
self.strtab.deinit(self.allocator);
|
||||
self.data_in_code_entries.deinit(self.allocator);
|
||||
self.allocator.free(self.name);
|
||||
if (self.ar_name) |v| {
|
||||
self.allocator.free(v);
|
||||
}
|
||||
self.file.close();
|
||||
}
|
||||
|
||||
/// Caller owns the returned Object instance and is responsible for calling
|
||||
/// `deinit` to free allocated memory.
|
||||
pub fn initFromFile(allocator: *Allocator, arch: std.Target.Cpu.Arch, name: []const u8, file: fs.File) !Object {
|
||||
var reader = file.reader();
|
||||
const header = try reader.readStruct(macho.mach_header_64);
|
||||
|
||||
if (header.filetype != macho.MH_OBJECT) {
|
||||
// Reset file cursor.
|
||||
try file.seekTo(0);
|
||||
return error.NotObject;
|
||||
}
|
||||
|
||||
const this_arch: std.Target.Cpu.Arch = switch (header.cputype) {
|
||||
macho.CPU_TYPE_ARM64 => .aarch64,
|
||||
macho.CPU_TYPE_X86_64 => .x86_64,
|
||||
else => |value| {
|
||||
log.err("unsupported cpu architecture 0x{x}", .{value});
|
||||
return error.UnsupportedCpuArchitecture;
|
||||
},
|
||||
};
|
||||
if (this_arch != arch) {
|
||||
log.err("mismatched cpu architecture: found {s}, expected {s}", .{ this_arch, arch });
|
||||
return error.MismatchedCpuArchitecture;
|
||||
}
|
||||
|
||||
var self = Object{
|
||||
.allocator = allocator,
|
||||
.name = try allocator.dupe(u8, name),
|
||||
.file = file,
|
||||
.header = header,
|
||||
};
|
||||
|
||||
try self.readLoadCommands(reader, .{});
|
||||
|
||||
if (self.symtab_cmd_index != null) {
|
||||
try self.readSymtab();
|
||||
try self.readStrtab();
|
||||
}
|
||||
|
||||
if (self.data_in_code_cmd_index != null) try self.readDataInCode();
|
||||
|
||||
log.debug("\n\n", .{});
|
||||
log.debug("{s} defines symbols", .{self.name});
|
||||
for (self.symtab.items) |sym| {
|
||||
const symname = self.getString(sym.n_strx);
|
||||
log.debug("'{s}': {}", .{ symname, sym });
|
||||
}
|
||||
|
||||
return self;
|
||||
}
|
||||
|
||||
pub const ReadOffset = struct {
|
||||
offset: ?u32 = null,
|
||||
};
|
||||
|
||||
pub fn readLoadCommands(self: *Object, reader: anytype, offset: ReadOffset) !void {
|
||||
const offset_mod = offset.offset orelse 0;
|
||||
try self.load_commands.ensureCapacity(self.allocator, self.header.ncmds);
|
||||
|
||||
var i: u16 = 0;
|
||||
while (i < self.header.ncmds) : (i += 1) {
|
||||
var cmd = try LoadCommand.read(self.allocator, reader);
|
||||
switch (cmd.cmd()) {
|
||||
macho.LC_SEGMENT_64 => {
|
||||
self.segment_cmd_index = i;
|
||||
var seg = cmd.Segment;
|
||||
for (seg.sections.items) |*sect, j| {
|
||||
const index = @intCast(u16, j);
|
||||
const segname = parseName(&sect.segname);
const sectname = parseName(&sect.sectname);
|
||||
if (mem.eql(u8, segname, "__DWARF")) {
|
||||
if (mem.eql(u8, sectname, "__debug_info")) {
|
||||
self.dwarf_debug_info_index = index;
|
||||
} else if (mem.eql(u8, sectname, "__debug_abbrev")) {
|
||||
self.dwarf_debug_abbrev_index = index;
|
||||
} else if (mem.eql(u8, sectname, "__debug_str")) {
|
||||
self.dwarf_debug_str_index = index;
|
||||
} else if (mem.eql(u8, sectname, "__debug_line")) {
|
||||
self.dwarf_debug_line_index = index;
|
||||
} else if (mem.eql(u8, sectname, "__debug_ranges")) {
|
||||
self.dwarf_debug_ranges_index = index;
|
||||
}
|
||||
} else if (mem.eql(u8, segname, "__TEXT")) {
|
||||
if (mem.eql(u8, sectname, "__text")) {
|
||||
self.text_section_index = index;
|
||||
}
|
||||
}
|
||||
|
||||
sect.offset += offset_mod;
|
||||
if (sect.reloff > 0)
|
||||
sect.reloff += offset_mod;
|
||||
}
|
||||
|
||||
seg.inner.fileoff += offset_mod;
|
||||
},
|
||||
macho.LC_SYMTAB => {
|
||||
self.symtab_cmd_index = i;
|
||||
cmd.Symtab.symoff += offset_mod;
|
||||
cmd.Symtab.stroff += offset_mod;
|
||||
},
|
||||
macho.LC_DYSYMTAB => {
|
||||
self.dysymtab_cmd_index = i;
|
||||
},
|
||||
macho.LC_BUILD_VERSION => {
|
||||
self.build_version_cmd_index = i;
|
||||
},
|
||||
macho.LC_DATA_IN_CODE => {
|
||||
self.data_in_code_cmd_index = i;
|
||||
},
|
||||
else => {
|
||||
log.debug("Unknown load command detected: 0x{x}.", .{cmd.cmd()});
|
||||
},
|
||||
}
|
||||
self.load_commands.appendAssumeCapacity(cmd);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn readSymtab(self: *Object) !void {
|
||||
const symtab_cmd = self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
var buffer = try self.allocator.alloc(u8, @sizeOf(macho.nlist_64) * symtab_cmd.nsyms);
|
||||
defer self.allocator.free(buffer);
|
||||
_ = try self.file.preadAll(buffer, symtab_cmd.symoff);
|
||||
try self.symtab.ensureCapacity(self.allocator, symtab_cmd.nsyms);
|
||||
// TODO this align case should not be needed.
|
||||
// Probably a bug in stage1.
|
||||
const slice = @alignCast(@alignOf(macho.nlist_64), mem.bytesAsSlice(macho.nlist_64, buffer));
|
||||
self.symtab.appendSliceAssumeCapacity(slice);
|
||||
}
|
||||
|
||||
pub fn readStrtab(self: *Object) !void {
|
||||
const symtab_cmd = self.load_commands.items[self.symtab_cmd_index.?].Symtab;
|
||||
var buffer = try self.allocator.alloc(u8, symtab_cmd.strsize);
|
||||
defer self.allocator.free(buffer);
|
||||
_ = try self.file.preadAll(buffer, symtab_cmd.stroff);
|
||||
try self.strtab.ensureCapacity(self.allocator, symtab_cmd.strsize);
|
||||
self.strtab.appendSliceAssumeCapacity(buffer);
|
||||
}
|
||||
|
||||
pub fn getString(self: *const Object, str_off: u32) []const u8 {
|
||||
assert(str_off < self.strtab.items.len);
|
||||
return mem.spanZ(@ptrCast([*:0]const u8, self.strtab.items.ptr + str_off));
|
||||
}
|
||||
|
||||
pub fn readSection(self: Object, allocator: *Allocator, index: u16) ![]u8 {
|
||||
const seg = self.load_commands.items[self.segment_cmd_index.?].Segment;
|
||||
const sect = seg.sections.items[index];
|
||||
var buffer = try allocator.alloc(u8, sect.size);
|
||||
_ = try self.file.preadAll(buffer, sect.offset);
|
||||
return buffer;
|
||||
}
|
||||
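A small hypothetical caller of readSection that extracts the raw __TEXT,__text bytes; `copyTextSection` is an invented name, and the caller owns the returned slice:

fn copyTextSection(object: Object, allocator: *Allocator) !?[]u8 {
    // text_section_index is only set when readLoadCommands saw a
    // __TEXT,__text section while walking LC_SEGMENT_64.
    const index = object.text_section_index orelse return null;
    return try object.readSection(allocator, index);
}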
|
||||
pub fn readDataInCode(self: *Object) !void {
|
||||
const index = self.data_in_code_cmd_index orelse return;
|
||||
const data_in_code = self.load_commands.items[index].LinkeditData;
|
||||
|
||||
var buffer = try self.allocator.alloc(u8, data_in_code.datasize);
|
||||
defer self.allocator.free(buffer);
|
||||
|
||||
_ = try self.file.preadAll(buffer, data_in_code.dataoff);
|
||||
|
||||
var stream = io.fixedBufferStream(buffer);
|
||||
var reader = stream.reader();
|
||||
while (true) {
|
||||
const dice = reader.readStruct(macho.data_in_code_entry) catch |err| switch (err) {
|
||||
error.EndOfStream => break,
|
||||
else => |e| return e,
|
||||
};
|
||||
try self.data_in_code_entries.append(self.allocator, dice);
|
||||
}
|
||||
}
|
||||
3192  src/link/MachO/Zld.zig  (new file; diff suppressed because it is too large)
145   src/link/MachO/bind.zig  (new file)
@ -0,0 +1,145 @@
|
||||
const std = @import("std");
|
||||
const leb = std.leb;
|
||||
const macho = std.macho;
|
||||
|
||||
pub const Pointer = struct {
|
||||
offset: u64,
|
||||
segment_id: u16,
|
||||
dylib_ordinal: ?i64 = null,
|
||||
name: ?[]const u8 = null,
|
||||
};
|
||||
|
||||
pub fn pointerCmp(context: void, a: Pointer, b: Pointer) bool {
|
||||
if (a.segment_id < b.segment_id) return true;
|
||||
if (a.segment_id == b.segment_id) {
|
||||
return a.offset < b.offset;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
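pointerCmp is shaped as a std.sort less-than function, so bind/rebase pointers can be grouped by segment and ordered by offset before the opcodes below are emitted; a sketch of that use (the real call site is presumably in Zld.zig, whose diff is suppressed above):

test "pointers sort by segment, then by offset" {
    var pointers = [_]Pointer{
        .{ .offset = 0x20, .segment_id = 2 },
        .{ .offset = 0x10, .segment_id = 1 },
        .{ .offset = 0x08, .segment_id = 2 },
    };
    std.sort.sort(Pointer, &pointers, {}, pointerCmp);
    // Expected order: (1, 0x10), (2, 0x08), (2, 0x20).
    std.debug.assert(pointers[0].segment_id == 1);
    std.debug.assert(pointers[1].offset == 0x08);
    std.debug.assert(pointers[2].offset == 0x20);
}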
|
||||
pub fn rebaseInfoSize(pointers: []const Pointer) !u64 {
|
||||
var stream = std.io.countingWriter(std.io.null_writer);
|
||||
var writer = stream.writer();
|
||||
var size: u64 = 0;
|
||||
|
||||
for (pointers) |pointer| {
|
||||
size += 2;
|
||||
try leb.writeILEB128(writer, pointer.offset);
|
||||
size += 1;
|
||||
}
|
||||
|
||||
size += 1 + stream.bytes_written;
|
||||
return size;
|
||||
}
|
||||
|
||||
pub fn writeRebaseInfo(pointers: []const Pointer, writer: anytype) !void {
|
||||
for (pointers) |pointer| {
|
||||
try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER));
|
||||
try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, pointer.segment_id));
|
||||
|
||||
try leb.writeILEB128(writer, pointer.offset);
|
||||
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, 1));
|
||||
}
|
||||
try writer.writeByte(macho.REBASE_OPCODE_DONE);
|
||||
}
|
||||
|
||||
pub fn bindInfoSize(pointers: []const Pointer) !u64 {
|
||||
var stream = std.io.countingWriter(std.io.null_writer);
|
||||
var writer = stream.writer();
|
||||
var size: u64 = 0;
|
||||
|
||||
for (pointers) |pointer| {
|
||||
size += 1;
|
||||
if (pointer.dylib_ordinal.? > 15) {
|
||||
try leb.writeULEB128(writer, @bitCast(u64, pointer.dylib_ordinal.?));
|
||||
}
|
||||
size += 1;
|
||||
|
||||
size += 1;
|
||||
size += pointer.name.?.len;
|
||||
size += 1;
|
||||
|
||||
size += 1;
|
||||
|
||||
try leb.writeILEB128(writer, pointer.offset);
|
||||
size += 1;
|
||||
}
|
||||
|
||||
size += stream.bytes_written + 1;
|
||||
return size;
|
||||
}
|
||||
|
||||
pub fn writeBindInfo(pointers: []const Pointer, writer: anytype) !void {
|
||||
for (pointers) |pointer| {
|
||||
if (pointer.dylib_ordinal.? > 15) {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
|
||||
try leb.writeULEB128(writer, @bitCast(u64, pointer.dylib_ordinal.?));
|
||||
} else if (pointer.dylib_ordinal.? > 0) {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, pointer.dylib_ordinal.?)));
|
||||
} else {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, pointer.dylib_ordinal.?)));
|
||||
}
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER));
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags.
|
||||
try writer.writeAll(pointer.name.?);
|
||||
try writer.writeByte(0);
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, pointer.segment_id));
|
||||
|
||||
try leb.writeILEB128(writer, pointer.offset);
|
||||
try writer.writeByte(macho.BIND_OPCODE_DO_BIND);
|
||||
}
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_DONE);
|
||||
}
|
||||
|
||||
pub fn lazyBindInfoSize(pointers: []const Pointer) !u64 {
|
||||
var stream = std.io.countingWriter(std.io.null_writer);
|
||||
var writer = stream.writer();
|
||||
var size: u64 = 0;
|
||||
|
||||
for (pointers) |pointer| {
|
||||
size += 1;
|
||||
|
||||
try leb.writeILEB128(writer, pointer.offset);
|
||||
|
||||
size += 1;
|
||||
if (pointer.dylib_ordinal.? > 15) {
|
||||
try leb.writeULEB128(writer, @bitCast(u64, pointer.dylib_ordinal.?));
|
||||
}
|
||||
|
||||
size += 1;
|
||||
size += pointer.name.?.len;
|
||||
size += 1;
|
||||
|
||||
size += 2;
|
||||
}
|
||||
|
||||
size += stream.bytes_written;
|
||||
return size;
|
||||
}
|
||||
|
||||
pub fn writeLazyBindInfo(pointers: []const Pointer, writer: anytype) !void {
|
||||
for (pointers) |pointer| {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, pointer.segment_id));
|
||||
|
||||
try leb.writeILEB128(writer, pointer.offset);
|
||||
|
||||
if (pointer.dylib_ordinal.? > 15) {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
|
||||
try leb.writeULEB128(writer, @bitCast(u64, pointer.dylib_ordinal.?));
|
||||
} else if (pointer.dylib_ordinal.? > 0) {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, pointer.dylib_ordinal.?)));
|
||||
} else {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, pointer.dylib_ordinal.?)));
|
||||
}
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags.
|
||||
try writer.writeAll(pointer.name.?);
|
||||
try writer.writeByte(0);
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_DO_BIND);
|
||||
try writer.writeByte(macho.BIND_OPCODE_DONE);
|
||||
}
|
||||
}
|
||||
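The size/write pairs in this file are meant to be used together: measure first, allocate exactly, then write into a fixed buffer. A minimal sketch for the rebase case; `emitRebaseInfo` is an invented helper, not part of the diff:

fn emitRebaseInfo(allocator: *std.mem.Allocator, pointers: []const Pointer) ![]u8 {
    const size = try rebaseInfoSize(pointers);
    var buffer = try allocator.alloc(u8, @intCast(usize, size));
    errdefer allocator.free(buffer);

    var stream = std.io.fixedBufferStream(buffer);
    try writeRebaseInfo(pointers, stream.writer());
    return buffer;
}

The same pattern applies to bindInfoSize/writeBindInfo and lazyBindInfoSize/writeLazyBindInfo.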
@ -1,152 +0,0 @@
|
||||
const std = @import("std");
|
||||
const leb = std.leb;
|
||||
const macho = std.macho;
|
||||
const mem = std.mem;
|
||||
|
||||
const assert = std.debug.assert;
|
||||
const Allocator = mem.Allocator;
|
||||
|
||||
pub const ExternSymbol = struct {
|
||||
/// MachO symbol table entry.
|
||||
inner: macho.nlist_64,
|
||||
|
||||
/// Id of the dynamic library where the specified entries can be found.
|
||||
/// Id of 0 means self.
|
||||
/// TODO this should really be an id into the table of all defined
|
||||
/// dylibs.
|
||||
dylib_ordinal: i64 = 0,
|
||||
|
||||
/// Id of the segment where this symbol is defined (will have its address
|
||||
/// resolved).
|
||||
segment: u16 = 0,
|
||||
|
||||
/// Offset relative to the start address of the `segment`.
|
||||
offset: u32 = 0,
|
||||
};
|
||||
|
||||
pub fn rebaseInfoSize(symbols: anytype) !u64 {
|
||||
var stream = std.io.countingWriter(std.io.null_writer);
|
||||
var writer = stream.writer();
|
||||
var size: u64 = 0;
|
||||
|
||||
for (symbols) |entry| {
|
||||
size += 2;
|
||||
try leb.writeILEB128(writer, entry.value.offset);
|
||||
size += 1;
|
||||
}
|
||||
|
||||
size += 1 + stream.bytes_written;
|
||||
return size;
|
||||
}
|
||||
|
||||
pub fn writeRebaseInfo(symbols: anytype, writer: anytype) !void {
|
||||
for (symbols) |entry| {
|
||||
const symbol = entry.value;
|
||||
try writer.writeByte(macho.REBASE_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.REBASE_TYPE_POINTER));
|
||||
try writer.writeByte(macho.REBASE_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment));
|
||||
try leb.writeILEB128(writer, symbol.offset);
|
||||
try writer.writeByte(macho.REBASE_OPCODE_DO_REBASE_IMM_TIMES | @truncate(u4, 1));
|
||||
}
|
||||
try writer.writeByte(macho.REBASE_OPCODE_DONE);
|
||||
}
|
||||
|
||||
pub fn bindInfoSize(symbols: anytype) !u64 {
|
||||
var stream = std.io.countingWriter(std.io.null_writer);
|
||||
var writer = stream.writer();
|
||||
var size: u64 = 0;
|
||||
|
||||
for (symbols) |entry| {
|
||||
const symbol = entry.value;
|
||||
|
||||
size += 1;
|
||||
if (symbol.dylib_ordinal > 15) {
|
||||
try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal));
|
||||
}
|
||||
size += 1;
|
||||
|
||||
size += 1;
|
||||
size += entry.key.len;
|
||||
size += 1;
|
||||
|
||||
size += 1;
|
||||
try leb.writeILEB128(writer, symbol.offset);
|
||||
size += 2;
|
||||
}
|
||||
|
||||
size += stream.bytes_written;
|
||||
return size;
|
||||
}
|
||||
|
||||
pub fn writeBindInfo(symbols: anytype, writer: anytype) !void {
|
||||
for (symbols) |entry| {
|
||||
const symbol = entry.value;
|
||||
|
||||
if (symbol.dylib_ordinal > 15) {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
|
||||
try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal));
|
||||
} else if (symbol.dylib_ordinal > 0) {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal)));
|
||||
} else {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal)));
|
||||
}
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_TYPE_IMM | @truncate(u4, macho.BIND_TYPE_POINTER));
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags.
|
||||
try writer.writeAll(entry.key);
|
||||
try writer.writeByte(0);
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment));
|
||||
try leb.writeILEB128(writer, symbol.offset);
|
||||
try writer.writeByte(macho.BIND_OPCODE_DO_BIND);
|
||||
try writer.writeByte(macho.BIND_OPCODE_DONE);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn lazyBindInfoSize(symbols: anytype) !u64 {
|
||||
var stream = std.io.countingWriter(std.io.null_writer);
|
||||
var writer = stream.writer();
|
||||
var size: u64 = 0;
|
||||
|
||||
for (symbols) |entry| {
|
||||
const symbol = entry.value;
|
||||
size += 1;
|
||||
try leb.writeILEB128(writer, symbol.offset);
|
||||
size += 1;
|
||||
if (symbol.dylib_ordinal > 15) {
|
||||
try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal));
|
||||
}
|
||||
|
||||
size += 1;
|
||||
size += entry.key.len;
|
||||
size += 1;
|
||||
|
||||
size += 2;
|
||||
}
|
||||
|
||||
size += stream.bytes_written;
|
||||
return size;
|
||||
}
|
||||
|
||||
pub fn writeLazyBindInfo(symbols: anytype, writer: anytype) !void {
|
||||
for (symbols) |entry| {
|
||||
const symbol = entry.value;
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_SEGMENT_AND_OFFSET_ULEB | @truncate(u4, symbol.segment));
|
||||
try leb.writeILEB128(writer, symbol.offset);
|
||||
|
||||
if (symbol.dylib_ordinal > 15) {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_ULEB);
|
||||
try leb.writeULEB128(writer, @bitCast(u64, symbol.dylib_ordinal));
|
||||
} else if (symbol.dylib_ordinal > 0) {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_ORDINAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal)));
|
||||
} else {
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_DYLIB_SPECIAL_IMM | @truncate(u4, @bitCast(u64, symbol.dylib_ordinal)));
|
||||
}
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_SET_SYMBOL_TRAILING_FLAGS_IMM); // TODO Sometimes we might want to add flags.
|
||||
try writer.writeAll(entry.key);
|
||||
try writer.writeByte(0);
|
||||
|
||||
try writer.writeByte(macho.BIND_OPCODE_DO_BIND);
|
||||
try writer.writeByte(macho.BIND_OPCODE_DONE);
|
||||
}
|
||||
}
|
||||
71  src/main.zig
@ -2637,6 +2637,50 @@ fn argvCmd(allocator: *Allocator, argv: []const []const u8) ![]u8 {
|
||||
return cmd.toOwnedSlice();
|
||||
}
|
||||
|
||||
fn readSourceFileToEndAlloc(allocator: *mem.Allocator, input: *const fs.File, size_hint: ?usize) ![]const u8 {
|
||||
const source_code = input.readToEndAllocOptions(
|
||||
allocator,
|
||||
max_src_size,
|
||||
size_hint,
|
||||
@alignOf(u16),
|
||||
null,
|
||||
) catch |err| switch (err) {
|
||||
error.ConnectionResetByPeer => unreachable,
|
||||
error.ConnectionTimedOut => unreachable,
|
||||
error.NotOpenForReading => unreachable,
|
||||
else => |e| return e,
|
||||
};
|
||||
errdefer allocator.free(source_code);
|
||||
|
||||
// Detect unsupported file types with their Byte Order Mark
|
||||
const unsupported_boms = [_][]const u8{
|
||||
"\xff\xfe\x00\x00", // UTF-32 little endian
|
||||
"\xfe\xff\x00\x00", // UTF-32 big endian
|
||||
"\xfe\xff", // UTF-16 big endian
|
||||
};
|
||||
for (unsupported_boms) |bom| {
|
||||
if (mem.startsWith(u8, source_code, bom)) {
|
||||
return error.UnsupportedEncoding;
|
||||
}
|
||||
}
|
||||
|
||||
// If the file starts with a UTF-16 little endian BOM, translate it to UTF-8
|
||||
if (mem.startsWith(u8, source_code, "\xff\xfe")) {
|
||||
const source_code_utf16_le = mem.bytesAsSlice(u16, source_code);
|
||||
const source_code_utf8 = std.unicode.utf16leToUtf8Alloc(allocator, source_code_utf16_le) catch |err| switch (err) {
|
||||
error.DanglingSurrogateHalf => error.UnsupportedEncoding,
|
||||
error.ExpectedSecondSurrogateHalf => error.UnsupportedEncoding,
|
||||
error.UnexpectedSecondSurrogateHalf => error.UnsupportedEncoding,
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
allocator.free(source_code);
|
||||
return source_code_utf8;
|
||||
}
|
||||
|
||||
return source_code;
|
||||
}
|
||||
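For reference, a sketch of calling the new helper on a path instead of stdin; `readSourceFromPath` is an invented wrapper and relies on main.zig's existing `fs` and `mem` aliases:

fn readSourceFromPath(gpa: *mem.Allocator, path: []const u8) ![]const u8 {
    const file = try fs.cwd().openFile(path, .{});
    defer file.close();
    // A null size hint lets readToEndAllocOptions grow the buffer as needed.
    return readSourceFileToEndAlloc(gpa, &file, null);
}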
|
||||
pub const usage_fmt =
|
||||
\\Usage: zig fmt [file]...
|
||||
\\
|
||||
@ -2708,9 +2752,10 @@ pub fn cmdFmt(gpa: *Allocator, args: []const []const u8) !void {
|
||||
fatal("cannot use --stdin with positional arguments", .{});
|
||||
}
|
||||
|
||||
const stdin = io.getStdIn().reader();
|
||||
|
||||
const source_code = try stdin.readAllAlloc(gpa, max_src_size);
|
||||
const stdin = io.getStdIn();
|
||||
const source_code = readSourceFileToEndAlloc(gpa, &stdin, null) catch |err| {
|
||||
fatal("unable to read stdin: {s}", .{err});
|
||||
};
|
||||
defer gpa.free(source_code);
|
||||
|
||||
var tree = std.zig.parse(gpa, source_code) catch |err| {
|
||||
@ -2785,6 +2830,7 @@ const FmtError = error{
|
||||
EndOfStream,
|
||||
Unseekable,
|
||||
NotOpenForWriting,
|
||||
UnsupportedEncoding,
|
||||
} || fs.File.OpenError;
|
||||
|
||||
fn fmtPath(fmt: *Fmt, file_path: []const u8, check_mode: bool, dir: fs.Dir, sub_path: []const u8) FmtError!void {
|
||||
@ -2850,21 +2896,15 @@ fn fmtPathFile(
|
||||
if (stat.kind == .Directory)
|
||||
return error.IsDir;
|
||||
|
||||
const source_code = source_file.readToEndAllocOptions(
|
||||
const source_code = try readSourceFileToEndAlloc(
|
||||
fmt.gpa,
|
||||
max_src_size,
|
||||
&source_file,
|
||||
std.math.cast(usize, stat.size) catch return error.FileTooBig,
|
||||
@alignOf(u8),
|
||||
null,
|
||||
) catch |err| switch (err) {
|
||||
error.ConnectionResetByPeer => unreachable,
|
||||
error.ConnectionTimedOut => unreachable,
|
||||
error.NotOpenForReading => unreachable,
|
||||
else => |e| return e,
|
||||
};
|
||||
);
|
||||
defer fmt.gpa.free(source_code);
|
||||
|
||||
source_file.close();
|
||||
file_closed = true;
|
||||
defer fmt.gpa.free(source_code);
|
||||
|
||||
// Add to set after no longer possible to get error.IsDir.
|
||||
if (try fmt.seen.fetchPut(stat.inode, {})) |_| return;
|
||||
@ -3241,7 +3281,8 @@ pub const ClangArgIterator = struct {
|
||||
self.zig_equivalent = clang_arg.zig_equivalent;
|
||||
break :find_clang_arg;
|
||||
},
|
||||
} else {
|
||||
}
|
||||
else {
|
||||
fatal("Unknown Clang option: '{s}'", .{arg});
|
||||
}
|
||||
}
|
||||
|
||||
@ -391,6 +391,8 @@ enum LazyValueId {
|
||||
LazyValueIdAlignOf,
|
||||
LazyValueIdSizeOf,
|
||||
LazyValueIdPtrType,
|
||||
LazyValueIdPtrTypeSimple,
|
||||
LazyValueIdPtrTypeSimpleConst,
|
||||
LazyValueIdOptType,
|
||||
LazyValueIdSliceType,
|
||||
LazyValueIdFnType,
|
||||
@ -467,6 +469,13 @@ struct LazyValuePtrType {
|
||||
bool is_allowzero;
|
||||
};
|
||||
|
||||
struct LazyValuePtrTypeSimple {
|
||||
LazyValue base;
|
||||
|
||||
IrAnalyze *ira;
|
||||
IrInstGen *elem_type;
|
||||
};
|
||||
|
||||
struct LazyValueOptType {
|
||||
LazyValue base;
|
||||
|
||||
@ -2610,7 +2619,8 @@ enum IrInstSrcId {
|
||||
IrInstSrcIdEnumToInt,
|
||||
IrInstSrcIdIntToErr,
|
||||
IrInstSrcIdErrToInt,
|
||||
IrInstSrcIdCheckSwitchProngs,
|
||||
IrInstSrcIdCheckSwitchProngsUnderYes,
|
||||
IrInstSrcIdCheckSwitchProngsUnderNo,
|
||||
IrInstSrcIdCheckStatementIsVoid,
|
||||
IrInstSrcIdTypeName,
|
||||
IrInstSrcIdDeclRef,
|
||||
@ -2624,12 +2634,15 @@ enum IrInstSrcId {
|
||||
IrInstSrcIdHasField,
|
||||
IrInstSrcIdSetEvalBranchQuota,
|
||||
IrInstSrcIdPtrType,
|
||||
IrInstSrcIdPtrTypeSimple,
|
||||
IrInstSrcIdPtrTypeSimpleConst,
|
||||
IrInstSrcIdAlignCast,
|
||||
IrInstSrcIdImplicitCast,
|
||||
IrInstSrcIdResolveResult,
|
||||
IrInstSrcIdResetResult,
|
||||
IrInstSrcIdSetAlignStack,
|
||||
IrInstSrcIdArgType,
|
||||
IrInstSrcIdArgTypeAllowVarFalse,
|
||||
IrInstSrcIdArgTypeAllowVarTrue,
|
||||
IrInstSrcIdExport,
|
||||
IrInstSrcIdExtern,
|
||||
IrInstSrcIdErrorReturnTrace,
|
||||
@ -3294,6 +3307,12 @@ struct IrInstSrcArrayType {
|
||||
IrInstSrc *child_type;
|
||||
};
|
||||
|
||||
struct IrInstSrcPtrTypeSimple {
|
||||
IrInstSrc base;
|
||||
|
||||
IrInstSrc *child_type;
|
||||
};
|
||||
|
||||
struct IrInstSrcPtrType {
|
||||
IrInstSrc base;
|
||||
|
||||
@ -4020,7 +4039,6 @@ struct IrInstSrcCheckSwitchProngs {
|
||||
IrInstSrcCheckSwitchProngsRange *ranges;
|
||||
size_t range_count;
|
||||
AstNode* else_prong;
|
||||
bool have_underscore_prong;
|
||||
};
|
||||
|
||||
struct IrInstSrcCheckStatementIsVoid {
|
||||
@ -4144,7 +4162,6 @@ struct IrInstSrcArgType {
|
||||
|
||||
IrInstSrc *fn_type;
|
||||
IrInstSrc *arg_index;
|
||||
bool allow_var;
|
||||
};
|
||||
|
||||
struct IrInstSrcExport {
|
||||
|
||||
@ -1237,6 +1237,22 @@ Error type_val_resolve_zero_bits(CodeGen *g, ZigValue *type_val, ZigType *parent
|
||||
parent_type_val, is_zero_bits);
|
||||
}
|
||||
}
|
||||
case LazyValueIdPtrTypeSimple:
|
||||
case LazyValueIdPtrTypeSimpleConst: {
|
||||
LazyValuePtrTypeSimple *lazy_ptr_type = reinterpret_cast<LazyValuePtrTypeSimple *>(type_val->data.x_lazy);
|
||||
|
||||
if (parent_type_val == lazy_ptr_type->elem_type->value) {
|
||||
// Does a struct which contains a pointer field to itself have bits? Yes.
|
||||
*is_zero_bits = false;
|
||||
return ErrorNone;
|
||||
} else {
|
||||
if (parent_type_val == nullptr) {
|
||||
parent_type_val = type_val;
|
||||
}
|
||||
return type_val_resolve_zero_bits(g, lazy_ptr_type->elem_type->value, parent_type,
|
||||
parent_type_val, is_zero_bits);
|
||||
}
|
||||
}
|
||||
case LazyValueIdArrayType: {
|
||||
LazyValueArrayType *lazy_array_type =
|
||||
reinterpret_cast<LazyValueArrayType *>(type_val->data.x_lazy);
|
||||
@ -1285,6 +1301,8 @@ Error type_val_resolve_is_opaque_type(CodeGen *g, ZigValue *type_val, bool *is_o
|
||||
zig_unreachable();
|
||||
case LazyValueIdSliceType:
|
||||
case LazyValueIdPtrType:
|
||||
case LazyValueIdPtrTypeSimple:
|
||||
case LazyValueIdPtrTypeSimpleConst:
|
||||
case LazyValueIdFnType:
|
||||
case LazyValueIdOptType:
|
||||
case LazyValueIdErrUnionType:
|
||||
@ -1313,6 +1331,11 @@ static ReqCompTime type_val_resolve_requires_comptime(CodeGen *g, ZigValue *type
|
||||
LazyValuePtrType *lazy_ptr_type = reinterpret_cast<LazyValuePtrType *>(type_val->data.x_lazy);
|
||||
return type_val_resolve_requires_comptime(g, lazy_ptr_type->elem_type->value);
|
||||
}
|
||||
case LazyValueIdPtrTypeSimple:
|
||||
case LazyValueIdPtrTypeSimpleConst: {
|
||||
LazyValuePtrTypeSimple *lazy_ptr_type = reinterpret_cast<LazyValuePtrTypeSimple *>(type_val->data.x_lazy);
|
||||
return type_val_resolve_requires_comptime(g, lazy_ptr_type->elem_type->value);
|
||||
}
|
||||
case LazyValueIdOptType: {
|
||||
LazyValueOptType *lazy_opt_type = reinterpret_cast<LazyValueOptType *>(type_val->data.x_lazy);
|
||||
return type_val_resolve_requires_comptime(g, lazy_opt_type->payload_type->value);
|
||||
@ -1413,6 +1436,24 @@ start_over:
|
||||
}
|
||||
return ErrorNone;
|
||||
}
|
||||
case LazyValueIdPtrTypeSimple:
|
||||
case LazyValueIdPtrTypeSimpleConst: {
|
||||
LazyValuePtrTypeSimple *lazy_ptr_type = reinterpret_cast<LazyValuePtrTypeSimple *>(type_val->data.x_lazy);
|
||||
bool is_zero_bits;
|
||||
if ((err = type_val_resolve_zero_bits(g, lazy_ptr_type->elem_type->value, nullptr,
|
||||
nullptr, &is_zero_bits)))
|
||||
{
|
||||
return err;
|
||||
}
|
||||
if (is_zero_bits) {
|
||||
*abi_size = 0;
|
||||
*size_in_bits = 0;
|
||||
} else {
|
||||
*abi_size = g->builtin_types.entry_usize->abi_size;
|
||||
*size_in_bits = g->builtin_types.entry_usize->size_in_bits;
|
||||
}
|
||||
return ErrorNone;
|
||||
}
|
||||
case LazyValueIdFnType:
|
||||
*abi_size = g->builtin_types.entry_usize->abi_size;
|
||||
*size_in_bits = g->builtin_types.entry_usize->size_in_bits;
|
||||
@ -1449,6 +1490,8 @@ Error type_val_resolve_abi_align(CodeGen *g, AstNode *source_node, ZigValue *typ
|
||||
zig_unreachable();
|
||||
case LazyValueIdSliceType:
|
||||
case LazyValueIdPtrType:
|
||||
case LazyValueIdPtrTypeSimple:
|
||||
case LazyValueIdPtrTypeSimpleConst:
|
||||
case LazyValueIdFnType:
|
||||
*abi_align = g->builtin_types.entry_usize->abi_align;
|
||||
return ErrorNone;
|
||||
@ -1506,7 +1549,9 @@ static OnePossibleValue type_val_resolve_has_one_possible_value(CodeGen *g, ZigV
|
||||
return OnePossibleValueYes;
|
||||
return type_val_resolve_has_one_possible_value(g, lazy_array_type->elem_type->value);
|
||||
}
|
||||
case LazyValueIdPtrType: {
|
||||
case LazyValueIdPtrType:
|
||||
case LazyValueIdPtrTypeSimple:
|
||||
case LazyValueIdPtrTypeSimpleConst: {
|
||||
Error err;
|
||||
bool zero_bits;
|
||||
if ((err = type_val_resolve_zero_bits(g, type_val, nullptr, nullptr, &zero_bits))) {
|
||||
@ -5758,6 +5803,8 @@ static bool can_mutate_comptime_var_state(ZigValue *value) {
|
||||
case LazyValueIdAlignOf:
|
||||
case LazyValueIdSizeOf:
|
||||
case LazyValueIdPtrType:
|
||||
case LazyValueIdPtrTypeSimple:
|
||||
case LazyValueIdPtrTypeSimpleConst:
|
||||
case LazyValueIdOptType:
|
||||
case LazyValueIdSliceType:
|
||||
case LazyValueIdFnType:
|
||||
|
||||
@ -476,7 +476,8 @@ static void destroy_instruction_src(IrInstSrc *inst) {
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcIntToErr *>(inst));
|
||||
case IrInstSrcIdErrToInt:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcErrToInt *>(inst));
|
||||
case IrInstSrcIdCheckSwitchProngs:
|
||||
case IrInstSrcIdCheckSwitchProngsUnderNo:
|
||||
case IrInstSrcIdCheckSwitchProngsUnderYes:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCheckSwitchProngs *>(inst));
|
||||
case IrInstSrcIdCheckStatementIsVoid:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcCheckStatementIsVoid *>(inst));
|
||||
@ -486,6 +487,9 @@ static void destroy_instruction_src(IrInstSrc *inst) {
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcTagName *>(inst));
|
||||
case IrInstSrcIdPtrType:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPtrType *>(inst));
|
||||
case IrInstSrcIdPtrTypeSimple:
|
||||
case IrInstSrcIdPtrTypeSimpleConst:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcPtrTypeSimple *>(inst));
|
||||
case IrInstSrcIdDeclRef:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcDeclRef *>(inst));
|
||||
case IrInstSrcIdPanic:
|
||||
@ -514,7 +518,8 @@ static void destroy_instruction_src(IrInstSrc *inst) {
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcResetResult *>(inst));
|
||||
case IrInstSrcIdSetAlignStack:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcSetAlignStack *>(inst));
|
||||
case IrInstSrcIdArgType:
|
||||
case IrInstSrcIdArgTypeAllowVarFalse:
|
||||
case IrInstSrcIdArgTypeAllowVarTrue:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcArgType *>(inst));
|
||||
case IrInstSrcIdExport:
|
||||
return heap::c_allocator.destroy(reinterpret_cast<IrInstSrcExport *>(inst));
|
||||
@ -1470,10 +1475,6 @@ static constexpr IrInstSrcId ir_inst_id(IrInstSrcErrToInt *) {
|
||||
return IrInstSrcIdErrToInt;
|
||||
}
|
||||
|
||||
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckSwitchProngs *) {
|
||||
return IrInstSrcIdCheckSwitchProngs;
|
||||
}
|
||||
|
||||
static constexpr IrInstSrcId ir_inst_id(IrInstSrcCheckStatementIsVoid *) {
|
||||
return IrInstSrcIdCheckStatementIsVoid;
|
||||
}
|
||||
@ -1546,10 +1547,6 @@ static constexpr IrInstSrcId ir_inst_id(IrInstSrcSetAlignStack *) {
|
||||
return IrInstSrcIdSetAlignStack;
|
||||
}
|
||||
|
||||
static constexpr IrInstSrcId ir_inst_id(IrInstSrcArgType *) {
|
||||
return IrInstSrcIdArgType;
|
||||
}
|
||||
|
||||
static constexpr IrInstSrcId ir_inst_id(IrInstSrcExport *) {
|
||||
return IrInstSrcIdExport;
|
||||
}
|
||||
@ -2615,11 +2612,35 @@ static IrInstGen *ir_build_br_gen(IrAnalyze *ira, IrInst *source_instr, IrBasicB
|
||||
return &inst->base;
|
||||
}
|
||||
|
||||
static IrInstSrc *ir_build_ptr_type_simple(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
|
||||
IrInstSrc *child_type, bool is_const)
|
||||
{
|
||||
IrInstSrcPtrTypeSimple *inst = heap::c_allocator.create<IrInstSrcPtrTypeSimple>();
|
||||
inst->base.id = is_const ? IrInstSrcIdPtrTypeSimpleConst : IrInstSrcIdPtrTypeSimple;
|
||||
inst->base.base.scope = scope;
|
||||
inst->base.base.source_node = source_node;
|
||||
inst->base.base.debug_id = exec_next_debug_id(irb->exec);
|
||||
inst->base.owner_bb = irb->current_basic_block;
|
||||
ir_instruction_append(irb->current_basic_block, &inst->base);
|
||||
|
||||
inst->child_type = child_type;
|
||||
|
||||
ir_ref_instruction(child_type, irb->current_basic_block);
|
||||
|
||||
return &inst->base;
|
||||
}
|
||||
|
||||
static IrInstSrc *ir_build_ptr_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
|
||||
IrInstSrc *child_type, bool is_const, bool is_volatile, PtrLen ptr_len,
|
||||
IrInstSrc *sentinel, IrInstSrc *align_value,
|
||||
uint32_t bit_offset_start, uint32_t host_int_bytes, bool is_allow_zero)
|
||||
{
|
||||
if (!is_volatile && ptr_len == PtrLenSingle && sentinel == nullptr && align_value == nullptr &&
|
||||
bit_offset_start == 0 && host_int_bytes == 0 && is_allow_zero == 0)
|
||||
{
|
||||
return ir_build_ptr_type_simple(irb, scope, source_node, child_type, is_const);
|
||||
}
|
||||
|
||||
IrInstSrcPtrType *inst = ir_build_instruction<IrInstSrcPtrType>(irb, scope, source_node);
|
||||
inst->sentinel = sentinel;
|
||||
inst->align_value = align_value;
|
||||
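In Zig terms, the new compact instruction only fires when every pointer attribute is at its default, mirroring the condition in ir_build_ptr_type; a sketch of which source-level pointer types land where:

// Lowered through the new compact instruction:
const Simple = *u8;            // IrInstSrcIdPtrTypeSimple
const SimpleConst = *const u8; // IrInstSrcIdPtrTypeSimpleConst

// Still lowered through the full IrInstSrcIdPtrType:
const Volatile = *volatile u8;   // is_volatile
const Many = [*]u8;              // ptr_len != PtrLenSingle
const Aligned = *align(16) u8;   // explicit align_value
const AllowZero = *allowzero u8; // is_allow_zero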
@ -4354,13 +4375,19 @@ static IrInstSrc *ir_build_check_switch_prongs(IrBuilderSrc *irb, Scope *scope,
|
||||
IrInstSrc *target_value, IrInstSrcCheckSwitchProngsRange *ranges, size_t range_count,
|
||||
AstNode* else_prong, bool have_underscore_prong)
|
||||
{
|
||||
IrInstSrcCheckSwitchProngs *instruction = ir_build_instruction<IrInstSrcCheckSwitchProngs>(
|
||||
irb, scope, source_node);
|
||||
IrInstSrcCheckSwitchProngs *instruction = heap::c_allocator.create<IrInstSrcCheckSwitchProngs>();
|
||||
instruction->base.id = have_underscore_prong ?
|
||||
IrInstSrcIdCheckSwitchProngsUnderYes : IrInstSrcIdCheckSwitchProngsUnderNo;
|
||||
instruction->base.base.scope = scope;
|
||||
instruction->base.base.source_node = source_node;
|
||||
instruction->base.base.debug_id = exec_next_debug_id(irb->exec);
|
||||
instruction->base.owner_bb = irb->current_basic_block;
|
||||
ir_instruction_append(irb->current_basic_block, &instruction->base);
|
||||
|
||||
instruction->target_value = target_value;
|
||||
instruction->ranges = ranges;
|
||||
instruction->range_count = range_count;
|
||||
instruction->else_prong = else_prong;
|
||||
instruction->have_underscore_prong = have_underscore_prong;
|
||||
|
||||
ir_ref_instruction(target_value, irb->current_basic_block);
|
||||
for (size_t i = 0; i < range_count; i += 1) {
|
||||
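At the language level, the UnderYes/UnderNo split encodes whether the switch carries a `_` prong, which is only legal on a non-exhaustive enum; a brief Zig illustration:

const E = enum(u8) { a, b, _ }; // non-exhaustive: declares the `_` tag

fn classify(e: E) u8 {
    return switch (e) {
        .a => 0,
        .b => 1,
        // `_` prong: analyzed through IrInstSrcIdCheckSwitchProngsUnderYes;
        // on an exhaustive enum this prong is a compile error.
        _ => 2,
    };
}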
@ -4590,10 +4617,17 @@ static IrInstSrc *ir_build_set_align_stack(IrBuilderSrc *irb, Scope *scope, AstN
|
||||
static IrInstSrc *ir_build_arg_type(IrBuilderSrc *irb, Scope *scope, AstNode *source_node,
|
||||
IrInstSrc *fn_type, IrInstSrc *arg_index, bool allow_var)
|
||||
{
|
||||
IrInstSrcArgType *instruction = ir_build_instruction<IrInstSrcArgType>(irb, scope, source_node);
|
||||
IrInstSrcArgType *instruction = heap::c_allocator.create<IrInstSrcArgType>();
|
||||
instruction->base.id = allow_var ?
|
||||
IrInstSrcIdArgTypeAllowVarTrue : IrInstSrcIdArgTypeAllowVarFalse;
|
||||
instruction->base.base.scope = scope;
|
||||
instruction->base.base.source_node = source_node;
|
||||
instruction->base.base.debug_id = exec_next_debug_id(irb->exec);
|
||||
instruction->base.owner_bb = irb->current_basic_block;
|
||||
ir_instruction_append(irb->current_basic_block, &instruction->base);
|
||||
|
||||
instruction->fn_type = fn_type;
|
||||
instruction->arg_index = arg_index;
|
||||
instruction->allow_var = allow_var;
|
||||
|
||||
ir_ref_instruction(fn_type, irb->current_basic_block);
|
||||
ir_ref_instruction(arg_index, irb->current_basic_block);
|
||||
@ -29702,7 +29736,7 @@ static IrInstGen *ir_analyze_instruction_test_comptime(IrAnalyze *ira, IrInstSrc
|
||||
}
|
||||
|
||||
static IrInstGen *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira,
|
||||
IrInstSrcCheckSwitchProngs *instruction)
|
||||
IrInstSrcCheckSwitchProngs *instruction, bool have_underscore_prong)
|
||||
{
|
||||
IrInstGen *target_value = instruction->target_value->child;
|
||||
ZigType *switch_type = target_value->value->type;
|
||||
@ -29767,7 +29801,7 @@ static IrInstGen *ir_analyze_instruction_check_switch_prongs(IrAnalyze *ira,
|
||||
bigint_incr(&field_index);
|
||||
}
|
||||
}
|
||||
if (instruction->have_underscore_prong) {
|
||||
if (have_underscore_prong) {
|
||||
if (!switch_type->data.enumeration.non_exhaustive) {
|
||||
ir_add_error(ira, &instruction->base.base,
|
||||
buf_sprintf("switch on exhaustive enum has `_` prong"));
|
||||
@ -30871,6 +30905,24 @@ static IrInstGen *ir_analyze_instruction_ptr_to_int(IrAnalyze *ira, IrInstSrcPtr
|
||||
return ir_build_ptr_to_int_gen(ira, &instruction->base.base, target);
|
||||
}
|
||||
|
||||
static IrInstGen *ir_analyze_instruction_ptr_type_simple(IrAnalyze *ira,
|
||||
IrInstSrcPtrTypeSimple *instruction, bool is_const)
|
||||
{
|
||||
IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_type);
|
||||
result->value->special = ConstValSpecialLazy;
|
||||
|
||||
LazyValuePtrTypeSimple *lazy_ptr_type = heap::c_allocator.create<LazyValuePtrTypeSimple>();
|
||||
lazy_ptr_type->ira = ira; ira_ref(ira);
|
||||
result->value->data.x_lazy = &lazy_ptr_type->base;
|
||||
lazy_ptr_type->base.id = is_const ? LazyValueIdPtrTypeSimpleConst : LazyValueIdPtrTypeSimple;
|
||||
|
||||
lazy_ptr_type->elem_type = instruction->child_type->child;
|
||||
if (ir_resolve_type_lazy(ira, lazy_ptr_type->elem_type) == nullptr)
|
||||
return ira->codegen->invalid_inst_gen;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static IrInstGen *ir_analyze_instruction_ptr_type(IrAnalyze *ira, IrInstSrcPtrType *instruction) {
|
||||
IrInstGen *result = ir_const(ira, &instruction->base.base, ira->codegen->builtin_types.entry_type);
|
||||
result->value->special = ConstValSpecialLazy;
|
||||
@ -30976,7 +31028,9 @@ static IrInstGen *ir_analyze_instruction_set_align_stack(IrAnalyze *ira, IrInstS
|
||||
return ir_const_void(ira, &instruction->base.base);
|
||||
}
|
||||
|
||||
static IrInstGen *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstSrcArgType *instruction) {
|
||||
static IrInstGen *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstSrcArgType *instruction,
|
||||
bool allow_var)
|
||||
{
|
||||
IrInstGen *fn_type_inst = instruction->fn_type->child;
|
||||
ZigType *fn_type = ir_resolve_type(ira, fn_type_inst);
|
||||
if (type_is_invalid(fn_type))
|
||||
@ -30998,7 +31052,7 @@ static IrInstGen *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstSrcArgTy
|
||||
|
||||
FnTypeId *fn_type_id = &fn_type->data.fn.fn_type_id;
|
||||
if (arg_index >= fn_type_id->param_count) {
|
||||
if (instruction->allow_var) {
|
||||
if (allow_var) {
|
||||
// TODO remove this with var args
|
||||
return ir_const_type(ira, &instruction->base.base, ira->codegen->builtin_types.entry_anytype);
|
||||
}
|
||||
@ -31013,7 +31067,7 @@ static IrInstGen *ir_analyze_instruction_arg_type(IrAnalyze *ira, IrInstSrcArgTy
|
||||
// Args are only unresolved if our function is generic.
|
||||
ir_assert(fn_type->data.fn.is_generic, &instruction->base.base);
|
||||
|
||||
if (instruction->allow_var) {
|
||||
if (allow_var) {
|
||||
return ir_const_type(ira, &instruction->base.base, ira->codegen->builtin_types.entry_anytype);
|
||||
} else {
|
||||
ir_add_error(ira, &arg_index_inst->base,
|
||||
@ -32341,8 +32395,10 @@ static IrInstGen *ir_analyze_instruction_base(IrAnalyze *ira, IrInstSrc *instruc
return ir_analyze_instruction_fn_proto(ira, (IrInstSrcFnProto *)instruction);
case IrInstSrcIdTestComptime:
return ir_analyze_instruction_test_comptime(ira, (IrInstSrcTestComptime *)instruction);
case IrInstSrcIdCheckSwitchProngs:
return ir_analyze_instruction_check_switch_prongs(ira, (IrInstSrcCheckSwitchProngs *)instruction);
case IrInstSrcIdCheckSwitchProngsUnderNo:
return ir_analyze_instruction_check_switch_prongs(ira, (IrInstSrcCheckSwitchProngs *)instruction, false);
case IrInstSrcIdCheckSwitchProngsUnderYes:
return ir_analyze_instruction_check_switch_prongs(ira, (IrInstSrcCheckSwitchProngs *)instruction, true);
case IrInstSrcIdCheckStatementIsVoid:
return ir_analyze_instruction_check_statement_is_void(ira, (IrInstSrcCheckStatementIsVoid *)instruction);
case IrInstSrcIdDeclRef:
@ -32373,6 +32429,10 @@ static IrInstGen *ir_analyze_instruction_base(IrAnalyze *ira, IrInstSrc *instruc
return ir_analyze_instruction_set_eval_branch_quota(ira, (IrInstSrcSetEvalBranchQuota *)instruction);
case IrInstSrcIdPtrType:
return ir_analyze_instruction_ptr_type(ira, (IrInstSrcPtrType *)instruction);
case IrInstSrcIdPtrTypeSimple:
return ir_analyze_instruction_ptr_type_simple(ira, (IrInstSrcPtrTypeSimple *)instruction, false);
case IrInstSrcIdPtrTypeSimpleConst:
return ir_analyze_instruction_ptr_type_simple(ira, (IrInstSrcPtrTypeSimple *)instruction, true);
case IrInstSrcIdAlignCast:
return ir_analyze_instruction_align_cast(ira, (IrInstSrcAlignCast *)instruction);
case IrInstSrcIdImplicitCast:
@ -32383,8 +32443,10 @@ static IrInstGen *ir_analyze_instruction_base(IrAnalyze *ira, IrInstSrc *instruc
return ir_analyze_instruction_reset_result(ira, (IrInstSrcResetResult *)instruction);
case IrInstSrcIdSetAlignStack:
return ir_analyze_instruction_set_align_stack(ira, (IrInstSrcSetAlignStack *)instruction);
case IrInstSrcIdArgType:
return ir_analyze_instruction_arg_type(ira, (IrInstSrcArgType *)instruction);
case IrInstSrcIdArgTypeAllowVarFalse:
return ir_analyze_instruction_arg_type(ira, (IrInstSrcArgType *)instruction, false);
case IrInstSrcIdArgTypeAllowVarTrue:
return ir_analyze_instruction_arg_type(ira, (IrInstSrcArgType *)instruction, true);
case IrInstSrcIdExport:
return ir_analyze_instruction_export(ira, (IrInstSrcExport *)instruction);
case IrInstSrcIdExtern:
@ -32737,12 +32799,15 @@ bool ir_inst_src_has_side_effects(IrInstSrc *instruction) {
case IrInstSrcIdMemcpy:
case IrInstSrcIdBreakpoint:
case IrInstSrcIdOverflowOp: // TODO when we support multiple returns this can be side effect free
case IrInstSrcIdCheckSwitchProngs:
case IrInstSrcIdCheckSwitchProngsUnderNo:
case IrInstSrcIdCheckSwitchProngsUnderYes:
case IrInstSrcIdCheckStatementIsVoid:
case IrInstSrcIdCheckRuntimeScope:
case IrInstSrcIdPanic:
case IrInstSrcIdSetEvalBranchQuota:
case IrInstSrcIdPtrType:
case IrInstSrcIdPtrTypeSimple:
case IrInstSrcIdPtrTypeSimpleConst:
case IrInstSrcIdSetAlignStack:
case IrInstSrcIdExport:
case IrInstSrcIdExtern:
@ -32826,7 +32891,8 @@ bool ir_inst_src_has_side_effects(IrInstSrc *instruction) {
case IrInstSrcIdAlignCast:
case IrInstSrcIdImplicitCast:
case IrInstSrcIdResolveResult:
case IrInstSrcIdArgType:
case IrInstSrcIdArgTypeAllowVarFalse:
case IrInstSrcIdArgTypeAllowVarTrue:
case IrInstSrcIdErrorReturnTrace:
case IrInstSrcIdErrorUnion:
case IrInstSrcIdFloatOp:
@ -33249,6 +33315,54 @@ static Error ir_resolve_lazy_raw(AstNode *source_node, ZigValue *val) {
// We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
return ErrorNone;
}
case LazyValueIdPtrTypeSimple: {
LazyValuePtrTypeSimple *lazy_ptr_type = reinterpret_cast<LazyValuePtrTypeSimple *>(val->data.x_lazy);
IrAnalyze *ira = lazy_ptr_type->ira;

ZigType *elem_type = ir_resolve_type(ira, lazy_ptr_type->elem_type);
if (type_is_invalid(elem_type))
return ErrorSemanticAnalyzeFail;

if (elem_type->id == ZigTypeIdUnreachable) {
ir_add_error(ira, &lazy_ptr_type->elem_type->base,
buf_create_from_str("pointer to noreturn not allowed"));
return ErrorSemanticAnalyzeFail;
}

assert(val->type->id == ZigTypeIdMetaType);
val->data.x_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
false, false, PtrLenSingle, 0,
0, 0,
false, VECTOR_INDEX_NONE, nullptr, nullptr);
val->special = ConstValSpecialStatic;

// We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
return ErrorNone;
}
case LazyValueIdPtrTypeSimpleConst: {
LazyValuePtrTypeSimple *lazy_ptr_type = reinterpret_cast<LazyValuePtrTypeSimple *>(val->data.x_lazy);
IrAnalyze *ira = lazy_ptr_type->ira;

ZigType *elem_type = ir_resolve_type(ira, lazy_ptr_type->elem_type);
if (type_is_invalid(elem_type))
return ErrorSemanticAnalyzeFail;

if (elem_type->id == ZigTypeIdUnreachable) {
ir_add_error(ira, &lazy_ptr_type->elem_type->base,
buf_create_from_str("pointer to noreturn not allowed"));
return ErrorSemanticAnalyzeFail;
}

assert(val->type->id == ZigTypeIdMetaType);
val->data.x_type = get_pointer_to_type_extra2(ira->codegen, elem_type,
true, false, PtrLenSingle, 0,
0, 0,
false, VECTOR_INDEX_NONE, nullptr, nullptr);
val->special = ConstValSpecialStatic;

// We can't free the lazy value here, because multiple other ZigValues might be pointing to it.
return ErrorNone;
}
case LazyValueIdArrayType: {
LazyValueArrayType *lazy_array_type = reinterpret_cast<LazyValueArrayType *>(val->data.x_lazy);
IrAnalyze *ira = lazy_array_type->ira;

@ -270,8 +270,10 @@ const char* ir_inst_src_type_str(IrInstSrcId id) {
return "SrcIntToErr";
case IrInstSrcIdErrToInt:
return "SrcErrToInt";
case IrInstSrcIdCheckSwitchProngs:
return "SrcCheckSwitchProngs";
case IrInstSrcIdCheckSwitchProngsUnderNo:
return "SrcCheckSwitchProngsUnderNo";
case IrInstSrcIdCheckSwitchProngsUnderYes:
return "SrcCheckSwitchProngsUnderYes";
case IrInstSrcIdCheckStatementIsVoid:
return "SrcCheckStatementIsVoid";
case IrInstSrcIdTypeName:
@ -298,6 +300,10 @@ const char* ir_inst_src_type_str(IrInstSrcId id) {
return "SrcSetEvalBranchQuota";
case IrInstSrcIdPtrType:
return "SrcPtrType";
case IrInstSrcIdPtrTypeSimple:
return "SrcPtrTypeSimple";
case IrInstSrcIdPtrTypeSimpleConst:
return "SrcPtrTypeSimpleConst";
case IrInstSrcIdAlignCast:
return "SrcAlignCast";
case IrInstSrcIdImplicitCast:
@ -308,8 +314,10 @@ const char* ir_inst_src_type_str(IrInstSrcId id) {
return "SrcResetResult";
case IrInstSrcIdSetAlignStack:
return "SrcSetAlignStack";
case IrInstSrcIdArgType:
return "SrcArgType";
case IrInstSrcIdArgTypeAllowVarFalse:
return "SrcArgTypeAllowVarFalse";
case IrInstSrcIdArgTypeAllowVarTrue:
return "SrcArgTypeAllowVarTrue";
case IrInstSrcIdExport:
return "SrcExport";
case IrInstSrcIdExtern:
@ -2187,7 +2195,9 @@ static void ir_print_err_to_int(IrPrintGen *irp, IrInstGenErrToInt *instruction)
ir_print_other_inst_gen(irp, instruction->target);
}

static void ir_print_check_switch_prongs(IrPrintSrc *irp, IrInstSrcCheckSwitchProngs *instruction) {
static void ir_print_check_switch_prongs(IrPrintSrc *irp, IrInstSrcCheckSwitchProngs *instruction,
bool have_underscore_prong)
{
fprintf(irp->f, "@checkSwitchProngs(");
ir_print_other_inst_src(irp, instruction->target_value);
fprintf(irp->f, ",");
@ -2200,6 +2210,8 @@ static void ir_print_check_switch_prongs(IrPrintSrc *irp, IrInstSrcCheckSwitchPr
}
const char *have_else_str = instruction->else_prong != nullptr ? "yes" : "no";
fprintf(irp->f, ")else:%s", have_else_str);
const char *have_under_str = have_underscore_prong ? "yes" : "no";
fprintf(irp->f, " _:%s", have_under_str);
}

static void ir_print_check_statement_is_void(IrPrintSrc *irp, IrInstSrcCheckStatementIsVoid *instruction) {
@ -2237,6 +2249,15 @@ static void ir_print_ptr_type(IrPrintSrc *irp, IrInstSrcPtrType *instruction) {
ir_print_other_inst_src(irp, instruction->child_type);
}

static void ir_print_ptr_type_simple(IrPrintSrc *irp, IrInstSrcPtrTypeSimple *instruction,
bool is_const)
{
fprintf(irp->f, "&");
const char *const_str = is_const ? "const " : "";
fprintf(irp->f, "*%s", const_str);
ir_print_other_inst_src(irp, instruction->child_type);
}

static void ir_print_decl_ref(IrPrintSrc *irp, IrInstSrcDeclRef *instruction) {
const char *ptr_str = (instruction->lval != LValNone) ? "ptr " : "";
fprintf(irp->f, "declref %s%s", ptr_str, buf_ptr(instruction->tld->name));
@ -2344,11 +2365,17 @@ static void ir_print_set_align_stack(IrPrintSrc *irp, IrInstSrcSetAlignStack *in
fprintf(irp->f, ")");
}

static void ir_print_arg_type(IrPrintSrc *irp, IrInstSrcArgType *instruction) {
static void ir_print_arg_type(IrPrintSrc *irp, IrInstSrcArgType *instruction, bool allow_var) {
fprintf(irp->f, "@ArgType(");
ir_print_other_inst_src(irp, instruction->fn_type);
fprintf(irp->f, ",");
ir_print_other_inst_src(irp, instruction->arg_index);
fprintf(irp->f, ",");
if (allow_var) {
fprintf(irp->f, "allow_var=true");
} else {
fprintf(irp->f, "allow_var=false");
}
fprintf(irp->f, ")");
}

@ -2885,8 +2912,11 @@ static void ir_print_inst_src(IrPrintSrc *irp, IrInstSrc *instruction, bool trai
case IrInstSrcIdErrToInt:
ir_print_err_to_int(irp, (IrInstSrcErrToInt *)instruction);
break;
case IrInstSrcIdCheckSwitchProngs:
ir_print_check_switch_prongs(irp, (IrInstSrcCheckSwitchProngs *)instruction);
case IrInstSrcIdCheckSwitchProngsUnderNo:
ir_print_check_switch_prongs(irp, (IrInstSrcCheckSwitchProngs *)instruction, false);
break;
case IrInstSrcIdCheckSwitchProngsUnderYes:
ir_print_check_switch_prongs(irp, (IrInstSrcCheckSwitchProngs *)instruction, true);
break;
case IrInstSrcIdCheckStatementIsVoid:
ir_print_check_statement_is_void(irp, (IrInstSrcCheckStatementIsVoid *)instruction);
@ -2900,6 +2930,12 @@ static void ir_print_inst_src(IrPrintSrc *irp, IrInstSrc *instruction, bool trai
case IrInstSrcIdPtrType:
ir_print_ptr_type(irp, (IrInstSrcPtrType *)instruction);
break;
case IrInstSrcIdPtrTypeSimple:
ir_print_ptr_type_simple(irp, (IrInstSrcPtrTypeSimple *)instruction, false);
break;
case IrInstSrcIdPtrTypeSimpleConst:
ir_print_ptr_type_simple(irp, (IrInstSrcPtrTypeSimple *)instruction, true);
break;
case IrInstSrcIdDeclRef:
ir_print_decl_ref(irp, (IrInstSrcDeclRef *)instruction);
break;
@ -2942,8 +2978,11 @@ static void ir_print_inst_src(IrPrintSrc *irp, IrInstSrc *instruction, bool trai
case IrInstSrcIdSetAlignStack:
ir_print_set_align_stack(irp, (IrInstSrcSetAlignStack *)instruction);
break;
case IrInstSrcIdArgType:
ir_print_arg_type(irp, (IrInstSrcArgType *)instruction);
case IrInstSrcIdArgTypeAllowVarFalse:
ir_print_arg_type(irp, (IrInstSrcArgType *)instruction, false);
break;
case IrInstSrcIdArgTypeAllowVarTrue:
ir_print_arg_type(irp, (IrInstSrcArgType *)instruction, true);
break;
case IrInstSrcIdExport:
ir_print_export(irp, (IrInstSrcExport *)instruction);

@ -11,6 +11,7 @@ const math = std.math;
const ast = @import("translate_c/ast.zig");
const Node = ast.Node;
const Tag = Node.Tag;
const c_builtins = std.c.builtins;

const CallingConvention = std.builtin.CallingConvention;

@ -635,7 +636,7 @@ fn visitVarDecl(c: *Context, var_decl: *const clang.VarDecl, mangled_name: ?[]co
if (has_init) trans_init: {
if (decl_init) |expr| {
const node_or_error = if (expr.getStmtClass() == .StringLiteralClass)
transStringLiteralAsArray(c, scope, @ptrCast(*const clang.StringLiteral, expr), zigArraySize(c, type_node) catch 0)
transStringLiteralInitializer(c, scope, @ptrCast(*const clang.StringLiteral, expr), type_node)
else
transExprCoercing(c, scope, expr, .used);
init_node = node_or_error catch |err| switch (err) {
@ -1058,6 +1059,10 @@ fn transStmt(
const compound_literal = @ptrCast(*const clang.CompoundLiteralExpr, stmt);
return transExpr(c, scope, compound_literal.getInitializer(), result_used);
},
.GenericSelectionExprClass => {
const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, stmt);
return transExpr(c, scope, gen_sel.getResultExpr(), result_used);
},
else => {
return fail(c, error.UnsupportedTranslation, stmt.getBeginLoc(), "TODO implement translation of stmt class {s}", .{@tagName(sc)});
},
@ -1407,7 +1412,7 @@ fn transDeclStmtOne(

var init_node = if (decl_init) |expr|
if (expr.getStmtClass() == .StringLiteralClass)
try transStringLiteralAsArray(c, scope, @ptrCast(*const clang.StringLiteral, expr), try zigArraySize(c, type_node))
try transStringLiteralInitializer(c, scope, @ptrCast(*const clang.StringLiteral, expr), type_node)
else
try transExprCoercing(c, scope, expr, .used)
else
@ -1522,7 +1527,7 @@ fn transImplicitCastExpr(
return maybeSuppressResult(c, scope, result_used, ne);
},
.BuiltinFnToFnPtr => {
return transExpr(c, scope, sub_expr, result_used);
return transBuiltinFnExpr(c, scope, sub_expr, result_used);
},
.ToVoid => {
// Should only appear in the rhs and lhs of a ConditionalOperator
@ -1538,6 +1543,22 @@ fn transImplicitCastExpr(
}
}

fn isBuiltinDefined(name: []const u8) bool {
inline for (std.meta.declarations(c_builtins)) |decl| {
if (std.mem.eql(u8, name, decl.name)) return true;
}
return false;
}

fn transBuiltinFnExpr(c: *Context, scope: *Scope, expr: *const clang.Expr, used: ResultUsed) TransError!Node {
const node = try transExpr(c, scope, expr, used);
if (node.castTag(.identifier)) |ident| {
const name = ident.data;
if (!isBuiltinDefined(name)) return fail(c, error.UnsupportedTranslation, expr.getBeginLoc(), "TODO implement function '{s}' in std.c.builtins", .{name});
}
return node;
}

fn transBoolExpr(
c: *Context,
scope: *Scope,
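
For reference, the new isBuiltinDefined check is a comptime scan over the declarations of std.c.builtins. A minimal self-contained sketch of the same pattern (not part of the diff; the container below is made up, and the 0.8-era std.meta/std.testing APIs used elsewhere in this diff are assumed):

const std = @import("std");

// Stand-in for std.c.builtins: only this builtin is "implemented" here.
const example_builtins = struct {
    pub fn __builtin_abs(val: c_int) c_int {
        return if (val < 0) -val else val;
    }
};

// Same membership test as isBuiltinDefined, generalized over the container type.
fn isDefinedIn(comptime container: type, name: []const u8) bool {
    inline for (std.meta.declarations(container)) |decl| {
        if (std.mem.eql(u8, name, decl.name)) return true;
    }
    return false;
}

test "builtin membership scan" {
    std.testing.expect(isDefinedIn(example_builtins, "__builtin_abs"));
    std.testing.expect(!isDefinedIn(example_builtins, "__builtin_alloca_with_align"));
}

Unrecognized __builtin_ identifiers are rejected the same way in both the AST path (transBuiltinFnExpr) and the macro parser further down, so the error message stays consistent.
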
@ -1582,6 +1603,10 @@ fn exprIsNarrowStringLiteral(expr: *const clang.Expr) bool {
const op_expr = @ptrCast(*const clang.ParenExpr, expr).getSubExpr();
return exprIsNarrowStringLiteral(op_expr);
},
.GenericSelectionExprClass => {
const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr);
return exprIsNarrowStringLiteral(gen_sel.getResultExpr());
},
else => return false,
}
}
@ -1733,6 +1758,20 @@ fn transReturnStmt(
return Tag.@"return".create(c.arena, rhs);
}

fn transNarrowStringLiteral(
c: *Context,
scope: *Scope,
stmt: *const clang.StringLiteral,
result_used: ResultUsed,
) TransError!Node {
var len: usize = undefined;
const bytes_ptr = stmt.getString_bytes_begin_size(&len);

const str = try std.fmt.allocPrint(c.arena, "\"{}\"", .{std.zig.fmtEscapes(bytes_ptr[0..len])});
const node = try Tag.string_literal.create(c.arena, str);
return maybeSuppressResult(c, scope, result_used, node);
}

fn transStringLiteral(
c: *Context,
scope: *Scope,
@ -1741,19 +1780,14 @@ fn transStringLiteral(
) TransError!Node {
const kind = stmt.getKind();
switch (kind) {
.Ascii, .UTF8 => {
var len: usize = undefined;
const bytes_ptr = stmt.getString_bytes_begin_size(&len);

const str = try std.fmt.allocPrint(c.arena, "\"{}\"", .{std.zig.fmtEscapes(bytes_ptr[0..len])});
const node = try Tag.string_literal.create(c.arena, str);
return maybeSuppressResult(c, scope, result_used, node);
},
.Ascii, .UTF8 => return transNarrowStringLiteral(c, scope, stmt, result_used),
.UTF16, .UTF32, .Wide => {
const str_type = @tagName(stmt.getKind());
const name = try std.fmt.allocPrint(c.arena, "zig.{s}_string_{d}", .{ str_type, c.getMangle() });
const lit_array = try transStringLiteralAsArray(c, scope, stmt, stmt.getLength() + 1);

const expr_base = @ptrCast(*const clang.Expr, stmt);
const array_type = try transQualTypeInitialized(c, scope, expr_base.getType(), expr_base, expr_base.getBeginLoc());
const lit_array = try transStringLiteralInitializer(c, scope, stmt, array_type);
const decl = try Tag.var_simple.create(c.arena, .{ .name = name, .init = lit_array });
try scope.appendNode(decl);
const node = try Tag.identifier.create(c.arena, name);
@ -1762,52 +1796,67 @@ fn transStringLiteral(
}
}

/// Parse the size of an array back out from an ast Node.
fn zigArraySize(c: *Context, node: Node) TransError!usize {
if (node.castTag(.array_type)) |array| {
return array.data.len;
}
return error.UnsupportedTranslation;
fn getArrayPayload(array_type: Node) ast.Payload.Array.ArrayTypeInfo {
return (array_type.castTag(.array_type) orelse array_type.castTag(.null_sentinel_array_type).?).data;
}

/// Translate a string literal to an array of integers. Used when an
/// array is initialized from a string literal. `array_size` is the
/// size of the array being initialized. If the string literal is larger
/// than the array, truncate the string. If the array is larger than the
/// string literal, pad the array with 0's
fn transStringLiteralAsArray(
/// Translate a string literal that is initializing an array. In general narrow string
/// literals become `"<string>".*` or `"<string>"[0..<size>].*` if they need truncation.
/// Wide string literals become an array of integers. zero-fillers pad out the array to
/// the appropriate length, if necessary.
fn transStringLiteralInitializer(
c: *Context,
scope: *Scope,
stmt: *const clang.StringLiteral,
array_size: usize,
array_type: Node,
) TransError!Node {
if (array_size == 0) return error.UnsupportedType;
assert(array_type.tag() == .array_type or array_type.tag() == .null_sentinel_array_type);

const is_narrow = stmt.getKind() == .Ascii or stmt.getKind() == .UTF8;

const str_length = stmt.getLength();
const payload = getArrayPayload(array_type);
const array_size = payload.len;
const elem_type = payload.elem_type;

const expr_base = @ptrCast(*const clang.Expr, stmt);
const ty = expr_base.getType().getTypePtr();
const const_arr_ty = @ptrCast(*const clang.ConstantArrayType, ty);
if (array_size == 0) return Tag.empty_array.create(c.arena, elem_type);

const elem_type = try transQualType(c, scope, const_arr_ty.getElementType(), expr_base.getBeginLoc());
const arr_type = try Tag.array_type.create(c.arena, .{ .len = array_size, .elem_type = elem_type });
const init_list = try c.arena.alloc(Node, array_size);
const num_inits = math.min(str_length, array_size);
const init_node = if (num_inits > 0) blk: {
if (is_narrow) {
// "string literal".* or "string literal"[0..num_inits].*
var str = try transNarrowStringLiteral(c, scope, stmt, .used);
if (str_length != array_size) str = try Tag.string_slice.create(c.arena, .{ .string = str, .end = num_inits });
break :blk try Tag.deref.create(c.arena, str);
} else {
const init_list = try c.arena.alloc(Node, num_inits);
var i: c_uint = 0;
while (i < num_inits) : (i += 1) {
init_list[i] = try transCreateCharLitNode(c, false, stmt.getCodeUnit(i));
}
const init_args = .{ .len = num_inits, .elem_type = elem_type };
const init_array_type = try if (array_type.tag() == .array_type) Tag.array_type.create(c.arena, init_args) else Tag.null_sentinel_array_type.create(c.arena, init_args);
break :blk try Tag.array_init.create(c.arena, .{
.cond = init_array_type,
.cases = init_list,
});
}
} else null;

var i: c_uint = 0;
const kind = stmt.getKind();
const narrow = kind == .Ascii or kind == .UTF8;
while (i < str_length and i < array_size) : (i += 1) {
const code_unit = stmt.getCodeUnit(i);
init_list[i] = try transCreateCharLitNode(c, narrow, code_unit);
}
while (i < array_size) : (i += 1) {
init_list[i] = try transCreateNodeNumber(c, 0, .int);
}
if (num_inits == array_size) return init_node.?; // init_node is only null if num_inits == 0; but if num_inits == array_size == 0 we've already returned
assert(array_size > str_length); // If array_size <= str_length, `num_inits == array_size` and we've already returned.

return Tag.array_init.create(c.arena, .{
.cond = arr_type,
.cases = init_list,
const filler_node = try Tag.array_filler.create(c.arena, .{
.type = elem_type,
.filler = Tag.zero_literal.init(),
.count = array_size - str_length,
});

if (init_node) |some| {
return Tag.array_cat.create(c.arena, .{ .lhs = some, .rhs = filler_node });
} else {
return filler_node;
}
}

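The doc comment above summarizes the new initializer strategy; the concrete shapes it produces can be read off the translate-c test expectations near the end of this diff. A small illustration, with the declarations copied from those tests (not part of the diff itself):

// Narrow literals: deref the string literal, slicing first when it must be truncated.
pub export var no_null: [3]u8 = "abc".*; // exact fit
pub export var truncated: [1]u8 = "abc"[0..1].*; // literal longer than the array
// Array longer than the literal: concatenate a zero filler.
pub export var extend: [5]u8 = "a"[0..1].* ++ [1]u8{0} ** 4;
// Wide literals: an array of integer code units instead of a string literal.
pub export var truncated_w: [1]u32 = [1]u32{
    '\u{1f4af}',
};
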
/// determine whether `stmt` is a "pointer subtraction expression" - a subtraction where
@ -1836,6 +1885,7 @@ fn cIntTypeForEnum(enum_qt: clang.QualType) clang.QualType {
return enum_decl.getIntegerType();
}

// when modifying this function, make sure to also update std.meta.cast
fn transCCast(
c: *Context,
scope: *Scope,
@ -2725,6 +2775,10 @@ fn cIsFunctionDeclRef(expr: *const clang.Expr) bool {
const opcode = un_op.getOpcode();
return (opcode == .AddrOf or opcode == .Deref) and cIsFunctionDeclRef(un_op.getSubExpr());
},
.GenericSelectionExprClass => {
const gen_sel = @ptrCast(*const clang.GenericSelectionExpr, expr);
return cIsFunctionDeclRef(gen_sel.getResultExpr());
},
else => return false,
}
}
@ -3194,11 +3248,11 @@ fn transFloatingLiteral(c: *Context, scope: *Scope, stmt: *const clang.FloatingL
var dbl = stmt.getValueAsApproximateDouble();
const is_negative = dbl < 0;
if (is_negative) dbl = -dbl;
const str = try std.fmt.allocPrint(c.arena, "{d}", .{dbl});
var node = if (dbl == std.math.floor(dbl))
try Tag.integer_literal.create(c.arena, str)
const str = if (dbl == std.math.floor(dbl))
try std.fmt.allocPrint(c.arena, "{d}.0", .{dbl})
else
try Tag.float_literal.create(c.arena, str);
try std.fmt.allocPrint(c.arena, "{d}", .{dbl});
var node = try Tag.float_literal.create(c.arena, str);
if (is_negative) node = try Tag.negate.create(c.arena, node);
return maybeSuppressResult(c, scope, used, node);
}
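
With this change every C floating literal renders as a Zig float literal, appending ".0" when the value is whole so the token does not parse as an integer. A minimal sketch of that formatting rule (not the diff's code; renderFloat is a hypothetical helper):

const std = @import("std");

fn renderFloat(buf: []u8, dbl: f64) ![]const u8 {
    // Whole-valued doubles get a trailing ".0" so the emitted token stays a float literal.
    return if (dbl == std.math.floor(dbl))
        std.fmt.bufPrint(buf, "{d}.0", .{dbl})
    else
        std.fmt.bufPrint(buf, "{d}", .{dbl});
}

The test expectation change from "pub export var b: f32 = 2;" to "= 2.0;" later in this diff reflects the same rule.
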
@ -3312,9 +3366,8 @@ fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: Node) !void {
try c.global_scope.nodes.append(decl_node);
}

/// Translate a qual type for a variable with an initializer. The initializer
/// only matters for incomplete arrays, since the size of the array is determined
/// by the size of the initializer
/// Translate a qualtype for a variable with an initializer. This only matters
/// for incomplete arrays, since the initializer determines the size of the array.
fn transQualTypeInitialized(
c: *Context,
scope: *Scope,
@ -3330,9 +3383,14 @@ fn transQualTypeInitialized(
switch (decl_init.getStmtClass()) {
.StringLiteralClass => {
const string_lit = @ptrCast(*const clang.StringLiteral, decl_init);
const string_lit_size = string_lit.getLength() + 1; // +1 for null terminator
const string_lit_size = string_lit.getLength();
const array_size = @intCast(usize, string_lit_size);
return Tag.array_type.create(c.arena, .{ .len = array_size, .elem_type = elem_ty });

// incomplete array initialized with empty string, will be translated as [1]T{0}
// see https://github.com/ziglang/zig/issues/8256
if (array_size == 0) return Tag.array_type.create(c.arena, .{ .len = 1, .elem_type = elem_ty });

return Tag.null_sentinel_array_type.create(c.arena, .{ .len = array_size, .elem_type = elem_ty });
},
.InitListExprClass => {
const init_expr = @ptrCast(*const clang.InitListExpr, decl_init);
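
This branch decides the translated type of an incomplete array initialized from a string literal; the expected results appear in the "null sentinel arrays" test case near the end of this diff. Illustration, with declarations copied from that test (not part of the diff itself):

// char incomplete[] = "abc"; -> null-terminated array sized from the literal
pub export var incomplete: [3:0]u8 = "abc".*;
// char empty_incomplete[] = ""; -> [1]T zero-filled (see the issue #8256 comment above)
pub export var empty_incomplete: [1]u8 = [1]u8{0} ** 1;
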
@ -4746,6 +4804,10 @@ fn parseCPrimaryExprInner(c: *Context, m: *MacroCtx, scope: *Scope) ParseError!N
},
.Identifier => {
const mangled_name = scope.getAlias(slice);
if (mem.startsWith(u8, mangled_name, "__builtin_") and !isBuiltinDefined(mangled_name)) {
try m.fail(c, "TODO implement function '{s}' in std.c.builtins", .{mangled_name});
return error.ParseError;
}
return Tag.identifier.create(c.arena, builtin_typedef_map.get(mangled_name) orelse mangled_name);
},
.LParen => {

@ -40,6 +40,8 @@ pub const Node = extern union {
string_literal,
char_literal,
enum_literal,
/// "string"[0..end]
string_slice,
identifier,
@"if",
/// if (!operand) break;
@ -176,6 +178,7 @@ pub const Node = extern union {
c_pointer,
single_pointer,
array_type,
null_sentinel_array_type,

/// @import("std").meta.sizeof(operand)
std_meta_sizeof,
@ -334,7 +337,7 @@ pub const Node = extern union {
.std_meta_promoteIntLiteral => Payload.PromoteIntLiteral,
.block => Payload.Block,
.c_pointer, .single_pointer => Payload.Pointer,
.array_type => Payload.Array,
.array_type, .null_sentinel_array_type => Payload.Array,
.arg_redecl, .alias, .fail_decl => Payload.ArgRedecl,
.log2_int_type => Payload.Log2IntType,
.var_simple, .pub_var_simple => Payload.SimpleVarDecl,
@ -342,6 +345,7 @@ pub const Node = extern union {
.array_filler => Payload.ArrayFiller,
.pub_inline_fn => Payload.PubInlineFn,
.field_access => Payload.FieldAccess,
.string_slice => Payload.StringSlice,
};
}

@ -584,10 +588,12 @@ pub const Payload = struct {

pub const Array = struct {
base: Payload,
data: struct {
data: ArrayTypeInfo,

pub const ArrayTypeInfo = struct {
elem_type: Node,
len: usize,
},
};
};

pub const Pointer = struct {
@ -664,6 +670,14 @@ pub const Payload = struct {
radix: Node,
},
};

pub const StringSlice = struct {
base: Payload,
data: struct {
string: Node,
end: usize,
},
};
};

/// Converts the nodes into a Zig ast.
@ -1015,6 +1029,36 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
.data = undefined,
});
},
.string_slice => {
const payload = node.castTag(.string_slice).?.data;

const string = try renderNode(c, payload.string);
const l_bracket = try c.addToken(.l_bracket, "[");
const start = try c.addNode(.{
.tag = .integer_literal,
.main_token = try c.addToken(.integer_literal, "0"),
.data = undefined,
});
_ = try c.addToken(.ellipsis2, "..");
const end = try c.addNode(.{
.tag = .integer_literal,
.main_token = try c.addTokenFmt(.integer_literal, "{d}", .{payload.end}),
.data = undefined,
});
_ = try c.addToken(.r_bracket, "]");

return c.addNode(.{
.tag = .slice,
.main_token = l_bracket,
.data = .{
.lhs = string,
.rhs = try c.addExtra(std.zig.ast.Node.Slice{
.start = start,
.end = end,
}),
},
});
},
.fail_decl => {
const payload = node.castTag(.fail_decl).?.data;
// pub const name = @compileError(msg);
@ -1581,6 +1625,10 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
const payload = node.castTag(.array_type).?.data;
return renderArrayType(c, payload.len, payload.elem_type);
},
.null_sentinel_array_type => {
const payload = node.castTag(.null_sentinel_array_type).?.data;
return renderNullSentinelArrayType(c, payload.len, payload.elem_type);
},
.array_filler => {
const payload = node.castTag(.array_filler).?.data;

@ -1946,6 +1994,36 @@ fn renderArrayType(c: *Context, len: usize, elem_type: Node) !NodeIndex {
});
}

fn renderNullSentinelArrayType(c: *Context, len: usize, elem_type: Node) !NodeIndex {
const l_bracket = try c.addToken(.l_bracket, "[");
const len_expr = try c.addNode(.{
.tag = .integer_literal,
.main_token = try c.addTokenFmt(.integer_literal, "{d}", .{len}),
.data = undefined,
});
_ = try c.addToken(.colon, ":");

const sentinel_expr = try c.addNode(.{
.tag = .integer_literal,
.main_token = try c.addToken(.integer_literal, "0"),
.data = undefined,
});

_ = try c.addToken(.r_bracket, "]");
const elem_type_expr = try renderNode(c, elem_type);
return c.addNode(.{
.tag = .array_type_sentinel,
.main_token = l_bracket,
.data = .{
.lhs = len_expr,
.rhs = try c.addExtra(std.zig.ast.Node.ArrayTypeSentinel{
.sentinel = sentinel_expr,
.elem_type = elem_type_expr,
}),
},
});
}

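renderNullSentinelArrayType emits the "[len:0]T" form used by the new null_sentinel_array_type node. What that looks like in rendered output, taken verbatim from the translate-c test expectations below:

pub export var incomplete_w: [3:0]u32 = [3:0]u32{
    '\u{1f4af}',
    '\u{1f4af}',
    '\u{1f4af}',
};
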
fn addSemicolonIfNeeded(c: *Context, node: Node) !void {
switch (node.tag()) {
.warning => unreachable,
@ -2014,6 +2092,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.integer_literal,
.float_literal,
.string_literal,
.string_slice,
.char_literal,
.enum_literal,
.identifier,
@ -2035,6 +2114,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.func,
.call,
.array_type,
.null_sentinel_array_type,
.bool_to_int,
.div_exact,
.byte_offset_of,

@ -2445,6 +2445,11 @@ struct ZigClangQualType ZigClangFunctionType_getReturnType(const struct ZigClang
return bitcast(casted->getReturnType());
}

const struct ZigClangExpr *ZigClangGenericSelectionExpr_getResultExpr(const struct ZigClangGenericSelectionExpr *self) {
auto casted = reinterpret_cast<const clang::GenericSelectionExpr *>(self);
return reinterpret_cast<const struct ZigClangExpr *>(casted->getResultExpr());
}

bool ZigClangFunctionProtoType_isVariadic(const struct ZigClangFunctionProtoType *self) {
auto casted = reinterpret_cast<const clang::FunctionProtoType *>(self);
return casted->isVariadic();

@ -1116,6 +1116,8 @@ ZIG_EXTERN_C bool ZigClangFunctionType_getNoReturnAttr(const struct ZigClangFunc
ZIG_EXTERN_C enum ZigClangCallingConv ZigClangFunctionType_getCallConv(const struct ZigClangFunctionType *self);
ZIG_EXTERN_C struct ZigClangQualType ZigClangFunctionType_getReturnType(const struct ZigClangFunctionType *self);

ZIG_EXTERN_C const struct ZigClangExpr *ZigClangGenericSelectionExpr_getResultExpr(const struct ZigClangGenericSelectionExpr *self);

ZIG_EXTERN_C bool ZigClangFunctionProtoType_isVariadic(const struct ZigClangFunctionProtoType *self);
ZIG_EXTERN_C unsigned ZigClangFunctionProtoType_getNumParams(const struct ZigClangFunctionProtoType *self);
ZIG_EXTERN_C struct ZigClangQualType ZigClangFunctionProtoType_getParamType(const struct ZigClangFunctionProtoType *self, unsigned i);

test/cli.zig
@ -28,6 +28,8 @@ pub fn main() !void {
const zig_exe = try fs.path.resolve(a, &[_][]const u8{zig_exe_rel});

const dir_path = try fs.path.join(a, &[_][]const u8{ cache_root, "clitest" });
defer fs.cwd().deleteTree(dir_path) catch {};

const TestFn = fn ([]const u8, []const u8) anyerror!void;
const test_fns = [_]TestFn{
testZigInitLib,
@ -174,4 +176,13 @@ fn testZigFmt(zig_exe: []const u8, dir_path: []const u8) !void {
const run_result3 = try exec(dir_path, true, &[_][]const u8{ zig_exe, "fmt", dir_path });
// both files have been formatted, nothing should change now
testing.expect(run_result3.stdout.len == 0);

// Check UTF-16 decoding
const fmt4_zig_path = try fs.path.join(a, &[_][]const u8{ dir_path, "fmt4.zig" });
var unformatted_code_utf16 = "\xff\xfe \x00 \x00 \x00 \x00/\x00/\x00 \x00n\x00o\x00 \x00r\x00e\x00a\x00s\x00o\x00n\x00";
try fs.cwd().writeFile(fmt4_zig_path, unformatted_code_utf16);

const run_result4 = try exec(dir_path, true, &[_][]const u8{ zig_exe, "fmt", dir_path });
testing.expect(std.mem.startsWith(u8, run_result4.stdout, fmt4_zig_path));
testing.expect(run_result4.stdout.len == fmt4_zig_path.len + 1 and run_result4.stdout[run_result4.stdout.len - 1] == '\n');
}

@ -3,6 +3,17 @@ const tests = @import("tests.zig");
const nl = std.cstr.line_sep;

pub fn addCases(cases: *tests.RunTranslatedCContext) void {
cases.add("division of floating literals",
\\#define _NO_CRT_STDIO_INLINE 1
\\#include <stdio.h>
\\#define PI 3.14159265358979323846f
\\#define DEG2RAD (PI/180.0f)
\\int main(void) {
\\    printf("DEG2RAD is: %f\n", DEG2RAD);
\\    return 0;
\\}
, "DEG2RAD is: 0.017453" ++ nl);

cases.add("use global scope for record/enum/typedef type transalation if needed",
\\void bar(void);
\\void baz(void);
@ -1187,4 +1198,50 @@ pub fn addCases(cases: *tests.RunTranslatedCContext) void {
\\    return 0;
\\}
, "");

cases.add("Generic selections",
\\#include <stdlib.h>
\\#include <string.h>
\\#include <stdint.h>
\\#define my_generic_fn(X) _Generic((X), \
\\              int: abs, \
\\              char *: strlen, \
\\              size_t: malloc, \
\\              default: free \
\\)(X)
\\#define my_generic_val(X) _Generic((X), \
\\              int: 1, \
\\              const char *: "bar" \
\\)
\\int main(void) {
\\    if (my_generic_val(100) != 1) abort();
\\
\\    const char *foo = "foo";
\\    const char *bar = my_generic_val(foo);
\\    if (strcmp(bar, "bar") != 0) abort();
\\
\\    if (my_generic_fn(-42) != 42) abort();
\\    if (my_generic_fn("hello") != 5) abort();
\\
\\    size_t size = 8192;
\\    uint8_t *mem = my_generic_fn(size);
\\    memset(mem, 42, size);
\\    if (mem[size - 1] != 42) abort();
\\    my_generic_fn(mem);
\\
\\    return 0;
\\}
, "");

// See __builtin_alloca_with_align comment in std.c.builtins
cases.add("use of unimplemented builtin in unused function does not prevent compilation",
\\#include <stdlib.h>
\\void unused() {
\\    __builtin_alloca_with_align(1, 8);
\\}
\\int main(void) {
\\    if (__builtin_sqrt(1.0) != 1.0) abort();
\\    return 0;
\\}
, "");
}

@ -51,7 +51,7 @@ pub fn addCases(ctx: *TestContext) !void {
\\    _ = printf("Hello, %s!\n", "world");
\\    return 0;
\\}
, "Hello, world!\n");
, "Hello, world!" ++ std.cstr.line_sep);
}

{

@ -9,7 +9,10 @@ pub fn addCases(cases: *tests.StandaloneContext) void {
cases.add("test/standalone/main_return_error/error_u8.zig");
cases.add("test/standalone/main_return_error/error_u8_non_zero.zig");
cases.addBuildFile("test/standalone/main_pkg_path/build.zig");
cases.addBuildFile("test/standalone/shared_library/build.zig");
if (std.Target.current.os.tag != .macos) {
// TODO zld cannot link shared libraries yet.
cases.addBuildFile("test/standalone/shared_library/build.zig");
}
cases.addBuildFile("test/standalone/mix_o_files/build.zig");
cases.addBuildFile("test/standalone/global_linkage/build.zig");
cases.addBuildFile("test/standalone/static_c_lib/build.zig");

@ -745,14 +745,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\    static const char v2[] = "2.2.2";
\\}
, &[_][]const u8{
\\const v2: [6]u8 = [6]u8{
\\    '2',
\\    '.',
\\    '2',
\\    '.',
\\    '2',
\\    0,
\\};
\\const v2: [5:0]u8 = "2.2.2".*;
\\pub export fn foo() void {}
});

@ -1600,30 +1593,9 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\static char arr1[] = "hello";
\\char arr2[] = "hello";
, &[_][]const u8{
\\pub export var arr0: [6]u8 = [6]u8{
\\    'h',
\\    'e',
\\    'l',
\\    'l',
\\    'o',
\\    0,
\\};
\\pub var arr1: [6]u8 = [6]u8{
\\    'h',
\\    'e',
\\    'l',
\\    'l',
\\    'o',
\\    0,
\\};
\\pub export var arr2: [6]u8 = [6]u8{
\\    'h',
\\    'e',
\\    'l',
\\    'l',
\\    'o',
\\    0,
\\};
\\pub export var arr0: [5:0]u8 = "hello".*;
\\pub var arr1: [5:0]u8 = "hello".*;
\\pub export var arr2: [5:0]u8 = "hello".*;
});

cases.add("array initializer expr",
@ -2456,7 +2428,7 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\    b: c_int,
\\};
\\pub extern var a: struct_Foo;
\\pub export var b: f32 = 2;
\\pub export var b: f32 = 2.0;
\\pub export fn foo() void {
\\    var c: [*c]struct_Foo = undefined;
\\    _ = a.b;
@ -3020,17 +2992,17 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub extern fn fn_bool(x: bool) void;
\\pub extern fn fn_ptr(x: ?*c_void) void;
\\pub export fn call() void {
\\    fn_int(@floatToInt(c_int, 3));
\\    fn_int(@floatToInt(c_int, 3));
\\    fn_int(@floatToInt(c_int, 3));
\\    fn_int(@floatToInt(c_int, 3.0));
\\    fn_int(@floatToInt(c_int, 3.0));
\\    fn_int(@floatToInt(c_int, 3.0));
\\    fn_int(@as(c_int, 1094861636));
\\    fn_f32(@intToFloat(f32, @as(c_int, 3)));
\\    fn_f64(@intToFloat(f64, @as(c_int, 3)));
\\    fn_char(@bitCast(u8, @truncate(i8, @as(c_int, '3'))));
\\    fn_char(@bitCast(u8, @truncate(i8, @as(c_int, '\x01'))));
\\    fn_char(@bitCast(u8, @truncate(i8, @as(c_int, 0))));
\\    fn_f32(3);
\\    fn_f64(3);
\\    fn_f32(3.0);
\\    fn_f64(3.0);
\\    fn_bool(@as(c_int, 123) != 0);
\\    fn_bool(@as(c_int, 0) != 0);
\\    fn_bool(@ptrToInt(fn_int) != 0);
@ -3418,4 +3390,56 @@ pub fn addCases(cases: *tests.TranslateCContext) void {
\\pub const MAY_NEED_PROMOTION_HEX = @import("std").meta.promoteIntLiteral(c_int, 0x80000000, .hexadecimal);
\\pub const MAY_NEED_PROMOTION_OCT = @import("std").meta.promoteIntLiteral(c_int, 0o20000000000, .octal);
});

// See __builtin_alloca_with_align comment in std.c.builtins
cases.add("demote un-implemented builtins",
\\#define FOO(X) __builtin_alloca_with_align((X), 8)
, &[_][]const u8{
\\pub const FOO = @compileError("TODO implement function '__builtin_alloca_with_align' in std.c.builtins");
});

cases.add("null sentinel arrays when initialized from string literal. Issue #8256",
\\#include <stdint.h>
\\char zero[0] = "abc";
\\uint32_t zero_w[0] = U"💯💯💯";
\\char empty_incomplete[] = "";
\\uint32_t empty_incomplete_w[] = U"";
\\char empty_constant[100] = "";
\\uint32_t empty_constant_w[100] = U"";
\\char incomplete[] = "abc";
\\uint32_t incomplete_w[] = U"💯💯💯";
\\char truncated[1] = "abc";
\\uint32_t truncated_w[1] = U"💯💯💯";
\\char extend[5] = "a";
\\uint32_t extend_w[5] = U"💯";
\\char no_null[3] = "abc";
\\uint32_t no_null_w[3] = U"💯💯💯";
, &[_][]const u8{
\\pub export var zero: [0]u8 = [0]u8{};
\\pub export var zero_w: [0]u32 = [0]u32{};
\\pub export var empty_incomplete: [1]u8 = [1]u8{0} ** 1;
\\pub export var empty_incomplete_w: [1]u32 = [1]u32{0} ** 1;
\\pub export var empty_constant: [100]u8 = [1]u8{0} ** 100;
\\pub export var empty_constant_w: [100]u32 = [1]u32{0} ** 100;
\\pub export var incomplete: [3:0]u8 = "abc".*;
\\pub export var incomplete_w: [3:0]u32 = [3:0]u32{
\\    '\u{1f4af}',
\\    '\u{1f4af}',
\\    '\u{1f4af}',
\\};
\\pub export var truncated: [1]u8 = "abc"[0..1].*;
\\pub export var truncated_w: [1]u32 = [1]u32{
\\    '\u{1f4af}',
\\};
\\pub export var extend: [5]u8 = "a"[0..1].* ++ [1]u8{0} ** 4;
\\pub export var extend_w: [5]u32 = [1]u32{
\\    '\u{1f4af}',
\\} ++ [1]u32{0} ** 4;
\\pub export var no_null: [3]u8 = "abc".*;
\\pub export var no_null_w: [3]u32 = [3]u32{
\\    '\u{1f4af}',
\\    '\u{1f4af}',
\\    '\u{1f4af}',
\\};
});
}
