Merge remote-tracking branch 'origin/master' into llvm15

Andrew Kelley 2022-08-30 13:02:43 -07:00
commit 1e21876de2
58 changed files with 2987 additions and 1698 deletions


@ -753,6 +753,9 @@ set(ZIG_STAGE2_SOURCES
"${CMAKE_SOURCE_DIR}/src/link.zig"
"${CMAKE_SOURCE_DIR}/src/link/C.zig"
"${CMAKE_SOURCE_DIR}/src/link/Coff.zig"
"${CMAKE_SOURCE_DIR}/src/link/Coff/Atom.zig"
"${CMAKE_SOURCE_DIR}/src/link/Coff/Object.zig"
"${CMAKE_SOURCE_DIR}/src/link/Coff/lld.zig"
"${CMAKE_SOURCE_DIR}/src/link/Elf.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO.zig"
"${CMAKE_SOURCE_DIR}/src/link/MachO/Archive.zig"


@ -49,6 +49,7 @@ unset CXX
make $JOBS install
stage3-release/bin/zig build test docs \
--zig-lib-dir "$(pwd)/../lib" \
-Denable-macos-sdk \
-Dstatic-llvm \
--search-prefix "$PREFIX"


@ -7,7 +7,8 @@ INSTALL_PREFIX="$DRONE_WORKSPACE/stage3-release"
ZIG="$INSTALL_PREFIX/bin/zig"
export ZIG_GLOBAL_CACHE_DIR="$DRONE_WORKSPACE/zig-cache"
$ZIG build test-behavior -Dskip-non-native
$ZIG build test-compiler-rt -Dskip-non-native
$ZIG build test-fmt
$ZIG build docs
# https://github.com/ziglang/zig/issues/12689
# $ZIG build test-behavior -Dskip-non-native --zig-lib-dir lib
$ZIG build test-compiler-rt -Dskip-non-native --zig-lib-dir lib
$ZIG build test-fmt --zig-lib-dir lib
$ZIG build docs --zig-lib-dir lib


@ -7,5 +7,6 @@ INSTALL_PREFIX="$DRONE_WORKSPACE/stage3-release"
ZIG="$INSTALL_PREFIX/bin/zig"
export ZIG_GLOBAL_CACHE_DIR="$DRONE_WORKSPACE/zig-cache"
$ZIG build -Dskip-non-native # test building self-hosted without LLVM
$ZIG build -Dskip-non-native test-cases
# test building self-hosted without LLVM
$ZIG build -Dskip-non-native --zig-lib-dir lib
$ZIG build test-cases -Dskip-non-native --zig-lib-dir lib


@ -7,10 +7,11 @@ INSTALL_PREFIX="$DRONE_WORKSPACE/stage3-release"
ZIG="$INSTALL_PREFIX/bin/zig"
export ZIG_GLOBAL_CACHE_DIR="$DRONE_WORKSPACE/zig-cache"
$ZIG build test-universal-libc -Dskip-non-native
$ZIG build test-compare-output -Dskip-non-native
$ZIG build test-standalone -Dskip-non-native -Dskip-release-safe
$ZIG build test-stack-traces -Dskip-non-native
$ZIG build test-cli -Dskip-non-native
$ZIG build test-asm-link -Dskip-non-native
$ZIG build test-translate-c -Dskip-non-native
$ZIG build test-universal-libc -Dskip-non-native --zig-lib-dir lib
# https://github.com/ziglang/zig/issues/12689
# $ZIG build test-compare-output -Dskip-non-native --zig-lib-dir lib
$ZIG build test-standalone -Dskip-non-native --zig-lib-dir lib -Dskip-release-safe
$ZIG build test-stack-traces -Dskip-non-native --zig-lib-dir lib
$ZIG build test-cli -Dskip-non-native --zig-lib-dir lib
$ZIG build test-asm-link -Dskip-non-native --zig-lib-dir lib
$ZIG build test-translate-c -Dskip-non-native --zig-lib-dir lib


@ -7,4 +7,9 @@ INSTALL_PREFIX="$DRONE_WORKSPACE/stage3-release"
ZIG="$INSTALL_PREFIX/bin/zig"
export ZIG_GLOBAL_CACHE_DIR="$DRONE_WORKSPACE/zig-cache"
$ZIG build test-std -Dskip-release-safe -Dskip-release-fast -Dskip-release-small -Dskip-non-native
$ZIG build test-std \
--zig-lib-dir lib \
-Dskip-release-safe \
-Dskip-release-fast \
-Dskip-release-small \
-Dskip-non-native


@ -7,4 +7,10 @@ INSTALL_PREFIX="$DRONE_WORKSPACE/stage3-release"
ZIG="$INSTALL_PREFIX/bin/zig"
export ZIG_GLOBAL_CACHE_DIR="$DRONE_WORKSPACE/zig-cache"
$ZIG build test-std -Dskip-debug -Dskip-release-safe -Dskip-release-small -Dskip-non-native -Dskip-single-threaded
$ZIG build test-std \
--zig-lib-dir lib \
-Dskip-debug \
-Dskip-release-safe \
-Dskip-release-small \
-Dskip-non-native \
-Dskip-single-threaded


@ -7,4 +7,10 @@ INSTALL_PREFIX="$DRONE_WORKSPACE/stage3-release"
ZIG="$INSTALL_PREFIX/bin/zig"
export ZIG_GLOBAL_CACHE_DIR="$DRONE_WORKSPACE/zig-cache"
$ZIG build test-std -Dskip-debug -Dskip-release-fast -Dskip-release-small -Dskip-non-native -Dskip-single-threaded
$ZIG build test-std \
--zig-lib-dir lib \
-Dskip-debug \
-Dskip-release-fast \
-Dskip-release-small \
-Dskip-non-native \
-Dskip-single-threaded


@ -12,5 +12,5 @@ export ZIG_GLOBAL_CACHE_DIR="$DRONE_WORKSPACE/zig-cache"
# of ReleaseSmall std lib tests.
# $ZIG build test-std -Dskip-debug -Dskip-release-safe -Dskip-release-fast -Dskip-non-native
$ZIG test lib/std/std.zig -OReleaseSmall
$ZIG test lib/std/std.zig -OReleaseSmall -lc
$ZIG test lib/std/std.zig -OReleaseSmall --zig-lib-dir lib
$ZIG test lib/std/std.zig -OReleaseSmall -lc --zig-lib-dir lib


@ -56,6 +56,7 @@ ZIG_LIBC="$ZIG_LIBC_TXT" samu install
# Here we skip some tests to save time.
stage3/bin/zig build test docs \
--zig-lib-dir "$(pwd)/../lib" \
-Dstatic-llvm \
--search-prefix "$PREFIX" \
-Dskip-stage1 \


@ -16,5 +16,5 @@ SEARCH_PREFIX="/deps/$TARGET"
-Drelease \
-Dstrip \
-Dtarget="$TARGET" \
-Dmcpu="$MCPU" \
-Dcpu="$MCPU" \
-Denable-stage1


@ -55,7 +55,8 @@ stage3/bin/zig build test \
-fwasmtime \
-Dstatic-llvm \
-Dtarget=native-native-musl \
--search-prefix "$DEPS_LOCAL"
--search-prefix "$DEPS_LOCAL" \
--zig-lib-dir "$(pwd)/../lib"
# Explicit exit helps show last command duration.
exit


@ -40,13 +40,15 @@ ninja install
-fwasmtime \
-Dstatic-llvm \
-Dtarget=native-native-musl \
--search-prefix "$DEPS_LOCAL"
--search-prefix "$DEPS_LOCAL" \
--zig-lib-dir "$(pwd)/../lib"
# Produce the experimental std lib documentation.
mkdir -p "$RELEASE_STAGING/docs/std"
"$RELEASE_STAGING/bin/zig" test ../lib/std/std.zig \
-femit-docs=$RELEASE_STAGING/docs/std \
-fno-emit-bin
-fno-emit-bin \
--zig-lib-dir "$(pwd)/../lib"
cp ../LICENSE $RELEASE_STAGING/
cp ../zig-cache/langref.html $RELEASE_STAGING/docs/


@ -256,15 +256,53 @@ pub const OptionalHeaderPE64 = extern struct {
number_of_rva_and_sizes: u32,
};
pub const DebugDirectoryEntry = extern struct {
characteristiccs: u32,
time_date_stamp: u32,
major_version: u16,
minor_version: u16,
@"type": u32,
size_of_data: u32,
address_of_raw_data: u32,
pointer_to_raw_data: u32,
pub const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
pub const DirectoryEntry = enum(u16) {
/// Export Directory
EXPORT = 0,
/// Import Directory
IMPORT = 1,
/// Resource Directory
RESOURCE = 2,
/// Exception Directory
EXCEPTION = 3,
/// Security Directory
SECURITY = 4,
/// Base Relocation Table
BASERELOC = 5,
/// Debug Directory
DEBUG = 6,
/// Architecture Specific Data
ARCHITECTURE = 7,
/// RVA of GP
GLOBALPTR = 8,
/// TLS Directory
TLS = 9,
/// Load Configuration Directory
LOAD_CONFIG = 10,
/// Bound Import Directory in headers
BOUND_IMPORT = 11,
/// Import Address Table
IAT = 12,
/// Delay Load Import Descriptors
DELAY_IMPORT = 13,
/// COM Runtime descriptor
COM_DESCRIPTOR = 14,
};
pub const ImageDataDirectory = extern struct {
@ -272,6 +310,37 @@ pub const ImageDataDirectory = extern struct {
size: u32,
};
pub const DebugDirectoryEntry = extern struct {
characteristics: u32,
time_date_stamp: u32,
major_version: u16,
minor_version: u16,
@"type": DebugType,
size_of_data: u32,
address_of_raw_data: u32,
pointer_to_raw_data: u32,
};
pub const DebugType = enum(u32) {
UNKNOWN = 0,
COFF = 1,
CODEVIEW = 2,
FPO = 3,
MISC = 4,
EXCEPTION = 5,
FIXUP = 6,
OMAP_TO_SRC = 7,
OMAP_FROM_SRC = 8,
BORLAND = 9,
RESERVED10 = 10,
VC_FEATURE = 12,
POGO = 13,
ILTCG = 14,
MPX = 15,
REPRO = 16,
EX_DLLCHARACTERISTICS = 20,
};
pub const SectionHeader = extern struct {
name: [8]u8,
virtual_size: u32,
@ -303,6 +372,15 @@ pub const SectionHeader = extern struct {
return std.math.powi(u16, 2, self.flags.ALIGN - 1) catch unreachable;
}
pub fn setAlignment(self: *SectionHeader, new_alignment: u16) void {
assert(new_alignment > 0 and new_alignment <= 8192);
self.flags.ALIGN = std.math.log2(new_alignment);
}
pub fn isCode(self: SectionHeader) bool {
return self.flags.CNT_CODE == 0b1;
}
pub fn isComdat(self: SectionHeader) bool {
return self.flags.LNK_COMDAT == 0b1;
}
@ -778,6 +856,21 @@ pub const MachineType = enum(u16) {
/// MIPS little-endian WCE v2
WCEMIPSV2 = 0x169,
pub fn fromTargetCpuArch(arch: std.Target.Cpu.Arch) MachineType {
return switch (arch) {
.arm => .ARM,
.powerpc => .POWERPC,
.riscv32 => .RISCV32,
.thumb => .Thumb,
.i386 => .I386,
.aarch64 => .ARM64,
.riscv64 => .RISCV64,
.x86_64 => .X64,
// there are cases we don't (yet) handle
else => unreachable,
};
}
pub fn toTargetCpuArch(machine_type: MachineType) ?std.Target.Cpu.Arch {
return switch (machine_type) {
.ARM => .arm,
@ -794,10 +887,6 @@ pub const MachineType = enum(u16) {
}
};
const IMAGE_NUMBEROF_DIRECTORY_ENTRIES = 16;
const IMAGE_DEBUG_TYPE_CODEVIEW = 2;
const DEBUG_DIRECTORY = 6;
pub const CoffError = error{
InvalidPEMagic,
InvalidPEHeader,
@ -831,7 +920,7 @@ pub const Coff = struct {
var stream = std.io.fixedBufferStream(self.data);
const reader = stream.reader();
try stream.seekTo(pe_pointer_offset);
const coff_header_offset = try reader.readByte();
const coff_header_offset = try reader.readIntLittle(u32);
try stream.seekTo(coff_header_offset);
var buf: [4]u8 = undefined;
try reader.readNoEof(&buf);
@ -862,7 +951,7 @@ pub const Coff = struct {
};
const data_dirs = self.getDataDirectories();
const debug_dir = data_dirs[DEBUG_DIRECTORY];
const debug_dir = data_dirs[@enumToInt(DirectoryEntry.DEBUG)];
const file_offset = debug_dir.virtual_address - header.virtual_address + header.pointer_to_raw_data;
var stream = std.io.fixedBufferStream(self.data);
@ -875,7 +964,7 @@ pub const Coff = struct {
var i: u32 = 0;
blk: while (i < debug_dir_entry_count) : (i += 1) {
const debug_dir_entry = try reader.readStruct(DebugDirectoryEntry);
if (debug_dir_entry.type == IMAGE_DEBUG_TYPE_CODEVIEW) {
if (debug_dir_entry.type == .CODEVIEW) {
for (self.getSectionHeaders()) |*section| {
const section_start = section.virtual_address;
const section_size = section.virtual_size;
@ -1011,132 +1100,132 @@ pub const Coff = struct {
mem.copy(u8, out_buff, self.data[sec.pointer_to_raw_data..][0..sec.virtual_size]);
return out_buff;
}
};
pub const Symtab = struct {
pub const Symtab = struct {
buffer: []const u8,
pub fn len(self: Symtab) usize {
return @divExact(self.buffer.len, Symbol.sizeOf());
}
pub const Tag = enum {
symbol,
func_def,
debug_info,
weak_ext,
file_def,
sect_def,
};
pub const Record = union(Tag) {
symbol: Symbol,
debug_info: DebugInfoDefinition,
func_def: FunctionDefinition,
weak_ext: WeakExternalDefinition,
file_def: FileDefinition,
sect_def: SectionDefinition,
};
/// Lives as long as Symtab instance.
pub fn at(self: Symtab, index: usize, tag: Tag) Record {
const offset = index * Symbol.sizeOf();
const raw = self.buffer[offset..][0..Symbol.sizeOf()];
return switch (tag) {
.symbol => .{ .symbol = asSymbol(raw) },
.debug_info => .{ .debug_info = asDebugInfo(raw) },
.func_def => .{ .func_def = asFuncDef(raw) },
.weak_ext => .{ .weak_ext = asWeakExtDef(raw) },
.file_def => .{ .file_def = asFileDef(raw) },
.sect_def => .{ .sect_def = asSectDef(raw) },
};
}
fn asSymbol(raw: []const u8) Symbol {
return .{
.name = raw[0..8].*,
.value = mem.readIntLittle(u32, raw[8..12]),
.section_number = @intToEnum(SectionNumber, mem.readIntLittle(u16, raw[12..14])),
.@"type" = @bitCast(SymType, mem.readIntLittle(u16, raw[14..16])),
.storage_class = @intToEnum(StorageClass, raw[16]),
.number_of_aux_symbols = raw[17],
};
}
fn asDebugInfo(raw: []const u8) DebugInfoDefinition {
return .{
.unused_1 = raw[0..4].*,
.linenumber = mem.readIntLittle(u16, raw[4..6]),
.unused_2 = raw[6..12].*,
.pointer_to_next_function = mem.readIntLittle(u32, raw[12..16]),
.unused_3 = raw[16..18].*,
};
}
fn asFuncDef(raw: []const u8) FunctionDefinition {
return .{
.tag_index = mem.readIntLittle(u32, raw[0..4]),
.total_size = mem.readIntLittle(u32, raw[4..8]),
.pointer_to_linenumber = mem.readIntLittle(u32, raw[8..12]),
.pointer_to_next_function = mem.readIntLittle(u32, raw[12..16]),
.unused = raw[16..18].*,
};
}
fn asWeakExtDef(raw: []const u8) WeakExternalDefinition {
return .{
.tag_index = mem.readIntLittle(u32, raw[0..4]),
.flag = @intToEnum(WeakExternalFlag, mem.readIntLittle(u32, raw[4..8])),
.unused = raw[8..18].*,
};
}
fn asFileDef(raw: []const u8) FileDefinition {
return .{
.file_name = raw[0..18].*,
};
}
fn asSectDef(raw: []const u8) SectionDefinition {
return .{
.length = mem.readIntLittle(u32, raw[0..4]),
.number_of_relocations = mem.readIntLittle(u16, raw[4..6]),
.number_of_linenumbers = mem.readIntLittle(u16, raw[6..8]),
.checksum = mem.readIntLittle(u32, raw[8..12]),
.number = mem.readIntLittle(u16, raw[12..14]),
.selection = @intToEnum(ComdatSelection, raw[14]),
.unused = raw[15..18].*,
};
}
pub const Slice = struct {
buffer: []const u8,
fn len(self: Symtab) usize {
return @divExact(self.buffer.len, Symbol.sizeOf());
}
const Tag = enum {
symbol,
func_def,
debug_info,
weak_ext,
file_def,
sect_def,
};
const Record = union(Tag) {
symbol: Symbol,
debug_info: DebugInfoDefinition,
func_def: FunctionDefinition,
weak_ext: WeakExternalDefinition,
file_def: FileDefinition,
sect_def: SectionDefinition,
};
num: usize,
count: usize = 0,
/// Lives as long as Symtab instance.
fn at(self: Symtab, index: usize, tag: Tag) Record {
const offset = index * Symbol.sizeOf();
const raw = self.buffer[offset..][0..Symbol.sizeOf()];
return switch (tag) {
.symbol => .{ .symbol = asSymbol(raw) },
.debug_info => .{ .debug_info = asDebugInfo(raw) },
.func_def => .{ .func_def = asFuncDef(raw) },
.weak_ext => .{ .weak_ext = asWeakExtDef(raw) },
.file_def => .{ .file_def = asFileDef(raw) },
.sect_def => .{ .sect_def = asSectDef(raw) },
};
}
fn asSymbol(raw: []const u8) Symbol {
return .{
.name = raw[0..8].*,
.value = mem.readIntLittle(u32, raw[8..12]),
.section_number = @intToEnum(SectionNumber, mem.readIntLittle(u16, raw[12..14])),
.@"type" = @bitCast(SymType, mem.readIntLittle(u16, raw[14..16])),
.storage_class = @intToEnum(StorageClass, raw[16]),
.number_of_aux_symbols = raw[17],
};
}
fn asDebugInfo(raw: []const u8) DebugInfoDefinition {
return .{
.unused_1 = raw[0..4].*,
.linenumber = mem.readIntLittle(u16, raw[4..6]),
.unused_2 = raw[6..12].*,
.pointer_to_next_function = mem.readIntLittle(u32, raw[12..16]),
.unused_3 = raw[16..18].*,
};
}
fn asFuncDef(raw: []const u8) FunctionDefinition {
return .{
.tag_index = mem.readIntLittle(u32, raw[0..4]),
.total_size = mem.readIntLittle(u32, raw[4..8]),
.pointer_to_linenumber = mem.readIntLittle(u32, raw[8..12]),
.pointer_to_next_function = mem.readIntLittle(u32, raw[12..16]),
.unused = raw[16..18].*,
};
}
fn asWeakExtDef(raw: []const u8) WeakExternalDefinition {
return .{
.tag_index = mem.readIntLittle(u32, raw[0..4]),
.flag = @intToEnum(WeakExternalFlag, mem.readIntLittle(u32, raw[4..8])),
.unused = raw[8..18].*,
};
}
fn asFileDef(raw: []const u8) FileDefinition {
return .{
.file_name = raw[0..18].*,
};
}
fn asSectDef(raw: []const u8) SectionDefinition {
return .{
.length = mem.readIntLittle(u32, raw[0..4]),
.number_of_relocations = mem.readIntLittle(u16, raw[4..6]),
.number_of_linenumbers = mem.readIntLittle(u16, raw[6..8]),
.checksum = mem.readIntLittle(u32, raw[8..12]),
.number = mem.readIntLittle(u16, raw[12..14]),
.selection = @intToEnum(ComdatSelection, raw[14]),
.unused = raw[15..18].*,
};
}
const Slice = struct {
buffer: []const u8,
num: usize,
count: usize = 0,
/// Lives as long as Symtab instance.
fn next(self: *Slice) ?Symbol {
if (self.count >= self.num) return null;
const sym = asSymbol(self.buffer[0..Symbol.sizeOf()]);
self.count += 1;
self.buffer = self.buffer[Symbol.sizeOf()..];
return sym;
}
};
fn slice(self: Symtab, start: usize, end: ?usize) Slice {
const offset = start * Symbol.sizeOf();
const llen = if (end) |e| e * Symbol.sizeOf() else self.buffer.len;
const num = @divExact(llen - offset, Symbol.sizeOf());
return Slice{ .buffer = self.buffer[offset..][0..llen], .num = num };
pub fn next(self: *Slice) ?Symbol {
if (self.count >= self.num) return null;
const sym = asSymbol(self.buffer[0..Symbol.sizeOf()]);
self.count += 1;
self.buffer = self.buffer[Symbol.sizeOf()..];
return sym;
}
};
pub const Strtab = struct {
buffer: []const u8,
fn get(self: Strtab, off: u32) []const u8 {
assert(off < self.buffer.len);
return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.ptr + off), 0);
}
};
pub fn slice(self: Symtab, start: usize, end: ?usize) Slice {
const offset = start * Symbol.sizeOf();
const llen = if (end) |e| e * Symbol.sizeOf() else self.buffer.len;
const num = @divExact(llen - offset, Symbol.sizeOf());
return Slice{ .buffer = self.buffer[offset..][0..llen], .num = num };
}
};
pub const Strtab = struct {
buffer: []const u8,
pub fn get(self: Strtab, off: u32) []const u8 {
assert(off < self.buffer.len);
return mem.sliceTo(@ptrCast([*:0]const u8, self.buffer.ptr + off), 0);
}
};
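
Editorial sketch (not part of the diff): with the new public DirectoryEntry enum replacing the removed magic constants, callers index the optional header's data directories by tag name. This assumes only the std.coff declarations shown above.

const std = @import("std");

// Sketch: the debug data directory index, previously the bare constant 6.
fn debugDirIndex() u16 {
    return @enumToInt(std.coff.DirectoryEntry.DEBUG);
}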


@ -343,7 +343,7 @@ pub const Random = struct {
///
/// This is useful for selecting an item from a slice where weights are not equal.
/// `T` must be a numeric type capable of holding the sum of `proportions`.
pub fn weightedIndex(r: std.rand.Random, comptime T: type, proportions: []T) usize {
pub fn weightedIndex(r: std.rand.Random, comptime T: type, proportions: []const T) usize {
// This implementation works by summing the proportions and picking a random
// point in [0, sum). We then loop over the proportions, accumulating
// until our accumulator is greater than the random point.
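
Usage sketch (not part of the diff): with proportions now typed []const T, a const array can be passed directly, which the updated test in the next file relies on.

const std = @import("std");

test "weightedIndex accepts a const slice" {
    var prng = std.rand.DefaultPrng.init(0);
    const random = prng.random();
    // Index 0 and 3 are twice as likely to be picked as index 1 and 2.
    const proportions = [_]u32{ 2, 1, 1, 2 };
    const idx = random.weightedIndex(u32, &proportions);
    try std.testing.expect(idx < proportions.len);
}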


@ -452,7 +452,7 @@ test "Random weightedIndex" {
var prng = DefaultPrng.init(0);
const random = prng.random();
var proportions = [_]T{ 2, 1, 1, 2 };
const proportions = [_]T{ 2, 1, 1, 2 };
var counts = [_]f64{ 0, 0, 0, 0 };
const n_trials: u64 = 10_000;


@ -36,8 +36,6 @@ comptime {
if (@typeInfo(@TypeOf(root.main)).Fn.calling_convention != .C) {
@export(main2, .{ .name = "main" });
}
} else if (builtin.os.tag == .windows) {
@export(wWinMainCRTStartup2, .{ .name = "wWinMainCRTStartup" });
} else if (builtin.os.tag == .wasi and @hasDecl(root, "main")) {
@export(wasiMain2, .{ .name = "_start" });
} else {


@ -226,6 +226,8 @@ pub const ResultLoc = union(enum) {
ref,
/// The expression will be coerced into this type, but it will be evaluated as an rvalue.
ty: Zir.Inst.Ref,
/// Same as `ty` but for shift operands.
ty_shift_operand: Zir.Inst.Ref,
/// Same as `ty` but it is guaranteed that Sema will additionally perform the coercion,
/// so no `as` instruction needs to be emitted.
coerced_ty: Zir.Inst.Ref,
@ -259,7 +261,7 @@ pub const ResultLoc = union(enum) {
fn strategy(rl: ResultLoc, block_scope: *GenZir) Strategy {
switch (rl) {
// In this branch there will not be any store_to_block_ptr instructions.
.none, .ty, .coerced_ty, .ref => return .{
.none, .ty, .ty_shift_operand, .coerced_ty, .ref => return .{
.tag = .break_operand,
.elide_store_to_block_ptr_instructions = false,
},
@ -302,6 +304,14 @@ pub const ResultLoc = union(enum) {
else => rl,
};
}
fn zirTag(rl: ResultLoc) Zir.Inst.Tag {
return switch (rl) {
.ty => .as_node,
.ty_shift_operand => .as_shift_operand,
else => unreachable,
};
}
};
pub const align_rl: ResultLoc = .{ .ty = .u29_type };
@ -1385,7 +1395,7 @@ fn arrayInitExpr(
const tag: Zir.Inst.Tag = if (types.array != .none) .array_init else .array_init_anon;
return arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag);
},
.ty, .coerced_ty => {
.ty, .ty_shift_operand, .coerced_ty => {
const tag: Zir.Inst.Tag = if (types.array != .none) .array_init else .array_init_anon;
const result = try arrayInitExprInner(gz, scope, node, array_init.ast.elements, types.array, types.elem, tag);
return rvalue(gz, rl, result, node);
@ -1631,7 +1641,7 @@ fn structInitExpr(
return structInitExprRlNone(gz, scope, node, struct_init, .none, .struct_init_anon);
}
},
.ty, .coerced_ty => |ty_inst| {
.ty, .ty_shift_operand, .coerced_ty => |ty_inst| {
if (struct_init.ast.type_expr == 0) {
const result = try structInitExprRlNone(gz, scope, node, struct_init, ty_inst, .struct_init_anon);
return rvalue(gz, rl, result, node);
@ -2327,6 +2337,7 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.anyframe_type,
.as,
.as_node,
.as_shift_operand,
.bit_and,
.bitcast,
.bit_or,
@ -2497,7 +2508,6 @@ fn addEnsureResult(gz: *GenZir, maybe_unused_result: Zir.Inst.Ref, statement: As
.field_parent_ptr,
.maximum,
.minimum,
.builtin_async_call,
.c_import,
.@"resume",
.@"await",
@ -7278,7 +7288,7 @@ fn as(
) InnerError!Zir.Inst.Ref {
const dest_type = try typeExpr(gz, scope, lhs);
switch (rl) {
.none, .discard, .ref, .ty, .coerced_ty => {
.none, .discard, .ref, .ty, .ty_shift_operand, .coerced_ty => {
const result = try reachableExpr(gz, scope, .{ .ty = dest_type }, rhs, node);
return rvalue(gz, rl, result, node);
},
@ -7959,7 +7969,8 @@ fn builtinCall(
return rvalue(gz, rl, result, node);
},
.async_call => {
const result = try gz.addPlNode(.builtin_async_call, node, Zir.Inst.AsyncCall{
const result = try gz.addExtendedPayload(.builtin_async_call, Zir.Inst.AsyncCall{
.node = gz.nodeIndexToRelative(node),
.frame_buffer = try expr(gz, scope, .none, params[0]),
.result_ptr = try expr(gz, scope, .none, params[1]),
.fn_ptr = try expr(gz, scope, .none, params[2]),
@ -8178,7 +8189,7 @@ fn shiftOp(
) InnerError!Zir.Inst.Ref {
const lhs = try expr(gz, scope, .none, lhs_node);
const log2_int_type = try gz.addUnNode(.typeof_log2_int_type, lhs, lhs_node);
const rhs = try expr(gz, scope, .{ .ty = log2_int_type }, rhs_node);
const rhs = try expr(gz, scope, .{ .ty_shift_operand = log2_int_type }, rhs_node);
const result = try gz.addPlNode(tag, node, Zir.Inst.Bin{
.lhs = lhs,
.rhs = rhs,
@ -9409,7 +9420,7 @@ fn rvalue(
}
return indexToRef(gop.value_ptr.*);
},
.ty => |ty_inst| {
.ty, .ty_shift_operand => |ty_inst| {
// Quickly eliminate some common, unnecessary type coercion.
const as_ty = @as(u64, @enumToInt(Zir.Inst.Ref.type_type)) << 32;
const as_comptime_int = @as(u64, @enumToInt(Zir.Inst.Ref.comptime_int_type)) << 32;
@ -9470,7 +9481,7 @@ fn rvalue(
=> return result, // type of result is already correct
// Need an explicit type coercion instruction.
else => return gz.addPlNode(.as_node, src_node, Zir.Inst.As{
else => return gz.addPlNode(rl.zirTag(), src_node, Zir.Inst.As{
.dest_type = ty_inst,
.operand = result,
}),
@ -10350,7 +10361,7 @@ const GenZir = struct {
// we emit ZIR for the block break instructions to have the result values,
// and then rvalue() on that to pass the value to the result location.
switch (parent_rl) {
.ty, .coerced_ty => |ty_inst| {
.ty, .ty_shift_operand, .coerced_ty => |ty_inst| {
gz.rl_ty_inst = ty_inst;
gz.break_result_loc = parent_rl;
},
@ -11506,7 +11517,7 @@ const GenZir = struct {
fn addRet(gz: *GenZir, rl: ResultLoc, operand: Zir.Inst.Ref, node: Ast.Node.Index) !void {
switch (rl) {
.ptr => |ret_ptr| _ = try gz.addUnNode(.ret_load, ret_ptr, node),
.ty => _ = try gz.addUnNode(.ret_node, operand, node),
.ty, .ty_shift_operand => _ = try gz.addUnNode(.ret_node, operand, node),
else => unreachable,
}
}
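
Editorial sketch (not part of the diff): the new `ty_shift_operand` result location and `as_shift_operand` ZIR tag cover the RHS of shifts, which is coerced to the LHS's log2 integer type; together with the Sema changes further down, user code behaves roughly like this:

fn shiftExample(amt: u3) u8 {
    const x: u8 = 0b0000_0001;
    // ok: `amt` is coerced through the new shift-operand result location
    return x << amt;
    // whereas `1 << amt` (comptime_int LHS, runtime RHS) now fails with:
    // "LHS of shift must be a fixed-width integer type, or RHS must be comptime-known"
}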


@ -1888,7 +1888,7 @@ fn walkInstruction(
.expr = .{ .typeInfo = operand_index },
};
},
.as_node => {
.as_node, .as_shift_operand => {
const pl_node = data[inst_index].pl_node;
const extra = file.zir.extraData(Zir.Inst.As, pl_node.payload_index);
const dest_type_walk = try self.walkRef(


@ -1127,7 +1127,6 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
link_eh_frame_hdr or
options.link_emit_relocs or
options.output_mode == .Lib or
options.image_base_override != null or
options.linker_script != null or options.version_script != null or
options.emit_implib != null or
build_id)
@ -4767,6 +4766,24 @@ pub fn dump_argv(argv: []const []const u8) void {
std.debug.print("{s}\n", .{argv[argv.len - 1]});
}
pub fn getZigBackend(comp: Compilation) std.builtin.CompilerBackend {
const use_stage1 = build_options.have_stage1 and comp.bin_file.options.use_stage1;
if (use_stage1) return .stage1;
if (build_options.have_llvm and comp.bin_file.options.use_llvm) return .stage2_llvm;
const target = comp.bin_file.options.target;
if (target.ofmt == .c) return .stage2_c;
return switch (target.cpu.arch) {
.wasm32, .wasm64 => std.builtin.CompilerBackend.stage2_wasm,
.arm, .armeb, .thumb, .thumbeb => .stage2_arm,
.x86_64 => .stage2_x86_64,
.i386 => .stage2_x86,
.aarch64, .aarch64_be, .aarch64_32 => .stage2_aarch64,
.riscv64 => .stage2_riscv64,
.sparc64 => .stage2_sparc64,
else => .other,
};
}
pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Allocator.Error![:0]u8 {
const tracy_trace = trace(@src());
defer tracy_trace.end();
@ -4776,23 +4793,7 @@ pub fn generateBuiltinZigSource(comp: *Compilation, allocator: Allocator) Alloca
const target = comp.getTarget();
const generic_arch_name = target.cpu.arch.genericName();
const use_stage1 = build_options.have_stage1 and comp.bin_file.options.use_stage1;
const zig_backend: std.builtin.CompilerBackend = blk: {
if (use_stage1) break :blk .stage1;
if (build_options.have_llvm and comp.bin_file.options.use_llvm) break :blk .stage2_llvm;
if (target.ofmt == .c) break :blk .stage2_c;
break :blk switch (target.cpu.arch) {
.wasm32, .wasm64 => std.builtin.CompilerBackend.stage2_wasm,
.arm, .armeb, .thumb, .thumbeb => .stage2_arm,
.x86_64 => .stage2_x86_64,
.i386 => .stage2_x86,
.aarch64, .aarch64_be, .aarch64_32 => .stage2_aarch64,
.riscv64 => .stage2_riscv64,
.sparc64 => .stage2_sparc64,
else => .other,
};
};
const zig_backend = comp.getZigBackend();
@setEvalBranchQuota(4000);
try buffer.writer().print(


@ -5259,9 +5259,9 @@ pub fn clearDecl(
// TODO instead of a union, put this memory trailing Decl objects,
// and allow it to be variably sized.
decl.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.TextBlock.empty },
.coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.Atom.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },
@ -5391,6 +5391,9 @@ fn deleteDeclExports(mod: *Module, decl_index: Decl.Index) void {
if (mod.comp.bin_file.cast(link.File.Wasm)) |wasm| {
wasm.deleteExport(exp.link.wasm);
}
if (mod.comp.bin_file.cast(link.File.Coff)) |coff| {
coff.deleteExport(exp.link.coff);
}
if (mod.failed_exports.fetchSwapRemove(exp)) |failed_kv| {
failed_kv.value.destroy(mod.gpa);
}
@ -5680,9 +5683,9 @@ pub fn allocateNewDecl(
.zir_decl_index = 0,
.src_scope = src_scope,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = link.File.Coff.TextBlock.empty },
.coff => .{ .coff = link.File.Coff.Atom.empty },
.elf => .{ .elf = link.File.Elf.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.TextBlock.empty },
.macho => .{ .macho = link.File.MachO.Atom.empty },
.plan9 => .{ .plan9 = link.File.Plan9.DeclBlock.empty },
.c => .{ .c = {} },
.wasm => .{ .wasm = link.File.Wasm.DeclBlock.empty },


@ -712,6 +712,7 @@ fn analyzeBodyInner(
.vector_type => try sema.zirVectorType(block, inst),
.as => try sema.zirAs(block, inst),
.as_node => try sema.zirAsNode(block, inst),
.as_shift_operand => try sema.zirAsShiftOperand(block, inst),
.bit_and => try sema.zirBitwise(block, inst, .bit_and),
.bit_not => try sema.zirBitNot(block, inst),
.bit_or => try sema.zirBitwise(block, inst, .bit_or),
@ -848,7 +849,6 @@ fn analyzeBodyInner(
.mul_add => try sema.zirMulAdd(block, inst),
.builtin_call => try sema.zirBuiltinCall(block, inst),
.field_parent_ptr => try sema.zirFieldParentPtr(block, inst),
.builtin_async_call => try sema.zirBuiltinAsyncCall(block, inst),
.@"resume" => try sema.zirResume(block, inst),
.@"await" => try sema.zirAwait(block, inst),
.array_base_ptr => try sema.zirArrayBasePtr(block, inst),
@ -956,6 +956,7 @@ fn analyzeBodyInner(
.error_to_int => try sema.zirErrorToInt( block, extended),
.int_to_error => try sema.zirIntToError( block, extended),
.reify => try sema.zirReify( block, extended, inst),
.builtin_async_call => try sema.zirBuiltinAsyncCall( block, extended),
// zig fmt: on
.fence => {
try sema.zirFence(block, extended);
@ -5076,7 +5077,7 @@ pub fn analyzeExport(
},
.src = src,
.link = switch (mod.comp.bin_file.tag) {
.coff => .{ .coff = {} },
.coff => .{ .coff = .{} },
.elf => .{ .elf = .{} },
.macho => .{ .macho = .{} },
.plan9 => .{ .plan9 = null },
@ -6152,9 +6153,30 @@ fn analyzeCall(
if (ensure_result_used) {
try sema.ensureResultUsed(block, result, call_src);
}
if (call_tag == .call_always_tail) {
return sema.handleTailCall(block, call_src, func_ty, result);
}
return result;
}
fn handleTailCall(sema: *Sema, block: *Block, call_src: LazySrcLoc, func_ty: Type, result: Air.Inst.Ref) !Air.Inst.Ref {
const target = sema.mod.getTarget();
const backend = sema.mod.comp.getZigBackend();
if (!target_util.supportsTailCall(target, backend)) {
return sema.fail(block, call_src, "unable to perform tail call: compiler backend '{s}' does not support tail calls on target architecture '{s}' with the selected CPU feature flags", .{
@tagName(backend), @tagName(target.cpu.arch),
});
}
const func_decl = sema.mod.declPtr(sema.owner_func.?.owner_decl);
if (!func_ty.eql(func_decl.ty, sema.mod)) {
return sema.fail(block, call_src, "unable to perform tail call: type of function being called '{}' does not match type of calling function '{}'", .{
func_ty.fmt(sema.mod), func_decl.ty.fmt(sema.mod),
});
}
_ = try block.addUnOp(.ret, result);
return Air.Inst.Ref.unreachable_value;
}
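
Editorial sketch (not from the diff): the kind of user code this new path validates, assuming the 0.10-era @call builtin and CallOptions; the call is lowered as a tail call only when the backend supports it and the callee's function type equals the caller's.

// Sketch: caller and callee are both `fn (u32) u32`, so the type-equality
// check in handleTailCall passes.
fn countDown(n: u32) u32 {
    if (n == 0) return 0;
    return @call(.{ .modifier = .always_tail }, countDown, .{n - 1});
}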
fn analyzeInlineCallArg(
sema: *Sema,
arg_block: *Block,
@ -6670,7 +6692,8 @@ fn instantiateGenericCall(
try sema.requireFunctionBlock(block, call_src);
const comptime_args = callee.comptime_args.?;
const new_fn_info = mod.declPtr(callee.owner_decl).ty.fnInfo();
const func_ty = mod.declPtr(callee.owner_decl).ty;
const new_fn_info = func_ty.fnInfo();
const runtime_args_len = @intCast(u32, new_fn_info.param_types.len);
const runtime_args = try sema.arena.alloc(Air.Inst.Ref, runtime_args_len);
{
@ -6717,7 +6740,7 @@ fn instantiateGenericCall(
try sema.air_extra.ensureUnusedCapacity(sema.gpa, @typeInfo(Air.Call).Struct.fields.len +
runtime_args_len);
const func_inst = try block.addInst(.{
const result = try block.addInst(.{
.tag = call_tag,
.data = .{ .pl_op = .{
.operand = callee_inst,
@ -6729,9 +6752,12 @@ fn instantiateGenericCall(
sema.appendRefsAssumeCapacity(runtime_args);
if (ensure_result_used) {
try sema.ensureResultUsed(block, func_inst, call_src);
try sema.ensureResultUsed(block, result, call_src);
}
return func_inst;
if (call_tag == .call_always_tail) {
return sema.handleTailCall(block, call_src, func_ty, result);
}
return result;
}
fn emitDbgInline(
@ -8177,8 +8203,22 @@ fn zirParam(
.is_comptime = comptime_syntax,
.name = param_name,
});
const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison));
try sema.inst_map.putNoClobber(sema.gpa, inst, result);
if (is_comptime) {
// If this is a comptime parameter we can add a constant generic_poison
// since this is also a generic parameter.
const result = try sema.addConstant(param_ty, Value.initTag(.generic_poison));
try sema.inst_map.putNoClobber(sema.gpa, inst, result);
} else {
// Otherwise we need a dummy runtime instruction.
const result_index = @intCast(Air.Inst.Index, sema.air_instructions.len);
try sema.air_instructions.append(sema.gpa, .{
.tag = .alloc,
.data = .{ .ty = param_ty },
});
const result = Air.indexToRef(result_index);
try sema.inst_map.putNoClobber(sema.gpa, inst, result);
}
}
fn zirParamAnytype(
@ -8225,7 +8265,7 @@ fn zirAs(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst
defer tracy.end();
const bin_inst = sema.code.instructions.items(.data)[inst].bin;
return sema.analyzeAs(block, sema.src, bin_inst.lhs, bin_inst.rhs);
return sema.analyzeAs(block, sema.src, bin_inst.lhs, bin_inst.rhs, false);
}
fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
@ -8235,7 +8275,17 @@ fn zirAsNode(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
return sema.analyzeAs(block, src, extra.dest_type, extra.operand);
return sema.analyzeAs(block, src, extra.dest_type, extra.operand, false);
}
fn zirAsShiftOperand(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const tracy = trace(@src());
defer tracy.end();
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
const extra = sema.code.extraData(Zir.Inst.As, inst_data.payload_index).data;
return sema.analyzeAs(block, src, extra.dest_type, extra.operand, true);
}
fn analyzeAs(
@ -8244,6 +8294,7 @@ fn analyzeAs(
src: LazySrcLoc,
zir_dest_type: Zir.Inst.Ref,
zir_operand: Zir.Inst.Ref,
no_cast_to_comptime_int: bool,
) CompileError!Air.Inst.Ref {
const is_ret = if (Zir.refToIndex(zir_dest_type)) |ptr_index|
sema.code.instructions.items(.tag)[ptr_index] == .ret_type
@ -8255,7 +8306,7 @@ fn analyzeAs(
if (dest_ty.zigTypeTag() == .NoReturn) {
return sema.fail(block, src, "cannot cast to noreturn", .{});
}
return sema.coerceExtra(block, dest_ty, operand, src, true, is_ret) catch |err| switch (err) {
return sema.coerceExtra(block, dest_ty, operand, src, .{ .is_ret = is_ret, .no_cast_to_comptime_int = no_cast_to_comptime_int }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
@ -10459,7 +10510,12 @@ fn zirShl(
const runtime_src = if (maybe_lhs_val) |lhs_val| rs: {
if (lhs_val.isUndef()) return sema.addConstUndef(lhs_ty);
const rhs_val = maybe_rhs_val orelse break :rs rhs_src;
const rhs_val = maybe_rhs_val orelse {
if (scalar_ty.zigTypeTag() == .ComptimeInt) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be a comptime known", .{});
}
break :rs rhs_src;
};
const val = switch (air_tag) {
.shl_exact => val: {
@ -10583,7 +10639,10 @@ fn zirShr(
const target = sema.mod.getTarget();
const scalar_ty = lhs_ty.scalarType();
const runtime_src = if (try sema.resolveMaybeUndefVal(block, rhs_src, rhs)) |rhs_val| rs: {
const maybe_lhs_val = try sema.resolveMaybeUndefVal(block, lhs_src, lhs);
const maybe_rhs_val = try sema.resolveMaybeUndefVal(block, rhs_src, rhs);
const runtime_src = if (maybe_rhs_val) |rhs_val| rs: {
if (rhs_val.isUndef()) {
return sema.addConstUndef(lhs_ty);
}
@ -10615,7 +10674,7 @@ fn zirShr(
});
}
}
if (try sema.resolveMaybeUndefVal(block, lhs_src, lhs)) |lhs_val| {
if (maybe_lhs_val) |lhs_val| {
if (lhs_val.isUndef()) {
return sema.addConstUndef(lhs_ty);
}
@ -10633,6 +10692,10 @@ fn zirShr(
}
} else rhs_src;
if (maybe_rhs_val == null and scalar_ty.zigTypeTag() == .ComptimeInt) {
return sema.fail(block, src, "LHS of shift must be a fixed-width integer type, or RHS must be a comptime known", .{});
}
try sema.requireRuntimeBlock(block, src, runtime_src);
const result = try block.addBinOp(air_tag, lhs, rhs);
if (block.wantSafety()) {
@ -15353,7 +15416,7 @@ fn analyzeRet(
if (sema.fn_ret_ty.zigTypeTag() == .ErrorUnion) {
try sema.addToInferredErrorSet(uncasted_operand);
}
const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, true, true) catch |err| switch (err) {
const operand = sema.coerceExtra(block, sema.fn_ret_ty, uncasted_operand, src, .{ .is_ret = true }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
@ -19262,7 +19325,7 @@ fn resolveCallOptions(
return wanted_modifier;
},
// These can be upgraded to comptime. nosuspend bit can be safely ignored.
.always_tail, .always_inline, .compile_time => {
.always_inline, .compile_time => {
_ = (try sema.resolveDefinedValue(block, func_src, func)) orelse {
return sema.fail(block, func_src, "modifier '{s}' requires a comptime-known function", .{@tagName(wanted_modifier)});
};
@ -19272,6 +19335,12 @@ fn resolveCallOptions(
}
return wanted_modifier;
},
.always_tail => {
if (is_comptime) {
return .compile_time;
}
return wanted_modifier;
},
.async_kw => {
if (is_nosuspend) {
return sema.fail(block, modifier_src, "modifier 'async_kw' cannot be used inside nosuspend block", .{});
@ -19614,9 +19683,9 @@ fn zirMemset(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!void
});
}
fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, inst: Zir.Inst.Index) CompileError!Air.Inst.Ref {
const inst_data = sema.code.instructions.items(.data)[inst].pl_node;
const src = inst_data.src();
fn zirBuiltinAsyncCall(sema: *Sema, block: *Block, extended: Zir.Inst.Extended.InstData) CompileError!Air.Inst.Ref {
const extra = sema.code.extraData(Zir.Inst.UnNode, extended.operand).data;
const src = LazySrcLoc.nodeOffset(extra.node);
return sema.failWithUseOfAsync(block, src);
}
@ -21871,6 +21940,7 @@ fn unionFieldPtr(
.mutable = union_ptr_ty.ptrIsMutable(),
.@"addrspace" = union_ptr_ty.ptrAddressSpace(),
});
const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?);
if (initializing and field.ty.zigTypeTag() == .NoReturn) {
const msg = msg: {
@ -21892,11 +21962,10 @@ fn unionFieldPtr(
if (union_val.isUndef()) {
return sema.failWithUseOfUndef(block, src);
}
const enum_field_index = union_obj.tag_ty.enumFieldIndex(field_name).?;
const tag_and_val = union_val.castTag(.@"union").?.data;
var field_tag_buf: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @intCast(u32, enum_field_index),
.data = enum_field_index,
};
const field_tag = Value.initPayload(&field_tag_buf.base);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod);
@ -21928,7 +21997,7 @@ fn unionFieldPtr(
if (!initializing and union_obj.layout == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1)
{
const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
// TODO would it be better if get_union_tag supported pointers to unions?
const union_val = try block.addTyOp(.load, union_ty, union_ptr);
@ -21958,15 +22027,15 @@ fn unionFieldVal(
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
const field_index = try sema.unionFieldIndex(block, union_ty, field_name, field_name_src);
const field = union_obj.fields.values()[field_index];
const enum_field_index = @intCast(u32, union_obj.tag_ty.enumFieldIndex(field_name).?);
if (try sema.resolveMaybeUndefVal(block, src, union_byval)) |union_val| {
if (union_val.isUndef()) return sema.addConstUndef(field.ty);
const tag_and_val = union_val.castTag(.@"union").?.data;
const enum_field_index = union_obj.tag_ty.enumFieldIndex(field_name).?;
var field_tag_buf: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @intCast(u32, enum_field_index),
.data = enum_field_index,
};
const field_tag = Value.initPayload(&field_tag_buf.base);
const tag_matches = tag_and_val.tag.eql(field_tag, union_obj.tag_ty, sema.mod);
@ -22002,7 +22071,7 @@ fn unionFieldVal(
if (union_obj.layout == .Auto and block.wantSafety() and
union_ty.unionTagTypeSafety() != null and union_obj.fields.count() > 1)
{
const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, field_index);
const wanted_tag_val = try Value.Tag.enum_field_index.create(sema.arena, enum_field_index);
const wanted_tag = try sema.addConstant(union_obj.tag_ty, wanted_tag_val);
const active_tag = try block.addTyOp(.get_union_tag, union_obj.tag_ty, union_byval);
const ok = try block.addBinOp(.cmp_eq, active_tag, wanted_tag);
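
Editorial example (not part of the diff): the switch to `enum_field_index` matters because a union's field declaration order need not match its tag enum's, so the two indices can differ:

const Tag = enum { b, a };
// Union field `a` has field_index 0, but Tag's field index for `a` is 1;
// the runtime safety check must compare against the enum's index.
const U = union(Tag) { a: u32, b: f32 };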
@ -22504,7 +22573,7 @@ fn coerce(
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
) CompileError!Air.Inst.Ref {
return sema.coerceExtra(block, dest_ty_unresolved, inst, inst_src, true, false) catch |err| switch (err) {
return sema.coerceExtra(block, dest_ty_unresolved, inst, inst_src, .{}) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
@ -22516,14 +22585,22 @@ const CoersionError = CompileError || error{
NotCoercible,
};
const CoerceOpts = struct {
/// Should coerceExtra emit error messages.
report_err: bool = true,
/// Ignored if `report_err == false`.
is_ret: bool = false,
/// Should coercion to comptime_int emit an error message.
no_cast_to_comptime_int: bool = false,
};
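
Editorial sketch (not part of the diff): code that now gets the clearer diagnostic driven by these options, since a runtime value cannot coerce to comptime_int:

fn example(n: u32) void {
    // error: value being casted to 'comptime_int' must be comptime known
    const x: comptime_int = n;
    _ = x;
}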
fn coerceExtra(
sema: *Sema,
block: *Block,
dest_ty_unresolved: Type,
inst: Air.Inst.Ref,
inst_src: LazySrcLoc,
report_err: bool,
is_ret: bool,
opts: CoerceOpts,
) CoersionError!Air.Inst.Ref {
switch (dest_ty_unresolved.tag()) {
.var_args_param => return sema.coerceVarArgParam(block, inst, inst_src),
@ -22575,7 +22652,7 @@ fn coerceExtra(
// T to ?T
const child_type = try dest_ty.optionalChildAlloc(sema.arena);
const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, false, is_ret) catch |err| switch (err) {
const intermediate = sema.coerceExtra(block, child_type, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => {
if (in_memory_result == .no_match) {
// Try to give more useful notes
@ -22691,7 +22768,7 @@ fn coerceExtra(
return sema.addConstant(dest_ty, Value.@"null");
},
.ComptimeInt => {
const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, false, is_ret) catch |err| switch (err) {
const addr = sema.coerceExtra(block, Type.usize, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => break :pointer,
else => |e| return e,
};
@ -22702,7 +22779,7 @@ fn coerceExtra(
.signed => Type.isize,
.unsigned => Type.usize,
};
const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, false, is_ret) catch |err| switch (err) {
const addr = sema.coerceExtra(block, ptr_size_ty, inst, inst_src, .{ .report_err = false }) catch |err| switch (err) {
error.NotCoercible => {
// Try to give more useful notes
in_memory_result = try sema.coerceInMemoryAllowed(block, ptr_size_ty, inst_ty, false, target, dest_ty_src, inst_src);
@ -22828,7 +22905,13 @@ fn coerceExtra(
},
.Int, .ComptimeInt => switch (inst_ty.zigTypeTag()) {
.Float, .ComptimeFloat => float: {
const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse break :float;
const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse {
if (dest_ty.zigTypeTag() == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime known");
}
break :float;
};
if (val.floatHasFraction()) {
return sema.fail(
@ -22845,11 +22928,16 @@ fn coerceExtra(
if (try sema.resolveDefinedValue(block, inst_src, inst)) |val| {
// comptime known integer to other number
if (!(try sema.intFitsInType(block, inst_src, val, dest_ty, null))) {
if (!report_err) return error.NotCoercible;
if (!opts.report_err) return error.NotCoercible;
return sema.fail(block, inst_src, "type '{}' cannot represent integer value '{}'", .{ dest_ty.fmt(sema.mod), val.fmtValue(inst_ty, sema.mod) });
}
return try sema.addConstant(dest_ty, val);
}
if (dest_ty.zigTypeTag() == .ComptimeInt) {
if (!opts.report_err) return error.NotCoercible;
if (opts.no_cast_to_comptime_int) return inst;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_int' must be comptime known");
}
// integer widening
const dst_info = dest_ty.intInfo(target);
@ -22886,6 +22974,7 @@ fn coerceExtra(
}
return try sema.addConstant(dest_ty, result_val);
} else if (dest_ty.zigTypeTag() == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime known");
}
@ -22898,7 +22987,13 @@ fn coerceExtra(
}
},
.Int, .ComptimeInt => int: {
const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse break :int;
const val = (try sema.resolveDefinedValue(block, inst_src, inst)) orelse {
if (dest_ty.zigTypeTag() == .ComptimeFloat) {
if (!opts.report_err) return error.NotCoercible;
return sema.failWithNeededComptime(block, inst_src, "value being casted to 'comptime_float' must be comptime known");
}
break :int;
};
const result_val = try val.intToFloat(sema.arena, inst_ty, dest_ty, target);
// TODO implement this compile error
//const int_again_val = try result_val.floatToInt(sema.arena, inst_ty);
@ -23050,9 +23145,9 @@ fn coerceExtra(
return sema.addConstUndef(dest_ty);
}
if (!report_err) return error.NotCoercible;
if (!opts.report_err) return error.NotCoercible;
if (is_ret and dest_ty.zigTypeTag() == .NoReturn) {
if (opts.is_ret and dest_ty.zigTypeTag() == .NoReturn) {
const msg = msg: {
const msg = try sema.errMsg(block, inst_src, "function declared 'noreturn' returns", .{});
errdefer msg.destroy(sema.gpa);
@ -23089,7 +23184,7 @@ fn coerceExtra(
try in_memory_result.report(sema, block, inst_src, msg);
// Add notes about function return type
if (is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) {
if (opts.is_ret and sema.mod.test_functions.get(sema.func.?.owner_decl) == null) {
const ret_ty_src: LazySrcLoc = .{ .node_offset_fn_type_ret_ty = 0 };
const src_decl = sema.mod.declPtr(sema.func.?.owner_decl);
if (inst_ty.isError() and !dest_ty.isError()) {
@ -24050,7 +24145,7 @@ fn storePtr2(
// https://github.com/ziglang/zig/issues/11154
if (sema.obtainBitCastedVectorPtr(ptr)) |vector_ptr| {
const vector_ty = sema.typeOf(vector_ptr).childType();
const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, true, is_ret) catch |err| switch (err) {
const vector = sema.coerceExtra(block, vector_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
@ -24058,7 +24153,7 @@ fn storePtr2(
return;
}
const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, true, is_ret) catch |err| switch (err) {
const operand = sema.coerceExtra(block, elem_ty, uncasted_operand, operand_src, .{ .is_ret = is_ret }) catch |err| switch (err) {
error.NotCoercible => unreachable,
else => |e| return e,
};
@ -26793,7 +26888,7 @@ fn wrapErrorUnionPayload(
inst_src: LazySrcLoc,
) !Air.Inst.Ref {
const dest_payload_ty = dest_ty.errorUnionPayload();
const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, false, false);
const coerced = try sema.coerceExtra(block, dest_payload_ty, inst, inst_src, .{ .report_err = false });
if (try sema.resolveMaybeUndefVal(block, inst_src, coerced)) |val| {
return sema.addConstant(dest_ty, try Value.Tag.eu_payload.create(sema.arena, val));
}


@ -242,6 +242,8 @@ pub const Inst = struct {
/// Type coercion to the function's return type.
/// Uses the `pl_node` field. Payload is `As`. AST node could be many things.
as_node,
/// Same as `as_node` but ignores runtime to comptime int error.
as_shift_operand,
/// Bitwise AND. `&`
bit_and,
/// Reinterpret the memory representation of a value as a different type.
@ -942,9 +944,6 @@ pub const Inst = struct {
/// Implements the `@maximum` builtin.
/// Uses the `pl_node` union field with payload `Bin`
maximum,
/// Implements the `@asyncCall` builtin.
/// Uses the `pl_node` union field with payload `AsyncCall`.
builtin_async_call,
/// Implements the `@cImport` builtin.
/// Uses the `pl_node` union field with payload `Block`.
c_import,
@ -1029,6 +1028,7 @@ pub const Inst = struct {
.anyframe_type,
.as,
.as_node,
.as_shift_operand,
.bit_and,
.bitcast,
.bit_or,
@ -1231,7 +1231,6 @@ pub const Inst = struct {
.memcpy,
.memset,
.minimum,
.builtin_async_call,
.c_import,
.@"resume",
.@"await",
@ -1339,6 +1338,7 @@ pub const Inst = struct {
.anyframe_type,
.as,
.as_node,
.as_shift_operand,
.bit_and,
.bitcast,
.bit_or,
@ -1513,7 +1513,6 @@ pub const Inst = struct {
.field_parent_ptr,
.maximum,
.minimum,
.builtin_async_call,
.c_import,
.@"resume",
.@"await",
@ -1577,6 +1576,7 @@ pub const Inst = struct {
.anyframe_type = .un_node,
.as = .bin,
.as_node = .pl_node,
.as_shift_operand = .pl_node,
.bit_and = .pl_node,
.bitcast = .pl_node,
.bit_not = .un_node,
@ -1801,7 +1801,6 @@ pub const Inst = struct {
.memcpy = .pl_node,
.memset = .pl_node,
.minimum = .pl_node,
.builtin_async_call = .pl_node,
.c_import = .pl_node,
.alloc = .un_node,
@ -1972,6 +1971,9 @@ pub const Inst = struct {
/// `operand` is payload index to `UnNode`.
/// `small` contains `NameStrategy
reify,
/// Implements the `@asyncCall` builtin.
/// `operand` is payload index to `AsyncCall`.
builtin_async_call,
pub const InstData = struct {
opcode: Extended,
@ -3454,6 +3456,7 @@ pub const Inst = struct {
};
pub const AsyncCall = struct {
node: i32,
frame_buffer: Ref,
result_ptr: Ref,
fn_ptr: Ref,


@ -3466,19 +3466,16 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// on linking.
const mod = self.bin_file.options.module.?;
if (self.air.value(callee)) |func_value| {
if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got_addr = blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
};
try self.genSetReg(Type.initTag(.usize), .x30, .{ .memory = got_addr });
@ -3546,6 +3543,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO implement calling in COFF for {}", .{self.target.cpu.arch});
} else unreachable;
} else {
assert(ty.zigTypeTag() == .Pointer);
@ -5109,9 +5108,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
// the linker has enough info to perform relocations.
assert(decl.link.macho.sym_index != 0);
return MCValue{ .got_load = decl.link.macho.sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;


@ -3698,7 +3698,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// Due to incremental compilation, how function calls are generated depends
// on linking.
switch (self.bin_file.tag) {
.elf, .coff => {
.elf => {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
@ -3709,11 +3709,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
} else unreachable;
try self.genSetReg(Type.initTag(.usize), .lr, .{ .memory = got_addr });
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
@ -3751,6 +3747,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
}
},
.macho => unreachable, // unsupported architecture for MachO
.coff => return self.fail("TODO implement call in COFF for {}", .{self.target.cpu.arch}),
.plan9 => return self.fail("TODO implement call on plan9 for {}", .{self.target.cpu.arch}),
else => unreachable,
}
@ -5548,9 +5545,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;


@ -1718,7 +1718,7 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// Due to incremental compilation, how function calls are generated depends
// on linking.
if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
for (info.args) |mc_arg, arg_i| {
const arg = args[arg_i];
const arg_ty = self.air.typeOf(arg);
@ -1752,13 +1752,10 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got_addr = blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes
else
unreachable;
};
try self.genSetReg(Type.initTag(.usize), .ra, .{ .memory = got_addr });
_ = try self.addInst(.{
@ -1777,6 +1774,8 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
} else {
return self.fail("TODO implement calling runtime known function pointer", .{});
}
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO implement calling in COFF for {}", .{self.target.cpu.arch});
} else if (self.bin_file.cast(link.File.MachO)) |_| {
unreachable; // unsupported architecture for MachO
} else if (self.bin_file.cast(link.File.Plan9)) |_| {
@ -2591,9 +2590,8 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
// TODO I'm hacking my way through here by repurposing .memory for storing
// index to the GOT target symbol index.
return MCValue{ .memory = decl.link.macho.sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
return self.fail("TODO codegen COFF const Decl pointer", .{});
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;


@ -2657,22 +2657,26 @@ fn loadMemPtrIntoRegister(self: *Self, reg: Register, ptr_ty: Type, ptr: MCValue
.direct_load,
=> |sym_index| {
const abi_size = @intCast(u32, ptr_ty.abiSize(self.target.*));
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
const atom_index = if (self.bin_file.tag == link.File.MachO.base_tag)
fn_owner_decl.link.macho.sym_index
else
fn_owner_decl.link.coff.sym_index;
const flags: u2 = switch (ptr) {
.got_load => 0b00,
.direct_load => 0b01,
else => unreachable,
};
const mod = self.bin_file.options.module.?;
const fn_owner_decl = mod.declPtr(self.mod_fn.owner_decl);
_ = try self.addInst(.{
.tag = .lea_pie,
.tag = .lea_pic,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = registerAlias(reg, abi_size),
.flags = flags,
}),
.data = .{
.relocation = .{
.atom_index = fn_owner_decl.link.macho.sym_index,
.atom_index = atom_index,
.sym_index = sym_index,
},
},
@ -3961,20 +3965,17 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
// Due to incremental compilation, how function calls are generated depends
// on linking.
const mod = self.bin_file.options.module.?;
if (self.bin_file.tag == link.File.Elf.base_tag or self.bin_file.tag == link.File.Coff.base_tag) {
if (self.bin_file.cast(link.File.Elf)) |elf_file| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const ptr_bits = self.target.cpu.arch.ptrBitWidth();
const ptr_bytes: u64 = @divExact(ptr_bits, 8);
const fn_owner_decl = mod.declPtr(func.owner_decl);
const got_addr = if (self.bin_file.cast(link.File.Elf)) |elf_file| blk: {
const got_addr = blk: {
const got = &elf_file.program_headers.items[elf_file.phdr_got_index.?];
break :blk @intCast(u32, got.p_vaddr + fn_owner_decl.link.elf.offset_table_index * ptr_bytes);
} else if (self.bin_file.cast(link.File.Coff)) |coff_file|
@intCast(u32, coff_file.offset_table_virtual_address + fn_owner_decl.link.coff.offset_table_index * ptr_bytes)
else
unreachable;
};
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{ .flags = 0b01 }),
@ -3998,14 +3999,47 @@ fn airCall(self: *Self, inst: Air.Inst.Index, modifier: std.builtin.CallOptions.
.data = undefined,
});
}
} else if (self.bin_file.cast(link.File.Coff)) |_| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.usize), .rax, .{
.got_load = fn_owner_decl.link.coff.sym_index,
});
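// The callee's address is loaded into rax from its GOT entry above; the
// instruction below then performs an indirect call through rax (callq *%rax),
// mirroring the MachO path further down.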
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
} else if (func_value.castTag(.extern_fn)) |_| {
return self.fail("TODO implement calling extern functions", .{});
} else {
return self.fail("TODO implement calling bitcasted functions", .{});
}
} else {
assert(ty.zigTypeTag() == .Pointer);
const mcv = try self.resolveInst(callee);
try self.genSetReg(Type.initTag(.usize), .rax, mcv);
_ = try self.addInst(.{
.tag = .call,
.ops = Mir.Inst.Ops.encode(.{
.reg1 = .rax,
.flags = 0b01,
}),
.data = undefined,
});
}
} else if (self.bin_file.cast(link.File.MachO)) |macho_file| {
if (self.air.value(callee)) |func_value| {
if (func_value.castTag(.function)) |func_payload| {
const func = func_payload.data;
const fn_owner_decl = mod.declPtr(func.owner_decl);
try self.genSetReg(Type.initTag(.usize), .rax, .{
.got_load = fn_owner_decl.link.macho.sym_index,
});
const sym_index = fn_owner_decl.link.macho.sym_index;
try self.genSetReg(Type.initTag(.usize), .rax, .{ .got_load = sym_index });
// callq *%rax
_ = try self.addInst(.{
.tag = .call,
@ -6842,13 +6876,11 @@ fn lowerDeclRef(self: *Self, tv: TypedValue, decl_index: Module.Decl.Index) Inne
const got_addr = got.p_vaddr + decl.link.elf.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.MachO)) |_| {
// Because MachO is PIE-always-on, we defer memory address resolution until
// the linker has enough info to perform relocations.
assert(decl.link.macho.sym_index != 0);
return MCValue{ .got_load = decl.link.macho.sym_index };
} else if (self.bin_file.cast(link.File.Coff)) |coff_file| {
const got_addr = coff_file.offset_table_virtual_address + decl.link.coff.offset_table_index * ptr_bytes;
return MCValue{ .memory = got_addr };
} else if (self.bin_file.cast(link.File.Coff)) |_| {
assert(decl.link.coff.sym_index != 0);
return MCValue{ .got_load = decl.link.coff.sym_index };
} else if (self.bin_file.cast(link.File.Plan9)) |p9| {
try p9.seeDecl(decl_index);
const got_addr = p9.bases.data + decl.link.plan9.got_index.? * ptr_bytes;


@ -137,7 +137,7 @@ pub fn lowerMir(emit: *Emit) InnerError!void {
.fld => try emit.mirFld(inst),
.lea => try emit.mirLea(inst),
.lea_pie => try emit.mirLeaPie(inst),
.lea_pic => try emit.mirLeaPic(inst),
.shl => try emit.mirShift(.shl, inst),
.sal => try emit.mirShift(.sal, inst),
@ -338,7 +338,7 @@ fn mirJmpCall(emit: *Emit, tag: Tag, inst: Mir.Inst.Index) InnerError!void {
.base = ops.reg1,
}), emit.code);
},
0b11 => return emit.fail("TODO unused JMP/CALL variant 0b11", .{}),
0b11 => return emit.fail("TODO unused variant jmp/call 0b11", .{}),
}
}
@ -784,7 +784,7 @@ fn mirMovabs(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
// FD
return lowerToFdEnc(.mov, ops.reg1, imm, emit.code);
},
else => return emit.fail("TODO unused variant: movabs 0b{b}", .{ops.flags}),
else => return emit.fail("TODO unused movabs variant", .{}),
}
}
@ -978,12 +978,17 @@ fn mirLea(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
}
}
fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
fn mirLeaPic(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
const tag = emit.mir.instructions.items(.tag)[inst];
assert(tag == .lea_pie);
assert(tag == .lea_pic);
const ops = emit.mir.instructions.items(.ops)[inst].decode();
const relocation = emit.mir.instructions.items(.data)[inst].relocation;
switch (ops.flags) {
0b00, 0b01 => {},
else => return emit.fail("TODO unused LEA PIC variants 0b10 and 0b11", .{}),
}
// lea reg1, [rip + reloc]
// RM
try lowerToRmEnc(
@ -994,16 +999,17 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
);
const end_offset = emit.code.items.len;
const gpa = emit.bin_file.allocator;
if (emit.bin_file.cast(link.File.MachO)) |macho_file| {
const reloc_type = switch (ops.flags) {
0b00 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_GOT),
0b01 => @enumToInt(std.macho.reloc_type_x86_64.X86_64_RELOC_SIGNED),
else => return emit.fail("TODO unused LEA PIE variants 0b10 and 0b11", .{}),
else => unreachable,
};
const atom = macho_file.atom_by_index_table.get(relocation.atom_index).?;
log.debug("adding reloc of type {} to local @{d}", .{ reloc_type, relocation.sym_index });
try atom.relocs.append(emit.bin_file.allocator, .{
try atom.relocs.append(gpa, .{
.offset = @intCast(u32, end_offset - 4),
.target = .{ .sym_index = relocation.sym_index, .file = null },
.addend = 0,
@ -1012,11 +1018,23 @@ fn mirLeaPie(emit: *Emit, inst: Mir.Inst.Index) InnerError!void {
.length = 2,
.@"type" = reloc_type,
});
} else if (emit.bin_file.cast(link.File.Coff)) |coff_file| {
const atom = coff_file.atom_by_index_table.get(relocation.atom_index).?;
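// Attach a PC-relative relocation to the 4-byte displacement just emitted;
// .got vs .direct here plays the same role as the X86_64_RELOC_GOT vs
// X86_64_RELOC_SIGNED distinction in the MachO branch above.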
try atom.addRelocation(coff_file, .{
.@"type" = switch (ops.flags) {
0b00 => .got,
0b01 => .direct,
else => unreachable,
},
.target = .{ .sym_index = relocation.sym_index, .file = null },
.offset = @intCast(u32, end_offset - 4),
.addend = 0,
.pcrel = true,
.length = 2,
.prev_vaddr = atom.getSymbol(coff_file).value,
});
} else {
return emit.fail(
"TODO implement lea reg, [rip + reloc] for linking backends different than MachO",
.{},
);
return emit.fail("TODO implement lea reg, [rip + reloc] for linking backends different than MachO", .{});
}
}


@ -178,11 +178,11 @@ pub const Inst = struct {
lea,
/// ops flags: form:
/// 0b00 reg1, [rip + reloc] // via GOT emits X86_64_RELOC_GOT relocation
/// 0b01 reg1, [rip + reloc] // direct load emits X86_64_RELOC_SIGNED relocation
/// 0b00 reg1, [rip + reloc] // via GOT PIC
/// 0b01 reg1, [rip + reloc] // direct load PIC
/// Notes:
/// * `Data` contains `relocation`
lea_pie,
lea_pic,
/// ops flags: form:
/// 0b00 reg1, 1
@ -242,15 +242,14 @@ pub const Inst = struct {
imul_complex,
/// ops flags: form:
/// 0bX0 reg1, imm64
/// 0bX1 rax, moffs64
/// 0b00 reg1, imm64
/// 0b01 rax, moffs64
/// Notes:
/// * If reg1 is 64-bit, the immediate is 64-bit and stored
/// within extra data `Imm64`.
/// * For 0bX1, reg1 (or reg2) need to be
/// * For 0b01, reg1 (or reg2) need to be
/// a version of rax. If reg1 == .none, then reg2 == .rax,
/// or vice versa.
/// TODO handle scaling
movabs,
/// ops flags: form:


@ -198,6 +198,122 @@ pub fn targetTriple(allocator: Allocator, target: std.Target) ![:0]u8 {
return llvm_triple.toOwnedSliceSentinel(0);
}
pub fn targetOs(os_tag: std.Target.Os.Tag) llvm.OSType {
return switch (os_tag) {
.freestanding, .other, .opencl, .glsl450, .vulkan, .plan9 => .UnknownOS,
.windows, .uefi => .Win32,
.ananas => .Ananas,
.cloudabi => .CloudABI,
.dragonfly => .DragonFly,
.freebsd => .FreeBSD,
.fuchsia => .Fuchsia,
.ios => .IOS,
.kfreebsd => .KFreeBSD,
.linux => .Linux,
.lv2 => .Lv2,
.macos => .MacOSX,
.netbsd => .NetBSD,
.openbsd => .OpenBSD,
.solaris => .Solaris,
.zos => .ZOS,
.haiku => .Haiku,
.minix => .Minix,
.rtems => .RTEMS,
.nacl => .NaCl,
.aix => .AIX,
.cuda => .CUDA,
.nvcl => .NVCL,
.amdhsa => .AMDHSA,
.ps4 => .PS4,
.ps5 => .PS5,
.elfiamcu => .ELFIAMCU,
.tvos => .TvOS,
.watchos => .WatchOS,
.mesa3d => .Mesa3D,
.contiki => .Contiki,
.amdpal => .AMDPAL,
.hermit => .HermitCore,
.hurd => .Hurd,
.wasi => .WASI,
.emscripten => .Emscripten,
.driverkit => .DriverKit,
.shadermodel => .ShaderModel,
};
}
pub fn targetArch(arch_tag: std.Target.Cpu.Arch) llvm.ArchType {
return switch (arch_tag) {
.arm => .arm,
.armeb => .armeb,
.aarch64 => .aarch64,
.aarch64_be => .aarch64_be,
.aarch64_32 => .aarch64_32,
.arc => .arc,
.avr => .avr,
.bpfel => .bpfel,
.bpfeb => .bpfeb,
.csky => .csky,
.dxil => .dxil,
.hexagon => .hexagon,
.loongarch32 => .loongarch32,
.loongarch64 => .loongarch64,
.m68k => .m68k,
.mips => .mips,
.mipsel => .mipsel,
.mips64 => .mips64,
.mips64el => .mips64el,
.msp430 => .msp430,
.powerpc => .ppc,
.powerpcle => .ppcle,
.powerpc64 => .ppc64,
.powerpc64le => .ppc64le,
.r600 => .r600,
.amdgcn => .amdgcn,
.riscv32 => .riscv32,
.riscv64 => .riscv64,
.sparc => .sparc,
.sparc64 => .sparcv9, // In LLVM, sparc64 == sparcv9.
.sparcel => .sparcel,
.s390x => .systemz,
.tce => .tce,
.tcele => .tcele,
.thumb => .thumb,
.thumbeb => .thumbeb,
.i386 => .x86,
.x86_64 => .x86_64,
.xcore => .xcore,
.nvptx => .nvptx,
.nvptx64 => .nvptx64,
.le32 => .le32,
.le64 => .le64,
.amdil => .amdil,
.amdil64 => .amdil64,
.hsail => .hsail,
.hsail64 => .hsail64,
.spir => .spir,
.spir64 => .spir64,
.kalimba => .kalimba,
.shave => .shave,
.lanai => .lanai,
.wasm32 => .wasm32,
.wasm64 => .wasm64,
.renderscript32 => .renderscript32,
.renderscript64 => .renderscript64,
.ve => .ve,
.spu_2, .spirv32, .spirv64 => .UnknownArch,
};
}
pub fn supportsTailCall(target: std.Target) bool {
switch (target.cpu.arch) {
.wasm32, .wasm64 => return std.Target.wasm.featureSetHas(target.cpu.features, .tail_call),
// Although these ISAs support tail calls, LLVM does not support tail calls on them.
.mips, .mipsel, .mips64, .mips64el => return false,
.powerpc, .powerpcle, .powerpc64, .powerpc64le => return false,
else => return true,
}
}
pub const Object = struct {
gpa: Allocator,
module: *Module,
@ -4559,7 +4675,7 @@ pub const FuncGen = struct {
"",
);
if (return_type.isNoReturn()) {
if (return_type.isNoReturn() and attr != .AlwaysTail) {
_ = self.builder.buildUnreachable();
return null;
}
@ -8644,12 +8760,26 @@ pub const FuncGen = struct {
const union_llvm_ty = try self.dg.lowerType(union_ty);
const target = self.dg.module.getTarget();
const layout = union_ty.unionGetLayout(target);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
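// Translate the union field index into the corresponding enum tag value;
// with an explicit tag type such as `enum(u8) { b = 255, a = 3 }` the tag
// value can differ from the field index.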
const tag_int = blk: {
const tag_ty = union_ty.unionTagTypeHypothetical();
const union_field_name = union_obj.fields.keys()[extra.field_index];
const enum_field_index = tag_ty.enumFieldIndex(union_field_name).?;
var tag_val_payload: Value.Payload.U32 = .{
.base = .{ .tag = .enum_field_index },
.data = @intCast(u32, enum_field_index),
};
const tag_val = Value.initPayload(&tag_val_payload.base);
var int_payload: Value.Payload.U64 = undefined;
const tag_int_val = tag_val.enumToInt(tag_ty, &int_payload);
break :blk tag_int_val.toUnsignedInt(target);
};
if (layout.payload_size == 0) {
if (layout.tag_size == 0) {
return null;
}
assert(!isByRef(union_ty));
return union_llvm_ty.constInt(extra.field_index, .False);
return union_llvm_ty.constInt(tag_int, .False);
}
assert(isByRef(union_ty));
// The llvm type of the alloca will be the named LLVM union type, which will not
@ -8658,7 +8788,6 @@ pub const FuncGen = struct {
// then set the fields appropriately.
const result_ptr = self.buildAlloca(union_llvm_ty);
const llvm_payload = try self.resolveInst(extra.init);
const union_obj = union_ty.cast(Type.Payload.Union).?.data;
assert(union_obj.haveFieldTypes());
const field = union_obj.fields.values()[extra.field_index];
const field_llvm_ty = try self.dg.lowerType(field.ty);
@ -8742,7 +8871,7 @@ pub const FuncGen = struct {
};
const field_ptr = self.builder.buildInBoundsGEP(llvm_union_ty, casted_ptr, &indices, indices.len, "");
const tag_llvm_ty = try self.dg.lowerType(union_obj.tag_ty);
const llvm_tag = tag_llvm_ty.constInt(extra.field_index, .False);
const llvm_tag = tag_llvm_ty.constInt(tag_int, .False);
const store_inst = self.builder.buildStore(llvm_tag, field_ptr);
store_inst.setAlignment(union_obj.tag_ty.abiAlignment(target));
}


@ -245,8 +245,8 @@ pub const File = struct {
pub const LinkBlock = union {
elf: Elf.TextBlock,
coff: Coff.TextBlock,
macho: MachO.TextBlock,
coff: Coff.Atom,
macho: MachO.Atom,
plan9: Plan9.DeclBlock,
c: void,
wasm: Wasm.DeclBlock,
@ -267,7 +267,7 @@ pub const File = struct {
pub const Export = union {
elf: Elf.Export,
coff: void,
coff: Coff.Export,
macho: MachO.Export,
plan9: Plan9.Export,
c: void,
@ -934,9 +934,10 @@ pub const File = struct {
std.debug.print("\n", .{});
}
const llvm = @import("codegen/llvm/bindings.zig");
const os_type = @import("target.zig").osToLLVM(base.options.target.os.tag);
const bad = llvm.WriteArchive(full_out_path_z, object_files.items.ptr, object_files.items.len, os_type);
const llvm_bindings = @import("codegen/llvm/bindings.zig");
const llvm = @import("codegen/llvm.zig");
const os_tag = llvm.targetOs(base.options.target.os.tag);
const bad = llvm_bindings.WriteArchive(full_out_path_z, object_files.items.ptr, object_files.items.len, os_tag);
if (bad) return error.UnableToWriteArchive;
if (!base.options.disable_lld_caching) {

File diff suppressed because it is too large

src/link/Coff/Atom.zig Normal file

@ -0,0 +1,110 @@
const Atom = @This();
const std = @import("std");
const coff = std.coff;
const Allocator = std.mem.Allocator;
const Coff = @import("../Coff.zig");
const Reloc = Coff.Reloc;
const SymbolWithLoc = Coff.SymbolWithLoc;
/// Each decl always gets a local symbol with the fully qualified name.
/// The vaddr and size are found here directly.
/// The file offset is found by computing the vaddr offset from the section vaddr
/// the symbol references, and adding that to the file offset of the section.
/// If this field is 0, it means the codegen size = 0 and there is no symbol or
/// offset table entry.
sym_index: u32,
/// null means symbol defined by Zig source.
file: ?u32,
/// Used size of the atom
size: u32,
/// Alignment of the atom
alignment: u32,
/// Points to the previous and next neighbors, based on the `text_offset`.
/// This can be used to find, for example, the capacity of this `Atom`.
prev: ?*Atom,
next: ?*Atom,
pub const empty = Atom{
.sym_index = 0,
.file = null,
.size = 0,
.alignment = 0,
.prev = null,
.next = null,
};
pub fn deinit(self: *Atom, gpa: Allocator) void {
_ = self;
_ = gpa;
}
/// Returns symbol referencing this atom.
pub fn getSymbol(self: Atom, coff_file: *const Coff) *const coff.Symbol {
return coff_file.getSymbol(.{
.sym_index = self.sym_index,
.file = self.file,
});
}
/// Returns pointer-to-symbol referencing this atom.
pub fn getSymbolPtr(self: Atom, coff_file: *Coff) *coff.Symbol {
return coff_file.getSymbolPtr(.{
.sym_index = self.sym_index,
.file = self.file,
});
}
pub fn getSymbolWithLoc(self: Atom) SymbolWithLoc {
return .{ .sym_index = self.sym_index, .file = self.file };
}
/// Returns the name of this atom.
pub fn getName(self: Atom, coff_file: *const Coff) []const u8 {
return coff_file.getSymbolName(.{
.sym_index = self.sym_index,
.file = self.file,
});
}
/// Returns how much room there is to grow in virtual address space.
pub fn capacity(self: Atom, coff_file: *const Coff) u32 {
const self_sym = self.getSymbol(coff_file);
if (self.next) |next| {
const next_sym = next.getSymbol(coff_file);
return next_sym.value - self_sym.value;
} else {
// We are the last atom.
// The capacity is limited only by virtual address space.
return std.math.maxInt(u32) - self_sym.value;
}
}
pub fn freeListEligible(self: Atom, coff_file: *const Coff) bool {
// No need to keep a free list node for the last atom.
const next = self.next orelse return false;
const self_sym = self.getSymbol(coff_file);
const next_sym = next.getSymbol(coff_file);
const cap = next_sym.value - self_sym.value;
const ideal_cap = Coff.padToIdeal(self.size);
if (cap <= ideal_cap) return false;
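// Only keep a free-list entry if the slack beyond the ideal (padded) size
// is at least the minimum text capacity.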
const surplus = cap - ideal_cap;
return surplus >= Coff.min_text_capacity;
}
pub fn addRelocation(self: *Atom, coff_file: *Coff, reloc: Reloc) !void {
const gpa = coff_file.base.allocator;
// TODO causes a segfault on Windows
// log.debug("adding reloc of type {s} to target %{d}", .{ @tagName(reloc.@"type"), reloc.target.sym_index });
const gop = try coff_file.relocs.getOrPut(gpa, self);
if (!gop.found_existing) {
gop.value_ptr.* = .{};
}
try gop.value_ptr.append(gpa, reloc);
}

src/link/Coff/Object.zig Normal file

@ -0,0 +1,12 @@
const Object = @This();
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
name: []const u8,
pub fn deinit(self: *Object, gpa: Allocator) void {
gpa.free(self.name);
}

src/link/Coff/lld.zig Normal file

@ -0,0 +1,602 @@
const std = @import("std");
const build_options = @import("build_options");
const allocPrint = std.fmt.allocPrint;
const assert = std.debug.assert;
const fs = std.fs;
const log = std.log.scoped(.link);
const mem = std.mem;
const mingw = @import("../../mingw.zig");
const link = @import("../../link.zig");
const lldMain = @import("../../main.zig").lldMain;
const trace = @import("../../tracy.zig").trace;
const Allocator = mem.Allocator;
const Cache = @import("../../Cache.zig");
const Coff = @import("../Coff.zig");
const Compilation = @import("../../Compilation.zig");
pub fn linkWithLLD(self: *Coff, comp: *Compilation, prog_node: *std.Progress.Node) !void {
const tracy = trace(@src());
defer tracy.end();
var arena_allocator = std.heap.ArenaAllocator.init(self.base.allocator);
defer arena_allocator.deinit();
const arena = arena_allocator.allocator();
const directory = self.base.options.emit.?.directory; // Just an alias to make it shorter to type.
const full_out_path = try directory.join(arena, &[_][]const u8{self.base.options.emit.?.sub_path});
// If there is no Zig code to compile, then we should skip flushing the output file because it
// will not be part of the linker line anyway.
const module_obj_path: ?[]const u8 = if (self.base.options.module) |module| blk: {
const use_stage1 = build_options.have_stage1 and self.base.options.use_stage1;
if (use_stage1) {
const obj_basename = try std.zig.binNameAlloc(arena, .{
.root_name = self.base.options.root_name,
.target = self.base.options.target,
.output_mode = .Obj,
});
switch (self.base.options.cache_mode) {
.incremental => break :blk try module.zig_cache_artifact_directory.join(
arena,
&[_][]const u8{obj_basename},
),
.whole => break :blk try fs.path.join(arena, &.{
fs.path.dirname(full_out_path).?, obj_basename,
}),
}
}
try self.flushModule(comp, prog_node);
if (fs.path.dirname(full_out_path)) |dirname| {
break :blk try fs.path.join(arena, &.{ dirname, self.base.intermediary_basename.? });
} else {
break :blk self.base.intermediary_basename.?;
}
} else null;
var sub_prog_node = prog_node.start("LLD Link", 0);
sub_prog_node.activate();
sub_prog_node.context.refresh();
defer sub_prog_node.end();
const is_lib = self.base.options.output_mode == .Lib;
const is_dyn_lib = self.base.options.link_mode == .Dynamic and is_lib;
const is_exe_or_dyn_lib = is_dyn_lib or self.base.options.output_mode == .Exe;
const link_in_crt = self.base.options.link_libc and is_exe_or_dyn_lib;
const target = self.base.options.target;
// See link/Elf.zig for comments on how this mechanism works.
const id_symlink_basename = "lld.id";
var man: Cache.Manifest = undefined;
defer if (!self.base.options.disable_lld_caching) man.deinit();
var digest: [Cache.hex_digest_len]u8 = undefined;
if (!self.base.options.disable_lld_caching) {
man = comp.cache_parent.obtain();
self.base.releaseLock();
comptime assert(Compilation.link_hash_implementation_version == 7);
for (self.base.options.objects) |obj| {
_ = try man.addFile(obj.path, null);
man.hash.add(obj.must_link);
}
for (comp.c_object_table.keys()) |key| {
_ = try man.addFile(key.status.success.object_path, null);
}
try man.addOptionalFile(module_obj_path);
man.hash.addOptionalBytes(self.base.options.entry);
man.hash.addOptional(self.base.options.stack_size_override);
man.hash.addOptional(self.base.options.image_base_override);
man.hash.addListOfBytes(self.base.options.lib_dirs);
man.hash.add(self.base.options.skip_linker_dependencies);
if (self.base.options.link_libc) {
man.hash.add(self.base.options.libc_installation != null);
if (self.base.options.libc_installation) |libc_installation| {
man.hash.addBytes(libc_installation.crt_dir.?);
if (target.abi == .msvc) {
man.hash.addBytes(libc_installation.msvc_lib_dir.?);
man.hash.addBytes(libc_installation.kernel32_lib_dir.?);
}
}
}
link.hashAddSystemLibs(&man.hash, self.base.options.system_libs);
man.hash.addListOfBytes(self.base.options.force_undefined_symbols.keys());
man.hash.addOptional(self.base.options.subsystem);
man.hash.add(self.base.options.is_test);
man.hash.add(self.base.options.tsaware);
man.hash.add(self.base.options.nxcompat);
man.hash.add(self.base.options.dynamicbase);
// strip does not need to go into the linker hash because it is part of the hash namespace
man.hash.addOptional(self.base.options.major_subsystem_version);
man.hash.addOptional(self.base.options.minor_subsystem_version);
// We don't actually care whether it's a cache hit or miss; we just need the digest and the lock.
_ = try man.hit();
digest = man.final();
var prev_digest_buf: [digest.len]u8 = undefined;
const prev_digest: []u8 = Cache.readSmallFile(
directory.handle,
id_symlink_basename,
&prev_digest_buf,
) catch |err| blk: {
log.debug("COFF LLD new_digest={s} error: {s}", .{ std.fmt.fmtSliceHexLower(&digest), @errorName(err) });
// Handle this as a cache miss.
break :blk prev_digest_buf[0..0];
};
if (mem.eql(u8, prev_digest, &digest)) {
log.debug("COFF LLD digest={s} match - skipping invocation", .{std.fmt.fmtSliceHexLower(&digest)});
// Hot diggity dog! The output binary is already there.
self.base.lock = man.toOwnedLock();
return;
}
log.debug("COFF LLD prev_digest={s} new_digest={s}", .{ std.fmt.fmtSliceHexLower(prev_digest), std.fmt.fmtSliceHexLower(&digest) });
// We are about to change the output file to be different, so we invalidate the build hash now.
directory.handle.deleteFile(id_symlink_basename) catch |err| switch (err) {
error.FileNotFound => {},
else => |e| return e,
};
}
if (self.base.options.output_mode == .Obj) {
// LLD's COFF driver does not support the equivalent of `-r` so we do a simple file copy
// here. TODO: think carefully about how we can avoid this redundant operation when doing
// build-obj. See also the corresponding TODO in linkAsArchive.
const the_object_path = blk: {
if (self.base.options.objects.len != 0)
break :blk self.base.options.objects[0].path;
if (comp.c_object_table.count() != 0)
break :blk comp.c_object_table.keys()[0].status.success.object_path;
if (module_obj_path) |p|
break :blk p;
// TODO I think this is unreachable. Audit this situation when solving the above TODO
// regarding eliding redundant object -> object transformations.
return error.NoObjectsToLink;
};
// This can happen when using --enable-cache and using the stage1 backend. In this case
// we can skip the file copy.
if (!mem.eql(u8, the_object_path, full_out_path)) {
try fs.cwd().copyFile(the_object_path, fs.cwd(), full_out_path, .{});
}
} else {
// Create an LLD command line and invoke it.
var argv = std.ArrayList([]const u8).init(self.base.allocator);
defer argv.deinit();
// We will invoke ourselves as a child process to gain access to LLD.
// This is necessary because LLD does not behave properly as a library -
// it calls exit() and does not reset all global data between invocations.
try argv.appendSlice(&[_][]const u8{ comp.self_exe_path.?, "lld-link" });
try argv.append("-ERRORLIMIT:0");
try argv.append("-NOLOGO");
if (!self.base.options.strip) {
try argv.append("-DEBUG");
}
if (self.base.options.lto) {
switch (self.base.options.optimize_mode) {
.Debug => {},
.ReleaseSmall => try argv.append("-OPT:lldlto=2"),
.ReleaseFast, .ReleaseSafe => try argv.append("-OPT:lldlto=3"),
}
}
if (self.base.options.output_mode == .Exe) {
const stack_size = self.base.options.stack_size_override orelse 16777216;
try argv.append(try allocPrint(arena, "-STACK:{d}", .{stack_size}));
}
if (self.base.options.image_base_override) |image_base| {
try argv.append(try std.fmt.allocPrint(arena, "-BASE:{d}", .{image_base}));
}
if (target.cpu.arch == .i386) {
try argv.append("-MACHINE:X86");
} else if (target.cpu.arch == .x86_64) {
try argv.append("-MACHINE:X64");
} else if (target.cpu.arch.isARM()) {
if (target.cpu.arch.ptrBitWidth() == 32) {
try argv.append("-MACHINE:ARM");
} else {
try argv.append("-MACHINE:ARM64");
}
}
for (self.base.options.force_undefined_symbols.keys()) |symbol| {
try argv.append(try allocPrint(arena, "-INCLUDE:{s}", .{symbol}));
}
if (is_dyn_lib) {
try argv.append("-DLL");
}
if (self.base.options.entry) |entry| {
try argv.append(try allocPrint(arena, "-ENTRY:{s}", .{entry}));
}
if (self.base.options.tsaware) {
try argv.append("-tsaware");
}
if (self.base.options.nxcompat) {
try argv.append("-nxcompat");
}
if (self.base.options.dynamicbase) {
try argv.append("-dynamicbase");
}
try argv.append(try allocPrint(arena, "-OUT:{s}", .{full_out_path}));
if (self.base.options.implib_emit) |emit| {
const implib_out_path = try emit.directory.join(arena, &[_][]const u8{emit.sub_path});
try argv.append(try allocPrint(arena, "-IMPLIB:{s}", .{implib_out_path}));
}
if (self.base.options.link_libc) {
if (self.base.options.libc_installation) |libc_installation| {
try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.crt_dir.?}));
if (target.abi == .msvc) {
try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.msvc_lib_dir.?}));
try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{libc_installation.kernel32_lib_dir.?}));
}
}
}
for (self.base.options.lib_dirs) |lib_dir| {
try argv.append(try allocPrint(arena, "-LIBPATH:{s}", .{lib_dir}));
}
try argv.ensureUnusedCapacity(self.base.options.objects.len);
for (self.base.options.objects) |obj| {
if (obj.must_link) {
argv.appendAssumeCapacity(try allocPrint(arena, "-WHOLEARCHIVE:{s}", .{obj.path}));
} else {
argv.appendAssumeCapacity(obj.path);
}
}
for (comp.c_object_table.keys()) |key| {
try argv.append(key.status.success.object_path);
}
if (module_obj_path) |p| {
try argv.append(p);
}
const resolved_subsystem: ?std.Target.SubSystem = blk: {
if (self.base.options.subsystem) |explicit| break :blk explicit;
switch (target.os.tag) {
.windows => {
if (self.base.options.module) |module| {
if (module.stage1_flags.have_dllmain_crt_startup or is_dyn_lib)
break :blk null;
if (module.stage1_flags.have_c_main or self.base.options.is_test or
module.stage1_flags.have_winmain_crt_startup or
module.stage1_flags.have_wwinmain_crt_startup)
{
break :blk .Console;
}
if (module.stage1_flags.have_winmain or module.stage1_flags.have_wwinmain)
break :blk .Windows;
}
},
.uefi => break :blk .EfiApplication,
else => {},
}
break :blk null;
};
const Mode = enum { uefi, win32 };
const mode: Mode = mode: {
if (resolved_subsystem) |subsystem| {
const subsystem_suffix = ss: {
if (self.base.options.major_subsystem_version) |major| {
if (self.base.options.minor_subsystem_version) |minor| {
break :ss try allocPrint(arena, ",{d}.{d}", .{ major, minor });
} else {
break :ss try allocPrint(arena, ",{d}", .{major});
}
}
break :ss "";
};
switch (subsystem) {
.Console => {
try argv.append(try allocPrint(arena, "-SUBSYSTEM:console{s}", .{
subsystem_suffix,
}));
break :mode .win32;
},
.EfiApplication => {
try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_application{s}", .{
subsystem_suffix,
}));
break :mode .uefi;
},
.EfiBootServiceDriver => {
try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_boot_service_driver{s}", .{
subsystem_suffix,
}));
break :mode .uefi;
},
.EfiRom => {
try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_rom{s}", .{
subsystem_suffix,
}));
break :mode .uefi;
},
.EfiRuntimeDriver => {
try argv.append(try allocPrint(arena, "-SUBSYSTEM:efi_runtime_driver{s}", .{
subsystem_suffix,
}));
break :mode .uefi;
},
.Native => {
try argv.append(try allocPrint(arena, "-SUBSYSTEM:native{s}", .{
subsystem_suffix,
}));
break :mode .win32;
},
.Posix => {
try argv.append(try allocPrint(arena, "-SUBSYSTEM:posix{s}", .{
subsystem_suffix,
}));
break :mode .win32;
},
.Windows => {
try argv.append(try allocPrint(arena, "-SUBSYSTEM:windows{s}", .{
subsystem_suffix,
}));
break :mode .win32;
},
}
} else if (target.os.tag == .uefi) {
break :mode .uefi;
} else {
break :mode .win32;
}
};
switch (mode) {
.uefi => try argv.appendSlice(&[_][]const u8{
"-BASE:0",
"-ENTRY:EfiMain",
"-OPT:REF",
"-SAFESEH:NO",
"-MERGE:.rdata=.data",
"-ALIGN:32",
"-NODEFAULTLIB",
"-SECTION:.xdata,D",
}),
.win32 => {
if (link_in_crt) {
if (target.abi.isGnu()) {
try argv.append("-lldmingw");
if (target.cpu.arch == .i386) {
try argv.append("-ALTERNATENAME:__image_base__=___ImageBase");
} else {
try argv.append("-ALTERNATENAME:__image_base__=__ImageBase");
}
if (is_dyn_lib) {
try argv.append(try comp.get_libc_crt_file(arena, "dllcrt2.obj"));
if (target.cpu.arch == .i386) {
try argv.append("-ALTERNATENAME:__DllMainCRTStartup@12=_DllMainCRTStartup@12");
} else {
try argv.append("-ALTERNATENAME:_DllMainCRTStartup=DllMainCRTStartup");
}
} else {
try argv.append(try comp.get_libc_crt_file(arena, "crt2.obj"));
}
try argv.append(try comp.get_libc_crt_file(arena, "mingw32.lib"));
try argv.append(try comp.get_libc_crt_file(arena, "mingwex.lib"));
try argv.append(try comp.get_libc_crt_file(arena, "msvcrt-os.lib"));
for (mingw.always_link_libs) |name| {
if (!self.base.options.system_libs.contains(name)) {
const lib_basename = try allocPrint(arena, "{s}.lib", .{name});
try argv.append(try comp.get_libc_crt_file(arena, lib_basename));
}
}
} else {
const lib_str = switch (self.base.options.link_mode) {
.Dynamic => "",
.Static => "lib",
};
const d_str = switch (self.base.options.optimize_mode) {
.Debug => "d",
else => "",
};
switch (self.base.options.link_mode) {
.Static => try argv.append(try allocPrint(arena, "libcmt{s}.lib", .{d_str})),
.Dynamic => try argv.append(try allocPrint(arena, "msvcrt{s}.lib", .{d_str})),
}
try argv.append(try allocPrint(arena, "{s}vcruntime{s}.lib", .{ lib_str, d_str }));
try argv.append(try allocPrint(arena, "{s}ucrt{s}.lib", .{ lib_str, d_str }));
// Visual C++ 2015 Conformance Changes
//https://msdn.microsoft.com/en-us/library/bb531344.aspx
try argv.append("legacy_stdio_definitions.lib");
// msvcrt depends on kernel32 and ntdll
try argv.append("kernel32.lib");
try argv.append("ntdll.lib");
}
} else {
try argv.append("-NODEFAULTLIB");
if (!is_lib) {
if (self.base.options.module) |module| {
if (module.stage1_flags.have_winmain_crt_startup) {
try argv.append("-ENTRY:WinMainCRTStartup");
} else {
try argv.append("-ENTRY:wWinMainCRTStartup");
}
} else {
try argv.append("-ENTRY:wWinMainCRTStartup");
}
}
}
},
}
// libc++ dep
if (self.base.options.link_libcpp) {
try argv.append(comp.libcxxabi_static_lib.?.full_object_path);
try argv.append(comp.libcxx_static_lib.?.full_object_path);
}
// libunwind dep
if (self.base.options.link_libunwind) {
try argv.append(comp.libunwind_static_lib.?.full_object_path);
}
if (is_exe_or_dyn_lib and !self.base.options.skip_linker_dependencies) {
if (!self.base.options.link_libc) {
if (comp.libc_static_lib) |lib| {
try argv.append(lib.full_object_path);
}
}
// MinGW doesn't provide libssp symbols
if (target.abi.isGnu()) {
if (comp.libssp_static_lib) |lib| {
try argv.append(lib.full_object_path);
}
}
// MSVC compiler_rt is missing some stuff, so we build it unconditionally
// and rely on weak linkage to allow MSVC compiler_rt functions to override ours.
if (comp.compiler_rt_lib) |lib| {
try argv.append(lib.full_object_path);
}
}
try argv.ensureUnusedCapacity(self.base.options.system_libs.count());
for (self.base.options.system_libs.keys()) |key| {
const lib_basename = try allocPrint(arena, "{s}.lib", .{key});
if (comp.crt_files.get(lib_basename)) |crt_file| {
argv.appendAssumeCapacity(crt_file.full_object_path);
continue;
}
if (try findLib(arena, lib_basename, self.base.options.lib_dirs)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
if (target.abi.isGnu()) {
const fallback_name = try allocPrint(arena, "lib{s}.dll.a", .{key});
if (try findLib(arena, fallback_name, self.base.options.lib_dirs)) |full_path| {
argv.appendAssumeCapacity(full_path);
continue;
}
}
log.err("DLL import library for -l{s} not found", .{key});
return error.DllImportLibraryNotFound;
}
if (self.base.options.verbose_link) {
// Skip over our own name so that the LLD linker name is the first argv item.
Compilation.dump_argv(argv.items[1..]);
}
if (std.process.can_spawn) {
// If possible, we run LLD as a child process because it does not always
// behave properly as a library, unfortunately.
// https://github.com/ziglang/zig/issues/3825
var child = std.ChildProcess.init(argv.items, arena);
if (comp.clang_passthrough_mode) {
child.stdin_behavior = .Inherit;
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
const term = child.spawnAndWait() catch |err| {
log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
return error.UnableToSpawnSelf;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
std.process.exit(code);
}
},
else => std.process.abort(),
}
} else {
child.stdin_behavior = .Ignore;
child.stdout_behavior = .Ignore;
child.stderr_behavior = .Pipe;
try child.spawn();
const stderr = try child.stderr.?.reader().readAllAlloc(arena, 10 * 1024 * 1024);
const term = child.wait() catch |err| {
log.err("unable to spawn {s}: {s}", .{ argv.items[0], @errorName(err) });
return error.UnableToSpawnSelf;
};
switch (term) {
.Exited => |code| {
if (code != 0) {
// TODO parse this output and surface with the Compilation API rather than
// directly outputting to stderr here.
std.debug.print("{s}", .{stderr});
return error.LLDReportedFailure;
}
},
else => {
log.err("{s} terminated with stderr:\n{s}", .{ argv.items[0], stderr });
return error.LLDCrashed;
},
}
if (stderr.len != 0) {
log.warn("unexpected LLD stderr:\n{s}", .{stderr});
}
}
} else {
const exit_code = try lldMain(arena, argv.items, false);
if (exit_code != 0) {
if (comp.clang_passthrough_mode) {
std.process.exit(exit_code);
} else {
return error.LLDReportedFailure;
}
}
}
}
if (!self.base.options.disable_lld_caching) {
// Update the file with the digest. If it fails we can continue; it only
// means that the next invocation will have an unnecessary cache miss.
Cache.writeSmallFile(directory.handle, id_symlink_basename, &digest) catch |err| {
log.warn("failed to save linking hash digest file: {s}", .{@errorName(err)});
};
// Again failure here only means an unnecessary cache miss.
man.writeManifest() catch |err| {
log.warn("failed to write cache manifest when linking: {s}", .{@errorName(err)});
};
// We hang on to this lock so that the output file path can be used without
// other processes clobbering it.
self.base.lock = man.toOwnedLock();
}
}
fn findLib(arena: Allocator, name: []const u8, lib_dirs: []const []const u8) !?[]const u8 {
for (lib_dirs) |lib_dir| {
const full_path = try fs.path.join(arena, &.{ lib_dir, name });
fs.cwd().access(full_path, .{}) catch |err| switch (err) {
error.FileNotFound => continue,
else => |e| return e,
};
return full_path;
}
return null;
}


@ -26,7 +26,7 @@ const trace = @import("../tracy.zig").trace;
const Air = @import("../Air.zig");
const Allocator = mem.Allocator;
const Archive = @import("MachO/Archive.zig");
const Atom = @import("MachO/Atom.zig");
pub const Atom = @import("MachO/Atom.zig");
const Cache = @import("../Cache.zig");
const CodeSignature = @import("MachO/CodeSignature.zig");
const Compilation = @import("../Compilation.zig");
@ -44,7 +44,6 @@ const Type = @import("../type.zig").Type;
const TypedValue = @import("../TypedValue.zig");
const Value = @import("../value.zig").Value;
pub const TextBlock = Atom;
pub const DebugSymbols = @import("MachO/DebugSymbols.zig");
pub const base_tag: File.Tag = File.Tag.macho;


@ -18,7 +18,6 @@ const Dwarf = @import("../Dwarf.zig");
const MachO = @import("../MachO.zig");
const Module = @import("../../Module.zig");
const StringTable = @import("../strtab.zig").StringTable;
const TextBlock = MachO.TextBlock;
const Type = @import("../../type.zig").Type;
base: *MachO,


@ -2836,24 +2836,19 @@ fn linkWithLLD(self: *Wasm, comp: *Compilation, prog_node: *std.Progress.Node) !
try argv.append(entry);
}
if (self.base.options.output_mode == .Exe) {
// Increase the default stack size to a more reasonable value of 1MB instead of
// the default of 1 Wasm page being 64KB, unless overridden by the user.
try argv.append("-z");
const stack_size = self.base.options.stack_size_override orelse 1048576;
const arg = try std.fmt.allocPrint(arena, "stack-size={d}", .{stack_size});
try argv.append(arg);
// Increase the default stack size to a more reasonable value of 1MB instead of
// the default of 1 Wasm page being 64KB, unless overridden by the user.
try argv.append("-z");
const stack_size = self.base.options.stack_size_override orelse wasm.page_size * 16;
const arg = try std.fmt.allocPrint(arena, "stack-size={d}", .{stack_size});
try argv.append(arg);
if (self.base.options.output_mode == .Exe) {
if (self.base.options.wasi_exec_model == .reactor) {
// Reactor execution model does not have _start so lld doesn't look for it.
try argv.append("--no-entry");
}
} else {
if (self.base.options.stack_size_override) |stack_size| {
try argv.append("-z");
const arg = try std.fmt.allocPrint(arena, "stack-size={d}", .{stack_size});
try argv.append(arg);
}
} else if (self.base.options.entry == null) {
try argv.append("--no-entry"); // So lld doesn't look for _start.
}
try argv.appendSlice(&[_][]const u8{


@ -109,5 +109,9 @@ pub fn StringTable(comptime log_scope: @Type(.EnumLiteral)) type {
pub fn getAssumeExists(self: Self, off: u32) []const u8 {
return self.get(off) orelse unreachable;
}
pub fn len(self: Self) usize {
return self.buffer.items.len;
}
};
}


@ -3734,9 +3734,9 @@ pub fn cmdBuild(gpa: Allocator, arena: Allocator, args: []const []const u8) !voi
const self_exe_path = try introspect.findZigExePath(arena);
var build_file: ?[]const u8 = null;
var override_lib_dir: ?[]const u8 = null;
var override_global_cache_dir: ?[]const u8 = null;
var override_local_cache_dir: ?[]const u8 = null;
var override_lib_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LIB_DIR");
var override_global_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_GLOBAL_CACHE_DIR");
var override_local_cache_dir: ?[]const u8 = try optionalStringEnvVar(arena, "ZIG_LOCAL_CACHE_DIR");
var child_argv = std.ArrayList([]const u8).init(arena);
const argv_index_exe = child_argv.items.len;


@ -6,7 +6,6 @@ const assert = std.debug.assert;
const log = std.log.scoped(.mingw);
const builtin = @import("builtin");
const target_util = @import("target.zig");
const Compilation = @import("Compilation.zig");
const build_options = @import("build_options");
const Cache = @import("Cache.zig");
@ -404,11 +403,12 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
});
errdefer comp.gpa.free(lib_final_path);
const llvm = @import("codegen/llvm/bindings.zig");
const arch_type = target_util.archToLLVM(target.cpu.arch);
const llvm_bindings = @import("codegen/llvm/bindings.zig");
const llvm = @import("codegen/llvm.zig");
const arch_tag = llvm.targetArch(target.cpu.arch);
const def_final_path_z = try arena.dupeZ(u8, def_final_path);
const lib_final_path_z = try arena.dupeZ(u8, lib_final_path);
if (llvm.WriteImportLibrary(def_final_path_z.ptr, arch_type, lib_final_path_z.ptr, true)) {
if (llvm_bindings.WriteImportLibrary(def_final_path_z.ptr, arch_tag, lib_final_path_z.ptr, true)) {
// TODO surface a proper error here
log.err("unable to turn {s}.def into {s}.lib", .{ lib_name, lib_name });
return error.WritingImportLibFailed;


@ -283,7 +283,6 @@ const Writer = struct {
.mul_add => try self.writeMulAdd(stream, inst),
.field_parent_ptr => try self.writeFieldParentPtr(stream, inst),
.builtin_call => try self.writeBuiltinCall(stream, inst),
.builtin_async_call => try self.writeBuiltinAsyncCall(stream, inst),
.struct_init_anon,
.struct_init_anon_ref,
@ -397,7 +396,7 @@ const Writer = struct {
.field_val_named,
=> try self.writePlNodeFieldNamed(stream, inst),
.as_node => try self.writeAs(stream, inst),
.as_node, .as_shift_operand => try self.writeAs(stream, inst),
.repeat,
.repeat_inline,
@ -531,6 +530,7 @@ const Writer = struct {
try stream.writeAll(") ");
try self.writeSrc(stream, src);
},
.builtin_async_call => try self.writeBuiltinAsyncCall(stream, extended),
}
}
@ -814,9 +814,8 @@ const Writer = struct {
try self.writeSrc(stream, inst_data.src());
}
fn writeBuiltinAsyncCall(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {
const inst_data = self.code.instructions.items(.data)[inst].pl_node;
const extra = self.code.extraData(Zir.Inst.AsyncCall, inst_data.payload_index).data;
fn writeBuiltinAsyncCall(self: *Writer, stream: anytype, extended: Zir.Inst.Extended.InstData) !void {
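// As an `extended` instruction, the payload index is carried in
// `extended.operand` and the source location comes from `extra.node`.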
const extra = self.code.extraData(Zir.Inst.AsyncCall, extended.operand).data;
try self.writeInstRef(stream, extra.frame_buffer);
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.result_ptr);
@ -825,7 +824,7 @@ const Writer = struct {
try stream.writeAll(", ");
try self.writeInstRef(stream, extra.args);
try stream.writeAll(") ");
try self.writeSrc(stream, inst_data.src());
try self.writeSrc(stream, LazySrcLoc.nodeOffset(extra.node));
}
fn writeParam(self: *Writer, stream: anytype, inst: Zir.Inst.Index) !void {


@ -1,5 +1,4 @@
const std = @import("std");
const llvm = @import("codegen/llvm/bindings.zig");
const Type = @import("type.zig").Type;
pub const ArchOsAbi = struct {
@ -338,112 +337,6 @@ pub fn supportsReturnAddress(target: std.Target) bool {
};
}
pub fn osToLLVM(os_tag: std.Target.Os.Tag) llvm.OSType {
return switch (os_tag) {
.freestanding, .other, .opencl, .glsl450, .vulkan, .plan9 => .UnknownOS,
.windows, .uefi => .Win32,
.ananas => .Ananas,
.cloudabi => .CloudABI,
.dragonfly => .DragonFly,
.freebsd => .FreeBSD,
.fuchsia => .Fuchsia,
.ios => .IOS,
.kfreebsd => .KFreeBSD,
.linux => .Linux,
.lv2 => .Lv2,
.macos => .MacOSX,
.netbsd => .NetBSD,
.openbsd => .OpenBSD,
.solaris => .Solaris,
.zos => .ZOS,
.haiku => .Haiku,
.minix => .Minix,
.rtems => .RTEMS,
.nacl => .NaCl,
.aix => .AIX,
.cuda => .CUDA,
.nvcl => .NVCL,
.amdhsa => .AMDHSA,
.ps4 => .PS4,
.ps5 => .PS5,
.elfiamcu => .ELFIAMCU,
.tvos => .TvOS,
.watchos => .WatchOS,
.mesa3d => .Mesa3D,
.contiki => .Contiki,
.amdpal => .AMDPAL,
.hermit => .HermitCore,
.hurd => .Hurd,
.wasi => .WASI,
.emscripten => .Emscripten,
.driverkit => .DriverKit,
.shadermodel => .ShaderModel,
};
}
pub fn archToLLVM(arch_tag: std.Target.Cpu.Arch) llvm.ArchType {
return switch (arch_tag) {
.arm => .arm,
.armeb => .armeb,
.aarch64 => .aarch64,
.aarch64_be => .aarch64_be,
.aarch64_32 => .aarch64_32,
.arc => .arc,
.avr => .avr,
.bpfel => .bpfel,
.bpfeb => .bpfeb,
.csky => .csky,
.dxil => .dxil,
.hexagon => .hexagon,
.loongarch32 => .loongarch32,
.loongarch64 => .loongarch64,
.m68k => .m68k,
.mips => .mips,
.mipsel => .mipsel,
.mips64 => .mips64,
.mips64el => .mips64el,
.msp430 => .msp430,
.powerpc => .ppc,
.powerpcle => .ppcle,
.powerpc64 => .ppc64,
.powerpc64le => .ppc64le,
.r600 => .r600,
.amdgcn => .amdgcn,
.riscv32 => .riscv32,
.riscv64 => .riscv64,
.sparc => .sparc,
.sparc64 => .sparcv9, // In LLVM, sparc64 == sparcv9.
.sparcel => .sparcel,
.s390x => .systemz,
.tce => .tce,
.tcele => .tcele,
.thumb => .thumb,
.thumbeb => .thumbeb,
.i386 => .x86,
.x86_64 => .x86_64,
.xcore => .xcore,
.nvptx => .nvptx,
.nvptx64 => .nvptx64,
.le32 => .le32,
.le64 => .le64,
.amdil => .amdil,
.amdil64 => .amdil64,
.hsail => .hsail,
.hsail64 => .hsail64,
.spir => .spir,
.spir64 => .spir64,
.kalimba => .kalimba,
.shave => .shave,
.lanai => .lanai,
.wasm32 => .wasm32,
.wasm64 => .wasm64,
.renderscript32 => .renderscript32,
.renderscript64 => .renderscript64,
.ve => .ve,
.spu_2, .spirv32, .spirv64 => .UnknownArch,
};
}
fn eqlIgnoreCase(ignore_case: bool, a: []const u8, b: []const u8) bool {
if (ignore_case) {
return std.ascii.eqlIgnoreCase(a, b);
@ -800,3 +693,10 @@ pub fn supportsFunctionAlignment(target: std.Target) bool {
else => true,
};
}
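/// Tail calls (`.always_tail`) are currently only lowered by the stage1 and LLVM
/// backends; for those, support additionally depends on the target.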
pub fn supportsTailCall(target: std.Target, backend: std.builtin.CompilerBackend) bool {
switch (backend) {
.stage1, .stage2_llvm => return @import("codegen/llvm.zig").supportsTailCall(target),
else => return false,
}
}


@ -6598,6 +6598,8 @@ pub const CType = enum {
.powerpcle,
.powerpc64,
.powerpc64le,
.wasm32,
.wasm64,
=> return 128,
else => return 64,
@ -6646,6 +6648,8 @@ pub const CType = enum {
.powerpcle,
.powerpc64,
.powerpc64le,
.wasm32,
.wasm64,
=> return 128,
else => return 64,


@ -261,3 +261,71 @@ test "arguments to comptime parameters generated in comptime blocks" {
};
S.foo(S.fortyTwo());
}
test "forced tail call" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm) {
// Only attempt this test on targets we know have tail call support in LLVM.
if (builtin.cpu.arch != .x86_64 and builtin.cpu.arch != .aarch64) {
return error.SkipZigTest;
}
}
const S = struct {
fn fibonacciTailInternal(n: u16, a: u16, b: u16) u16 {
if (n == 0) return a;
if (n == 1) return b;
return @call(
.{ .modifier = .always_tail },
fibonacciTailInternal,
.{ n - 1, b, a + b },
);
}
fn fibonacciTail(n: u16) u16 {
return fibonacciTailInternal(n, 0, 1);
}
};
try expect(S.fibonacciTail(10) == 55);
}
test "inline call preserves tail call" {
if (builtin.zig_backend == .stage1) return error.SkipZigTest;
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_llvm) {
// Only attempt this test on targets we know have tail call support in LLVM.
if (builtin.cpu.arch != .x86_64 and builtin.cpu.arch != .aarch64) {
return error.SkipZigTest;
}
}
const max = std.math.maxInt(u16);
const S = struct {
var a: u16 = 0;
fn foo() void {
return bar();
}
inline fn bar() void {
if (a == max) return;
// Stack overflow if not tail called
var buf: [max]u16 = undefined;
buf[a] = a;
a += 1;
return @call(.{ .modifier = .always_tail }, foo, .{});
}
};
S.foo();
try expect(S.a == std.math.maxInt(u16));
}


@ -1430,6 +1430,7 @@ test "coerce between pointers of compatible differently-named floats" {
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
if (builtin.os.tag == .windows) {
// https://github.com/ziglang/zig/issues/12396


@ -1324,3 +1324,31 @@ test "union and enum field order doesn't match" {
x = .b;
try expect(x == .b);
}
test "@unionInit uses tag value instead of field index" {
if (builtin.zig_backend == .stage2_c) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_x86_64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_aarch64) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_arm) return error.SkipZigTest; // TODO
if (builtin.zig_backend == .stage2_wasm) return error.SkipZigTest; // TODO
const E = enum(u8) {
b = 255,
a = 3,
};
const U = union(E) {
a: usize,
b: isize,
};
var i: isize = -1;
var u = @unionInit(U, "b", i);
{
var a = u.b;
try expect(a == i);
}
{
var a = &u.b;
try expect(a.* == i);
}
try expect(@enumToInt(u) == 255);
}


@ -33,6 +33,7 @@ void zig_five_integers(int32_t, int32_t, int32_t, int32_t, int32_t);
void zig_f32(float);
void zig_f64(double);
void zig_longdouble(long double);
void zig_five_floats(float, float, float, float, float);
bool zig_ret_bool();
@ -157,6 +158,7 @@ void run_c_tests(void) {
zig_f32(12.34f);
zig_f64(56.78);
zig_longdouble(12.34l);
zig_five_floats(1.0f, 2.0f, 3.0f, 4.0f, 5.0f);
zig_ptr((void*)0xdeadbeefL);
@ -271,6 +273,10 @@ void c_f64(double x) {
assert_or_panic(x == 56.78);
}
void c_long_double(long double x) {
assert_or_panic(x == 12.34l);
}
void c_ptr(void *x) {
assert_or_panic(x == (void*)0xdeadbeefL);
}


@ -1,4 +1,5 @@
const std = @import("std");
const builtin = @import("builtin");
const print = std.debug.print;
const expect = std.testing.expect;
@ -89,6 +90,7 @@ export fn zig_struct_u128(a: U128) void {
extern fn c_f32(f32) void;
extern fn c_f64(f64) void;
extern fn c_long_double(c_longdouble) void;
// On windows x64, the first 4 are passed via registers, others on the stack.
extern fn c_five_floats(f32, f32, f32, f32, f32) void;
@ -107,12 +109,21 @@ test "C ABI floats" {
c_five_floats(1.0, 2.0, 3.0, 4.0, 5.0);
}
test "C ABI long double" {
if (!builtin.cpu.arch.isWasm()) return error.SkipZigTest;
c_long_double(12.34);
}
export fn zig_f32(x: f32) void {
expect(x == 12.34) catch @panic("test failure: zig_f32");
}
export fn zig_f64(x: f64) void {
expect(x == 56.78) catch @panic("test failure: zig_f64");
}
export fn zig_longdouble(x: c_longdouble) void {
if (!builtin.cpu.arch.isWasm()) return; // waiting for #1481
expect(x == 12.34) catch @panic("test failure: zig_longdouble");
}
extern fn c_ptr(*anyopaque) void;


@ -2,5 +2,5 @@
// output_mode=Exe
// target=aarch64-macos
//
// :107:9: error: struct 'tmp.tmp' has no member named 'main'
// :105:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here


@ -0,0 +1,12 @@
fn myFn(_: usize) void {
return;
}
pub export fn entry() void {
@call(.{ .modifier = .always_tail }, myFn, .{0});
}
// error
// backend=llvm
// target=native
//
// :5:5: error: unable to perform tail call: type of function being called 'fn(usize) void' does not match type of calling function 'fn() callconv(.C) void'


@ -0,0 +1,16 @@
export fn entry() void {
const llamas1 = makeLlamas(5);
const llamas2 = makeLlamas(5);
_ = llamas1;
_ = llamas2;
}
fn makeLlamas(count: usize) [count]u8 {
_ = count;
}
// error
// target=native
//
// :8:30: error: unable to resolve comptime value
// :8:30: note: array length must be comptime known


@ -0,0 +1,31 @@
pub export fn entry() void {
var a: u32 = 0;
_ = @as(comptime_int, a);
}
pub export fn entry2() void{
var a: u32 = 0;
_ = @as(comptime_float, a);
}
pub export fn entry3() void{
comptime var aa: comptime_float = 0.0;
var a: f32 = 4;
aa = a;
}
pub export fn entry4() void{
comptime var aa: comptime_int = 0.0;
var a: f32 = 4;
aa = a;
}
// error
// backend=stage2
// target=native
//
// :3:27: error: unable to resolve comptime value
// :3:27: note: value being casted to 'comptime_int' must be comptime known
// :7:29: error: unable to resolve comptime value
// :7:29: note: value being casted to 'comptime_float' must be comptime known
// :12:10: error: unable to resolve comptime value
// :12:10: note: value being casted to 'comptime_float' must be comptime known
// :17:10: error: unable to resolve comptime value
// :17:10: note: value being casted to 'comptime_int' must be comptime known


@ -0,0 +1,23 @@
export fn entry(x: u8) u8 {
return 0x11 << x;
}
export fn entry1(x: u8) u8 {
return 0x11 >> x;
}
export fn entry2() void {
var x: u5 = 1;
_ = @shlExact(12345, x);
}
export fn entry3() void {
var x: u5 = 1;
_ = @shrExact(12345, x);
}
// error
// backend=stage2
// target=native
//
// :2:17: error: LHS of shift must be a fixed-width integer type, or RHS must be a comptime known
// :5:17: error: LHS of shift must be a fixed-width integer type, or RHS must be a comptime known
// :9:9: error: LHS of shift must be a fixed-width integer type, or RHS must be a comptime known
// :13:9: error: LHS of shift must be a fixed-width integer type, or RHS must be a comptime known


@ -1,9 +0,0 @@
export fn entry(x: u8) u8 {
return 0x11 << x;
}
// error
// backend=stage1
// target=native
//
// tmp.zig:2:17: error: LHS of shift must be a fixed-width integer type, or RHS must be compile-time known


@ -0,0 +1,18 @@
const std = @import("std");
const builtin = std.builtin;
pub fn foo(message: []const u8, stack_trace: ?*builtin.StackTrace) noreturn {
@call(.{ .modifier = .always_tail }, bar, .{ message, stack_trace });
}
pub fn bar(message: []const u8, stack_trace: ?*builtin.StackTrace) noreturn {
_ = message;
_ = stack_trace;
std.process.exit(0);
}
pub fn main() void {
foo("foo", null);
}
// run
// backend=llvm
// target=x86_64-linux,x86_64-macos,aarch64-linux,aarch64-macos


@ -2,5 +2,5 @@
// output_mode=Exe
// target=x86_64-linux
//
// :107:9: error: struct 'tmp.tmp' has no member named 'main'
// :105:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here


@ -2,5 +2,5 @@
// output_mode=Exe
// target=x86_64-macos
//
// :107:9: error: struct 'tmp.tmp' has no member named 'main'
// :105:9: error: struct 'tmp.tmp' has no member named 'main'
// :7:1: note: struct declared here