Compare commits

..

24 Commits

Author SHA1 Message Date
Andrew Kelley
c6e7c2cb80 WIP 2025-11-21 08:04:52 -08:00
Andrew Kelley
c91dec3b6f std.Io.Threaded: store thread capacity differently 2025-11-21 07:09:44 -08:00
Andrew Kelley
fba9fdf54f std.Io.Threaded: import std.mem.Alignment 2025-11-21 07:09:44 -08:00
Alex Rønne Petersen
d3e20e71be
std.zig.system.linux: implement s390x native CPU detection 2025-11-21 12:15:15 +01:00
Alex Rønne Petersen
1cd913a0ec
std.zig.system: disable vector support on s390x for now
https://github.com/llvm/llvm-project/issues/168992
2025-11-21 12:15:15 +01:00
Alex Rønne Petersen
7e54ee06d8
test: disable big simd vector C ABI test on s390x 2025-11-21 12:15:15 +01:00
Alex Rønne Petersen
3333bcc4f2
std.crypto.ml_kem: disable some tests on s390x with vector support 2025-11-21 12:15:15 +01:00
Alex Rønne Petersen
47df2f9abe
std.zig.system.linux: don't arbitrarily limit sparc CPU detection to 64-bit 2025-11-21 12:15:15 +01:00
Alex Rønne Petersen
534370c4de
ci: bump loongarch64-linux-debug timeout to 4 hours on Forgejo Actions 2025-11-21 09:44:22 +01:00
Alex Rønne Petersen
f3eef35c05
aro: unbreak s390x
https://github.com/ziglang/zig/pull/25780#discussion_r2548496117
2025-11-21 06:28:19 +01:00
rpkak
6b4f45f782 system specific errno 2025-11-20 15:03:23 -08:00
Benjamin Jurk
4b5351bc0d
update deprecated ArrayListUnmanaged usage (#25958) 2025-11-20 14:46:23 -08:00
Andrew Kelley
db622f14c4
Merge pull request #25780 from Vexu/translate-c
Update Aro and translate-c to latest
2025-11-20 10:24:31 -08:00
Matthew Lugg
8a73fc8d8e
Merge pull request #25981 from mlugg/macos-fuzz-2
make the fuzzer vaguely work on macOS
2025-11-20 17:48:35 +00:00
Veikka Tuominen
df50f9e28e update resinator to Aro changes 2025-11-20 13:12:53 +02:00
Veikka Tuominen
21f3ff2a8d update Aro and translate-c to latest 2025-11-20 13:12:53 +02:00
Matthew Lugg
a87b533231
std.Io.Writer: fix some bugs 2025-11-20 10:42:21 +00:00
Matthew Lugg
b05fefb9c9
std.http: stop assuming previous chunk state
The full file may not be written, either due to a previous chunk being
in-progress when `sendFile` was called, or due to `limit`.
2025-11-20 10:42:21 +00:00
Matthew Lugg
bc524a2b1a
std.Build: fix crashes running fuzz tests 2025-11-20 10:42:21 +00:00
Matthew Lugg
0f06b5b583
std.debug.MachOFile: handle 'path/to/archive.a(entry.o)' form 2025-11-20 10:42:21 +00:00
Matthew Lugg
e1fa4011fb
fuzz: hack around unknown module structure 2025-11-20 10:42:20 +00:00
Matthew Lugg
010dcd6a9b
fuzzer: account for runtime address slide
This is relevant to PIEs, which are notably enabled by default on macOS.
The build system needs to only see virtual addresses, that is, those
which do not have the slide applied; but the fuzzer itself naturally
sees relocated addresses (i.e. with the slide applied). We just need to
subtract the slide when we communicate addresses to the build system.
2025-11-20 10:42:20 +00:00
Matthew Lugg
0a330d4f94
std.debug.Info: basic Mach-O support 2025-11-20 10:42:20 +00:00
Matthew Lugg
0caca625eb
std.debug: split up Mach-O debug info handling
Like ELF, we now have `std.debug.MachOFile` for the host-independent
parts, and `std.debug.SelfInfo.MachO` for logic requiring the file to
correspond to the running program.
2025-11-20 10:42:20 +00:00
200 changed files with 49327 additions and 17876 deletions

View File

@@ -40,7 +40,7 @@ jobs:
fetch-depth: 0
- name: Build and Test
run: sh ci/loongarch64-linux-debug.sh
timeout-minutes: 180
timeout-minutes: 240
loongarch64-linux-release:
runs-on: [self-hosted, loongarch64-linux]
steps:

View File

@@ -9,7 +9,7 @@ const Instruction = enum {
fn evaluate(initial_stack: []const i32, code: []const Instruction) !i32 {
var buffer: [8]i32 = undefined;
var stack = std.ArrayListUnmanaged(i32).initBuffer(&buffer);
var stack = std.ArrayList(i32).initBuffer(&buffer);
try stack.appendSliceBounded(initial_stack);
var ip: usize = 0;

View File

@@ -42,7 +42,7 @@ pub fn sourceIndexMessage(msg_bytes: []u8) error{OutOfMemory}!void {
var coverage = Coverage.init;
/// Index of type `SourceLocationIndex`.
var coverage_source_locations: std.ArrayListUnmanaged(Coverage.SourceLocation) = .empty;
var coverage_source_locations: std.ArrayList(Coverage.SourceLocation) = .empty;
/// Contains the most recent coverage update message, unmodified.
var recent_coverage_update: std.ArrayListAlignedUnmanaged(u8, .of(u64)) = .empty;
@@ -76,7 +76,7 @@ pub fn coverageUpdateMessage(msg_bytes: []u8) error{OutOfMemory}!void {
try updateCoverage();
}
var entry_points: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
var entry_points: std.ArrayList(SourceLocationIndex) = .empty;
pub fn entryPointsMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const header: abi.fuzz.EntryPointHeader = @bitCast(msg_bytes[0..@sizeOf(abi.fuzz.EntryPointHeader)].*);
@@ -127,7 +127,7 @@ const SourceLocationIndex = enum(u32) {
}
fn toWalkFile(sli: SourceLocationIndex) ?Walk.File.Index {
var buf: std.ArrayListUnmanaged(u8) = .empty;
var buf: std.ArrayList(u8) = .empty;
defer buf.deinit(gpa);
sli.appendPath(&buf) catch @panic("OOM");
return @enumFromInt(Walk.files.getIndex(buf.items) orelse return null);
@@ -135,11 +135,11 @@ const SourceLocationIndex = enum(u32) {
fn fileHtml(
sli: SourceLocationIndex,
out: *std.ArrayListUnmanaged(u8),
out: *std.ArrayList(u8),
) error{ OutOfMemory, SourceUnavailable }!void {
const walk_file_index = sli.toWalkFile() orelse return error.SourceUnavailable;
const root_node = walk_file_index.findRootDecl().get().ast_node;
var annotations: std.ArrayListUnmanaged(html_render.Annotation) = .empty;
var annotations: std.ArrayList(html_render.Annotation) = .empty;
defer annotations.deinit(gpa);
try computeSourceAnnotations(sli.ptr().file, walk_file_index, &annotations, coverage_source_locations.items);
html_render.fileSourceHtml(walk_file_index, out, root_node, .{
@@ -153,13 +153,13 @@ const SourceLocationIndex = enum(u32) {
fn computeSourceAnnotations(
cov_file_index: Coverage.File.Index,
walk_file_index: Walk.File.Index,
annotations: *std.ArrayListUnmanaged(html_render.Annotation),
annotations: *std.ArrayList(html_render.Annotation),
source_locations: []const Coverage.SourceLocation,
) !void {
// Collect all the source locations from only this file into this array
// first, then sort by line, col, so that we can collect annotations with
// O(N) time complexity.
var locs: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
var locs: std.ArrayList(SourceLocationIndex) = .empty;
defer locs.deinit(gpa);
for (source_locations, 0..) |sl, sli_usize| {
@@ -228,20 +228,21 @@ fn unpackSourcesInner(tar_bytes: []u8) !void {
if (std.mem.endsWith(u8, tar_file.name, ".zig")) {
log.debug("found file: '{s}'", .{tar_file.name});
const file_name = try gpa.dupe(u8, tar_file.name);
if (std.mem.indexOfScalar(u8, file_name, '/')) |pkg_name_end| {
const pkg_name = file_name[0..pkg_name_end];
const gop = try Walk.modules.getOrPut(gpa, pkg_name);
const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
if (!gop.found_existing or
std.mem.eql(u8, file_name[pkg_name_end..], "/root.zig") or
std.mem.eql(u8, file_name[pkg_name_end + 1 .. file_name.len - ".zig".len], pkg_name))
{
gop.value_ptr.* = file;
}
const file_bytes = tar_reader.take(@intCast(tar_file.size)) catch unreachable;
it.unread_file_bytes = 0; // we have read the whole thing
assert(file == try Walk.add_file(file_name, file_bytes));
}
// This is a hack to guess modules from the tar file contents. To handle modules
// properly, the build system will need to change the structure here to have one
// directory per module. This in turn requires compiler enhancements to allow
// the build system to actually discover the required information.
const mod_name, const is_module_root = p: {
if (std.mem.find(u8, file_name, "std/")) |i| break :p .{ "std", std.mem.eql(u8, file_name[i + 4 ..], "std.zig") };
if (std.mem.endsWith(u8, file_name, "/builtin.zig")) break :p .{ "builtin", true };
break :p .{ "root", std.mem.endsWith(u8, file_name, "/root.zig") };
};
const gop = try Walk.modules.getOrPut(gpa, mod_name);
const file: Walk.File.Index = @enumFromInt(Walk.files.entries.len);
if (!gop.found_existing or is_module_root) gop.value_ptr.* = file;
const file_bytes = tar_reader.take(@intCast(tar_file.size)) catch unreachable;
it.unread_file_bytes = 0; // we have read the whole thing
assert(file == try Walk.add_file(file_name, file_bytes));
} else {
log.warn("skipping: '{s}' - the tar creation should have done that", .{tar_file.name});
}
@@ -308,7 +309,7 @@ fn updateCoverage() error{OutOfMemory}!void {
if (recent_coverage_update.items.len == 0) return;
const want_file = (selected_source_location orelse return).ptr().file;
var covered: std.ArrayListUnmanaged(SourceLocationIndex) = .empty;
var covered: std.ArrayList(SourceLocationIndex) = .empty;
defer covered.deinit(gpa);
// This code assumes 64-bit elements, which is incorrect if the executable
@@ -339,7 +340,7 @@ fn updateCoverage() error{OutOfMemory}!void {
fn updateSource() error{OutOfMemory}!void {
if (recent_coverage_update.items.len == 0) return;
const file_sli = selected_source_location.?;
var html: std.ArrayListUnmanaged(u8) = .empty;
var html: std.ArrayList(u8) = .empty;
defer html.deinit(gpa);
file_sli.fileHtml(&html) catch |err| switch (err) {
error.OutOfMemory => |e| return e,

View File

@@ -254,7 +254,7 @@ pub fn runTestResultMessage(msg_bytes: []u8) error{OutOfMemory}!void {
const durations: []align(1) const u64 = @ptrCast(trailing[0 .. hdr.tests_len * 8]);
var offset: usize = hdr.tests_len * 8;
var table_html: std.ArrayListUnmanaged(u8) = .empty;
var table_html: std.ArrayList(u8) = .empty;
defer table_html.deinit(gpa);
for (durations) |test_ns| {

View File

@@ -6,7 +6,7 @@ pub const Parser = @import("aro/Parser.zig");
pub const Preprocessor = @import("aro/Preprocessor.zig");
pub const Source = @import("aro/Source.zig");
pub const StringInterner = @import("aro/StringInterner.zig");
pub const target_util = @import("aro/target.zig");
pub const Target = @import("aro/Target.zig");
pub const Tokenizer = @import("aro/Tokenizer.zig");
pub const Toolchain = @import("aro/Toolchain.zig");
pub const Tree = @import("aro/Tree.zig");
@@ -31,11 +31,11 @@ test {
_ = @import("aro/char_info.zig");
_ = @import("aro/Compilation.zig");
_ = @import("aro/Driver/Distro.zig");
_ = @import("aro/Driver/Filesystem.zig");
_ = @import("aro/Driver/GCCVersion.zig");
_ = @import("aro/InitList.zig");
_ = @import("aro/LangOpts.zig");
_ = @import("aro/Preprocessor.zig");
_ = @import("aro/target.zig");
_ = @import("aro/Target.zig");
_ = @import("aro/Tokenizer.zig");
_ = @import("aro/Value.zig");
}

View File

@@ -61,25 +61,21 @@ pub const Iterator = struct {
return .{ self.slice[self.index], self.index };
}
if (self.source) |*source| {
var cur = source.qt;
if (cur.isInvalid()) {
if (source.qt.isInvalid()) {
self.source = null;
return null;
}
while (true) switch (cur.type(source.comp)) {
.typeof => |typeof| cur = typeof.base,
loop: switch (source.qt.type(source.comp)) {
.typeof => |typeof| continue :loop typeof.base.type(source.comp),
.attributed => |attributed| {
self.slice = attributed.attributes;
self.index = 1;
source.qt = attributed.base;
return .{ self.slice[0], 0 };
},
.typedef => |typedef| cur = typedef.base,
else => {
self.source = null;
break;
},
};
.typedef => |typedef| continue :loop typedef.base.type(source.comp),
else => self.source = null,
}
}
return null;
}
@@ -712,6 +708,9 @@ const attributes = struct {
pub const thiscall = struct {};
pub const sysv_abi = struct {};
pub const ms_abi = struct {};
// TODO cannot be combined with weak or selectany
pub const internal_linkage = struct {};
pub const availability = struct {};
};
pub const Tag = std.meta.DeclEnum(attributes);
@@ -776,9 +775,9 @@ pub fn fromString(kind: Kind, namespace: ?[]const u8, name: []const u8) ?Tag {
const tag_and_opts = attribute_names.fromName(normalized) orelse return null;
switch (actual_kind) {
inline else => |tag| {
if (@field(tag_and_opts.properties, @tagName(tag)))
return tag_and_opts.properties.tag;
inline else => |available_kind| {
if (@field(tag_and_opts, @tagName(available_kind)))
return tag_and_opts.tag;
},
}
return null;
@@ -814,7 +813,7 @@ fn applyVariableOrParameterAttributes(p: *Parser, qt: QualType, attr_buf_start:
for (attrs, toks) |attr, tok| switch (attr.tag) {
// zig fmt: off
.alias, .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .weak, .used,
.noinit, .retain, .persistent, .section, .mode, .asm_label, .nullability, .unaligned,
.noinit, .retain, .persistent, .section, .mode, .asm_label, .nullability, .unaligned, .selectany, .internal_linkage,
=> try p.attr_application_buf.append(gpa, attr),
// zig fmt: on
.common => if (nocommon) {
@@ -874,18 +873,18 @@ fn applyVariableOrParameterAttributes(p: *Parser, qt: QualType, attr_buf_start:
pub fn applyFieldAttributes(p: *Parser, field_qt: *QualType, attr_buf_start: usize) ![]const Attribute {
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
const seen = p.attr_buf.items(.seen)[attr_buf_start..];
p.attr_application_buf.items.len = 0;
for (attrs, toks) |attr, tok| switch (attr.tag) {
// zig fmt: off
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned,
.mode, .warn_unused_result, .nodiscard, .nullability, .unaligned,
=> try p.attr_application_buf.append(p.comp.gpa, attr),
// zig fmt: on
.vector_size => try attr.applyVectorSize(p, tok, field_qt),
.aligned => try attr.applyAligned(p, field_qt.*, null),
.calling_convention => try applyCallingConvention(attr, p, tok, field_qt.*),
else => try ignoredAttrErr(p, tok, attr.tag, "fields"),
for (attrs, 0..) |attr, i| switch (attr.tag) {
.@"packed" => {
try p.attr_application_buf.append(p.comp.gpa, attr);
seen[i] = true;
},
.aligned => {
try attr.applyAligned(p, field_qt.*, null);
seen[i] = true;
},
else => {},
};
return p.attr_application_buf.items;
}
@@ -894,29 +893,35 @@ pub fn applyTypeAttributes(p: *Parser, qt: QualType, attr_buf_start: usize, diag
const gpa = p.comp.gpa;
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
const seens = p.attr_buf.items(.seen)[attr_buf_start..];
p.attr_application_buf.items.len = 0;
var base_qt = qt;
for (attrs, toks) |attr, tok| switch (attr.tag) {
// zig fmt: off
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .nullability, .unaligned,
=> try p.attr_application_buf.append(gpa, attr),
// zig fmt: on
.transparent_union => try attr.applyTransparentUnion(p, tok, base_qt),
.vector_size => try attr.applyVectorSize(p, tok, &base_qt),
.aligned => try attr.applyAligned(p, base_qt, diagnostic),
.designated_init => if (base_qt.is(p.comp, .@"struct")) {
try p.attr_application_buf.append(gpa, attr);
} else {
try p.err(tok, .designated_init_invalid, .{});
},
.calling_convention => try applyCallingConvention(attr, p, tok, base_qt),
.alloc_size,
.copy,
.scalar_storage_order,
.nonstring,
=> |t| try p.err(tok, .attribute_todo, .{ @tagName(t), "types" }),
else => try ignoredAttrErr(p, tok, attr.tag, "types"),
};
for (attrs, toks, seens) |attr, tok, seen| {
if (seen) continue;
switch (attr.tag) {
// zig fmt: off
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode,
.nullability, .unaligned, .warn_unused_result,
=> try p.attr_application_buf.append(gpa, attr),
// zig fmt: on
.transparent_union => try attr.applyTransparentUnion(p, tok, base_qt),
.vector_size => try attr.applyVectorSize(p, tok, &base_qt),
.aligned => try attr.applyAligned(p, base_qt, diagnostic),
.designated_init => if (base_qt.is(p.comp, .@"struct")) {
try p.attr_application_buf.append(gpa, attr);
} else {
try p.err(tok, .designated_init_invalid, .{});
},
.calling_convention => try applyCallingConvention(attr, p, tok, base_qt),
.alloc_size,
.copy,
.scalar_storage_order,
.nonstring,
=> |t| try p.err(tok, .attribute_todo, .{ @tagName(t), "types" }),
else => try ignoredAttrErr(p, tok, attr.tag, "types"),
}
}
return applySelected(base_qt, p);
}
@@ -935,7 +940,7 @@ pub fn applyFunctionAttributes(p: *Parser, qt: QualType, attr_buf_start: usize)
.noreturn, .unused, .used, .warning, .deprecated, .unavailable, .weak, .pure, .leaf,
.@"const", .warn_unused_result, .section, .returns_nonnull, .returns_twice, .@"error",
.externally_visible, .retain, .flatten, .gnu_inline, .alias, .asm_label, .nodiscard,
.reproducible, .unsequenced, .nothrow, .nullability, .unaligned,
.reproducible, .unsequenced, .nothrow, .nullability, .unaligned, .internal_linkage,
=> try p.attr_application_buf.append(gpa, attr),
// zig fmt: on
.hot => if (cold) {
@@ -1164,7 +1169,7 @@ pub fn applyStatementAttributes(p: *Parser, expr_start: TokenIndex, attr_buf_sta
try p.attr_application_buf.append(p.comp.gpa, attr);
break;
},
.r_brace => {},
.r_brace, .semicolon => {},
else => {
try p.err(expr_start, .invalid_fallthrough, .{});
break;

File diff suppressed because it is too large Load Diff

View File

@@ -3,25 +3,75 @@ const std = @import("std");
const Compilation = @import("Compilation.zig");
const LangOpts = @import("LangOpts.zig");
const Parser = @import("Parser.zig");
const target_util = @import("target.zig");
const Target = @import("Target.zig");
const TypeStore = @import("TypeStore.zig");
const QualType = TypeStore.QualType;
const Builder = TypeStore.Builder;
const TypeDescription = @import("Builtins/TypeDescription.zig");
const properties = @import("Builtins/properties.zig");
const Properties = @import("Builtins/Properties.zig");
pub const Builtin = @import("Builtins/Builtin.zig").with(Properties);
const Expanded = struct {
qt: QualType,
builtin: Builtin,
const BuiltinBase = struct {
param_str: [*:0]const u8,
language: properties.Language = .all_languages,
attributes: properties.Attributes = .{},
header: properties.Header = .none,
};
const NameToTypeMap = std.StringHashMapUnmanaged(QualType);
const BuiltinTarget = struct {
param_str: [*:0]const u8,
language: properties.Language = .all_languages,
attributes: properties.Attributes = .{},
header: properties.Header = .none,
features: ?[*:0]const u8 = null,
};
const aarch64 = @import("Builtins/aarch64.zig").with(BuiltinTarget);
const amdgcn = @import("Builtins/amdgcn.zig").with(BuiltinTarget);
const arm = @import("Builtins/arm.zig").with(BuiltinTarget);
const bpf = @import("Builtins/bpf.zig").with(BuiltinTarget);
const common = @import("Builtins/common.zig").with(BuiltinBase);
const hexagon = @import("Builtins/hexagon.zig").with(BuiltinTarget);
const loongarch = @import("Builtins/loongarch.zig").with(BuiltinTarget);
const mips = @import("Builtins/mips.zig").with(BuiltinBase);
const nvptx = @import("Builtins/nvptx.zig").with(BuiltinTarget);
const powerpc = @import("Builtins/powerpc.zig").with(BuiltinTarget);
const riscv = @import("Builtins/riscv.zig").with(BuiltinTarget);
const s390x = @import("Builtins/s390x.zig").with(BuiltinTarget);
const ve = @import("Builtins/ve.zig").with(BuiltinBase);
const x86_64 = @import("Builtins/x86_64.zig").with(BuiltinTarget);
const x86 = @import("Builtins/x86.zig").with(BuiltinTarget);
const xcore = @import("Builtins/xcore.zig").with(BuiltinBase);
pub const Tag = union(enum) {
aarch64: aarch64.Tag,
amdgcn: amdgcn.Tag,
arm: arm.Tag,
bpf: bpf.Tag,
common: common.Tag,
hexagon: hexagon.Tag,
loongarch: loongarch.Tag,
mips: mips.Tag,
nvptx: nvptx.Tag,
powerpc: powerpc.Tag,
riscv: riscv.Tag,
s390x: s390x.Tag,
ve: ve.Tag,
x86_64: x86_64.Tag,
x86: x86.Tag,
xcore: xcore.Tag,
};
pub const Expanded = struct {
tag: Tag,
qt: QualType,
language: properties.Language = .all_languages,
attributes: properties.Attributes = .{},
header: properties.Header = .none,
};
const Builtins = @This();
_name_to_type_map: NameToTypeMap = .{},
_name_to_type_map: std.StringHashMapUnmanaged(Expanded) = .{},
pub fn deinit(b: *Builtins, gpa: std.mem.Allocator) void {
b._name_to_type_map.deinit(gpa);
@@ -47,6 +97,7 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *C
var parser: Parser = undefined;
parser.comp = comp;
var builder: TypeStore.Builder = .{ .parser = &parser, .error_on_invalid = true };
var actual_suffix = desc.suffix;
var require_native_int32 = false;
var require_native_int64 = false;
@@ -66,7 +117,7 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *C
.W => require_native_int64 = true,
.N => {
std.debug.assert(desc.spec == .i);
if (!target_util.isLP64(comp.target)) {
if (!comp.target.isLP64()) {
builder.combine(.long, 0) catch unreachable;
}
},
@@ -102,10 +153,7 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *C
},
.h => builder.combine(.fp16, 0) catch unreachable,
.x => builder.combine(.float16, 0) catch unreachable,
.y => {
// Todo: __bf16
return .invalid;
},
.y => builder.combine(.bf16, 0) catch unreachable,
.f => builder.combine(.float, 0) catch unreachable,
.d => {
if (builder.type == .long_long) {
@@ -126,18 +174,6 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *C
std.debug.assert(builder.type == .none);
builder.type = Builder.fromType(comp, comp.type_store.ns_constant_string);
},
.G => {
// Todo: id
return .invalid;
},
.H => {
// Todo: SEL
return .invalid;
},
.M => {
// Todo: struct objc_super
return .invalid;
},
.a => {
std.debug.assert(builder.type == .none);
std.debug.assert(desc.suffix.len == 0);
@@ -152,7 +188,9 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *C
},
.V => |element_count| {
std.debug.assert(desc.suffix.len == 0);
const child_desc = it.next().?;
var child_desc = it.next().?;
actual_suffix = child_desc.suffix;
child_desc.suffix = &.{};
const elem_qt = try createType(child_desc, undefined, comp);
const vector_qt = try comp.type_store.put(comp.gpa, .{ .vector = .{
.elem = elem_qt,
@ -160,8 +198,8 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *C
} });
builder.type = .{ .other = vector_qt };
},
.q => {
// Todo: scalable vector
.Q => {
// Todo: target builtin type
return .invalid;
},
.E => {
@@ -219,9 +257,8 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *C
std.debug.assert(desc.suffix.len == 0);
builder.type = Builder.fromType(comp, comp.type_store.pid_t);
},
.@"!" => return .invalid,
}
for (desc.suffix) |suffix| {
for (actual_suffix) |suffix| {
switch (suffix) {
.@"*" => |address_space| {
_ = address_space; // TODO: handle address space
@@ -243,144 +280,122 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *C
return builder.finish() catch unreachable;
}
fn createBuiltin(comp: *Compilation, builtin: Builtin) !QualType {
var it = TypeDescription.TypeIterator.init(builtin.properties.param_str);
fn createBuiltin(comp: *Compilation, param_str: [*:0]const u8) !QualType {
var it = TypeDescription.TypeIterator.init(param_str);
const ret_ty_desc = it.next().?;
if (ret_ty_desc.spec == .@"!") {
// Todo: handle target-dependent definition
}
const ret_ty = try createType(ret_ty_desc, &it, comp);
var param_count: usize = 0;
var params: [Builtin.max_param_count]TypeStore.Type.Func.Param = undefined;
var params: [32]TypeStore.Type.Func.Param = undefined;
while (it.next()) |desc| : (param_count += 1) {
params[param_count] = .{ .name_tok = 0, .qt = try createType(desc, &it, comp), .name = .empty, .node = .null };
}
return comp.type_store.put(comp.gpa, .{ .func = .{
.return_type = ret_ty,
.kind = if (builtin.properties.isVarArgs()) .variadic else .normal,
.kind = if (properties.isVarArgs(param_str)) .variadic else .normal,
.params = params[0..param_count],
} });
}
/// Asserts that the builtin has already been created
pub fn lookup(b: *const Builtins, name: []const u8) Expanded {
const builtin = Builtin.fromName(name).?;
const qt = b._name_to_type_map.get(name).?;
return .{ .builtin = builtin, .qt = qt };
return b._name_to_type_map.get(name).?;
}
pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8) !?Expanded {
const qt = b._name_to_type_map.get(name) orelse {
const builtin = Builtin.fromName(name) orelse return null;
if (!comp.hasBuiltinFunction(builtin)) return null;
if (b._name_to_type_map.get(name)) |expanded| return expanded;
try b._name_to_type_map.ensureUnusedCapacity(comp.gpa, 1);
const qt = try createBuiltin(comp, builtin);
b._name_to_type_map.putAssumeCapacity(name, qt);
const builtin = fromName(comp, name) orelse return null;
if (builtin.features) |_| {
// TODO check features
}
return .{
.builtin = builtin,
.qt = qt,
};
try b._name_to_type_map.ensureUnusedCapacity(comp.gpa, 1);
const expanded: Expanded = .{
.tag = builtin.tag,
.qt = try createBuiltin(comp, builtin.param_str),
.attributes = builtin.attributes,
.header = builtin.header,
.language = builtin.language,
};
const builtin = Builtin.fromName(name).?;
return .{ .builtin = builtin, .qt = qt };
b._name_to_type_map.putAssumeCapacity(name, expanded);
return expanded;
}
pub const Iterator = struct {
index: u16 = 1,
name_buf: [Builtin.longest_name]u8 = undefined,
pub const Entry = struct {
/// Memory of this slice is overwritten on every call to `next`
name: []const u8,
builtin: Builtin,
};
pub fn next(self: *Iterator) ?Entry {
if (self.index > Builtin.data.len) return null;
const index = self.index;
const data_index = index - 1;
self.index += 1;
return .{
.name = Builtin.nameFromUniqueIndex(index, &self.name_buf),
.builtin = Builtin.data[data_index],
};
}
pub const FromName = struct {
tag: Tag,
param_str: [*:0]const u8,
language: properties.Language = .all_languages,
attributes: properties.Attributes = .{},
header: properties.Header = .none,
features: ?[*:0]const u8 = null,
};
test Iterator {
const gpa = std.testing.allocator;
var it = Iterator{};
var seen: std.StringHashMapUnmanaged(Builtin) = .empty;
defer seen.deinit(gpa);
var arena_state = std.heap.ArenaAllocator.init(gpa);
defer arena_state.deinit();
const arena = arena_state.allocator();
while (it.next()) |entry| {
const index = Builtin.uniqueIndex(entry.name).?;
var buf: [Builtin.longest_name]u8 = undefined;
const name_from_index = Builtin.nameFromUniqueIndex(index, &buf);
try std.testing.expectEqualStrings(entry.name, name_from_index);
if (seen.contains(entry.name)) {
std.debug.print("iterated over {s} twice\n", .{entry.name});
std.debug.print("current data: {}\n", .{entry.builtin});
std.debug.print("previous data: {}\n", .{seen.get(entry.name).?});
return error.TestExpectedUniqueEntries;
}
try seen.put(gpa, try arena.dupe(u8, entry.name), entry.builtin);
pub fn fromName(comp: *Compilation, name: []const u8) ?FromName {
if (fromNameExtra(name, .common)) |found| return found;
switch (comp.target.cpu.arch) {
.aarch64, .aarch64_be => if (fromNameExtra(name, .aarch64)) |found| return found,
.amdgcn => if (fromNameExtra(name, .amdgcn)) |found| return found,
.arm, .armeb, .thumb, .thumbeb => if (fromNameExtra(name, .arm)) |found| return found,
.bpfeb, .bpfel => if (fromNameExtra(name, .bpf)) |found| return found,
.hexagon => if (fromNameExtra(name, .hexagon)) |found| return found,
.loongarch32, .loongarch64 => if (fromNameExtra(name, .loongarch)) |found| return found,
.mips64, .mips64el, .mips, .mipsel => if (fromNameExtra(name, .mips)) |found| return found,
.nvptx, .nvptx64 => if (fromNameExtra(name, .nvptx)) |found| return found,
.powerpc64, .powerpc64le, .powerpc, .powerpcle => if (fromNameExtra(name, .powerpc)) |found| return found,
.riscv32, .riscv32be, .riscv64, .riscv64be => if (fromNameExtra(name, .riscv)) |found| return found,
.s390x => if (fromNameExtra(name, .s390x)) |found| return found,
.ve => if (fromNameExtra(name, .ve)) |found| return found,
.xcore => if (fromNameExtra(name, .xcore)) |found| return found,
.x86_64 => {
if (fromNameExtra(name, .x86_64)) |found| return found;
if (fromNameExtra(name, .x86)) |found| return found;
},
.x86 => if (fromNameExtra(name, .x86)) |found| return found,
else => {},
}
try std.testing.expectEqual(@as(usize, Builtin.data.len), seen.count());
return null;
}
test "All builtins" {
var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();
fn fromNameExtra(name: []const u8, comptime arch: std.meta.Tag(Tag)) ?FromName {
const list = @field(@This(), @tagName(arch));
const tag = list.tagFromName(name) orelse return null;
const builtin = list.data[@intFromEnum(tag)];
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
defer comp.deinit();
try comp.type_store.initNamedTypes(&comp);
comp.type_store.va_list = try comp.type_store.va_list.decay(&comp);
var builtin_it = Iterator{};
while (builtin_it.next()) |entry| {
const name = try arena.dupe(u8, entry.name);
if (try comp.builtins.getOrCreate(&comp, name)) |func_ty| {
const get_again = (try comp.builtins.getOrCreate(&comp, name)).?;
const found_by_lookup = comp.builtins.lookup(name);
try std.testing.expectEqual(func_ty.builtin.tag, get_again.builtin.tag);
try std.testing.expectEqual(func_ty.builtin.tag, found_by_lookup.builtin.tag);
}
}
return .{
.tag = @unionInit(Tag, @tagName(arch), tag),
.param_str = builtin.param_str,
.header = builtin.header,
.language = builtin.language,
.attributes = builtin.attributes,
.features = if (@hasField(@TypeOf(builtin), "features")) builtin.features else null,
};
}
test "Allocation failures" {
const Test = struct {
fn testOne(allocator: std.mem.Allocator) !void {
var arena_state: std.heap.ArenaAllocator = .init(allocator);
defer arena_state.deinit();
const arena = arena_state.allocator();
test "all builtins" {
const list_names = comptime std.meta.fieldNames(Tag);
inline for (list_names) |list_name| {
const list = @field(Builtins, list_name);
for (list.data, 0..) |builtin, index| {
{
var it = TypeDescription.TypeIterator.init(builtin.param_str);
while (it.next()) |_| {}
}
if (@hasField(@TypeOf(builtin), "features")) {
const corrected_name = comptime if (std.mem.eql(u8, list_name, "x86_64")) "x86" else list_name;
const features = &@field(std.Target, corrected_name).all_features;
var comp = Compilation.init(allocator, arena, undefined, std.fs.cwd());
defer comp.deinit();
_ = try comp.generateBuiltinMacros(.include_system_defines);
const feature_string = builtin.features orelse continue;
var it = std.mem.tokenizeAny(u8, std.mem.span(feature_string), "()|,");
const num_builtins = 40;
var builtin_it = Iterator{};
for (0..num_builtins) |_| {
const entry = builtin_it.next().?;
_ = try comp.builtins.getOrCreate(&comp, entry.name);
outer: while (it.next()) |feature| {
for (features) |valid_feature| {
if (std.mem.eql(u8, feature, valid_feature.name)) continue :outer;
}
std.debug.panic("unknown feature {s} on {t}\n", .{ feature, @as(list.Tag, @enumFromInt(index)) });
}
}
}
};
try std.testing.checkAllAllocationFailures(std.testing.allocator, Test.testOne, .{});
}
}

File diff suppressed because it is too large Load Diff

View File

@@ -13,10 +13,10 @@ pub const Component = union(enum) {
};
pub const ComponentIterator = struct {
str: []const u8,
str: [*:0]const u8,
idx: usize,
pub fn init(str: []const u8) ComponentIterator {
pub fn init(str: [*:0]const u8) ComponentIterator {
return .{
.str = str,
.idx = 0,
@@ -30,8 +30,8 @@ pub const ComponentIterator = struct {
}
pub fn next(self: *ComponentIterator) ?Component {
if (self.idx == self.str.len) return null;
const c = self.str[self.idx];
if (c == 0) return null;
self.idx += 1;
switch (c) {
'L' => {
@@ -68,18 +68,14 @@ pub const ComponentIterator = struct {
'z' => return .{ .spec = .z },
'w' => return .{ .spec = .w },
'F' => return .{ .spec = .F },
'G' => return .{ .spec = .G },
'H' => return .{ .spec = .H },
'M' => return .{ .spec = .M },
'a' => return .{ .spec = .a },
'A' => return .{ .spec = .A },
'V', 'q', 'E' => {
'V', 'E' => {
const start = self.idx;
while (std.ascii.isDigit(self.str[self.idx])) : (self.idx += 1) {}
const count = std.fmt.parseUnsigned(u32, self.str[start..self.idx], 10) catch unreachable;
return switch (c) {
'V' => .{ .spec = .{ .V = count } },
'q' => .{ .spec = .{ .q = count } },
'E' => .{ .spec = .{ .E = count } },
else => unreachable,
};
@ -103,16 +99,12 @@ pub const ComponentIterator = struct {
'p' => return .{ .spec = .p },
'.' => {
// can only appear at end of param string; indicates varargs function
std.debug.assert(self.idx == self.str.len);
std.debug.assert(self.str[self.idx] == 0);
return null;
},
'!' => {
std.debug.assert(self.str.len == 1);
return .{ .spec = .@"!" };
},
'*' => {
if (self.idx < self.str.len and std.ascii.isDigit(self.str[self.idx])) {
if (std.ascii.isDigit(self.str[self.idx])) {
defer self.idx += 1;
const addr_space = self.str[self.idx] - '0';
return .{ .suffix = .{ .@"*" = addr_space } };
@ -123,6 +115,14 @@ pub const ComponentIterator = struct {
'C' => return .{ .suffix = .C },
'D' => return .{ .suffix = .D },
'R' => return .{ .suffix = .R },
'Q' => {
defer self.idx += 1;
switch (self.str[self.idx]) {
'a' => return .{ .spec = .{ .Q = .aarch64_svcount_t } },
'b' => return .{ .spec = .{ .Q = .amdgpu_buffer_rsrc_t } },
else => unreachable,
}
},
else => unreachable,
}
return null;
@ -130,13 +130,13 @@ pub const ComponentIterator = struct {
};
pub const TypeIterator = struct {
param_str: []const u8,
param_str: [*:0]const u8,
prefix: [4]Prefix,
spec: Spec,
suffix: [4]Suffix,
idx: usize,
pub fn init(param_str: []const u8) TypeIterator {
pub fn init(param_str: [*:0]const u8) TypeIterator {
return .{
.param_str = param_str,
.prefix = undefined,
@ -176,7 +176,7 @@ pub const TypeIterator = struct {
_ = it.next();
}
if (maybe_spec) |spec| {
return TypeDescription{
return .{
.prefix = self.prefix[0..prefix_count],
.spec = spec,
.suffix = self.suffix[0..suffix_count],
@ -236,20 +236,17 @@ const Spec = union(enum) {
w,
/// constant CFString
F,
/// id
G,
/// SEL
H,
/// struct objc_super
M,
/// __builtin_va_list
a,
/// "reference" to __builtin_va_list
A,
/// Vector, followed by the number of elements and the base type.
V: u32,
/// Scalable vector, followed by the number of elements and the base type.
q: u32,
/// target builtin type, followed by a character to distinguish the builtin type
Q: enum {
aarch64_svcount_t,
amdgpu_buffer_rsrc_t,
},
/// ext_vector, followed by the number of elements and the base type.
E: u32,
/// _Complex, followed by the base type.
@ -270,8 +267,6 @@ const Spec = union(enum) {
K,
/// pid_t
p,
/// Used to indicate a builtin with target-dependent param types. Must appear by itself
@"!",
};
const Suffix = union(enum) {

1150
lib/compiler/aro/aro/Builtins/aarch64.zig vendored Normal file

File diff suppressed because it is too large Load Diff

2851
lib/compiler/aro/aro/Builtins/amdgcn.zig vendored Normal file

File diff suppressed because it is too large Load Diff

1076
lib/compiler/aro/aro/Builtins/arm.zig vendored Normal file

File diff suppressed because it is too large Load Diff

231
lib/compiler/aro/aro/Builtins/bpf.zig vendored Normal file
View File

@ -0,0 +1,231 @@
//! Autogenerated by GenerateDef from src/aro/Builtins/bpf.def, do not edit
// zig fmt: off
const std = @import("std");
/// Returns a namespace of lookup functions for the BPF target builtins,
/// parameterized over the `Properties` type describing each builtin.
/// The builtin names are encoded as a DAFSA (deterministic acyclic
/// finite-state automaton) whose node numbering doubles as a minimal
/// perfect hash, so name -> tag -> data lookups need no allocation.
pub fn with(comptime Properties: type) type {
return struct {
/// Integer starting at 0 derived from the unique index,
/// corresponds with the data array index.
pub const Tag = enum(u16) { __builtin_btf_type_id,
__builtin_preserve_enum_value,
__builtin_preserve_field_info,
__builtin_preserve_type_info,
};
/// Looks up the `Properties` of the builtin with the given `name`,
/// or returns null if `name` is not a known BPF builtin.
pub fn fromName(name: []const u8) ?Properties {
const data_index = tagFromName(name) orelse return null;
return data[@intFromEnum(data_index)];
}
/// Maps a builtin name to its `Tag` via the DAFSA's minimal perfect
/// hash (unique indices start at 1; tags start at 0), or null if the
/// name is not a BPF builtin.
pub fn tagFromName(name: []const u8) ?Tag {
const unique_index = uniqueIndex(name) orelse return null;
return @enumFromInt(unique_index - 1);
}
/// Returns the `Properties` for a known tag (direct index into `data`).
pub fn fromTag(tag: Tag) Properties {
return data[@intFromEnum(tag)];
}
/// Reconstructs the builtin's name for `tag` into `name_buf`, which
/// must hold at least `longest_name` bytes; returns the written slice.
pub fn nameFromTagIntoBuf(tag: Tag, name_buf: []u8) []u8 {
std.debug.assert(name_buf.len >= longest_name);
const unique_index = @intFromEnum(tag) + 1;
return nameFromUniqueIndex(unique_index, name_buf);
}
/// Reconstructs the builtin's name for `tag` into an owned
/// fixed-size buffer (see `NameBuf.span`).
pub fn nameFromTag(tag: Tag) NameBuf {
var name_buf: NameBuf = undefined;
const unique_index = @intFromEnum(tag) + 1;
const name = nameFromUniqueIndex(unique_index, &name_buf.buf);
name_buf.len = @intCast(name.len);
return name_buf;
}
/// Fixed-size storage for a reconstructed builtin name; only the
/// first `len` bytes of `buf` are valid.
pub const NameBuf = struct {
buf: [longest_name]u8 = undefined,
len: std.math.IntFittingRange(0, longest_name),
/// Returns the valid portion of the buffer.
pub fn span(self: *const NameBuf) []const u8 {
return self.buf[0..self.len];
}
};
/// Returns true if `name` exactly matches one of the encoded builtins:
/// walks one DAFSA transition per character, then checks that the
/// final node terminates a word.
pub fn exists(name: []const u8) bool {
if (name.len < shortest_name or name.len > longest_name) return false;
var index: u16 = 0;
for (name) |c| {
index = findInList(dafsa[index].child_index, c) orelse return false;
}
return dafsa[index].end_of_word;
}
/// Length of the shortest encoded name, "__builtin_btf_type_id".
pub const shortest_name = 21;
/// Length of the longest encoded name, "__builtin_preserve_enum_value".
pub const longest_name = 29;
/// Search siblings of `first_child_index` for the `char`
/// If found, returns the index of the node within the `dafsa` array.
/// Otherwise, returns `null`.
pub fn findInList(first_child_index: u16, char: u8) ?u16 {
// Quota sized by the generator for this table's longest sibling scan.
@setEvalBranchQuota(8);
var index = first_child_index;
while (true) {
if (dafsa[index].char == char) return index;
if (dafsa[index].end_of_list) return null;
index += 1;
}
unreachable;
}
/// Returns a unique (minimal perfect hash) index (starting at 1) for the `name`,
/// or null if the name was not found.
pub fn uniqueIndex(name: []const u8) ?u16 {
if (name.len < shortest_name or name.len > longest_name) return null;
var index: u16 = 0;
var node_index: u16 = 0;
for (name) |c| {
const child_index = findInList(dafsa[node_index].child_index, c) orelse return null;
// Add the word counts of siblings whose character sorts before
// `c`; this is what makes the index a minimal perfect hash.
var sibling_index = dafsa[node_index].child_index;
while (true) {
const sibling_c = dafsa[sibling_index].char;
std.debug.assert(sibling_c != 0);
if (sibling_c < c) {
index += dafsa[sibling_index].number;
}
if (dafsa[sibling_index].end_of_list) break;
sibling_index += 1;
}
node_index = child_index;
if (dafsa[node_index].end_of_word) index += 1;
}
if (!dafsa[node_index].end_of_word) return null;
return index;
}
/// Returns a slice of `buf` with the name associated with the given `index`.
/// This function should only be called with an `index` that
/// is already known to exist within the `dafsa`, e.g. an index
/// returned from `uniqueIndex`.
pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {
std.debug.assert(index >= 1 and index <= data.len);
var node_index: u16 = 0;
var count: u16 = index;
var w = std.Io.Writer.fixed(buf);
// Inverse of `uniqueIndex`: at each node, skip siblings that account
// for fewer words than `count`, emit the sibling that contains the
// target word, and descend until the count is exhausted.
while (true) {
var sibling_index = dafsa[node_index].child_index;
while (true) {
if (dafsa[sibling_index].number > 0 and dafsa[sibling_index].number < count) {
count -= dafsa[sibling_index].number;
} else {
w.writeByte(dafsa[sibling_index].char) catch unreachable;
node_index = sibling_index;
if (dafsa[node_index].end_of_word) {
count -= 1;
}
break;
}
if (dafsa[sibling_index].end_of_list) break;
sibling_index += 1;
}
if (count == 0) break;
}
return w.buffered();
}
const Node = packed struct {
char: u8,
/// Nodes are numbered with "an integer which gives the number of words that
/// would be accepted by the automaton starting from that state." This numbering
/// allows calculating "a one-to-one correspondence between the integers 1 to L
/// (L is the number of words accepted by the automaton) and the words themselves."
///
/// Essentially, this allows us to have a minimal perfect hashing scheme such that
/// it's possible to store & lookup the properties of each builtin using a separate array.
number: std.math.IntFittingRange(0, data.len),
/// If true, this node is the end of a valid builtin.
/// Note: This does not necessarily mean that this node does not have child nodes.
end_of_word: bool,
/// If true, this node is the end of a sibling list.
/// If false, then (index + 1) will contain the next sibling.
end_of_list: bool,
/// Index of the first child of this node.
child_index: u16,
};
/// Generated DAFSA node table; index 0 is the root (its `char` is 0 and
/// never matched). Common prefixes and suffixes of the builtin names are
/// stored once and shared between words.
const dafsa = [_]Node{
.{ .char = 0, .end_of_word = false, .end_of_list = true, .number = 0, .child_index = 1 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 2 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 3 },
.{ .char = 'b', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 4 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 5 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 6 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 7 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 8 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 9 },
.{ .char = 'n', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 10 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 11 },
.{ .char = 'b', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 13 },
.{ .char = 'p', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 14 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 15 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 16 },
.{ .char = 'f', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 17 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 18 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 19 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 20 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 21 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 22 },
.{ .char = 'y', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 23 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 24 },
.{ .char = 'p', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 25 },
.{ .char = 'v', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 26 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 27 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 28 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 29 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 3, .child_index = 30 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 33 },
.{ .char = 'e', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 34 },
.{ .char = 'f', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 35 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 36 },
.{ .char = 'd', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'n', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 37 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 38 },
.{ .char = 'y', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 39 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 40 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 41 },
.{ .char = 'p', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 42 },
.{ .char = 'm', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 43 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 44 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 45 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 46 },
.{ .char = 'd', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 45 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 47 },
.{ .char = 'v', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 48 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 49 },
.{ .char = 'a', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 50 },
.{ .char = 'n', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 51 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 52 },
.{ .char = 'f', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 53 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 54 },
.{ .char = 'o', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'e', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
};
/// Per-builtin properties, indexed by `@intFromEnum(Tag)`.
/// A trailing '.' in `param_str` marks a varargs function.
pub const data = blk: {
@setEvalBranchQuota(36);
break :blk [_]Properties{
.{ .param_str = "LUi.", .attributes = .{ .custom_typecheck = true } },
.{ .param_str = "Li.", .attributes = .{ .custom_typecheck = true } },
.{ .param_str = "Ui.", .attributes = .{ .custom_typecheck = true } },
.{ .param_str = "LUi.", .attributes = .{ .custom_typecheck = true } },
};
};
};
}

5527
lib/compiler/aro/aro/Builtins/common.zig vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -2,7 +2,6 @@ const std = @import("std");
const backend = @import("../../backend.zig");
const Interner = backend.Interner;
const Builtins = @import("../Builtins.zig");
const Builtin = Builtins.Builtin;
const Parser = @import("../Parser.zig");
const Tree = @import("../Tree.zig");
const TypeStore = @import("../TypeStore.zig");
@ -23,63 +22,65 @@ fn makeNan(comptime T: type, str: []const u8) T {
return @bitCast(@as(UnsignedSameSize, bits) | @as(UnsignedSameSize, @bitCast(std.math.nan(T))));
}
pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const Tree.Node.Index) !Value {
const builtin = Builtin.fromTag(tag);
if (!builtin.properties.attributes.const_evaluable) return .{};
pub fn eval(expanded: Builtins.Expanded, p: *Parser, args: []const Tree.Node.Index) !Value {
if (!expanded.attributes.const_evaluable) return .{};
switch (tag) {
.__builtin_inff,
.__builtin_inf,
.__builtin_infl,
=> {
const qt: QualType = switch (tag) {
.__builtin_inff => .float,
.__builtin_inf => .double,
.__builtin_infl => .long_double,
else => unreachable,
};
const f: Interner.Key.Float = switch (qt.bitSizeof(p.comp)) {
32 => .{ .f32 = std.math.inf(f32) },
64 => .{ .f64 = std.math.inf(f64) },
80 => .{ .f80 = std.math.inf(f80) },
128 => .{ .f128 = std.math.inf(f128) },
else => unreachable,
};
return Value.intern(p.comp, .{ .float = f });
},
.__builtin_isinf => blk: {
if (args.len == 0) break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isInf(p.comp));
},
.__builtin_isinf_sign => blk: {
if (args.len == 0) break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
switch (val.isInfSign(p.comp)) {
.unknown => {},
.finite => return Value.zero,
.positive => return Value.one,
.negative => return Value.int(@as(i64, -1), p.comp),
}
},
.__builtin_isnan => blk: {
if (args.len == 0) break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isNan(p.comp));
},
.__builtin_nan => blk: {
if (args.len == 0) break :blk;
const val = p.getDecayedStringLiteral(args[0]) orelse break :blk;
const bytes = p.comp.interner.get(val.ref()).bytes;
switch (expanded.tag) {
.common => |tag| switch (tag) {
.__builtin_inff,
.__builtin_inf,
.__builtin_infl,
=> {
const qt: QualType = switch (tag) {
.__builtin_inff => .float,
.__builtin_inf => .double,
.__builtin_infl => .long_double,
else => unreachable,
};
const f: Interner.Key.Float = switch (qt.bitSizeof(p.comp)) {
32 => .{ .f32 = std.math.inf(f32) },
64 => .{ .f64 = std.math.inf(f64) },
80 => .{ .f80 = std.math.inf(f80) },
128 => .{ .f128 = std.math.inf(f128) },
else => unreachable,
};
return Value.intern(p.comp, .{ .float = f });
},
.__builtin_isinf => blk: {
if (args.len == 0) break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isInf(p.comp));
},
.__builtin_isinf_sign => blk: {
if (args.len == 0) break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
switch (val.isInfSign(p.comp)) {
.unknown => {},
.finite => return Value.zero,
.positive => return Value.one,
.negative => return Value.int(@as(i64, -1), p.comp),
}
},
.__builtin_isnan => blk: {
if (args.len == 0) break :blk;
const val = p.tree.value_map.get(args[0]) orelse break :blk;
return Value.fromBool(val.isNan(p.comp));
},
.__builtin_nan => blk: {
if (args.len == 0) break :blk;
const val = p.getDecayedStringLiteral(args[0]) orelse break :blk;
const bytes = p.comp.interner.get(val.ref()).bytes;
const f: Interner.Key.Float = switch (Type.Float.double.bits(p.comp)) {
32 => .{ .f32 = makeNan(f32, bytes) },
64 => .{ .f64 = makeNan(f64, bytes) },
80 => .{ .f80 = makeNan(f80, bytes) },
128 => .{ .f128 = makeNan(f128, bytes) },
else => unreachable,
};
return Value.intern(p.comp, .{ .float = f });
const f: Interner.Key.Float = switch (Type.Float.double.bits(p.comp)) {
32 => .{ .f32 = makeNan(f32, bytes) },
64 => .{ .f64 = makeNan(f64, bytes) },
80 => .{ .f80 = makeNan(f80, bytes) },
128 => .{ .f128 = makeNan(f128, bytes) },
else => unreachable,
};
return Value.intern(p.comp, .{ .float = f });
},
else => {},
},
else => {},
}

6500
lib/compiler/aro/aro/Builtins/hexagon.zig vendored Normal file

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

2110
lib/compiler/aro/aro/Builtins/mips.zig vendored Normal file

File diff suppressed because it is too large Load Diff

3253
lib/compiler/aro/aro/Builtins/nvptx.zig vendored Normal file

File diff suppressed because it is too large Load Diff

2491
lib/compiler/aro/aro/Builtins/powerpc.zig vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -1,13 +1,5 @@
const std = @import("std");
const Properties = @This();
param_str: []const u8,
language: Language = .all_languages,
attributes: Attributes = Attributes{},
header: Header = .none,
target_set: TargetSet = TargetSet.initOne(.basic),
/// Header which must be included for a builtin to be available
pub const Header = enum {
none,
@ -41,6 +33,18 @@ pub const Header = enum {
complex,
/// Blocks.h
blocks,
/// intrin.h
intrin,
/// immintrin.h
immintrin,
/// xmmintrin.h
xmmintrin,
/// emmintrin.h
emmintrin,
/// mmintrin.h
mmintrin,
/// arm_acle.h
arm_acle,
};
/// Languages in which a builtin is available
@ -49,6 +53,7 @@ pub const Language = enum {
all_ms_languages,
all_gnu_languages,
gnu_lang,
c23_lang,
};
pub const Attributes = packed struct {
@ -106,38 +111,7 @@ pub const Attributes = packed struct {
const_evaluable: bool = false,
};
pub const Target = enum {
/// Supported on all targets
basic,
aarch64,
aarch64_neon_sve_bridge,
aarch64_neon_sve_bridge_cg,
amdgpu,
arm,
bpf,
hexagon,
hexagon_dep,
hexagon_map_custom_dep,
loong_arch,
mips,
neon,
nvptx,
ppc,
riscv,
riscv_vector,
sve,
systemz,
ve,
vevl_gen,
webassembly,
x86,
x86_64,
xcore,
};
/// Targets for which a builtin is enabled
pub const TargetSet = std.enums.EnumSet(Target);
pub fn isVarArgs(properties: Properties) bool {
return properties.param_str[properties.param_str.len - 1] == '.';
pub fn isVarArgs(param_str: [*:0]const u8) bool {
const slice = std.mem.sliceTo(param_str, 0);
return slice[slice.len - 1] == '.';
}

469
lib/compiler/aro/aro/Builtins/riscv.zig vendored Normal file
View File

@ -0,0 +1,469 @@
//! Autogenerated by GenerateDef from src/aro/Builtins/riscv.def, do not edit
// zig fmt: off
const std = @import("std");
pub fn with(comptime Properties: type) type {
return struct {
/// Integer starting at 0 derived from the unique index,
/// corresponds with the data array index.
pub const Tag = enum(u16) { __builtin_riscv_aes32dsi,
__builtin_riscv_aes32dsmi,
__builtin_riscv_aes32esi,
__builtin_riscv_aes32esmi,
__builtin_riscv_aes64ds,
__builtin_riscv_aes64dsm,
__builtin_riscv_aes64es,
__builtin_riscv_aes64esm,
__builtin_riscv_aes64im,
__builtin_riscv_aes64ks1i,
__builtin_riscv_aes64ks2,
__builtin_riscv_brev8_32,
__builtin_riscv_brev8_64,
__builtin_riscv_clmul_32,
__builtin_riscv_clmul_64,
__builtin_riscv_clmulh_32,
__builtin_riscv_clmulh_64,
__builtin_riscv_clmulr_32,
__builtin_riscv_clmulr_64,
__builtin_riscv_clz_32,
__builtin_riscv_clz_64,
__builtin_riscv_ctz_32,
__builtin_riscv_ctz_64,
__builtin_riscv_cv_alu_addN,
__builtin_riscv_cv_alu_addRN,
__builtin_riscv_cv_alu_adduN,
__builtin_riscv_cv_alu_adduRN,
__builtin_riscv_cv_alu_clip,
__builtin_riscv_cv_alu_clipu,
__builtin_riscv_cv_alu_extbs,
__builtin_riscv_cv_alu_extbz,
__builtin_riscv_cv_alu_exths,
__builtin_riscv_cv_alu_exthz,
__builtin_riscv_cv_alu_sle,
__builtin_riscv_cv_alu_sleu,
__builtin_riscv_cv_alu_subN,
__builtin_riscv_cv_alu_subRN,
__builtin_riscv_cv_alu_subuN,
__builtin_riscv_cv_alu_subuRN,
__builtin_riscv_ntl_load,
__builtin_riscv_ntl_store,
__builtin_riscv_orc_b_32,
__builtin_riscv_orc_b_64,
__builtin_riscv_pause,
__builtin_riscv_sha256sig0,
__builtin_riscv_sha256sig1,
__builtin_riscv_sha256sum0,
__builtin_riscv_sha256sum1,
__builtin_riscv_sha512sig0,
__builtin_riscv_sha512sig0h,
__builtin_riscv_sha512sig0l,
__builtin_riscv_sha512sig1,
__builtin_riscv_sha512sig1h,
__builtin_riscv_sha512sig1l,
__builtin_riscv_sha512sum0,
__builtin_riscv_sha512sum0r,
__builtin_riscv_sha512sum1,
__builtin_riscv_sha512sum1r,
__builtin_riscv_sm3p0,
__builtin_riscv_sm3p1,
__builtin_riscv_sm4ed,
__builtin_riscv_sm4ks,
__builtin_riscv_unzip_32,
__builtin_riscv_xperm4_32,
__builtin_riscv_xperm4_64,
__builtin_riscv_xperm8_32,
__builtin_riscv_xperm8_64,
__builtin_riscv_zip_32,
};
pub fn fromName(name: []const u8) ?Properties {
const data_index = tagFromName(name) orelse return null;
return data[@intFromEnum(data_index)];
}
pub fn tagFromName(name: []const u8) ?Tag {
const unique_index = uniqueIndex(name) orelse return null;
return @enumFromInt(unique_index - 1);
}
pub fn fromTag(tag: Tag) Properties {
return data[@intFromEnum(tag)];
}
pub fn nameFromTagIntoBuf(tag: Tag, name_buf: []u8) []u8 {
std.debug.assert(name_buf.len >= longest_name);
const unique_index = @intFromEnum(tag) + 1;
return nameFromUniqueIndex(unique_index, name_buf);
}
pub fn nameFromTag(tag: Tag) NameBuf {
var name_buf: NameBuf = undefined;
const unique_index = @intFromEnum(tag) + 1;
const name = nameFromUniqueIndex(unique_index, &name_buf.buf);
name_buf.len = @intCast(name.len);
return name_buf;
}
pub const NameBuf = struct {
buf: [longest_name]u8 = undefined,
len: std.math.IntFittingRange(0, longest_name),
pub fn span(self: *const NameBuf) []const u8 {
return self.buf[0..self.len];
}
};
pub fn exists(name: []const u8) bool {
if (name.len < shortest_name or name.len > longest_name) return false;
var index: u16 = 0;
for (name) |c| {
index = findInList(dafsa[index].child_index, c) orelse return false;
}
return dafsa[index].end_of_word;
}
pub const shortest_name = 21;
pub const longest_name = 29;
/// Search siblings of `first_child_index` for the `char`
/// If found, returns the index of the node within the `dafsa` array.
/// Otherwise, returns `null`.
pub fn findInList(first_child_index: u16, char: u8) ?u16 {
@setEvalBranchQuota(136);
var index = first_child_index;
while (true) {
if (dafsa[index].char == char) return index;
if (dafsa[index].end_of_list) return null;
index += 1;
}
unreachable;
}
/// Returns a unique (minimal perfect hash) index (starting at 1) for the `name`,
/// or null if the name was not found.
pub fn uniqueIndex(name: []const u8) ?u16 {
if (name.len < shortest_name or name.len > longest_name) return null;
var index: u16 = 0;
var node_index: u16 = 0;
for (name) |c| {
const child_index = findInList(dafsa[node_index].child_index, c) orelse return null;
var sibling_index = dafsa[node_index].child_index;
while (true) {
const sibling_c = dafsa[sibling_index].char;
std.debug.assert(sibling_c != 0);
if (sibling_c < c) {
index += dafsa[sibling_index].number;
}
if (dafsa[sibling_index].end_of_list) break;
sibling_index += 1;
}
node_index = child_index;
if (dafsa[node_index].end_of_word) index += 1;
}
if (!dafsa[node_index].end_of_word) return null;
return index;
}
/// Returns a slice of `buf` with the name associated with the given `index`.
/// This function should only be called with an `index` that
/// is already known to exist within the `dafsa`, e.g. an index
/// returned from `uniqueIndex`.
pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {
std.debug.assert(index >= 1 and index <= data.len);
var node_index: u16 = 0;
var count: u16 = index;
var w = std.Io.Writer.fixed(buf);
while (true) {
var sibling_index = dafsa[node_index].child_index;
while (true) {
if (dafsa[sibling_index].number > 0 and dafsa[sibling_index].number < count) {
count -= dafsa[sibling_index].number;
} else {
w.writeByte(dafsa[sibling_index].char) catch unreachable;
node_index = sibling_index;
if (dafsa[node_index].end_of_word) {
count -= 1;
}
break;
}
if (dafsa[sibling_index].end_of_list) break;
sibling_index += 1;
}
if (count == 0) break;
}
return w.buffered();
}
const Node = packed struct {
char: u8,
/// Nodes are numbered with "an integer which gives the number of words that
/// would be accepted by the automaton starting from that state." This numbering
/// allows calculating "a one-to-one correspondence between the integers 1 to L
/// (L is the number of words accepted by the automaton) and the words themselves."
///
/// Essentially, this allows us to have a minimal perfect hashing scheme such that
/// it's possible to store & lookup the properties of each builtin using a separate array.
number: std.math.IntFittingRange(0, data.len),
/// If true, this node is the end of a valid builtin.
/// Note: This does not necessarily mean that this node does not have child nodes.
end_of_word: bool,
/// If true, this node is the end of a sibling list.
/// If false, then (index + 1) will contain the next sibling.
end_of_list: bool,
/// Index of the first child of this node.
child_index: u16,
};
/// DAFSA transition table; entry 0 is the root node. The first chain of
/// nodes spells the mandatory "__builtin_riscv_" prefix before branching.
/// NOTE(review): table appears autogenerated (sibling files in this diff are
/// produced by GenerateDef) — do not hand-edit; the `number` fields encode
/// the perfect-hash ranking and would be invalidated.
const dafsa = [_]Node{
.{ .char = 0, .end_of_word = false, .end_of_list = true, .number = 0, .child_index = 1 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 2 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 3 },
.{ .char = 'b', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 4 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 5 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 6 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 7 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 8 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 9 },
.{ .char = 'n', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 10 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 11 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 12 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 13 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 14 },
.{ .char = 'c', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 15 },
.{ .char = 'v', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 16 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 68, .child_index = 17 },
.{ .char = 'a', .end_of_word = false, .end_of_list = false, .number = 11, .child_index = 27 },
.{ .char = 'b', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 28 },
.{ .char = 'c', .end_of_word = false, .end_of_list = false, .number = 26, .child_index = 29 },
.{ .char = 'n', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 32 },
.{ .char = 'o', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 33 },
.{ .char = 'p', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 34 },
.{ .char = 's', .end_of_word = false, .end_of_list = false, .number = 18, .child_index = 35 },
.{ .char = 'u', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 37 },
.{ .char = 'x', .end_of_word = false, .end_of_list = false, .number = 4, .child_index = 38 },
.{ .char = 'z', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 39 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 11, .child_index = 40 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 41 },
.{ .char = 'l', .end_of_word = false, .end_of_list = false, .number = 8, .child_index = 42 },
.{ .char = 't', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 44 },
.{ .char = 'v', .end_of_word = false, .end_of_list = true, .number = 16, .child_index = 45 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 46 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 47 },
.{ .char = 'a', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 48 },
.{ .char = 'h', .end_of_word = false, .end_of_list = false, .number = 14, .child_index = 49 },
.{ .char = 'm', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 50 },
.{ .char = 'n', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 52 },
.{ .char = 'p', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 53 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 54 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 11, .child_index = 55 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 57 },
.{ .char = 'm', .end_of_word = false, .end_of_list = false, .number = 6, .child_index = 58 },
.{ .char = 'z', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 59 },
.{ .char = 'z', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 59 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 16, .child_index = 60 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 61 },
.{ .char = 'c', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 62 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 63 },
.{ .char = 'a', .end_of_word = false, .end_of_list = true, .number = 14, .child_index = 64 },
.{ .char = '3', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 66 },
.{ .char = '4', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 67 },
.{ .char = 'z', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 39 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 69 },
.{ .char = 'p', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 70 },
.{ .char = '3', .end_of_word = false, .end_of_list = false, .number = 4, .child_index = 71 },
.{ .char = '6', .end_of_word = false, .end_of_list = true, .number = 7, .child_index = 72 },
.{ .char = 'v', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 73 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 6, .child_index = 74 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 75 },
.{ .char = 'a', .end_of_word = false, .end_of_list = true, .number = 16, .child_index = 77 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 78 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 80 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 81 },
.{ .char = '2', .end_of_word = false, .end_of_list = false, .number = 4, .child_index = 82 },
.{ .char = '5', .end_of_word = false, .end_of_list = true, .number = 10, .child_index = 83 },
.{ .char = 'p', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 84 },
.{ .char = 'e', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 86 },
.{ .char = 'k', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 87 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 88 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 89 },
.{ .char = '2', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 90 },
.{ .char = '4', .end_of_word = false, .end_of_list = true, .number = 7, .child_index = 92 },
.{ .char = '8', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 59 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 6, .child_index = 96 },
.{ .char = '3', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 99 },
.{ .char = '6', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 100 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 16, .child_index = 101 },
.{ .char = 'l', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 102 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 103 },
.{ .char = 'b', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 59 },
.{ .char = 'e', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = '5', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 104 },
.{ .char = '1', .end_of_word = false, .end_of_list = true, .number = 10, .child_index = 105 },
.{ .char = '0', .end_of_word = true, .end_of_list = false, .number = 1, .child_index = 0 },
.{ .char = '1', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'd', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 's', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'm', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 106 },
.{ .char = '3', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 99 },
.{ .char = 'd', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 108 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 108 },
.{ .char = 'd', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 109 },
.{ .char = 'e', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 109 },
.{ .char = 'i', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 110 },
.{ .char = 'k', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 111 },
.{ .char = '_', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 75 },
.{ .char = 'h', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 59 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 59 },
.{ .char = '2', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = '4', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 16, .child_index = 112 },
.{ .char = 'o', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 113 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 114 },
.{ .char = '6', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 115 },
.{ .char = '2', .end_of_word = false, .end_of_list = true, .number = 10, .child_index = 116 },
.{ .char = '4', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 59 },
.{ .char = '8', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 59 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 117 },
.{ .char = 's', .end_of_word = true, .end_of_list = true, .number = 2, .child_index = 119 },
.{ .char = 'm', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 120 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 16, .child_index = 122 },
.{ .char = 'a', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 86 },
.{ .char = 'o', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 126 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 127 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 10, .child_index = 129 },
.{ .char = 'i', .end_of_word = true, .end_of_list = false, .number = 1, .child_index = 0 },
.{ .char = 'm', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 131 },
.{ .char = 'm', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = '1', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 131 },
.{ .char = '2', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'a', .end_of_word = false, .end_of_list = false, .number = 4, .child_index = 132 },
.{ .char = 'c', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 133 },
.{ .char = 'e', .end_of_word = false, .end_of_list = false, .number = 4, .child_index = 134 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 6, .child_index = 135 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 81 },
.{ .char = 'i', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 137 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 138 },
.{ .char = 'i', .end_of_word = false, .end_of_list = false, .number = 6, .child_index = 139 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 140 },
.{ .char = 'i', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'd', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 141 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 142 },
.{ .char = 'x', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 143 },
.{ .char = 'l', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 144 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 145 },
.{ .char = 'g', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 84 },
.{ .char = 'm', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 84 },
.{ .char = 'g', .end_of_word = false, .end_of_list = true, .number = 6, .child_index = 146 },
.{ .char = 'm', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 148 },
.{ .char = 'd', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 150 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 153 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 154 },
.{ .char = 'e', .end_of_word = true, .end_of_list = true, .number = 2, .child_index = 156 },
.{ .char = 'b', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 150 },
.{ .char = '0', .end_of_word = true, .end_of_list = false, .number = 3, .child_index = 157 },
.{ .char = '1', .end_of_word = true, .end_of_list = true, .number = 3, .child_index = 157 },
.{ .char = '0', .end_of_word = true, .end_of_list = false, .number = 2, .child_index = 159 },
.{ .char = '1', .end_of_word = true, .end_of_list = true, .number = 2, .child_index = 159 },
.{ .char = 'N', .end_of_word = true, .end_of_list = false, .number = 1, .child_index = 0 },
.{ .char = 'R', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 160 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 161 },
.{ .char = 'p', .end_of_word = true, .end_of_list = true, .number = 2, .child_index = 156 },
.{ .char = 'b', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 163 },
.{ .char = 'h', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 163 },
.{ .char = 'u', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'h', .end_of_word = true, .end_of_list = false, .number = 1, .child_index = 0 },
.{ .char = 'l', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'r', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'N', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'N', .end_of_word = true, .end_of_list = false, .number = 1, .child_index = 0 },
.{ .char = 'R', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 160 },
.{ .char = 's', .end_of_word = true, .end_of_list = false, .number = 1, .child_index = 0 },
.{ .char = 'z', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
};
/// Per-builtin `Properties`, indexed by (unique DAFSA index - 1).
/// The `features` strings name target features (e.g. zbb, zknd) — presumably
/// RISC-V ISA extensions given the `__builtin_riscv_` prefix encoded in the
/// dafsa table above; `|` separates alternatives and `,` conjoins requirements
/// (TODO confirm against the GenerateDef .def source).
pub const data = blk: {
@setEvalBranchQuota(612);
break :blk [_]Properties{
.{ .param_str = "UiUiUiIUi", .features = "zknd,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiIUi", .features = "zknd,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiIUi", .features = "zkne,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiIUi", .features = "zkne,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zknd,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zknd,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zkne,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zkne,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWi", .features = "zknd,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiIUi", .features = "zknd|zkne,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zknd|zkne,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zbkb", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWi", .features = "zbkb,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zbc|zbkc", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zbc|zbkc,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zbc|zbkc,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zbc|zbkc,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zbc,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zbc,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zbb|xtheadbb", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUWi", .features = "zbb|xtheadbb,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zbb", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUWi", .features = "zbb,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "iiiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "iiiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "iii", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "ii", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "ii", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "iii", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "iUiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "iiiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "iiiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiUi", .features = "xcvalu", .attributes = .{ .@"const" = true } },
.{ .param_str = "v.", .features = "zihintntl", .attributes = .{ .custom_typecheck = true } },
.{ .param_str = "v.", .features = "zihintntl", .attributes = .{ .custom_typecheck = true } },
.{ .param_str = "UiUi", .features = "zbb", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWi", .features = "zbb,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "v", .features = "zihintpause" },
.{ .param_str = "UiUi", .features = "zknh", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zknh", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zknh", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zknh", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWi", .features = "zknh,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zknh,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zknh,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWi", .features = "zknh,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zknh,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zknh,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWi", .features = "zknh,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zknh,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWi", .features = "zknh,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zknh,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zksh", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zksh", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiIUi", .features = "zksed", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUiIUi", .features = "zksed", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zbkb,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zbkx,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zbkx,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUiUi", .features = "zbkx,32bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UWiUWiUWi", .features = "zbkx,64bit", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi", .features = "zbkb,32bit", .attributes = .{ .@"const" = true } },
};
};
};
};
}

1068
lib/compiler/aro/aro/Builtins/s390x.zig vendored Normal file

File diff suppressed because it is too large. (Load Diff)

3370
lib/compiler/aro/aro/Builtins/ve.zig vendored Normal file

File diff suppressed because it is too large. (Load Diff)

6379
lib/compiler/aro/aro/Builtins/x86.zig vendored Normal file

File diff suppressed because it is too large. (Load Diff)

1122
lib/compiler/aro/aro/Builtins/x86_64.zig vendored Normal file

File diff suppressed because it is too large. (Load Diff)

204
lib/compiler/aro/aro/Builtins/xcore.zig vendored Normal file
View File

@ -0,0 +1,204 @@
//! Autogenerated by GenerateDef from src/aro/Builtins/xcore.def, do not edit
// zig fmt: off
const std = @import("std");
/// Instantiates the xcore builtin table with the caller-supplied `Properties`
/// type (GenerateDef pattern: the generated DAFSA and data tables are shared,
/// the `Properties` payload type is provided by the including project).
pub fn with(comptime Properties: type) type {
return struct {
/// Integer starting at 0 derived from the unique index,
/// corresponds with the data array index.
pub const Tag = enum(u16) { __builtin_bitrev,
__builtin_getid,
__builtin_getps,
__builtin_setps,
};
/// Returns the `Properties` of the builtin named `name`,
/// or null if `name` is not a recognized builtin.
pub fn fromName(name: []const u8) ?Properties {
const data_index = tagFromName(name) orelse return null;
return data[@intFromEnum(data_index)];
}
/// Returns the `Tag` of the builtin named `name`, or null.
/// The tag is the 1-based unique DAFSA index shifted down to start at 0.
pub fn tagFromName(name: []const u8) ?Tag {
const unique_index = uniqueIndex(name) orelse return null;
return @enumFromInt(unique_index - 1);
}
/// Returns the `Properties` for `tag` (direct array lookup).
pub fn fromTag(tag: Tag) Properties {
return data[@intFromEnum(tag)];
}
/// Writes the name of `tag` into `name_buf` and returns the written slice.
/// Asserts that `name_buf` holds at least `longest_name` bytes.
pub fn nameFromTagIntoBuf(tag: Tag, name_buf: []u8) []u8 {
std.debug.assert(name_buf.len >= longest_name);
const unique_index = @intFromEnum(tag) + 1;
return nameFromUniqueIndex(unique_index, name_buf);
}
/// Returns the name of `tag` in a by-value fixed-size buffer; use
/// `NameBuf.span` to get the slice.
pub fn nameFromTag(tag: Tag) NameBuf {
var name_buf: NameBuf = undefined;
const unique_index = @intFromEnum(tag) + 1;
const name = nameFromUniqueIndex(unique_index, &name_buf.buf);
name_buf.len = @intCast(name.len);
return name_buf;
}
/// Fixed-capacity buffer holding a builtin name of `len` bytes.
pub const NameBuf = struct {
buf: [longest_name]u8 = undefined,
len: std.math.IntFittingRange(0, longest_name),
pub fn span(self: *const NameBuf) []const u8 {
return self.buf[0..self.len];
}
};
/// Returns true if `name` is a recognized builtin, by walking the DAFSA
/// one character at a time from the root (node 0).
pub fn exists(name: []const u8) bool {
if (name.len < shortest_name or name.len > longest_name) return false;
var index: u16 = 0;
for (name) |c| {
index = findInList(dafsa[index].child_index, c) orelse return false;
}
return dafsa[index].end_of_word;
}
pub const shortest_name = 15;
pub const longest_name = 16;
/// Search siblings of `first_child_index` for the `char`
/// If found, returns the index of the node within the `dafsa` array.
/// Otherwise, returns `null`.
pub fn findInList(first_child_index: u16, char: u8) ?u16 {
// Generated comptime-evaluation budget sized to the longest sibling list.
@setEvalBranchQuota(8);
var index = first_child_index;
while (true) {
if (dafsa[index].char == char) return index;
if (dafsa[index].end_of_list) return null;
index += 1;
}
unreachable;
}
/// Returns a unique (minimal perfect hash) index (starting at 1) for the `name`,
/// or null if the name was not found.
pub fn uniqueIndex(name: []const u8) ?u16 {
if (name.len < shortest_name or name.len > longest_name) return null;
var index: u16 = 0;
var node_index: u16 = 0;
for (name) |c| {
const child_index = findInList(dafsa[node_index].child_index, c) orelse return null;
var sibling_index = dafsa[node_index].child_index;
while (true) {
const sibling_c = dafsa[sibling_index].char;
std.debug.assert(sibling_c != 0);
if (sibling_c < c) {
// Words through lexicographically earlier siblings rank before ours.
index += dafsa[sibling_index].number;
}
if (dafsa[sibling_index].end_of_list) break;
sibling_index += 1;
}
node_index = child_index;
// Passing through an accepting node consumes one rank position.
if (dafsa[node_index].end_of_word) index += 1;
}
if (!dafsa[node_index].end_of_word) return null;
return index;
}
/// Returns a slice of `buf` with the name associated with the given `index`.
/// This function should only be called with an `index` that
/// is already known to exist within the `dafsa`, e.g. an index
/// returned from `uniqueIndex`.
/// Inverts the perfect-hash numbering: descends the DAFSA, at each level
/// picking the child subtree containing the `index`-th accepted word.
pub fn nameFromUniqueIndex(index: u16, buf: []u8) []u8 {
std.debug.assert(index >= 1 and index <= data.len);
var node_index: u16 = 0;
// Remaining rank of the target word within the current subtree.
var count: u16 = index;
// Callers pass at least `longest_name` bytes (see nameFromTagIntoBuf),
// so the writes below cannot fail.
var w = std.Io.Writer.fixed(buf);
while (true) {
var sibling_index = dafsa[node_index].child_index;
while (true) {
if (dafsa[sibling_index].number > 0 and dafsa[sibling_index].number < count) {
// Every word under this sibling ranks before ours; skip the subtree.
count -= dafsa[sibling_index].number;
} else {
// Target lies within this sibling's subtree: emit and descend.
w.writeByte(dafsa[sibling_index].char) catch unreachable;
node_index = sibling_index;
if (dafsa[node_index].end_of_word) {
// The node itself ends a word, consuming one unit of rank.
count -= 1;
}
break;
}
if (dafsa[sibling_index].end_of_list) break;
sibling_index += 1;
}
// Rank exhausted: full name written.
if (count == 0) break;
}
return w.buffered();
}
/// One state of the DAFSA used for minimal-perfect-hash name lookup.
const Node = packed struct {
/// The byte matched on the edge leading to this node.
char: u8,
/// Nodes are numbered with "an integer which gives the number of words that
/// would be accepted by the automaton starting from that state." This numbering
/// allows calculating "a one-to-one correspondence between the integers 1 to L
/// (L is the number of words accepted by the automaton) and the words themselves."
///
/// Essentially, this allows us to have a minimal perfect hashing scheme such that
/// it's possible to store & lookup the properties of each builtin using a separate array.
number: std.math.IntFittingRange(0, data.len),
/// If true, this node is the end of a valid builtin.
/// Note: This does not necessarily mean that this node does not have child nodes.
end_of_word: bool,
/// If true, this node is the end of a sibling list.
/// If false, then (index + 1) will contain the next sibling.
end_of_list: bool,
/// Index of the first child of this node.
child_index: u16,
};
/// Autogenerated DAFSA node table; entry 0 is the root. Do not hand-edit:
/// the `number` fields encode the perfect-hash ranking (see file header).
const dafsa = [_]Node{
.{ .char = 0, .end_of_word = false, .end_of_list = true, .number = 0, .child_index = 1 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 2 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 3 },
.{ .char = 'b', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 4 },
.{ .char = 'u', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 5 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 6 },
.{ .char = 'l', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 7 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 8 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 9 },
.{ .char = 'n', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 10 },
.{ .char = '_', .end_of_word = false, .end_of_list = true, .number = 4, .child_index = 11 },
.{ .char = 'b', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 14 },
.{ .char = 'g', .end_of_word = false, .end_of_list = false, .number = 2, .child_index = 15 },
.{ .char = 's', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 16 },
.{ .char = 'i', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 17 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 18 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 19 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 20 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 2, .child_index = 21 },
.{ .char = 't', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 23 },
.{ .char = 'r', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 24 },
.{ .char = 'i', .end_of_word = false, .end_of_list = false, .number = 1, .child_index = 25 },
.{ .char = 'p', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 26 },
.{ .char = 'p', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 26 },
.{ .char = 'e', .end_of_word = false, .end_of_list = true, .number = 1, .child_index = 27 },
.{ .char = 'd', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 's', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
.{ .char = 'v', .end_of_word = true, .end_of_list = true, .number = 1, .child_index = 0 },
};
/// Per-builtin `Properties`, indexed by (unique DAFSA index - 1), i.e. by Tag.
pub const data = blk: {
@setEvalBranchQuota(36);
break :blk [_]Properties{
.{ .param_str = "UiUi", .attributes = .{ .@"const" = true } },
.{ .param_str = "Si", .attributes = .{ .@"const" = true } },
.{ .param_str = "UiUi" },
.{ .param_str = "vUiUi" },
};
};
};
}

View File

@ -2,13 +2,12 @@ const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const backend = @import("../backend.zig");
const backend = @import("backend");
const Interner = backend.Interner;
const Ir = backend.Ir;
const Builder = Ir.Builder;
const Builtins = @import("Builtins.zig");
const Builtin = Builtins.Builtin;
const Compilation = @import("Compilation.zig");
const StringId = @import("StringInterner.zig").StringId;
const Tree = @import("Tree.zig");
@ -111,6 +110,9 @@ pub fn genIr(tree: *const Tree) Compilation.Error!Ir {
error.FatalError => return error.FatalError,
error.OutOfMemory => return error.OutOfMemory,
},
.global_asm => {
return c.fail("TODO global assembly", .{});
},
else => unreachable,
}
}
@ -497,6 +499,7 @@ fn genExpr(c: *CodeGen, node_index: Node.Index) Error!Ir.Ref {
.goto_stmt,
.computed_goto_stmt,
.nullptr_literal,
.asm_stmt,
=> return c.fail("TODO CodeGen.genStmt {s}\n", .{@tagName(node)}),
.comma_expr => |bin| {
_ = try c.genExpr(bin.lhs);
@ -857,7 +860,7 @@ fn genExpr(c: *CodeGen, node_index: Node.Index) Error!Ir.Ref {
},
.builtin_call_expr => |call| {
const name = c.tree.tokSlice(call.builtin_tok);
const builtin = c.comp.builtins.lookup(name).builtin;
const builtin = c.comp.builtins.lookup(name);
return c.genBuiltinCall(builtin, call.args, call.qt);
},
.addr_of_label,
@ -1074,10 +1077,10 @@ fn genBoolExpr(c: *CodeGen, base: Node.Index, true_label: Ir.Ref, false_label: I
try c.addBranch(cmp, true_label, false_label);
}
fn genBuiltinCall(c: *CodeGen, builtin: Builtin, arg_nodes: []const Node.Index, qt: QualType) Error!Ir.Ref {
fn genBuiltinCall(c: *CodeGen, builtin: Builtins.Expanded, arg_nodes: []const Node.Index, qt: QualType) Error!Ir.Ref {
_ = arg_nodes;
_ = qt;
return c.fail("TODO CodeGen.genBuiltinCall {s}\n", .{Builtin.nameFromTag(builtin.tag).span()});
return c.fail("TODO CodeGen.genBuiltinCall {t}\n", .{builtin.tag});
}
fn genCall(c: *CodeGen, call: Node.Call) Error!Ir.Ref {

File diff suppressed because it is too large. (Load Diff)

View File

@ -195,7 +195,9 @@ pub const Option = enum {
@"out-of-scope-function",
@"date-time",
@"variadic-macro-arguments-omitted",
@"attribute-todo",
@"pragma-once-outside-header",
@"underlying-atomic-qualifier-ignored",
@"underlying-cv-qualifier-ignored",
/// GNU extensions
pub const gnu = [_]Option{

View File

@ -15,7 +15,7 @@ const GCCVersion = @import("Driver/GCCVersion.zig");
const LangOpts = @import("LangOpts.zig");
const Preprocessor = @import("Preprocessor.zig");
const Source = @import("Source.zig");
const target_util = @import("target.zig");
const Target = @import("Target.zig");
const Toolchain = @import("Toolchain.zig");
const Tree = @import("Tree.zig");
@ -46,6 +46,10 @@ comp: *Compilation,
diagnostics: *Diagnostics,
inputs: std.ArrayList(Source) = .empty,
imacros: std.ArrayList(Source) = .empty,
implicit_includes: std.ArrayList(Source) = .empty,
/// List of includes that will be used to construct the compilation's search path
includes: std.ArrayList(Compilation.Include) = .empty,
link_objects: std.ArrayList([]const u8) = .empty,
output_name: ?[]const u8 = null,
sysroot: ?[]const u8 = null,
@ -64,6 +68,7 @@ verbose_ast: bool = false,
verbose_pp: bool = false,
verbose_ir: bool = false,
verbose_linker_args: bool = false,
verbose_search_path: bool = false,
nobuiltininc: bool = false,
nostdinc: bool = false,
nostdlibinc: bool = false,
@ -99,6 +104,8 @@ aro_name: []const u8 = "",
/// Value of -target passed via CLI
raw_target_triple: ?[]const u8 = null,
/// Value of -darwin-target-variant-triple passed via CLI
raw_darwin_variant_target_triple: ?[]const u8 = null,
/// Value of -mcpu passed via CLI
raw_cpu: ?[]const u8 = null,
@ -107,6 +114,7 @@ raw_cpu: ?[]const u8 = null,
use_assembly_backend: bool = false,
// linker options
use_linker: ?[]const u8 = null,
linker_path: ?[]const u8 = null,
nodefaultlibs: bool = false,
nolibc: bool = false,
@ -130,6 +138,9 @@ pub fn deinit(d: *Driver) void {
d.comp.gpa.free(obj);
}
d.inputs.deinit(d.comp.gpa);
d.imacros.deinit(d.comp.gpa);
d.implicit_includes.deinit(d.comp.gpa);
d.includes.deinit(d.comp.gpa);
d.link_objects.deinit(d.comp.gpa);
d.* = undefined;
}
@ -161,6 +172,8 @@ pub const usage =
\\
\\Compile options:
\\ -c, --compile Only run preprocess, compile, and assemble steps
\\ -darwin-target-variant-triple
\\ Specify the darwin target variant triple
\\ -fapple-kext Use Apple's kernel extensions ABI
\\ -fchar8_t Enable char8_t (enabled by default in C23 and later)
\\ -fno-char8_t Disable char8_t (disabled by default for pre-C23)
@ -212,6 +225,8 @@ pub const usage =
\\ --embed-dir=<dir> Add directory to `#embed` search path
\\ --emulate=[clang|gcc|msvc]
\\ Select which C compiler to emulate (default clang)
\\ -imacros <file> Include macros from <file> before parsing
\\ -include <file> Process <file> as if `#include "file"` appeared as the first line of the primary source file.
\\ -mabicalls Enable SVR4-style position-independent code (Mips only)
\\ -mno-abicalls Disable SVR4-style position-independent code (Mips only)
\\ -mcmodel=<code-model> Generate code for the given code model
@ -273,7 +288,8 @@ pub fn parseArgs(
macro_buf: *std.ArrayList(u8),
args: []const []const u8,
) (Compilation.Error || std.Io.Writer.Error)!bool {
const io = d.comp.io;
const gpa = d.comp.gpa;
var i: usize = 1;
var comment_arg: []const u8 = "";
var hosted: ?bool = null;
@ -310,7 +326,7 @@ pub fn parseArgs(
value = macro[some + 1 ..];
macro = macro[0..some];
}
try macro_buf.print(d.comp.gpa, "#define {s} {s}\n", .{ macro, value });
try macro_buf.print(gpa, "#define {s} {s}\n", .{ macro, value });
} else if (mem.startsWith(u8, arg, "-U")) {
var macro = arg["-U".len..];
if (macro.len == 0) {
@ -321,7 +337,7 @@ pub fn parseArgs(
}
macro = args[i];
}
try macro_buf.print(d.comp.gpa, "#undef {s}\n", .{macro});
try macro_buf.print(gpa, "#undef {s}\n", .{macro});
} else if (mem.eql(u8, arg, "-O")) {
d.comp.code_gen_options.optimization_level = .@"1";
} else if (mem.startsWith(u8, arg, "-O")) {
@ -334,6 +350,13 @@ pub fn parseArgs(
d.system_defines = .no_system_defines;
} else if (mem.eql(u8, arg, "-c") or mem.eql(u8, arg, "--compile")) {
d.only_compile = true;
} else if (mem.eql(u8, arg, "-darwin-target-variant-triple")) {
i += 1;
if (i >= args.len) {
try d.err("expected argument after -darwin-target-variant-triple", .{});
continue;
}
d.raw_darwin_variant_target_triple = args[i];
} else if (mem.eql(u8, arg, "-dD")) {
d.debug_dump_letters.d = true;
} else if (mem.eql(u8, arg, "-dM")) {
@ -508,7 +531,7 @@ pub fn parseArgs(
}
path = args[i];
}
try d.comp.include_dirs.append(d.comp.gpa, path);
try d.includes.append(gpa, .{ .kind = .normal, .path = path });
} else if (mem.startsWith(u8, arg, "-idirafter")) {
var path = arg["-idirafter".len..];
if (path.len == 0) {
@ -519,7 +542,7 @@ pub fn parseArgs(
}
path = args[i];
}
try d.comp.after_include_dirs.append(d.comp.gpa, path);
try d.includes.append(gpa, .{ .kind = .after, .path = path });
} else if (mem.startsWith(u8, arg, "-isystem")) {
var path = arg["-isystem".len..];
if (path.len == 0) {
@ -530,7 +553,7 @@ pub fn parseArgs(
}
path = args[i];
}
try d.comp.system_include_dirs.append(d.comp.gpa, path);
try d.includes.append(gpa, .{ .kind = .system, .path = path });
} else if (mem.startsWith(u8, arg, "-iquote")) {
var path = arg["-iquote".len..];
if (path.len == 0) {
@ -541,7 +564,7 @@ pub fn parseArgs(
}
path = args[i];
}
try d.comp.iquote_include_dirs.append(d.comp.gpa, path);
try d.includes.append(gpa, .{ .kind = .quote, .path = path });
} else if (mem.startsWith(u8, arg, "-F")) {
var path = arg["-F".len..];
if (path.len == 0) {
@ -552,7 +575,7 @@ pub fn parseArgs(
}
path = args[i];
}
try d.comp.framework_dirs.append(d.comp.gpa, path);
try d.includes.append(gpa, .{ .kind = .framework, .path = path });
} else if (mem.startsWith(u8, arg, "-iframework")) {
var path = arg["-iframework".len..];
if (path.len == 0) {
@ -563,9 +586,27 @@ pub fn parseArgs(
}
path = args[i];
}
try d.comp.system_framework_dirs.append(d.comp.gpa, path);
try d.includes.append(gpa, .{ .kind = .system_framework, .path = path });
} else if (option(arg, "-include") orelse option(arg, "--include")) |implicit_include| {
try d.addImplicitInclude(implicit_include);
} else if (mem.eql(u8, arg, "-include") or mem.eql(u8, arg, "--include")) {
i += 1;
if (i >= args.len) {
try d.err("expected argument after {s}", .{arg});
continue;
}
try d.addImplicitInclude(args[i]);
} else if (option(arg, "-imacros") orelse option(arg, "--imacros")) |imacro_path| {
try d.addImacros(imacro_path);
} else if (mem.eql(u8, arg, "-imacros") or mem.eql(u8, arg, "--imacros")) {
i += 1;
if (i >= args.len) {
try d.err("expected argument after {s}", .{arg});
continue;
}
try d.addImacros(args[i]);
} else if (option(arg, "--embed-dir=")) |path| {
try d.comp.embed_dirs.append(d.comp.gpa, path);
try d.comp.embed_dirs.append(gpa, path);
} else if (option(arg, "--emulate=")) |compiler_str| {
const compiler = std.meta.stringToEnum(LangOpts.Compiler, compiler_str) orelse {
try d.err("invalid compiler '{s}'", .{arg});
@ -592,6 +633,9 @@ pub fn parseArgs(
d.output_name = file;
} else if (option(arg, "--sysroot=")) |sysroot| {
d.sysroot = sysroot;
} else if (mem.eql(u8, arg, "-Wp,-v")) {
// TODO this is not how this argument should work
d.verbose_search_path = true;
} else if (mem.eql(u8, arg, "-pedantic")) {
d.diagnostics.state.extensions = .warning;
} else if (mem.eql(u8, arg, "-pedantic-errors")) {
@ -744,41 +788,22 @@ pub fn parseArgs(
try d.warn("unknown argument '{s}'", .{arg});
}
} else if (std.mem.endsWith(u8, arg, ".o") or std.mem.endsWith(u8, arg, ".obj")) {
try d.link_objects.append(d.comp.gpa, arg);
try d.link_objects.append(gpa, arg);
} else {
const source = d.addSource(arg) catch |er| {
return d.fatal("unable to add source file '{s}': {s}", .{ arg, errorDescription(er) });
};
try d.inputs.append(d.comp.gpa, source);
try d.inputs.append(gpa, source);
}
}
{
var diags: std.Target.Query.ParseOptions.Diagnostics = .{};
const opts: std.Target.Query.ParseOptions = .{
.arch_os_abi = d.raw_target_triple orelse "native",
.cpu_features = d.raw_cpu,
.diagnostics = &diags,
};
const query = std.Target.Query.parse(opts) catch |er| switch (er) {
error.UnknownCpuModel => {
return d.fatal("unknown CPU: '{s}'", .{diags.cpu_name.?});
},
error.UnknownCpuFeature => {
return d.fatal("unknown CPU feature: '{s}'", .{diags.unknown_feature_name.?});
},
error.UnknownArchitecture => {
return d.fatal("unknown architecture: '{s}'", .{diags.unknown_architecture_name.?});
},
else => |e| return d.fatal("unable to parse target query '{s}': {s}", .{
opts.arch_os_abi, @errorName(e),
}),
};
d.comp.target = std.zig.system.resolveTargetQuery(io, query) catch |e| {
return d.fatal("unable to resolve target: {s}", .{errorDescription(e)});
};
d.comp.target = try d.parseTarget(d.raw_target_triple orelse "native", d.raw_cpu);
if (d.raw_darwin_variant_target_triple) |darwin_triple| {
d.comp.darwin_target_variant = try d.parseTarget(darwin_triple, null);
}
}
if (emulate != null or d.raw_target_triple != null) {
d.comp.langopts.setEmulatedCompiler(emulate orelse target_util.systemCompiler(d.comp.target));
d.comp.langopts.setEmulatedCompiler(emulate orelse d.comp.target.systemCompiler());
switch (d.comp.langopts.emulate) {
.clang => try d.diagnostics.set("clang", .off),
.gcc => try d.diagnostics.set("gnu", .off),
@ -839,6 +864,23 @@ fn addSource(d: *Driver, path: []const u8) !Source {
return d.comp.addSourceFromPath(path);
}
fn findIncludeCLI(d: *Driver, path: []const u8, kind: []const u8) !Source {
const source = (d.comp.findInclude(path, .{ .id = .keyword_include, .source = .generated }, .cli, .first) catch |er|
return d.fatal("unable to add {s} file '{s}': {s}", .{ kind, path, errorDescription(er) })) orelse
return d.fatal("unable to add {s} file '{s}': NotFound", .{ kind, path });
return source;
}
fn addImplicitInclude(d: *Driver, path: []const u8) !void {
const source = try d.findIncludeCLI(path, "implicit include");
try d.implicit_includes.append(d.comp.gpa, source);
}
fn addImacros(d: *Driver, path: []const u8) !void {
const source = try d.findIncludeCLI(path, "imacros");
try d.imacros.append(d.comp.gpa, source);
}
pub fn err(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void {
var sf = std.heap.stackFallback(1024, d.comp.gpa);
var allocating: std.Io.Writer.Allocating = .init(sf.get());
@ -857,13 +899,142 @@ pub fn warn(d: *Driver, fmt: []const u8, args: anytype) Compilation.Error!void {
try d.diagnostics.add(.{ .kind = .warning, .text = allocating.written(), .location = null });
}
pub fn unsupportedOptionForTarget(d: *Driver, target: std.Target, opt: []const u8) Compilation.Error!void {
fn unsupportedOptionForTarget(d: *Driver, target: *const Target, opt: []const u8) Compilation.Error!void {
try d.err(
"unsupported option '{s}' for target '{s}-{s}-{s}'",
.{ opt, @tagName(target.cpu.arch), @tagName(target.os.tag), @tagName(target.abi) },
);
}
fn parseTarget(d: *Driver, arch_os_abi: []const u8, opt_cpu_features: ?[]const u8) Compilation.Error!Target {
var query: std.Target.Query = .{
.dynamic_linker = .init(null),
};
var vendor: Target.Vendor = .unknown;
var opt_sub_arch: ?Target.SubArch = null;
var it = mem.splitScalar(u8, arch_os_abi, '-');
const arch_name = it.first();
const arch_is_native = mem.eql(u8, arch_name, "native");
if (!arch_is_native) {
query.cpu_arch, opt_sub_arch = Target.parseArchName(arch_name) orelse {
return d.fatal("unknown architecture: '{s}'", .{arch_name});
};
}
const arch = query.cpu_arch orelse @import("builtin").cpu.arch;
const opt_os_text = blk: {
const opt_os_or_vendor = it.next();
if (opt_os_or_vendor) |os_or_vendor| {
if (Target.parseVendorName(os_or_vendor)) |parsed_vendor| {
vendor = parsed_vendor;
break :blk it.next();
}
}
break :blk opt_os_or_vendor;
};
if (opt_os_text) |os_text| {
var version_str: []const u8 = undefined;
Target.parseOs(&query, os_text, &version_str) catch |er| switch (er) {
error.UnknownOs => return d.fatal("unknown operating system '{s}'", .{os_text}),
error.InvalidOsVersion => return d.fatal("invalid operating system version '{s}'", .{version_str}),
};
}
const opt_abi_text = it.next();
if (opt_abi_text) |abi_text| {
var version_str: []const u8 = undefined;
Target.parseAbi(&query, abi_text, &version_str) catch |er| switch (er) {
error.UnknownAbi => return d.fatal("unknown ABI '{s}'", .{abi_text}),
error.InvalidAbiVersion => return d.fatal("invalid ABI version '{s}'", .{version_str}),
error.InvalidApiVersion => return d.fatal("invalid Android API version '{s}'", .{version_str}),
};
}
if (it.next() != null) {
return d.fatal("unexpected extra field in target: '{s}'", .{arch_os_abi});
}
if (opt_cpu_features) |cpu_features| {
const all_features = arch.allFeaturesList();
var index: usize = 0;
while (index < cpu_features.len and
cpu_features[index] != '+' and
cpu_features[index] != '-')
{
index += 1;
}
const cpu_name = cpu_features[0..index];
const add_set = &query.cpu_features_add;
const sub_set = &query.cpu_features_sub;
if (mem.eql(u8, cpu_name, "native")) {
query.cpu_model = .native;
} else if (mem.eql(u8, cpu_name, "baseline")) {
query.cpu_model = .baseline;
} else {
query.cpu_model = .{ .explicit = arch.parseCpuModel(cpu_name) catch |er| switch (er) {
error.UnknownCpuModel => return d.fatal("unknown CPU model: '{s}'", .{cpu_name}),
} };
}
if (opt_sub_arch) |sub_arch| {
if (sub_arch.toFeature(arch)) |feature| {
add_set.addFeature(feature);
}
}
while (index < cpu_features.len) {
const op = cpu_features[index];
const set = switch (op) {
'+' => add_set,
'-' => sub_set,
else => unreachable,
};
index += 1;
const start = index;
while (index < cpu_features.len and
cpu_features[index] != '+' and
cpu_features[index] != '-')
{
index += 1;
}
const feature_name = cpu_features[start..index];
for (all_features, 0..) |feature, feat_index_usize| {
const feat_index: std.Target.Cpu.Feature.Set.Index = @intCast(feat_index_usize);
if (mem.eql(u8, feature_name, feature.name)) {
set.addFeature(feat_index);
break;
}
} else {
return d.fatal("unknown CPU feature: '{s}'", .{feature_name});
}
}
} else if (opt_sub_arch) |sub_arch| {
if (sub_arch.toFeature(arch)) |feature| {
query.cpu_features_add.addFeature(feature);
}
}
const zig_target = std.zig.system.resolveTargetQuery(d.comp.io, query) catch |e|
return d.fatal("unable to resolve target: {s}", .{errorDescription(e)});
if (query.isNative()) {
if (zig_target.os.tag.isDarwin()) {
vendor = .apple;
}
}
return .{
.cpu = zig_target.cpu,
.vendor = vendor,
.os = zig_target.os,
.abi = zig_target.abi,
.ofmt = zig_target.ofmt,
.dynamic_linker = zig_target.dynamic_linker,
};
}
pub fn fatal(d: *Driver, comptime fmt: []const u8, args: anytype) error{ FatalError, OutOfMemory } {
var sf = std.heap.stackFallback(1024, d.comp.gpa);
var allocating: std.Io.Writer.Allocating = .init(sf.get());
@ -971,8 +1142,9 @@ pub fn main(d: *Driver, tc: *Toolchain, args: []const []const u8, comptime fast_
};
tc.defineSystemIncludes() catch |er| switch (er) {
error.OutOfMemory => return error.OutOfMemory,
error.AroIncludeNotFound => return d.fatal("unable to find Aro builtin headers", .{}),
error.FatalError => return error.FatalError,
};
try d.comp.initSearchPath(d.includes.items, d.verbose_search_path);
const builtin_macros = d.comp.generateBuiltinMacros(d.system_defines) catch |er| switch (er) {
error.FileTooBig => return d.fatal("builtin macro source exceeded max size", .{}),
@ -1110,6 +1282,7 @@ fn processSource(
comptime fast_exit: bool,
asm_gen_fn: ?AsmCodeGenFn,
) !void {
const gpa = d.comp.gpa;
d.comp.generated_buf.items.len = 0;
const prev_total = d.diagnostics.errors;
@ -1118,7 +1291,7 @@ fn processSource(
var name_buf: [std.fs.max_name_bytes]u8 = undefined;
var opt_dep_file = try d.initDepFile(source, &name_buf, false);
defer if (opt_dep_file) |*dep_file| dep_file.deinit(d.comp.gpa);
defer if (opt_dep_file) |*dep_file| dep_file.deinit(gpa);
if (opt_dep_file) |*dep_file| pp.dep_file = dep_file;
@ -1138,7 +1311,13 @@ fn processSource(
}
}
try pp.preprocessSources(&.{ source, builtin, user_macros });
try pp.preprocessSources(.{
.main = source,
.builtin = builtin,
.command_line = user_macros,
.imacros = d.imacros.items,
.implicit_includes = d.implicit_includes.items,
});
var writer_buf: [4096]u8 = undefined;
if (opt_dep_file) |dep_file| {
@ -1219,8 +1398,8 @@ fn processSource(
.{},
);
const assembly = try asm_fn(d.comp.target, &tree);
defer assembly.deinit(d.comp.gpa);
const assembly = try asm_fn(d.comp.target.toZigTarget(), &tree);
defer assembly.deinit(gpa);
if (d.only_preprocess_and_compile) {
const out_file = d.comp.cwd.createFile(out_file_name, .{}) catch |er|
@ -1249,20 +1428,20 @@ fn processSource(
}
} else {
var ir = try tree.genIr();
defer ir.deinit(d.comp.gpa);
defer ir.deinit(gpa);
if (d.verbose_ir) {
var stdout = std.fs.File.stdout().writer(&writer_buf);
ir.dump(d.comp.gpa, d.detectConfig(stdout.file), &stdout.interface) catch {};
ir.dump(gpa, d.detectConfig(stdout.file), &stdout.interface) catch {};
}
var render_errors: Ir.Renderer.ErrorList = .{};
defer {
for (render_errors.values()) |msg| d.comp.gpa.free(msg);
render_errors.deinit(d.comp.gpa);
for (render_errors.values()) |msg| gpa.free(msg);
render_errors.deinit(gpa);
}
var obj = ir.render(d.comp.gpa, d.comp.target, &render_errors) catch |e| switch (e) {
var obj = ir.render(gpa, d.comp.target.toZigTarget(), &render_errors) catch |e| switch (e) {
error.OutOfMemory => return error.OutOfMemory,
error.LowerFail => {
return d.fatal(
@ -1286,8 +1465,8 @@ fn processSource(
if (fast_exit) std.process.exit(0); // Not linking, no need for cleanup.
return;
}
try d.link_objects.ensureUnusedCapacity(d.comp.gpa, 1);
d.link_objects.appendAssumeCapacity(try d.comp.gpa.dupe(u8, out_file_name));
try d.link_objects.ensureUnusedCapacity(gpa, 1);
d.link_objects.appendAssumeCapacity(try gpa.dupe(u8, out_file_name));
d.temp_file_count += 1;
if (fast_exit) {
try d.invokeLinker(tc, fast_exit);
@ -1357,17 +1536,17 @@ fn exitWithCleanup(d: *Driver, code: u8) noreturn {
/// Parses the various -fpic/-fPIC/-fpie/-fPIE arguments.
/// Then, smooshes them together with platform defaults, to decide whether
/// this compile should be using PIC mode or not.
/// Returns a tuple of ( backend.CodeGenOptions.PicLevel, IsPIE).
pub fn getPICMode(d: *Driver, lastpic: []const u8) Compilation.Error!struct { backend.CodeGenOptions.PicLevel, bool } {
const eqlIgnoreCase = std.ascii.eqlIgnoreCase;
const target = d.comp.target;
const is_pie_default = switch (target_util.isPIEDefault(target)) {
const target = &d.comp.target;
const is_pie_default = switch (target.isPIEDefault()) {
.yes => true,
.no => false,
.depends_on_linker => false,
};
const is_pic_default = switch (target_util.isPICdefault(target)) {
const is_pic_default = switch (target.isPICdefault()) {
.yes => true,
.no => false,
.depends_on_linker => false,
@ -1423,7 +1602,7 @@ pub fn getPICMode(d: *Driver, lastpic: []const u8) Compilation.Error!struct { ba
// '-fno-...' arguments, both PIC and PIE are disabled. Any PIE
// option implicitly enables PIC at the same level.
if (target.os.tag == .windows and
!target_util.isCygwinMinGW(target) and
!target.isMinGW() and
(eqlIgnoreCase(lastpic, "-fpic") or eqlIgnoreCase(lastpic, "-fpie"))) // -fpic/-fPIC, -fpie/-fPIE
{
try d.unsupportedOptionForTarget(target, lastpic);
@ -1434,7 +1613,7 @@ pub fn getPICMode(d: *Driver, lastpic: []const u8) Compilation.Error!struct { ba
// Check whether the tool chain trumps the PIC-ness decision. If the PIC-ness
// is forced, then neither PIC nor PIE flags will have no effect.
const forced = switch (target_util.isPICDefaultForced(target)) {
const forced = switch (target.isPICDefaultForced()) {
.yes => true,
.no => false,
.depends_on_linker => false,
@ -1447,7 +1626,7 @@ pub fn getPICMode(d: *Driver, lastpic: []const u8) Compilation.Error!struct { ba
is_piclevel_two = mem.eql(u8, lastpic, "-fPIE") or mem.eql(u8, lastpic, "-fPIC");
} else {
pic, pie = .{ false, false };
if (target_util.isPS(target)) {
if (target.isPS()) {
if (d.comp.cmodel != .kernel) {
pic = true;
try d.warn(
@ -1459,7 +1638,7 @@ pub fn getPICMode(d: *Driver, lastpic: []const u8) Compilation.Error!struct { ba
}
}
if (pic and (target.os.tag.isDarwin() or target_util.isPS(target))) {
if (pic and (target.os.tag.isDarwin() or target.isPS())) {
is_piclevel_two = is_piclevel_two or is_pic_default;
}

View File

@ -2,7 +2,8 @@
const std = @import("std");
const mem = std.mem;
const Filesystem = @import("Filesystem.zig").Filesystem;
const Target = @import("../Target.zig");
const Toolchain = @import("../Toolchain.zig");
const MAX_BYTES = 1024; // TODO: Can we assume 1024 bytes enough for the info we need?
@ -168,9 +169,9 @@ fn scanForOsRelease(buf: []const u8) ?Tag {
return null;
}
fn detectOsRelease(fs: Filesystem) ?Tag {
fn detectOsRelease(tc: *const Toolchain) ?Tag {
var buf: [MAX_BYTES]u8 = undefined;
const data = fs.readFile("/etc/os-release", &buf) orelse fs.readFile("/usr/lib/os-release", &buf) orelse return null;
const data = tc.readFile("/etc/os-release", &buf) orelse tc.readFile("/usr/lib/os-release", &buf) orelse return null;
return scanForOsRelease(data);
}
@ -215,9 +216,9 @@ fn scanForLSBRelease(buf: []const u8) ?Tag {
return null;
}
fn detectLSBRelease(fs: Filesystem) ?Tag {
fn detectLSBRelease(tc: *const Toolchain) ?Tag {
var buf: [MAX_BYTES]u8 = undefined;
const data = fs.readFile("/etc/lsb-release", &buf) orelse return null;
const data = tc.readFile("/etc/lsb-release", &buf) orelse return null;
return scanForLSBRelease(data);
}
@ -233,9 +234,9 @@ fn scanForRedHat(buf: []const u8) Tag {
return .unknown;
}
fn detectRedhat(fs: Filesystem) ?Tag {
fn detectRedhat(tc: *const Toolchain) ?Tag {
var buf: [MAX_BYTES]u8 = undefined;
const data = fs.readFile("/etc/redhat-release", &buf) orelse return null;
const data = tc.readFile("/etc/redhat-release", &buf) orelse return null;
return scanForRedHat(data);
}
@ -269,21 +270,21 @@ fn scanForDebian(buf: []const u8) Tag {
return .unknown;
}
fn detectDebian(fs: Filesystem) ?Tag {
fn detectDebian(tc: *const Toolchain) ?Tag {
var buf: [MAX_BYTES]u8 = undefined;
const data = fs.readFile("/etc/debian_version", &buf) orelse return null;
const data = tc.readFile("/etc/debian_version", &buf) orelse return null;
return scanForDebian(data);
}
pub fn detect(target: std.Target, fs: Filesystem) Tag {
pub fn detect(target: *const Target, tc: *const Toolchain) Tag {
if (target.os.tag != .linux) return .unknown;
if (detectOsRelease(fs)) |tag| return tag;
if (detectLSBRelease(fs)) |tag| return tag;
if (detectRedhat(fs)) |tag| return tag;
if (detectDebian(fs)) |tag| return tag;
if (detectOsRelease(tc)) |tag| return tag;
if (detectLSBRelease(tc)) |tag| return tag;
if (detectRedhat(tc)) |tag| return tag;
if (detectDebian(tc)) |tag| return tag;
if (fs.exists("/etc/gentoo-release")) return .gentoo;
if (tc.exists("/etc/gentoo-release")) return .gentoo;
return .unknown;
}

View File

@ -1,5 +1,5 @@
const std = @import("std");
const Filesystem = @import("Filesystem.zig").Filesystem;
const Toolchain = @import("../Toolchain.zig");
/// Large enough for GCCDetector for Linux; may need to be increased to support other toolchains.
const max_multilibs = 4;
@ -10,10 +10,10 @@ pub const Detected = struct {
selected: Multilib = .{},
biarch_sibling: ?Multilib = null,
pub fn filter(d: *Detected, multilib_filter: Filter, fs: Filesystem) void {
pub fn filter(d: *Detected, multilib_filter: Filter, tc: *const Toolchain) void {
var found_count: u8 = 0;
for (d.multilibs()) |multilib| {
if (multilib_filter.exists(multilib, fs)) {
if (multilib_filter.exists(multilib, tc)) {
d.multilib_buf[found_count] = multilib;
found_count += 1;
}
@ -51,8 +51,8 @@ pub const Detected = struct {
pub const Filter = struct {
base: [2][]const u8,
file: []const u8,
pub fn exists(self: Filter, m: Multilib, fs: Filesystem) bool {
return fs.joinedExists(&.{ self.base[0], self.base[1], m.gcc_suffix, self.file });
pub fn exists(self: Filter, m: Multilib, tc: *const Toolchain) bool {
return tc.joinedExists(&.{ self.base[0], self.base[1], m.gcc_suffix, self.file });
}
};

View File

@ -22,11 +22,12 @@ const Identifier = struct {
byte_offset: u32 = 0,
fn slice(self: Identifier, comp: *const Compilation) []const u8 {
var tmp_tokenizer = Tokenizer{
var tmp_tokenizer: Tokenizer = .{
.buf = comp.getSource(self.id).buf,
.langopts = comp.langopts,
.index = self.byte_offset,
.source = .generated,
.splice_locs = &.{},
};
const res = tmp_tokenizer.next();
return tmp_tokenizer.buf[res.start..res.end];

File diff suppressed because it is too large Load Diff

View File

@ -1348,6 +1348,11 @@ pub const invalid_asm_str: Diagnostic = .{
.kind = .@"error",
};
pub const invalid_asm_output: Diagnostic = .{
.fmt = "invalid lvalue in asm output",
.kind = .@"error",
};
pub const dollar_in_identifier_extension: Diagnostic = .{
.fmt = "'$' in identifier",
.opt = .@"dollar-in-identifier-extension",
@ -1744,6 +1749,7 @@ pub const enum_fixed: Diagnostic = .{
.fmt = "enumeration types with a fixed underlying type are a Clang extension",
.kind = .off,
.opt = .@"fixed-enum-extension",
.suppress_version = .c23,
.extension = true,
};
@ -1767,6 +1773,29 @@ pub const enum_not_representable_fixed: Diagnostic = .{
.kind = .@"error",
};
pub const enum_forward_declaration: Diagnostic = .{
.fmt = "ISO C forbids forward references to 'enum' types",
.kind = .off,
.extension = true,
};
pub const enum_atomic_ignored: Diagnostic = .{
.fmt = "'_Atomic' qualifier ignored; operations involving the enumeration type will be non-atomic",
.kind = .@"error",
.opt = .@"underlying-atomic-qualifier-ignored",
};
pub const enum_qualifiers_ignored: Diagnostic = .{
.fmt = "qualifiers in enumeration underlying type ignored",
.kind = .warning,
.opt = .@"underlying-cv-qualifier-ignored",
};
pub const enum_invalid_underlying_type: Diagnostic = .{
.fmt = "non-integral type {qt} is an invalid underlying type",
.kind = .@"error",
};
pub const transparent_union_wrong_type: Diagnostic = .{
.fmt = "'transparent_union' attribute only applies to unions",
.opt = .@"ignored-attributes",
@ -2184,6 +2213,31 @@ pub const not_floating_type: Diagnostic = .{
.kind = .@"error",
};
pub const elementwise_type: Diagnostic = .{
.fmt = "argument must be a vector{s} (was '{qt}')",
.kind = .@"error",
};
pub const nontemporal_address_pointer: Diagnostic = .{
.fmt = "address argument to nontemporal builtin must be a pointer ('{qt}' invalid)",
.kind = .@"error",
};
pub const nontemporal_address_type: Diagnostic = .{
.fmt = "address argument to nontemporal builtin must be a pointer to integer, float, pointer, or a vector of such types ('{qt}' invalid)",
.kind = .@"error",
};
pub const atomic_address_pointer: Diagnostic = .{
.fmt = "address argument to atomic builtin must be a pointer ('{qt}' invalid)",
.kind = .@"error",
};
pub const atomic_address_type: Diagnostic = .{
.fmt = "address argument to atomic builtin must be a pointer to an integer or a pointer types ('{qt}' invalid)",
.kind = .@"error",
};
pub const argument_types_differ: Diagnostic = .{
.fmt = "arguments are of different types ({qt} vs {qt})",
.kind = .@"error",
@ -2304,12 +2358,6 @@ pub const overflow_result_requires_ptr: Diagnostic = .{
pub const attribute_todo: Diagnostic = .{
.fmt = "TODO: implement '{s}' attribute for {s}",
.kind = .warning,
.opt = .@"attribute-todo",
};
pub const invalid_type_underlying_enum: Diagnostic = .{
.fmt = "non-integral type {qt} is an invalid underlying type",
.kind = .@"error",
};
pub const auto_type_self_initialized: Diagnostic = .{
@ -2422,3 +2470,13 @@ pub const nonnull_not_applicable: Diagnostic = .{
.kind = .warning,
.opt = .@"ignored-attributes",
};
pub const mixing_decimal_floats: Diagnostic = .{
.fmt = "cannot mix operands of decimal floating and other floating types",
.kind = .@"error",
};
pub const invalid_attribute_location: Diagnostic = .{
.fmt = "{s} cannot appear here",
.kind = .@"error",
};

View File

@ -10,6 +10,13 @@ pub const Error = Compilation.Error || error{ UnknownPragma, StopPreprocessing }
const Pragma = @This();
/// A do-nothing pragma; useful for unwrapping an optional pragma into a pragma that does nothing in the null case.
pub const do_nothing: Pragma = .{
.deinit = deinit_nothing,
};
fn deinit_nothing(_: *Pragma, _: *Compilation) void {}
/// Called during Preprocessor.init
beforePreprocess: ?*const fn (*Pragma, *Compilation) void = null,
@ -40,6 +47,12 @@ preserveTokens: ?*const fn (*Pragma, *Preprocessor, start_idx: TokenIndex) bool
/// The parser's `p.tok_i` field must not be changed
parserHandler: ?*const fn (*Pragma, *Parser, start_idx: TokenIndex) Compilation.Error!void = null,
/// Whether to perform preprocessor token expansion on the token at index `i`. 0 is the index of the first
/// token after the name token (the name token is never expanded). Whitespace tokens are always skipped when calculating
/// token indices. For example, in `#pragma GCC warning "A warning"` token 0 is `warning` and token 1 is `"A warning"`
/// By default, all tokens are expanded; use this to override that behavior.
shouldExpandTokenAtIndexHandler: ?*const fn (*const Pragma, i: TokenIndex) bool = null,
pub fn pasteTokens(pp: *Preprocessor, start_idx: TokenIndex) ![]const u8 {
if (pp.tokens.get(start_idx).id == .nl) return error.ExpectedStringLiteral;
@ -84,6 +97,11 @@ pub fn parserCB(self: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Er
if (self.parserHandler) |func| return func(self, p, start_idx);
}
pub fn shouldExpandTokenAtIndex(self: *const Pragma, idx: TokenIndex) bool {
if (self.shouldExpandTokenAtIndexHandler) |func| return func(self, idx);
return true;
}
pub const Diagnostic = struct {
fmt: []const u8,
kind: Diagnostics.Message.Kind,

File diff suppressed because it is too large Load Diff

View File

@ -73,6 +73,16 @@ pub const line_invalid_filename: Diagnostic = .{
.kind = .@"error",
};
pub const line_invalid_number: Diagnostic = .{
.fmt = "{s} directive requires a positive integer argument",
.kind = .@"error",
};
pub const line_invalid_flag: Diagnostic = .{
.fmt = "invalid flag '{s}' in line marker directive",
.kind = .@"error",
};
pub const unterminated_conditional_directive: Diagnostic = .{
.fmt = "unterminated conditional directive",
.kind = .@"error",
@ -456,3 +466,9 @@ pub const no_argument_variadic_macro: Diagnostic = .{
.kind = .off,
.extension = true,
};
pub const pragma_once_in_main_file: Diagnostic = .{
.fmt = "#pragma once in main file",
.kind = .warning,
.opt = .@"pragma-once-outside-header",
};

View File

@ -1,9 +1,15 @@
const std = @import("std");
pub const Id = enum(u32) {
unused = 0,
generated = 1,
_,
pub const Id = packed struct(u32) {
index: enum(u31) {
unused = std.math.maxInt(u31) - 0,
generated = std.math.maxInt(u31) - 1,
_,
},
alias: bool = false,
pub const unused: Id = .{ .index = .unused };
pub const generated: Id = .{ .index = .generated };
};
/// Classifies the file for line marker output in -E mode
@ -52,20 +58,6 @@ id: Id,
splice_locs: []const u32,
kind: Kind,
/// Todo: binary search instead of scanning entire `splice_locs`.
pub fn numSplicesBefore(source: Source, byte_offset: u32) u32 {
for (source.splice_locs, 0..) |splice_offset, i| {
if (splice_offset > byte_offset) return @intCast(i);
}
return @intCast(source.splice_locs.len);
}
/// Returns the actual line number (before newline splicing) of a Location
/// This corresponds to what the user would actually see in their text editor
pub fn physicalLine(source: Source, loc: Location) u32 {
return loc.line + source.numSplicesBefore(loc.byte_offset);
}
pub fn lineCol(source: Source, loc: Location) ExpandedLocation {
var start: usize = 0;
// find the start of the line which is either a newline or a splice
@ -117,7 +109,7 @@ pub fn lineCol(source: Source, loc: Location) ExpandedLocation {
return .{
.path = source.path,
.line = source.buf[start..nl],
.line_no = loc.line + splice_index,
.line_no = loc.line,
.col = col,
.width = width,
.end_with_splice = end_with_splice,

1744
lib/compiler/aro/aro/Target.zig vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -144,6 +144,11 @@ pub const Token = struct {
hash,
hash_hash,
/// Special token for handling expansion of parameters to builtin preprocessor functions
macro_param_builtin_func,
/// Special token for implementing builtin object macros
macro_builtin_obj,
/// Special token to speed up preprocessing, `loc.end` will be an index to the param list.
macro_param,
/// Special token to signal that the argument must be replaced without expansion (e.g. in concatenation)
@ -154,40 +159,6 @@ pub const Token = struct {
stringify_va_args,
/// Special macro whitespace, always equal to a single space
macro_ws,
/// Special token for implementing __has_attribute
macro_param_has_attribute,
/// Special token for implementing __has_c_attribute
macro_param_has_c_attribute,
/// Special token for implementing __has_declspec_attribute
macro_param_has_declspec_attribute,
/// Special token for implementing __has_warning
macro_param_has_warning,
/// Special token for implementing __has_feature
macro_param_has_feature,
/// Special token for implementing __has_extension
macro_param_has_extension,
/// Special token for implementing __has_builtin
macro_param_has_builtin,
/// Special token for implementing __has_include
macro_param_has_include,
/// Special token for implementing __has_include_next
macro_param_has_include_next,
/// Special token for implementing __has_embed
macro_param_has_embed,
/// Special token for implementing __is_identifier
macro_param_is_identifier,
/// Special token for implementing __FILE__
macro_file,
/// Special token for implementing __LINE__
macro_line,
/// Special token for implementing __COUNTER__
macro_counter,
/// Special token for implementing _Pragma
macro_param_pragma_operator,
/// Special token for implementing __identifier (MS extension)
macro_param_ms_identifier,
/// Special token for implementing __pragma (MS extension)
macro_param_ms_pragma,
/// Special identifier for implementing __func__
macro_func,
@ -195,12 +166,6 @@ pub const Token = struct {
macro_function,
/// Special identifier for implementing __PRETTY_FUNCTION__
macro_pretty_func,
/// Special identifier for implementing __DATE__
macro_date,
/// Special identifier for implementing __TIME__
macro_time,
/// Special identifier for implementing __TIMESTAMP__
macro_timestamp,
keyword_auto,
keyword_auto_type,
@ -268,6 +233,17 @@ pub const Token = struct {
keyword_false,
keyword_nullptr,
keyword_typeof_unqual,
keyword_float16,
keyword_float32,
keyword_float64,
keyword_float128,
keyword_float32x,
keyword_float64x,
keyword_float128x,
keyword_dfloat32,
keyword_dfloat64,
keyword_dfloat128,
keyword_dfloat64x,
// Preprocessor directives
keyword_include,
@ -307,19 +283,17 @@ pub const Token = struct {
keyword_asm,
keyword_asm1,
keyword_asm2,
/// _Float128
keyword_float128_1,
/// __float128
keyword_float128_2,
keyword_float128_1,
keyword_int128,
keyword_imag1,
keyword_imag2,
keyword_real1,
keyword_real2,
keyword_float16,
// clang keywords
keyword_fp16,
keyword_bf16,
// ms keywords
keyword_declspec,
@ -375,6 +349,9 @@ pub const Token = struct {
/// completion of the preceding #include
include_resume,
/// Virtual linemarker token output from preprocessor to represent actual linemarker in the source file
linemarker,
/// A comment token if asked to preserve comments.
comment,
@ -408,9 +385,6 @@ pub const Token = struct {
.macro_func,
.macro_function,
.macro_pretty_func,
.macro_date,
.macro_time,
.macro_timestamp,
.keyword_auto,
.keyword_auto_type,
.keyword_break,
@ -480,7 +454,6 @@ pub const Token = struct {
.keyword_asm1,
.keyword_asm2,
.keyword_float128_1,
.keyword_float128_2,
.keyword_int128,
.keyword_imag1,
.keyword_imag2,
@ -488,6 +461,7 @@ pub const Token = struct {
.keyword_real2,
.keyword_float16,
.keyword_fp16,
.keyword_bf16,
.keyword_declspec,
.keyword_int64,
.keyword_int64_2,
@ -527,6 +501,16 @@ pub const Token = struct {
.keyword_false,
.keyword_nullptr,
.keyword_typeof_unqual,
.keyword_float32,
.keyword_float64,
.keyword_float128,
.keyword_float32x,
.keyword_float64x,
.keyword_float128x,
.keyword_dfloat32,
.keyword_dfloat64,
.keyword_dfloat128,
.keyword_dfloat64x,
=> return true,
else => return false,
}
@ -570,6 +554,7 @@ pub const Token = struct {
return switch (id) {
.include_start,
.include_resume,
.linemarker,
=> unreachable,
.unterminated_comment,
@ -605,27 +590,9 @@ pub const Token = struct {
.macro_param_no_expand,
.stringify_param,
.stringify_va_args,
.macro_param_has_attribute,
.macro_param_has_c_attribute,
.macro_param_has_declspec_attribute,
.macro_param_has_warning,
.macro_param_has_feature,
.macro_param_has_extension,
.macro_param_has_builtin,
.macro_param_has_include,
.macro_param_has_include_next,
.macro_param_has_embed,
.macro_param_is_identifier,
.macro_file,
.macro_line,
.macro_counter,
.macro_time,
.macro_date,
.macro_timestamp,
.macro_param_pragma_operator,
.macro_param_ms_identifier,
.macro_param_ms_pragma,
.placemarker,
.macro_param_builtin_func,
.macro_builtin_obj,
=> "",
.macro_ws => " ",
@ -744,6 +711,17 @@ pub const Token = struct {
.keyword_false => "false",
.keyword_nullptr => "nullptr",
.keyword_typeof_unqual => "typeof_unqual",
.keyword_float16 => "_Float16",
.keyword_float32 => "_Float32",
.keyword_float64 => "_Float64",
.keyword_float128 => "_Float128",
.keyword_float32x => "_Float32x",
.keyword_float64x => "_Float64x",
.keyword_float128x => "_Float128x",
.keyword_dfloat32 => "_Decimal32",
.keyword_dfloat64 => "_Decimal64",
.keyword_dfloat128 => "_Decimal128",
.keyword_dfloat64x => "_Decimal64x",
.keyword_include => "include",
.keyword_include_next => "include_next",
.keyword_embed => "embed",
@ -780,15 +758,14 @@ pub const Token = struct {
.keyword_asm => "asm",
.keyword_asm1 => "__asm",
.keyword_asm2 => "__asm__",
.keyword_float128_1 => "_Float128",
.keyword_float128_2 => "__float128",
.keyword_float128_1 => "__float128",
.keyword_int128 => "__int128",
.keyword_imag1 => "__imag",
.keyword_imag2 => "__imag__",
.keyword_real1 => "__real",
.keyword_real2 => "__real__",
.keyword_float16 => "_Float16",
.keyword_fp16 => "__fp16",
.keyword_bf16 => "__bf16",
.keyword_declspec => "__declspec",
.keyword_int64 => "__int64",
.keyword_int64_2 => "_int64",
@ -1030,6 +1007,17 @@ pub const Token = struct {
.{ "false", .keyword_false },
.{ "nullptr", .keyword_nullptr },
.{ "typeof_unqual", .keyword_typeof_unqual },
.{ "_Float16", .keyword_float16 },
.{ "_Float32", .keyword_float32 },
.{ "_Float64", .keyword_float64 },
.{ "_Float128", .keyword_float128 },
.{ "_Float32x", .keyword_float32x },
.{ "_Float64x", .keyword_float64x },
.{ "_Float128x", .keyword_float128x },
.{ "_Decimal32", .keyword_dfloat32 },
.{ "_Decimal64", .keyword_dfloat64 },
.{ "_Decimal128", .keyword_dfloat128 },
.{ "_Decimal64x", .keyword_dfloat64x },
// Preprocessor directives
.{ "include", .keyword_include },
@ -1073,17 +1061,16 @@ pub const Token = struct {
.{ "asm", .keyword_asm },
.{ "__asm", .keyword_asm1 },
.{ "__asm__", .keyword_asm2 },
.{ "_Float128", .keyword_float128_1 },
.{ "__float128", .keyword_float128_2 },
.{ "__float128", .keyword_float128_1 },
.{ "__int128", .keyword_int128 },
.{ "__imag", .keyword_imag1 },
.{ "__imag__", .keyword_imag2 },
.{ "__real", .keyword_real1 },
.{ "__real__", .keyword_real2 },
.{ "_Float16", .keyword_float16 },
// clang keywords
.{ "__fp16", .keyword_fp16 },
.{ "__bf16", .keyword_bf16 },
// ms keywords
.{ "__declspec", .keyword_declspec },
@ -1126,6 +1113,8 @@ index: u32 = 0,
source: Source.Id,
langopts: LangOpts,
line: u32 = 1,
splice_index: u32 = 0,
splice_locs: []const u32,
pub fn next(self: *Tokenizer) Token {
var state: enum {
@ -1909,6 +1898,12 @@ pub fn next(self: *Tokenizer) Token {
}
}
for (self.splice_locs[self.splice_index..]) |splice_offset| {
if (splice_offset > start) break;
self.line += 1;
self.splice_index += 1;
}
return .{
.id = id,
.start = start,
@ -2331,7 +2326,7 @@ test "Tokenizer fuzz test" {
fn testOne(_: @This(), input_bytes: []const u8) anyerror!void {
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena.deinit();
var comp = Compilation.init(std.testing.allocator, arena.allocator(), undefined, std.fs.cwd());
var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd());
defer comp.deinit();
const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes);
@ -2340,6 +2335,7 @@ test "Tokenizer fuzz test" {
.buf = source.buf,
.source = source.id,
.langopts = comp.langopts,
.splice_locs = &.{},
};
while (true) {
const prev_index = tokenizer.index;
@ -2355,16 +2351,17 @@ test "Tokenizer fuzz test" {
fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, langopts: ?LangOpts) !void {
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
defer arena.deinit();
var comp = Compilation.init(std.testing.allocator, arena.allocator(), undefined, std.fs.cwd());
var comp = Compilation.init(std.testing.allocator, arena.allocator(), std.testing.io, undefined, std.fs.cwd());
defer comp.deinit();
if (langopts) |provided| {
comp.langopts = provided;
}
const source = try comp.addSourceFromBuffer("path", contents);
var tokenizer = Tokenizer{
var tokenizer: Tokenizer = .{
.buf = source.buf,
.source = source.id,
.langopts = comp.langopts,
.splice_locs = &.{},
};
var i: usize = 0;
while (i < expected_tokens.len) {

View File

@ -5,9 +5,8 @@ const system_defaults = @import("system_defaults");
const Compilation = @import("Compilation.zig");
const Driver = @import("Driver.zig");
const Filesystem = @import("Driver/Filesystem.zig").Filesystem;
const Multilib = @import("Driver/Multilib.zig");
const target_util = @import("target.zig");
const Target = @import("Target.zig");
pub const PathList = std.ArrayList([]const u8);
@ -41,7 +40,6 @@ const Inner = union(enum) {
const Toolchain = @This();
filesystem: Filesystem,
driver: *Driver,
/// The list of toolchain specific path prefixes to search for libraries.
@ -57,8 +55,8 @@ selected_multilib: Multilib = .{},
inner: Inner = .{ .uninitialized = {} },
pub fn getTarget(tc: *const Toolchain) std.Target {
return tc.driver.comp.target;
pub fn getTarget(tc: *const Toolchain) *const Target {
return &tc.driver.comp.target;
}
fn getDefaultLinker(tc: *const Toolchain) []const u8 {
@ -107,7 +105,7 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
if (std.fs.path.dirname(path) == null) {
path = tc.getProgramPath(path, buf);
}
if (tc.filesystem.canExecute(path)) {
if (tc.canExecute(path)) {
return path;
}
}
@ -139,7 +137,7 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
}
if (std.fs.path.isAbsolute(use_linker)) {
if (tc.filesystem.canExecute(use_linker)) {
if (tc.canExecute(use_linker)) {
return use_linker;
}
} else {
@ -155,7 +153,7 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
}
linker_name.appendSliceAssumeCapacity(use_linker);
const linker_path = tc.getProgramPath(linker_name.items, buf);
if (tc.filesystem.canExecute(linker_path)) {
if (tc.canExecute(linker_path)) {
return linker_path;
}
}
@ -227,12 +225,12 @@ fn getProgramPath(tc: *const Toolchain, name: []const u8, buf: []u8) []const u8
const candidate = std.fs.path.join(fib.allocator(), &.{ program_path, tool_name }) catch continue;
if (tc.filesystem.canExecute(candidate) and candidate.len <= buf.len) {
if (tc.canExecute(candidate) and candidate.len <= buf.len) {
@memcpy(buf[0..candidate.len], candidate);
return buf[0..candidate.len];
}
}
return tc.filesystem.findProgramByName(tc.driver.comp.gpa, name, tc.driver.comp.environment.path, buf) orelse continue;
return tc.findProgramByName(name, buf) orelse continue;
}
@memcpy(buf[0..name.len], name);
return buf[0..name.len];
@ -256,7 +254,7 @@ pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 {
// todo check compiler RT path
const aro_dir = std.fs.path.dirname(tc.driver.aro_name) orelse "";
const candidate = try std.fs.path.join(allocator, &.{ aro_dir, "..", name });
if (tc.filesystem.exists(candidate)) {
if (tc.exists(candidate)) {
return arena.dupe(u8, candidate);
}
@ -283,7 +281,7 @@ fn searchPaths(tc: *const Toolchain, fib: *std.heap.FixedBufferAllocator, sysroo
else
std.fs.path.join(fib.allocator(), &.{ path, name }) catch continue;
if (tc.filesystem.exists(candidate)) {
if (tc.exists(candidate)) {
return candidate;
}
}
@ -304,7 +302,7 @@ pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind
const candidate = try std.fs.path.join(fib.allocator(), components);
if (tc.filesystem.exists(candidate)) {
if (tc.exists(candidate)) {
const duped = try tc.driver.comp.arena.dupe(u8, candidate);
const dest = switch (dest_kind) {
.library => &tc.library_paths,
@ -404,11 +402,11 @@ fn addUnwindLibrary(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !voi
const target = tc.getTarget();
if ((target.abi.isAndroid() and unw == .libgcc) or
target.ofmt == .wasm or
target_util.isWindowsMSVCEnvironment(target) or
target.isWindowsMSVCEnvironment() or
unw == .none) return;
const lgk = tc.getLibGCCKind();
const as_needed = lgk == .unspecified and !target.abi.isAndroid() and !target_util.isCygwinMinGW(target);
const as_needed = lgk == .unspecified and !target.abi.isAndroid() and !target.isMinGW();
try argv.ensureUnusedCapacity(tc.driver.comp.gpa, 3);
if (as_needed) {
@ -420,7 +418,7 @@ fn addUnwindLibrary(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !voi
.compiler_rt => if (lgk == .static) {
argv.appendAssumeCapacity("-l:libunwind.a");
} else if (lgk == .shared) {
if (target_util.isCygwinMinGW(target)) {
if (target.isMinGW()) {
argv.appendAssumeCapacity("-l:libunwind.dll.a");
} else {
argv.appendAssumeCapacity("-l:libunwind.so");
@ -455,7 +453,7 @@ pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !v
// TODO
},
.libgcc => {
if (target_util.isKnownWindowsMSVCEnvironment(target)) {
if (target.isKnownWindowsMSVCEnvironment()) {
const rtlib_str = tc.driver.rtlib orelse system_defaults.rtlib;
if (!mem.eql(u8, rtlib_str, "platform")) {
try tc.driver.err("unsupported runtime library 'libgcc' for platform 'MSVC'", .{});
@ -477,14 +475,107 @@ pub fn defineSystemIncludes(tc: *Toolchain) !void {
.unknown => {
if (tc.driver.nostdinc) return;
const comp = tc.driver.comp;
if (!tc.driver.nobuiltininc) {
try comp.addBuiltinIncludeDir(tc.driver.aro_name, tc.driver.resource_dir);
try tc.addBuiltinIncludeDir();
}
if (!tc.driver.nostdlibinc) {
try comp.addSystemIncludeDir("/usr/include");
try tc.addSystemIncludeDir("/usr/include");
}
},
};
}
pub fn addSystemIncludeDir(tc: *const Toolchain, path: []const u8) !void {
const d = tc.driver;
_ = try d.includes.append(d.comp.gpa, .{ .kind = .system, .path = try d.comp.arena.dupe(u8, path) });
}
/// Add built-in aro headers directory to system include paths
pub fn addBuiltinIncludeDir(tc: *const Toolchain) !void {
const d = tc.driver;
const comp = d.comp;
const gpa = comp.gpa;
const arena = comp.arena;
try d.includes.ensureUnusedCapacity(gpa, 1);
if (d.resource_dir) |resource_dir| {
const path = try std.fs.path.join(arena, &.{ resource_dir, "include" });
comp.cwd.access(path, .{}) catch {
return d.fatal("Aro builtin headers not found in provided -resource-dir", .{});
};
d.includes.appendAssumeCapacity(.{ .kind = .system, .path = path });
return;
}
var search_path = d.aro_name;
while (std.fs.path.dirname(search_path)) |dirname| : (search_path = dirname) {
var base_dir = d.comp.cwd.openDir(dirname, .{}) catch continue;
defer base_dir.close();
base_dir.access("include/stddef.h", .{}) catch continue;
const path = try std.fs.path.join(arena, &.{ dirname, "include" });
d.includes.appendAssumeCapacity(.{ .kind = .system, .path = path });
break;
} else return d.fatal("unable to find Aro builtin headers", .{});
}
/// Read the file at `path` into `buf`.
/// Returns null if any errors are encountered
/// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned
pub fn readFile(tc: *const Toolchain, path: []const u8, buf: []u8) ?[]const u8 {
const comp = tc.driver.comp;
return comp.cwd.adaptToNewApi().readFile(comp.io, path, buf) catch null;
}
pub fn exists(tc: *const Toolchain, path: []const u8) bool {
const comp = tc.driver.comp;
comp.cwd.adaptToNewApi().access(comp.io, path, .{}) catch return false;
return true;
}
pub fn joinedExists(tc: *const Toolchain, parts: []const []const u8) bool {
var buf: [std.fs.max_path_bytes]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&buf);
const joined = std.fs.path.join(fib.allocator(), parts) catch return false;
return tc.exists(joined);
}
pub fn canExecute(tc: *const Toolchain, path: []const u8) bool {
if (@import("builtin").os.tag == .windows) {
// TODO
return true;
}
const comp = tc.driver.comp;
comp.cwd.adaptToNewApi().access(comp.io, path, .{ .execute = true }) catch return false;
// Todo: ensure path is not a directory
return true;
}
/// Search for an executable named `name` using platform-specific logic
/// If it's found, write the full path to `buf` and return a slice of it
/// Otherwise retun null
pub fn findProgramByName(tc: *const Toolchain, name: []const u8, buf: []u8) ?[]const u8 {
std.debug.assert(name.len > 0);
if (@import("builtin").os.tag == .windows) {
// TODO
return null;
}
const comp = tc.driver.comp;
// TODO: does WASI need special handling?
if (mem.indexOfScalar(u8, name, '/') != null) {
@memcpy(buf[0..name.len], name);
return buf[0..name.len];
}
const path_env = comp.environment.path orelse return null;
var fib = std.heap.FixedBufferAllocator.init(buf);
var it = mem.tokenizeScalar(u8, path_env, std.fs.path.delimiter);
while (it.next()) |path_dir| {
defer fib.reset();
const full_path = std.fs.path.join(fib.allocator(), &.{ path_dir, name }) catch continue;
if (tc.canExecute(full_path)) return full_path;
}
return null;
}

View File

@ -36,7 +36,7 @@ pub const TokenWithExpansionLocs = struct {
pub fn expansionSlice(tok: TokenWithExpansionLocs) []const Source.Location {
const locs = tok.expansion_locs orelse return &[0]Source.Location{};
var i: usize = 0;
while (locs[i].id != .unused) : (i += 1) {}
while (locs[i].id.index != .unused) : (i += 1) {}
return locs[0..i];
}
@ -56,7 +56,7 @@ pub const TokenWithExpansionLocs = struct {
if (tok.expansion_locs) |locs| {
var i: usize = 0;
while (locs[i].id != .unused) : (i += 1) {}
while (locs[i].id.index != .unused) : (i += 1) {}
list.items = locs[0..i];
while (locs[i].byte_offset != 1) : (i += 1) {}
list.capacity = i + 1;
@ -68,7 +68,7 @@ pub const TokenWithExpansionLocs = struct {
try list.ensureTotalCapacity(gpa, wanted_len);
for (new) |new_loc| {
if (new_loc.id == .generated) continue;
if (new_loc.id.index == .generated) continue;
list.appendAssumeCapacity(new_loc);
}
}
@ -76,7 +76,7 @@ pub const TokenWithExpansionLocs = struct {
pub fn free(expansion_locs: ?[*]Source.Location, gpa: std.mem.Allocator) void {
const locs = expansion_locs orelse return;
var i: usize = 0;
while (locs[i].id != .unused) : (i += 1) {}
while (locs[i].id.index != .unused) : (i += 1) {}
while (locs[i].byte_offset != 1) : (i += 1) {}
gpa.free(locs[0 .. i + 1]);
}
@ -133,10 +133,11 @@ pub fn deinit(tree: *Tree) void {
tree.* = undefined;
}
pub const GNUAssemblyQualifiers = struct {
pub const GNUAssemblyQualifiers = packed struct(u32) {
@"volatile": bool = false,
@"inline": bool = false,
goto: bool = false,
_: u29 = 0,
};
pub const Node = union(enum) {
@ -146,7 +147,7 @@ pub const Node = union(enum) {
param: Param,
variable: Variable,
typedef: Typedef,
global_asm: SimpleAsm,
global_asm: GlobalAsm,
struct_decl: ContainerDecl,
union_decl: ContainerDecl,
@ -173,7 +174,7 @@ pub const Node = union(enum) {
break_stmt: BreakStmt,
null_stmt: NullStmt,
return_stmt: ReturnStmt,
gnu_asm_simple: SimpleAsm,
asm_stmt: AsmStmt,
assign_expr: Binary,
mul_assign_expr: Binary,
@ -333,7 +334,7 @@ pub const Node = union(enum) {
implicit: bool,
};
pub const SimpleAsm = struct {
pub const GlobalAsm = struct {
asm_tok: TokenIndex,
asm_str: Node.Index,
};
@ -455,6 +456,22 @@ pub const Node = union(enum) {
},
};
pub const AsmStmt = struct {
asm_tok: TokenIndex,
asm_str: Node.Index,
outputs: []const Operand,
inputs: []const Operand,
clobbers: []const Node.Index,
labels: []const Node.Index,
quals: GNUAssemblyQualifiers,
pub const Operand = struct {
name: TokenIndex,
constraint: Node.Index,
expr: Node.Index,
};
};
pub const Binary = struct {
qt: QualType,
lhs: Node.Index,
@ -1027,10 +1044,56 @@ pub const Node = union(enum) {
},
},
},
.gnu_asm_simple => .{
.gnu_asm_simple = .{
.asm_stmt, .asm_stmt_volatile, .asm_stmt_inline, .asm_stmt_inline_volatile => |tag| {
const extra = tree.extra.items;
var extra_index = node_data[2];
const operand_size = @sizeOf(AsmStmt.Operand) / @sizeOf(u32);
const outputs_len = extra[extra_index] * operand_size;
extra_index += 1;
const inputs_len = extra[extra_index] * operand_size;
extra_index += 1;
const clobbers_len = extra[extra_index];
extra_index += 1;
const labels_len = node_data[1];
const quals: GNUAssemblyQualifiers = .{
.@"inline" = tag == .asm_stmt_inline or tag == .asm_stmt_inline_volatile,
.@"volatile" = tag == .asm_stmt_volatile or tag == .asm_stmt_inline_volatile,
.goto = labels_len > 0,
};
const outputs = extra[extra_index..][0..outputs_len];
extra_index += outputs_len;
const inputs = extra[extra_index..][0..inputs_len];
extra_index += inputs_len;
const clobbers = extra[extra_index..][0..clobbers_len];
extra_index += clobbers_len;
const labels = extra[extra_index..][0..labels_len];
extra_index += labels_len;
return .{
.asm_stmt = .{
.asm_tok = node_tok,
.asm_str = @enumFromInt(node_data[0]),
.outputs = @ptrCast(outputs),
.inputs = @ptrCast(inputs),
.clobbers = @ptrCast(clobbers),
.labels = @ptrCast(labels),
.quals = quals,
},
};
},
.asm_stmt_simple => .{
.asm_stmt = .{
.asm_tok = node_tok,
.asm_str = @enumFromInt(node_data[0]),
.outputs = &.{},
.inputs = &.{},
.clobbers = &.{},
.labels = &.{},
.quals = @bitCast(node_data[1]),
},
},
.assign_expr => .{
@ -1726,7 +1789,11 @@ pub const Node = union(enum) {
.computed_goto_stmt,
.continue_stmt,
.break_stmt,
.gnu_asm_simple,
.asm_stmt,
.asm_stmt_volatile,
.asm_stmt_inline,
.asm_stmt_inline_volatile,
.asm_stmt_simple,
.global_asm,
.generic_association_expr,
.generic_default_expr,
@ -1813,7 +1880,11 @@ pub const Node = union(enum) {
return_stmt,
return_none_stmt,
implicit_return,
gnu_asm_simple,
asm_stmt,
asm_stmt_inline,
asm_stmt_volatile,
asm_stmt_inline_volatile,
asm_stmt_simple,
comma_expr,
assign_expr,
mul_assign_expr,
@ -2174,10 +2245,39 @@ pub fn setNode(tree: *Tree, node: Node, index: usize) !void {
}
repr.tok = @"return".return_tok;
},
.gnu_asm_simple => |gnu_asm_simple| {
repr.tag = .gnu_asm_simple;
repr.data[0] = @intFromEnum(gnu_asm_simple.asm_str);
repr.tok = gnu_asm_simple.asm_tok;
.asm_stmt => |asm_stmt| {
repr.tok = asm_stmt.asm_tok;
repr.data[0] = @intFromEnum(asm_stmt.asm_str);
if (asm_stmt.outputs.len == 0 and asm_stmt.inputs.len == 0 and asm_stmt.clobbers.len == 0 and asm_stmt.labels.len == 0) {
repr.tag = .asm_stmt_simple;
repr.data[1] = @bitCast(asm_stmt.quals);
} else {
if (asm_stmt.quals.@"inline" and asm_stmt.quals.@"volatile") {
repr.tag = .asm_stmt_inline_volatile;
} else if (asm_stmt.quals.@"inline") {
repr.tag = .asm_stmt_inline;
} else if (asm_stmt.quals.@"volatile") {
repr.tag = .asm_stmt_volatile;
} else {
repr.tag = .asm_stmt;
}
repr.data[1] = @intCast(asm_stmt.labels.len);
repr.data[2] = @intCast(tree.extra.items.len);
const operand_size = @sizeOf(Node.AsmStmt.Operand) / @sizeOf(u32);
try tree.extra.ensureUnusedCapacity(tree.comp.gpa, 3 // lens
+ (asm_stmt.outputs.len + asm_stmt.inputs.len) * operand_size // outputs inputs
+ asm_stmt.clobbers.len + asm_stmt.labels.len);
tree.extra.appendAssumeCapacity(@intCast(asm_stmt.outputs.len));
tree.extra.appendAssumeCapacity(@intCast(asm_stmt.inputs.len));
tree.extra.appendAssumeCapacity(@intCast(asm_stmt.clobbers.len));
tree.extra.appendSliceAssumeCapacity(@ptrCast(asm_stmt.outputs));
tree.extra.appendSliceAssumeCapacity(@ptrCast(asm_stmt.inputs));
tree.extra.appendSliceAssumeCapacity(@ptrCast(asm_stmt.clobbers));
tree.extra.appendSliceAssumeCapacity(@ptrCast(asm_stmt.labels));
}
},
.assign_expr => |bin| {
repr.tag = .assign_expr;
@ -2804,18 +2904,17 @@ const CallableResultUsage = struct {
};
pub fn callableResultUsage(tree: *const Tree, node: Node.Index) ?CallableResultUsage {
var cur_node = node;
while (true) switch (cur_node.get(tree)) {
loop: switch (node.get(tree)) {
.decl_ref_expr => |decl_ref| return .{
.tok = decl_ref.name_tok,
.nodiscard = decl_ref.qt.hasAttribute(tree.comp, .nodiscard),
.warn_unused_result = decl_ref.qt.hasAttribute(tree.comp, .warn_unused_result),
},
.paren_expr, .addr_of_expr, .deref_expr => |un| cur_node = un.operand,
.comma_expr => |bin| cur_node = bin.rhs,
.cast => |cast| cur_node = cast.operand,
.call_expr => |call| cur_node = call.callee,
.paren_expr, .addr_of_expr, .deref_expr => |un| continue :loop un.operand.get(tree),
.comma_expr => |bin| continue :loop bin.rhs.get(tree),
.cast => |cast| continue :loop cast.operand.get(tree),
.call_expr => |call| continue :loop call.callee.get(tree),
.member_access_expr, .member_access_ptr_expr => |access| {
var qt = access.base.qt(tree);
if (qt.get(tree.comp, .pointer)) |pointer| qt = pointer.child;
@ -2825,19 +2924,14 @@ pub fn callableResultUsage(tree: *const Tree, node: Node.Index) ?CallableResultU
};
const field = record_ty.fields[access.member_index];
const attributes = field.attributes(tree.comp);
return .{
.tok = field.name_tok,
.nodiscard = for (attributes) |attr| {
if (attr.tag == .nodiscard) break true;
} else false,
.warn_unused_result = for (attributes) |attr| {
if (attr.tag == .warn_unused_result) break true;
} else false,
.nodiscard = field.qt.hasAttribute(tree.comp, .nodiscard),
.warn_unused_result = field.qt.hasAttribute(tree.comp, .warn_unused_result),
};
},
else => return null,
};
}
}
pub fn isLval(tree: *const Tree, node: Node.Index) bool {
@ -3004,6 +3098,13 @@ fn dumpNode(
try config.setColor(w, ATTRIBUTE);
try w.writeAll(" bitfield");
}
if (node == .asm_stmt) {
const quals = node.asm_stmt.quals;
try config.setColor(w, ATTRIBUTE);
if (quals.@"inline") try w.writeAll(" inline");
if (quals.@"volatile") try w.writeAll(" volatile");
if (quals.goto) try w.writeAll(" goto");
}
if (tree.value_map.get(node_index)) |val| {
try config.setColor(w, LITERAL);
@ -3028,7 +3129,6 @@ fn dumpNode(
if (node == .return_stmt and node.return_stmt.operand == .implicit and node.return_stmt.operand.implicit) {
try config.setColor(w, IMPLICIT);
try w.writeAll(" (value: 0)");
try config.setColor(w, .reset);
}
try w.writeAll("\n");
@ -3048,8 +3148,7 @@ fn dumpNode(
switch (node) {
.empty_decl => {},
.global_asm, .gnu_asm_simple => |@"asm"| {
try w.splatByteAll(' ', level + 1);
.global_asm => |@"asm"| {
try tree.dumpNode(@"asm".asm_str, level + delta, config, w);
},
.static_assert => |assert| {
@ -3428,6 +3527,73 @@ fn dumpNode(
.none => {},
}
},
.asm_stmt => |@"asm"| {
try tree.dumpNode(@"asm".asm_str, level + delta, config, w);
const write_operand = struct {
fn write_operand(
_w: *std.Io.Writer,
_level: u32,
_config: std.Io.tty.Config,
_tree: *const Tree,
operands: []const Node.AsmStmt.Operand,
) std.Io.tty.Config.SetColorError!void {
for (operands) |operand| {
if (operand.name != 0) {
try _w.splatByteAll(' ', _level + delta);
try _w.writeAll("asm name: ");
try _config.setColor(_w, NAME);
try _w.writeAll(_tree.tokSlice(operand.name));
try _w.writeByte('\n');
try _config.setColor(_w, .reset);
}
try _w.splatByteAll(' ', _level + delta);
try _w.writeAll("constraint: ");
const constraint_val = _tree.value_map.get(operand.constraint).?;
try _config.setColor(_w, LITERAL);
_ = try constraint_val.print(operand.constraint.qt(_tree), _tree.comp, _w);
try _w.writeByte('\n');
try _tree.dumpNode(operand.expr, _level + delta, _config, _w);
}
try _config.setColor(_w, .reset);
}
}.write_operand;
if (@"asm".outputs.len > 0) {
try w.splatByteAll(' ', level + half);
try w.writeAll("ouputs:\n");
try write_operand(w, level, config, tree, @"asm".outputs);
}
if (@"asm".inputs.len > 0) {
try w.splatByteAll(' ', level + half);
try w.writeAll("inputs:\n");
try write_operand(w, level, config, tree, @"asm".inputs);
}
if (@"asm".clobbers.len > 0) {
try w.splatByteAll(' ', level + half);
try w.writeAll("clobbers:\n");
try config.setColor(w, LITERAL);
for (@"asm".clobbers) |clobber| {
const clobber_val = tree.value_map.get(clobber).?;
try w.splatByteAll(' ', level + delta);
_ = try clobber_val.print(clobber.qt(tree), tree.comp, w);
try w.writeByte('\n');
}
try config.setColor(w, .reset);
}
if (@"asm".labels.len > 0) {
try w.splatByteAll(' ', level + half);
try w.writeAll("labels:\n");
for (@"asm".labels) |label| {
try tree.dumpNode(label, level + delta, config, w);
}
}
},
.call_expr => |call| {
try w.splatByteAll(' ', level + half);
try w.writeAll("callee:\n");

View File

@ -95,6 +95,33 @@ pub const Suffix = enum {
// _Bitint
WB, UWB,
// __bf16
BF16,
// _Float32 and imaginary _Float32
F32, IF32,
// _Float64 and imaginary _Float64
F64, IF64,
// _Float32x and imaginary _Float32x
F32x, IF32x,
// _Float64x and imaginary _Float64x
F64x, IF64x,
// _Decimal32
D32,
// _Decimal64
D64,
// _Decimal128
D128,
// _Decimal64x
D64x,
// zig fmt: on
const Tuple = struct { Suffix, []const []const u8 };
@ -126,6 +153,15 @@ pub const Suffix = enum {
.{ .W, &.{"W"} },
.{ .F128, &.{"F128"} },
.{ .Q, &.{"Q"} },
.{ .BF16, &.{"BF16"} },
.{ .F32, &.{"F32"} },
.{ .F64, &.{"F64"} },
.{ .F32x, &.{"F32x"} },
.{ .F64x, &.{"F64x"} },
.{ .D32, &.{"D32"} },
.{ .D64, &.{"D64"} },
.{ .D128, &.{"D128"} },
.{ .D64x, &.{"D64x"} },
.{ .I, &.{"I"} },
.{ .IL, &.{ "I", "L" } },
@ -134,6 +170,10 @@ pub const Suffix = enum {
.{ .IW, &.{ "I", "W" } },
.{ .IF128, &.{ "I", "F128" } },
.{ .IQ, &.{ "I", "Q" } },
.{ .IF32, &.{ "I", "F32" } },
.{ .IF64, &.{ "I", "F64" } },
.{ .IF32x, &.{ "I", "F32x" } },
.{ .IF64x, &.{ "I", "F64x" } },
};
pub fn fromString(buf: []const u8, suffix_kind: enum { int, float }) ?Suffix {
@ -162,8 +202,8 @@ pub const Suffix = enum {
pub fn isImaginary(suffix: Suffix) bool {
return switch (suffix) {
.I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB, .IF128, .IQ, .IW, .IF16 => true,
.None, .L, .F16, .F, .U, .UL, .LL, .ULL, .WB, .UWB, .F128, .Q, .W => false,
.I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB, .IF128, .IQ, .IW, .IF16, .IF32, .IF64, .IF32x, .IF64x => true,
.None, .L, .F16, .F, .U, .UL, .LL, .ULL, .WB, .UWB, .F128, .Q, .W, .F32, .F64, .F32x, .F64x, .D32, .D64, .D128, .D64x, .BF16 => false,
};
}
@ -171,7 +211,7 @@ pub const Suffix = enum {
return switch (suffix) {
.None, .L, .LL, .I, .IL, .ILL, .WB, .IWB => true,
.U, .UL, .ULL, .IU, .IUL, .IULL, .UWB, .IUWB => false,
.F, .IF, .F16, .F128, .IF128, .Q, .IQ, .W, .IW, .IF16 => unreachable,
.F, .IF, .F16, .F128, .IF128, .Q, .IQ, .W, .IW, .IF16, .F32, .IF32, .F64, .IF64, .F32x, .IF32x, .F64x, .IF64x, .D32, .D64, .D128, .D64x, .BF16 => unreachable,
};
}

View File

@ -7,7 +7,6 @@ const record_layout = @import("record_layout.zig");
const Parser = @import("Parser.zig");
const StringInterner = @import("StringInterner.zig");
const StringId = StringInterner.StringId;
const target_util = @import("target.zig");
const Tree = @import("Tree.zig");
const Node = Tree.Node;
const TokenIndex = Tree.TokenIndex;
@ -92,6 +91,16 @@ const Index = enum(u29) {
int_pointer = std.math.maxInt(u29) - 27,
/// Special type used when combining declarators.
declarator_combine = std.math.maxInt(u29) - 28,
float_bf16 = std.math.maxInt(u29) - 29,
float_float32 = std.math.maxInt(u29) - 30,
float_float64 = std.math.maxInt(u29) - 31,
float_float32x = std.math.maxInt(u29) - 32,
float_float64x = std.math.maxInt(u29) - 33,
float_float128x = std.math.maxInt(u29) - 34,
float_dfloat32 = std.math.maxInt(u29) - 35,
float_dfloat64 = std.math.maxInt(u29) - 36,
float_dfloat128 = std.math.maxInt(u29) - 37,
float_dfloat64x = std.math.maxInt(u29) - 38,
_,
};
@ -123,12 +132,22 @@ pub const QualType = packed struct(u32) {
pub const ulong_long: QualType = .{ ._index = .int_ulong_long };
pub const int128: QualType = .{ ._index = .int_int128 };
pub const uint128: QualType = .{ ._index = .int_uint128 };
pub const bf16: QualType = .{ ._index = .float_bf16 };
pub const fp16: QualType = .{ ._index = .float_fp16 };
pub const float16: QualType = .{ ._index = .float_float16 };
pub const float: QualType = .{ ._index = .float_float };
pub const double: QualType = .{ ._index = .float_double };
pub const long_double: QualType = .{ ._index = .float_long_double };
pub const float128: QualType = .{ ._index = .float_float128 };
pub const float32: QualType = .{ ._index = .float_float32 };
pub const float64: QualType = .{ ._index = .float_float64 };
pub const float32x: QualType = .{ ._index = .float_float32x };
pub const float64x: QualType = .{ ._index = .float_float64x };
pub const float128x: QualType = .{ ._index = .float_float128x };
pub const dfloat32: QualType = .{ ._index = .float_dfloat32 };
pub const dfloat64: QualType = .{ ._index = .float_dfloat64 };
pub const dfloat128: QualType = .{ ._index = .float_dfloat128 };
pub const dfloat64x: QualType = .{ ._index = .float_dfloat64x };
pub const void_pointer: QualType = .{ ._index = .void_pointer };
pub const char_pointer: QualType = .{ ._index = .char_pointer };
pub const int_pointer: QualType = .{ ._index = .int_pointer };
@ -184,12 +203,22 @@ pub const QualType = packed struct(u32) {
.int_ulong_long => return .{ .int = .ulong_long },
.int_int128 => return .{ .int = .int128 },
.int_uint128 => return .{ .int = .uint128 },
.float_bf16 => return .{ .float = .bf16 },
.float_fp16 => return .{ .float = .fp16 },
.float_float16 => return .{ .float = .float16 },
.float_float => return .{ .float = .float },
.float_double => return .{ .float = .double },
.float_long_double => return .{ .float = .long_double },
.float_float128 => return .{ .float = .float128 },
.float_float32 => return .{ .float = .float32 },
.float_float64 => return .{ .float = .float64 },
.float_float32x => return .{ .float = .float32x },
.float_float64x => return .{ .float = .float64x },
.float_float128x => return .{ .float = .float128x },
.float_dfloat32 => return .{ .float = .dfloat32 },
.float_dfloat64 => return .{ .float = .dfloat64 },
.float_dfloat128 => return .{ .float = .dfloat128 },
.float_dfloat64x => return .{ .float = .dfloat64x },
.void_pointer => return .{ .pointer = .{ .child = .void, .decayed = null } },
.char_pointer => return .{ .pointer = .{ .child = .char, .decayed = null } },
.int_pointer => return .{ .pointer = .{ .child = .int, .decayed = null } },
@ -608,8 +637,17 @@ pub const QualType = packed struct(u32) {
.float => comp.target.cTypeAlignment(.float),
.double => comp.target.cTypeAlignment(.double),
.long_double => comp.target.cTypeAlignment(.longdouble),
.fp16, .float16 => 2,
.bf16, .fp16, .float16 => 2,
.float128 => 16,
.float32 => comp.target.cTypeAlignment(.float),
.float64 => comp.target.cTypeAlignment(.double),
.float32x => 8,
.float64x => 16,
.float128x => unreachable, // Not supported
.dfloat32 => 4,
.dfloat64 => 8,
.dfloat128 => 16,
.dfloat64x => 16,
},
.bit_int => |bit_int| {
// https://www.open-std.org/jtc1/sc22/wg14/www/docs/n2709.pdf
@ -628,7 +666,7 @@ pub const QualType = packed struct(u32) {
else => comp.target.ptrBitWidth() / 8,
},
.func => target_util.defaultFunctionAlignment(comp.target),
.func => comp.target.defaultFunctionAlignment(),
.array => |array| continue :loop array.elem.base(comp).type,
.vector => |vector| continue :loop vector.elem.base(comp).type,
@ -774,14 +812,23 @@ pub const QualType = packed struct(u32) {
pub fn floatRank(qt: QualType, comp: *const Compilation) usize {
return loop: switch (qt.base(comp).type) {
.float => |float_ty| switch (float_ty) {
// TODO: bfloat16 => 0
.bf16 => 0,
.float16 => 1,
.fp16 => 2,
.float => 3,
.double => 4,
.long_double => 5,
.float128 => 6,
.float32 => 4,
.float32x => 5,
.double => 6,
.float64 => 7,
.float64x => 8,
.long_double => 9,
.float128 => 10,
// TODO: ibm128 => 7
.float128x => unreachable, // Not supported
.dfloat32 => decimal_float_rank + 0,
.dfloat64 => decimal_float_rank + 1,
.dfloat64x => decimal_float_rank + 2,
.dfloat128 => decimal_float_rank + 3,
},
.complex => |complex| continue :loop complex.base(comp).type,
.atomic => |atomic| continue :loop atomic.base(comp).type,
@ -789,6 +836,8 @@ pub const QualType = packed struct(u32) {
};
}
pub const decimal_float_rank = 90;
/// Rank for integer conversions, ignoring domain (complex vs real)
/// Asserts that ty is an integer type
pub fn intRank(qt: QualType, comp: *const Compilation) usize {
@ -1135,7 +1184,7 @@ pub const QualType = packed struct(u32) {
if (index <= aligned_index) break;
}
last_aligned_index = index;
const requested = if (attribute.args.aligned.alignment) |alignment| alignment.requested else target_util.defaultAlignment(comp.target);
const requested = if (attribute.args.aligned.alignment) |alignment| alignment.requested else comp.target.defaultAlignment();
if (max_requested == null or max_requested.? < requested) {
max_requested = requested;
}
@ -1143,9 +1192,16 @@ pub const QualType = packed struct(u32) {
return max_requested;
}
pub fn linkage(qt: QualType, comp: *const Compilation) std.builtin.GlobalLinkage {
if (qt.hasAttribute(comp, .internal_linkage)) return .internal;
if (qt.hasAttribute(comp, .weak)) return .weak;
if (qt.hasAttribute(comp, .selectany)) return .link_once;
return .strong;
}
pub fn enumIsPacked(qt: QualType, comp: *const Compilation) bool {
std.debug.assert(qt.is(comp, .@"enum"));
return comp.langopts.short_enums or target_util.packAllEnums(comp.target) or qt.hasAttribute(comp, .@"packed");
return comp.langopts.short_enums or comp.target.packAllEnums() or qt.hasAttribute(comp, .@"packed");
}
pub fn shouldDesugar(qt: QualType, comp: *const Compilation) bool {
@ -1275,12 +1331,22 @@ pub const QualType = packed struct(u32) {
},
.bit_int => |bit_int| try w.print("{s} _BitInt({d})", .{ @tagName(bit_int.signedness), bit_int.bits }),
.float => |float_ty| switch (float_ty) {
.bf16 => try w.writeAll("__bf16"),
.fp16 => try w.writeAll("__fp16"),
.float16 => try w.writeAll("_Float16"),
.float => try w.writeAll("float"),
.double => try w.writeAll("double"),
.long_double => try w.writeAll("long double"),
.float128 => try w.writeAll("__float128"),
.float32 => try w.writeAll("_Float32"),
.float64 => try w.writeAll("_Float64"),
.float32x => try w.writeAll("_Float32x"),
.float64x => try w.writeAll("_Float64x"),
.float128x => try w.writeAll("_Float128x"),
.dfloat32 => try w.writeAll("_Decimal32"),
.dfloat64 => try w.writeAll("_Decimal64"),
.dfloat128 => try w.writeAll("_Decimal128"),
.dfloat64x => try w.writeAll("_Decimal64x"),
},
.complex => |complex| {
try w.writeAll("_Complex ");
@ -1498,21 +1564,41 @@ pub const Type = union(enum) {
};
pub const Float = enum {
bf16,
fp16,
float16,
float,
double,
long_double,
float128,
float32,
float64,
float32x,
float64x,
float128x,
dfloat32,
dfloat64,
dfloat128,
dfloat64x,
pub fn bits(float: Float, comp: *const Compilation) u16 {
return switch (float) {
.bf16 => 16,
.fp16 => 16,
.float16 => 16,
.float => comp.target.cTypeBitSize(.float),
.double => comp.target.cTypeBitSize(.double),
.long_double => comp.target.cTypeBitSize(.longdouble),
.float128 => 128,
.float32 => 32,
.float64 => 64,
.float32x => 32 * 2,
.float64x => 64 * 2,
.float128x => unreachable, // Not supported
.dfloat32 => 32,
.dfloat64 => 64,
.dfloat128 => 128,
.dfloat64x => 64 * 2,
};
}
};
@ -1747,12 +1833,22 @@ pub fn putExtra(ts: *TypeStore, gpa: std.mem.Allocator, ty: Type) !Index {
.uint128 => return .int_uint128,
},
.float => |float| switch (float) {
.bf16 => return .float_bf16,
.fp16 => return .float_fp16,
.float16 => return .float_float16,
.float => return .float_float,
.double => return .float_double,
.long_double => return .float_long_double,
.float128 => return .float_float128,
.float32 => return .float_float32,
.float64 => return .float_float64,
.float32x => return .float_float32x,
.float64x => return .float_float64x,
.float128x => return .float_float128x,
.dfloat32 => return .float_dfloat32,
.dfloat64 => return .float_dfloat64,
.dfloat128 => return .float_dfloat128,
.dfloat64x => return .float_dfloat64x,
},
else => {},
}
@ -2009,10 +2105,10 @@ pub fn initNamedTypes(ts: *TypeStore, comp: *Compilation) !void {
else => .int,
};
ts.intmax = target_util.intMaxType(comp.target);
ts.intptr = target_util.intPtrType(comp.target);
ts.int16 = target_util.int16Type(comp.target);
ts.int64 = target_util.int64Type(comp.target);
ts.intmax = comp.target.intMaxType();
ts.intptr = comp.target.intPtrType();
ts.int16 = comp.target.int16Type();
ts.int64 = comp.target.int64Type();
ts.uint_least16_t = comp.intLeastN(16, .unsigned);
ts.uint_least32_t = comp.intLeastN(32, .unsigned);
@ -2367,18 +2463,33 @@ pub const Builder = struct {
complex_sbit_int: u64,
complex_ubit_int: u64,
bf16,
fp16,
float16,
float,
double,
long_double,
float128,
float32,
float64,
float32x,
float64x,
float128x,
dfloat32,
dfloat64,
dfloat128,
dfloat64x,
complex,
complex_float16,
complex_float,
complex_double,
complex_long_double,
complex_float128,
complex_float32,
complex_float64,
complex_float32x,
complex_float64x,
complex_float128x,
// Any not simply constructed from specifier keywords.
other: QualType,
@ -2450,6 +2561,7 @@ pub const Builder = struct {
.complex_sint128 => "_Complex signed __int128",
.complex_uint128 => "_Complex unsigned __int128",
.bf16 => "__bf16",
.fp16 => "__fp16",
.float16 => "_Float16",
.float => "float",
@ -2581,18 +2693,33 @@ pub const Builder = struct {
break :blk if (complex) try qt.toComplex(b.parser.comp) else qt;
},
.bf16 => .bf16,
.fp16 => .fp16,
.float16 => .float16,
.float => .float,
.double => .double,
.long_double => .long_double,
.float128 => .float128,
.float32 => .float32,
.float64 => .float64,
.float32x => .float32x,
.float64x => .float64x,
.float128x => .float128x,
.dfloat32 => .dfloat32,
.dfloat64 => .dfloat64,
.dfloat128 => .dfloat128,
.dfloat64x => .dfloat64x,
.complex_float16,
.complex_float,
.complex_double,
.complex_long_double,
.complex_float128,
.complex_float32,
.complex_float64,
.complex_float32x,
.complex_float64x,
.complex_float128x,
.complex,
=> blk: {
const base_qt: QualType = switch (b.type) {
@ -2601,6 +2728,11 @@ pub const Builder = struct {
.complex_double => .double,
.complex_long_double => .long_double,
.complex_float128 => .float128,
.complex_float32 => .float32,
.complex_float64 => .float64,
.complex_float32x => .float32x,
.complex_float64x => .float64x,
.complex_float128x => .float128x,
.complex => .double,
else => unreachable,
};
@ -2749,7 +2881,7 @@ pub const Builder = struct {
else => {},
}
if (new == .int128 and !target_util.hasInt128(b.parser.comp.target)) {
if (new == .int128 and !b.parser.comp.target.hasInt128()) {
try b.parser.err(source_tok, .type_not_supported_on_target, .{"__int128"});
}
@ -2996,13 +3128,59 @@ pub const Builder = struct {
.complex => .complex_float128,
else => return b.cannotCombine(source_tok),
},
.complex => switch (b.type) {
.float32 => switch (b.type) {
.none => .float32,
.complex => .complex_float32,
else => return b.cannotCombine(source_tok),
},
.float64 => switch (b.type) {
.none => .float64,
.complex => .complex_float64,
else => return b.cannotCombine(source_tok),
},
.float32x => switch (b.type) {
.none => .float32x,
.complex => .complex_float32x,
else => return b.cannotCombine(source_tok),
},
.float64x => switch (b.type) {
.none => .float64x,
.complex => .complex_float64x,
else => return b.cannotCombine(source_tok),
},
.float128x => switch (b.type) {
.none => .float128x,
.complex => .complex_float128x,
else => return b.cannotCombine(source_tok),
},
.dfloat32 => switch (b.type) {
.none => .dfloat32,
else => return b.cannotCombine(source_tok),
},
.dfloat64 => switch (b.type) {
.none => .dfloat64,
else => return b.cannotCombine(source_tok),
},
.dfloat128 => switch (b.type) {
.none => .dfloat128,
else => return b.cannotCombine(source_tok),
},
.dfloat64x => switch (b.type) {
.none => .dfloat64x,
else => return b.cannotCombine(source_tok),
},
.complex => switch (b.type) { //
.none => .complex,
.float16 => .complex_float16,
.float => .complex_float,
.double => .complex_double,
.long_double => .complex_long_double,
.float128 => .complex_float128,
.float32 => .complex_float32,
.float64 => .complex_float64,
.float32x => .complex_float32x,
.float64x => .complex_float64x,
.float128x => .complex_float128x,
.char => .complex_char,
.schar => .complex_schar,
.uchar => .complex_uchar,
@ -3072,6 +3250,11 @@ pub const Builder = struct {
.complex_bit_int,
.complex_sbit_int,
.complex_ubit_int,
.complex_float32,
.complex_float64,
.complex_float32x,
.complex_float64x,
.complex_float128x,
=> return b.duplicateSpec(source_tok, "_Complex"),
else => return b.cannotCombine(source_tok),
},
@ -3104,12 +3287,22 @@ pub const Builder = struct {
return .{ .bit_int = bit_int.bits };
},
.float => |float| switch (float) {
.bf16 => .bf16,
.fp16 => .fp16,
.float16 => .float16,
.float => .float,
.double => .double,
.long_double => .long_double,
.float128 => .float128,
.float32 => .float32,
.float64 => .float64,
.float32x => .float32x,
.float64x => .float64x,
.float128x => .float128x,
.dfloat32 => .dfloat32,
.dfloat64 => .dfloat64,
.dfloat128 => .dfloat128,
.dfloat64x => .dfloat64x,
},
.complex => |complex| switch (complex.base(comp).type) {
.int => |int| switch (int) {
@ -3134,11 +3327,21 @@ pub const Builder = struct {
},
.float => |float| switch (float) {
.fp16 => unreachable,
.bf16 => unreachable,
.float16 => .complex_float16,
.float => .complex_float,
.double => .complex_double,
.long_double => .complex_long_double,
.float128 => .complex_float128,
.float32 => .complex_float32,
.float64 => .complex_float64,
.float32x => .complex_float32x,
.float64x => .complex_float64x,
.float128x => .complex_float128x,
.dfloat32 => unreachable,
.dfloat64 => unreachable,
.dfloat128 => unreachable,
.dfloat64x => unreachable,
},
else => unreachable,
},

View File

@ -8,7 +8,7 @@ const BigIntSpace = Interner.Tag.Int.BigIntSpace;
const annex_g = @import("annex_g.zig");
const Compilation = @import("Compilation.zig");
const target_util = @import("target.zig");
const Target = @import("Target.zig");
const QualType = @import("TypeStore.zig").QualType;
const Value = @This();
@ -80,10 +80,10 @@ test "minUnsignedBits" {
defer arena_state.deinit();
const arena = arena_state.allocator();
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd());
defer comp.deinit();
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
comp.target = try std.zig.system.resolveTargetQuery(target_query);
comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query));
try Test.checkIntBits(&comp, 0, 0);
try Test.checkIntBits(&comp, 1, 1);
@ -119,10 +119,10 @@ test "minSignedBits" {
defer arena_state.deinit();
const arena = arena_state.allocator();
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
var comp = Compilation.init(std.testing.allocator, arena, std.testing.io, undefined, std.fs.cwd());
defer comp.deinit();
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
comp.target = try std.zig.system.resolveTargetQuery(target_query);
comp.target = .fromZigTarget(try std.zig.system.resolveTargetQuery(std.testing.io, target_query));
try Test.checkIntBits(&comp, -1, 1);
try Test.checkIntBits(&comp, -2, 2);
@ -401,7 +401,7 @@ pub fn isZero(v: Value, comp: *const Compilation) bool {
switch (v.ref()) {
.zero => return true,
.one => return false,
.null => return target_util.nullRepr(comp.target) == 0,
.null => return comp.target.nullRepr() == 0,
else => {},
}
const key = comp.interner.get(v.ref());

View File

@ -1,6 +1,6 @@
const std = @import("std");
const Compilation = @import("Compilation.zig");
const target_util = @import("target.zig");
const Target = @import("Target.zig");
/// Used to implement the __has_feature macro.
pub fn hasFeature(comp: *Compilation, ext: []const u8) bool {
@ -43,7 +43,7 @@ pub fn hasFeature(comp: *Compilation, ext: []const u8) bool {
.c_atomic = comp.langopts.standard.atLeast(.c11),
.c_generic_selections = comp.langopts.standard.atLeast(.c11),
.c_static_assert = comp.langopts.standard.atLeast(.c11),
.c_thread_local = comp.langopts.standard.atLeast(.c11) and target_util.isTlsSupported(comp.target),
.c_thread_local = comp.langopts.standard.atLeast(.c11) and comp.target.isTlsSupported(),
};
inline for (@typeInfo(@TypeOf(list)).@"struct".fields) |f| {
if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
@ -60,7 +60,7 @@ pub fn hasExtension(comp: *Compilation, ext: []const u8) bool {
.c_atomic = true,
.c_generic_selections = true,
.c_static_assert = true,
.c_thread_local = target_util.isTlsSupported(comp.target),
.c_thread_local = comp.target.isTlsSupported(),
// misc
.overloadable_unmarked = false, // TODO
.statement_attributes_with_gnu_syntax = true,

View File

@ -18,6 +18,7 @@ pragma: Pragma = .{
.preprocessorHandler = preprocessorHandler,
.parserHandler = parserHandler,
.preserveTokens = preserveTokens,
.shouldExpandTokenAtIndexHandler = shouldExpandTokenAtIndex,
},
original_state: Diagnostics.State = .{},
state_stack: std.ArrayList(Diagnostics.State) = .empty,
@ -169,3 +170,7 @@ fn preserveTokens(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
}
return true;
}
fn shouldExpandTokenAtIndex(_: *const Pragma, _: TokenIndex) bool {
return false;
}

View File

@ -51,6 +51,15 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
.location = name_tok.loc.expand(pp.comp),
}, pp.expansionSlice(start_idx + 1), true);
}
if (pp.include_depth == 0) {
const diagnostic: Preprocessor.Diagnostic = .pragma_once_in_main_file;
return pp.diagnostics.addWithLocation(pp.comp, .{
.text = diagnostic.fmt,
.kind = diagnostic.kind,
.opt = diagnostic.opt,
.location = name_tok.loc.expand(pp.comp),
}, pp.expansionSlice(start_idx + 1), true);
}
const seen = self.preprocess_count == pp.preprocess_count;
const prev = try self.pragma_once.fetchPut(pp.comp.gpa, name_tok.loc.id, {});
if (prev != null and !seen) {

View File

@ -84,10 +84,10 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
if (action == .push) {
try pack.stack.append(p.comp.gpa, .{ .label = label orelse "", .val = p.pragma_pack orelse 8 });
} else {
pack.pop(p, label);
const pop_success = pack.pop(p, label);
if (new_val != null) {
try Pragma.err(p.pp, arg, .pragma_pack_undefined_pop, .{});
} else if (pack.stack.items.len == 0) {
} else if (!pop_success) {
try Pragma.err(p.pp, arg, .pragma_pack_empty_stack, .{});
}
}
@ -136,22 +136,25 @@ fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 {
}
}
fn pop(pack: *Pack, p: *Parser, maybe_label: ?[]const u8) void {
/// Returns true if an item was successfully popped.
fn pop(pack: *Pack, p: *Parser, maybe_label: ?[]const u8) bool {
if (maybe_label) |label| {
var i = pack.stack.items.len;
while (i > 0) {
i -= 1;
if (std.mem.eql(u8, pack.stack.items[i].label, label)) {
const prev = pack.stack.orderedRemove(i);
p.pragma_pack = prev.val;
return;
p.pragma_pack = pack.stack.items[i].val;
pack.stack.items.len = i;
return true;
}
}
return false;
} else {
const prev = pack.stack.pop() orelse {
p.pragma_pack = 2;
return;
return false;
};
p.pragma_pack = prev.val;
return true;
}
}

View File

@ -6,7 +6,7 @@ const std = @import("std");
const Attribute = @import("Attribute.zig");
const Compilation = @import("Compilation.zig");
const Parser = @import("Parser.zig");
const target_util = @import("target.zig");
const Target = @import("Target.zig");
const TypeStore = @import("TypeStore.zig");
const QualType = TypeStore.QualType;
const Type = TypeStore.Type;
@ -281,18 +281,18 @@ const SysVContext = struct {
// Some targets ignore the alignment of the underlying type when laying out
// non-zero-sized bit-fields. See test case 0072. On such targets, bit-fields never
// cross a storage boundary. See test case 0081.
if (target_util.ignoreNonZeroSizedBitfieldTypeAlignment(self.comp.target)) {
if (self.comp.target.ignoreNonZeroSizedBitfieldTypeAlignment()) {
ty_fld_algn_bits = 1;
}
} else {
// Some targets ignore the alignment of the underlying type when laying out
// zero-sized bit-fields. See test case 0073.
if (target_util.ignoreZeroSizedBitfieldTypeAlignment(self.comp.target)) {
if (self.comp.target.ignoreZeroSizedBitfieldTypeAlignment()) {
ty_fld_algn_bits = 1;
}
// Some targets have a minimum alignment of zero-sized bit-fields. See test case
// 0074.
if (target_util.minZeroWidthBitfieldAlignment(self.comp.target)) |target_align| {
if (self.comp.target.minZeroWidthBitfieldAlignment()) |target_align| {
ty_fld_algn_bits = @max(ty_fld_algn_bits, target_align);
}
}
@ -355,7 +355,7 @@ const SysVContext = struct {
// Unnamed fields do not contribute to the record alignment except on a few targets.
// See test case 0079.
if (is_named or target_util.unnamedFieldAffectsAlignment(self.comp.target)) {
if (is_named or self.comp.target.unnamedFieldAffectsAlignment()) {
var inherited_align_bits: u32 = undefined;
if (bit_width == 0) {

View File

@ -1,998 +0,0 @@
const std = @import("std");
const backend = @import("../backend.zig");
const LangOpts = @import("LangOpts.zig");
const TargetSet = @import("Builtins/Properties.zig").TargetSet;
const QualType = @import("TypeStore.zig").QualType;
/// intmax_t for this target.
/// Returns the C type (`.long` or `.long_long`) that `intmax_t` aliases,
/// following each OS/arch/ABI's convention.
pub fn intMaxType(target: std.Target) QualType {
    switch (target.cpu.arch) {
        // 64-bit arches where intmax_t is `long` — except OpenBSD, which uses
        // `long long` even on LP64 (falls through to the default below).
        .aarch64,
        .aarch64_be,
        .sparc64,
        => if (target.os.tag != .openbsd) return .long,
        .bpfel,
        .bpfeb,
        .loongarch64,
        .riscv64,
        .riscv64be,
        .powerpc64,
        .powerpc64le,
        .ve,
        => return .long,
        .x86_64 => switch (target.os.tag) {
            // Windows (LLP64) and OpenBSD use `long long`; the x32 ABIs have
            // 32-bit longs, so they also fall through to `long long`.
            .windows, .openbsd => {},
            else => switch (target.abi) {
                .gnux32, .muslx32 => {},
                else => return .long,
            },
        },
        else => {},
    }
    // Default for 32-bit targets and the exceptions above.
    return .long_long;
}
/// intptr_t for this target.
/// Maps the pointer-sized C integer type per OS/arch/ABI; returns `.int`,
/// `.long`, or `.long_long`.
pub fn intPtrType(target: std.Target) QualType {
    // Haiku uses `long` on every architecture.
    if (target.os.tag == .haiku) return .long;
    switch (target.cpu.arch) {
        .aarch64, .aarch64_be => switch (target.os.tag) {
            // LLP64 Windows: `long` is 32-bit, so intptr_t is `long long`.
            .windows => return .long_long,
            else => {},
        },
        // 32-bit targets whose intptr_t is plain `int`.
        .msp430,
        .csky,
        .loongarch32,
        .riscv32,
        .riscv32be,
        .xcore,
        .hexagon,
        .m68k,
        .spirv32,
        .arc,
        .avr,
        => return .int,
        .sparc => switch (target.os.tag) {
            // NetBSD/OpenBSD sparc keep the `long` default; others use `int`.
            .netbsd, .openbsd => {},
            else => return .int,
        },
        .powerpc, .powerpcle => switch (target.os.tag) {
            .linux, .freebsd, .netbsd => return .int,
            else => {},
        },
        // 32-bit x86 Darwin, OpenBSD, and RTEMS use long (the default); others use int
        .x86 => switch (target.os.tag) {
            .openbsd, .rtems => {},
            else => if (!target.os.tag.isDarwin()) return .int,
        },
        .x86_64 => switch (target.os.tag) {
            // LLP64 Windows again; the x32 ABIs have 32-bit pointers -> int.
            .windows => return .long_long,
            else => switch (target.abi) {
                .gnux32, .muslx32 => return .int,
                else => {},
            },
        },
        else => {},
    }
    // Everything else uses `long`.
    return .long;
}
/// int16_t for this target.
pub fn int16Type(target: std.Target) QualType {
    // AVR's native `int` is 16 bits wide, so int16_t maps to plain int there;
    // everywhere else `short` is the 16-bit type.
    if (target.cpu.arch == .avr) return .int;
    return .short;
}
/// sig_atomic_t for this target.
pub fn sigAtomicType(target: std.Target) QualType {
    const arch = target.cpu.arch;
    if (arch.isWasm()) return .long;
    switch (arch) {
        .avr => return .schar,
        .msp430 => return .long,
        else => return .int,
    }
}
/// int64_t for this target.
/// Returns `.long` or `.long_long` (possibly by deferring to intMaxType).
pub fn int64Type(target: std.Target) QualType {
    switch (target.cpu.arch) {
        // 64-bit arches where int64_t is `long`.
        .loongarch64,
        .ve,
        .riscv64,
        .riscv64be,
        .powerpc64,
        .powerpc64le,
        .bpfel,
        .bpfeb,
        => return .long,
        // These follow intmax_t's choice (which handles the OS exceptions).
        .sparc64 => return intMaxType(target),
        .x86, .x86_64 => if (!target.os.tag.isDarwin()) return intMaxType(target),
        // aarch64: `long`, except on Darwin, OpenBSD, and Windows.
        .aarch64, .aarch64_be => if (!target.os.tag.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .long,
        else => {},
    }
    return .long_long;
}
/// The C type backed by an 80-bit extended float on this target, if any.
pub fn float80Type(target: std.Target) ?QualType {
    // Only the x86 family exposes the x87 80-bit format (as long double).
    return switch (target.cpu.arch) {
        .x86, .x86_64 => .long_double,
        else => null,
    };
}
/// This function returns 1 if function alignment is not observable or settable.
pub fn defaultFunctionAlignment(target: std.Target) u8 {
    switch (target.cpu.arch) {
        .arm, .armeb, .aarch64, .aarch64_be, .sparc, .sparc64 => return 4,
        .riscv64, .riscv64be => return 2,
        else => return 1,
    }
}
/// Whether the target supports native thread-local storage (C11 _Thread_local).
pub fn isTlsSupported(target: std.Target) bool {
    if (target.os.tag.isDarwin()) {
        // Per Clang's Darwin target definitions, TLS is available beginning
        // with macOS 10.7. The original code negated this check
        // (`!(isAtLeast(...) orelse false)`), which reported TLS as
        // unsupported on every modern macOS — an inverted boolean.
        // Non-macOS Darwin variants keep reporting no support here,
        // matching the original's `else => {}` behavior.
        var supported = false;
        switch (target.os.tag) {
            .macos => supported = target.os.isAtLeast(.macos, .{ .major = 10, .minor = 7, .patch = 0 }) orelse false,
            else => {},
        }
        return supported;
    }
    return switch (target.cpu.arch) {
        .bpfel, .bpfeb, .msp430, .nvptx, .nvptx64, .x86, .arm, .armeb, .thumb, .thumbeb => false,
        else => true,
    };
}
/// Whether this target lays out non-zero-sized bit-fields without regard to
/// the alignment of their underlying type.
pub fn ignoreNonZeroSizedBitfieldTypeAlignment(target: std.Target) bool {
    return switch (target.cpu.arch) {
        .avr => true,
        // Only ARMv7-on-iOS ignores the underlying type's alignment.
        .arm => std.Target.arm.featureSetHas(target.cpu.features, .has_v7) and
            target.os.tag == .ios,
        else => false,
    };
}
/// Whether this target lays out zero-sized bit-fields without regard to the
/// alignment of their underlying type. Only AVR does.
pub fn ignoreZeroSizedBitfieldTypeAlignment(target: std.Target) bool {
    return target.cpu.arch == .avr;
}
/// Minimum alignment (in bits) that a zero-width bit-field imposes on this
/// target, or null when zero-width bit-fields add no alignment requirement.
pub fn minZeroWidthBitfieldAlignment(target: std.Target) ?u29 {
    return switch (target.cpu.arch) {
        .avr => 8,
        // Only ARMv7-on-iOS imposes a minimum; other ARM configs do not.
        .arm => if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7) and
            target.os.tag == .ios) 32 else null,
        else => null,
    };
}
/// Whether unnamed (anonymous) record fields contribute to the record's
/// alignment on this target.
pub fn unnamedFieldAffectsAlignment(target: std.Target) bool {
    switch (target.cpu.arch) {
        // aarch64: yes, except on Darwin and Windows.
        .aarch64 => return !(target.os.tag.isDarwin() or target.os.tag == .windows),
        // armeb: only ARMv7 with the default eabi ABI.
        .armeb => return std.Target.arm.featureSetHas(target.cpu.features, .has_v7) and
            std.Target.Abi.default(target.cpu.arch, target.os.tag) == .eabi,
        .arm, .avr => return true,
        .thumb => return target.os.tag != .windows,
        else => return false,
    }
}
/// Whether every enum on this target is laid out as if packed.
/// Only Hexagon behaves this way.
pub fn packAllEnums(target: std.Target) bool {
    return target.cpu.arch == .hexagon;
}
/// Default alignment (in bytes) for __attribute__((aligned)) when no alignment is specified
pub fn defaultAlignment(target: std.Target) u29 {
    return switch (target.cpu.arch) {
        .avr => 1,
        .arm => if (target.abi.isAndroid() or target.os.tag == .ios) 16 else 8,
        .sparc => if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) 16 else 8,
        .mips, .mipsel => switch (target.abi) {
            .none, .gnuabi64 => 16,
            else => 8,
        },
        .s390x, .armeb, .thumbeb, .thumb => 8,
        else => 16,
    };
}
/// Which compiler this target's "system compiler" is assumed to be.
/// NOTE: the order of the checks below is significant — each guard must run
/// before the broader ones that follow it.
pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
    // Android is linux but not gcc, so these checks go first
    // the rest for documentation as fn returns .clang
    if (target.os.tag.isDarwin() or
        target.abi.isAndroid() or
        target.os.tag.isBSD() or
        target.os.tag == .fuchsia or
        target.os.tag == .illumos or
        target.os.tag == .haiku or
        target.cpu.arch == .hexagon)
    {
        return .clang;
    }
    if (target.os.tag == .uefi) return .msvc;
    // this is before windows to grab WindowsGnu
    if (target.abi.isGnu() or
        target.os.tag == .linux)
    {
        return .gcc;
    }
    if (target.os.tag == .windows) {
        return .msvc;
    }
    if (target.cpu.arch == .avr) return .gcc;
    return .clang;
}
/// Whether the target provides the __float128 type.
pub fn hasFloat128(target: std.Target) bool {
    if (target.cpu.arch.isWasm()) return true;
    if (target.os.tag.isDarwin()) return false;
    // PowerPC gates __float128 behind the `float128` CPU feature.
    if (target.cpu.arch.isPowerPC()) return std.Target.powerpc.featureSetHas(target.cpu.features, .float128);
    // Otherwise: only x86-family CPUs on the listed operating systems.
    return switch (target.os.tag) {
        .dragonfly,
        .haiku,
        .linux,
        .openbsd,
        .illumos,
        => target.cpu.arch.isX86(),
        else => false,
    };
}
/// Whether the target provides the __int128 type: wasm32 and x86_64
/// explicitly, plus any target with 64-bit (or wider) pointers.
pub fn hasInt128(target: std.Target) bool {
    const arch = target.cpu.arch;
    if (arch == .wasm32 or arch == .x86_64) return true;
    return target.ptrBitWidth() >= 64;
}
/// Whether the target ABI can pass/return half-precision floats natively.
pub fn hasHalfPrecisionFloatABI(target: std.Target) bool {
    switch (target.cpu.arch) {
        .thumb, .thumbeb, .arm, .aarch64 => return true,
        else => return false,
    }
}
/// Floating-point format classification, used when classifying C float types
/// and when generating float.h-style macros.
pub const FPSemantics = enum {
    None,
    IEEEHalf,
    BFloat,
    IEEESingle,
    IEEEDouble,
    IEEEQuad,
    /// Minifloat 5-bit exponent 2-bit mantissa
    E5M2,
    /// Minifloat 4-bit exponent 3-bit mantissa
    E4M3,
    x87ExtendedDouble,
    IBMExtendedDouble,

    /// Only intended for generating float.h macros for the preprocessor
    /// Classifies `float`/`double`/`long double` by their bit size on `target`.
    pub fn forType(ty: std.Target.CType, target: std.Target) FPSemantics {
        std.debug.assert(ty == .float or ty == .double or ty == .longdouble);
        return switch (target.cTypeBitSize(ty)) {
            32 => .IEEESingle,
            64 => .IEEEDouble,
            80 => .x87ExtendedDouble,
            // A 128-bit long double is IBM double-double on PowerPC,
            // IEEE quad everywhere else.
            128 => switch (target.cpu.arch) {
                .powerpc, .powerpcle, .powerpc64, .powerpc64le => .IBMExtendedDouble,
                else => .IEEEQuad,
            },
            else => unreachable,
        };
    }

    /// Semantics of the target's native half-precision type, or null when the
    /// target has none. x86 requires SSE2.
    pub fn halfPrecisionType(target: std.Target) ?FPSemantics {
        switch (target.cpu.arch) {
            .aarch64,
            .aarch64_be,
            .arm,
            .armeb,
            .hexagon,
            .riscv32,
            .riscv32be,
            .riscv64,
            .riscv64be,
            .spirv32,
            .spirv64,
            => return .IEEEHalf,
            .x86, .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .sse2)) return .IEEEHalf,
            else => {},
        }
        return null;
    }

    /// Selects the entry for this format from a table ordered
    /// [IEEEHalf, IEEESingle, IEEEDouble, x87, IBM, IEEEQuad].
    /// Any other variant is an invalid input (unreachable).
    pub fn chooseValue(self: FPSemantics, comptime T: type, values: [6]T) T {
        return switch (self) {
            .IEEEHalf => values[0],
            .IEEESingle => values[1],
            .IEEEDouble => values[2],
            .x87ExtendedDouble => values[3],
            .IBMExtendedDouble => values[4],
            .IEEEQuad => values[5],
            else => unreachable,
        };
    }
};
/// Whether the target uses the LP64 data model (32-bit int, 64-bit pointers).
pub fn isLP64(target: std.Target) bool {
    const int_is_32 = target.cTypeBitSize(.int) == 32;
    const ptr_is_64 = target.ptrBitWidth() == 64;
    return int_is_32 and ptr_is_64;
}
/// Windows with an explicitly-MSVC ABI (abi == .msvc only).
pub fn isKnownWindowsMSVCEnvironment(target: std.Target) bool {
    if (target.os.tag != .windows) return false;
    return target.abi == .msvc;
}
/// Windows with an MSVC-compatible ABI (.msvc, or .none which defaults to it).
pub fn isWindowsMSVCEnvironment(target: std.Target) bool {
    if (target.os.tag != .windows) return false;
    return target.abi == .msvc or target.abi == .none;
}
/// Windows with the GNU ABI (Cygwin/MinGW toolchains).
pub fn isCygwinMinGW(target: std.Target) bool {
    if (target.os.tag != .windows) return false;
    return target.abi == .gnu;
}
/// Sony PlayStation 4/5 target (always x86_64).
pub fn isPS(target: std.Target) bool {
    if (target.cpu.arch != .x86_64) return false;
    return switch (target.os.tag) {
        .ps4, .ps5 => true,
        else => false,
    };
}
/// Whether a builtin restricted to the targets in `enabled_for` is available
/// on `target`. `.basic` builtins are available everywhere.
pub fn builtinEnabled(target: std.Target, enabled_for: TargetSet) bool {
    const arch = target.cpu.arch;
    var it = enabled_for.iterator();
    while (it.next()) |val| switch (val) {
        .basic => return true,
        .x86_64 => if (arch == .x86_64) return true,
        .aarch64 => if (arch == .aarch64) return true,
        .arm => if (arch == .arm) return true,
        .ppc => switch (arch) {
            .powerpc, .powerpc64, .powerpc64le => return true,
            else => {},
        },
        else => {
            // Todo: handle other target predicates
        },
    };
    return false;
}
/// Default FLT_EVAL_METHOD for this target: the precision at which
/// intermediate floating-point expressions are evaluated.
pub fn defaultFpEvalMethod(target: std.Target) LangOpts.FPEvalMethod {
    switch (target.cpu.arch) {
        .x86, .x86_64 => {
            if (target.ptrBitWidth() == 32 and target.os.tag == .netbsd) {
                if (target.os.version_range.semver.min.order(.{ .major = 6, .minor = 99, .patch = 26 }) != .gt) {
                    // NETBSD <= 6.99.26 on 32-bit x86 defaults to double
                    return .double;
                }
            }
            // With SSE, arithmetic happens at source precision; without it,
            // the x87 FPU evaluates in extended (80-bit) precision.
            if (std.Target.x86.featureSetHas(target.cpu.features, .sse)) {
                return .source;
            }
            return .extended;
        },
        else => {},
    }
    return .source;
}
/// Value of the `-m` flag for `ld` for this target
/// (the linker emulation name). Returns null for architectures with no known
/// emulation. `arm_endianness` overrides the arch's default endianness for
/// the ARM family when non-null.
pub fn ldEmulationOption(target: std.Target, arm_endianness: ?std.builtin.Endian) ?[]const u8 {
    return switch (target.cpu.arch) {
        .x86 => "elf_i386",
        .arm,
        .armeb,
        .thumb,
        .thumbeb,
        => switch (arm_endianness orelse target.cpu.arch.endian()) {
            .little => "armelf_linux_eabi",
            .big => "armelfb_linux_eabi",
        },
        .aarch64 => "aarch64linux",
        .aarch64_be => "aarch64linuxb",
        .m68k => "m68kelf",
        .powerpc => if (target.os.tag == .linux) "elf32ppclinux" else "elf32ppc",
        .powerpcle => if (target.os.tag == .linux) "elf32lppclinux" else "elf32lppc",
        .powerpc64 => "elf64ppc",
        .powerpc64le => "elf64lppc",
        .riscv32 => "elf32lriscv",
        .riscv32be => "elf32briscv",
        .riscv64 => "elf64lriscv",
        .riscv64be => "elf64briscv",
        .sparc => "elf32_sparc",
        .sparc64 => "elf64_sparc",
        .loongarch32 => "elf32loongarch",
        .loongarch64 => "elf64loongarch",
        .mips => "elf32btsmip",
        .mipsel => "elf32ltsmip",
        // MIPS n32 ABIs are 32-bit emulations on 64-bit arches.
        .mips64 => switch (target.abi) {
            .gnuabin32, .muslabin32 => "elf32btsmipn32",
            else => "elf64btsmip",
        },
        .mips64el => switch (target.abi) {
            .gnuabin32, .muslabin32 => "elf32ltsmipn32",
            else => "elf64ltsmip",
        },
        // x32 ABIs use the 32-bit x86_64 emulation.
        .x86_64 => switch (target.abi) {
            .gnux32, .muslx32 => "elf32_x86_64",
            else => "elf_x86_64",
        },
        .ve => "elf64ve",
        .csky => "cskyelf_linux",
        else => null,
    };
}
/// Returns a copy of `target` whose CPU arch is the 32-bit counterpart of the
/// original arch, the target unchanged if already 32-bit, or null when no
/// 32-bit variant exists. Only `cpu.arch` is rewritten; CPU model/features
/// and the rest of the target are copied as-is.
pub fn get32BitArchVariant(target: std.Target) ?std.Target {
    var copy = target;
    switch (target.cpu.arch) {
        // 64-bit only: no 32-bit sibling.
        .amdgcn,
        .avr,
        .msp430,
        .ve,
        .bpfel,
        .bpfeb,
        .kvx,
        .s390x,
        => return null,
        .arc,
        .arm,
        .armeb,
        .csky,
        .hexagon,
        .m68k,
        .mips,
        .mipsel,
        .powerpc,
        .powerpcle,
        .riscv32,
        .riscv32be,
        .sparc,
        .thumb,
        .thumbeb,
        .x86,
        .xcore,
        .nvptx,
        .kalimba,
        .lanai,
        .wasm32,
        .spirv32,
        .loongarch32,
        .xtensa,
        .propeller,
        .or1k,
        => {}, // Already 32 bit
        .aarch64 => copy.cpu.arch = .arm,
        .aarch64_be => copy.cpu.arch = .armeb,
        .nvptx64 => copy.cpu.arch = .nvptx,
        .wasm64 => copy.cpu.arch = .wasm32,
        .spirv64 => copy.cpu.arch = .spirv32,
        .loongarch64 => copy.cpu.arch = .loongarch32,
        .mips64 => copy.cpu.arch = .mips,
        .mips64el => copy.cpu.arch = .mipsel,
        .powerpc64 => copy.cpu.arch = .powerpc,
        .powerpc64le => copy.cpu.arch = .powerpcle,
        .riscv64 => copy.cpu.arch = .riscv32,
        .riscv64be => copy.cpu.arch = .riscv32be,
        .sparc64 => copy.cpu.arch = .sparc,
        .x86_64 => copy.cpu.arch = .x86,
    }
    return copy;
}
/// Returns a copy of `target` with `cpu.arch` replaced by its 64-bit
/// counterpart, or null if the architecture has no 64-bit variant.
/// Only the architecture tag is swapped; CPU model/features, OS, and ABI in
/// the copy are left as-is. Inverse companion of `get32BitArchVariant`.
pub fn get64BitArchVariant(target: std.Target) ?std.Target {
    var copy = target;
    switch (target.cpu.arch) {
        // 32-bit-only architectures: no 64-bit variant exists.
        .arc,
        .avr,
        .csky,
        .hexagon,
        .kalimba,
        .lanai,
        .m68k,
        .msp430,
        .xcore,
        .xtensa,
        .propeller,
        .or1k,
        => return null,
        .aarch64,
        .aarch64_be,
        .amdgcn,
        .bpfeb,
        .bpfel,
        .nvptx64,
        .wasm64,
        .spirv64,
        .kvx,
        .loongarch64,
        .mips64,
        .mips64el,
        .powerpc64,
        .powerpc64le,
        .riscv64,
        .riscv64be,
        .s390x,
        .sparc64,
        .ve,
        .x86_64,
        => {}, // Already 64 bit
        // 32-bit architectures mapped to their 64-bit counterparts.
        .arm => copy.cpu.arch = .aarch64,
        .armeb => copy.cpu.arch = .aarch64_be,
        .loongarch32 => copy.cpu.arch = .loongarch64,
        .mips => copy.cpu.arch = .mips64,
        .mipsel => copy.cpu.arch = .mips64el,
        .nvptx => copy.cpu.arch = .nvptx64,
        .powerpc => copy.cpu.arch = .powerpc64,
        .powerpcle => copy.cpu.arch = .powerpc64le,
        .riscv32 => copy.cpu.arch = .riscv64,
        .riscv32be => copy.cpu.arch = .riscv64be,
        .sparc => copy.cpu.arch = .sparc64,
        .spirv32 => copy.cpu.arch = .spirv64,
        // Thumb widens to AArch64 (there is no 64-bit Thumb).
        .thumb => copy.cpu.arch = .aarch64,
        .thumbeb => copy.cpu.arch = .aarch64_be,
        .wasm32 => copy.cpu.arch = .wasm64,
        .x86 => copy.cpu.arch = .x86_64,
    }
    return copy;
}
/// Adapted from Zig's src/codegen/llvm.zig
/// Renders `target` as an LLVM triple string ("arch-os[version]-abi") into
/// `buf` and returns the written slice. `buf` must be at least 64 bytes.
pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
    // 64 bytes is assumed to be large enough to hold any target triple; increase if necessary
    std.debug.assert(buf.len >= 64);
    var writer: std.Io.Writer = .fixed(buf);
    const llvm_arch = switch (target.cpu.arch) {
        .arm => "arm",
        .armeb => "armeb",
        // ILP32 on AArch64 is spelled "aarch64_32" in LLVM triples.
        .aarch64 => if (target.abi == .ilp32) "aarch64_32" else "aarch64",
        .aarch64_be => "aarch64_be",
        .arc => "arc",
        .avr => "avr",
        .bpfel => "bpfel",
        .bpfeb => "bpfeb",
        .csky => "csky",
        .hexagon => "hexagon",
        .loongarch32 => "loongarch32",
        .loongarch64 => "loongarch64",
        .m68k => "m68k",
        .mips => "mips",
        .mipsel => "mipsel",
        .mips64 => "mips64",
        .mips64el => "mips64el",
        .msp430 => "msp430",
        .powerpc => "powerpc",
        .powerpcle => "powerpcle",
        .powerpc64 => "powerpc64",
        .powerpc64le => "powerpc64le",
        .amdgcn => "amdgcn",
        .riscv32 => "riscv32",
        .riscv32be => "riscv32be",
        .riscv64 => "riscv64",
        .riscv64be => "riscv64be",
        .sparc => "sparc",
        .sparc64 => "sparc64",
        .s390x => "s390x",
        .thumb => "thumb",
        .thumbeb => "thumbeb",
        .x86 => "i386",
        .x86_64 => "x86_64",
        .xcore => "xcore",
        .xtensa => "xtensa",
        .nvptx => "nvptx",
        .nvptx64 => "nvptx64",
        .spirv32 => "spirv32",
        .spirv64 => "spirv64",
        .lanai => "lanai",
        .wasm32 => "wasm32",
        .wasm64 => "wasm64",
        .ve => "ve",
        // Note: propeller1, kalimba, kvx, and or1k are not supported in LLVM; this is the Zig arch name
        .kalimba => "kalimba",
        .kvx => "kvx",
        .propeller => "propeller",
        .or1k => "or1k",
    };
    // Writes cannot fail: capacity was asserted above.
    writer.writeAll(llvm_arch) catch unreachable;
    writer.writeByte('-') catch unreachable;
    const llvm_os = switch (target.os.tag) {
        .freestanding => "unknown",
        .dragonfly => "dragonfly",
        .freebsd => "freebsd",
        .fuchsia => "fuchsia",
        .linux => "linux",
        // PS3's OS is named "lv2" in LLVM triples.
        .ps3 => "lv2",
        .netbsd => "netbsd",
        .openbsd => "openbsd",
        .illumos => "solaris",
        .windows => "windows",
        .haiku => "haiku",
        .rtems => "rtems",
        .cuda => "cuda",
        .nvcl => "nvcl",
        .amdhsa => "amdhsa",
        .ps4 => "ps4",
        .ps5 => "ps5",
        .mesa3d => "mesa3d",
        .contiki => "contiki",
        .amdpal => "amdpal",
        .hermit => "hermit",
        .hurd => "hurd",
        .wasi => "wasi",
        .emscripten => "emscripten",
        // UEFI uses the "windows" OS component.
        .uefi => "windows",
        .macos => "macosx",
        // Mac Catalyst uses the iOS OS component (distinguished by the ABI).
        .ios, .maccatalyst => "ios",
        .tvos => "tvos",
        .watchos => "watchos",
        .driverkit => "driverkit",
        // visionOS is spelled "xros" in LLVM triples.
        .visionos => "xros",
        .serenity => "serenity",
        .vulkan => "vulkan",
        .managarm => "managarm",
        .@"3ds",
        .vita,
        .opencl,
        .opengl,
        .plan9,
        .other,
        => "unknown",
    };
    writer.writeAll(llvm_os) catch unreachable;
    // Darwin targets carry the minimum deployment version directly in the OS
    // component, e.g. "macosx13.0.0".
    if (target.os.tag.isDarwin()) {
        const min_version = target.os.version_range.semver.min;
        writer.print("{d}.{d}.{d}", .{
            min_version.major,
            min_version.minor,
            min_version.patch,
        }) catch unreachable;
    }
    writer.writeByte('-') catch unreachable;
    const llvm_abi = switch (target.abi) {
        // Mac Catalyst with no explicit ABI uses the "macabi" environment.
        .none => if (target.os.tag == .maccatalyst) "macabi" else "unknown",
        .ilp32 => "unknown",
        .gnu => "gnu",
        .gnuabin32 => "gnuabin32",
        .gnuabi64 => "gnuabi64",
        .gnueabi => "gnueabi",
        .gnueabihf => "gnueabihf",
        .gnuf32 => "gnuf32",
        .gnusf => "gnusf",
        .gnux32 => "gnux32",
        .code16 => "code16",
        .eabi => "eabi",
        .eabihf => "eabihf",
        .android => "android",
        .androideabi => "androideabi",
        .musl => "musl",
        .muslabin32 => "muslabin32",
        .muslabi64 => "muslabi64",
        .musleabi => "musleabi",
        .musleabihf => "musleabihf",
        .muslf32 => "muslf32",
        .muslsf => "muslsf",
        .muslx32 => "muslx32",
        .msvc => "msvc",
        .itanium => "itanium",
        .simulator => "simulator",
        .ohos => "ohos",
        .ohoseabi => "ohoseabi",
    };
    writer.writeAll(llvm_abi) catch unreachable;
    return writer.buffered();
}
/// Tri-state answer for the PIE/PIC default queries below; `depends_on_linker`
/// means the effective default cannot be decided without knowing the linker.
pub const DefaultPIStatus = enum { yes, no, depends_on_linker };
/// Whether PIE (position independent executable) is the default for this
/// target. Mirrors clang's per-toolchain `isPIEDefault` logic.
pub fn isPIEDefault(target: std.Target) DefaultPIStatus {
    return switch (target.os.tag) {
        .haiku,
        .maccatalyst,
        .macos,
        .ios,
        .tvos,
        .watchos,
        .visionos,
        .driverkit,
        .dragonfly,
        .netbsd,
        .freebsd,
        .illumos,
        .cuda,
        .amdhsa,
        .amdpal,
        .mesa3d,
        .ps4,
        .ps5,
        .hurd,
        => .no,
        .openbsd,
        .fuchsia,
        => .yes,
        .linux => {
            // OpenHarmony always defaults to PIE.
            if (target.abi == .ohos)
                return .yes;
            // On Linux only the NEC SX-Aurora (ve) opts out of PIE. The previous
            // condition `target.os.tag == .linux or ...` was vacuously true inside
            // this arm, so this is an equivalent simplification.
            return switch (target.cpu.arch) {
                .ve => .no,
                else => .yes,
            };
        },
        .windows => {
            // MinGW never defaults to PIE.
            if (target.isMinGW())
                return .no;
            if (target.abi == .itanium)
                return if (target.cpu.arch == .x86_64) .yes else .no;
            // MSVC-style targets: the effective default depends on the linker.
            if (target.abi == .msvc or target.abi == .none)
                return .depends_on_linker;
            return .no;
        },
        else => {
            switch (target.cpu.arch) {
                .hexagon => {
                    // CLANG_DEFAULT_PIE_ON_LINUX; since .linux is handled in its
                    // own arm above, only the ABI checks can apply here.
                    return if (target.abi.isAndroid() or target.abi.isMusl()) .yes else .no;
                },
                else => return .no,
            }
        },
    };
}
/// Whether PIC (position independent code) is the default for this target.
/// Mirrors clang's per-toolchain `isPICDefault` logic.
pub fn isPICdefault(target: std.Target) DefaultPIStatus {
    return switch (target.os.tag) {
        // Darwin-family, Haiku, and the AMD/console platforms default to PIC.
        .haiku,
        .maccatalyst,
        .macos,
        .ios,
        .tvos,
        .watchos,
        .visionos,
        .driverkit,
        .amdhsa,
        .amdpal,
        .mesa3d,
        .ps4,
        .ps5,
        => .yes,
        .fuchsia,
        .cuda,
        => .no,
        // BSDs, illumos, and Hurd: PIC only on mips64 targets.
        .dragonfly,
        .openbsd,
        .netbsd,
        .freebsd,
        .illumos,
        .hurd,
        => {
            return switch (target.cpu.arch) {
                .mips64, .mips64el => .yes,
                else => .no,
            };
        },
        .linux => {
            if (target.abi == .ohos)
                return .no;
            return switch (target.cpu.arch) {
                .mips64, .mips64el => .yes,
                else => .no,
            };
        },
        .windows => {
            // MinGW defaults to PIC only on 64-bit x86/Arm.
            if (target.isMinGW())
                return if (target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64) .yes else .no;
            if (target.abi == .itanium)
                return if (target.cpu.arch == .x86_64) .yes else .no;
            // MSVC-style targets: the effective default depends on the linker.
            if (target.abi == .msvc or target.abi == .none)
                return .depends_on_linker;
            // Mach-O output is always PIC.
            if (target.ofmt == .macho)
                return .yes;
            return switch (target.cpu.arch) {
                .x86_64, .mips64, .mips64el => .yes,
                else => .no,
            };
        },
        else => {
            if (target.ofmt == .macho)
                return .yes;
            return switch (target.cpu.arch) {
                .mips64, .mips64el => .yes,
                else => .no,
            };
        },
    };
}
/// Whether the PIC default is *forced* for this target (i.e. cannot be
/// overridden). Mirrors clang's per-toolchain `isPICDefaultForced` logic.
pub fn isPICDefaultForced(target: std.Target) DefaultPIStatus {
    return switch (target.os.tag) {
        .amdhsa, .amdpal, .mesa3d => .yes,
        .haiku,
        .dragonfly,
        .openbsd,
        .netbsd,
        .freebsd,
        .illumos,
        .cuda,
        .ps4,
        .ps5,
        .hurd,
        .linux,
        .fuchsia,
        => .no,
        .windows => {
            if (target.isMinGW())
                return .yes;
            if (target.abi == .itanium)
                return if (target.cpu.arch == .x86_64) .yes else .no;
            // Which linker flavor is in use changes the answer for MSVC-style
            // targets, hence .depends_on_linker below:
            // if (bfd) return target.cpu.arch == .x86_64 else target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64;
            if (target.abi == .msvc or target.abi == .none)
                return .depends_on_linker;
            if (target.ofmt == .macho)
                return if (target.cpu.arch == .aarch64 or target.cpu.arch == .x86_64) .yes else .no;
            return if (target.cpu.arch == .x86_64) .yes else .no;
        },
        // Darwin family: forced PIC on 64-bit x86/Arm.
        .maccatalyst,
        .macos,
        .ios,
        .tvos,
        .watchos,
        .visionos,
        .driverkit,
        => if (target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64) .yes else .no,
        else => {
            return switch (target.cpu.arch) {
                .hexagon,
                .lanai,
                .avr,
                .riscv32,
                .riscv64,
                .csky,
                .xcore,
                .wasm32,
                .wasm64,
                .ve,
                .spirv32,
                .spirv64,
                => .no,
                .msp430 => .yes,
                else => {
                    if (target.ofmt == .macho)
                        return if (target.cpu.arch == .aarch64 or target.cpu.arch == .x86_64) .yes else .no;
                    return .no;
                },
            };
        },
    };
}
test "alignment functions - smoke test" {
    // Build a minimal x86_64-linux ELF target from the generic CPU model.
    const linux: std.Target.Os = .{ .tag = .linux, .version_range = .{ .none = {} } };
    const x86_64_target: std.Target = .{
        .abi = std.Target.Abi.default(.x86_64, linux.tag),
        .cpu = std.Target.Cpu.Model.generic(.x86_64).toCpu(.x86_64),
        .os = linux,
        .ofmt = .elf,
    };
    // Spot-check the expected x86_64-linux-gnu answers for each query.
    try std.testing.expect(isTlsSupported(x86_64_target));
    try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(x86_64_target));
    try std.testing.expect(minZeroWidthBitfieldAlignment(x86_64_target) == null);
    try std.testing.expect(!unnamedFieldAffectsAlignment(x86_64_target));
    try std.testing.expect(defaultAlignment(x86_64_target) == 16);
    try std.testing.expect(!packAllEnums(x86_64_target));
    try std.testing.expect(systemCompiler(x86_64_target) == .gcc);
}
test "target size/align tests" {
    // NOTE(review): comp is left undefined except for .target; this assumes
    // QualType.sizeof/alignof only read comp.target — confirm if Compilation
    // grows further dependencies.
    var comp: @import("Compilation.zig") = undefined;
    const linux: std.Target.Os = .{ .tag = .linux, .version_range = .{ .none = {} } };
    const x86_target: std.Target = .{
        .abi = std.Target.Abi.default(.x86, linux.tag),
        .cpu = std.Target.Cpu.Model.generic(.x86).toCpu(.x86),
        .os = linux,
        .ofmt = .elf,
    };
    comp.target = x86_target;
    // On 32-bit x86 linux, long long is 8 bytes but only 4-byte aligned.
    const tt: QualType = .long_long;
    try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp));
    try std.testing.expectEqual(@as(u64, 4), tt.alignof(&comp));
}
/// The canonical integer representation of nullptr_t.
/// Always 0, independent of the target.
pub fn nullRepr(_: std.Target) u64 {
    return 0;
}

View File

@ -167,7 +167,7 @@ fn genDecls(c: *AsmCodeGen) !void {
if (c.tree.comp.code_gen_options.debug != .strip) {
const sources = c.tree.comp.sources.values();
for (sources) |source| {
try c.data.print(" .file {d} \"{s}\"\n", .{ @intFromEnum(source.id) - 1, source.path });
try c.data.print(" .file {d} \"{s}\"\n", .{ @intFromEnum(source.id.index) + 1, source.path });
}
}

View File

@ -12,9 +12,8 @@ pub fn deinit(self: *const Assembly, gpa: Allocator) void {
}
pub fn writeToFile(self: Assembly, file: std.fs.File) !void {
var vec: [2]std.posix.iovec_const = .{
.{ .base = self.data.ptr, .len = self.data.len },
.{ .base = self.text.ptr, .len = self.text.len },
};
return file.writevAll(&vec);
var file_writer = file.writer(&.{});
var buffers = [_][]const u8{ self.data, self.text };
try file_writer.interface.writeSplatAll(&buffers, 1);
}

View File

@ -9,21 +9,31 @@ const Driver = aro.Driver;
const Toolchain = aro.Toolchain;
const assembly_backend = @import("assembly_backend");
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
var debug_allocator: std.heap.DebugAllocator(.{
.stack_trace_frames = 0,
// A unique value so that when a default-constructed
// GeneralPurposeAllocator is incorrectly passed to testing allocator, or
// vice versa, panic occurs.
.canary = @truncate(0xc647026dc6875134),
}) = .{};
pub fn main() u8 {
const gpa = if (@import("builtin").link_libc)
std.heap.raw_c_allocator
else
general_purpose_allocator.allocator();
debug_allocator.allocator();
defer if (!@import("builtin").link_libc) {
_ = general_purpose_allocator.deinit();
_ = debug_allocator.deinit();
};
var arena_instance = std.heap.ArenaAllocator.init(gpa);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var threaded: std.Io.Threaded = .init(gpa);
defer threaded.deinit();
const io = threaded.io();
const fast_exit = @import("builtin").mode != .Debug;
const args = process.argsAlloc(arena) catch {
@ -48,7 +58,7 @@ pub fn main() u8 {
} },
};
var comp = Compilation.initDefault(gpa, arena, &diagnostics, std.fs.cwd()) catch |er| switch (er) {
var comp = Compilation.initDefault(gpa, arena, io, &diagnostics, std.fs.cwd()) catch |er| switch (er) {
error.OutOfMemory => {
std.debug.print("out of memory\n", .{});
if (fast_exit) process.exit(1);
@ -60,7 +70,7 @@ pub fn main() u8 {
var driver: Driver = .{ .comp = &comp, .aro_name = aro_name, .diagnostics = &diagnostics };
defer driver.deinit();
var toolchain: Toolchain = .{ .driver = &driver, .filesystem = .{ .real = comp.cwd } };
var toolchain: Toolchain = .{ .driver = &driver };
defer toolchain.deinit();
driver.main(&toolchain, args, fast_exit, assembly_backend.genAsm) catch |er| switch (er) {

View File

@ -459,7 +459,7 @@ pub fn main() !void {
}
if (graph.needed_lazy_dependencies.entries.len != 0) {
var buffer: std.ArrayListUnmanaged(u8) = .empty;
var buffer: std.ArrayList(u8) = .empty;
for (graph.needed_lazy_dependencies.keys()) |k| {
try buffer.appendSlice(arena, k);
try buffer.append(arena, '\n');
@ -672,7 +672,7 @@ const Run = struct {
watch: bool,
web_server: if (!builtin.single_threaded) ?WebServer else ?noreturn,
/// Allocated into `gpa`.
memory_blocked_steps: std.ArrayListUnmanaged(*Step),
memory_blocked_steps: std.ArrayList(*Step),
/// Allocated into `gpa`.
step_stack: std.AutoArrayHashMapUnmanaged(*Step, void),
thread_pool: std.Thread.Pool,
@ -1468,7 +1468,7 @@ pub fn printErrorMessages(
if (error_style.verboseContext()) {
// Provide context for where these error messages are coming from by
// printing the corresponding Step subtree.
var step_stack: std.ArrayListUnmanaged(*Step) = .empty;
var step_stack: std.ArrayList(*Step) = .empty;
defer step_stack.deinit(gpa);
try step_stack.append(gpa, failing_step);
while (step_stack.items[step_stack.items.len - 1].dependants.items.len != 0) {

View File

@ -381,8 +381,8 @@ const BinaryElfSegment = struct {
};
const BinaryElfOutput = struct {
segments: std.ArrayListUnmanaged(*BinaryElfSegment),
sections: std.ArrayListUnmanaged(*BinaryElfSection),
segments: std.ArrayList(*BinaryElfSegment),
sections: std.ArrayList(*BinaryElfSection),
allocator: Allocator,
shstrtab: ?[]const u8,

View File

@ -109,7 +109,7 @@ pub fn main() !void {
const root_source_file_path = opt_root_source_file_path orelse
fatal("missing root source file path argument; see -h for usage", .{});
var interestingness_argv: std.ArrayListUnmanaged([]const u8) = .empty;
var interestingness_argv: std.ArrayList([]const u8) = .empty;
try interestingness_argv.ensureUnusedCapacity(arena, argv.len + 1);
interestingness_argv.appendAssumeCapacity(checker_path);
interestingness_argv.appendSliceAssumeCapacity(argv);

View File

@ -23,7 +23,7 @@ pub const Transformation = union(enum) {
delete_var_decl: struct {
var_decl_node: Ast.Node.Index,
/// Identifier nodes that reference the variable.
references: std.ArrayListUnmanaged(Ast.Node.Index),
references: std.ArrayList(Ast.Node.Index),
},
/// Replace an expression with `undefined`.
replace_with_undef: Ast.Node.Index,

View File

@ -29,6 +29,7 @@ pub fn preprocess(
error.OutOfMemory => |e| return e,
error.WriteFailed => unreachable,
};
try comp.initSearchPath(driver.includes.items, false);
if (hasAnyErrors(comp)) return error.ArgError;
@ -59,7 +60,7 @@ pub fn preprocess(
pp.preserve_whitespace = true;
pp.linemarkers = .line_directives;
pp.preprocessSources(&.{ source, builtin_macros, user_macros }) catch |err| switch (err) {
pp.preprocessSources(.{ .main = source, .builtin = builtin_macros, .command_line = user_macros }) catch |err| switch (err) {
error.FatalError => return error.PreprocessError,
else => |e| return e,
};
@ -71,7 +72,7 @@ pub fn preprocess(
if (maybe_dependencies) |dependencies| {
for (comp.sources.values()) |comp_source| {
if (comp_source.id == builtin_macros.id or comp_source.id == user_macros.id) continue;
if (comp_source.id == .unused or comp_source.id == .generated) continue;
if (comp_source.id.index == .unused or comp_source.id.index == .generated) continue;
const duped_path = try dependencies.allocator.dupe(u8, comp_source.path);
errdefer dependencies.allocator.free(duped_path);
try dependencies.list.append(dependencies.allocator, duped_path);

View File

@ -284,7 +284,7 @@ fn buildWasmBinary(
) !Cache.Path {
const gpa = context.gpa;
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
var argv: std.ArrayList([]const u8) = .empty;
try argv.appendSlice(arena, &.{
context.zig_exe_path, //

View File

@ -104,7 +104,7 @@ fn mainServer() !void {
@panic("internal test runner memory leak");
};
var string_bytes: std.ArrayListUnmanaged(u8) = .empty;
var string_bytes: std.ArrayList(u8) = .empty;
defer string_bytes.deinit(testing.allocator);
try string_bytes.append(testing.allocator, 0); // Reserve 0 for null.
@ -184,7 +184,7 @@ fn mainServer() !void {
const test_fn = builtin.test_functions[index];
const entry_addr = @intFromPtr(test_fn.func);
try server.serveU64Message(.fuzz_start_addr, entry_addr);
try server.serveU64Message(.fuzz_start_addr, fuzz_abi.fuzzer_unslide_address(entry_addr));
defer if (testing.allocator_instance.deinit() == .leak) std.process.exit(1);
is_fuzz_test = false;
fuzz_test_index = index;

View File

@ -79,6 +79,20 @@ pub fn transFnMacro(mt: *MacroTranslator) ParseError!void {
try block_scope.discardVariable(mangled_name);
}
// #define FOO(x)
if (mt.peek() == .eof) {
try block_scope.statements.append(mt.t.gpa, ZigTag.return_void.init());
const fn_decl = try ZigTag.pub_inline_fn.create(mt.t.arena, .{
.name = mt.name,
.params = fn_params,
.return_type = ZigTag.void_type.init(),
.body = try block_scope.complete(),
});
try mt.t.addTopLevelDecl(mt.name, fn_decl);
return;
}
const expr = try mt.parseCExpr(scope);
const last = mt.peek();
if (last != .eof)
@ -252,7 +266,7 @@ fn parseCNumLit(mt: *MacroTranslator) ParseError!ZigNode {
const lit_bytes = mt.tokSlice();
mt.i += 1;
var bytes = try std.ArrayListUnmanaged(u8).initCapacity(arena, lit_bytes.len + 3);
var bytes = try std.ArrayList(u8).initCapacity(arena, lit_bytes.len + 3);
const prefix = aro.Tree.Token.NumberPrefix.fromString(lit_bytes);
switch (prefix) {
@ -637,7 +651,7 @@ fn parseCPrimaryExpr(mt: *MacroTranslator, scope: *Scope) ParseError!ZigNode {
// for handling type macros (EVIL)
// TODO maybe detect and treat type macros as typedefs in parseCSpecifierQualifierList?
if (try mt.parseCTypeName(scope, true)) |type_name| {
if (try mt.parseCTypeName(scope)) |type_name| {
return type_name;
}
@ -825,6 +839,18 @@ fn parseCMulExpr(mt: *MacroTranslator, scope: *Scope) ParseError!ZigNode {
switch (mt.peek()) {
.asterisk => {
mt.i += 1;
switch (mt.peek()) {
.comma, .r_paren, .eof => {
// This is probably a pointer type
return ZigTag.c_pointer.create(mt.t.arena, .{
.is_const = false,
.is_volatile = false,
.is_allowzero = false,
.elem_type = node,
});
},
else => {},
}
const lhs = try mt.macroIntFromBool(node);
const rhs = try mt.macroIntFromBool(try mt.parseCCastExpr(scope));
node = try ZigTag.mul.create(mt.t.arena, .{ .lhs = lhs, .rhs = rhs });
@ -848,7 +874,7 @@ fn parseCMulExpr(mt: *MacroTranslator, scope: *Scope) ParseError!ZigNode {
fn parseCCastExpr(mt: *MacroTranslator, scope: *Scope) ParseError!ZigNode {
if (mt.eat(.l_paren)) {
if (try mt.parseCTypeName(scope, true)) |type_name| {
if (try mt.parseCTypeName(scope)) |type_name| {
while (true) {
const next_tok = mt.peek();
if (next_tok == .r_paren) {
@ -882,14 +908,14 @@ fn parseCCastExpr(mt: *MacroTranslator, scope: *Scope) ParseError!ZigNode {
}
// allow_fail is set when unsure if we are parsing a type-name
fn parseCTypeName(mt: *MacroTranslator, scope: *Scope, allow_fail: bool) ParseError!?ZigNode {
if (try mt.parseCSpecifierQualifierList(scope, allow_fail)) |node| {
fn parseCTypeName(mt: *MacroTranslator, scope: *Scope) ParseError!?ZigNode {
if (try mt.parseCSpecifierQualifierList(scope)) |node| {
return try mt.parseCAbstractDeclarator(node);
}
return null;
}
fn parseCSpecifierQualifierList(mt: *MacroTranslator, scope: *Scope, allow_fail: bool) ParseError!?ZigNode {
fn parseCSpecifierQualifierList(mt: *MacroTranslator, scope: *Scope) ParseError!?ZigNode {
const tok = mt.peek();
switch (tok) {
.macro_param, .macro_param_no_expand => {
@ -897,9 +923,9 @@ fn parseCSpecifierQualifierList(mt: *MacroTranslator, scope: *Scope, allow_fail:
// Assume that this is only a cast if the next token is ')'
// e.g. param)identifier
if (allow_fail and (mt.macro.tokens.len < mt.i + 3 or
if (mt.macro.tokens.len < mt.i + 3 or
mt.macro.tokens[mt.i + 1].id != .r_paren or
mt.macro.tokens[mt.i + 2].id != .identifier))
mt.macro.tokens[mt.i + 2].id != .identifier)
return null;
mt.i += 1;
@ -912,10 +938,10 @@ fn parseCSpecifierQualifierList(mt: *MacroTranslator, scope: *Scope, allow_fail:
if (mt.t.global_scope.blank_macros.contains(slice)) {
mt.i += 1;
return try mt.parseCSpecifierQualifierList(scope, allow_fail);
return try mt.parseCSpecifierQualifierList(scope);
}
if (!allow_fail or mt.t.typedefs.contains(mangled_name)) {
if (mt.t.typedefs.contains(mangled_name)) {
mt.i += 1;
if (Translator.builtin_typedef_map.get(mangled_name)) |ty| {
return try ZigTag.type.create(mt.t.arena, ty);
@ -952,21 +978,27 @@ fn parseCSpecifierQualifierList(mt: *MacroTranslator, scope: *Scope, allow_fail:
.keyword_enum, .keyword_struct, .keyword_union => {
const tag_name = mt.tokSlice();
mt.i += 1;
if (mt.peek() != .identifier) {
mt.i -= 1;
return null;
}
// struct Foo will be declared as struct_Foo by transRecordDecl
const identifier = mt.tokSlice();
try mt.expect(.identifier);
const name = try std.fmt.allocPrint(mt.t.arena, "{s}_{s}", .{ tag_name, identifier });
if (!mt.t.global_scope.contains(name)) {
try mt.fail("unable to translate C expr: '{s}' not found", .{name});
return error.ParseError;
}
return try ZigTag.identifier.create(mt.t.arena, name);
},
else => {},
}
if (allow_fail) return null;
try mt.fail("unable to translate C expr: unexpected token '{s}'", .{tok.symbol()});
return error.ParseError;
return null;
}
fn parseCNumericType(mt: *MacroTranslator) ParseError!ZigNode {
@ -1126,6 +1158,11 @@ fn parseCPostfixExprInner(mt: *MacroTranslator, scope: *Scope, type_name: ?ZigNo
switch (mt.peek()) {
.period => {
mt.i += 1;
const tok = mt.tokens[mt.i];
if (tok.id == .macro_param or tok.id == .macro_param_no_expand) {
try mt.fail("unable to translate C expr: field access using macro parameter", .{});
return error.ParseError;
}
const field_name = mt.tokSlice();
try mt.expect(.identifier);
@ -1133,6 +1170,11 @@ fn parseCPostfixExprInner(mt: *MacroTranslator, scope: *Scope, type_name: ?ZigNo
},
.arrow => {
mt.i += 1;
const tok = mt.tokens[mt.i];
if (tok.id == .macro_param or tok.id == .macro_param_no_expand) {
try mt.fail("unable to translate C expr: field access using macro parameter", .{});
return error.ParseError;
}
const field_name = mt.tokSlice();
try mt.expect(.identifier);
@ -1286,7 +1328,7 @@ fn parseCUnaryExpr(mt: *MacroTranslator, scope: *Scope) ParseError!ZigNode {
.keyword_sizeof => {
mt.i += 1;
const operand = if (mt.eat(.l_paren)) blk: {
const inner = (try mt.parseCTypeName(scope, false)).?;
const inner = (try mt.parseCTypeName(scope)) orelse try mt.parseCUnaryExpr(scope);
try mt.expect(.r_paren);
break :blk inner;
} else try mt.parseCUnaryExpr(scope);
@ -1298,7 +1340,7 @@ fn parseCUnaryExpr(mt: *MacroTranslator, scope: *Scope) ParseError!ZigNode {
// TODO this won't work if using <stdalign.h>'s
// #define alignof _Alignof
try mt.expect(.l_paren);
const operand = (try mt.parseCTypeName(scope, false)).?;
const operand = (try mt.parseCTypeName(scope)) orelse try mt.parseCUnaryExpr(scope);
try mt.expect(.r_paren);
return ZigTag.alignof.create(mt.t.arena, operand);

View File

@ -178,6 +178,7 @@ fn tokenizeMacro(allocator: mem.Allocator, source: []const u8, tok_list: *std.Ar
.buf = source,
.source = .unused,
.langopts = .{},
.splice_locs = &.{},
};
{
const name_tok = tokenizer.nextNoWS();

View File

@ -8,7 +8,7 @@ const Translator = @import("Translator.zig");
const Scope = @This();
pub const SymbolTable = std.StringArrayHashMapUnmanaged(ast.Node);
pub const AliasList = std.ArrayListUnmanaged(struct {
pub const AliasList = std.ArrayList(struct {
alias: []const u8,
name: []const u8,
});
@ -16,7 +16,7 @@ pub const AliasList = std.ArrayListUnmanaged(struct {
/// Associates a container (structure or union) with its relevant member functions.
pub const ContainerMemberFns = struct {
container_decl_ptr: *ast.Node,
member_fns: std.ArrayListUnmanaged(*ast.Payload.Func) = .empty,
member_fns: std.ArrayList(*ast.Payload.Func) = .empty,
};
pub const ContainerMemberFnsHashMap = std.AutoArrayHashMapUnmanaged(aro.QualType, ContainerMemberFns);
@ -55,7 +55,7 @@ pub const Condition = struct {
pub const Block = struct {
base: Scope,
translator: *Translator,
statements: std.ArrayListUnmanaged(ast.Node),
statements: std.ArrayList(ast.Node),
variables: AliasList,
mangle_count: u32 = 0,
label: ?[]const u8 = null,
@ -195,7 +195,7 @@ pub const Root = struct {
translator: *Translator,
sym_table: SymbolTable,
blank_macros: std.StringArrayHashMapUnmanaged(void),
nodes: std.ArrayListUnmanaged(ast.Node),
nodes: std.ArrayList(ast.Node),
container_member_fns_map: ContainerMemberFnsHashMap,
pub fn init(t: *Translator) Root {
@ -252,7 +252,7 @@ pub const Root = struct {
const gpa = root.translator.gpa;
const arena = root.translator.arena;
var member_names: std.StringArrayHashMapUnmanaged(u32) = .empty;
var member_names: std.StringArrayHashMapUnmanaged(void) = .empty;
defer member_names.deinit(gpa);
for (root.container_member_fns_map.values()) |members| {
member_names.clearRetainingCapacity();
@ -261,7 +261,7 @@ pub const Root = struct {
const payload: *ast.Payload.Container = @alignCast(@fieldParentPtr("base", members.container_decl_ptr.ptr_otherwise));
// Avoid duplication with field names
for (payload.data.fields) |field| {
try member_names.put(gpa, field.name, 0);
try member_names.put(gpa, field.name, {});
}
break :blk_record &payload.data.decls;
},
@ -278,34 +278,39 @@ pub const Root = struct {
};
const old_decls = decls_ptr.*;
const new_decls = try arena.alloc(ast.Node, old_decls.len + members.member_fns.items.len);
const new_decls = try arena.alloc(ast.Node, old_decls.len + members.member_fns.items.len * 2);
@memcpy(new_decls[0..old_decls.len], old_decls);
// Assume the allocator of payload.data.decls is arena,
// so don't add arena.free(old_variables).
const func_ref_vars = new_decls[old_decls.len..];
var count: u32 = 0;
// Add members without mangling them - only fields may cause name conflicts
for (members.member_fns.items) |func| {
const func_name = func.data.name.?;
const last_index = std.mem.lastIndexOf(u8, func_name, "_");
const last_name = if (last_index) |index| func_name[index + 1 ..] else continue;
var same_count: u32 = 0;
const gop = try member_names.getOrPutValue(gpa, last_name, same_count);
if (gop.found_existing) {
gop.value_ptr.* += 1;
same_count = gop.value_ptr.*;
}
const var_name = if (same_count == 0)
last_name
else
try std.fmt.allocPrint(arena, "{s}{d}", .{ last_name, same_count });
const member_name_slot = try member_names.getOrPutValue(gpa, func_name, {});
if (member_name_slot.found_existing) continue;
func_ref_vars[count] = try ast.Node.Tag.pub_var_simple.create(arena, .{
.name = var_name,
.init = try ast.Node.Tag.identifier.create(arena, func_name),
.name = func_name,
.init = try ast.Node.Tag.root_ref.create(arena, func_name),
});
count += 1;
}
for (members.member_fns.items) |func| {
const func_name = func.data.name.?;
const func_name_trimmed = std.mem.trimEnd(u8, func_name, "_");
const last_idx = std.mem.findLast(u8, func_name_trimmed, "_") orelse continue;
const func_name_alias = func_name[last_idx + 1 ..];
const member_name_slot = try member_names.getOrPutValue(gpa, func_name_alias, {});
if (member_name_slot.found_existing) continue;
func_ref_vars[count] = try ast.Node.Tag.pub_var_simple.create(arena, .{
.name = func_name_alias,
.init = try ast.Node.Tag.root_ref.create(arena, func_name),
});
count += 1;
}
decls_ptr.* = new_decls[0 .. old_decls.len + count];
}
}

View File

@ -82,12 +82,11 @@ pub fn getMangle(t: *Translator) u32 {
/// Convert an `aro.Source.Location` to a 'file:line:column' string.
pub fn locStr(t: *Translator, loc: aro.Source.Location) ![]const u8 {
const source = t.comp.getSource(loc.id);
const line_col = source.lineCol(loc);
const filename = source.path;
const expanded = loc.expand(t.comp);
const filename = expanded.path;
const line = source.physicalLine(loc);
const col = line_col.col;
const line = expanded.line_no;
const col = expanded.col;
return std.fmt.allocPrint(t.arena, "{s}:{d}:{d}", .{ filename, line, col });
}
@ -139,7 +138,11 @@ pub fn failDeclExtra(
// location
// pub const name = @compileError(msg);
const fail_msg = try std.fmt.allocPrint(t.arena, format, args);
const fail_decl = try ZigTag.fail_decl.create(t.arena, .{ .actual = name, .mangled = fail_msg });
const fail_decl = try ZigTag.fail_decl.create(t.arena, .{
.actual = name,
.mangled = fail_msg,
.local = scope.id != .root,
});
const str = try t.locStr(loc);
const location_comment = try std.fmt.allocPrint(t.arena, "// {s}", .{str});
@ -220,6 +223,7 @@ pub fn translate(options: Options) mem.Allocator.Error![]u8 {
defer allocating.deinit();
allocating.writer.writeAll(
\\const __root = @This();
\\pub const __builtin = @import("std").zig.c_translation.builtins;
\\pub const __helpers = @import("std").zig.c_translation.helpers;
\\
@ -297,7 +301,7 @@ fn prepopulateGlobalNameTable(t: *Translator) !void {
}
for (t.pp.defines.keys(), t.pp.defines.values()) |name, macro| {
if (macro.is_builtin) continue;
if (macro.isBuiltin()) continue;
if (!t.isSelfDefinedMacro(name, macro)) {
try t.global_names.put(t.gpa, name, {});
}
@ -527,6 +531,13 @@ fn transRecordDecl(t: *Translator, scope: *Scope, record_qt: QualType) Error!voi
break :init ZigTag.opaque_literal.init();
}
// Demote record to opaque if it contains an opaque field
if (t.typeWasDemotedToOpaque(field.qt)) {
try t.opaque_demotes.put(t.gpa, base.qt, {});
try t.warn(scope, field_loc, "{s} demoted to opaque type - has opaque field", .{container_kind_name});
break :init ZigTag.opaque_literal.init();
}
var field_name = field.name.lookup(t.comp);
if (field.name_tok == 0) {
field_name = try std.fmt.allocPrint(t.arena, "unnamed_{d}", .{unnamed_field_count});
@ -856,12 +867,18 @@ fn transVarDecl(t: *Translator, scope: *Scope, variable: Node.Variable) Error!vo
break :blk null;
};
// TODO actually set with @export/@extern
const linkage = variable.qt.linkage(t.comp);
if (linkage != .strong) {
try t.warn(scope, variable.name_tok, "TODO {s} linkage ignored", .{@tagName(linkage)});
}
const alignment: ?c_uint = variable.qt.requestedAlignment(t.comp) orelse null;
var node = try ZigTag.var_decl.create(t.arena, .{
.is_pub = toplevel,
.is_const = is_const,
.is_extern = is_extern,
.is_export = toplevel and variable.storage_class == .auto,
.is_export = toplevel and variable.storage_class == .auto and linkage == .strong,
.is_threadlocal = variable.thread_local,
.linksection_string = linksection_string,
.alignment = alignment,
@ -1013,7 +1030,7 @@ fn transStaticAssert(t: *Translator, scope: *Scope, static_assert: Node.StaticAs
try scope.appendNode(assert_node);
}
fn transGlobalAsm(t: *Translator, scope: *Scope, global_asm: Node.SimpleAsm) Error!void {
fn transGlobalAsm(t: *Translator, scope: *Scope, global_asm: Node.GlobalAsm) Error!void {
const asm_string = t.tree.value_map.get(global_asm.asm_str).?;
const bytes = t.comp.interner.get(asm_string.ref()).bytes;
@ -1071,6 +1088,17 @@ fn transType(t: *Translator, scope: *Scope, qt: QualType, source_loc: TokenIndex
.double => return ZigTag.type.create(t.arena, "f64"),
.long_double => return ZigTag.type.create(t.arena, "c_longdouble"),
.float128 => return ZigTag.type.create(t.arena, "f128"),
.bf16,
.float32,
.float64,
.float32x,
.float64x,
.float128x,
.dfloat32,
.dfloat64,
.dfloat128,
.dfloat64x,
=> return t.fail(error.UnsupportedType, source_loc, "TODO support float type: '{s}'", .{try t.getTypeStr(qt)}),
},
.pointer => |pointer_ty| {
const child_qt = pointer_ty.child;
@ -1165,17 +1193,8 @@ fn headFieldAlignment(t: *Translator, record_decl: aro.Type.Record) ?c_uint {
const parent_ptr_alignment_bits = record_decl.layout.?.pointer_alignment_bits;
const parent_ptr_alignment = parent_ptr_alignment_bits / bits_per_byte;
var max_field_alignment_bits: u64 = 0;
for (record_decl.fields) |field| {
if (field.qt.getRecord(t.comp)) |field_record_decl| {
const child_record_alignment = field_record_decl.layout.?.field_alignment_bits;
if (child_record_alignment > max_field_alignment_bits)
max_field_alignment_bits = child_record_alignment;
} else {
const field_size = field.layout.size_bits;
if (field_size > max_field_alignment_bits)
max_field_alignment_bits = field_size;
}
}
for (record_decl.fields) |field|
max_field_alignment_bits = @max(max_field_alignment_bits, bits_per_byte * field.qt.alignof(t.comp));
if (max_field_alignment_bits != parent_ptr_alignment_bits) {
return parent_ptr_alignment;
} else {
@ -1227,10 +1246,7 @@ fn alignmentForField(
// Records have a natural alignment when used as a field, and their size is
// a multiple of this alignment value. For all other types, the natural alignment
// is their size.
const field_natural_alignment_bits: u64 = if (field.qt.getRecord(t.comp)) |record|
record.layout.?.field_alignment_bits
else
field_size_bits;
const field_natural_alignment_bits: u64 = bits_per_byte * field.qt.alignof(t.comp);
const rem_bits = field_offset_bits % field_natural_alignment_bits;
// If there's a remainder, then the alignment is smaller than the field's
@ -1351,13 +1367,19 @@ fn transFnType(
}
};
// TODO actually set with @export/@extern
const linkage = func_qt.linkage(t.comp);
if (linkage != .strong) {
try t.warn(scope, source_loc, "TODO {s} linkage ignored", .{@tagName(linkage)});
}
const payload = try t.arena.create(ast.Payload.Func);
payload.* = .{
.base = .{ .tag = .func },
.data = .{
.is_pub = ctx.is_pub,
.is_extern = ctx.is_extern,
.is_export = ctx.is_export,
.is_export = ctx.is_export and linkage == .strong,
.is_inline = ctx.is_always_inline,
.is_var_args = switch (func_ty.kind) {
.normal => false,
@ -1446,18 +1468,7 @@ fn typeIsOpaque(t: *Translator, qt: QualType) bool {
}
fn typeWasDemotedToOpaque(t: *Translator, qt: QualType) bool {
const base = qt.base(t.comp);
switch (base.type) {
.@"struct", .@"union" => |record_ty| {
if (t.opaque_demotes.contains(base.qt)) return true;
for (record_ty.fields) |field| {
if (t.typeWasDemotedToOpaque(field.qt)) return true;
}
return false;
},
.@"enum" => return t.opaque_demotes.contains(base.qt),
else => return false,
}
return t.opaque_demotes.contains(qt);
}
fn typeHasWrappingOverflow(t: *Translator, qt: QualType) bool {
@ -1539,6 +1550,9 @@ fn transStmt(t: *Translator, scope: *Scope, stmt: Node.Index) TransError!ZigNode
.goto_stmt, .computed_goto_stmt, .labeled_stmt => {
return t.fail(error.UnsupportedTranslation, stmt.tok(t.tree), "TODO goto", .{});
},
.asm_stmt => {
return t.fail(error.UnsupportedTranslation, stmt.tok(t.tree), "TODO asm stmt", .{});
},
else => return t.transExprCoercing(scope, stmt, .unused),
}
}
@ -2197,7 +2211,7 @@ fn transExpr(t: *Translator, scope: *Scope, expr: Node.Index, used: ResultUsed)
.default_stmt,
.goto_stmt,
.computed_goto_stmt,
.gnu_asm_simple,
.asm_stmt,
.global_asm,
.typedef,
.struct_decl,
@ -3031,6 +3045,10 @@ fn transMemberAccess(
.normal => member_access.base.qt(t.tree),
.ptr => member_access.base.qt(t.tree).childType(t.comp),
};
if (t.typeWasDemotedToOpaque(base_info)) {
return t.fail(error.UnsupportedTranslation, member_access.access_tok, "member access of demoted record", .{});
}
const record = base_info.getRecord(t.comp).?;
const field = record.fields[member_access.member_index];
const field_name = if (field.name_tok == 0) t.anonymous_record_field_names.get(.{
@ -3551,7 +3569,7 @@ fn transArrayInit(
const array_item_qt = array_init.container_qt.childType(t.comp);
const array_item_type = try t.transType(scope, array_item_qt, array_init.l_brace_tok);
var maybe_lhs: ?ZigNode = null;
var val_list: std.ArrayListUnmanaged(ZigNode) = .empty;
var val_list: std.ArrayList(ZigNode) = .empty;
defer val_list.deinit(t.gpa);
var i: usize = 0;
while (i < array_init.items.len) {
@ -3671,6 +3689,10 @@ fn transTypeInfo(
const operand = operand: {
if (typeinfo.expr) |expr| {
const operand = try t.transExpr(scope, expr, .used);
if (operand.tag() == .string_literal) {
const deref = try ZigTag.deref.create(t.arena, operand);
break :operand try ZigTag.typeof.create(t.arena, deref);
}
break :operand try ZigTag.typeof.create(t.arena, operand);
}
break :operand try t.transType(scope, typeinfo.operand_qt, typeinfo.op_tok);
@ -3962,7 +3984,8 @@ fn createFlexibleMemberFn(
// return @ptrCast(&self.*.<field_name>);
const address_of = try ZigTag.address_of.create(t.arena, field_access);
const casted = try ZigTag.ptr_cast.create(t.arena, address_of);
const aligned = try ZigTag.align_cast.create(t.arena, address_of);
const casted = try ZigTag.ptr_cast.create(t.arena, aligned);
const return_stmt = try ZigTag.@"return".create(t.arena, casted);
const body = try ZigTag.block_single.create(t.arena, return_stmt);
@ -3994,7 +4017,7 @@ fn transMacros(t: *Translator) !void {
defer pattern_list.deinit(t.gpa);
for (t.pp.defines.keys(), t.pp.defines.values()) |name, macro| {
if (macro.is_builtin) continue;
if (macro.isBuiltin()) continue;
if (t.global_scope.containsNow(name)) {
continue;
}

View File

@ -247,6 +247,9 @@ pub const Node = extern union {
/// comptime { if (!(lhs)) @compileError(rhs); }
static_assert,
/// __root.<name>
root_ref,
pub const last_no_payload_tag = Tag.@"break";
pub const no_payload_count = @intFromEnum(last_no_payload_tag) + 1;
@ -394,7 +397,8 @@ pub const Node = extern union {
.block => Payload.Block,
.c_pointer, .single_pointer => Payload.Pointer,
.array_type, .null_sentinel_array_type => Payload.Array,
.arg_redecl, .alias, .fail_decl => Payload.ArgRedecl,
.arg_redecl, .alias => Payload.ArgRedecl,
.fail_decl => Payload.FailDecl,
.var_simple, .pub_var_simple, .wrapped_local, .mut_str => Payload.SimpleVarDecl,
.enum_constant => Payload.EnumConstant,
.array_filler => Payload.ArrayFiller,
@ -405,6 +409,7 @@ pub const Node = extern union {
.builtin_extern => Payload.Extern,
.helper_call => Payload.HelperCall,
.helper_ref => Payload.HelperRef,
.root_ref => Payload.RootRef,
};
}
@ -708,6 +713,15 @@ pub const Payload = struct {
},
};
pub const FailDecl = struct {
base: Payload,
data: struct {
actual: []const u8,
mangled: []const u8,
local: bool,
},
};
pub const SimpleVarDecl = struct {
base: Payload,
data: struct {
@ -791,6 +805,11 @@ pub const Payload = struct {
base: Payload,
data: []const u8,
};
pub const RootRef = struct {
base: Payload,
data: []const u8,
};
};
/// Converts the nodes into a Zig Ast.
@ -860,7 +879,7 @@ const Context = struct {
gpa: Allocator,
buf: std.ArrayList(u8) = .empty,
nodes: std.zig.Ast.NodeList = .empty,
extra_data: std.ArrayListUnmanaged(u32) = .empty,
extra_data: std.ArrayList(u32) = .empty,
tokens: std.zig.Ast.TokenList = .empty,
fn addTokenFmt(c: *Context, tag: TokenTag, comptime format: []const u8, args: anytype) Allocator.Error!TokenIndex {
@ -1203,12 +1222,25 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
},
.fail_decl => {
const payload = node.castTag(.fail_decl).?.data;
// pub const name = @compileError(msg);
_ = try c.addToken(.keyword_pub, "pub");
// pub const name = (if (true))? @compileError(msg);
if (!payload.local) _ = try c.addToken(.keyword_pub, "pub");
const const_tok = try c.addToken(.keyword_const, "const");
_ = try c.addIdentifier(payload.actual);
_ = try c.addToken(.equal, "=");
var if_tok: TokenIndex = undefined;
var true_node: NodeIndex = undefined;
if (payload.local) {
if_tok = try c.addToken(.keyword_if, "if");
_ = try c.addToken(.l_paren, "(");
true_node = try c.addNode(.{
.tag = .identifier,
.main_token = try c.addToken(.identifier, "true"),
.data = undefined,
});
_ = try c.addToken(.r_paren, ")");
}
const compile_error_tok = try c.addToken(.builtin, "@compileError");
_ = try c.addToken(.l_paren, "(");
const err_msg_tok = try c.addTokenFmt(.string_literal, "\"{f}\"", .{std.zig.fmtString(payload.mangled)});
@ -1233,7 +1265,16 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
.data = .{
.opt_node_and_opt_node = .{
.none, // Type expression
compile_error.toOptional(), // Init expression
if (payload.local) // Init expression
(try c.addNode(.{
.tag = .if_simple,
.main_token = if_tok,
.data = .{ .node_and_node = .{
true_node, compile_error,
} },
})).toOptional()
else
compile_error.toOptional(),
},
},
});
@ -2158,6 +2199,15 @@ fn renderNode(c: *Context, node: Node) Allocator.Error!NodeIndex {
});
},
.@"anytype" => unreachable, // Handled in renderParams
.root_ref => {
const payload = node.castTag(.root_ref).?.data;
const root_tok = try c.addNode(.{
.tag = .identifier,
.main_token = try c.addIdentifier("__root"),
.data = undefined,
});
return renderFieldAccess(c, root_tok, payload);
},
}
}
@ -2480,6 +2530,7 @@ fn renderNodeGrouped(c: *Context, node: Node) !NodeIndex {
.sqrt,
.trunc,
.floor,
.root_ref,
=> {
// no grouping needed
return renderNode(c, node);

View File

@ -22,7 +22,7 @@ pub fn main() u8 {
defer threaded.deinit();
const io = threaded.io();
var args = process.argsAlloc(arena) catch {
const args = process.argsAlloc(arena) catch {
std.debug.print("ran out of memory allocating arguments\n", .{});
if (fast_exit) process.exit(1);
return 1;
@ -58,7 +58,7 @@ pub fn main() u8 {
var driver: aro.Driver = .{ .comp = &comp, .diagnostics = &diagnostics, .aro_name = "aro" };
defer driver.deinit();
var toolchain: aro.Toolchain = .{ .driver = &driver, .filesystem = .{ .real = comp.cwd } };
var toolchain: aro.Toolchain = .{ .driver = &driver };
defer toolchain.deinit();
translate(&driver, &toolchain, args, zig_integration) catch |err| switch (err) {
@ -149,7 +149,7 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration
break :args args[0..i];
};
const user_macros = macros: {
var macro_buf: std.ArrayListUnmanaged(u8) = .empty;
var macro_buf: std.ArrayList(u8) = .empty;
defer macro_buf.deinit(gpa);
var discard_buf: [256]u8 = undefined;
@ -182,12 +182,10 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration
error.OutOfMemory => return error.OutOfMemory,
error.TooManyMultilibs => return d.fatal("found more than one multilib with the same priority", .{}),
};
tc.defineSystemIncludes() catch |er| switch (er) {
error.OutOfMemory => return error.OutOfMemory,
error.AroIncludeNotFound => return d.fatal("unable to find Aro builtin headers", .{}),
};
try tc.defineSystemIncludes();
try d.comp.initSearchPath(d.includes.items, d.verbose_search_path);
const builtin_macros = d.comp.generateBuiltinMacros(.include_system_defines) catch |err| switch (err) {
const builtin_macros = d.comp.generateBuiltinMacros(d.system_defines) catch |err| switch (err) {
error.FileTooBig => return d.fatal("builtin macro source exceeded max size", .{}),
else => |e| return e,
};
@ -205,7 +203,13 @@ fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8, zig_integration
if (opt_dep_file) |*dep_file| pp.dep_file = dep_file;
try pp.preprocessSources(&.{ source, builtin_macros, user_macros });
try pp.preprocessSources(.{
.main = source,
.builtin = builtin_macros,
.command_line = user_macros,
.imacros = d.imacros.items,
.implicit_includes = d.implicit_includes.items,
});
var c_tree = try pp.parse();
defer c_tree.deinit();

View File

@ -11,7 +11,7 @@ const Oom = error{OutOfMemory};
pub const Decl = @import("Decl.zig");
pub var files: std.StringArrayHashMapUnmanaged(File) = .empty;
pub var decls: std.ArrayListUnmanaged(Decl) = .empty;
pub var decls: std.ArrayList(Decl) = .empty;
pub var modules: std.StringArrayHashMapUnmanaged(File.Index) = .empty;
file: File.Index,

View File

@ -29,7 +29,7 @@ const Node = Document.Node;
const ExtraIndex = Document.ExtraIndex;
const ExtraData = Document.ExtraData;
const StringIndex = Document.StringIndex;
const ArrayList = std.ArrayListUnmanaged;
const ArrayList = std.ArrayList;
nodes: Node.List = .{},
extra: ArrayList(u32) = .empty,

View File

@ -116,13 +116,18 @@ const Executable = struct {
"failed to init memory map for coverage file '{s}': {t}",
.{ &coverage_file_name, e },
);
map.appendSliceAssumeCapacity(mem.asBytes(&abi.SeenPcsHeader{
map.appendSliceAssumeCapacity(@ptrCast(&abi.SeenPcsHeader{
.n_runs = 0,
.unique_runs = 0,
.pcs_len = pcs.len,
}));
map.appendNTimesAssumeCapacity(0, pc_bitset_usizes * @sizeOf(usize));
map.appendSliceAssumeCapacity(mem.sliceAsBytes(pcs));
// Relocations have been applied to `pcs` so it contains runtime addresses (with slide
// applied). We need to translate these to the virtual addresses as on disk.
for (pcs) |pc| {
const pc_vaddr = fuzzer_unslide_address(pc);
map.appendSliceAssumeCapacity(@ptrCast(&pc_vaddr));
}
return map;
} else {
const size = coverage_file.getEndPos() catch |e| panic(
@ -215,7 +220,16 @@ const Executable = struct {
.{ self.pc_counters.len, pcs.len },
);
self.pc_digest = std.hash.Wyhash.hash(0, mem.sliceAsBytes(pcs));
self.pc_digest = digest: {
// Relocations have been applied to `pcs` so it contains runtime addresses (with slide
// applied). We need to translate these to the virtual addresses as on disk.
var h: std.hash.Wyhash = .init(0);
for (pcs) |pc| {
const pc_vaddr = fuzzer_unslide_address(pc);
h.update(@ptrCast(&pc_vaddr));
}
break :digest h.final();
};
self.shared_seen_pcs = getCoverageFile(cache_dir, pcs, self.pc_digest);
return self;
@ -266,10 +280,10 @@ const Instrumentation = struct {
/// Values that have been constant operands in comparisons and switch cases.
/// There may be duplicates in this array if they came from different addresses, which is
/// fine as they are likely more important and hence more likely to be selected.
const_vals2: std.ArrayListUnmanaged(u16) = .empty,
const_vals4: std.ArrayListUnmanaged(u32) = .empty,
const_vals8: std.ArrayListUnmanaged(u64) = .empty,
const_vals16: std.ArrayListUnmanaged(u128) = .empty,
const_vals2: std.ArrayList(u16) = .empty,
const_vals4: std.ArrayList(u32) = .empty,
const_vals8: std.ArrayList(u64) = .empty,
const_vals16: std.ArrayList(u128) = .empty,
/// A minimal state for this struct which instrumentation can function on.
/// Used before this structure is initialized to avoid illegal behavior
@ -370,11 +384,11 @@ const Fuzzer = struct {
/// Minimized past inputs leading to new pc hits.
/// These are randomly mutated in round-robin fashion
/// Element zero is always an empty input. It is gauraunteed no other elements are empty.
corpus: std.ArrayListUnmanaged([]const u8),
corpus: std.ArrayList([]const u8),
corpus_pos: usize,
/// List of past mutations that have led to new inputs. This way, the mutations that are the
/// most effective are the most likely to be selected again. Starts with one of each mutation.
mutations: std.ArrayListUnmanaged(Mutation) = .empty,
mutations: std.ArrayList(Mutation) = .empty,
/// Filesystem directory containing found inputs for future runs
corpus_dir: std.fs.Dir,
@ -622,6 +636,14 @@ export fn fuzzer_main(limit_kind: abi.LimitKind, amount: u64) void {
}
}
export fn fuzzer_unslide_address(addr: usize) usize {
const si = std.debug.getSelfDebugInfo() catch @compileError("unsupported");
const slide = si.getModuleSlide(std.debug.getDebugInfoAllocator(), addr) catch |err| {
std.debug.panic("failed to find virtual address slide: {t}", .{err});
};
return addr - slide;
}
/// Helps determine run uniqueness in the face of recursion.
/// Currently not used by the fuzzer.
export threadlocal var __sancov_lowest_stack: usize = 0;
@ -1185,13 +1207,13 @@ const Mutation = enum {
const j = rng.uintAtMostBiased(usize, corpus[splice_i].len - len);
out.appendSliceAssumeCapacity(corpus[splice_i][j..][0..len]);
},
.@"const" => out.appendSliceAssumeCapacity(mem.asBytes(
.@"const" => out.appendSliceAssumeCapacity(@ptrCast(
&data_ctx[rng.uintLessThanBiased(usize, data_ctx.len)],
)),
.small => out.appendSliceAssumeCapacity(mem.asBytes(
.small => out.appendSliceAssumeCapacity(@ptrCast(
&mem.nativeTo(data_ctx[0], rng.int(SmallValue), data_ctx[1]),
)),
.few => out.appendSliceAssumeCapacity(mem.asBytes(
.few => out.appendSliceAssumeCapacity(@ptrCast(
&fewValue(rng, data_ctx[0], data_ctx[1]),
)),
}
@ -1286,7 +1308,7 @@ const Mutation = enum {
}
};
/// Like `std.ArrayListUnmanaged(u8)` but backed by memory mapping.
/// Like `std.ArrayList(u8)` but backed by memory mapping.
pub const MemoryMappedList = struct {
/// Contents of the list.
///

View File

@ -1063,10 +1063,10 @@ pub const Manifest = struct {
const dep_file_contents = try dir.readFileAlloc(dep_file_sub_path, gpa, .limited(manifest_file_size_max));
defer gpa.free(dep_file_contents);
var error_buf: std.ArrayListUnmanaged(u8) = .empty;
var error_buf: std.ArrayList(u8) = .empty;
defer error_buf.deinit(gpa);
var resolve_buf: std.ArrayListUnmanaged(u8) = .empty;
var resolve_buf: std.ArrayList(u8) = .empty;
defer resolve_buf.deinit(gpa);
var it: DepTokenizer = .{ .bytes = dep_file_contents };
@ -1217,7 +1217,7 @@ pub const Manifest = struct {
self.files.deinit(self.cache.gpa);
}
pub fn populateFileSystemInputs(man: *Manifest, buf: *std.ArrayListUnmanaged(u8)) Allocator.Error!void {
pub fn populateFileSystemInputs(man: *Manifest, buf: *std.ArrayList(u8)) Allocator.Error!void {
assert(@typeInfo(std.zig.Server.Message.PathPrefix).@"enum".fields.len == man.cache.prefixes_len);
buf.clearRetainingCapacity();
const gpa = man.cache.gpa;

View File

@ -363,7 +363,7 @@ pub const Token = union(enum) {
};
/// Resolve escapes in target or prereq. Only valid with .target_must_resolve or .prereq_must_resolve.
pub fn resolve(self: Token, gpa: Allocator, list: *std.ArrayListUnmanaged(u8)) error{OutOfMemory}!void {
pub fn resolve(self: Token, gpa: Allocator, list: *std.ArrayList(u8)) error{OutOfMemory}!void {
switch (self) {
.target_must_resolve => |bytes| {
var state: enum { start, escape, dollar } = .start;
@ -429,7 +429,7 @@ pub const Token = union(enum) {
}
}
pub fn printError(self: Token, gpa: Allocator, list: *std.ArrayListUnmanaged(u8)) error{OutOfMemory}!void {
pub fn printError(self: Token, gpa: Allocator, list: *std.ArrayList(u8)) error{OutOfMemory}!void {
switch (self) {
.target, .target_must_resolve, .prereq, .prereq_must_resolve => unreachable, // not an error
.incomplete_quoted_prerequisite,
@ -1027,8 +1027,8 @@ fn depTokenizer(input: []const u8, expect: []const u8) !void {
defer arena_allocator.deinit();
var it: Tokenizer = .{ .bytes = input };
var buffer: std.ArrayListUnmanaged(u8) = .empty;
var resolve_buf: std.ArrayListUnmanaged(u8) = .empty;
var buffer: std.ArrayList(u8) = .empty;
var resolve_buf: std.ArrayList(u8) = .empty;
var i: usize = 0;
while (it.next()) |token| {
if (i != 0) try buffer.appendSlice(arena, "\n");
@ -1076,11 +1076,11 @@ fn depTokenizer(input: []const u8, expect: []const u8) !void {
try testing.expectEqualStrings(expect, buffer.items);
}
fn printCharValues(gpa: Allocator, list: *std.ArrayListUnmanaged(u8), bytes: []const u8) !void {
fn printCharValues(gpa: Allocator, list: *std.ArrayList(u8), bytes: []const u8) !void {
for (bytes) |b| try list.append(gpa, printable_char_tab[b]);
}
fn printUnderstandableChar(gpa: Allocator, list: *std.ArrayListUnmanaged(u8), char: u8) !void {
fn printUnderstandableChar(gpa: Allocator, list: *std.ArrayList(u8), char: u8) !void {
if (std.ascii.isPrint(char)) {
try list.print(gpa, "'{c}'", .{char});
} else {

View File

@ -33,7 +33,7 @@ coverage_files: std.AutoArrayHashMapUnmanaged(u64, CoverageMap),
queue_mutex: std.Thread.Mutex,
queue_cond: std.Thread.Condition,
msg_queue: std.ArrayListUnmanaged(Msg),
msg_queue: std.ArrayList(Msg),
pub const Mode = union(enum) {
forever: struct { ws: *Build.WebServer },
@ -65,7 +65,7 @@ const CoverageMap = struct {
coverage: Coverage,
source_locations: []Coverage.SourceLocation,
/// Elements are indexes into `source_locations` pointing to the unit tests that are being fuzz tested.
entry_points: std.ArrayListUnmanaged(u32),
entry_points: std.ArrayList(u32),
start_timestamp: i64,
fn deinit(cm: *CoverageMap, gpa: Allocator) void {
@ -85,7 +85,7 @@ pub fn init(
mode: Mode,
) Allocator.Error!Fuzz {
const run_steps: []const *Step.Run = steps: {
var steps: std.ArrayListUnmanaged(*Step.Run) = .empty;
var steps: std.ArrayList(*Step.Run) = .empty;
defer steps.deinit(gpa);
const rebuild_node = root_prog_node.start("Rebuilding Unit Tests", 0);
defer rebuild_node.end();
@ -383,7 +383,14 @@ fn prepareTables(fuzz: *Fuzz, run_step: *Step.Run, coverage_id: u64) error{ OutO
errdefer gop.value_ptr.coverage.deinit(fuzz.gpa);
const rebuilt_exe_path = run_step.rebuilt_executable.?;
var debug_info = std.debug.Info.load(fuzz.gpa, rebuilt_exe_path, &gop.value_ptr.coverage) catch |err| {
const target = run_step.producer.?.rootModuleTarget();
var debug_info = std.debug.Info.load(
fuzz.gpa,
rebuilt_exe_path,
&gop.value_ptr.coverage,
target.ofmt,
target.cpu.arch,
) catch |err| {
log.err("step '{s}': failed to load debug information for '{f}': {s}", .{
run_step.step.name, rebuilt_exe_path, @errorName(err),
});
@ -479,9 +486,23 @@ fn addEntryPoint(fuzz: *Fuzz, coverage_id: u64, addr: u64) error{ AlreadyReporte
if (false) {
const sl = coverage_map.source_locations[index];
const file_name = coverage_map.coverage.stringAt(coverage_map.coverage.fileAt(sl.file).basename);
log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{
addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1],
});
if (pcs.len == 1) {
log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index 0 (final)", .{
addr, file_name, sl.line, sl.column,
});
} else if (index == 0) {
log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index 0 before {x}", .{
addr, file_name, sl.line, sl.column, pcs[index + 1],
});
} else if (index == pcs.len - 1) {
log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} (final) after {x}", .{
addr, file_name, sl.line, sl.column, index, pcs[index - 1],
});
} else {
log.debug("server found entry point for 0x{x} at {s}:{d}:{d} - index {d} between {x} and {x}", .{
addr, file_name, sl.line, sl.column, index, pcs[index - 1], pcs[index + 1],
});
}
}
try coverage_map.entry_points.append(fuzz.gpa, @intCast(index));
}

View File

@ -721,18 +721,18 @@ const MachODumper = struct {
gpa: Allocator,
data: []const u8,
header: macho.mach_header_64,
segments: std.ArrayListUnmanaged(macho.segment_command_64) = .empty,
sections: std.ArrayListUnmanaged(macho.section_64) = .empty,
symtab: std.ArrayListUnmanaged(macho.nlist_64) = .empty,
strtab: std.ArrayListUnmanaged(u8) = .empty,
indsymtab: std.ArrayListUnmanaged(u32) = .empty,
imports: std.ArrayListUnmanaged([]const u8) = .empty,
segments: std.ArrayList(macho.segment_command_64) = .empty,
sections: std.ArrayList(macho.section_64) = .empty,
symtab: std.ArrayList(macho.nlist_64) = .empty,
strtab: std.ArrayList(u8) = .empty,
indsymtab: std.ArrayList(u32) = .empty,
imports: std.ArrayList([]const u8) = .empty,
fn parse(ctx: *ObjectContext) !void {
var it = ctx.getLoadCommandIterator();
var it = try ctx.getLoadCommandIterator();
var i: usize = 0;
while (it.next()) |cmd| {
switch (cmd.cmd()) {
while (try it.next()) |cmd| {
switch (cmd.hdr.cmd) {
.SEGMENT_64 => {
const seg = cmd.cast(macho.segment_command_64).?;
try ctx.segments.append(ctx.gpa, seg);
@ -771,14 +771,13 @@ const MachODumper = struct {
return mem.sliceTo(@as([*:0]const u8, @ptrCast(ctx.strtab.items.ptr + off)), 0);
}
fn getLoadCommandIterator(ctx: ObjectContext) macho.LoadCommandIterator {
const data = ctx.data[@sizeOf(macho.mach_header_64)..][0..ctx.header.sizeofcmds];
return .{ .ncmds = ctx.header.ncmds, .buffer = data };
fn getLoadCommandIterator(ctx: ObjectContext) !macho.LoadCommandIterator {
return .init(&ctx.header, ctx.data[@sizeOf(macho.mach_header_64)..]);
}
fn getLoadCommand(ctx: ObjectContext, cmd: macho.LC) ?macho.LoadCommandIterator.LoadCommand {
var it = ctx.getLoadCommandIterator();
while (it.next()) |lc| if (lc.cmd() == cmd) {
fn getLoadCommand(ctx: ObjectContext, cmd: macho.LC) !?macho.LoadCommandIterator.LoadCommand {
var it = try ctx.getLoadCommandIterator();
while (try it.next()) |lc| if (lc.hdr.cmd == cmd) {
return lc;
};
return null;
@ -872,9 +871,9 @@ const MachODumper = struct {
\\LC {d}
\\cmd {s}
\\cmdsize {d}
, .{ index, @tagName(lc.cmd()), lc.cmdsize() });
, .{ index, @tagName(lc.hdr.cmd), lc.hdr.cmdsize });
switch (lc.cmd()) {
switch (lc.hdr.cmd) {
.SEGMENT_64 => {
const seg = lc.cast(macho.segment_command_64).?;
try writer.writeByte('\n');
@ -1592,9 +1591,9 @@ const MachODumper = struct {
.headers => {
try ObjectContext.dumpHeader(ctx.header, writer);
var it = ctx.getLoadCommandIterator();
var it = try ctx.getLoadCommandIterator();
var i: usize = 0;
while (it.next()) |cmd| {
while (try it.next()) |cmd| {
try ObjectContext.dumpLoadCommand(cmd, i, writer);
try writer.writeByte('\n');
@ -1615,7 +1614,7 @@ const MachODumper = struct {
.dyld_weak_bind,
.dyld_lazy_bind,
=> {
const cmd = ctx.getLoadCommand(.DYLD_INFO_ONLY) orelse
const cmd = try ctx.getLoadCommand(.DYLD_INFO_ONLY) orelse
return step.fail("no dyld info found", .{});
const lc = cmd.cast(macho.dyld_info_command).?;
@ -1649,7 +1648,7 @@ const MachODumper = struct {
},
.exports => blk: {
if (ctx.getLoadCommand(.DYLD_INFO_ONLY)) |cmd| {
if (try ctx.getLoadCommand(.DYLD_INFO_ONLY)) |cmd| {
const lc = cmd.cast(macho.dyld_info_command).?;
if (lc.export_size > 0) {
const data = ctx.data[lc.export_off..][0..lc.export_size];
@ -1768,9 +1767,9 @@ const ElfDumper = struct {
const ArchiveContext = struct {
gpa: Allocator,
data: []const u8,
symtab: std.ArrayListUnmanaged(ArSymtabEntry) = .empty,
symtab: std.ArrayList(ArSymtabEntry) = .empty,
strtab: []const u8,
objects: std.ArrayListUnmanaged(struct { name: []const u8, off: usize, len: usize }) = .empty,
objects: std.ArrayList(struct { name: []const u8, off: usize, len: usize }) = .empty,
fn parseSymtab(ctx: *ArchiveContext, raw: []const u8, ptr_width: enum { p32, p64 }) !void {
var reader: std.Io.Reader = .fixed(raw);

View File

@ -1801,7 +1801,7 @@ fn getZigArgs(compile: *Compile, fuzz: bool) ![][]const u8 {
for (arg, 0..) |c, arg_idx| {
if (c == '\\' or c == '"') {
// Slow path for arguments that need to be escaped. We'll need to allocate and copy
var escaped: std.ArrayListUnmanaged(u8) = .empty;
var escaped: std.ArrayList(u8) = .empty;
try escaped.ensureTotalCapacityPrecise(arena, arg.len + 1);
try escaped.appendSlice(arena, arg[0..arg_idx]);
for (arg[arg_idx..]) |to_escape| {
@ -1932,6 +1932,11 @@ pub fn rebuildInFuzzMode(c: *Compile, gpa: Allocator, progress_node: std.Progres
c.step.result_error_bundle.deinit(gpa);
c.step.result_error_bundle = std.zig.ErrorBundle.empty;
if (c.step.result_failed_command) |cmd| {
gpa.free(cmd);
c.step.result_failed_command = null;
}
const zig_args = try getZigArgs(c, true);
const maybe_output_bin_path = try c.step.evalZigProcess(zig_args, progress_node, false, null, gpa);
return maybe_output_bin_path.?;
@ -2030,7 +2035,7 @@ fn checkCompileErrors(compile: *Compile) !void {
};
// Render the expected lines into a string that we can compare verbatim.
var expected_generated: std.ArrayListUnmanaged(u8) = .empty;
var expected_generated: std.ArrayList(u8) = .empty;
const expect_errors = compile.expect_errors.?;
var actual_line_it = mem.splitScalar(u8, actual_errors, '\n');

View File

@ -48,7 +48,7 @@ fn make(step: *Step, options: Step.MakeOptions) !void {
const arena = b.allocator;
const fmt: *Fmt = @fieldParentPtr("step", step);
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
var argv: std.ArrayList([]const u8) = .empty;
try argv.ensureUnusedCapacity(arena, 2 + 1 + fmt.paths.len + 2 * fmt.exclude_paths.len);
argv.appendAssumeCapacity(b.graph.zig_exe);

View File

@ -3,7 +3,6 @@ const ObjCopy = @This();
const Allocator = std.mem.Allocator;
const ArenaAllocator = std.heap.ArenaAllocator;
const ArrayListUnmanaged = std.ArrayListUnmanaged;
const File = std.fs.File;
const InstallDir = std.Build.InstallDir;
const Step = std.Build.Step;

View File

@ -12,8 +12,8 @@ pub const base_id: Step.Id = .options;
step: Step,
generated_file: GeneratedFile,
contents: std.ArrayListUnmanaged(u8),
args: std.ArrayListUnmanaged(Arg),
contents: std.ArrayList(u8),
args: std.ArrayList(Arg),
encountered_types: std.StringHashMapUnmanaged(void),
pub fn create(owner: *std.Build) *Options {
@ -45,7 +45,7 @@ fn addOptionFallible(options: *Options, comptime T: type, name: []const u8, valu
fn printType(
options: *Options,
out: *std.ArrayListUnmanaged(u8),
out: *std.ArrayList(u8),
comptime T: type,
value: T,
indent: u8,
@ -267,7 +267,7 @@ fn printType(
}
}
fn printUserDefinedType(options: *Options, out: *std.ArrayListUnmanaged(u8), comptime T: type, indent: u8) !void {
fn printUserDefinedType(options: *Options, out: *std.ArrayList(u8), comptime T: type, indent: u8) !void {
switch (@typeInfo(T)) {
.@"enum" => |info| {
return try printEnum(options, out, T, info, indent);
@ -281,7 +281,7 @@ fn printUserDefinedType(options: *Options, out: *std.ArrayListUnmanaged(u8), com
fn printEnum(
options: *Options,
out: *std.ArrayListUnmanaged(u8),
out: *std.ArrayList(u8),
comptime T: type,
comptime val: std.builtin.Type.Enum,
indent: u8,
@ -309,7 +309,7 @@ fn printEnum(
try out.appendSlice(gpa, "};\n");
}
fn printStruct(options: *Options, out: *std.ArrayListUnmanaged(u8), comptime T: type, comptime val: std.builtin.Type.Struct, indent: u8) !void {
fn printStruct(options: *Options, out: *std.ArrayList(u8), comptime T: type, comptime val: std.builtin.Type.Struct, indent: u8) !void {
const gpa = options.step.owner.allocator;
const gop = try options.encountered_types.getOrPut(gpa, @typeName(T));
if (gop.found_existing) return;
@ -369,7 +369,7 @@ fn printStruct(options: *Options, out: *std.ArrayListUnmanaged(u8), comptime T:
fn printStructValue(
options: *Options,
out: *std.ArrayListUnmanaged(u8),
out: *std.ArrayList(u8),
comptime struct_val: std.builtin.Type.Struct,
val: anytype,
indent: u8,

View File

@ -16,7 +16,7 @@ pub const base_id: Step.Id = .run;
step: Step,
/// See also addArg and addArgs to modifying this directly
argv: std.ArrayListUnmanaged(Arg),
argv: std.ArrayList(Arg),
/// Use `setCwd` to set the initial current working directory
cwd: ?Build.LazyPath,
@ -63,7 +63,7 @@ stdin: StdIn,
/// If the Run step is determined to have side-effects, the Run step is always
/// executed when it appears in the build graph, regardless of whether these
/// files have been modified.
file_inputs: std.ArrayListUnmanaged(std.Build.LazyPath),
file_inputs: std.ArrayList(std.Build.LazyPath),
/// After adding an output argument, this step will by default rename itself
/// for a better display name in the build summary.
@ -104,7 +104,7 @@ has_side_effects: bool,
/// If this is a Zig unit test binary, this tracks the indexes of the unit
/// tests that are also fuzz tests.
fuzz_tests: std.ArrayListUnmanaged(u32),
fuzz_tests: std.ArrayList(u32),
cached_test_metadata: ?CachedTestMetadata = null,
/// Populated during the fuzz phase if this run step corresponds to a unit test
@ -139,7 +139,7 @@ pub const StdIo = union(enum) {
/// conditions.
/// Note that an explicit check for exit code 0 needs to be added to this
/// list if such a check is desirable.
check: std.ArrayListUnmanaged(Check),
check: std.ArrayList(Check),
/// This Run step is running a zig unit test binary and will communicate
/// extra metadata over the IPC protocol.
zig_test,
@ -1140,6 +1140,12 @@ pub fn rerunInFuzzMode(
.output_file, .output_directory => unreachable,
}
}
if (run.step.result_failed_command) |cmd| {
fuzz.gpa.free(cmd);
run.step.result_failed_command = null;
}
const has_side_effects = false;
const rand_int = std.crypto.random.int(u64);
const tmp_dir_path = "tmp" ++ fs.path.sep_str ++ std.fmt.hex(rand_int);
@ -1150,7 +1156,7 @@ pub fn rerunInFuzzMode(
.web_server = null, // only needed for time reports
.ttyconf = fuzz.ttyconf,
.unit_test_timeout_ns = null, // don't time out fuzz tests for now
.gpa = undefined, // not used by `runCommand`
.gpa = fuzz.gpa,
}, .{
.unit_test_index = unit_test_index,
.fuzz = fuzz,
@ -1870,7 +1876,10 @@ fn pollZigTest(
// test. For instance, if the test runner leaves this much time between us requesting a test to
// start and it acknowledging the test starting, we terminate the child and raise an error. This
// *should* never happen, but could in theory be caused by some very unlucky IB in a test.
const response_timeout_ns = @max(options.unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s);
const response_timeout_ns: ?u64 = ns: {
if (fuzz_context != null) break :ns null; // don't timeout fuzz tests
break :ns @max(options.unit_test_timeout_ns orelse 0, 60 * std.time.ns_per_s);
};
const stdout = poller.reader(.stdout);
const stderr = poller.reader(.stderr);

View File

@ -12,7 +12,7 @@ const fs = std.fs;
const ArrayList = std.ArrayList;
step: Step,
output_source_files: std.ArrayListUnmanaged(OutputSourceFile),
output_source_files: std.ArrayList(OutputSourceFile),
pub const base_id: Step.Id = .update_source_files;

View File

@ -11,8 +11,8 @@ const WriteFile = @This();
step: Step,
// The elements here are pointers because we need stable pointers for the GeneratedFile field.
files: std.ArrayListUnmanaged(File),
directories: std.ArrayListUnmanaged(Directory),
files: std.ArrayList(File),
directories: std.ArrayList(Directory),
generated_directory: std.Build.GeneratedFile,
pub const base_id: Step.Id = .write_file;

View File

@ -549,7 +549,7 @@ fn buildClientWasm(ws: *WebServer, arena: Allocator, optimize: std.builtin.Optim
.sub_path = "docs/wasm/html_render.zig",
};
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
var argv: std.ArrayList([]const u8) = .empty;
try argv.appendSlice(arena, &.{
graph.zig_exe, "build-exe", //

View File

@ -145,6 +145,7 @@ pub const fuzz = struct {
pub extern fn fuzzer_init_test(test_one: TestOne, unit_test_name: Slice) void;
pub extern fn fuzzer_new_input(bytes: Slice) void;
pub extern fn fuzzer_main(limit_kind: LimitKind, amount: u64) void;
pub extern fn fuzzer_unslide_address(addr: usize) usize;
pub const Slice = extern struct {
ptr: [*]const u8,

View File

@ -361,7 +361,7 @@ pub fn Poller(comptime StreamEnum: type) type {
r.end = data.len;
}
{
var list: std.ArrayListUnmanaged(u8) = .{
var list: std.ArrayList(u8) = .{
.items = r.buffer[0..r.end],
.capacity = r.buffer.len,
};

View File

@ -12,13 +12,10 @@ const Io = std.Io;
const net = std.Io.net;
const HostName = std.Io.net.HostName;
const IpAddress = std.Io.net.IpAddress;
const Allocator = std.mem.Allocator;
const Alignment = std.mem.Alignment;
const assert = std.debug.assert;
const posix = std.posix;
/// Thread-safe.
allocator: Allocator,
main_thread: Thread,
stack_size: usize = default_stack_size,
capacity: std.atomic.Value(Capacity),
@ -41,6 +38,8 @@ pub const Thread = struct {
/// The value that needs to be passed to pthread_kill or tgkill in order to
/// send a signal.
signal_id: SignalId,
/// Points to the next thread in the list. Singly-linked so that
/// it can be updated lock-free.
list_node: std.SinglyLinkedList.Node = .{},
run_queue: std.SinglyLinkedList.Node = .{},
current_closure: ?*Closure = null,
@ -516,7 +515,7 @@ pub const Thread = struct {
linux.CLONE.PARENT_SETTID | linux.CLONE.CHILD_CLEARTID |
linux.CLONE.SIGHAND | linux.CLONE.SYSVSEM | linux.CLONE.SETTLS;
switch (linux.E.init(linux.clone(
switch (linux.errno(linux.clone(
linuxStart,
@intFromPtr(&thread.completion.mapped[thread.completion.stack_offset]),
flags,
@ -745,23 +744,12 @@ pub const CpuCountError = error{
Unsupported,
} || Io.UnexpectedError;
pub const InitError = CpuCountError || Allocator.Error;
/// Related:
/// * `init_single_threaded`
pub fn init(
/// Must be threadsafe. Only used for the following functions:
/// * `Io.VTable.async`
/// * `Io.VTable.concurrent`
/// * `Io.VTable.groupAsync`
/// If these functions are avoided, then `Allocator.failing` may be passed
/// here.
gpa: Allocator,
) Threaded {
pub fn init() Threaded {
const cpu_count = std.Thread.getCpuCount();
var t: Threaded = .{
.allocator = gpa,
.threads = .empty,
.capacity = .init(if (cpu_count) |n| .init(n) else |_| .unknown),
.capacity_error = if (cpu_count) |_| null else |e| e,
@ -771,10 +759,6 @@ pub fn init(
.have_signal_handler = false,
};
if (cpu_count) |n| {
t.threads.ensureTotalCapacityPrecise(gpa, n - 1) catch {};
} else |_| {}
if (posix.Sigaction != void) {
// This causes sending `posix.SIG.IO` to thread to interrupt blocking
// syscalls, returning `posix.E.INTR`.
@ -798,7 +782,6 @@ pub fn init(
/// * cancel requests have no effect.
/// * `deinit` is safe, but unnecessary to call.
pub const init_single_threaded: Threaded = .{
.allocator = .failing,
.threads = .empty,
.capacity = .init(.init(1)),
.capacity_error = null,
@ -1067,14 +1050,14 @@ const AsyncClosure = struct {
}
fn init(
gpa: Allocator,
ac: *AsyncClosure,
mode: enum { async, concurrent },
result_len: usize,
result_alignment: Alignment,
context: []const u8,
context_alignment: Alignment,
func: *const fn (context: *const anyopaque, result: *anyopaque) void,
) Allocator.Error!*AsyncClosure {
) void {
const max_context_misalignment = context_alignment.toByteUnits() -| @alignOf(AsyncClosure);
const worst_case_context_offset = context_alignment.forward(@sizeOf(AsyncClosure) + max_context_misalignment);
const worst_case_result_offset = result_alignment.forward(worst_case_context_offset + context.len);
@ -1911,7 +1894,7 @@ fn dirStatPathLinux(
linux.STATX_INO | linux.STATX_SIZE | linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME,
&statx,
);
switch (linux.E.init(rc)) {
switch (linux.errno(rc)) {
.SUCCESS => return statFromLinux(&statx),
.INTR => continue,
.CANCELED => return error.Canceled,
@ -2058,7 +2041,7 @@ fn fileStatLinux(userdata: ?*anyopaque, file: Io.File) Io.File.StatError!Io.File
linux.STATX_INO | linux.STATX_SIZE | linux.STATX_TYPE | linux.STATX_MODE | linux.STATX_ATIME | linux.STATX_MTIME | linux.STATX_CTIME,
&statx,
);
switch (linux.E.init(rc)) {
switch (linux.errno(rc)) {
.SUCCESS => return statFromLinux(&statx),
.INTR => continue,
.CANCELED => return error.Canceled,
@ -3553,7 +3536,7 @@ fn sleepLinux(userdata: ?*anyopaque, timeout: Io.Timeout) Io.SleepError!void {
var timespec: posix.timespec = timestampToPosix(deadline_nanoseconds);
while (true) {
try t.checkCancel();
switch (std.os.linux.E.init(std.os.linux.clock_nanosleep(clock_id, .{ .ABSTIME = switch (timeout) {
switch (std.os.linux.errno(std.os.linux.clock_nanosleep(clock_id, .{ .ABSTIME = switch (timeout) {
.none, .duration => false,
.deadline => true,
} }, &timespec, &timespec))) {
@ -6299,7 +6282,7 @@ fn futexWait(t: *Threaded, ptr: *const std.atomic.Value(u32), expect: u32) Io.Ca
const linux = std.os.linux;
try t.checkCancel();
const rc = linux.futex_4arg(ptr, .{ .cmd = .WAIT, .private = true }, expect, null);
if (is_debug) switch (linux.E.init(rc)) {
if (is_debug) switch (linux.errno(rc)) {
.SUCCESS => {}, // notified by `wake()`
.INTR => {}, // gives caller a chance to check cancellation
.AGAIN => {}, // ptr.* != expect
@ -6386,7 +6369,7 @@ pub fn futexWaitUncancelable(ptr: *const std.atomic.Value(u32), expect: u32) voi
.linux => {
const linux = std.os.linux;
const rc = linux.futex_4arg(ptr, .{ .cmd = .WAIT, .private = true }, expect, null);
switch (linux.E.init(rc)) {
switch (linux.errno(rc)) {
.SUCCESS => {}, // notified by `wake()`
.INTR => {}, // gives caller a chance to check cancellation
.AGAIN => {}, // ptr.* != expect
@ -6449,7 +6432,7 @@ pub fn futexWaitDurationUncancelable(ptr: *const std.atomic.Value(u32), expect:
const linux = std.os.linux;
var ts = timestampToPosix(timeout.toNanoseconds());
const rc = linux.futex_4arg(ptr, .{ .cmd = .WAIT, .private = true }, expect, &ts);
if (is_debug) switch (linux.E.init(rc)) {
if (is_debug) switch (linux.errno(rc)) {
.SUCCESS => {}, // notified by `wake()`
.INTR => {}, // gives caller a chance to check cancellation
.AGAIN => {}, // ptr.* != expect
@ -6483,7 +6466,7 @@ pub fn futexWake(ptr: *const std.atomic.Value(u32), max_waiters: u32) void {
} else switch (native_os) {
.linux => {
const linux = std.os.linux;
switch (linux.E.init(linux.futex_3arg(
switch (linux.errno(linux.futex_3arg(
&ptr.raw,
.{ .cmd = .WAKE, .private = true },
@min(max_waiters, std.math.maxInt(i32)),

View File

@ -270,16 +270,17 @@ fn writeSplatHeaderLimitFinish(
remaining -= copy_len;
if (remaining == 0) break :v;
}
for (data[0 .. data.len - 1]) |buf| if (buf.len != 0) {
const copy_len = @min(header.len, remaining);
vecs[i] = buf;
for (data[0 .. data.len - 1]) |buf| {
if (buf.len == 0) continue;
const copy_len = @min(buf.len, remaining);
vecs[i] = buf[0..copy_len];
i += 1;
remaining -= copy_len;
if (remaining == 0) break :v;
if (vecs.len - i == 0) break :v;
};
}
const pattern = data[data.len - 1];
if (splat == 1) {
if (splat == 1 or remaining < pattern.len) {
vecs[i] = pattern[0..@min(remaining, pattern.len)];
i += 1;
break :v;
@ -915,7 +916,16 @@ pub fn sendFileHeader(
if (new_end <= w.buffer.len) {
@memcpy(w.buffer[w.end..][0..header.len], header);
w.end = new_end;
return header.len + try w.vtable.sendFile(w, file_reader, limit);
const file_bytes = w.vtable.sendFile(w, file_reader, limit) catch |err| switch (err) {
error.ReadFailed, error.WriteFailed => |e| return e,
error.EndOfStream, error.Unimplemented => |e| {
// These errors are non-fatal, so if we wrote any header bytes, we will report that
// and suppress this error. Only if there was no header may we return the error.
if (header.len != 0) return header.len;
return e;
},
};
return header.len + file_bytes;
}
const buffered_contents = limit.slice(file_reader.interface.buffered());
const n = try w.vtable.drain(w, &.{ header, buffered_contents }, 1);

View File

@ -1315,7 +1315,7 @@ const LinuxThreadImpl = struct {
linux.CLONE.PARENT_SETTID | linux.CLONE.CHILD_CLEARTID |
linux.CLONE.SIGHAND | linux.CLONE.SYSVSEM | linux.CLONE.SETTLS;
switch (linux.E.init(linux.clone(
switch (linux.errno(linux.clone(
Instance.entryFn,
@intFromPtr(&mapped[stack_offset]),
flags,
@ -1354,7 +1354,7 @@ const LinuxThreadImpl = struct {
const tid = self.thread.child_tid.load(.seq_cst);
if (tid == 0) break;
switch (linux.E.init(linux.futex_4arg(
switch (linux.errno(linux.futex_4arg(
&self.thread.child_tid.raw,
.{ .cmd = .WAIT, .private = false },
@bitCast(tid),

View File

@ -269,7 +269,7 @@ const LinuxImpl = struct {
if (timeout != null) &ts else null,
);
switch (linux.E.init(rc)) {
switch (linux.errno(rc)) {
.SUCCESS => {}, // notified by `wake()`
.INTR => {}, // spurious wakeup
.AGAIN => {}, // ptr.* != expect
@ -290,7 +290,7 @@ const LinuxImpl = struct {
@min(max_waiters, std.math.maxInt(i32)),
);
switch (linux.E.init(rc)) {
switch (linux.errno(rc)) {
.SUCCESS => {}, // successful wake up
.INVAL => {}, // invalid futex_wait() on ptr done elsewhere
.FAULT => {}, // pointer became invalid while doing the wake

View File

@ -505,7 +505,7 @@ pub fn ArrayHashMapWithAllocator(
/// A hash table of keys and values, each stored sequentially.
///
/// Insertion order is preserved. In general, this data structure supports the same
/// operations as `std.ArrayListUnmanaged`.
/// operations as `std.ArrayList`.
///
/// Deletion operations:
/// * `swapRemove` - O(1)

View File

@ -72,6 +72,11 @@ pub inline fn versionCheck(comptime version: std.SemanticVersion) bool {
};
}
/// Get the errno if rc is -1 and SUCCESS if rc is not -1.
pub fn errno(rc: anytype) E {
return if (rc == -1) @enumFromInt(_errno().*) else .SUCCESS;
}
pub const ino_t = switch (native_os) {
.linux => linux.ino_t,
.emscripten => emscripten.ino_t,
@ -11580,6 +11585,6 @@ const private = struct {
extern threadlocal var errno: c_int;
fn errnoFromThreadLocal() *c_int {
return &errno;
return &private.errno;
}
};

View File

@ -21,7 +21,7 @@ const base64 = std.base64.standard.decoderWithIgnore(" \t\r\n");
/// The key is the contents slice of the subject.
map: std.HashMapUnmanaged(der.Element.Slice, u32, MapContext, std.hash_map.default_max_load_percentage) = .empty,
bytes: std.ArrayListUnmanaged(u8) = .empty,
bytes: std.ArrayList(u8) = .empty,
pub const VerifyError = Certificate.Parsed.VerifyError || error{
CertificateIssuerNotFound,

View File

@ -1446,6 +1446,8 @@ fn randPolyNormalized(rnd: anytype) Poly {
}
test "MulHat" {
if (comptime builtin.cpu.has(.s390x, .vector)) return error.SkipZigTest;
var rnd = RndGen.init(0);
for (0..100) |_| {
@ -1600,6 +1602,8 @@ test "Polynomial packing" {
}
test "Test inner PKE" {
if (comptime builtin.cpu.has(.s390x, .vector)) return error.SkipZigTest;
var seed: [32]u8 = undefined;
var pt: [32]u8 = undefined;
for (&seed, &pt, 0..) |*s, *p, i| {
@ -1621,6 +1625,8 @@ test "Test inner PKE" {
}
test "Test happy flow" {
if (comptime builtin.cpu.has(.s390x, .vector)) return error.SkipZigTest;
var seed: [64]u8 = undefined;
for (&seed, 0..) |*s, i| {
s.* = @as(u8, @intCast(i));
@ -1646,18 +1652,21 @@ test "Test happy flow" {
test "NIST KAT test d00.Kyber512" {
if (comptime builtin.cpu.has(.loongarch, .lsx)) return error.SkipZigTest;
if (comptime builtin.cpu.has(.s390x, .vector)) return error.SkipZigTest;
try testNistKat(d00.Kyber512, "e9c2bd37133fcb40772f81559f14b1f58dccd1c816701be9ba6214d43baf4547");
}
test "NIST KAT test d00.Kyber1024" {
if (comptime builtin.cpu.has(.loongarch, .lsx)) return error.SkipZigTest;
if (comptime builtin.cpu.has(.s390x, .vector)) return error.SkipZigTest;
try testNistKat(d00.Kyber1024, "89248f2f33f7f4f7051729111f3049c409a933ec904aedadf035f30fa5646cd5");
}
test "NIST KAT test d00.Kyber768" {
if (comptime builtin.cpu.has(.loongarch, .lsx)) return error.SkipZigTest;
if (comptime builtin.cpu.has(.s390x, .vector)) return error.SkipZigTest;
try testNistKat(d00.Kyber768, "a1e122cad3c24bc51622e4c242d8b8acbcd3f618fee4220400605ca8f9ea02c2");
}

View File

@ -21,6 +21,7 @@ const root = @import("root");
pub const Dwarf = @import("debug/Dwarf.zig");
pub const Pdb = @import("debug/Pdb.zig");
pub const ElfFile = @import("debug/ElfFile.zig");
pub const MachOFile = @import("debug/MachOFile.zig");
pub const Info = @import("debug/Info.zig");
pub const Coverage = @import("debug/Coverage.zig");
pub const cpu_context = @import("debug/cpu_context.zig");
@ -1366,7 +1367,7 @@ test printLineFromFile {
/// The returned allocator should be thread-safe if the compilation is multi-threaded, because
/// multiple threads could capture and/or print stack traces simultaneously.
fn getDebugInfoAllocator() Allocator {
pub fn getDebugInfoAllocator() Allocator {
// Allow overriding the debug info allocator by exposing `root.debug.getDebugInfoAllocator`.
if (@hasDecl(root, "debug") and @hasDecl(root.debug, "getDebugInfoAllocator")) {
return root.debug.getDebugInfoAllocator();

View File

@ -21,7 +21,7 @@ directories: std.ArrayHashMapUnmanaged(String, void, String.MapContext, false),
///
/// Protected by `mutex`.
files: std.ArrayHashMapUnmanaged(File, void, File.MapContext, false),
string_bytes: std.ArrayListUnmanaged(u8),
string_bytes: std.ArrayList(u8),
/// Protects the other fields.
mutex: std.Thread.Mutex,

View File

@ -158,7 +158,7 @@ pub fn StackMachine(comptime options: Options) type {
}
};
stack: std.ArrayListUnmanaged(Value) = .empty,
stack: std.ArrayList(Value) = .empty,
pub fn reset(self: *Self) void {
self.stack.clearRetainingCapacity();

View File

@ -9,49 +9,67 @@
const std = @import("../std.zig");
const Allocator = std.mem.Allocator;
const Path = std.Build.Cache.Path;
const ElfFile = std.debug.ElfFile;
const assert = std.debug.assert;
const Coverage = std.debug.Coverage;
const SourceLocation = std.debug.Coverage.SourceLocation;
const ElfFile = std.debug.ElfFile;
const MachOFile = std.debug.MachOFile;
const Info = @This();
/// Sorted by key, ascending.
address_map: std.AutoArrayHashMapUnmanaged(u64, ElfFile),
impl: union(enum) {
elf: ElfFile,
macho: MachOFile,
},
/// Externally managed, outlives this `Info` instance.
coverage: *Coverage,
pub const LoadError = std.fs.File.OpenError || ElfFile.LoadError || std.debug.Dwarf.ScanError || error{MissingDebugInfo};
pub const LoadError = std.fs.File.OpenError || ElfFile.LoadError || MachOFile.Error || std.debug.Dwarf.ScanError || error{ MissingDebugInfo, UnsupportedDebugInfo };
pub fn load(gpa: Allocator, path: Path, coverage: *Coverage) LoadError!Info {
var file = try path.root_dir.handle.openFile(path.sub_path, .{});
defer file.close();
pub fn load(gpa: Allocator, path: Path, coverage: *Coverage, format: std.Target.ObjectFormat, arch: std.Target.Cpu.Arch) LoadError!Info {
switch (format) {
.elf => {
var file = try path.root_dir.handle.openFile(path.sub_path, .{});
defer file.close();
var elf_file: ElfFile = try .load(gpa, file, null, &.none);
errdefer elf_file.deinit(gpa);
var elf_file: ElfFile = try .load(gpa, file, null, &.none);
errdefer elf_file.deinit(gpa);
if (elf_file.dwarf == null) return error.MissingDebugInfo;
try elf_file.dwarf.?.open(gpa, elf_file.endian);
try elf_file.dwarf.?.populateRanges(gpa, elf_file.endian);
if (elf_file.dwarf == null) return error.MissingDebugInfo;
try elf_file.dwarf.?.open(gpa, elf_file.endian);
try elf_file.dwarf.?.populateRanges(gpa, elf_file.endian);
var info: Info = .{
.address_map = .{},
.coverage = coverage,
};
try info.address_map.put(gpa, 0, elf_file);
errdefer comptime unreachable; // elf_file is owned by the map now
return info;
return .{
.impl = .{ .elf = elf_file },
.coverage = coverage,
};
},
.macho => {
const path_str = try path.toString(gpa);
defer gpa.free(path_str);
var macho_file: MachOFile = try .load(gpa, path_str, arch);
errdefer macho_file.deinit(gpa);
return .{
.impl = .{ .macho = macho_file },
.coverage = coverage,
};
},
else => return error.UnsupportedDebugInfo,
}
}
pub fn deinit(info: *Info, gpa: Allocator) void {
for (info.address_map.values()) |*elf_file| {
elf_file.dwarf.?.deinit(gpa);
switch (info.impl) {
.elf => |*ef| ef.deinit(gpa),
.macho => |*mf| mf.deinit(gpa),
}
info.address_map.deinit(gpa);
info.* = undefined;
}
pub const ResolveAddressesError = Coverage.ResolveAddressesDwarfError;
pub const ResolveAddressesError = Coverage.ResolveAddressesDwarfError || error{UnsupportedDebugInfo};
/// Given an array of virtual memory addresses, sorted ascending, outputs a
/// corresponding array of source locations.
@ -64,7 +82,28 @@ pub fn resolveAddresses(
output: []SourceLocation,
) ResolveAddressesError!void {
assert(sorted_pc_addrs.len == output.len);
if (info.address_map.entries.len != 1) @panic("TODO");
const elf_file = &info.address_map.values()[0];
return info.coverage.resolveAddressesDwarf(gpa, elf_file.endian, sorted_pc_addrs, output, &elf_file.dwarf.?);
switch (info.impl) {
.elf => |*ef| return info.coverage.resolveAddressesDwarf(gpa, ef.endian, sorted_pc_addrs, output, &ef.dwarf.?),
.macho => |*mf| {
// Resolving all of the addresses at once unfortunately isn't so easy in Mach-O binaries
// due to split debug information. For now, we'll just resolve the addreses one by one.
for (sorted_pc_addrs, output) |pc_addr, *src_loc| {
const dwarf, const dwarf_pc_addr = mf.getDwarfForAddress(gpa, pc_addr) catch |err| switch (err) {
error.InvalidMachO, error.InvalidDwarf => return error.InvalidDebugInfo,
else => |e| return e,
};
if (dwarf.ranges.items.len == 0) {
dwarf.populateRanges(gpa, .little) catch |err| switch (err) {
error.EndOfStream,
error.Overflow,
error.StreamTooLong,
error.ReadFailed,
=> return error.InvalidDebugInfo,
else => |e| return e,
};
}
try info.coverage.resolveAddressesDwarf(gpa, .little, &.{dwarf_pc_addr}, src_loc[0..1], dwarf);
}
},
}
}

548
lib/std/debug/MachOFile.zig Normal file
View File

@ -0,0 +1,548 @@
mapped_memory: []align(std.heap.page_size_min) const u8,
symbols: []const Symbol,
strings: []const u8,
text_vmaddr: u64,
/// Key is index into `strings` of the file path.
ofiles: std.AutoArrayHashMapUnmanaged(u32, Error!OFile),
pub const Error = error{
InvalidMachO,
InvalidDwarf,
MissingDebugInfo,
UnsupportedDebugInfo,
ReadFailed,
OutOfMemory,
};
pub fn deinit(mf: *MachOFile, gpa: Allocator) void {
for (mf.ofiles.values()) |*maybe_of| {
const of = &(maybe_of.* catch continue);
posix.munmap(of.mapped_memory);
of.dwarf.deinit(gpa);
of.symbols_by_name.deinit(gpa);
}
mf.ofiles.deinit(gpa);
gpa.free(mf.symbols);
posix.munmap(mf.mapped_memory);
}
pub fn load(gpa: Allocator, path: []const u8, arch: std.Target.Cpu.Arch) Error!MachOFile {
switch (arch) {
.x86_64, .aarch64 => {},
else => unreachable,
}
const all_mapped_memory = try mapDebugInfoFile(path);
errdefer posix.munmap(all_mapped_memory);
// In most cases, the file we just mapped is a Mach-O binary. However, it could be a "universal
// binary": a simple file format which contains Mach-O binaries for multiple targets. For
// instance, `/usr/lib/dyld` is currently distributed as a universal binary containing images
// for both ARM64 macOS and x86_64 macOS.
if (all_mapped_memory.len < 4) return error.InvalidMachO;
const magic = std.mem.readInt(u32, all_mapped_memory.ptr[0..4], .little);
// The contents of a Mach-O file, which may or may not be the whole of `all_mapped_memory`.
const mapped_macho = switch (magic) {
macho.MH_MAGIC_64 => all_mapped_memory,
macho.FAT_CIGAM => mapped_macho: {
// This is the universal binary format (aka a "fat binary").
var fat_r: Io.Reader = .fixed(all_mapped_memory);
const hdr = fat_r.takeStruct(macho.fat_header, .big) catch |err| switch (err) {
error.ReadFailed => unreachable,
error.EndOfStream => return error.InvalidMachO,
};
const want_cpu_type = switch (arch) {
.x86_64 => macho.CPU_TYPE_X86_64,
.aarch64 => macho.CPU_TYPE_ARM64,
else => unreachable,
};
for (0..hdr.nfat_arch) |_| {
const fat_arch = fat_r.takeStruct(macho.fat_arch, .big) catch |err| switch (err) {
error.ReadFailed => unreachable,
error.EndOfStream => return error.InvalidMachO,
};
if (fat_arch.cputype != want_cpu_type) continue;
if (fat_arch.offset + fat_arch.size > all_mapped_memory.len) return error.InvalidMachO;
break :mapped_macho all_mapped_memory[fat_arch.offset..][0..fat_arch.size];
}
// `arch` was not present in the fat binary.
return error.MissingDebugInfo;
},
// Even on modern 64-bit targets, this format doesn't seem to be too extensively used. It
// will be fairly easy to add support here if necessary; it's very similar to above.
macho.FAT_CIGAM_64 => return error.UnsupportedDebugInfo,
else => return error.InvalidMachO,
};
var r: Io.Reader = .fixed(mapped_macho);
const hdr = r.takeStruct(macho.mach_header_64, .little) catch |err| switch (err) {
error.ReadFailed => unreachable,
error.EndOfStream => return error.InvalidMachO,
};
if (hdr.magic != macho.MH_MAGIC_64)
return error.InvalidMachO;
const symtab: macho.symtab_command, const text_vmaddr: u64 = lcs: {
var it: macho.LoadCommandIterator = try .init(&hdr, mapped_macho[@sizeOf(macho.mach_header_64)..]);
var symtab: ?macho.symtab_command = null;
var text_vmaddr: ?u64 = null;
while (try it.next()) |cmd| switch (cmd.hdr.cmd) {
.SYMTAB => symtab = cmd.cast(macho.symtab_command) orelse return error.InvalidMachO,
.SEGMENT_64 => if (cmd.cast(macho.segment_command_64)) |seg_cmd| {
if (!mem.eql(u8, seg_cmd.segName(), "__TEXT")) continue;
text_vmaddr = seg_cmd.vmaddr;
},
else => {},
};
break :lcs .{
symtab orelse return error.MissingDebugInfo,
text_vmaddr orelse return error.MissingDebugInfo,
};
};
const strings = mapped_macho[symtab.stroff..][0 .. symtab.strsize - 1];
var symbols: std.ArrayList(Symbol) = try .initCapacity(gpa, symtab.nsyms);
defer symbols.deinit(gpa);
// This map is temporary; it is used only to detect duplicates here. This is
// necessary because we prefer to use STAB ("symbolic debugging table") symbols,
// but they might not be present, so we track normal symbols too.
// Indices match 1-1 with those of `symbols`.
var symbol_names: std.StringArrayHashMapUnmanaged(void) = .empty;
defer symbol_names.deinit(gpa);
try symbol_names.ensureUnusedCapacity(gpa, symtab.nsyms);
var ofile: u32 = undefined;
var last_sym: Symbol = undefined;
var state: enum {
init,
oso_open,
oso_close,
bnsym,
fun_strx,
fun_size,
ensym,
} = .init;
var sym_r: Io.Reader = .fixed(mapped_macho[symtab.symoff..]);
for (0..symtab.nsyms) |_| {
const sym = sym_r.takeStruct(macho.nlist_64, .little) catch |err| switch (err) {
error.ReadFailed => unreachable,
error.EndOfStream => return error.InvalidMachO,
};
if (sym.n_type.bits.is_stab == 0) {
if (sym.n_strx == 0) continue;
switch (sym.n_type.bits.type) {
.undf, .pbud, .indr, .abs, _ => continue,
.sect => {
const name = std.mem.sliceTo(strings[sym.n_strx..], 0);
const gop = symbol_names.getOrPutAssumeCapacity(name);
if (!gop.found_existing) {
assert(gop.index == symbols.items.len);
symbols.appendAssumeCapacity(.{
.strx = sym.n_strx,
.addr = sym.n_value,
.ofile = Symbol.unknown_ofile,
});
}
},
}
continue;
}
// TODO handle globals N_GSYM, and statics N_STSYM
switch (sym.n_type.stab) {
.oso => switch (state) {
.init, .oso_close => {
state = .oso_open;
ofile = sym.n_strx;
},
else => return error.InvalidMachO,
},
.bnsym => switch (state) {
.oso_open, .ensym => {
state = .bnsym;
last_sym = .{
.strx = 0,
.addr = sym.n_value,
.ofile = ofile,
};
},
else => return error.InvalidMachO,
},
.fun => switch (state) {
.bnsym => {
state = .fun_strx;
last_sym.strx = sym.n_strx;
},
.fun_strx => {
state = .fun_size;
},
else => return error.InvalidMachO,
},
.ensym => switch (state) {
.fun_size => {
state = .ensym;
if (last_sym.strx != 0) {
const name = std.mem.sliceTo(strings[last_sym.strx..], 0);
const gop = symbol_names.getOrPutAssumeCapacity(name);
if (!gop.found_existing) {
assert(gop.index == symbols.items.len);
symbols.appendAssumeCapacity(last_sym);
} else {
symbols.items[gop.index] = last_sym;
}
}
},
else => return error.InvalidMachO,
},
.so => switch (state) {
.init, .oso_close => {},
.oso_open, .ensym => {
state = .oso_close;
},
else => return error.InvalidMachO,
},
else => {},
}
}
switch (state) {
.init => {
// Missing STAB symtab entries is still okay, unless there were also no normal symbols.
if (symbols.items.len == 0) return error.MissingDebugInfo;
},
.oso_close => {},
else => return error.InvalidMachO, // corrupted STAB entries in symtab
}
const symbols_slice = try symbols.toOwnedSlice(gpa);
errdefer gpa.free(symbols_slice);
// Even though lld emits symbols in ascending order, this debug code
// should work for programs linked in any valid way.
// This sort is so that we can binary search later.
mem.sort(Symbol, symbols_slice, {}, Symbol.addressLessThan);
return .{
.mapped_memory = all_mapped_memory,
.symbols = symbols_slice,
.strings = strings,
.ofiles = .empty,
.text_vmaddr = text_vmaddr,
};
}
pub fn getDwarfForAddress(mf: *MachOFile, gpa: Allocator, vaddr: u64) !struct { *Dwarf, u64 } {
const symbol = Symbol.find(mf.symbols, vaddr) orelse return error.MissingDebugInfo;
if (symbol.ofile == Symbol.unknown_ofile) return error.MissingDebugInfo;
// offset of `address` from start of `symbol`
const address_symbol_offset = vaddr - symbol.addr;
// Take the symbol name from the N_FUN STAB entry, we're going to
// use it if we fail to find the DWARF infos
const stab_symbol = mem.sliceTo(mf.strings[symbol.strx..], 0);
const gop = try mf.ofiles.getOrPut(gpa, symbol.ofile);
if (!gop.found_existing) {
const name = mem.sliceTo(mf.strings[symbol.ofile..], 0);
gop.value_ptr.* = loadOFile(gpa, name);
}
const of = &(gop.value_ptr.* catch |err| return err);
const symbol_index = of.symbols_by_name.getKeyAdapted(
@as([]const u8, stab_symbol),
@as(OFile.SymbolAdapter, .{ .strtab = of.strtab, .symtab_raw = of.symtab_raw }),
) orelse return error.MissingDebugInfo;
const symbol_ofile_vaddr = vaddr: {
var sym = of.symtab_raw[symbol_index];
if (builtin.cpu.arch.endian() != .little) std.mem.byteSwapAllFields(macho.nlist_64, &sym);
break :vaddr sym.n_value;
};
return .{ &of.dwarf, symbol_ofile_vaddr + address_symbol_offset };
}
pub fn lookupSymbolName(mf: *MachOFile, vaddr: u64) error{MissingDebugInfo}![]const u8 {
const symbol = Symbol.find(mf.symbols, vaddr) orelse return error.MissingDebugInfo;
return mem.sliceTo(mf.strings[symbol.strx..], 0);
}
const OFile = struct {
mapped_memory: []align(std.heap.page_size_min) const u8,
dwarf: Dwarf,
strtab: []const u8,
symtab_raw: []align(1) const macho.nlist_64,
/// All named symbols in `symtab_raw`. Stored `u32` key is the index into `symtab_raw`. Accessed
/// through `SymbolAdapter`, so that the symbol name is used as the logical key.
symbols_by_name: std.ArrayHashMapUnmanaged(u32, void, void, true),
const SymbolAdapter = struct {
strtab: []const u8,
symtab_raw: []align(1) const macho.nlist_64,
pub fn hash(ctx: SymbolAdapter, sym_name: []const u8) u32 {
_ = ctx;
return @truncate(std.hash.Wyhash.hash(0, sym_name));
}
pub fn eql(ctx: SymbolAdapter, a_sym_name: []const u8, b_sym_index: u32, b_index: usize) bool {
_ = b_index;
var b_sym = ctx.symtab_raw[b_sym_index];
if (builtin.cpu.arch.endian() != .little) std.mem.byteSwapAllFields(macho.nlist_64, &b_sym);
const b_sym_name = std.mem.sliceTo(ctx.strtab[b_sym.n_strx..], 0);
return mem.eql(u8, a_sym_name, b_sym_name);
}
};
};
const Symbol = struct {
strx: u32,
addr: u64,
/// Value may be `unknown_ofile`.
ofile: u32,
const unknown_ofile = std.math.maxInt(u32);
fn addressLessThan(context: void, lhs: Symbol, rhs: Symbol) bool {
_ = context;
return lhs.addr < rhs.addr;
}
/// Assumes that `symbols` is sorted in order of ascending `addr`.
fn find(symbols: []const Symbol, address: usize) ?*const Symbol {
if (symbols.len == 0) return null; // no potential match
if (address < symbols[0].addr) return null; // address is before the lowest-address symbol
var left: usize = 0;
var len: usize = symbols.len;
while (len > 1) {
const mid = left + len / 2;
if (address < symbols[mid].addr) {
len /= 2;
} else {
left = mid;
len -= len / 2;
}
}
return &symbols[left];
}
test find {
const symbols: []const Symbol = &.{
.{ .addr = 100, .strx = undefined, .ofile = undefined },
.{ .addr = 200, .strx = undefined, .ofile = undefined },
.{ .addr = 300, .strx = undefined, .ofile = undefined },
};
try testing.expectEqual(null, find(symbols, 0));
try testing.expectEqual(null, find(symbols, 99));
try testing.expectEqual(&symbols[0], find(symbols, 100).?);
try testing.expectEqual(&symbols[0], find(symbols, 150).?);
try testing.expectEqual(&symbols[0], find(symbols, 199).?);
try testing.expectEqual(&symbols[1], find(symbols, 200).?);
try testing.expectEqual(&symbols[1], find(symbols, 250).?);
try testing.expectEqual(&symbols[1], find(symbols, 299).?);
try testing.expectEqual(&symbols[2], find(symbols, 300).?);
try testing.expectEqual(&symbols[2], find(symbols, 301).?);
try testing.expectEqual(&symbols[2], find(symbols, 5000).?);
}
};
test {
_ = Symbol;
}
/// Loads and parses the Mach-O relocatable object file `o_file_name`, which
/// is either a plain path to a `.o` file or has the form
/// `path/to/archive.a(entry.o)`, in which case the named member is located
/// inside the static archive.
///
/// On success the returned `OFile` owns the mapped memory (released with
/// `posix.munmap`) and the parsed DWARF state; the DWARF section slices
/// reference the mapping directly (`.owned = false`).
///
/// Returns `error.InvalidMachO` for malformed input, `error.MissingDebugInfo`
/// when the file/member or required DWARF sections are absent, and
/// `error.InvalidDwarf` when DWARF parsing fails.
fn loadOFile(gpa: Allocator, o_file_name: []const u8) !OFile {
    const all_mapped_memory, const mapped_ofile = map: {
        // Detect the `archive.a(entry.o)` form: a trailing ')' with a
        // matching '(' somewhere before it.
        const open_paren = paren: {
            if (std.mem.endsWith(u8, o_file_name, ")")) {
                if (std.mem.findScalarLast(u8, o_file_name, '(')) |i| {
                    break :paren i;
                }
            }
            // Not an archive, just a normal path to a .o file
            const m = try mapDebugInfoFile(o_file_name);
            break :map .{ m, m };
        };
        // We have the form 'path/to/archive.a(entry.o)'. Map the archive and find the object file in question.
        const archive_path = o_file_name[0..open_paren];
        const target_name_in_archive = o_file_name[open_paren + 1 .. o_file_name.len - 1];
        const mapped_archive = try mapDebugInfoFile(archive_path);
        errdefer posix.munmap(mapped_archive);
        var ar_reader: Io.Reader = .fixed(mapped_archive);
        const ar_magic = ar_reader.take(8) catch return error.InvalidMachO;
        if (!std.mem.eql(u8, ar_magic, "!<arch>\n")) return error.InvalidMachO;
        while (true) {
            if (ar_reader.seek == ar_reader.buffer.len) return error.MissingDebugInfo;
            // Fixed-size ar member header:
            // name(16) date(12) uid(6) gid(6) mode(8) size(10) magic(2).
            const raw_name = ar_reader.takeArray(16) catch return error.InvalidMachO;
            ar_reader.discardAll(12 + 6 + 6 + 8) catch return error.InvalidMachO;
            const raw_size = ar_reader.takeArray(10) catch return error.InvalidMachO;
            const file_magic = ar_reader.takeArray(2) catch return error.InvalidMachO;
            if (!std.mem.eql(u8, file_magic, "`\n")) return error.InvalidMachO;
            const size = std.fmt.parseInt(u32, mem.sliceTo(raw_size, ' '), 10) catch return error.InvalidMachO;
            const raw_data = ar_reader.take(size) catch return error.InvalidMachO;
            // Archive members are aligned to even offsets; an odd-sized
            // member is followed by a single '\n' pad byte that is not
            // counted in `size`. Skip it so the next header parses from the
            // right offset. If the pad is absent (end of archive), the
            // end-of-buffer check at the top of the loop handles it.
            if (size % 2 != 0) ar_reader.discardAll(1) catch {};
            const entry_name: []const u8, const entry_contents: []const u8 = entry: {
                if (!std.mem.startsWith(u8, raw_name, "#1/")) {
                    break :entry .{ mem.sliceTo(raw_name, '/'), raw_data };
                }
                // BSD extended name format "#1/<len>": the first `len` bytes
                // of the member data hold the real (possibly NUL-padded) name.
                const len = std.fmt.parseInt(u32, mem.sliceTo(raw_name[3..], ' '), 10) catch return error.InvalidMachO;
                if (len > size) return error.InvalidMachO;
                break :entry .{ mem.sliceTo(raw_data[0..len], 0), raw_data[len..] };
            };
            if (std.mem.eql(u8, entry_name, target_name_in_archive)) {
                break :map .{ mapped_archive, entry_contents };
            }
        }
    };
    errdefer posix.munmap(all_mapped_memory);
    var r: Io.Reader = .fixed(mapped_ofile);
    const hdr = r.takeStruct(macho.mach_header_64, .little) catch |err| switch (err) {
        error.ReadFailed => unreachable, // a fixed reader cannot fail to read
        error.EndOfStream => return error.InvalidMachO,
    };
    if (hdr.magic != std.macho.MH_MAGIC_64) return error.InvalidMachO;
    // Locate the segment load command and the symbol table command.
    const seg_cmd: macho.LoadCommandIterator.LoadCommand, const symtab_cmd: macho.symtab_command = cmds: {
        var seg_cmd: ?macho.LoadCommandIterator.LoadCommand = null;
        var symtab_cmd: ?macho.symtab_command = null;
        var it: macho.LoadCommandIterator = try .init(&hdr, mapped_ofile[@sizeOf(macho.mach_header_64)..]);
        while (try it.next()) |lc| switch (lc.hdr.cmd) {
            .SEGMENT_64 => seg_cmd = lc,
            .SYMTAB => symtab_cmd = lc.cast(macho.symtab_command) orelse return error.InvalidMachO,
            else => {},
        };
        break :cmds .{
            seg_cmd orelse return error.MissingDebugInfo,
            symtab_cmd orelse return error.MissingDebugInfo,
        };
    };
    // Validate the string table: nonempty, in bounds, and NUL-terminated.
    // Widen to u64 so adversarial u32 fields cannot overflow the bounds check.
    if (symtab_cmd.strsize == 0) return error.InvalidMachO;
    if (mapped_ofile.len < @as(u64, symtab_cmd.stroff) + symtab_cmd.strsize) return error.InvalidMachO;
    if (mapped_ofile[symtab_cmd.stroff + symtab_cmd.strsize - 1] != 0) return error.InvalidMachO;
    const strtab = mapped_ofile[symtab_cmd.stroff..][0 .. symtab_cmd.strsize - 1];
    // Widen before multiplying: `nsyms * @sizeOf(nlist_64)` can exceed u32.
    const n_sym_bytes = @as(u64, symtab_cmd.nsyms) * @sizeOf(macho.nlist_64);
    if (mapped_ofile.len < symtab_cmd.symoff + n_sym_bytes) return error.InvalidMachO;
    const symtab_raw: []align(1) const macho.nlist_64 = @ptrCast(mapped_ofile[symtab_cmd.symoff..][0..@intCast(n_sym_bytes)]);
    // TODO handle tentative (common) symbols
    // Index symbols by name for fast lookup. The stored value is the index
    // into `symtab_raw`; hashing/equality go through `OFile.SymbolAdapter`.
    var symbols_by_name: std.ArrayHashMapUnmanaged(u32, void, void, true) = .empty;
    defer symbols_by_name.deinit(gpa);
    try symbols_by_name.ensureUnusedCapacity(gpa, @intCast(symtab_raw.len));
    for (symtab_raw, 0..) |sym_raw, sym_index| {
        var sym = sym_raw;
        // Mach-O on-disk data is little-endian.
        if (builtin.cpu.arch.endian() != .little) std.mem.byteSwapAllFields(macho.nlist_64, &sym);
        if (sym.n_strx == 0) continue; // unnamed symbol
        switch (sym.n_type.bits.type) {
            .undf => continue, // includes tentative symbols
            .abs => continue,
            else => {},
        }
        // Reject string table offsets that point past the end of `strtab`.
        if (sym.n_strx > strtab.len) return error.InvalidMachO;
        const sym_name = mem.sliceTo(strtab[sym.n_strx..], 0);
        const gop = symbols_by_name.getOrPutAssumeCapacityAdapted(
            @as([]const u8, sym_name),
            @as(OFile.SymbolAdapter, .{ .strtab = strtab, .symtab_raw = symtab_raw }),
        );
        if (gop.found_existing) return error.InvalidMachO;
        gop.key_ptr.* = @intCast(sym_index);
    }
    var sections: Dwarf.SectionArray = @splat(null);
    for (seg_cmd.getSections()) |sect_raw| {
        var sect = sect_raw;
        if (builtin.cpu.arch.endian() != .little) std.mem.byteSwapAllFields(macho.section_64, &sect);
        if (!std.mem.eql(u8, "__DWARF", sect.segName())) continue;
        // Match "__debug_info" etc against the `Dwarf.Section.Id` fields.
        const section_index: usize = inline for (@typeInfo(Dwarf.Section.Id).@"enum".fields, 0..) |section, i| {
            if (mem.eql(u8, "__" ++ section.name, sect.sectName())) break i;
        } else continue;
        if (mapped_ofile.len < sect.offset + sect.size) return error.InvalidMachO;
        const section_bytes = mapped_ofile[sect.offset..][0..sect.size];
        sections[section_index] = .{
            .data = section_bytes,
            // Backed by the mapping; freed via munmap, not the allocator.
            .owned = false,
        };
    }
    if (sections[@intFromEnum(Dwarf.Section.Id.debug_info)] == null or
        sections[@intFromEnum(Dwarf.Section.Id.debug_abbrev)] == null or
        sections[@intFromEnum(Dwarf.Section.Id.debug_str)] == null or
        sections[@intFromEnum(Dwarf.Section.Id.debug_line)] == null)
    {
        return error.MissingDebugInfo;
    }
    var dwarf: Dwarf = .{ .sections = sections };
    errdefer dwarf.deinit(gpa);
    dwarf.open(gpa, .little) catch |err| switch (err) {
        error.InvalidDebugInfo,
        error.EndOfStream,
        error.Overflow,
        error.StreamTooLong,
        => return error.InvalidDwarf,
        error.MissingDebugInfo,
        error.ReadFailed,
        error.OutOfMemory,
        => |e| return e,
    };
    return .{
        .mapped_memory = all_mapped_memory,
        .dwarf = dwarf,
        .strtab = strtab,
        .symtab_raw = symtab_raw,
        .symbols_by_name = symbols_by_name.move(),
    };
}
/// Maps the entire file at `path` into memory, read-only, via `mmap`.
/// Returns `error.MissingDebugInfo` when the file does not exist and
/// `error.ReadFailed` for any other failure. The caller releases the
/// mapping with `posix.munmap`.
fn mapDebugInfoFile(path: []const u8) ![]align(std.heap.page_size_min) const u8 {
    const file = std.fs.cwd().openFile(path, .{}) catch |err| {
        if (err == error.FileNotFound) return error.MissingDebugInfo;
        return error.ReadFailed;
    };
    defer file.close();

    const end_pos = file.getEndPos() catch return error.ReadFailed;
    const mapping_len = std.math.cast(usize, end_pos) orelse return error.ReadFailed;

    return posix.mmap(
        null,
        mapping_len,
        posix.PROT.READ,
        .{ .TYPE = .SHARED },
        file.handle,
        0,
    ) catch return error.ReadFailed;
}
const std = @import("std");
const Allocator = std.mem.Allocator;
const Dwarf = std.debug.Dwarf;
const Io = std.Io;
const assert = std.debug.assert;
const posix = std.posix;
const macho = std.macho;
const mem = std.mem;
const testing = std.testing;
const builtin = @import("builtin");
const MachOFile = @This();

Some files were not shown because too many files have changed in this diff Show More