Merge pull request #17221 from Vexu/aro-translate-c

Aro translate-c
Veikka Tuominen 2023-10-02 07:08:53 +03:00 committed by GitHub
commit fc4d53e2ea
54 changed files with 70355 additions and 386 deletions

1
.gitattributes vendored

@@ -10,3 +10,4 @@ lib/libcxx/** linguist-vendored
lib/libcxxabi/** linguist-vendored
lib/libunwind/** linguist-vendored
lib/tsan/** linguist-vendored
deps/** linguist-vendored

CMakeLists.txt

@@ -797,7 +797,8 @@ set(BUILD_ZIG2_ARGS
-OReleaseSmall
--name zig2 -femit-bin="${ZIG2_C_SOURCE}"
--mod "build_options::${ZIG_CONFIG_ZIG_OUT}"
--deps build_options
--mod "aro::deps/aro/lib.zig"
--deps build_options,aro
-target "${ZIG_HOST_TARGET_TRIPLE}"
)

build.zig

@@ -580,6 +580,9 @@ fn addCompilerStep(
.optimize = optimize,
});
exe.stack_size = stack_size;
exe.addAnonymousModule("aro", .{
.source_file = .{ .path = "deps/aro/lib.zig" },
});
return exe;
}
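
The two hunks above wire the vendored Aro sources into both build paths: the CMake bootstrap passes --mod "aro::deps/aro/lib.zig" together with --deps build_options,aro to zig2, and build.zig registers the same root file as an anonymous module named "aro". A minimal sketch of what that makes available to compiler code, assuming lib.zig re-exports Compilation as in upstream Aro (the helper name below is hypothetical):

const std = @import("std");
const aro = @import("aro"); // resolves to deps/aro/lib.zig via the wiring above

// Hypothetical helper: spin up an Aro compilation with the compiler's allocator.
fn demoAro(gpa: std.mem.Allocator) !void {
    var comp = aro.Compilation.init(gpa); // assumes lib.zig re-exports Compilation
    defer comp.deinit();
    _ = try comp.generateBuiltinMacros();
}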

1346
deps/aro/Attribute.zig vendored Normal file

File diff suppressed because it is too large

337
deps/aro/Builtins.zig vendored Normal file

@@ -0,0 +1,337 @@
const std = @import("std");
const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const BuiltinFunction = @import("builtins/BuiltinFunction.zig");
const TypeDescription = @import("builtins/TypeDescription.zig");
const target_util = @import("target.zig");
const StringId = @import("StringInterner.zig").StringId;
const LangOpts = @import("LangOpts.zig");
const Parser = @import("Parser.zig");
const Builtins = @This();
const Expanded = struct {
ty: Type,
builtin: BuiltinFunction,
};
const NameToTypeMap = std.StringHashMapUnmanaged(Type);
_name_to_type_map: NameToTypeMap = .{},
pub fn deinit(b: *Builtins, gpa: std.mem.Allocator) void {
b._name_to_type_map.deinit(gpa);
}
fn specForSize(comp: *const Compilation, size_bits: u32) Type.Builder.Specifier {
var ty = Type{ .specifier = .short };
if (ty.sizeof(comp).? * 8 == size_bits) return .short;
ty.specifier = .int;
if (ty.sizeof(comp).? * 8 == size_bits) return .int;
ty.specifier = .long;
if (ty.sizeof(comp).? * 8 == size_bits) return .long;
ty.specifier = .long_long;
if (ty.sizeof(comp).? * 8 == size_bits) return .long_long;
unreachable;
}
fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *const Compilation, allocator: std.mem.Allocator) !Type {
var builder: Type.Builder = .{ .error_on_invalid = true };
var require_native_int32 = false;
var require_native_int64 = false;
for (desc.prefix) |prefix| {
switch (prefix) {
.L => builder.combine(undefined, .long, 0) catch unreachable,
.LL => {
builder.combine(undefined, .long, 0) catch unreachable;
builder.combine(undefined, .long, 0) catch unreachable;
},
.LLL => {
switch (builder.specifier) {
.none => builder.specifier = .int128,
.signed => builder.specifier = .sint128,
.unsigned => builder.specifier = .uint128,
else => unreachable,
}
},
.Z => require_native_int32 = true,
.W => require_native_int64 = true,
.N => {
std.debug.assert(desc.spec == .i);
if (!target_util.isLP64(comp.target)) {
builder.combine(undefined, .long, 0) catch unreachable;
}
},
.O => {
builder.combine(undefined, .long, 0) catch unreachable;
if (comp.target.os.tag != .opencl) {
builder.combine(undefined, .long, 0) catch unreachable;
}
},
.S => builder.combine(undefined, .signed, 0) catch unreachable,
.U => builder.combine(undefined, .unsigned, 0) catch unreachable,
.I => {
// Todo: compile-time constant integer
},
}
}
switch (desc.spec) {
.v => builder.combine(undefined, .void, 0) catch unreachable,
.b => builder.combine(undefined, .bool, 0) catch unreachable,
.c => builder.combine(undefined, .char, 0) catch unreachable,
.s => builder.combine(undefined, .short, 0) catch unreachable,
.i => {
if (require_native_int32) {
builder.specifier = specForSize(comp, 32);
} else if (require_native_int64) {
builder.specifier = specForSize(comp, 64);
} else {
switch (builder.specifier) {
.int128, .sint128, .uint128 => {},
else => builder.combine(undefined, .int, 0) catch unreachable,
}
}
},
.h => builder.combine(undefined, .fp16, 0) catch unreachable,
.x => {
// Todo: _Float16
return .{ .specifier = .invalid };
},
.y => {
// Todo: __bf16
return .{ .specifier = .invalid };
},
.f => builder.combine(undefined, .float, 0) catch unreachable,
.d => {
if (builder.specifier == .long_long) {
builder.specifier = .float128;
} else {
builder.combine(undefined, .double, 0) catch unreachable;
}
},
.z => {
std.debug.assert(builder.specifier == .none);
builder.specifier = Type.Builder.fromType(comp.types.size);
},
.w => {
std.debug.assert(builder.specifier == .none);
builder.specifier = Type.Builder.fromType(comp.types.wchar);
},
.F => {
std.debug.assert(builder.specifier == .none);
builder.specifier = Type.Builder.fromType(comp.types.ns_constant_string.ty);
},
.a => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.va_list);
},
.A => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(desc.suffix.len == 0);
var va_list = comp.types.va_list;
if (va_list.isArray()) va_list.decayArray();
builder.specifier = Type.Builder.fromType(va_list);
},
.V => |element_count| {
std.debug.assert(desc.suffix.len == 0);
const child_desc = it.next().?;
const child_ty = try createType(child_desc, undefined, comp, allocator);
const arr_ty = try allocator.create(Type.Array);
arr_ty.* = .{
.len = element_count,
.elem = child_ty,
};
const vector_ty = .{ .specifier = .vector, .data = .{ .array = arr_ty } };
builder.specifier = Type.Builder.fromType(vector_ty);
},
.q => {
// Todo: scalable vector
return .{ .specifier = .invalid };
},
.E => {
// Todo: ext_vector (OpenCL vector)
return .{ .specifier = .invalid };
},
.X => |child| {
builder.combine(undefined, .complex, 0) catch unreachable;
switch (child) {
.float => builder.combine(undefined, .float, 0) catch unreachable,
.double => builder.combine(undefined, .double, 0) catch unreachable,
.longdouble => {
builder.combine(undefined, .long, 0) catch unreachable;
builder.combine(undefined, .double, 0) catch unreachable;
},
}
},
.Y => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.ptrdiff);
},
.P => {
std.debug.assert(builder.specifier == .none);
if (comp.types.file.specifier == .invalid) {
return comp.types.file;
}
builder.specifier = Type.Builder.fromType(comp.types.file);
},
.J => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(desc.suffix.len == 0);
if (comp.types.jmp_buf.specifier == .invalid) {
return comp.types.jmp_buf;
}
builder.specifier = Type.Builder.fromType(comp.types.jmp_buf);
},
.SJ => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(desc.suffix.len == 0);
if (comp.types.sigjmp_buf.specifier == .invalid) {
return comp.types.sigjmp_buf;
}
builder.specifier = Type.Builder.fromType(comp.types.sigjmp_buf);
},
.K => {
std.debug.assert(builder.specifier == .none);
if (comp.types.ucontext_t.specifier == .invalid) {
return comp.types.ucontext_t;
}
builder.specifier = Type.Builder.fromType(comp.types.ucontext_t);
},
.p => {
std.debug.assert(builder.specifier == .none);
std.debug.assert(desc.suffix.len == 0);
builder.specifier = Type.Builder.fromType(comp.types.pid_t);
},
.@"!" => return .{ .specifier = .invalid },
}
for (desc.suffix) |suffix| {
switch (suffix) {
.@"*" => |address_space| {
_ = address_space; // TODO: handle address space
const elem_ty = try allocator.create(Type);
elem_ty.* = builder.finish(undefined) catch unreachable;
const ty = Type{
.specifier = .pointer,
.data = .{ .sub_type = elem_ty },
};
builder.qual = .{};
builder.specifier = Type.Builder.fromType(ty);
},
.C => builder.qual.@"const" = 0,
.D => builder.qual.@"volatile" = 0,
.R => builder.qual.restrict = 0,
}
}
return builder.finish(undefined) catch unreachable;
}
fn createBuiltin(comp: *const Compilation, builtin: BuiltinFunction, type_arena: std.mem.Allocator) !Type {
var it = TypeDescription.TypeIterator.init(builtin.param_str);
const ret_ty_desc = it.next().?;
if (ret_ty_desc.spec == .@"!") {
// Todo: handle target-dependent definition
}
const ret_ty = try createType(ret_ty_desc, &it, comp, type_arena);
var param_count: usize = 0;
var params: [BuiltinFunction.MaxParamCount]Type.Func.Param = undefined;
while (it.next()) |desc| : (param_count += 1) {
params[param_count] = .{ .name_tok = 0, .ty = try createType(desc, &it, comp, type_arena), .name = .empty };
}
const duped_params = try type_arena.dupe(Type.Func.Param, params[0..param_count]);
const func = try type_arena.create(Type.Func);
func.* = .{
.return_type = ret_ty,
.params = duped_params,
};
return .{
.specifier = if (builtin.isVarArgs()) .var_args_func else .func,
.data = .{ .func = func },
};
}
/// Asserts that the builtin has already been created
pub fn lookup(b: *const Builtins, name: []const u8) Expanded {
@setEvalBranchQuota(10_000);
const builtin = BuiltinFunction.fromTag(std.meta.stringToEnum(BuiltinFunction.Tag, name).?);
const ty = b._name_to_type_map.get(name).?;
return .{
.builtin = builtin,
.ty = ty,
};
}
pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8, type_arena: std.mem.Allocator) !?Expanded {
const ty = b._name_to_type_map.get(name) orelse {
@setEvalBranchQuota(10_000);
const tag = std.meta.stringToEnum(BuiltinFunction.Tag, name) orelse return null;
const builtin = BuiltinFunction.fromTag(tag);
if (!comp.hasBuiltinFunction(builtin)) return null;
try b._name_to_type_map.ensureUnusedCapacity(comp.gpa, 1);
const ty = try createBuiltin(comp, builtin, type_arena);
b._name_to_type_map.putAssumeCapacity(name, ty);
return .{
.builtin = builtin,
.ty = ty,
};
};
const builtin = BuiltinFunction.fromTag(std.meta.stringToEnum(BuiltinFunction.Tag, name).?);
return .{
.builtin = builtin,
.ty = ty,
};
}
test "All builtins" {
var comp = Compilation.init(std.testing.allocator);
defer comp.deinit();
_ = try comp.generateBuiltinMacros();
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena.deinit();
const type_arena = arena.allocator();
for (0..@typeInfo(BuiltinFunction.Tag).Enum.fields.len) |i| {
const tag: BuiltinFunction.Tag = @enumFromInt(i);
const name = @tagName(tag);
if (try comp.builtins.getOrCreate(&comp, name, type_arena)) |func_ty| {
const get_again = (try comp.builtins.getOrCreate(&comp, name, std.testing.failing_allocator)).?;
const found_by_lookup = comp.builtins.lookup(name);
try std.testing.expectEqual(func_ty.builtin.tag, get_again.builtin.tag);
try std.testing.expectEqual(func_ty.builtin.tag, found_by_lookup.builtin.tag);
}
}
}
test "Allocation failures" {
const Test = struct {
fn testOne(allocator: std.mem.Allocator) !void {
var comp = Compilation.init(allocator);
defer comp.deinit();
_ = try comp.generateBuiltinMacros();
var arena = std.heap.ArenaAllocator.init(comp.gpa);
defer arena.deinit();
const type_arena = arena.allocator();
const num_builtins = 40;
for (0..num_builtins) |i| {
const tag: BuiltinFunction.Tag = @enumFromInt(i);
const name = @tagName(tag);
_ = try comp.builtins.getOrCreate(&comp, name, type_arena);
}
}
};
try std.testing.checkAllAllocationFailures(std.testing.allocator, Test.testOne, .{});
}
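
A hedged sketch of the intended call pattern for the API above (the helper name is hypothetical): a parser resolves a builtin's type with getOrCreate the first time the name is referenced, and may use lookup afterwards because the entry is then guaranteed to exist.

fn builtinType(comp: *Compilation, arena: std.mem.Allocator, name: []const u8) !?Type {
    // May allocate the function type into the arena on first use; returns null
    // if the name is not a builtin or is unavailable for the current target.
    const expanded = (try comp.builtins.getOrCreate(comp, name, arena)) orelse return null;
    // From here on, comp.builtins.lookup(name) would also succeed; it asserts
    // that the entry has already been created.
    return expanded.ty;
}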

487
deps/aro/CharInfo.zig vendored Normal file

@@ -0,0 +1,487 @@
//! This module provides functions for classifying characters according to
//! various C standards. All classification routines *do not* consider
//! characters from the basic character set; it is assumed those will be
//! checked separately
const assert = @import("std").debug.assert;
/// C11 Standard Annex D
pub fn isC11IdChar(codepoint: u21) bool {
assert(codepoint > 0x7F);
return switch (codepoint) {
// 1
0x00A8,
0x00AA,
0x00AD,
0x00AF,
0x00B2...0x00B5,
0x00B7...0x00BA,
0x00BC...0x00BE,
0x00C0...0x00D6,
0x00D8...0x00F6,
0x00F8...0x00FF,
// 2
0x0100...0x167F,
0x1681...0x180D,
0x180F...0x1FFF,
// 3
0x200B...0x200D,
0x202A...0x202E,
0x203F...0x2040,
0x2054,
0x2060...0x206F,
// 4
0x2070...0x218F,
0x2460...0x24FF,
0x2776...0x2793,
0x2C00...0x2DFF,
0x2E80...0x2FFF,
// 5
0x3004...0x3007,
0x3021...0x302F,
0x3031...0x303F,
// 6
0x3040...0xD7FF,
// 7
0xF900...0xFD3D,
0xFD40...0xFDCF,
0xFDF0...0xFE44,
0xFE47...0xFFFD,
// 8
0x10000...0x1FFFD,
0x20000...0x2FFFD,
0x30000...0x3FFFD,
0x40000...0x4FFFD,
0x50000...0x5FFFD,
0x60000...0x6FFFD,
0x70000...0x7FFFD,
0x80000...0x8FFFD,
0x90000...0x9FFFD,
0xA0000...0xAFFFD,
0xB0000...0xBFFFD,
0xC0000...0xCFFFD,
0xD0000...0xDFFFD,
0xE0000...0xEFFFD,
=> true,
else => false,
};
}
/// C99 Standard Annex D
pub fn isC99IdChar(codepoint: u21) bool {
assert(codepoint > 0x7F);
return switch (codepoint) {
// Latin
0x00AA,
0x00BA,
0x00C0...0x00D6,
0x00D8...0x00F6,
0x00F8...0x01F5,
0x01FA...0x0217,
0x0250...0x02A8,
0x1E00...0x1E9B,
0x1EA0...0x1EF9,
0x207F,
// Greek
0x0386,
0x0388...0x038A,
0x038C,
0x038E...0x03A1,
0x03A3...0x03CE,
0x03D0...0x03D6,
0x03DA,
0x03DC,
0x03DE,
0x03E0,
0x03E2...0x03F3,
0x1F00...0x1F15,
0x1F18...0x1F1D,
0x1F20...0x1F45,
0x1F48...0x1F4D,
0x1F50...0x1F57,
0x1F59,
0x1F5B,
0x1F5D,
0x1F5F...0x1F7D,
0x1F80...0x1FB4,
0x1FB6...0x1FBC,
0x1FC2...0x1FC4,
0x1FC6...0x1FCC,
0x1FD0...0x1FD3,
0x1FD6...0x1FDB,
0x1FE0...0x1FEC,
0x1FF2...0x1FF4,
0x1FF6...0x1FFC,
// Cyrillic
0x0401...0x040C,
0x040E...0x044F,
0x0451...0x045C,
0x045E...0x0481,
0x0490...0x04C4,
0x04C7...0x04C8,
0x04CB...0x04CC,
0x04D0...0x04EB,
0x04EE...0x04F5,
0x04F8...0x04F9,
// Armenian
0x0531...0x0556,
0x0561...0x0587,
// Hebrew
0x05B0...0x05B9,
0x05BB...0x05BD,
0x05BF,
0x05C1...0x05C2,
0x05D0...0x05EA,
0x05F0...0x05F2,
// Arabic
0x0621...0x063A,
0x0640...0x0652,
0x0670...0x06B7,
0x06BA...0x06BE,
0x06C0...0x06CE,
0x06D0...0x06DC,
0x06E5...0x06E8,
0x06EA...0x06ED,
// Devanagari
0x0901...0x0903,
0x0905...0x0939,
0x093E...0x094D,
0x0950...0x0952,
0x0958...0x0963,
// Bengali
0x0981...0x0983,
0x0985...0x098C,
0x098F...0x0990,
0x0993...0x09A8,
0x09AA...0x09B0,
0x09B2,
0x09B6...0x09B9,
0x09BE...0x09C4,
0x09C7...0x09C8,
0x09CB...0x09CD,
0x09DC...0x09DD,
0x09DF...0x09E3,
0x09F0...0x09F1,
// Gurmukhi
0x0A02,
0x0A05...0x0A0A,
0x0A0F...0x0A10,
0x0A13...0x0A28,
0x0A2A...0x0A30,
0x0A32...0x0A33,
0x0A35...0x0A36,
0x0A38...0x0A39,
0x0A3E...0x0A42,
0x0A47...0x0A48,
0x0A4B...0x0A4D,
0x0A59...0x0A5C,
0x0A5E,
0x0A74,
// Gujarati
0x0A81...0x0A83,
0x0A85...0x0A8B,
0x0A8D,
0x0A8F...0x0A91,
0x0A93...0x0AA8,
0x0AAA...0x0AB0,
0x0AB2...0x0AB3,
0x0AB5...0x0AB9,
0x0ABD...0x0AC5,
0x0AC7...0x0AC9,
0x0ACB...0x0ACD,
0x0AD0,
0x0AE0,
// Oriya
0x0B01...0x0B03,
0x0B05...0x0B0C,
0x0B0F...0x0B10,
0x0B13...0x0B28,
0x0B2A...0x0B30,
0x0B32...0x0B33,
0x0B36...0x0B39,
0x0B3E...0x0B43,
0x0B47...0x0B48,
0x0B4B...0x0B4D,
0x0B5C...0x0B5D,
0x0B5F...0x0B61,
// Tamil
0x0B82...0x0B83,
0x0B85...0x0B8A,
0x0B8E...0x0B90,
0x0B92...0x0B95,
0x0B99...0x0B9A,
0x0B9C,
0x0B9E...0x0B9F,
0x0BA3...0x0BA4,
0x0BA8...0x0BAA,
0x0BAE...0x0BB5,
0x0BB7...0x0BB9,
0x0BBE...0x0BC2,
0x0BC6...0x0BC8,
0x0BCA...0x0BCD,
// Telugu
0x0C01...0x0C03,
0x0C05...0x0C0C,
0x0C0E...0x0C10,
0x0C12...0x0C28,
0x0C2A...0x0C33,
0x0C35...0x0C39,
0x0C3E...0x0C44,
0x0C46...0x0C48,
0x0C4A...0x0C4D,
0x0C60...0x0C61,
// Kannada
0x0C82...0x0C83,
0x0C85...0x0C8C,
0x0C8E...0x0C90,
0x0C92...0x0CA8,
0x0CAA...0x0CB3,
0x0CB5...0x0CB9,
0x0CBE...0x0CC4,
0x0CC6...0x0CC8,
0x0CCA...0x0CCD,
0x0CDE,
0x0CE0...0x0CE1,
// Malayalam
0x0D02...0x0D03,
0x0D05...0x0D0C,
0x0D0E...0x0D10,
0x0D12...0x0D28,
0x0D2A...0x0D39,
0x0D3E...0x0D43,
0x0D46...0x0D48,
0x0D4A...0x0D4D,
0x0D60...0x0D61,
// Thai (excluding digits 0x0E50...0x0E59; originally 0x0E01...0x0E3A and 0x0E40...0x0E5B)
0x0E01...0x0E3A,
0x0E40...0x0E4F,
0x0E5A...0x0E5B,
// Lao
0x0E81...0x0E82,
0x0E84,
0x0E87...0x0E88,
0x0E8A,
0x0E8D,
0x0E94...0x0E97,
0x0E99...0x0E9F,
0x0EA1...0x0EA3,
0x0EA5,
0x0EA7,
0x0EAA...0x0EAB,
0x0EAD...0x0EAE,
0x0EB0...0x0EB9,
0x0EBB...0x0EBD,
0x0EC0...0x0EC4,
0x0EC6,
0x0EC8...0x0ECD,
0x0EDC...0x0EDD,
// Tibetan
0x0F00,
0x0F18...0x0F19,
0x0F35,
0x0F37,
0x0F39,
0x0F3E...0x0F47,
0x0F49...0x0F69,
0x0F71...0x0F84,
0x0F86...0x0F8B,
0x0F90...0x0F95,
0x0F97,
0x0F99...0x0FAD,
0x0FB1...0x0FB7,
0x0FB9,
// Georgian
0x10A0...0x10C5,
0x10D0...0x10F6,
// Hiragana
0x3041...0x3093,
0x309B...0x309C,
// Katakana
0x30A1...0x30F6,
0x30FB...0x30FC,
// Bopomofo
0x3105...0x312C,
// CJK Unified Ideographs
0x4E00...0x9FA5,
// Hangul
0xAC00...0xD7A3,
// Digits
0x0660...0x0669,
0x06F0...0x06F9,
0x0966...0x096F,
0x09E6...0x09EF,
0x0A66...0x0A6F,
0x0AE6...0x0AEF,
0x0B66...0x0B6F,
0x0BE7...0x0BEF,
0x0C66...0x0C6F,
0x0CE6...0x0CEF,
0x0D66...0x0D6F,
0x0E50...0x0E59,
0x0ED0...0x0ED9,
0x0F20...0x0F33,
// Special characters
0x00B5,
0x00B7,
0x02B0...0x02B8,
0x02BB,
0x02BD...0x02C1,
0x02D0...0x02D1,
0x02E0...0x02E4,
0x037A,
0x0559,
0x093D,
0x0B3D,
0x1FBE,
0x203F...0x2040,
0x2102,
0x2107,
0x210A...0x2113,
0x2115,
0x2118...0x211D,
0x2124,
0x2126,
0x2128,
0x212A...0x2131,
0x2133...0x2138,
0x2160...0x2182,
0x3005...0x3007,
0x3021...0x3029,
=> true,
else => false,
};
}
/// C11 standard Annex D
pub fn isC11DisallowedInitialIdChar(codepoint: u21) bool {
assert(codepoint > 0x7F);
return switch (codepoint) {
0x0300...0x036F,
0x1DC0...0x1DFF,
0x20D0...0x20FF,
0xFE20...0xFE2F,
=> true,
else => false,
};
}
/// These are "digit" characters; C99 disallows them as the first
/// character of an identifier
pub fn isC99DisallowedInitialIDChar(codepoint: u21) bool {
assert(codepoint > 0x7F);
return switch (codepoint) {
0x0660...0x0669,
0x06F0...0x06F9,
0x0966...0x096F,
0x09E6...0x09EF,
0x0A66...0x0A6F,
0x0AE6...0x0AEF,
0x0B66...0x0B6F,
0x0BE7...0x0BEF,
0x0C66...0x0C6F,
0x0CE6...0x0CEF,
0x0D66...0x0D6F,
0x0E50...0x0E59,
0x0ED0...0x0ED9,
0x0F20...0x0F33,
=> true,
else => false,
};
}
pub fn isInvisible(codepoint: u21) bool {
assert(codepoint > 0x7F);
return switch (codepoint) {
0x00ad, // SOFT HYPHEN
0x200b, // ZERO WIDTH SPACE
0x200c, // ZERO WIDTH NON-JOINER
0x200d, // ZERO WIDTH JOINER
0x2060, // WORD JOINER
0x2061, // FUNCTION APPLICATION
0x2062, // INVISIBLE TIMES
0x2063, // INVISIBLE SEPARATOR
0x2064, // INVISIBLE PLUS
0xfeff, // ZERO WIDTH NO-BREAK SPACE
=> true,
else => false,
};
}
/// Checks for identifier characters which resemble non-identifier characters
pub fn homoglyph(codepoint: u21) ?u21 {
assert(codepoint > 0x7F);
return switch (codepoint) {
0x01c3 => '!', // LATIN LETTER RETROFLEX CLICK
0x037e => ';', // GREEK QUESTION MARK
0x2212 => '-', // MINUS SIGN
0x2215 => '/', // DIVISION SLASH
0x2216 => '\\', // SET MINUS
0x2217 => '*', // ASTERISK OPERATOR
0x2223 => '|', // DIVIDES
0x2227 => '^', // LOGICAL AND
0x2236 => ':', // RATIO
0x223c => '~', // TILDE OPERATOR
0xa789 => ':', // MODIFIER LETTER COLON
0xff01 => '!', // FULLWIDTH EXCLAMATION MARK
0xff03 => '#', // FULLWIDTH NUMBER SIGN
0xff04 => '$', // FULLWIDTH DOLLAR SIGN
0xff05 => '%', // FULLWIDTH PERCENT SIGN
0xff06 => '&', // FULLWIDTH AMPERSAND
0xff08 => '(', // FULLWIDTH LEFT PARENTHESIS
0xff09 => ')', // FULLWIDTH RIGHT PARENTHESIS
0xff0a => '*', // FULLWIDTH ASTERISK
0xff0b => '+', // FULLWIDTH PLUS SIGN
0xff0c => ',', // FULLWIDTH COMMA
0xff0d => '-', // FULLWIDTH HYPHEN-MINUS
0xff0e => '.', // FULLWIDTH FULL STOP
0xff0f => '/', // FULLWIDTH SOLIDUS
0xff1a => ':', // FULLWIDTH COLON
0xff1b => ';', // FULLWIDTH SEMICOLON
0xff1c => '<', // FULLWIDTH LESS-THAN SIGN
0xff1d => '=', // FULLWIDTH EQUALS SIGN
0xff1e => '>', // FULLWIDTH GREATER-THAN SIGN
0xff1f => '?', // FULLWIDTH QUESTION MARK
0xff20 => '@', // FULLWIDTH COMMERCIAL AT
0xff3b => '[', // FULLWIDTH LEFT SQUARE BRACKET
0xff3c => '\\', // FULLWIDTH REVERSE SOLIDUS
0xff3d => ']', // FULLWIDTH RIGHT SQUARE BRACKET
0xff3e => '^', // FULLWIDTH CIRCUMFLEX ACCENT
0xff5b => '{', // FULLWIDTH LEFT CURLY BRACKET
0xff5c => '|', // FULLWIDTH VERTICAL LINE
0xff5d => '}', // FULLWIDTH RIGHT CURLY BRACKET
0xff5e => '~', // FULLWIDTH TILDE
else => null,
};
}
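
A hedged sketch (names below are hypothetical) of how a tokenizer might combine these classifiers when it meets a non-ASCII codepoint in an identifier: warn on confusable characters, reject codepoints outside Annex D, and reject combining marks in the first position.

const std = @import("std");
const CharInfo = @import("CharInfo.zig");

const IdCharClass = enum { ok, warn_homoglyph, invalid };

fn classifyExtendedIdChar(codepoint: u21, is_first_char: bool) IdCharClass {
    std.debug.assert(codepoint > 0x7F); // ASCII is handled before reaching here
    if (CharInfo.homoglyph(codepoint) != null) return .warn_homoglyph;
    if (!CharInfo.isC11IdChar(codepoint)) return .invalid;
    if (is_first_char and CharInfo.isC11DisallowedInitialIdChar(codepoint)) return .invalid;
    return .ok;
}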

1291
deps/aro/CodeGen.zig vendored Normal file

File diff suppressed because it is too large

108
deps/aro/Codegen_legacy.zig vendored Normal file

@@ -0,0 +1,108 @@
const std = @import("std");
const Compilation = @import("Compilation.zig");
const Tree = @import("Tree.zig");
const NodeIndex = Tree.NodeIndex;
const Object = @import("Object.zig");
const x86_64 = @import("codegen/x86_64.zig");
const Codegen = @This();
comp: *Compilation,
tree: Tree,
obj: *Object,
node_tag: []const Tree.Tag,
node_data: []const Tree.Node.Data,
pub const Error = Compilation.Error || error{CodegenFailed};
/// Generate tree to an object file.
/// Caller is responsible for flushing and freeing the returned object.
pub fn generateTree(comp: *Compilation, tree: Tree) Compilation.Error!*Object {
var c = Codegen{
.comp = comp,
.tree = tree,
.obj = try Object.create(comp),
.node_tag = tree.nodes.items(.tag),
.node_data = tree.nodes.items(.data),
};
errdefer c.obj.deinit();
const node_tags = tree.nodes.items(.tag);
for (tree.root_decls) |decl| {
switch (node_tags[@intFromEnum(decl)]) {
// these produce no code
.static_assert,
.typedef,
.struct_decl_two,
.union_decl_two,
.enum_decl_two,
.struct_decl,
.union_decl,
.enum_decl,
.struct_forward_decl,
.union_forward_decl,
.enum_forward_decl,
=> {},
// define symbol
.fn_proto,
.static_fn_proto,
.inline_fn_proto,
.inline_static_fn_proto,
.extern_var,
.threadlocal_extern_var,
=> {
const name = c.tree.tokSlice(c.node_data[@intFromEnum(decl)].decl.name);
_ = try c.obj.declareSymbol(.undefined, name, .Strong, .external, 0, 0);
},
// function definition
.fn_def,
.static_fn_def,
.inline_fn_def,
.inline_static_fn_def,
=> c.genFn(decl) catch |err| switch (err) {
error.FatalError => return error.FatalError,
error.OutOfMemory => return error.OutOfMemory,
error.CodegenFailed => continue,
},
.@"var",
.static_var,
.threadlocal_var,
.threadlocal_static_var,
.implicit_static_var,
=> c.genVar(decl) catch |err| switch (err) {
error.FatalError => return error.FatalError,
error.OutOfMemory => return error.OutOfMemory,
error.CodegenFailed => continue,
},
// TODO
.file_scope_asm => {},
else => unreachable,
}
}
return c.obj;
}
fn genFn(c: *Codegen, decl: NodeIndex) Error!void {
const section: Object.Section = .func;
const data = try c.obj.getSection(section);
const start_len = data.items.len;
switch (c.comp.target.cpu.arch) {
.x86_64 => try x86_64.genFn(c, decl, data),
else => unreachable,
}
const name = c.tree.tokSlice(c.node_data[@intFromEnum(decl)].decl.name);
_ = try c.obj.declareSymbol(section, name, .Strong, .func, start_len, data.items.len - start_len);
}
fn genVar(c: *Codegen, decl: NodeIndex) Error!void {
switch (c.comp.target.cpu.arch) {
.x86_64 => try x86_64.genVar(c, decl),
else => unreachable,
}
}

1491
deps/aro/Compilation.zig vendored Normal file

File diff suppressed because it is too large

2787
deps/aro/Diagnostics.zig vendored Normal file

File diff suppressed because it is too large

683
deps/aro/Driver.zig vendored Normal file

@@ -0,0 +1,683 @@
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const process = std.process;
const Codegen = @import("Codegen_legacy.zig");
const Compilation = @import("Compilation.zig");
const LangOpts = @import("LangOpts.zig");
const Preprocessor = @import("Preprocessor.zig");
const Parser = @import("Parser.zig");
const Source = @import("Source.zig");
const Toolchain = @import("Toolchain.zig");
const util = @import("util.zig");
const target_util = @import("target.zig");
const Driver = @This();
pub const Linker = enum {
ld,
bfd,
gold,
lld,
mold,
};
comp: *Compilation,
inputs: std.ArrayListUnmanaged(Source) = .{},
link_objects: std.ArrayListUnmanaged([]const u8) = .{},
output_name: ?[]const u8 = null,
sysroot: ?[]const u8 = null,
temp_file_count: u32 = 0,
only_preprocess: bool = false,
only_syntax: bool = false,
only_compile: bool = false,
only_preprocess_and_compile: bool = false,
verbose_ast: bool = false,
verbose_pp: bool = false,
verbose_ir: bool = false,
verbose_linker_args: bool = false,
/// Full path to the aro executable
aro_name: []const u8 = "",
/// Value of --triple= passed via CLI
raw_target_triple: ?[]const u8 = null,
// linker options
use_linker: ?[]const u8 = null,
linker_path: ?[]const u8 = null,
nodefaultlibs: bool = false,
nolibc: bool = false,
nostartfiles: bool = false,
nostdlib: bool = false,
pie: ?bool = null,
rdynamic: bool = false,
relocatable: bool = false,
rtlib: ?[]const u8 = null,
shared: bool = false,
shared_libgcc: bool = false,
static: bool = false,
static_libgcc: bool = false,
static_pie: bool = false,
strip: bool = false,
unwindlib: ?[]const u8 = null,
pub fn deinit(d: *Driver) void {
for (d.link_objects.items[d.link_objects.items.len - d.temp_file_count ..]) |obj| {
std.fs.deleteFileAbsolute(obj) catch {};
d.comp.gpa.free(obj);
}
d.inputs.deinit(d.comp.gpa);
d.link_objects.deinit(d.comp.gpa);
d.* = undefined;
}
pub const usage =
\\Usage {s}: [options] file..
\\
\\General options:
\\ -h, --help Print this message.
\\ -v, --version Print aro version.
\\
\\Compile options:
\\ -c, --compile Only run preprocess, compile, and assemble steps
\\ -D <macro>=<value> Define <macro> to <value> (defaults to 1)
\\ -E Only run the preprocessor
\\ -fchar8_t Enable char8_t (enabled by default in C2X and later)
\\ -fno-char8_t Disable char8_t (disabled by default for pre-C2X)
\\ -fcolor-diagnostics Enable colors in diagnostics
\\ -fno-color-diagnostics Disable colors in diagnostics
\\ -fdeclspec Enable support for __declspec attributes
\\ -fno-declspec Disable support for __declspec attributes
\\ -ffp-eval-method=[source|double|extended]
\\ Evaluation method to use for floating-point arithmetic
\\ -fgnu-inline-asm Enable GNU style inline asm (default: enabled)
\\ -fno-gnu-inline-asm Disable GNU style inline asm
\\ -fms-extensions Enable support for Microsoft extensions
\\ -fno-ms-extensions Disable support for Microsoft extensions
\\ -fdollars-in-identifiers
\\ Allow '$' in identifiers
\\ -fno-dollars-in-identifiers
\\ Disallow '$' in identifiers
\\ -fmacro-backtrace-limit=<limit>
\\ Set limit on how many macro expansion traces are shown in errors (default 6)
\\ -fnative-half-type Use the native half type for __fp16 instead of promoting to float
\\ -fnative-half-arguments-and-returns
\\ Allow half-precision function arguments and return values
\\ -fshort-enums Use the narrowest possible integer type for enums
\\ -fno-short-enums Use "int" as the tag type for enums
\\ -fsigned-char "char" is signed
\\ -fno-signed-char "char" is unsigned
\\ -fsyntax-only Only run the preprocessor, parser, and semantic analysis stages
\\ -funsigned-char "char" is unsigned
\\ -fno-unsigned-char "char" is signed
\\ -I <dir> Add directory to include search path
\\ -isystem Add directory to SYSTEM include search path
\\ --emulate=[clang|gcc|msvc]
\\ Select which C compiler to emulate (default clang)
\\ -o <file> Write output to <file>
\\ -pedantic Warn on language extensions
\\ --rtlib=<arg> Compiler runtime library to use (libgcc or compiler-rt)
\\ -std=<standard> Specify language standard
\\ -S, --assemble Only run preprocess and compilation steps
\\ --sysroot=<dir> Use dir as the logical root directory for headers and libraries (not fully implemented)
\\ --target=<value> Generate code for the given target
\\ -U <macro> Undefine <macro>
\\ -Werror Treat all warnings as errors
\\ -Werror=<warning> Treat warning as error
\\ -W<warning> Enable the specified warning
\\ -Wno-<warning> Disable the specified warning
\\
\\Link options:
\\ -fuse-ld=[bfd|gold|lld|mold]
\\ Use specific linker
\\ -nodefaultlibs Do not use the standard system libraries when linking.
\\ -nolibc Do not use the C library or system libraries tightly coupled with it when linking.
\\ -nostdlib Do not use the standard system startup files or libraries when linking
\\ -nostartfiles Do not use the standard system startup files when linking.
\\ -pie Produce a dynamically linked position independent executable on targets that support it.
\\ --ld-path=<path> Use linker specified by <path>
\\ -r Produce a relocatable object as output.
\\ -rdynamic Pass the flag -export-dynamic to the ELF linker, on targets that support it.
\\ -s Remove all symbol table and relocation information from the executable.
\\ -shared Produce a shared object which can then be linked with other objects to form an executable.
\\ -shared-libgcc On systems that provide libgcc as a shared library, force the use of the shared version
\\ -static On systems that support dynamic linking, this overrides -pie and prevents linking with the shared libraries.
\\ -static-libgcc On systems that provide libgcc as a shared library, force the use of the static version
\\ -static-pie Produce a static position independent executable on targets that support it.
\\ --unwindlib=<arg> Unwind library to use ("none", "libgcc", or "libunwind") If not specified, will match runtime library
\\
\\Debug options:
\\ --verbose-ast Dump produced AST to stdout
\\ --verbose-pp Dump preprocessor state
\\ --verbose-ir Dump ir to stdout
\\ --verbose-linker-args Dump linker args to stdout
\\
\\
;
/// Process command line arguments, returns true if something was written to std_out.
pub fn parseArgs(
d: *Driver,
std_out: anytype,
macro_buf: anytype,
args: []const []const u8,
) !bool {
var i: usize = 1;
var color_setting: enum {
on,
off,
unset,
} = .unset;
while (i < args.len) : (i += 1) {
const arg = args[i];
if (mem.startsWith(u8, arg, "-") and arg.len > 1) {
if (mem.eql(u8, arg, "-h") or mem.eql(u8, arg, "--help")) {
std_out.print(usage, .{args[0]}) catch |er| {
return d.fatal("unable to print usage: {s}", .{util.errorDescription(er)});
};
return true;
} else if (mem.eql(u8, arg, "-v") or mem.eql(u8, arg, "--version")) {
std_out.writeAll(@import("lib.zig").version_str ++ "\n") catch |er| {
return d.fatal("unable to print version: {s}", .{util.errorDescription(er)});
};
return true;
} else if (mem.startsWith(u8, arg, "-D")) {
var macro = arg["-D".len..];
if (macro.len == 0) {
i += 1;
if (i >= args.len) {
try d.err("expected argument after -D");
continue;
}
macro = args[i];
}
var value: []const u8 = "1";
if (mem.indexOfScalar(u8, macro, '=')) |some| {
value = macro[some + 1 ..];
macro = macro[0..some];
}
try macro_buf.print("#define {s} {s}\n", .{ macro, value });
} else if (mem.startsWith(u8, arg, "-U")) {
var macro = arg["-U".len..];
if (macro.len == 0) {
i += 1;
if (i >= args.len) {
try d.err("expected argument after -U");
continue;
}
macro = args[i];
}
try macro_buf.print("#undef {s}\n", .{macro});
} else if (mem.eql(u8, arg, "-c") or mem.eql(u8, arg, "--compile")) {
d.only_compile = true;
} else if (mem.eql(u8, arg, "-E")) {
d.only_preprocess = true;
} else if (mem.eql(u8, arg, "-fchar8_t")) {
d.comp.langopts.has_char8_t_override = true;
} else if (mem.eql(u8, arg, "-fno-char8_t")) {
d.comp.langopts.has_char8_t_override = false;
} else if (mem.eql(u8, arg, "-fcolor-diagnostics")) {
color_setting = .on;
} else if (mem.eql(u8, arg, "-fno-color-diagnostics")) {
color_setting = .off;
} else if (mem.eql(u8, arg, "-fdollars-in-identifiers")) {
d.comp.langopts.dollars_in_identifiers = true;
} else if (mem.eql(u8, arg, "-fno-dollars-in-identifiers")) {
d.comp.langopts.dollars_in_identifiers = false;
} else if (mem.eql(u8, arg, "-fdigraphs")) {
d.comp.langopts.digraphs = true;
} else if (mem.eql(u8, arg, "-fgnu-inline-asm")) {
d.comp.langopts.gnu_asm = true;
} else if (mem.eql(u8, arg, "-fno-gnu-inline-asm")) {
d.comp.langopts.gnu_asm = false;
} else if (mem.eql(u8, arg, "-fno-digraphs")) {
d.comp.langopts.digraphs = false;
} else if (option(arg, "-fmacro-backtrace-limit=")) |limit_str| {
var limit = std.fmt.parseInt(u32, limit_str, 10) catch {
try d.err("-fmacro-backtrace-limit takes a number argument");
continue;
};
if (limit == 0) limit = std.math.maxInt(u32);
d.comp.diag.macro_backtrace_limit = limit;
} else if (mem.eql(u8, arg, "-fnative-half-type")) {
d.comp.langopts.use_native_half_type = true;
} else if (mem.eql(u8, arg, "-fnative-half-arguments-and-returns")) {
d.comp.langopts.allow_half_args_and_returns = true;
} else if (mem.eql(u8, arg, "-fshort-enums")) {
d.comp.langopts.short_enums = true;
} else if (mem.eql(u8, arg, "-fno-short-enums")) {
d.comp.langopts.short_enums = false;
} else if (mem.eql(u8, arg, "-fsigned-char")) {
d.comp.langopts.setCharSignedness(.signed);
} else if (mem.eql(u8, arg, "-fno-signed-char")) {
d.comp.langopts.setCharSignedness(.unsigned);
} else if (mem.eql(u8, arg, "-funsigned-char")) {
d.comp.langopts.setCharSignedness(.unsigned);
} else if (mem.eql(u8, arg, "-fno-unsigned-char")) {
d.comp.langopts.setCharSignedness(.signed);
} else if (mem.eql(u8, arg, "-fdeclspec")) {
d.comp.langopts.declspec_attrs = true;
} else if (mem.eql(u8, arg, "-fno-declspec")) {
d.comp.langopts.declspec_attrs = false;
} else if (mem.eql(u8, arg, "-fms-extensions")) {
d.comp.langopts.enableMSExtensions();
} else if (mem.eql(u8, arg, "-fno-ms-extensions")) {
d.comp.langopts.disableMSExtensions();
} else if (mem.startsWith(u8, arg, "-I")) {
var path = arg["-I".len..];
if (path.len == 0) {
i += 1;
if (i >= args.len) {
try d.err("expected argument after -I");
continue;
}
path = args[i];
}
try d.comp.include_dirs.append(path);
} else if (mem.startsWith(u8, arg, "-fsyntax-only")) {
d.only_syntax = true;
} else if (mem.startsWith(u8, arg, "-fno-syntax-only")) {
d.only_syntax = false;
} else if (mem.startsWith(u8, arg, "-isystem")) {
var path = arg["-isystem".len..];
if (path.len == 0) {
i += 1;
if (i >= args.len) {
try d.err("expected argument after -isystem");
continue;
}
path = args[i];
}
const duped = try d.comp.gpa.dupe(u8, path);
errdefer d.comp.gpa.free(duped);
try d.comp.system_include_dirs.append(duped);
} else if (option(arg, "--emulate=")) |compiler_str| {
const compiler = std.meta.stringToEnum(LangOpts.Compiler, compiler_str) orelse {
try d.comp.diag.add(.{ .tag = .cli_invalid_emulate, .extra = .{ .str = arg } }, &.{});
continue;
};
d.comp.langopts.setEmulatedCompiler(compiler);
} else if (option(arg, "-ffp-eval-method=")) |fp_method_str| {
const fp_eval_method = std.meta.stringToEnum(LangOpts.FPEvalMethod, fp_method_str) orelse .indeterminate;
if (fp_eval_method == .indeterminate) {
try d.comp.diag.add(.{ .tag = .cli_invalid_fp_eval_method, .extra = .{ .str = fp_method_str } }, &.{});
continue;
}
d.comp.langopts.setFpEvalMethod(fp_eval_method);
} else if (mem.startsWith(u8, arg, "-o")) {
var file = arg["-o".len..];
if (file.len == 0) {
i += 1;
if (i >= args.len) {
try d.err("expected argument after -o");
continue;
}
file = args[i];
}
d.output_name = file;
} else if (option(arg, "--sysroot=")) |sysroot| {
d.sysroot = sysroot;
} else if (mem.eql(u8, arg, "-pedantic")) {
d.comp.diag.options.pedantic = .warning;
} else if (option(arg, "--rtlib=")) |rtlib| {
if (mem.eql(u8, rtlib, "compiler-rt") or mem.eql(u8, rtlib, "libgcc") or mem.eql(u8, rtlib, "platform")) {
d.rtlib = rtlib;
} else {
try d.comp.diag.add(.{ .tag = .invalid_rtlib, .extra = .{ .str = rtlib } }, &.{});
}
} else if (option(arg, "-Werror=")) |err_name| {
try d.comp.diag.set(err_name, .@"error");
} else if (mem.eql(u8, arg, "-Wno-fatal-errors")) {
d.comp.diag.fatal_errors = false;
} else if (option(arg, "-Wno-")) |err_name| {
try d.comp.diag.set(err_name, .off);
} else if (mem.eql(u8, arg, "-Wfatal-errors")) {
d.comp.diag.fatal_errors = true;
} else if (option(arg, "-W")) |err_name| {
try d.comp.diag.set(err_name, .warning);
} else if (option(arg, "-std=")) |standard| {
d.comp.langopts.setStandard(standard) catch
try d.comp.diag.add(.{ .tag = .cli_invalid_standard, .extra = .{ .str = arg } }, &.{});
} else if (mem.eql(u8, arg, "-S") or mem.eql(u8, arg, "--assemble")) {
d.only_preprocess_and_compile = true;
} else if (option(arg, "--target=")) |triple| {
const cross = std.zig.CrossTarget.parse(.{ .arch_os_abi = triple }) catch {
try d.comp.diag.add(.{ .tag = .cli_invalid_target, .extra = .{ .str = arg } }, &.{});
continue;
};
d.comp.target = cross.toTarget(); // TODO deprecated
d.comp.langopts.setEmulatedCompiler(target_util.systemCompiler(d.comp.target));
d.raw_target_triple = triple;
} else if (mem.eql(u8, arg, "--verbose-ast")) {
d.verbose_ast = true;
} else if (mem.eql(u8, arg, "--verbose-pp")) {
d.verbose_pp = true;
} else if (mem.eql(u8, arg, "--verbose-ir")) {
d.verbose_ir = true;
} else if (mem.eql(u8, arg, "--verbose-linker-args")) {
d.verbose_linker_args = true;
} else if (option(arg, "-fuse-ld=")) |linker_name| {
d.use_linker = linker_name;
} else if (mem.eql(u8, arg, "-fuse-ld=")) {
d.use_linker = null;
} else if (option(arg, "--ld-path=")) |linker_path| {
d.linker_path = linker_path;
} else if (mem.eql(u8, arg, "-r")) {
d.relocatable = true;
} else if (mem.eql(u8, arg, "-shared")) {
d.shared = true;
} else if (mem.eql(u8, arg, "-shared-libgcc")) {
d.shared_libgcc = true;
} else if (mem.eql(u8, arg, "-static")) {
d.static = true;
} else if (mem.eql(u8, arg, "-static-libgcc")) {
d.static_libgcc = true;
} else if (mem.eql(u8, arg, "-static-pie")) {
d.static_pie = true;
} else if (mem.eql(u8, arg, "-pie")) {
d.pie = true;
} else if (mem.eql(u8, arg, "-no-pie") or mem.eql(u8, arg, "-nopie")) {
d.pie = false;
} else if (mem.eql(u8, arg, "-rdynamic")) {
d.rdynamic = true;
} else if (mem.eql(u8, arg, "-s")) {
d.strip = true;
} else if (mem.eql(u8, arg, "-nodefaultlibs")) {
d.nodefaultlibs = true;
} else if (mem.eql(u8, arg, "-nolibc")) {
d.nolibc = true;
} else if (mem.eql(u8, arg, "-nostdlib")) {
d.nostdlib = true;
} else if (mem.eql(u8, arg, "-nostartfiles")) {
d.nostartfiles = true;
} else if (option(arg, "--unwindlib=")) |unwindlib| {
const valid_unwindlibs: [5][]const u8 = .{ "", "none", "platform", "libunwind", "libgcc" };
for (valid_unwindlibs) |name| {
if (mem.eql(u8, name, unwindlib)) {
d.unwindlib = unwindlib;
break;
}
} else {
try d.comp.diag.add(.{ .tag = .invalid_unwindlib, .extra = .{ .str = unwindlib } }, &.{});
}
} else {
try d.comp.diag.add(.{ .tag = .cli_unknown_arg, .extra = .{ .str = arg } }, &.{});
}
} else if (std.mem.endsWith(u8, arg, ".o") or std.mem.endsWith(u8, arg, ".obj")) {
try d.link_objects.append(d.comp.gpa, arg);
} else {
const source = d.addSource(arg) catch |er| {
return d.fatal("unable to add source file '{s}': {s}", .{ arg, util.errorDescription(er) });
};
try d.inputs.append(d.comp.gpa, source);
}
}
d.comp.diag.color = switch (color_setting) {
.on => true,
.off => false,
.unset => util.fileSupportsColor(std.io.getStdErr()) and !std.process.hasEnvVarConstant("NO_COLOR"),
};
return false;
}
fn option(arg: []const u8, name: []const u8) ?[]const u8 {
if (std.mem.startsWith(u8, arg, name) and arg.len > name.len) {
return arg[name.len..];
}
return null;
}
fn addSource(d: *Driver, path: []const u8) !Source {
if (mem.eql(u8, "-", path)) {
const stdin = std.io.getStdIn().reader();
const input = try stdin.readAllAlloc(d.comp.gpa, std.math.maxInt(u32));
defer d.comp.gpa.free(input);
return d.comp.addSourceFromBuffer("<stdin>", input);
}
return d.comp.addSourceFromPath(path);
}
pub fn err(d: *Driver, msg: []const u8) !void {
try d.comp.diag.add(.{ .tag = .cli_error, .extra = .{ .str = msg } }, &.{});
}
pub fn fatal(d: *Driver, comptime fmt: []const u8, args: anytype) error{FatalError} {
d.comp.renderErrors();
return d.comp.diag.fatalNoSrc(fmt, args);
}
pub fn main(d: *Driver, tc: *Toolchain, args: []const []const u8) !void {
var macro_buf = std.ArrayList(u8).init(d.comp.gpa);
defer macro_buf.deinit();
const std_out = std.io.getStdOut().writer();
if (try parseArgs(d, std_out, macro_buf.writer(), args)) return;
const linking = !(d.only_preprocess or d.only_syntax or d.only_compile or d.only_preprocess_and_compile);
if (d.inputs.items.len == 0) {
return d.fatal("no input files", .{});
} else if (d.inputs.items.len != 1 and d.output_name != null and !linking) {
return d.fatal("cannot specify -o when generating multiple output files", .{});
}
if (!linking) for (d.link_objects.items) |obj| {
try d.comp.diag.add(.{ .tag = .cli_unused_link_object, .extra = .{ .str = obj } }, &.{});
};
d.comp.defineSystemIncludes(d.aro_name) catch |er| switch (er) {
error.OutOfMemory => return error.OutOfMemory,
error.AroIncludeNotFound => return d.fatal("unable to find Aro builtin headers", .{}),
};
const builtin = try d.comp.generateBuiltinMacros();
const user_macros = try d.comp.addSourceFromBuffer("<command line>", macro_buf.items);
const fast_exit = @import("builtin").mode != .Debug;
if (fast_exit and d.inputs.items.len == 1) {
d.processSource(tc, d.inputs.items[0], builtin, user_macros, fast_exit) catch |e| switch (e) {
error.FatalError => {
d.comp.renderErrors();
d.exitWithCleanup(1);
},
else => |er| return er,
};
unreachable;
}
for (d.inputs.items) |source| {
d.processSource(tc, source, builtin, user_macros, fast_exit) catch |e| switch (e) {
error.FatalError => {
d.comp.renderErrors();
},
else => |er| return er,
};
}
if (d.comp.diag.errors != 0) {
if (fast_exit) d.exitWithCleanup(1);
return;
}
if (linking) {
try d.invokeLinker(tc, fast_exit);
}
if (fast_exit) std.process.exit(0);
}
fn processSource(
d: *Driver,
tc: *Toolchain,
source: Source,
builtin: Source,
user_macros: Source,
comptime fast_exit: bool,
) !void {
d.comp.generated_buf.items.len = 0;
var pp = Preprocessor.init(d.comp);
defer pp.deinit();
if (d.verbose_pp) pp.verbose = true;
if (d.only_preprocess) pp.preserve_whitespace = true;
try pp.addBuiltinMacros();
_ = try pp.preprocess(builtin);
_ = try pp.preprocess(user_macros);
const eof = try pp.preprocess(source);
try pp.tokens.append(pp.comp.gpa, eof);
if (d.only_preprocess) {
d.comp.renderErrors();
const file = if (d.output_name) |some|
std.fs.cwd().createFile(some, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ some, util.errorDescription(er) })
else
std.io.getStdOut();
defer if (d.output_name != null) file.close();
var buf_w = std.io.bufferedWriter(file.writer());
pp.prettyPrintTokens(buf_w.writer()) catch |er|
return d.fatal("unable to write result: {s}", .{util.errorDescription(er)});
buf_w.flush() catch |er|
return d.fatal("unable to write result: {s}", .{util.errorDescription(er)});
if (fast_exit) std.process.exit(0); // Not linking, no need for cleanup.
return;
}
var tree = try Parser.parse(&pp);
defer tree.deinit();
if (d.verbose_ast) {
const stdout = std.io.getStdOut();
var buf_writer = std.io.bufferedWriter(stdout.writer());
const color = d.comp.diag.color and util.fileSupportsColor(stdout);
tree.dump(color, buf_writer.writer()) catch {};
buf_writer.flush() catch {};
}
const prev_errors = d.comp.diag.errors;
d.comp.renderErrors();
if (d.comp.diag.errors != prev_errors) {
if (fast_exit) d.exitWithCleanup(1);
return; // do not compile if there were errors
}
if (d.only_syntax) {
if (fast_exit) std.process.exit(0); // Not linking, no need for cleanup.
return;
}
if (d.comp.target.ofmt != .elf or d.comp.target.cpu.arch != .x86_64) {
return d.fatal(
"unsupported target {s}-{s}-{s}, currently only x86-64 elf is supported",
.{ @tagName(d.comp.target.cpu.arch), @tagName(d.comp.target.os.tag), @tagName(d.comp.target.abi) },
);
}
if (d.verbose_ir) {
try @import("CodeGen.zig").generateTree(d.comp, tree);
}
const obj = try Codegen.generateTree(d.comp, tree);
defer obj.deinit();
// If it's used, name_buf will either hold a filename or `/tmp/<12 random bytes with base-64 encoding>.<extension>`
// both of which should fit into MAX_NAME_BYTES for all systems
var name_buf: [std.fs.MAX_NAME_BYTES]u8 = undefined;
const out_file_name = if (d.only_compile) blk: {
const fmt_template = "{s}{s}";
const fmt_args = .{
std.fs.path.stem(source.path),
d.comp.target.ofmt.fileExt(d.comp.target.cpu.arch),
};
break :blk d.output_name orelse
std.fmt.bufPrint(&name_buf, fmt_template, fmt_args) catch return d.fatal("Filename too long for filesystem: " ++ fmt_template, fmt_args);
} else blk: {
const random_bytes_count = 12;
const sub_path_len = comptime std.fs.base64_encoder.calcSize(random_bytes_count);
var random_bytes: [random_bytes_count]u8 = undefined;
std.crypto.random.bytes(&random_bytes);
var random_name: [sub_path_len]u8 = undefined;
_ = std.fs.base64_encoder.encode(&random_name, &random_bytes);
const fmt_template = "/tmp/{s}{s}";
const fmt_args = .{
random_name,
d.comp.target.ofmt.fileExt(d.comp.target.cpu.arch),
};
break :blk std.fmt.bufPrint(&name_buf, fmt_template, fmt_args) catch return d.fatal("Filename too long for filesystem: " ++ fmt_template, fmt_args);
};
const out_file = std.fs.cwd().createFile(out_file_name, .{}) catch |er|
return d.fatal("unable to create output file '{s}': {s}", .{ out_file_name, util.errorDescription(er) });
defer out_file.close();
obj.finish(out_file) catch |er|
return d.fatal("could not output to object file '{s}': {s}", .{ out_file_name, util.errorDescription(er) });
if (d.only_compile) {
if (fast_exit) std.process.exit(0); // Not linking, no need for cleanup.
return;
}
try d.link_objects.ensureUnusedCapacity(d.comp.gpa, 1);
d.link_objects.appendAssumeCapacity(try d.comp.gpa.dupe(u8, out_file_name));
d.temp_file_count += 1;
if (fast_exit) {
try d.invokeLinker(tc, fast_exit);
}
}
fn dumpLinkerArgs(items: []const []const u8) !void {
const stdout = std.io.getStdOut().writer();
for (items, 0..) |item, i| {
if (i > 0) try stdout.writeByte(' ');
try stdout.print("\"{}\"", .{std.zig.fmtEscapes(item)});
}
try stdout.writeByte('\n');
}
pub fn invokeLinker(d: *Driver, tc: *Toolchain, comptime fast_exit: bool) !void {
try tc.discover();
var argv = std.ArrayList([]const u8).init(d.comp.gpa);
defer argv.deinit();
var linker_path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const linker_path = try tc.getLinkerPath(&linker_path_buf);
try argv.append(linker_path);
try tc.buildLinkerArgs(&argv);
if (d.verbose_linker_args) {
dumpLinkerArgs(argv.items) catch |er| {
return d.fatal("unable to dump linker args: {s}", .{util.errorDescription(er)});
};
}
var child = std.ChildProcess.init(argv.items, d.comp.gpa);
// TODO handle better
child.stdin_behavior = .Inherit;
child.stdout_behavior = .Inherit;
child.stderr_behavior = .Inherit;
const term = child.spawnAndWait() catch |er| {
return d.fatal("unable to spawn linker: {s}", .{util.errorDescription(er)});
};
switch (term) {
.Exited => |code| if (code != 0) d.exitWithCleanup(code),
else => std.process.abort(),
}
if (fast_exit) d.exitWithCleanup(0);
}
fn exitWithCleanup(d: *Driver, code: u8) noreturn {
for (d.link_objects.items[d.link_objects.items.len - d.temp_file_count ..]) |obj| {
std.fs.deleteFileAbsolute(obj) catch {};
}
std.process.exit(code);
}
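
An illustrative test sketch for this file (hypothetical, not part of the original diff), pinning down the semantics of the option() helper above: it returns the value only when the flag carries one inline, and returns null otherwise so the caller can fall back to other handling, such as consuming the next argv entry.

test option {
    try std.testing.expectEqualStrings("gold", option("-fuse-ld=gold", "-fuse-ld=").?);
    try std.testing.expect(option("-fuse-ld=", "-fuse-ld=") == null);
    try std.testing.expectEqualStrings("/usr/aarch64", option("--sysroot=/usr/aarch64", "--sysroot=").?);
}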

329
deps/aro/Driver/Distro.zig vendored Normal file

@@ -0,0 +1,329 @@
//! Tools for figuring out what Linux distro we're running on
const std = @import("std");
const mem = std.mem;
const util = @import("../util.zig");
const Filesystem = @import("Filesystem.zig").Filesystem;
const MAX_BYTES = 1024; // TODO: Can we assume 1024 bytes is enough for the info we need?
/// Value for linker `--hash-style=` argument
pub const HashStyle = enum {
both,
gnu,
};
pub const Tag = enum {
alpine,
arch,
debian_lenny,
debian_squeeze,
debian_wheezy,
debian_jessie,
debian_stretch,
debian_buster,
debian_bullseye,
debian_bookworm,
debian_trixie,
exherbo,
rhel5,
rhel6,
rhel7,
fedora,
gentoo,
open_suse,
ubuntu_hardy,
ubuntu_intrepid,
ubuntu_jaunty,
ubuntu_karmic,
ubuntu_lucid,
ubuntu_maverick,
ubuntu_natty,
ubuntu_oneiric,
ubuntu_precise,
ubuntu_quantal,
ubuntu_raring,
ubuntu_saucy,
ubuntu_trusty,
ubuntu_utopic,
ubuntu_vivid,
ubuntu_wily,
ubuntu_xenial,
ubuntu_yakkety,
ubuntu_zesty,
ubuntu_artful,
ubuntu_bionic,
ubuntu_cosmic,
ubuntu_disco,
ubuntu_eoan,
ubuntu_focal,
ubuntu_groovy,
ubuntu_hirsute,
ubuntu_impish,
ubuntu_jammy,
ubuntu_kinetic,
ubuntu_lunar,
unknown,
pub fn getHashStyle(self: Tag) HashStyle {
if (self.isOpenSUSE()) return .both;
return switch (self) {
.ubuntu_lucid,
.ubuntu_jaunty,
.ubuntu_karmic,
=> .both,
else => .gnu,
};
}
pub fn isRedhat(self: Tag) bool {
return switch (self) {
.fedora,
.rhel5,
.rhel6,
.rhel7,
=> true,
else => false,
};
}
pub fn isOpenSUSE(self: Tag) bool {
return self == .open_suse;
}
pub fn isDebian(self: Tag) bool {
return switch (self) {
.debian_lenny,
.debian_squeeze,
.debian_wheezy,
.debian_jessie,
.debian_stretch,
.debian_buster,
.debian_bullseye,
.debian_bookworm,
.debian_trixie,
=> true,
else => false,
};
}
pub fn isUbuntu(self: Tag) bool {
return switch (self) {
.ubuntu_hardy,
.ubuntu_intrepid,
.ubuntu_jaunty,
.ubuntu_karmic,
.ubuntu_lucid,
.ubuntu_maverick,
.ubuntu_natty,
.ubuntu_oneiric,
.ubuntu_precise,
.ubuntu_quantal,
.ubuntu_raring,
.ubuntu_saucy,
.ubuntu_trusty,
.ubuntu_utopic,
.ubuntu_vivid,
.ubuntu_wily,
.ubuntu_xenial,
.ubuntu_yakkety,
.ubuntu_zesty,
.ubuntu_artful,
.ubuntu_bionic,
.ubuntu_cosmic,
.ubuntu_disco,
.ubuntu_eoan,
.ubuntu_focal,
.ubuntu_groovy,
.ubuntu_hirsute,
.ubuntu_impish,
.ubuntu_jammy,
.ubuntu_kinetic,
.ubuntu_lunar,
=> true,
else => false,
};
}
pub fn isAlpine(self: Tag) bool {
return self == .alpine;
}
pub fn isGentoo(self: Tag) bool {
return self == .gentoo;
}
};
fn scanForOsRelease(buf: []const u8) ?Tag {
var it = mem.splitScalar(u8, buf, '\n');
while (it.next()) |line| {
if (mem.startsWith(u8, line, "ID=")) {
const rest = line["ID=".len..];
if (mem.eql(u8, rest, "alpine")) return .alpine;
if (mem.eql(u8, rest, "fedora")) return .fedora;
if (mem.eql(u8, rest, "gentoo")) return .gentoo;
if (mem.eql(u8, rest, "arch")) return .arch;
if (mem.eql(u8, rest, "sles")) return .open_suse;
if (mem.eql(u8, rest, "opensuse")) return .open_suse;
if (mem.eql(u8, rest, "exherbo")) return .exherbo;
}
}
return null;
}
fn detectOsRelease(fs: Filesystem) ?Tag {
var buf: [MAX_BYTES]u8 = undefined;
const data = fs.readFile("/etc/os-release", &buf) orelse fs.readFile("/usr/lib/os-release", &buf) orelse return null;
return scanForOsRelease(data);
}
fn scanForLSBRelease(buf: []const u8) ?Tag {
var it = mem.splitScalar(u8, buf, '\n');
while (it.next()) |line| {
if (mem.startsWith(u8, line, "DISTRIB_CODENAME=")) {
const rest = line["DISTRIB_CODENAME=".len..];
if (mem.eql(u8, rest, "hardy")) return .ubuntu_hardy;
if (mem.eql(u8, rest, "intrepid")) return .ubuntu_intrepid;
if (mem.eql(u8, rest, "jaunty")) return .ubuntu_jaunty;
if (mem.eql(u8, rest, "karmic")) return .ubuntu_karmic;
if (mem.eql(u8, rest, "lucid")) return .ubuntu_lucid;
if (mem.eql(u8, rest, "maverick")) return .ubuntu_maverick;
if (mem.eql(u8, rest, "natty")) return .ubuntu_natty;
if (mem.eql(u8, rest, "oneiric")) return .ubuntu_oneiric;
if (mem.eql(u8, rest, "precise")) return .ubuntu_precise;
if (mem.eql(u8, rest, "quantal")) return .ubuntu_quantal;
if (mem.eql(u8, rest, "raring")) return .ubuntu_raring;
if (mem.eql(u8, rest, "saucy")) return .ubuntu_saucy;
if (mem.eql(u8, rest, "trusty")) return .ubuntu_trusty;
if (mem.eql(u8, rest, "utopic")) return .ubuntu_utopic;
if (mem.eql(u8, rest, "vivid")) return .ubuntu_vivid;
if (mem.eql(u8, rest, "wily")) return .ubuntu_wily;
if (mem.eql(u8, rest, "xenial")) return .ubuntu_xenial;
if (mem.eql(u8, rest, "yakkety")) return .ubuntu_yakkety;
if (mem.eql(u8, rest, "zesty")) return .ubuntu_zesty;
if (mem.eql(u8, rest, "artful")) return .ubuntu_artful;
if (mem.eql(u8, rest, "bionic")) return .ubuntu_bionic;
if (mem.eql(u8, rest, "cosmic")) return .ubuntu_cosmic;
if (mem.eql(u8, rest, "disco")) return .ubuntu_disco;
if (mem.eql(u8, rest, "eoan")) return .ubuntu_eoan;
if (mem.eql(u8, rest, "focal")) return .ubuntu_focal;
if (mem.eql(u8, rest, "groovy")) return .ubuntu_groovy;
if (mem.eql(u8, rest, "hirsute")) return .ubuntu_hirsute;
if (mem.eql(u8, rest, "impish")) return .ubuntu_impish;
if (mem.eql(u8, rest, "jammy")) return .ubuntu_jammy;
if (mem.eql(u8, rest, "kinetic")) return .ubuntu_kinetic;
if (mem.eql(u8, rest, "lunar")) return .ubuntu_lunar;
}
}
return null;
}
fn detectLSBRelease(fs: Filesystem) ?Tag {
var buf: [MAX_BYTES]u8 = undefined;
const data = fs.readFile("/etc/lsb-release", &buf) orelse return null;
return scanForLSBRelease(data);
}
fn scanForRedHat(buf: []const u8) Tag {
if (mem.startsWith(u8, buf, "Fedora release")) return .fedora;
if (mem.startsWith(u8, buf, "Red Hat Enterprise Linux") or mem.startsWith(u8, buf, "CentOS") or mem.startsWith(u8, buf, "Scientific Linux")) {
if (mem.indexOfPos(u8, buf, 0, "release 7") != null) return .rhel7;
if (mem.indexOfPos(u8, buf, 0, "release 6") != null) return .rhel6;
if (mem.indexOfPos(u8, buf, 0, "release 5") != null) return .rhel5;
}
return .unknown;
}
fn detectRedhat(fs: Filesystem) ?Tag {
var buf: [MAX_BYTES]u8 = undefined;
const data = fs.readFile("/etc/redhat-release", &buf) orelse return null;
return scanForRedHat(data);
}
fn scanForDebian(buf: []const u8) Tag {
var it = mem.splitScalar(u8, buf, '.');
if (std.fmt.parseInt(u8, it.next().?, 10)) |major| {
return switch (major) {
5 => .debian_lenny,
6 => .debian_squeeze,
7 => .debian_wheezy,
8 => .debian_jessie,
9 => .debian_stretch,
10 => .debian_buster,
11 => .debian_bullseye,
12 => .debian_bookworm,
13 => .debian_trixie,
else => .unknown,
};
} else |_| {}
it = mem.splitScalar(u8, buf, '\n');
const name = it.next().?;
if (mem.eql(u8, name, "squeeze/sid")) return .debian_squeeze;
if (mem.eql(u8, name, "wheezy/sid")) return .debian_wheezy;
if (mem.eql(u8, name, "jessie/sid")) return .debian_jessie;
if (mem.eql(u8, name, "stretch/sid")) return .debian_stretch;
if (mem.eql(u8, name, "buster/sid")) return .debian_buster;
if (mem.eql(u8, name, "bullseye/sid")) return .debian_bullseye;
if (mem.eql(u8, name, "bookworm/sid")) return .debian_bookworm;
return .unknown;
}
fn detectDebian(fs: Filesystem) ?Tag {
var buf: [MAX_BYTES]u8 = undefined;
const data = fs.readFile("/etc/debian_version", &buf) orelse return null;
return scanForDebian(data);
}
pub fn detect(target: std.Target, fs: Filesystem) Tag {
if (target.os.tag != .linux) return .unknown;
if (detectOsRelease(fs)) |tag| return tag;
if (detectLSBRelease(fs)) |tag| return tag;
if (detectRedhat(fs)) |tag| return tag;
if (detectDebian(fs)) |tag| return tag;
if (fs.exists("/etc/gentoo-release")) return .gentoo;
return .unknown;
}
test scanForDebian {
try std.testing.expectEqual(Tag.debian_squeeze, scanForDebian("squeeze/sid"));
try std.testing.expectEqual(Tag.debian_bullseye, scanForDebian("11.1.2"));
try std.testing.expectEqual(Tag.unknown, scanForDebian("None"));
try std.testing.expectEqual(Tag.unknown, scanForDebian(""));
}
test scanForRedHat {
try std.testing.expectEqual(Tag.fedora, scanForRedHat("Fedora release 7"));
try std.testing.expectEqual(Tag.rhel7, scanForRedHat("Red Hat Enterprise Linux release 7"));
try std.testing.expectEqual(Tag.rhel5, scanForRedHat("CentOS release 5"));
try std.testing.expectEqual(Tag.unknown, scanForRedHat("CentOS release 4"));
try std.testing.expectEqual(Tag.unknown, scanForRedHat(""));
}
test scanForLSBRelease {
const text =
\\DISTRIB_ID=Ubuntu
\\DISTRIB_RELEASE=20.04
\\DISTRIB_CODENAME=focal
\\DISTRIB_DESCRIPTION="Ubuntu 20.04.6 LTS"
\\
;
try std.testing.expectEqual(Tag.ubuntu_focal, scanForLSBRelease(text).?);
}
test scanForOsRelease {
const text =
\\NAME="Alpine Linux"
\\ID=alpine
\\VERSION_ID=3.18.2
\\PRETTY_NAME="Alpine Linux v3.18"
\\HOME_URL="https://alpinelinux.org/"
\\BUG_REPORT_URL="https://gitlab.alpinelinux.org/alpine/aports/-/issues"
\\
;
try std.testing.expectEqual(Tag.alpine, scanForOsRelease(text).?);
}
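
A hedged end-to-end sketch (hypothetical test, assuming the fake Filesystem variant shown in Driver/Filesystem.zig below and its readFile method): distro detection driven entirely by an in-memory /etc/os-release.

test detect {
    const fs: Filesystem = .{ .fake = &.{
        .{ .path = "/etc/os-release", .contents = "ID=alpine\nVERSION_ID=3.18.2\n" },
    } };
    const cross = std.zig.CrossTarget.parse(.{ .arch_os_abi = "x86_64-linux-gnu" }) catch unreachable;
    try std.testing.expectEqual(Tag.alpine, detect(cross.toTarget(), fs));
}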

242
deps/aro/Driver/Filesystem.zig vendored Normal file

@@ -0,0 +1,242 @@
const std = @import("std");
const mem = std.mem;
const builtin = @import("builtin");
const system_defaults = @import("system_defaults");
const is_windows = builtin.os.tag == .windows;
fn readFileFake(entries: []const Filesystem.Entry, path: []const u8, buf: []u8) ?[]const u8 {
@setCold(true);
for (entries) |entry| {
if (mem.eql(u8, entry.path, path)) {
const len = @min(entry.contents.len, buf.len);
@memcpy(buf[0..len], entry.contents[0..len]);
return buf[0..len];
}
}
return null;
}
fn findProgramByNameFake(entries: []const Filesystem.Entry, name: []const u8, path: ?[]const u8, buf: []u8) ?[]const u8 {
@setCold(true);
if (mem.indexOfScalar(u8, name, '/') != null) {
@memcpy(buf[0..name.len], name);
return buf[0..name.len];
}
const path_env = path orelse return null;
var fib = std.heap.FixedBufferAllocator.init(buf);
var it = mem.tokenizeScalar(u8, path_env, system_defaults.path_sep);
while (it.next()) |path_dir| {
defer fib.reset();
const full_path = std.fs.path.join(fib.allocator(), &.{ path_dir, name }) catch continue;
if (canExecuteFake(entries, full_path)) return full_path;
}
return null;
}
fn canExecuteFake(entries: []const Filesystem.Entry, path: []const u8) bool {
@setCold(true);
for (entries) |entry| {
if (mem.eql(u8, entry.path, path)) {
return entry.executable;
}
}
return false;
}
fn existsFake(entries: []const Filesystem.Entry, path: []const u8) bool {
@setCold(true);
var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&buf);
const resolved = std.fs.path.resolvePosix(fib.allocator(), &.{path}) catch return false;
for (entries) |entry| {
if (mem.eql(u8, entry.path, resolved)) return true;
}
return false;
}
fn canExecutePosix(path: []const u8) bool {
std.os.access(path, std.os.X_OK) catch return false;
// Todo: ensure path is not a directory
return true;
}
/// TODO
fn canExecuteWindows(path: []const u8) bool {
_ = path;
return true;
}
/// TODO
fn findProgramByNameWindows(allocator: std.mem.Allocator, name: []const u8, path: ?[]const u8, buf: []u8) ?[]const u8 {
_ = path;
_ = buf;
_ = name;
_ = allocator;
return null;
}
/// TODO: does WASI need special handling?
fn findProgramByNamePosix(name: []const u8, path: ?[]const u8, buf: []u8) ?[]const u8 {
if (mem.indexOfScalar(u8, name, '/') != null) {
@memcpy(buf[0..name.len], name);
return buf[0..name.len];
}
const path_env = path orelse return null;
var fib = std.heap.FixedBufferAllocator.init(buf);
var it = mem.tokenizeScalar(u8, path_env, system_defaults.path_sep);
while (it.next()) |path_dir| {
defer fib.reset();
const full_path = std.fs.path.join(fib.allocator(), &.{ path_dir, name }) catch continue;
if (canExecutePosix(full_path)) return full_path;
}
return null;
}
pub const Filesystem = union(enum) {
real: void,
fake: []const Entry,
const Entry = struct {
path: []const u8,
contents: []const u8 = "",
executable: bool = false,
};
const FakeDir = struct {
entries: []const Entry,
path: []const u8,
fn iterate(self: FakeDir) FakeDir.Iterator {
return .{
.entries = self.entries,
.base = self.path,
};
}
const Iterator = struct {
entries: []const Entry,
base: []const u8,
i: usize = 0,
const Self = @This();
fn next(self: *@This()) !?std.fs.IterableDir.Entry {
while (self.i < self.entries.len) {
const entry = self.entries[self.i];
self.i += 1;
if (entry.path.len == self.base.len) continue;
if (std.mem.startsWith(u8, entry.path, self.base)) {
const remaining = entry.path[self.base.len + 1 ..];
if (std.mem.indexOfScalar(u8, remaining, std.fs.path.sep) != null) continue;
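// Fake entries carry no kind information; treat names without an extension as directories and the rest as files.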
const extension = std.fs.path.extension(remaining);
const kind: std.fs.IterableDir.Entry.Kind = if (extension.len == 0) .directory else .file;
return .{ .name = remaining, .kind = kind };
}
}
return null;
}
};
};
const IterableDir = union(enum) {
dir: std.fs.IterableDir,
fake: FakeDir,
pub fn iterate(self: IterableDir) Iterator {
return switch (self) {
.dir => |dir| .{ .iterator = dir.iterate() },
.fake => |fake| .{ .fake = fake.iterate() },
};
}
pub fn close(self: *IterableDir) void {
switch (self.*) {
.dir => |*d| d.close(),
.fake => {},
}
}
};
const Iterator = union(enum) {
iterator: std.fs.IterableDir.Iterator,
fake: FakeDir.Iterator,
pub fn next(self: *Iterator) std.fs.IterableDir.Iterator.Error!?std.fs.IterableDir.Entry {
return switch (self.*) {
.iterator => |*it| it.next(),
.fake => |*it| it.next(),
};
}
};
pub fn exists(fs: Filesystem, path: []const u8) bool {
switch (fs) {
.real => {
std.os.access(path, std.os.F_OK) catch return false;
return true;
},
.fake => |paths| return existsFake(paths, path),
}
}
pub fn joinedExists(fs: Filesystem, parts: []const []const u8) bool {
var buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&buf);
const joined = std.fs.path.join(fib.allocator(), parts) catch return false;
return fs.exists(joined);
}
pub fn canExecute(fs: Filesystem, path: []const u8) bool {
return switch (fs) {
.real => if (is_windows) canExecuteWindows(path) else canExecutePosix(path),
.fake => |entries| canExecuteFake(entries, path),
};
}
/// Search for an executable named `name` using platform-specific logic
/// If it's found, write the full path to `buf` and return a slice of it
/// Otherwise returns null
pub fn findProgramByName(fs: Filesystem, allocator: std.mem.Allocator, name: []const u8, path: ?[]const u8, buf: []u8) ?[]const u8 {
std.debug.assert(name.len > 0);
return switch (fs) {
.real => if (is_windows) findProgramByNameWindows(allocator, name, path, buf) else findProgramByNamePosix(name, path, buf),
.fake => |entries| findProgramByNameFake(entries, name, path, buf),
};
}
/// Read the file at `path` into `buf`.
/// Returns null if any errors are encountered.
/// Otherwise returns a slice of `buf`; if the file is larger than `buf`, partial contents are returned.
pub fn readFile(fs: Filesystem, path: []const u8, buf: []u8) ?[]const u8 {
return switch (fs) {
.real => {
const file = std.fs.cwd().openFile(path, .{}) catch return null;
defer file.close();
const bytes_read = file.readAll(buf) catch return null;
return buf[0..bytes_read];
},
.fake => |entries| readFileFake(entries, path, buf),
};
}
pub fn openIterableDir(fs: Filesystem, dir_name: []const u8) std.fs.Dir.OpenError!IterableDir {
return switch (fs) {
.real => .{ .dir = try std.fs.cwd().openIterableDir(dir_name, .{ .access_sub_paths = false }) },
.fake => |entries| .{ .fake = .{ .entries = entries, .path = dir_name } },
};
}
};
test "Fake filesystem" {
const fs: Filesystem = .{ .fake = &.{
.{ .path = "/usr/bin" },
} };
try std.testing.expect(fs.exists("/usr/bin"));
try std.testing.expect(fs.exists("/usr/bin/foo/.."));
try std.testing.expect(!fs.exists("/usr/bin/bar"));
}

610
deps/aro/Driver/GCCDetector.zig vendored Normal file
View File

@ -0,0 +1,610 @@
const std = @import("std");
const Toolchain = @import("../Toolchain.zig");
const target_util = @import("../target.zig");
const system_defaults = @import("system_defaults");
const util = @import("../util.zig");
const GCCVersion = @import("GCCVersion.zig");
const Multilib = @import("Multilib.zig");
const GCCDetector = @This();
is_valid: bool = false,
install_path: []const u8 = "",
parent_lib_path: []const u8 = "",
version: GCCVersion = .{},
gcc_triple: []const u8 = "",
selected: Multilib = .{},
biarch_sibling: ?Multilib = null,
pub fn deinit(self: *GCCDetector) void {
if (!self.is_valid) return;
}
pub fn appendToolPath(self: *const GCCDetector, tc: *Toolchain) !void {
if (!self.is_valid) return;
return tc.addPathFromComponents(&.{
self.parent_lib_path,
"..",
self.gcc_triple,
"bin",
}, .program);
}
fn addDefaultGCCPrefixes(prefixes: *PathPrefixes, tc: *const Toolchain) !void {
const sysroot = tc.getSysroot();
const target = tc.getTarget();
if (sysroot.len == 0 and target.os.tag == .linux and tc.filesystem.exists("/opt/rh")) {
prefixes.appendAssumeCapacity("/opt/rh/gcc-toolset-12/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/gcc-toolset-11/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/gcc-toolset-10/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-12/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-11/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-10/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-9/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-8/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-7/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-6/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-4/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-3/root/usr");
prefixes.appendAssumeCapacity("/opt/rh/devtoolset-2/root/usr");
}
if (sysroot.len == 0) {
prefixes.appendAssumeCapacity("/usr");
} else {
var usr_path = try tc.arena.alloc(u8, 4 + sysroot.len);
@memcpy(usr_path[0..4], "/usr");
@memcpy(usr_path[4..], sysroot);
prefixes.appendAssumeCapacity(usr_path);
}
}
const PathPrefixes = std.BoundedArray([]const u8, 16);
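/// Fill in the candidate library directories and GCC triple aliases to search for this target,
/// plus the corresponding lists for its biarch (other pointer-width) variant.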
fn collectLibDirsAndTriples(
tc: *Toolchain,
lib_dirs: *PathPrefixes,
triple_aliases: *PathPrefixes,
biarch_libdirs: *PathPrefixes,
biarch_triple_aliases: *PathPrefixes,
) !void {
const AArch64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const AArch64Triples: [4][]const u8 = .{ "aarch64-none-linux-gnu", "aarch64-linux-gnu", "aarch64-redhat-linux", "aarch64-suse-linux" };
const AArch64beLibDirs: [1][]const u8 = .{"/lib"};
const AArch64beTriples: [2][]const u8 = .{ "aarch64_be-none-linux-gnu", "aarch64_be-linux-gnu" };
const ARMLibDirs: [1][]const u8 = .{"/lib"};
const ARMTriples: [1][]const u8 = .{"arm-linux-gnueabi"};
const ARMHFTriples: [4][]const u8 = .{ "arm-linux-gnueabihf", "armv7hl-redhat-linux-gnueabi", "armv6hl-suse-linux-gnueabi", "armv7hl-suse-linux-gnueabi" };
const ARMebLibDirs: [1][]const u8 = .{"/lib"};
const ARMebTriples: [1][]const u8 = .{"armeb-linux-gnueabi"};
const ARMebHFTriples: [2][]const u8 = .{ "armeb-linux-gnueabihf", "armebv7hl-redhat-linux-gnueabi" };
const AVRLibDirs: [1][]const u8 = .{"/lib"};
const AVRTriples: [1][]const u8 = .{"avr"};
const CSKYLibDirs: [1][]const u8 = .{"/lib"};
const CSKYTriples: [3][]const u8 = .{ "csky-linux-gnuabiv2", "csky-linux-uclibcabiv2", "csky-elf-noneabiv2" };
const X86_64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const X86_64Triples: [11][]const u8 = .{
"x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
"x86_64-pc-linux-gnu", "x86_64-redhat-linux6E",
"x86_64-redhat-linux", "x86_64-suse-linux",
"x86_64-manbo-linux-gnu", "x86_64-linux-gnu",
"x86_64-slackware-linux", "x86_64-unknown-linux",
"x86_64-amazon-linux",
};
const X32Triples: [2][]const u8 = .{ "x86_64-linux-gnux32", "x86_64-pc-linux-gnux32" };
const X32LibDirs: [2][]const u8 = .{ "/libx32", "/lib" };
const X86LibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
const X86Triples: [9][]const u8 = .{
"i586-linux-gnu", "i686-linux-gnu", "i686-pc-linux-gnu",
"i386-redhat-linux6E", "i686-redhat-linux", "i386-redhat-linux",
"i586-suse-linux", "i686-montavista-linux", "i686-gnu",
};
const LoongArch64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const LoongArch64Triples: [2][]const u8 = .{ "loongarch64-linux-gnu", "loongarch64-unknown-linux-gnu" };
const M68kLibDirs: [1][]const u8 = .{"/lib"};
const M68kTriples: [3][]const u8 = .{ "m68k-linux-gnu", "m68k-unknown-linux-gnu", "m68k-suse-linux" };
const MIPSLibDirs: [2][]const u8 = .{ "/libo32", "/lib" };
const MIPSTriples: [5][]const u8 = .{
"mips-linux-gnu", "mips-mti-linux",
"mips-mti-linux-gnu", "mips-img-linux-gnu",
"mipsisa32r6-linux-gnu",
};
const MIPSELLibDirs: [2][]const u8 = .{ "/libo32", "/lib" };
const MIPSELTriples: [3][]const u8 = .{ "mipsel-linux-gnu", "mips-img-linux-gnu", "mipsisa32r6el-linux-gnu" };
const MIPS64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const MIPS64Triples: [6][]const u8 = .{
"mips64-linux-gnu", "mips-mti-linux-gnu",
"mips-img-linux-gnu", "mips64-linux-gnuabi64",
"mipsisa64r6-linux-gnu", "mipsisa64r6-linux-gnuabi64",
};
const MIPS64ELLibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const MIPS64ELTriples: [6][]const u8 = .{
"mips64el-linux-gnu", "mips-mti-linux-gnu",
"mips-img-linux-gnu", "mips64el-linux-gnuabi64",
"mipsisa64r6el-linux-gnu", "mipsisa64r6el-linux-gnuabi64",
};
const MIPSN32LibDirs: [1][]const u8 = .{"/lib32"};
const MIPSN32Triples: [2][]const u8 = .{ "mips64-linux-gnuabin32", "mipsisa64r6-linux-gnuabin32" };
const MIPSN32ELLibDirs: [1][]const u8 = .{"/lib32"};
const MIPSN32ELTriples: [2][]const u8 = .{ "mips64el-linux-gnuabin32", "mipsisa64r6el-linux-gnuabin32" };
const MSP430LibDirs: [1][]const u8 = .{"/lib"};
const MSP430Triples: [1][]const u8 = .{"msp430-elf"};
const PPCLibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
const PPCTriples: [5][]const u8 = .{
"powerpc-linux-gnu", "powerpc-unknown-linux-gnu", "powerpc-linux-gnuspe",
// On 32-bit PowerPC systems running SUSE Linux, gcc is configured as a
// 64-bit compiler which defaults to "-m32", hence "powerpc64-suse-linux".
"powerpc64-suse-linux", "powerpc-montavista-linuxspe",
};
const PPCLELibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
const PPCLETriples: [3][]const u8 = .{ "powerpcle-linux-gnu", "powerpcle-unknown-linux-gnu", "powerpcle-linux-musl" };
const PPC64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const PPC64Triples: [4][]const u8 = .{
"powerpc64-linux-gnu", "powerpc64-unknown-linux-gnu",
"powerpc64-suse-linux", "ppc64-redhat-linux",
};
const PPC64LELibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const PPC64LETriples: [5][]const u8 = .{
"powerpc64le-linux-gnu", "powerpc64le-unknown-linux-gnu",
"powerpc64le-none-linux-gnu", "powerpc64le-suse-linux",
"ppc64le-redhat-linux",
};
const RISCV32LibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
const RISCV32Triples: [3][]const u8 = .{ "riscv32-unknown-linux-gnu", "riscv32-linux-gnu", "riscv32-unknown-elf" };
const RISCV64LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const RISCV64Triples: [3][]const u8 = .{
"riscv64-unknown-linux-gnu",
"riscv64-linux-gnu",
"riscv64-unknown-elf",
};
const SPARCv8LibDirs: [2][]const u8 = .{ "/lib32", "/lib" };
const SPARCv8Triples: [2][]const u8 = .{ "sparc-linux-gnu", "sparcv8-linux-gnu" };
const SPARCv9LibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const SPARCv9Triples: [2][]const u8 = .{ "sparc64-linux-gnu", "sparcv9-linux-gnu" };
const SystemZLibDirs: [2][]const u8 = .{ "/lib64", "/lib" };
const SystemZTriples: [5][]const u8 = .{
"s390x-linux-gnu", "s390x-unknown-linux-gnu", "s390x-ibm-linux-gnu",
"s390x-suse-linux", "s390x-redhat-linux",
};
const target = tc.getTarget();
if (target.os.tag == .solaris) {
// TODO
return;
}
if (target.isAndroid()) {
const AArch64AndroidTriples: [1][]const u8 = .{"aarch64-linux-android"};
const ARMAndroidTriples: [1][]const u8 = .{"arm-linux-androideabi"};
const MIPSELAndroidTriples: [1][]const u8 = .{"mipsel-linux-android"};
const MIPS64ELAndroidTriples: [1][]const u8 = .{"mips64el-linux-android"};
const X86AndroidTriples: [1][]const u8 = .{"i686-linux-android"};
const X86_64AndroidTriples: [1][]const u8 = .{"x86_64-linux-android"};
switch (target.cpu.arch) {
.aarch64 => {
lib_dirs.appendSliceAssumeCapacity(&AArch64LibDirs);
triple_aliases.appendSliceAssumeCapacity(&AArch64AndroidTriples);
},
.arm,
.thumb,
=> {
lib_dirs.appendSliceAssumeCapacity(&ARMLibDirs);
triple_aliases.appendSliceAssumeCapacity(&ARMAndroidTriples);
},
.mipsel => {
lib_dirs.appendSliceAssumeCapacity(&MIPSELLibDirs);
triple_aliases.appendSliceAssumeCapacity(&MIPSELAndroidTriples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPS64ELLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPS64ELAndroidTriples);
},
.mips64el => {
lib_dirs.appendSliceAssumeCapacity(&MIPS64ELLibDirs);
triple_aliases.appendSliceAssumeCapacity(&MIPS64ELAndroidTriples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPSELLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSELAndroidTriples);
},
.x86_64 => {
lib_dirs.appendSliceAssumeCapacity(&X86_64LibDirs);
triple_aliases.appendSliceAssumeCapacity(&X86_64AndroidTriples);
biarch_libdirs.appendSliceAssumeCapacity(&X86LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X86AndroidTriples);
},
.x86 => {
lib_dirs.appendSliceAssumeCapacity(&X86LibDirs);
triple_aliases.appendSliceAssumeCapacity(&X86AndroidTriples);
biarch_libdirs.appendSliceAssumeCapacity(&X86_64LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X86_64AndroidTriples);
},
else => {},
}
return;
}
switch (target.cpu.arch) {
.aarch64 => {
lib_dirs.appendSliceAssumeCapacity(&AArch64LibDirs);
triple_aliases.appendSliceAssumeCapacity(&AArch64Triples);
biarch_libdirs.appendSliceAssumeCapacity(&AArch64LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&AArch64Triples);
},
.aarch64_be => {
lib_dirs.appendSliceAssumeCapacity(&AArch64beLibDirs);
triple_aliases.appendSliceAssumeCapacity(&AArch64beTriples);
biarch_libdirs.appendSliceAssumeCapacity(&AArch64beLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&AArch64beTriples);
},
.arm, .thumb => {
lib_dirs.appendSliceAssumeCapacity(&ARMLibDirs);
if (target.abi == .gnueabihf) {
triple_aliases.appendSliceAssumeCapacity(&ARMHFTriples);
} else {
triple_aliases.appendSliceAssumeCapacity(&ARMTriples);
}
},
.armeb, .thumbeb => {
lib_dirs.appendSliceAssumeCapacity(&ARMebLibDirs);
if (target.abi == .gnueabihf) {
triple_aliases.appendSliceAssumeCapacity(&ARMebHFTriples);
} else {
triple_aliases.appendSliceAssumeCapacity(&ARMebTriples);
}
},
.avr => {
lib_dirs.appendSliceAssumeCapacity(&AVRLibDirs);
triple_aliases.appendSliceAssumeCapacity(&AVRTriples);
},
.csky => {
lib_dirs.appendSliceAssumeCapacity(&CSKYLibDirs);
triple_aliases.appendSliceAssumeCapacity(&CSKYTriples);
},
.x86_64 => {
if (target.abi == .gnux32 or target.abi == .muslx32) {
lib_dirs.appendSliceAssumeCapacity(&X32LibDirs);
triple_aliases.appendSliceAssumeCapacity(&X32Triples);
biarch_libdirs.appendSliceAssumeCapacity(&X86_64LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X86_64Triples);
} else {
lib_dirs.appendSliceAssumeCapacity(&X86_64LibDirs);
triple_aliases.appendSliceAssumeCapacity(&X86_64Triples);
biarch_libdirs.appendSliceAssumeCapacity(&X32LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X32Triples);
}
biarch_libdirs.appendSliceAssumeCapacity(&X86LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X86Triples);
},
.x86 => {
lib_dirs.appendSliceAssumeCapacity(&X86LibDirs);
// MCU toolchain is 32 bit only and its triple alias is TargetTriple
// itself, which will be appended below.
if (target.os.tag != .elfiamcu) {
triple_aliases.appendSliceAssumeCapacity(&X86Triples);
biarch_libdirs.appendSliceAssumeCapacity(&X86_64LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X86_64Triples);
biarch_libdirs.appendSliceAssumeCapacity(&X32LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&X32Triples);
}
},
.loongarch64 => {
lib_dirs.appendSliceAssumeCapacity(&LoongArch64LibDirs);
triple_aliases.appendSliceAssumeCapacity(&LoongArch64Triples);
},
.m68k => {
lib_dirs.appendSliceAssumeCapacity(&M68kLibDirs);
triple_aliases.appendSliceAssumeCapacity(&M68kTriples);
},
.mips => {
lib_dirs.appendSliceAssumeCapacity(&MIPSLibDirs);
triple_aliases.appendSliceAssumeCapacity(&MIPSTriples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPS64LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPS64Triples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPSN32LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSN32Triples);
},
.mipsel => {
lib_dirs.appendSliceAssumeCapacity(&MIPSELLibDirs);
triple_aliases.appendSliceAssumeCapacity(&MIPSELTriples);
triple_aliases.appendSliceAssumeCapacity(&MIPSTriples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPS64ELLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPS64ELTriples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPSN32ELLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSN32ELTriples);
},
.mips64 => {
lib_dirs.appendSliceAssumeCapacity(&MIPS64LibDirs);
triple_aliases.appendSliceAssumeCapacity(&MIPS64Triples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPSLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSTriples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPSN32LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSN32Triples);
},
.mips64el => {
lib_dirs.appendSliceAssumeCapacity(&MIPS64ELLibDirs);
triple_aliases.appendSliceAssumeCapacity(&MIPS64ELTriples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPSELLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSELTriples);
biarch_libdirs.appendSliceAssumeCapacity(&MIPSN32ELLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSN32ELTriples);
biarch_triple_aliases.appendSliceAssumeCapacity(&MIPSTriples);
},
.msp430 => {
lib_dirs.appendSliceAssumeCapacity(&MSP430LibDirs);
triple_aliases.appendSliceAssumeCapacity(&MSP430Triples);
},
.powerpc => {
lib_dirs.appendSliceAssumeCapacity(&PPCLibDirs);
triple_aliases.appendSliceAssumeCapacity(&PPCTriples);
biarch_libdirs.appendSliceAssumeCapacity(&PPC64LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&PPC64Triples);
},
.powerpcle => {
lib_dirs.appendSliceAssumeCapacity(&PPCLELibDirs);
triple_aliases.appendSliceAssumeCapacity(&PPCLETriples);
biarch_libdirs.appendSliceAssumeCapacity(&PPC64LELibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&PPC64LETriples);
},
.powerpc64 => {
lib_dirs.appendSliceAssumeCapacity(&PPC64LibDirs);
triple_aliases.appendSliceAssumeCapacity(&PPC64Triples);
biarch_libdirs.appendSliceAssumeCapacity(&PPCLibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&PPCTriples);
},
.powerpc64le => {
lib_dirs.appendSliceAssumeCapacity(&PPC64LELibDirs);
triple_aliases.appendSliceAssumeCapacity(&PPC64LETriples);
biarch_libdirs.appendSliceAssumeCapacity(&PPCLELibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&PPCLETriples);
},
.riscv32 => {
lib_dirs.appendSliceAssumeCapacity(&RISCV32LibDirs);
triple_aliases.appendSliceAssumeCapacity(&RISCV32Triples);
biarch_libdirs.appendSliceAssumeCapacity(&RISCV64LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&RISCV64Triples);
},
.riscv64 => {
lib_dirs.appendSliceAssumeCapacity(&RISCV64LibDirs);
triple_aliases.appendSliceAssumeCapacity(&RISCV64Triples);
biarch_libdirs.appendSliceAssumeCapacity(&RISCV32LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&RISCV32Triples);
},
.sparc, .sparcel => {
lib_dirs.appendSliceAssumeCapacity(&SPARCv8LibDirs);
triple_aliases.appendSliceAssumeCapacity(&SPARCv8Triples);
biarch_libdirs.appendSliceAssumeCapacity(&SPARCv9LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&SPARCv9Triples);
},
.sparc64 => {
lib_dirs.appendSliceAssumeCapacity(&SPARCv9LibDirs);
triple_aliases.appendSliceAssumeCapacity(&SPARCv9Triples);
biarch_libdirs.appendSliceAssumeCapacity(&SPARCv8LibDirs);
biarch_triple_aliases.appendSliceAssumeCapacity(&SPARCv8Triples);
},
.s390x => {
lib_dirs.appendSliceAssumeCapacity(&SystemZLibDirs);
triple_aliases.appendSliceAssumeCapacity(&SystemZTriples);
},
else => {},
}
}
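/// Search the known installation prefixes for the newest usable GCC installation matching the
/// target triple or one of its aliases, recording its version, triple, install path and parent lib path.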
pub fn discover(self: *GCCDetector, tc: *Toolchain) !void {
var path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
const target = tc.getTarget();
const biarch_variant_target = if (target.ptrBitWidth() == 32) target_util.get64BitArchVariant(target) else target_util.get32BitArchVariant(target);
var candidate_lib_dirs: PathPrefixes = .{};
var candidate_biarch_lib_dirs: PathPrefixes = .{};
var candidate_triple_aliases: PathPrefixes = .{};
var candidate_biarch_triple_aliases: PathPrefixes = .{};
try collectLibDirsAndTriples(tc, &candidate_lib_dirs, &candidate_triple_aliases, &candidate_biarch_lib_dirs, &candidate_biarch_triple_aliases);
var target_buf: [64]u8 = undefined;
const triple_str = target_util.toLLVMTriple(target, &target_buf);
candidate_triple_aliases.appendAssumeCapacity(triple_str);
// Also include the multiarch variant if it's different.
var biarch_buf: [64]u8 = undefined;
if (biarch_variant_target) |biarch_target| {
const biarch_triple_str = target_util.toLLVMTriple(biarch_target, &biarch_buf);
if (!std.mem.eql(u8, biarch_triple_str, triple_str)) {
candidate_triple_aliases.appendAssumeCapacity(biarch_triple_str);
}
}
var prefixes: PathPrefixes = .{};
const gcc_toolchain_dir = gccToolchainDir(tc);
if (gcc_toolchain_dir.len != 0) {
const adjusted = if (gcc_toolchain_dir[gcc_toolchain_dir.len - 1] == '/')
gcc_toolchain_dir[0 .. gcc_toolchain_dir.len - 1]
else
gcc_toolchain_dir;
prefixes.appendAssumeCapacity(adjusted);
} else {
const sysroot = tc.getSysroot();
if (sysroot.len > 0) {
prefixes.appendAssumeCapacity(sysroot);
try addDefaultGCCPrefixes(&prefixes, tc);
}
if (sysroot.len == 0) {
try addDefaultGCCPrefixes(&prefixes, tc);
}
// TODO: Special-case handling for Gentoo
}
const v0 = GCCVersion.parse("0.0.0");
for (prefixes.constSlice()) |prefix| {
if (!tc.filesystem.exists(prefix)) continue;
for (candidate_lib_dirs.constSlice()) |suffix| {
defer fib.reset();
const lib_dir = std.fs.path.join(fib.allocator(), &.{ prefix, suffix }) catch continue;
if (!tc.filesystem.exists(lib_dir)) continue;
const gcc_dir_exists = tc.filesystem.joinedExists(&.{ lib_dir, "/gcc" });
const gcc_cross_dir_exists = tc.filesystem.joinedExists(&.{ lib_dir, "/gcc-cross" });
try self.scanLibDirForGCCTriple(tc, target, lib_dir, triple_str, false, gcc_dir_exists, gcc_cross_dir_exists);
for (candidate_triple_aliases.constSlice()) |candidate| {
try self.scanLibDirForGCCTriple(tc, target, lib_dir, candidate, false, gcc_dir_exists, gcc_cross_dir_exists);
}
}
for (candidate_biarch_lib_dirs.constSlice()) |suffix| {
const lib_dir = std.fs.path.join(fib.allocator(), &.{ prefix, suffix }) catch continue;
if (!tc.filesystem.exists(lib_dir)) continue;
const gcc_dir_exists = tc.filesystem.joinedExists(&.{ lib_dir, "/gcc" });
const gcc_cross_dir_exists = tc.filesystem.joinedExists(&.{ lib_dir, "/gcc-cross" });
for (candidate_biarch_triple_aliases.constSlice()) |candidate| {
try self.scanLibDirForGCCTriple(tc, target, lib_dir, candidate, true, gcc_dir_exists, gcc_cross_dir_exists);
}
}
if (self.version.order(v0) == .gt) break;
}
}
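/// Check which of the 32-bit, 64-bit and x32 multilib directories exist under `path` and
/// select the one matching the target's pointer width and ABI.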
fn findBiarchMultilibs(tc: *const Toolchain, result: *Multilib.Detected, target: std.Target, path: [2][]const u8, needs_biarch_suffix: bool) !bool {
const suff64 = if (target.os.tag == .solaris) switch (target.cpu.arch) {
.x86, .x86_64 => "/amd64",
.sparc => "/sparcv9",
else => "/64",
} else "/64";
const alt_64 = Multilib.init(suff64, suff64, &.{ "-m32", "+m64", "-mx32" });
const alt_32 = Multilib.init("/32", "/32", &.{ "+m32", "-m64", "-mx32" });
const alt_x32 = Multilib.init("/x32", "/x32", &.{ "-m32", "-m64", "+mx32" });
const multilib_filter = Multilib.Filter{
.base = path,
.file = if (target.os.tag == .elfiamcu) "libgcc.a" else "crtbegin.o",
};
const Want = enum {
want32,
want64,
wantx32,
};
const is_x32 = target.abi == .gnux32 or target.abi == .muslx32;
const target_ptr_width = target.ptrBitWidth();
const want: Want = if (target_ptr_width == 32 and multilib_filter.exists(alt_32, tc.filesystem))
.want64
else if (target_ptr_width == 64 and is_x32 and multilib_filter.exists(alt_x32, tc.filesystem))
.want64
else if (target_ptr_width == 64 and !is_x32 and multilib_filter.exists(alt_64, tc.filesystem))
.want32
else if (target_ptr_width == 32)
if (needs_biarch_suffix) .want64 else .want32
else if (is_x32)
if (needs_biarch_suffix) .want64 else .wantx32
else if (needs_biarch_suffix) .want32 else .want64;
const default = switch (want) {
.want32 => Multilib.init("", "", &.{ "+m32", "-m64", "-mx32" }),
.want64 => Multilib.init("", "", &.{ "-m32", "+m64", "-mx32" }),
.wantx32 => Multilib.init("", "", &.{ "-m32", "-m64", "+mx32" }),
};
result.multilibs.appendSliceAssumeCapacity(&.{
default,
alt_64,
alt_32,
alt_x32,
});
result.filter(multilib_filter, tc.filesystem);
var flags: Multilib.Flags = .{};
flags.appendAssumeCapacity(if (target_ptr_width == 64 and !is_x32) "+m64" else "-m64");
flags.appendAssumeCapacity(if (target_ptr_width == 32) "+m32" else "-m32");
flags.appendAssumeCapacity(if (target_ptr_width == 64 and is_x32) "+mx32" else "-mx32");
return result.select(flags);
}
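/// Detect the multilib layout for the GCC installation at `path`; several architectures are
/// still TODO and currently fall back to an empty selection.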
fn scanGCCForMultilibs(self: *GCCDetector, tc: *const Toolchain, target: std.Target, path: [2][]const u8, needs_biarch_suffix: bool) !bool {
var detected: Multilib.Detected = .{};
if (target.cpu.arch == .csky) {
// TODO
} else if (target.cpu.arch.isMIPS()) {
// TODO
} else if (target.cpu.arch.isRISCV()) {
// TODO
} else if (target.cpu.arch == .msp430) {
// TODO
} else if (target.cpu.arch == .avr) {
// No multilibs
} else if (!try findBiarchMultilibs(tc, &detected, target, path, needs_biarch_suffix)) {
return false;
}
self.selected = detected.selected;
self.biarch_sibling = detected.biarch_sibling;
return true;
}
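/// Scan `lib_dir`/gcc and `lib_dir`/gcc-cross for `candidate_triple` subdirectories containing
/// versioned GCC installations, keeping the newest version found.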
fn scanLibDirForGCCTriple(
self: *GCCDetector,
tc: *const Toolchain,
target: std.Target,
lib_dir: []const u8,
candidate_triple: []const u8,
needs_biarch_suffix: bool,
gcc_dir_exists: bool,
gcc_cross_dir_exists: bool,
) !void {
var path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
for (0..2) |i| {
if (i == 0 and !gcc_dir_exists) continue;
if (i == 1 and !gcc_cross_dir_exists) continue;
defer fib.reset();
const base: []const u8 = if (i == 0) "gcc" else "gcc-cross";
var lib_suffix_buf: [64]u8 = undefined;
var suffix_buf_fib = std.heap.FixedBufferAllocator.init(&lib_suffix_buf);
const lib_suffix = std.fs.path.join(suffix_buf_fib.allocator(), &.{ base, candidate_triple }) catch continue;
const dir_name = std.fs.path.join(fib.allocator(), &.{ lib_dir, lib_suffix }) catch continue;
var parent_dir = tc.filesystem.openIterableDir(dir_name) catch continue;
defer parent_dir.close();
var it = parent_dir.iterate();
while (it.next() catch continue) |entry| {
if (entry.kind != .directory) continue;
const version_text = entry.name;
const candidate_version = GCCVersion.parse(version_text);
if (candidate_version.major != -1) {
// TODO: cache path so we're not repeatedly scanning
}
if (candidate_version.isLessThan(4, 1, 1, "")) continue;
switch (candidate_version.order(self.version)) {
.lt, .eq => continue,
.gt => {},
}
if (!try self.scanGCCForMultilibs(tc, target, .{ dir_name, version_text }, needs_biarch_suffix)) continue;
self.version = candidate_version;
self.gcc_triple = try tc.arena.dupe(u8, candidate_triple);
self.install_path = try std.fs.path.join(tc.arena, &.{ lib_dir, lib_suffix, version_text });
self.parent_lib_path = try std.fs.path.join(tc.arena, &.{ self.install_path, "..", "..", ".." });
self.is_valid = true;
}
}
}
fn gccToolchainDir(tc: *const Toolchain) []const u8 {
const sysroot = tc.getSysroot();
if (sysroot.len != 0) return "";
return system_defaults.gcc_install_prefix;
}

122
deps/aro/Driver/GCCVersion.zig vendored Normal file
View File

@ -0,0 +1,122 @@
const std = @import("std");
const mem = std.mem;
const Order = std.math.Order;
const GCCVersion = @This();
/// Raw version number text
raw: []const u8 = "",
/// -1 indicates not present
major: i32 = -1,
/// -1 indicates not present
minor: i32 = -1,
/// -1 indicates not present
patch: i32 = -1,
/// Text of parsed major version number
major_str: []const u8 = "",
/// Text of parsed major + minor version number
minor_str: []const u8 = "",
/// Patch number suffix
suffix: []const u8 = "",
/// This orders versions according to the preferred usage order, not release-time ordering.
/// Higher version numbers are preferred, but a nonexistent minor/patch/suffix is preferred over one that exists,
/// e.g. `4.1` is preferred over `4.0`, but `4` is preferred over both `4.0` and `4.1`.
pub fn isLessThan(self: GCCVersion, rhs_major: i32, rhs_minor: i32, rhs_patch: i32, rhs_suffix: []const u8) bool {
if (self.major != rhs_major) {
return self.major < rhs_major;
}
if (self.minor != rhs_minor) {
if (rhs_minor == -1) return true;
if (self.minor == -1) return false;
return self.minor < rhs_minor;
}
if (self.patch != rhs_patch) {
if (rhs_patch == -1) return true;
if (self.patch == -1) return false;
return self.patch < rhs_patch;
}
if (!mem.eql(u8, self.suffix, rhs_suffix)) {
if (rhs_suffix.len == 0) return true;
if (self.suffix.len == 0) return false;
return switch (std.mem.order(u8, self.suffix, rhs_suffix)) {
.lt => true,
.eq => unreachable,
.gt => false,
};
}
return false;
}
/// Strings in the returned GCCVersion struct have the same lifetime as `text`
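/// Accepts versions of the form `major[.minor[.patch]][suffix]`; parse failure is signalled
/// by returning a version with `major == -1`.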
pub fn parse(text: []const u8) GCCVersion {
const bad = GCCVersion{ .major = -1 };
var good = bad;
var it = mem.splitScalar(u8, text, '.');
const first = it.next().?;
const second = it.next() orelse "";
const rest = it.next() orelse "";
good.major = std.fmt.parseInt(i32, first, 10) catch return bad;
if (good.major < 0) return bad;
good.major_str = first;
if (second.len == 0) return good;
var minor_str = second;
if (rest.len == 0) {
const end = mem.indexOfNone(u8, minor_str, "0123456789") orelse minor_str.len;
if (end > 0) {
good.suffix = minor_str[end..];
minor_str = minor_str[0..end];
}
}
good.minor = std.fmt.parseInt(i32, minor_str, 10) catch return bad;
if (good.minor < 0) return bad;
good.minor_str = minor_str;
if (rest.len > 0) {
const end = mem.indexOfNone(u8, rest, "0123456789") orelse rest.len;
if (end > 0) {
const patch_num_text = rest[0..end];
good.patch = std.fmt.parseInt(i32, patch_num_text, 10) catch return bad;
if (good.patch < 0) return bad;
good.suffix = rest[end..];
}
}
return good;
}
pub fn order(a: GCCVersion, b: GCCVersion) Order {
if (a.isLessThan(b.major, b.minor, b.patch, b.suffix)) return .lt;
if (b.isLessThan(a.major, a.minor, a.patch, a.suffix)) return .gt;
return .eq;
}
test parse {
const versions = [10]GCCVersion{
parse("5"),
parse("4"),
parse("4.2"),
parse("4.0"),
parse("4.0-patched"),
parse("4.0.2"),
parse("4.0.1"),
parse("4.0.1-patched"),
parse("4.0.0"),
parse("4.0.0-patched"),
};
for (versions[0 .. versions.len - 1], versions[1..versions.len]) |first, second| {
try std.testing.expectEqual(Order.eq, first.order(first));
try std.testing.expectEqual(Order.gt, first.order(second));
try std.testing.expectEqual(Order.lt, second.order(first));
}
const last = versions[versions.len - 1];
try std.testing.expectEqual(Order.eq, last.order(last));
}

72
deps/aro/Driver/Multilib.zig vendored Normal file
View File

@ -0,0 +1,72 @@
const std = @import("std");
const util = @import("../util.zig");
const Filesystem = @import("Filesystem.zig").Filesystem;
pub const Flags = std.BoundedArray([]const u8, 6);
/// Large enough for GCCDetector for Linux; may need to be increased to support other toolchains.
const max_multilibs = 4;
const MultilibArray = std.BoundedArray(Multilib, max_multilibs);
pub const Detected = struct {
multilibs: MultilibArray = .{},
selected: Multilib = .{},
biarch_sibling: ?Multilib = null,
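/// Keep only the multilibs whose directories actually exist according to `multilib_filter`,
/// compacting the list in place.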
pub fn filter(self: *Detected, multilib_filter: Filter, fs: Filesystem) void {
var found_count: usize = 0;
for (self.multilibs.constSlice()) |multilib| {
if (multilib_filter.exists(multilib, fs)) {
self.multilibs.set(found_count, multilib);
found_count += 1;
}
}
self.multilibs.resize(found_count) catch unreachable;
}
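/// Pick the single multilib whose flags are all compatible with the requested `flags`;
/// a multilib flag only conflicts when the same flag name is requested with the opposite +/- sign.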
pub fn select(self: *Detected, flags: Flags) !bool {
var filtered: MultilibArray = .{};
for (self.multilibs.constSlice()) |multilib| {
for (multilib.flags.constSlice()) |multilib_flag| {
const matched = for (flags.constSlice()) |arg_flag| {
if (std.mem.eql(u8, arg_flag[1..], multilib_flag[1..])) break arg_flag;
} else multilib_flag;
if (matched[0] != multilib_flag[0]) break;
} else {
filtered.appendAssumeCapacity(multilib);
}
}
if (filtered.len == 0) return false;
if (filtered.len == 1) {
self.selected = filtered.get(0);
return true;
}
return error.TooManyMultilibs;
}
};
pub const Filter = struct {
base: [2][]const u8,
file: []const u8,
pub fn exists(self: Filter, m: Multilib, fs: Filesystem) bool {
return fs.joinedExists(&.{ self.base[0], self.base[1], m.gcc_suffix, self.file });
}
};
const Multilib = @This();
gcc_suffix: []const u8 = "",
os_suffix: []const u8 = "",
include_suffix: []const u8 = "",
flags: Flags = .{},
priority: u32 = 0,
pub fn init(gcc_suffix: []const u8, os_suffix: []const u8, flags: []const []const u8) Multilib {
var self: Multilib = .{
.gcc_suffix = gcc_suffix,
.os_suffix = os_suffix,
};
self.flags.appendSliceAssumeCapacity(flags);
return self;
}

153
deps/aro/InitList.zig vendored Normal file
View File

@ -0,0 +1,153 @@
//! Sparsely populated list of used indexes.
//! Used for detecting duplicate initializers.
const std = @import("std");
const Allocator = std.mem.Allocator;
const testing = std.testing;
const Tree = @import("Tree.zig");
const Token = Tree.Token;
const TokenIndex = Tree.TokenIndex;
const NodeIndex = Tree.NodeIndex;
const Type = @import("Type.zig");
const Diagnostics = @import("Diagnostics.zig");
const NodeList = std.ArrayList(NodeIndex);
const Parser = @import("Parser.zig");
const InitList = @This();
const Item = struct {
list: InitList = .{},
index: u64,
fn order(_: void, a: Item, b: Item) std.math.Order {
return std.math.order(a.index, b.index);
}
};
list: std.ArrayListUnmanaged(Item) = .{},
node: NodeIndex = .none,
tok: TokenIndex = 0,
/// Deinitialize freeing all memory.
pub fn deinit(il: *InitList, gpa: Allocator) void {
for (il.list.items) |*item| item.list.deinit(gpa);
il.list.deinit(gpa);
il.* = undefined;
}
/// Insert initializer at index, returning previous entry if one exists.
pub fn put(il: *InitList, gpa: Allocator, index: usize, node: NodeIndex, tok: TokenIndex) !?TokenIndex {
const items = il.list.items;
var left: usize = 0;
var right: usize = items.len;
// Append new value to empty list
if (left == right) {
const item = try il.list.addOne(gpa);
item.* = .{
.list = .{ .node = node, .tok = tok },
.index = index,
};
return null;
}
while (left < right) {
// Avoid overflowing in the midpoint calculation
const mid = left + (right - left) / 2;
// Compare the key with the midpoint element
switch (std.math.order(index, items[mid].index)) {
.eq => {
// Replace previous entry.
const prev = items[mid].list.tok;
items[mid].list.deinit(gpa);
items[mid] = .{
.list = .{ .node = node, .tok = tok },
.index = index,
};
return prev;
},
.gt => left = mid + 1,
.lt => right = mid,
}
}
// Insert a new value into a sorted position.
try il.list.insert(gpa, left, .{
.list = .{ .node = node, .tok = tok },
.index = index,
});
return null;
}
/// Find item at index, create new if one does not exist.
pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
const items = il.list.items;
var left: usize = 0;
var right: usize = items.len;
// Append new value to empty list
if (left == right) {
const item = try il.list.addOne(gpa);
item.* = .{
.list = .{ .node = .none, .tok = 0 },
.index = index,
};
return &item.list;
}
while (left < right) {
// Avoid overflowing in the midpoint calculation
const mid = left + (right - left) / 2;
// Compare the key with the midpoint element
switch (std.math.order(index, items[mid].index)) {
.eq => return &items[mid].list,
.gt => left = mid + 1,
.lt => right = mid,
}
}
// Insert a new value into a sorted position.
try il.list.insert(gpa, left, .{
.list = .{ .node = .none, .tok = 0 },
.index = index,
});
return &il.list.items[left].list;
}
test "basic usage" {
const gpa = testing.allocator;
var il: InitList = .{};
defer il.deinit(gpa);
{
var i: usize = 0;
while (i < 5) : (i += 1) {
const prev = try il.put(gpa, i, .none, 0);
try testing.expect(prev == null);
}
}
{
const failing = testing.failing_allocator;
var i: usize = 0;
while (i < 5) : (i += 1) {
_ = try il.find(failing, i);
}
}
{
var item = try il.find(gpa, 0);
var i: usize = 1;
while (i < 5) : (i += 1) {
item = try item.find(gpa, i);
}
}
{
const failing = testing.failing_allocator;
var item = try il.find(failing, 0);
var i: usize = 1;
while (i < 5) : (i += 1) {
item = try item.find(failing, i);
}
}
}

180
deps/aro/Interner.zig vendored Normal file
View File

@ -0,0 +1,180 @@
const Interner = @This();
const std = @import("std");
const Allocator = std.mem.Allocator;
const assert = std.debug.assert;
const Value = @import("Value.zig");
map: std.ArrayHashMapUnmanaged(Key, void, KeyContext, false) = .{},
const KeyContext = struct {
pub fn eql(_: @This(), a: Key, b: Key, _: usize) bool {
return b.eql(a);
}
pub fn hash(_: @This(), a: Key) u32 {
return a.hash();
}
};
pub const Key = union(enum) {
int: u16,
float: u16,
ptr,
noreturn,
void,
func,
array: struct {
len: u64,
child: Ref,
},
vector: struct {
len: u32,
child: Ref,
},
value: Value,
record: struct {
/// Pointer to user data, value used for hash and equality check.
user_ptr: *anyopaque,
/// TODO make smaller if Value is made smaller
elements: []const Ref,
},
pub fn hash(key: Key) u32 {
var hasher = std.hash.Wyhash.init(0);
switch (key) {
.value => |val| {
std.hash.autoHash(&hasher, val.tag);
switch (val.tag) {
.unavailable => unreachable,
.nullptr_t => std.hash.autoHash(&hasher, @as(u64, 0)),
.int => std.hash.autoHash(&hasher, val.data.int),
.float => std.hash.autoHash(&hasher, @as(u64, @bitCast(val.data.float))),
.bytes => std.hash.autoHashStrat(&hasher, val.data.bytes, .Shallow),
}
},
.record => |info| {
std.hash.autoHash(&hasher, @intFromPtr(info.user_ptr));
},
inline else => |info| {
std.hash.autoHash(&hasher, info);
},
}
return @truncate(hasher.final());
}
pub fn eql(a: Key, b: Key) bool {
const KeyTag = std.meta.Tag(Key);
const a_tag: KeyTag = a;
const b_tag: KeyTag = b;
if (a_tag != b_tag) return false;
switch (a) {
.value => |a_info| {
const b_info = b.value;
if (a_info.tag != b_info.tag) return false;
switch (a_info.tag) {
.unavailable => unreachable,
.nullptr_t => return true,
.int => return a_info.data.int == b_info.data.int,
.float => return a_info.data.float == b_info.data.float,
.bytes => return a_info.data.bytes.start == b_info.data.bytes.start and a_info.data.bytes.end == b_info.data.bytes.end,
}
},
.record => |a_info| {
return a_info.user_ptr == b.record.user_ptr;
},
inline else => |a_info, tag| {
const b_info = @field(b, @tagName(tag));
return std.meta.eql(a_info, b_info);
},
}
}
fn toRef(key: Key) ?Ref {
switch (key) {
.int => |bits| switch (bits) {
1 => return .i1,
8 => return .i8,
16 => return .i16,
32 => return .i32,
64 => return .i64,
128 => return .i128,
else => {},
},
.float => |bits| switch (bits) {
16 => return .f16,
32 => return .f32,
64 => return .f64,
80 => return .f80,
128 => return .f128,
else => unreachable,
},
.ptr => return .ptr,
.func => return .func,
.noreturn => return .noreturn,
.void => return .void,
else => {},
}
return null;
}
};
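/// Index of an interned key. Frequently used types map to fixed values at the top of the u32
/// range (see `Key.toRef`) and are never stored in `map`; all other refs index into `map`.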
pub const Ref = enum(u32) {
const max = std.math.maxInt(u32);
ptr = max - 0,
noreturn = max - 1,
void = max - 2,
i1 = max - 3,
i8 = max - 4,
i16 = max - 5,
i32 = max - 6,
i64 = max - 7,
i128 = max - 8,
f16 = max - 9,
f32 = max - 10,
f64 = max - 11,
f80 = max - 12,
f128 = max - 13,
func = max - 14,
_,
};
pub fn deinit(ip: *Interner, gpa: Allocator) void {
ip.map.deinit(gpa);
}
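/// Intern `key` and return a stable `Ref` for it. Keys with a reserved ref (see `Key.toRef`)
/// are returned directly without touching the map.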
pub fn put(ip: *Interner, gpa: Allocator, key: Key) !Ref {
if (key.toRef()) |some| return some;
const gop = try ip.map.getOrPut(gpa, key);
return @enumFromInt(gop.index);
}
pub fn has(ip: *Interner, key: Key) ?Ref {
if (key.toRef()) |some| return some;
if (ip.map.getIndex(key)) |index| {
return @enumFromInt(index);
}
return null;
}
pub fn get(ip: Interner, ref: Ref) Key {
switch (ref) {
.ptr => return .ptr,
.func => return .func,
.noreturn => return .noreturn,
.void => return .void,
.i1 => return .{ .int = 1 },
.i8 => return .{ .int = 8 },
.i16 => return .{ .int = 16 },
.i32 => return .{ .int = 32 },
.i64 => return .{ .int = 64 },
.i128 => return .{ .int = 128 },
.f16 => return .{ .float = 16 },
.f32 => return .{ .float = 32 },
.f64 => return .{ .float = 64 },
.f80 => return .{ .float = 80 },
.f128 => return .{ .float = 128 },
else => {},
}
return ip.map.keys()[@intFromEnum(ref)];
}

601
deps/aro/Ir.zig vendored Normal file
View File

@ -0,0 +1,601 @@
const std = @import("std");
const assert = std.debug.assert;
const Allocator = std.mem.Allocator;
const Compilation = @import("Compilation.zig");
const Interner = @import("Interner.zig");
const StringId = @import("StringInterner.zig").StringId;
const Value = @import("Value.zig");
const Ir = @This();
pool: Interner,
strings: []const u8,
// decls: std.StringArrayHashMapUnmanaged(Decl),
// pub const Decl = struct {
instructions: std.MultiArrayList(Inst),
body: std.ArrayListUnmanaged(Ref),
arena: std.heap.ArenaAllocator.State,
// };
pub const Builder = struct {
gpa: Allocator,
arena: std.heap.ArenaAllocator,
instructions: std.MultiArrayList(Ir.Inst) = .{},
body: std.ArrayListUnmanaged(Ref) = .{},
alloc_count: u32 = 0,
arg_count: u32 = 0,
pool: Interner = .{},
current_label: Ref = undefined,
pub fn deinit(b: *Builder) void {
b.arena.deinit();
b.instructions.deinit(b.gpa);
b.body.deinit(b.gpa);
b.pool.deinit(b.gpa);
b.* = undefined;
}
pub fn startFn(b: *Builder) Allocator.Error!void {
b.alloc_count = 0;
b.arg_count = 0;
b.instructions.len = 0;
b.body.items.len = 0;
const entry = try b.makeLabel("entry");
try b.body.append(b.gpa, entry);
b.current_label = entry;
}
pub fn startBlock(b: *Builder, label: Ref) !void {
try b.body.append(b.gpa, label);
b.current_label = label;
}
pub fn addArg(b: *Builder, ty: Interner.Ref) Allocator.Error!Ref {
const ref: Ref = @enumFromInt(b.instructions.len);
try b.instructions.append(b.gpa, .{ .tag = .arg, .data = .{ .none = {} }, .ty = ty });
try b.body.insert(b.gpa, b.arg_count, ref);
b.arg_count += 1;
return ref;
}
pub fn addAlloc(b: *Builder, size: u32, @"align": u32) Allocator.Error!Ref {
const ref: Ref = @enumFromInt(b.instructions.len);
try b.instructions.append(b.gpa, .{
.tag = .alloc,
.data = .{ .alloc = .{ .size = size, .@"align" = @"align" } },
.ty = .ptr,
});
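// Allocs are kept grouped immediately after the entry label, which itself follows the
// function arguments in `body`.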
try b.body.insert(b.gpa, b.alloc_count + b.arg_count + 1, ref);
b.alloc_count += 1;
return ref;
}
pub fn addInst(b: *Builder, tag: Ir.Inst.Tag, data: Ir.Inst.Data, ty: Interner.Ref) Allocator.Error!Ref {
const ref: Ref = @enumFromInt(b.instructions.len);
try b.instructions.append(b.gpa, .{ .tag = tag, .data = data, .ty = ty });
try b.body.append(b.gpa, ref);
return ref;
}
pub fn makeLabel(b: *Builder, name: [*:0]const u8) Allocator.Error!Ref {
const ref: Ref = @enumFromInt(b.instructions.len);
try b.instructions.append(b.gpa, .{ .tag = .label, .data = .{ .label = name }, .ty = .void });
return ref;
}
pub fn addJump(b: *Builder, label: Ref) Allocator.Error!void {
_ = try b.addInst(.jmp, .{ .un = label }, .noreturn);
}
pub fn addBranch(b: *Builder, cond: Ref, true_label: Ref, false_label: Ref) Allocator.Error!void {
const branch = try b.arena.allocator().create(Ir.Inst.Branch);
branch.* = .{
.cond = cond,
.then = true_label,
.@"else" = false_label,
};
_ = try b.addInst(.branch, .{ .branch = branch }, .noreturn);
}
pub fn addSwitch(b: *Builder, target: Ref, values: []Interner.Ref, labels: []Ref, default: Ref) Allocator.Error!void {
assert(values.len == labels.len);
const a = b.arena.allocator();
const @"switch" = try a.create(Ir.Inst.Switch);
@"switch".* = .{
.target = target,
.cases_len = @intCast(values.len),
.case_vals = (try a.dupe(Interner.Ref, values)).ptr,
.case_labels = (try a.dupe(Ref, labels)).ptr,
.default = default,
};
_ = try b.addInst(.@"switch", .{ .@"switch" = @"switch" }, .noreturn);
}
pub fn addStore(b: *Builder, ptr: Ref, val: Ref) Allocator.Error!void {
_ = try b.addInst(.store, .{ .bin = .{ .lhs = ptr, .rhs = val } }, .void);
}
pub fn addConstant(b: *Builder, val: Value, ty: Interner.Ref) Allocator.Error!Ref {
const ref: Ref = @enumFromInt(b.instructions.len);
const key: Interner.Key = .{
.value = val,
};
const val_ref = try b.pool.put(b.gpa, key);
try b.instructions.append(b.gpa, .{ .tag = .constant, .data = .{
.constant = val_ref,
}, .ty = ty });
return ref;
}
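/// Phi inputs are stored length-prefixed in the arena: element 0 encodes the input count as a
/// Ref, followed by the (label, value) pairs; `Inst.Phi.inputs` decodes this layout.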
pub fn addPhi(b: *Builder, inputs: []const Inst.Phi.Input, ty: Interner.Ref) Allocator.Error!Ref {
const a = b.arena.allocator();
const input_refs = try a.alloc(Ref, inputs.len * 2 + 1);
input_refs[0] = @enumFromInt(inputs.len);
std.mem.copy(Ref, input_refs[1..], std.mem.bytesAsSlice(Ref, std.mem.sliceAsBytes(inputs)));
return b.addInst(.phi, .{ .phi = .{ .ptr = input_refs.ptr } }, ty);
}
pub fn addSelect(b: *Builder, cond: Ref, then: Ref, @"else": Ref, ty: Interner.Ref) Allocator.Error!Ref {
const branch = try b.arena.allocator().create(Ir.Inst.Branch);
branch.* = .{
.cond = cond,
.then = then,
.@"else" = @"else",
};
return b.addInst(.select, .{ .branch = branch }, ty);
}
};
pub const Ref = enum(u32) { none = std.math.maxInt(u32), _ };
pub const Inst = struct {
tag: Tag,
data: Data,
ty: Interner.Ref,
pub const Tag = enum {
// data.constant
// not included in blocks
constant,
// data.arg
// not included in blocks
arg,
symbol,
// data.label
label,
// data.block
label_addr,
jmp,
// data.switch
@"switch",
// data.branch
branch,
select,
// data.un
jmp_val,
// data.call
call,
// data.alloc
alloc,
// data.phi
phi,
// data.bin
store,
bit_or,
bit_xor,
bit_and,
bit_shl,
bit_shr,
cmp_eq,
cmp_ne,
cmp_lt,
cmp_lte,
cmp_gt,
cmp_gte,
add,
sub,
mul,
div,
mod,
// data.un
ret,
load,
bit_not,
negate,
trunc,
zext,
sext,
};
pub const Data = union {
constant: Interner.Ref,
none: void,
bin: struct {
lhs: Ref,
rhs: Ref,
},
un: Ref,
arg: u32,
alloc: struct {
size: u32,
@"align": u32,
},
@"switch": *Switch,
call: *Call,
label: [*:0]const u8,
branch: *Branch,
phi: Phi,
};
pub const Branch = struct {
cond: Ref,
then: Ref,
@"else": Ref,
};
pub const Switch = struct {
target: Ref,
cases_len: u32,
default: Ref,
case_vals: [*]Interner.Ref,
case_labels: [*]Ref,
};
pub const Call = struct {
func: Ref,
args_len: u32,
args_ptr: [*]Ref,
pub fn args(c: Call) []Ref {
return c.args_ptr[0..c.args_len];
}
};
pub const Phi = struct {
ptr: [*]Ir.Ref,
pub const Input = struct {
label: Ir.Ref,
value: Ir.Ref,
};
pub fn inputs(p: Phi) []Input {
const len = @intFromEnum(p.ptr[0]) * 2;
const slice = (p.ptr + 1)[0..len];
return std.mem.bytesAsSlice(Input, std.mem.sliceAsBytes(slice));
}
};
};
pub fn deinit(ir: *Ir, gpa: std.mem.Allocator) void {
ir.arena.promote(gpa).deinit();
ir.instructions.deinit(gpa);
ir.* = undefined;
}
const util = @import("util.zig");
const TYPE = util.Color.purple;
const INST = util.Color.cyan;
const REF = util.Color.blue;
const LITERAL = util.Color.green;
const ATTRIBUTE = util.Color.yellow;
const RefMap = std.AutoArrayHashMap(Ref, void);
pub fn dump(ir: Ir, gpa: Allocator, name: []const u8, color: bool, w: anytype) !void {
const tags = ir.instructions.items(.tag);
const data = ir.instructions.items(.data);
var ref_map = RefMap.init(gpa);
defer ref_map.deinit();
var label_map = RefMap.init(gpa);
defer label_map.deinit();
const ret_inst = ir.body.items[ir.body.items.len - 1];
const ret_operand = data[@intFromEnum(ret_inst)].un;
const ret_ty = ir.instructions.items(.ty)[@intFromEnum(ret_operand)];
try ir.writeType(ret_ty, color, w);
if (color) util.setColor(REF, w);
try w.print(" @{s}", .{name});
if (color) util.setColor(.reset, w);
try w.writeAll("(");
var arg_count: u32 = 0;
while (true) : (arg_count += 1) {
const ref = ir.body.items[arg_count];
if (tags[@intFromEnum(ref)] != .arg) break;
if (arg_count != 0) try w.writeAll(", ");
try ref_map.put(ref, {});
try ir.writeRef(&ref_map, ref, color, w);
if (color) util.setColor(.reset, w);
}
try w.writeAll(") {\n");
for (ir.body.items[arg_count..]) |ref| {
switch (tags[@intFromEnum(ref)]) {
.label => try label_map.put(ref, {}),
else => {},
}
}
for (ir.body.items[arg_count..]) |ref| {
const i = @intFromEnum(ref);
const tag = tags[i];
switch (tag) {
.arg, .constant, .symbol => unreachable,
.label => {
const label_index = label_map.getIndex(ref).?;
if (color) util.setColor(REF, w);
try w.print("{s}.{d}:\n", .{ data[i].label, label_index });
},
// .label_val => {
// const un = data[i].un;
// try w.print(" %{d} = label.{d}\n", .{ i, @intFromEnum(un) });
// },
.jmp => {
const un = data[i].un;
if (color) util.setColor(INST, w);
try w.writeAll(" jmp ");
try ir.writeLabel(&label_map, un, color, w);
try w.writeByte('\n');
},
.branch => {
const br = data[i].branch;
if (color) util.setColor(INST, w);
try w.writeAll(" branch ");
try ir.writeRef(&ref_map, br.cond, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(", ");
try ir.writeLabel(&label_map, br.then, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(", ");
try ir.writeLabel(&label_map, br.@"else", color, w);
try w.writeByte('\n');
},
.select => {
const br = data[i].branch;
try ir.writeNewRef(&ref_map, ref, color, w);
try w.writeAll("select ");
try ir.writeRef(&ref_map, br.cond, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(", ");
try ir.writeRef(&ref_map, br.then, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(", ");
try ir.writeRef(&ref_map, br.@"else", color, w);
try w.writeByte('\n');
},
// .jmp_val => {
// const bin = data[i].bin;
// try w.print(" %{s} %{d} label.{d}\n", .{ @tagName(tag), @intFromEnum(bin.lhs), @intFromEnum(bin.rhs) });
// },
.@"switch" => {
const @"switch" = data[i].@"switch";
if (color) util.setColor(INST, w);
try w.writeAll(" switch ");
try ir.writeRef(&ref_map, @"switch".target, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(" {");
for (@"switch".case_vals[0..@"switch".cases_len], @"switch".case_labels) |val_ref, label_ref| {
try w.writeAll("\n ");
try ir.writeValue(val_ref, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(" => ");
try ir.writeLabel(&label_map, label_ref, color, w);
if (color) util.setColor(.reset, w);
}
if (color) util.setColor(LITERAL, w);
try w.writeAll("\n default ");
if (color) util.setColor(.reset, w);
try w.writeAll("=> ");
try ir.writeLabel(&label_map, @"switch".default, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll("\n }\n");
},
.call => {
const call = data[i].call;
try ir.writeNewRef(&ref_map, ref, color, w);
try w.writeAll("call ");
try ir.writeRef(&ref_map, call.func, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll("(");
for (call.args(), 0..) |arg, arg_i| {
if (arg_i != 0) try w.writeAll(", ");
try ir.writeRef(&ref_map, arg, color, w);
if (color) util.setColor(.reset, w);
}
try w.writeAll(")\n");
},
.alloc => {
const alloc = data[i].alloc;
try ir.writeNewRef(&ref_map, ref, color, w);
try w.writeAll("alloc ");
if (color) util.setColor(ATTRIBUTE, w);
try w.writeAll("size ");
if (color) util.setColor(LITERAL, w);
try w.print("{d}", .{alloc.size});
if (color) util.setColor(ATTRIBUTE, w);
try w.writeAll(" align ");
if (color) util.setColor(LITERAL, w);
try w.print("{d}", .{alloc.@"align"});
try w.writeByte('\n');
},
.phi => {
try ir.writeNewRef(&ref_map, ref, color, w);
try w.writeAll("phi");
if (color) util.setColor(.reset, w);
try w.writeAll(" {");
for (data[i].phi.inputs()) |input| {
try w.writeAll("\n ");
try ir.writeLabel(&label_map, input.label, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(" => ");
try ir.writeRef(&ref_map, input.value, color, w);
if (color) util.setColor(.reset, w);
}
if (color) util.setColor(.reset, w);
try w.writeAll("\n }\n");
},
.store => {
const bin = data[i].bin;
if (color) util.setColor(INST, w);
try w.writeAll(" store ");
try ir.writeRef(&ref_map, bin.lhs, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(", ");
try ir.writeRef(&ref_map, bin.rhs, color, w);
try w.writeByte('\n');
},
.ret => {
if (color) util.setColor(INST, w);
try w.writeAll(" ret ");
if (data[i].un != .none) try ir.writeRef(&ref_map, data[i].un, color, w);
try w.writeByte('\n');
},
.load => {
try ir.writeNewRef(&ref_map, ref, color, w);
try w.writeAll("load ");
try ir.writeRef(&ref_map, data[i].un, color, w);
try w.writeByte('\n');
},
.bit_or,
.bit_xor,
.bit_and,
.bit_shl,
.bit_shr,
.cmp_eq,
.cmp_ne,
.cmp_lt,
.cmp_lte,
.cmp_gt,
.cmp_gte,
.add,
.sub,
.mul,
.div,
.mod,
=> {
const bin = data[i].bin;
try ir.writeNewRef(&ref_map, ref, color, w);
try w.print("{s} ", .{@tagName(tag)});
try ir.writeRef(&ref_map, bin.lhs, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(", ");
try ir.writeRef(&ref_map, bin.rhs, color, w);
try w.writeByte('\n');
},
.bit_not,
.negate,
.trunc,
.zext,
.sext,
=> {
const un = data[i].un;
try ir.writeNewRef(&ref_map, ref, color, w);
try w.print("{s} ", .{@tagName(tag)});
try ir.writeRef(&ref_map, un, color, w);
try w.writeByte('\n');
},
.label_addr, .jmp_val => {},
}
}
if (color) util.setColor(.reset, w);
try w.writeAll("}\n\n");
}
fn writeType(ir: Ir, ty_ref: Interner.Ref, color: bool, w: anytype) !void {
const ty = ir.pool.get(ty_ref);
if (color) util.setColor(TYPE, w);
switch (ty) {
.value => unreachable,
.ptr, .noreturn, .void, .func => try w.writeAll(@tagName(ty)),
.int => |bits| try w.print("i{d}", .{bits}),
.float => |bits| try w.print("f{d}", .{bits}),
.array => |info| {
try w.print("[{d} * ", .{info.len});
try ir.writeType(info.child, false, w);
try w.writeByte(']');
},
.vector => |info| {
try w.print("<{d} * ", .{info.len});
try ir.writeType(info.child, false, w);
try w.writeByte('>');
},
.record => |info| {
// TODO collect into buffer and only print once
try w.writeAll("{ ");
for (info.elements, 0..) |elem, i| {
if (i != 0) try w.writeAll(", ");
try ir.writeType(elem, color, w);
}
try w.writeAll(" }");
},
}
}
fn writeValue(ir: Ir, val_ref: Interner.Ref, color: bool, w: anytype) !void {
const v = ir.pool.get(val_ref).value;
if (color) util.setColor(LITERAL, w);
switch (v.tag) {
.unavailable => try w.writeAll(" unavailable"),
.int => try w.print("{d}", .{v.data.int}),
.bytes => try w.print("\"{s}\"", .{v.data.bytes.slice(ir.strings)}),
// std.fmt does @as instead of @floatCast
.float => try w.print("{d}", .{@as(f64, @floatCast(v.data.float))}),
else => try w.print("({s})", .{@tagName(v.tag)}),
}
}
fn writeRef(ir: Ir, ref_map: *RefMap, ref: Ref, color: bool, w: anytype) !void {
assert(ref != .none);
const index = @intFromEnum(ref);
const ty_ref = ir.instructions.items(.ty)[index];
if (ir.instructions.items(.tag)[index] == .constant) {
try ir.writeType(ty_ref, color, w);
const v_ref = ir.instructions.items(.data)[index].constant;
try w.writeByte(' ');
try ir.writeValue(v_ref, color, w);
return;
} else if (ir.instructions.items(.tag)[index] == .symbol) {
const name = ir.instructions.items(.data)[index].label;
try ir.writeType(ty_ref, color, w);
if (color) util.setColor(REF, w);
try w.print(" @{s}", .{name});
return;
}
try ir.writeType(ty_ref, color, w);
if (color) util.setColor(REF, w);
const ref_index = ref_map.getIndex(ref).?;
try w.print(" %{d}", .{ref_index});
}
fn writeNewRef(ir: Ir, ref_map: *RefMap, ref: Ref, color: bool, w: anytype) !void {
try ref_map.put(ref, {});
try w.writeAll(" ");
try ir.writeRef(ref_map, ref, color, w);
if (color) util.setColor(.reset, w);
try w.writeAll(" = ");
if (color) util.setColor(INST, w);
}
fn writeLabel(ir: Ir, label_map: *RefMap, ref: Ref, color: bool, w: anytype) !void {
assert(ref != .none);
const index = @intFromEnum(ref);
const label = ir.instructions.items(.data)[index].label;
if (color) util.setColor(REF, w);
const label_index = label_map.getIndex(ref).?;
try w.print("{s}.{d}", .{ label, label_index });
}
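The printing helpers above render one instruction per indented line, with each operand shown as its type followed by a %-numbered ref (constants are printed inline as type plus value). As a rough, hand-written illustration of the resulting text, not output captured from this change:

    i32 %1 = load ptr %0
    i32 %2 = add i32 %1, i32 1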

146
deps/aro/LangOpts.zig vendored Normal file
View File

@@ -0,0 +1,146 @@
const std = @import("std");
const DiagnosticTag = @import("Diagnostics.zig").Tag;
const LangOpts = @This();
pub const Compiler = enum {
clang,
gcc,
msvc,
};
/// The floating-point evaluation method for intermediate results within a single expression
pub const FPEvalMethod = enum(i8) {
/// The evaluation method cannot be determined or is inconsistent for this target.
indeterminate = -1,
/// Use the type declared in the source
source = 0,
/// Use double as the floating-point evaluation method for all float expressions narrower than double.
double = 1,
/// Use long double as the floating-point evaluation method for all float expressions narrower than long double.
extended = 2,
};
pub const Standard = enum {
/// ISO C 1990
c89,
/// ISO C 1990 with amendment 1
iso9899,
/// ISO C 1990 with GNU extensions
gnu89,
/// ISO C 1999
c99,
/// ISO C 1999 with GNU extensions
gnu99,
/// ISO C 2011
c11,
/// ISO C 2011 with GNU extensions
gnu11,
/// ISO C 2017
c17,
/// Default value if nothing specified; adds the GNU keywords to
/// C17 but does not suppress warnings about using GNU extensions
default,
/// ISO C 2017 with GNU extensions
gnu17,
/// Working Draft for ISO C2x
c2x,
/// Working Draft for ISO C2x with GNU extensions
gnu2x,
const NameMap = std.ComptimeStringMap(Standard, .{
.{ "c89", .c89 }, .{ "c90", .c89 }, .{ "iso9899:1990", .c89 },
.{ "iso9899:199409", .iso9899 }, .{ "gnu89", .gnu89 }, .{ "gnu90", .gnu89 },
.{ "c99", .c99 }, .{ "iso9899:1999", .c99 }, .{ "gnu99", .gnu99 },
.{ "c11", .c11 }, .{ "iso9899:2011", .c11 }, .{ "gnu11", .gnu11 },
.{ "c17", .c17 }, .{ "iso9899:2017", .c17 }, .{ "c18", .c17 },
.{ "iso9899:2018", .c17 }, .{ "gnu17", .gnu17 }, .{ "gnu18", .gnu17 },
.{ "c2x", .c2x }, .{ "gnu2x", .gnu2x },
});
pub fn atLeast(self: Standard, other: Standard) bool {
return @intFromEnum(self) >= @intFromEnum(other);
}
pub fn isGNU(standard: Standard) bool {
return switch (standard) {
.gnu89, .gnu99, .gnu11, .default, .gnu17, .gnu2x => true,
else => false,
};
}
pub fn isExplicitGNU(standard: Standard) bool {
return standard.isGNU() and standard != .default;
}
/// Value reported by __STDC_VERSION__ macro
pub fn StdCVersionMacro(standard: Standard) ?[]const u8 {
return switch (standard) {
.c89, .gnu89 => null,
.iso9899 => "199409L",
.c99, .gnu99 => "199901L",
.c11, .gnu11 => "201112L",
.default, .c17, .gnu17 => "201710L",
// todo: subject to change, verify once c23 finalized
.c2x, .gnu2x => "202311L",
};
}
};
emulate: Compiler = .clang,
standard: Standard = .default,
/// -fshort-enums option; makes enums take up only as much space as they need to hold all their values.
short_enums: bool = false,
dollars_in_identifiers: bool = true,
declspec_attrs: bool = false,
ms_extensions: bool = false,
/// True or false if digraph support was explicitly enabled/disabled with -fdigraphs/-fno-digraphs; null if unspecified.
digraphs: ?bool = null,
/// If set, use the native half type instead of promoting to float
use_native_half_type: bool = false,
/// If set, function arguments and return values may be of type __fp16 even if there is no standard ABI for it
allow_half_args_and_returns: bool = false,
/// null indicates that the user did not select a value, use target to determine default
fp_eval_method: ?FPEvalMethod = null,
/// If set, use specified signedness for `char` instead of the target's default char signedness
char_signedness_override: ?std.builtin.Signedness = null,
/// If set, override the default availability of char8_t (by default, enabled in C2X and later; disabled otherwise)
has_char8_t_override: ?bool = null,
/// Whether to allow GNU-style inline assembly
gnu_asm: bool = true,
pub fn setStandard(self: *LangOpts, name: []const u8) error{InvalidStandard}!void {
self.standard = Standard.NameMap.get(name) orelse return error.InvalidStandard;
}
pub fn enableMSExtensions(self: *LangOpts) void {
self.declspec_attrs = true;
self.ms_extensions = true;
}
pub fn disableMSExtensions(self: *LangOpts) void {
self.declspec_attrs = false;
self.ms_extensions = false;
}
pub fn hasChar8_T(self: *const LangOpts) bool {
return self.has_char8_t_override orelse self.standard.atLeast(.c2x);
}
pub fn hasDigraphs(self: *const LangOpts) bool {
return self.digraphs orelse self.standard.atLeast(.gnu89);
}
pub fn setEmulatedCompiler(self: *LangOpts, compiler: Compiler) void {
self.emulate = compiler;
if (compiler == .msvc) self.enableMSExtensions();
}
pub fn setFpEvalMethod(self: *LangOpts, fp_eval_method: FPEvalMethod) void {
self.fp_eval_method = fp_eval_method;
}
pub fn setCharSignedness(self: *LangOpts, signedness: std.builtin.Signedness) void {
self.char_signedness_override = signedness;
}
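A minimal usage sketch of the options above; the `applyStdFlag` helper is hypothetical and only exercises the API defined in this file:

const LangOpts = @import("LangOpts.zig");

fn applyStdFlag(opts: *LangOpts, name: []const u8) error{InvalidStandard}!void {
    // Unknown names such as "c94" are rejected with error.InvalidStandard.
    try opts.setStandard(name);
    // gnu17 (the default) reports the same __STDC_VERSION__ as c17.
    _ = opts.standard.StdCVersionMacro();
    // char8_t and digraph availability fall back to the selected standard unless overridden.
    _ = opts.hasChar8_T();
    _ = opts.hasDigraphs();
}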

73
deps/aro/Object.zig vendored Normal file
View File

@@ -0,0 +1,73 @@
const std = @import("std");
const Compilation = @import("Compilation.zig");
const Elf = @import("object/Elf.zig");
const Object = @This();
format: std.Target.ObjectFormat,
comp: *Compilation,
pub fn create(comp: *Compilation) !*Object {
switch (comp.target.ofmt) {
.elf => return Elf.create(comp),
else => unreachable,
}
}
pub fn deinit(obj: *Object) void {
switch (obj.format) {
.elf => @fieldParentPtr(Elf, "obj", obj).deinit(),
else => unreachable,
}
}
pub const Section = union(enum) {
undefined,
data,
read_only_data,
func,
strings,
custom: []const u8,
};
pub fn getSection(obj: *Object, section: Section) !*std.ArrayList(u8) {
switch (obj.format) {
.elf => return @fieldParentPtr(Elf, "obj", obj).getSection(section),
else => unreachable,
}
}
pub const SymbolType = enum {
func,
variable,
external,
};
pub fn declareSymbol(
obj: *Object,
section: Section,
name: ?[]const u8,
linkage: std.builtin.GlobalLinkage,
@"type": SymbolType,
offset: u64,
size: u64,
) ![]const u8 {
switch (obj.format) {
.elf => return @fieldParentPtr(Elf, "obj", obj).declareSymbol(section, name, linkage, @"type", offset, size),
else => unreachable,
}
}
pub fn addRelocation(obj: *Object, name: []const u8, section: Section, address: u64, addend: i64) !void {
switch (obj.format) {
.elf => return @fieldParentPtr(Elf, "obj", obj).addRelocation(name, section, address, addend),
else => unreachable,
}
}
pub fn finish(obj: *Object, file: std.fs.File) !void {
switch (obj.format) {
.elf => return @fieldParentPtr(Elf, "obj", obj).finish(file),
else => unreachable,
}
}
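A minimal sketch of the intended emit flow, assuming an ELF target; the machine-code bytes and the `.Strong` linkage spelling are illustrative assumptions rather than anything taken from this change:

const std = @import("std");
const Compilation = @import("Compilation.zig");
const Object = @import("Object.zig");

fn emitStub(comp: *Compilation, out_file: std.fs.File) !void {
    const obj = try Object.create(comp); // only ELF is handled above
    defer obj.deinit();

    const code = try obj.getSection(.func);
    const offset = code.items.len;
    try code.appendSlice(&.{ 0x31, 0xC0, 0xC3 }); // hypothetical x86_64 body: xor eax, eax; ret

    _ = try obj.declareSymbol(.func, "main", .Strong, .func, offset, code.items.len - offset);
    try obj.finish(out_file);
}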

8200
deps/aro/Parser.zig vendored Normal file

File diff suppressed because it is too large

83
deps/aro/Pragma.zig vendored Normal file
View File

@@ -0,0 +1,83 @@
const std = @import("std");
const Compilation = @import("Compilation.zig");
const Preprocessor = @import("Preprocessor.zig");
const Parser = @import("Parser.zig");
const TokenIndex = @import("Tree.zig").TokenIndex;
const Pragma = @This();
pub const Error = Compilation.Error || error{ UnknownPragma, StopPreprocessing };
/// Called during Preprocessor.init
beforePreprocess: ?*const fn (*Pragma, *Compilation) void = null,
/// Called at the beginning of Parser.parse
beforeParse: ?*const fn (*Pragma, *Compilation) void = null,
/// Called at the end of Parser.parse if a Tree was successfully parsed
afterParse: ?*const fn (*Pragma, *Compilation) void = null,
/// Called during Compilation.deinit
deinit: *const fn (*Pragma, *Compilation) void,
/// Called whenever the preprocessor encounters this pragma. `start_idx` is the index
/// within `pp.tokens` of the pragma name token. The pragma end is indicated by a
/// .nl token (which may be generated if the source ends with a pragma and no trailing newline).
/// As an example, given the following line:
/// #pragma GCC diagnostic error "-Wnewline-eof" \n
/// Then pp.tokens.get(start_idx) will return the `GCC` token.
/// Return error.UnknownPragma to emit an `unknown_pragma` diagnostic
/// Return error.StopPreprocessing to stop preprocessing the current file (see once.zig)
preprocessorHandler: ?*const fn (*Pragma, *Preprocessor, start_idx: TokenIndex) Error!void = null,
/// Called during token pretty-printing (`-E` option). If this returns true, the pragma will
/// be printed; otherwise it will be omitted. start_idx is the index of the pragma name token
preserveTokens: ?*const fn (*Pragma, *Preprocessor, start_idx: TokenIndex) bool = null,
/// Same as preprocessorHandler except called during parsing
/// The parser's `p.tok_i` field must not be changed
parserHandler: ?*const fn (*Pragma, *Parser, start_idx: TokenIndex) Compilation.Error!void = null,
pub fn pasteTokens(pp: *Preprocessor, start_idx: TokenIndex) ![]const u8 {
if (pp.tokens.get(start_idx).id == .nl) return error.ExpectedStringLiteral;
const char_top = pp.char_buf.items.len;
defer pp.char_buf.items.len = char_top;
var i: usize = 0;
var lparen_count: u32 = 0;
var rparen_count: u32 = 0;
while (true) : (i += 1) {
const tok = pp.tokens.get(start_idx + i);
if (tok.id == .nl) break;
switch (tok.id) {
.l_paren => {
if (lparen_count != i) return error.ExpectedStringLiteral;
lparen_count += 1;
},
.r_paren => rparen_count += 1,
.string_literal => {
if (rparen_count != 0) return error.ExpectedStringLiteral;
const str = pp.expandedSlice(tok);
try pp.char_buf.appendSlice(str[1 .. str.len - 1]);
},
else => return error.ExpectedStringLiteral,
}
}
if (lparen_count != rparen_count) return error.ExpectedStringLiteral;
return pp.char_buf.items[char_top..];
}
pub fn shouldPreserveTokens(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
if (self.preserveTokens) |func| return func(self, pp, start_idx);
return false;
}
pub fn preprocessorCB(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Error!void {
if (self.preprocessorHandler) |func| return func(self, pp, start_idx);
}
pub fn parserCB(self: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Error!void {
const tok_index = p.tok_i;
defer std.debug.assert(tok_index == p.tok_i);
if (self.parserHandler) |func| return func(self, p, start_idx);
}
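A hypothetical implementation of the hooks above, purely as a sketch (the `Example` pragma and its `#pragma example("stop")` syntax are invented for illustration):

const std = @import("std");
const Compilation = @import("Compilation.zig");
const Preprocessor = @import("Preprocessor.zig");
const Pragma = @import("Pragma.zig");
const TokenIndex = @import("Tree.zig").TokenIndex;

const Example = struct {
    pragma: Pragma = .{
        .deinit = deinit,
        .preprocessorHandler = preprocessorHandler,
    },

    fn deinit(pragma: *Pragma, comp: *Compilation) void {
        const self = @fieldParentPtr(Example, "pragma", pragma);
        comp.gpa.destroy(self);
    }

    fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
        _ = pragma;
        // Accepts `#pragma example("stop")`; anything else is reported as unknown.
        const str = Pragma.pasteTokens(pp, start_idx + 1) catch return error.UnknownPragma;
        if (std.mem.eql(u8, str, "stop")) return error.StopPreprocessing;
    }
};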

2691
deps/aro/Preprocessor.zig vendored Normal file

File diff suppressed because it is too large

125
deps/aro/Source.zig vendored Normal file
View File

@@ -0,0 +1,125 @@
const std = @import("std");
const Source = @This();
pub const Id = enum(u32) {
unused = 0,
generated = 1,
_,
};
pub const Location = struct {
id: Id = .unused,
byte_offset: u32 = 0,
line: u32 = 0,
pub fn eql(a: Location, b: Location) bool {
return a.id == b.id and a.byte_offset == b.byte_offset and a.line == b.line;
}
};
path: []const u8,
buf: []const u8,
id: Id,
/// Each entry represents a byte position within `buf` where a backslash+newline was deleted
/// from the original raw buffer. The same position can appear multiple times if multiple
/// consecutive splices happened. Guaranteed to be non-decreasing.
splice_locs: []const u32,
/// Todo: binary search instead of scanning entire `splice_locs`.
pub fn numSplicesBefore(source: Source, byte_offset: u32) u32 {
for (source.splice_locs, 0..) |splice_offset, i| {
if (splice_offset > byte_offset) return @intCast(i);
}
return @intCast(source.splice_locs.len);
}
/// Returns the actual line number (before newline splicing) of a Location
/// This corresponds to what the user would actually see in their text editor
pub fn physicalLine(source: Source, loc: Location) u32 {
return loc.line + source.numSplicesBefore(loc.byte_offset);
}
const LineCol = struct { line: []const u8, line_no: u32, col: u32, width: u32, end_with_splice: bool };
pub fn lineCol(source: Source, loc: Location) LineCol {
var start: usize = 0;
// find the start of the line which is either a newline or a splice
if (std.mem.lastIndexOfScalar(u8, source.buf[0..loc.byte_offset], '\n')) |some| start = some + 1;
const splice_index: u32 = for (source.splice_locs, 0..) |splice_offset, i| {
if (splice_offset > start) {
if (splice_offset < loc.byte_offset) {
start = splice_offset;
break @as(u32, @intCast(i)) + 1;
}
break @intCast(i);
}
} else @intCast(source.splice_locs.len);
var i: usize = start;
var col: u32 = 1;
var width: u32 = 0;
while (i < loc.byte_offset) : (col += 1) { // TODO this is still incorrect, but better
const len = std.unicode.utf8ByteSequenceLength(source.buf[i]) catch unreachable;
const cp = std.unicode.utf8Decode(source.buf[i..][0..len]) catch unreachable;
width += codepointWidth(cp);
i += len;
}
// find the end of the line which is either a newline, EOF or a splice
var nl = source.buf.len;
var end_with_splice = false;
if (std.mem.indexOfScalar(u8, source.buf[start..], '\n')) |some| nl = some + start;
if (source.splice_locs.len > splice_index and nl > source.splice_locs[splice_index] and source.splice_locs[splice_index] > start) {
end_with_splice = true;
nl = source.splice_locs[splice_index];
}
return .{
.line = source.buf[start..nl],
.line_no = loc.line + splice_index,
.col = col,
.width = width,
.end_with_splice = end_with_splice,
};
}
fn codepointWidth(cp: u32) u32 {
return switch (cp) {
0x1100...0x115F,
0x2329,
0x232A,
0x2E80...0x303F,
0x3040...0x3247,
0x3250...0x4DBF,
0x4E00...0xA4C6,
0xA960...0xA97C,
0xAC00...0xD7A3,
0xF900...0xFAFF,
0xFE10...0xFE19,
0xFE30...0xFE6B,
0xFF01...0xFF60,
0xFFE0...0xFFE6,
0x1B000...0x1B001,
0x1F200...0x1F251,
0x20000...0x3FFFD,
0x1F300...0x1F5FF,
0x1F900...0x1F9FF,
=> 2,
else => 1,
};
}
/// Returns the first offset, if any, in buf where an invalid utf8 sequence
/// is found. Code adapted from std.unicode.utf8ValidateSlice
pub fn offsetOfInvalidUtf8(self: Source) ?u32 {
const buf = self.buf;
std.debug.assert(buf.len <= std.math.maxInt(u32));
var i: u32 = 0;
while (i < buf.len) {
if (std.unicode.utf8ByteSequenceLength(buf[i])) |cp_len| {
if (i + cp_len > buf.len) return i;
if (std.meta.isError(std.unicode.utf8Decode(buf[i .. i + cp_len]))) return i;
i += cp_len;
} else |_| return i;
}
return null;
}
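An illustrative test of the splice bookkeeping, assuming a raw source of `#define FOO 1 \` followed by `+ 2`, so that the backslash-newline was removed at byte offset 14 of `buf` (the offsets and the `id` value are made up for the example):

const std = @import("std");
const Source = @import("Source.zig");

test "numSplicesBefore (illustrative)" {
    const src = Source{
        .path = "example.c",
        .buf = "#define FOO 1 + 2\n", // backslash-newline already spliced out
        .id = @enumFromInt(2),
        .splice_locs = &[_]u32{14}, // where the backslash-newline was removed
    };
    try std.testing.expectEqual(@as(u32, 0), src.numSplicesBefore(10)); // inside "#define FOO"
    try std.testing.expectEqual(@as(u32, 1), src.numSplicesBefore(16)); // at the trailing "2"
}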

78
deps/aro/StringInterner.zig vendored Normal file
View File

@@ -0,0 +1,78 @@
const std = @import("std");
const mem = std.mem;
const StringInterner = @This();
const StringToIdMap = std.StringHashMapUnmanaged(StringId);
pub const StringId = enum(u32) {
empty,
_,
};
pub const TypeMapper = struct {
const LookupSpeed = enum {
fast,
slow,
};
data: union(LookupSpeed) {
fast: []const []const u8,
slow: *const StringToIdMap,
},
pub fn lookup(self: TypeMapper, string_id: StringInterner.StringId) []const u8 {
if (string_id == .empty) return "";
switch (self.data) {
.fast => |arr| return arr[@intFromEnum(string_id)],
.slow => |map| {
var it = map.iterator();
while (it.next()) |entry| {
if (entry.value_ptr.* == string_id) return entry.key_ptr.*;
}
unreachable;
},
}
}
pub fn deinit(self: TypeMapper, allocator: mem.Allocator) void {
switch (self.data) {
.slow => {},
.fast => |arr| allocator.free(arr),
}
}
};
string_table: StringToIdMap = .{},
next_id: StringId = @enumFromInt(@intFromEnum(StringId.empty) + 1),
pub fn deinit(self: *StringInterner, allocator: mem.Allocator) void {
self.string_table.deinit(allocator);
}
pub fn intern(self: *StringInterner, allocator: mem.Allocator, str: []const u8) !StringId {
if (str.len == 0) return .empty;
const gop = try self.string_table.getOrPut(allocator, str);
if (gop.found_existing) return gop.value_ptr.*;
defer self.next_id = @enumFromInt(@intFromEnum(self.next_id) + 1);
gop.value_ptr.* = self.next_id;
return self.next_id;
}
/// deinit for the returned TypeMapper is a no-op and does not need to be called
pub fn getSlowTypeMapper(self: *const StringInterner) TypeMapper {
return TypeMapper{ .data = .{ .slow = &self.string_table } };
}
/// Caller must call `deinit` on the returned TypeMapper
pub fn getFastTypeMapper(self: *const StringInterner, allocator: mem.Allocator) !TypeMapper {
var strings = try allocator.alloc([]const u8, @intFromEnum(self.next_id));
var it = self.string_table.iterator();
strings[0] = "";
while (it.next()) |entry| {
strings[@intFromEnum(entry.value_ptr.*)] = entry.key_ptr.*;
}
return TypeMapper{ .data = .{ .fast = strings } };
}
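An illustrative test of the interner above (the strings are arbitrary):

const std = @import("std");
const StringInterner = @import("StringInterner.zig");

test "intern and look up (illustrative)" {
    const gpa = std.testing.allocator;
    var si = StringInterner{};
    defer si.deinit(gpa);

    const foo = try si.intern(gpa, "foo");
    const foo_again = try si.intern(gpa, "foo");
    try std.testing.expect(foo == foo_again); // same id for the same string
    const empty = try si.intern(gpa, "");
    try std.testing.expect(empty == .empty); // the empty string is special-cased

    const mapper = si.getSlowTypeMapper(); // no deinit needed for the slow mapper
    try std.testing.expectEqualStrings("foo", mapper.lookup(foo));
}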

375
deps/aro/SymbolStack.zig vendored Normal file
View File

@@ -0,0 +1,375 @@
const std = @import("std");
const mem = std.mem;
const Allocator = mem.Allocator;
const assert = std.debug.assert;
const Tree = @import("Tree.zig");
const Token = Tree.Token;
const TokenIndex = Tree.TokenIndex;
const NodeIndex = Tree.NodeIndex;
const Type = @import("Type.zig");
const Parser = @import("Parser.zig");
const Value = @import("Value.zig");
const StringId = @import("StringInterner.zig").StringId;
const SymbolStack = @This();
pub const Symbol = struct {
name: StringId,
ty: Type,
tok: TokenIndex,
node: NodeIndex = .none,
kind: Kind,
val: Value,
};
pub const Kind = enum {
typedef,
@"struct",
@"union",
@"enum",
decl,
def,
enumeration,
constexpr,
};
syms: std.MultiArrayList(Symbol) = .{},
scopes: std.ArrayListUnmanaged(u32) = .{},
pub fn deinit(s: *SymbolStack, gpa: Allocator) void {
s.syms.deinit(gpa);
s.scopes.deinit(gpa);
s.* = undefined;
}
pub fn scopeEnd(s: SymbolStack) u32 {
if (s.scopes.items.len == 0) return 0;
return s.scopes.items[s.scopes.items.len - 1];
}
pub fn pushScope(s: *SymbolStack, p: *Parser) !void {
try s.scopes.append(p.pp.comp.gpa, @intCast(s.syms.len));
}
pub fn popScope(s: *SymbolStack) void {
s.syms.len = s.scopes.pop();
}
pub fn findTypedef(s: *SymbolStack, p: *Parser, name: StringId, name_tok: TokenIndex, no_type_yet: bool) !?Symbol {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
var i = s.syms.len;
while (i > 0) {
i -= 1;
switch (kinds[i]) {
.typedef => if (names[i] == name) return s.syms.get(i),
.@"struct" => if (names[i] == name) {
if (no_type_yet) return null;
try p.errStr(.must_use_struct, name_tok, p.tokSlice(name_tok));
return s.syms.get(i);
},
.@"union" => if (names[i] == name) {
if (no_type_yet) return null;
try p.errStr(.must_use_union, name_tok, p.tokSlice(name_tok));
return s.syms.get(i);
},
.@"enum" => if (names[i] == name) {
if (no_type_yet) return null;
try p.errStr(.must_use_enum, name_tok, p.tokSlice(name_tok));
return s.syms.get(i);
},
.def, .decl, .constexpr => if (names[i] == name) return null,
else => {},
}
}
return null;
}
pub fn findSymbol(s: *SymbolStack, name: StringId) ?Symbol {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
var i = s.syms.len;
while (i > 0) {
i -= 1;
switch (kinds[i]) {
.def, .decl, .enumeration, .constexpr => if (names[i] == name) return s.syms.get(i),
else => {},
}
}
return null;
}
pub fn findTag(
s: *SymbolStack,
p: *Parser,
name: StringId,
kind: Token.Id,
name_tok: TokenIndex,
next_tok_id: Token.Id,
) !?Symbol {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
// `tag Name;` should always result in a new type if in a new scope.
const end = if (next_tok_id == .semicolon) s.scopeEnd() else 0;
var i = s.syms.len;
while (i > end) {
i -= 1;
switch (kinds[i]) {
.@"enum" => if (names[i] == name) {
if (kind == .keyword_enum) return s.syms.get(i);
break;
},
.@"struct" => if (names[i] == name) {
if (kind == .keyword_struct) return s.syms.get(i);
break;
},
.@"union" => if (names[i] == name) {
if (kind == .keyword_union) return s.syms.get(i);
break;
},
else => {},
}
} else return null;
if (i < s.scopeEnd()) return null;
try p.errStr(.wrong_tag, name_tok, p.tokSlice(name_tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
return null;
}
pub fn defineTypedef(
s: *SymbolStack,
p: *Parser,
name: StringId,
ty: Type,
tok: TokenIndex,
node: NodeIndex,
) !void {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
const end = s.scopeEnd();
var i = s.syms.len;
while (i > end) {
i -= 1;
switch (kinds[i]) {
.typedef => if (names[i] == name) {
const prev_ty = s.syms.items(.ty)[i];
if (ty.eql(prev_ty, p.pp.comp, true)) break;
try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev_ty));
const previous_tok = s.syms.items(.tok)[i];
if (previous_tok != 0) try p.errTok(.previous_definition, previous_tok);
break;
},
else => {},
}
}
try s.syms.append(p.pp.comp.gpa, .{
.kind = .typedef,
.name = name,
.tok = tok,
.ty = ty,
.node = node,
.val = .{},
});
}
pub fn defineSymbol(
s: *SymbolStack,
p: *Parser,
name: StringId,
ty: Type,
tok: TokenIndex,
node: NodeIndex,
val: Value,
constexpr: bool,
) !void {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
const end = s.scopeEnd();
var i = s.syms.len;
while (i > end) {
i -= 1;
switch (kinds[i]) {
.enumeration => if (names[i] == name) {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
break;
},
.decl => if (names[i] == name) {
const prev_ty = s.syms.items(.ty)[i];
if (!ty.eql(prev_ty, p.pp.comp, true)) { // TODO adjusted equality check
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
}
break;
},
.def, .constexpr => if (names[i] == name) {
try p.errStr(.redefinition, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
break;
},
else => {},
}
}
try s.syms.append(p.pp.comp.gpa, .{
.kind = if (constexpr) .constexpr else .def,
.name = name,
.tok = tok,
.ty = ty,
.node = node,
.val = val,
});
}
pub fn declareSymbol(
s: *SymbolStack,
p: *Parser,
name: StringId,
ty: Type,
tok: TokenIndex,
node: NodeIndex,
) !void {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
const end = s.scopeEnd();
var i = s.syms.len;
while (i > end) {
i -= 1;
switch (kinds[i]) {
.enumeration => if (names[i] == name) {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
break;
},
.decl => if (names[i] == name) {
const prev_ty = s.syms.items(.ty)[i];
if (!ty.eql(prev_ty, p.pp.comp, true)) { // TODO adjusted equality check
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
}
break;
},
.def, .constexpr => if (names[i] == name) {
const prev_ty = s.syms.items(.ty)[i];
if (!ty.eql(prev_ty, p.pp.comp, true)) { // TODO adjusted equality check
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
break;
}
return;
},
else => {},
}
}
try s.syms.append(p.pp.comp.gpa, .{
.kind = .decl,
.name = name,
.tok = tok,
.ty = ty,
.node = node,
.val = .{},
});
}
pub fn defineParam(s: *SymbolStack, p: *Parser, name: StringId, ty: Type, tok: TokenIndex) !void {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
const end = s.scopeEnd();
var i = s.syms.len;
while (i > end) {
i -= 1;
switch (kinds[i]) {
.enumeration, .decl, .def, .constexpr => if (names[i] == name) {
try p.errStr(.redefinition_of_parameter, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
break;
},
else => {},
}
}
if (ty.is(.fp16) and !p.comp.hasHalfPrecisionFloatABI()) {
try p.errStr(.suggest_pointer_for_invalid_fp16, tok, "parameters");
}
try s.syms.append(p.pp.comp.gpa, .{
.kind = .def,
.name = name,
.tok = tok,
.ty = ty,
.val = .{},
});
}
pub fn defineTag(
s: *SymbolStack,
p: *Parser,
name: StringId,
kind: Token.Id,
tok: TokenIndex,
) !?Symbol {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
const end = s.scopeEnd();
var i = s.syms.len;
while (i > end) {
i -= 1;
switch (kinds[i]) {
.@"enum" => if (names[i] == name) {
if (kind == .keyword_enum) return s.syms.get(i);
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
return null;
},
.@"struct" => if (names[i] == name) {
if (kind == .keyword_struct) return s.syms.get(i);
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
return null;
},
.@"union" => if (names[i] == name) {
if (kind == .keyword_union) return s.syms.get(i);
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
return null;
},
else => {},
}
}
return null;
}
pub fn defineEnumeration(
s: *SymbolStack,
p: *Parser,
name: StringId,
ty: Type,
tok: TokenIndex,
val: Value,
) !void {
const kinds = s.syms.items(.kind);
const names = s.syms.items(.name);
const end = s.scopeEnd();
var i = s.syms.len;
while (i > end) {
i -= 1;
switch (kinds[i]) {
.enumeration => if (names[i] == name) {
try p.errStr(.redefinition, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
return;
},
.decl, .def, .constexpr => if (names[i] == name) {
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
try p.errTok(.previous_definition, s.syms.items(.tok)[i]);
return;
},
else => {},
}
}
try s.syms.append(p.pp.comp.gpa, .{
.kind = .enumeration,
.name = name,
.tok = tok,
.ty = ty,
.val = val,
});
}
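The scope bookkeeping above is just a saved-length stack: `pushScope` records `syms.len` and `popScope` truncates back to it. A minimal sketch, bypassing the `*Parser` argument that `pushScope` normally takes (illustration only):

const std = @import("std");
const SymbolStack = @import("SymbolStack.zig");

test "scopes truncate the symbol list (illustrative)" {
    const gpa = std.testing.allocator;
    var s = SymbolStack{};
    defer s.deinit(gpa);

    try s.scopes.append(gpa, @intCast(s.syms.len)); // what pushScope does via p.pp.comp.gpa
    // ... symbols appended while parsing a block scope would go here ...
    s.popScope(); // drops everything appended since the matching push
    try std.testing.expectEqual(@as(usize, 0), s.syms.len);
}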

2140
deps/aro/Tokenizer.zig vendored Normal file

File diff suppressed because it is too large

493
deps/aro/Toolchain.zig vendored Normal file
View File

@@ -0,0 +1,493 @@
const std = @import("std");
const Driver = @import("Driver.zig");
const Compilation = @import("Compilation.zig");
const util = @import("util.zig");
const mem = std.mem;
const system_defaults = @import("system_defaults");
const target_util = @import("target.zig");
const Linux = @import("toolchains/Linux.zig");
const Multilib = @import("Driver/Multilib.zig");
const Filesystem = @import("Driver/Filesystem.zig").Filesystem;
const Toolchain = @This();
pub const PathList = std.ArrayListUnmanaged([]const u8);
pub const RuntimeLibKind = enum {
compiler_rt,
libgcc,
};
pub const FileKind = enum {
object,
static,
shared,
};
pub const LibGCCKind = enum {
unspecified,
static,
shared,
};
pub const UnwindLibKind = enum {
none,
compiler_rt,
libgcc,
};
const Inner = union(enum) {
uninitialized,
linux: Linux,
unknown: void,
fn deinit(self: *Inner, allocator: mem.Allocator) void {
switch (self.*) {
.linux => |*linux| linux.deinit(allocator),
.uninitialized, .unknown => {},
}
}
};
filesystem: Filesystem = .{ .real = {} },
driver: *Driver,
arena: mem.Allocator,
/// The list of toolchain specific path prefixes to search for libraries.
library_paths: PathList = .{},
/// The list of toolchain specific path prefixes to search for files.
file_paths: PathList = .{},
/// The list of toolchain specific path prefixes to search for programs.
program_paths: PathList = .{},
selected_multilib: Multilib = .{},
inner: Inner = .{ .uninitialized = {} },
pub fn getTarget(tc: *const Toolchain) std.Target {
return tc.driver.comp.target;
}
fn getDefaultLinker(tc: *const Toolchain) []const u8 {
return switch (tc.inner) {
.uninitialized => unreachable,
.linux => |linux| linux.getDefaultLinker(tc.getTarget()),
.unknown => "ld",
};
}
/// Call this after driver has finished parsing command line arguments to find the toolchain
pub fn discover(tc: *Toolchain) !void {
if (tc.inner != .uninitialized) return;
const target = tc.getTarget();
tc.inner = switch (target.os.tag) {
.elfiamcu,
.linux,
=> if (target.cpu.arch == .hexagon)
.{ .unknown = {} } // TODO
else if (target.cpu.arch.isMIPS())
.{ .unknown = {} } // TODO
else if (target.cpu.arch.isPPC())
.{ .unknown = {} } // TODO
else if (target.cpu.arch == .ve)
.{ .unknown = {} } // TODO
else
.{ .linux = .{} },
else => .{ .unknown = {} }, // TODO
};
return switch (tc.inner) {
.uninitialized => unreachable,
.linux => |*linux| linux.discover(tc),
.unknown => {},
};
}
pub fn deinit(tc: *Toolchain) void {
const gpa = tc.driver.comp.gpa;
tc.inner.deinit(gpa);
tc.library_paths.deinit(gpa);
tc.file_paths.deinit(gpa);
tc.program_paths.deinit(gpa);
}
/// Write linker path to `buf` and return a slice of it
pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
// --ld-path= takes precedence over -fuse-ld= and specifies the executable
// name. -B, COMPILER_PATH and PATH are consulted if the value does not
// contain a path component separator.
// -fuse-ld=lld can be used with --ld-path= to indicate that the binary
// that --ld-path= points to is lld.
const use_linker = tc.driver.use_linker orelse system_defaults.linker;
if (tc.driver.linker_path) |ld_path| {
var path = ld_path;
if (path.len > 0) {
if (std.fs.path.dirname(path) == null) {
path = tc.getProgramPath(path, buf);
}
if (tc.filesystem.canExecute(path)) {
return path;
}
}
return tc.driver.fatal(
"invalid linker name in argument '--ld-path={s}'",
.{path},
);
}
// If we're passed -fuse-ld= with no argument, or with the argument ld,
// then use whatever the default system linker is.
if (use_linker.len == 0 or mem.eql(u8, use_linker, "ld")) {
const default = tc.getDefaultLinker();
if (std.fs.path.isAbsolute(default)) return default;
return tc.getProgramPath(default, buf);
}
// Extending -fuse-ld= to an absolute or relative path is unexpected. Checking
// for the linker flavor is brittle. In addition, prepending "ld." or "ld64."
// to a relative path is surprising. This is more complex due to priorities
// among -B, COMPILER_PATH and PATH. --ld-path= should be used instead.
if (mem.indexOfScalar(u8, use_linker, '/') != null) {
try tc.driver.comp.diag.add(.{ .tag = .fuse_ld_path }, &.{});
}
if (std.fs.path.isAbsolute(use_linker)) {
if (tc.filesystem.canExecute(use_linker)) {
return use_linker;
}
} else {
var linker_name = try std.ArrayList(u8).initCapacity(tc.driver.comp.gpa, 5 + use_linker.len); // "ld64." ++ use_linker
defer linker_name.deinit();
if (tc.getTarget().isDarwin()) {
linker_name.appendSliceAssumeCapacity("ld64.");
} else {
linker_name.appendSliceAssumeCapacity("ld.");
}
linker_name.appendSliceAssumeCapacity(use_linker);
const linker_path = tc.getProgramPath(linker_name.items, buf);
if (tc.filesystem.canExecute(linker_path)) {
return linker_path;
}
}
if (tc.driver.use_linker) |linker| {
return tc.driver.fatal(
"invalid linker name in argument '-fuse-ld={s}'",
.{linker},
);
}
const default_linker = tc.getDefaultLinker();
return tc.getProgramPath(default_linker, buf);
}
const TargetSpecificToolName = std.BoundedArray(u8, 64);
/// If an explicit target is provided, also check the prefixed tool-specific name
/// TODO: this isn't exactly right since our target names don't necessarily match up
/// with GCC's.
/// For example the Zig target `arm-freestanding-eabi` would need the `arm-none-eabi` tools
fn possibleProgramNames(raw_triple: ?[]const u8, name: []const u8, target_specific: *TargetSpecificToolName) std.BoundedArray([]const u8, 2) {
var possible_names: std.BoundedArray([]const u8, 2) = .{};
if (raw_triple) |triple| {
const w = target_specific.writer();
if (w.print("{s}-{s}", .{ triple, name })) {
possible_names.appendAssumeCapacity(target_specific.constSlice());
} else |_| {}
}
possible_names.appendAssumeCapacity(name);
return possible_names;
}
/// Add toolchain `file_paths` to argv as `-L` arguments
pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
try argv.ensureUnusedCapacity(tc.file_paths.items.len);
var bytes_needed: usize = 0;
for (tc.file_paths.items) |path| {
bytes_needed += path.len + 2; // +2 for `-L`
}
var bytes = try tc.arena.alloc(u8, bytes_needed);
var index: usize = 0;
for (tc.file_paths.items) |path| {
@memcpy(bytes[index..][0..2], "-L");
@memcpy(bytes[index + 2 ..][0..path.len], path);
argv.appendAssumeCapacity(bytes[index..][0 .. path.len + 2]);
index += path.len + 2;
}
}
/// Search for an executable called `name` or `{triple}-{name}` in program_paths and the $PATH environment variable
/// If not found there, just use `name`
/// Writes the result to `buf` and returns a slice of it
fn getProgramPath(tc: *const Toolchain, name: []const u8, buf: []u8) []const u8 {
var path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
var tool_specific_name: TargetSpecificToolName = .{};
const possible_names = possibleProgramNames(tc.driver.raw_target_triple, name, &tool_specific_name);
for (possible_names.constSlice()) |tool_name| {
for (tc.program_paths.items) |program_path| {
defer fib.reset();
const candidate = std.fs.path.join(fib.allocator(), &.{ program_path, tool_name }) catch continue;
if (tc.filesystem.canExecute(candidate) and candidate.len <= buf.len) {
@memcpy(buf[0..candidate.len], candidate);
return buf[0..candidate.len];
}
}
return tc.filesystem.findProgramByName(tc.driver.comp.gpa, name, tc.driver.comp.environment.path, buf) orelse continue;
}
@memcpy(buf[0..name.len], name);
return buf[0..name.len];
}
pub fn getSysroot(tc: *const Toolchain) []const u8 {
return tc.driver.sysroot orelse system_defaults.sysroot;
}
/// Search for `name` in a variety of places
/// TODO: cache results based on `name` so we're not repeatedly allocating the same strings?
pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 {
var path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
const allocator = fib.allocator();
const sysroot = tc.getSysroot();
// todo check resource dir
// todo check compiler RT path
const aro_dir = std.fs.path.dirname(tc.driver.aro_name) orelse "";
const candidate = try std.fs.path.join(allocator, &.{ aro_dir, "..", name });
if (tc.filesystem.exists(candidate)) {
return tc.arena.dupe(u8, candidate);
}
if (tc.searchPaths(&fib, sysroot, tc.library_paths.items, name)) |path| {
return tc.arena.dupe(u8, path);
}
if (tc.searchPaths(&fib, sysroot, tc.file_paths.items, name)) |path| {
return try tc.arena.dupe(u8, path);
}
return name;
}
/// Search a list of `path_prefixes` for the existence of `name`
/// Assumes that `fib` is a fixed-buffer allocator, so does not free joined path candidates
fn searchPaths(tc: *const Toolchain, fib: *std.heap.FixedBufferAllocator, sysroot: []const u8, path_prefixes: []const []const u8, name: []const u8) ?[]const u8 {
for (path_prefixes) |path| {
fib.reset();
if (path.len == 0) continue;
const candidate = if (path[0] == '=')
std.fs.path.join(fib.allocator(), &.{ sysroot, path[1..], name }) catch continue
else
std.fs.path.join(fib.allocator(), &.{ path, name }) catch continue;
if (tc.filesystem.exists(candidate)) {
return candidate;
}
}
return null;
}
const PathKind = enum {
library,
file,
program,
};
/// Join `components` into a path. If the path exists, dupe it into the toolchain arena and
/// add it to the specified path list.
pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void {
var path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
const candidate = try std.fs.path.join(fib.allocator(), components);
if (tc.filesystem.exists(candidate)) {
const duped = try tc.arena.dupe(u8, candidate);
const dest = switch (dest_kind) {
.library => &tc.library_paths,
.file => &tc.file_paths,
.program => &tc.program_paths,
};
try dest.append(tc.driver.comp.gpa, duped);
}
}
/// Join `components` using the toolchain arena and add the resulting path to `dest_kind`. Does not check
/// whether the path actually exists
pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void {
const full_path = try std.fs.path.join(tc.arena, components);
const dest = switch (dest_kind) {
.library => &tc.library_paths,
.file => &tc.file_paths,
.program => &tc.program_paths,
};
try dest.append(tc.driver.comp.gpa, full_path);
}
/// Add linker args to `argv`. Does not add path to linker executable as first item; that must be handled separately
/// Items added to `argv` will be string literals or owned by `tc.arena` so they must not be individually freed
pub fn buildLinkerArgs(tc: *Toolchain, argv: *std.ArrayList([]const u8)) !void {
return switch (tc.inner) {
.uninitialized => unreachable,
.linux => |*linux| linux.buildLinkerArgs(tc, argv),
.unknown => @panic("This toolchain does not support linking yet"),
};
}
fn getDefaultRuntimeLibKind(tc: *const Toolchain) RuntimeLibKind {
if (tc.getTarget().isAndroid()) {
return .compiler_rt;
}
return .libgcc;
}
pub fn getRuntimeLibKind(tc: *const Toolchain) RuntimeLibKind {
const libname = tc.driver.rtlib orelse system_defaults.rtlib;
if (mem.eql(u8, libname, "compiler-rt"))
return .compiler_rt
else if (mem.eql(u8, libname, "libgcc"))
return .libgcc
else
return tc.getDefaultRuntimeLibKind();
}
/// TODO
pub fn getCompilerRt(tc: *const Toolchain, component: []const u8, file_kind: FileKind) ![]const u8 {
_ = file_kind;
_ = component;
_ = tc;
return "";
}
fn getLibGCCKind(tc: *const Toolchain) LibGCCKind {
const target = tc.getTarget();
if (tc.driver.static_libgcc or tc.driver.static or tc.driver.static_pie or target.isAndroid()) {
return .static;
}
if (tc.driver.shared_libgcc) {
return .shared;
}
return .unspecified;
}
fn getUnwindLibKind(tc: *const Toolchain) !UnwindLibKind {
const libname = tc.driver.unwindlib orelse system_defaults.unwindlib;
if (libname.len == 0 or mem.eql(u8, libname, "platform")) {
switch (tc.getRuntimeLibKind()) {
.compiler_rt => {
const target = tc.getTarget();
if (target.isAndroid() or target.os.tag == .aix) {
return .compiler_rt;
} else {
return .none;
}
},
.libgcc => return .libgcc,
}
} else if (mem.eql(u8, libname, "none")) {
return .none;
} else if (mem.eql(u8, libname, "libgcc")) {
return .libgcc;
} else if (mem.eql(u8, libname, "libunwind")) {
if (tc.getRuntimeLibKind() == .libgcc) {
try tc.driver.comp.diag.add(.{ .tag = .incompatible_unwindlib }, &.{});
}
return .compiler_rt;
} else {
unreachable;
}
}
fn getAsNeededOption(is_solaris: bool, needed: bool) []const u8 {
if (is_solaris) {
return if (needed) "-zignore" else "-zrecord";
} else {
return if (needed) "--as-needed" else "--no-as-needed";
}
}
fn addUnwindLibrary(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
const unw = try tc.getUnwindLibKind();
const target = tc.getTarget();
if ((target.isAndroid() and unw == .libgcc) or
target.os.tag == .elfiamcu or
target.ofmt == .wasm or
target_util.isWindowsMSVCEnvironment(target) or
unw == .none) return;
const lgk = tc.getLibGCCKind();
const as_needed = lgk == .unspecified and !target.isAndroid() and !target_util.isCygwinMinGW(target) and target.os.tag != .aix;
if (as_needed) {
try argv.append(getAsNeededOption(target.os.tag == .solaris, true));
}
switch (unw) {
.none => return,
.libgcc => if (lgk == .static) try argv.append("-lgcc_eh") else try argv.append("-lgcc_s"),
.compiler_rt => if (target.os.tag == .aix) {
if (lgk != .static) {
try argv.append("-lunwind");
}
} else if (lgk == .static) {
try argv.append("-l:libunwind.a");
} else if (lgk == .shared) {
if (target_util.isCygwinMinGW(target)) {
try argv.append("-l:libunwind.dll.a");
} else {
try argv.append("-l:libunwind.so");
}
} else {
try argv.append("-lunwind");
},
}
if (as_needed) {
try argv.append(getAsNeededOption(target.os.tag == .solaris, false));
}
}
fn addLibGCC(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
const libgcc_kind = tc.getLibGCCKind();
if (libgcc_kind == .static or libgcc_kind == .unspecified) {
try argv.append("-lgcc");
}
try tc.addUnwindLibrary(argv);
if (libgcc_kind == .shared) {
try argv.append("-lgcc");
}
}
pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.ArrayList([]const u8)) !void {
const target = tc.getTarget();
const rlt = tc.getRuntimeLibKind();
switch (rlt) {
.compiler_rt => {
// TODO
},
.libgcc => {
if (target_util.isKnownWindowsMSVCEnvironment(target)) {
const rtlib_str = tc.driver.rtlib orelse system_defaults.rtlib;
if (!mem.eql(u8, rtlib_str, "platform")) {
try tc.driver.comp.diag.add(.{ .tag = .unsupported_rtlib_gcc, .extra = .{ .str = "MSVC" } }, &.{});
}
} else {
try tc.addLibGCC(argv);
}
},
}
if (target.isAndroid() and !tc.driver.static and !tc.driver.static_pie) {
try argv.append("-ldl");
}
}
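To make the linker-selection precedence in getLinkerPath concrete with a hypothetical linux-gnu invocation: `--ld-path=/opt/bin/mold` is used verbatim provided it is executable (resolved via getProgramPath when it has no directory component); otherwise `-fuse-ld=lld` triggers a search of `program_paths` and `$PATH` for `ld.lld` (`ld64.lld` on Darwin); with neither flag, the toolchain default from getDefaultLinker (e.g. `ld`) is resolved the same way.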

1312
deps/aro/Tree.zig vendored Normal file

File diff suppressed because it is too large

2692
deps/aro/Type.zig vendored Normal file

File diff suppressed because it is too large

601
deps/aro/Value.zig vendored Normal file
View File

@@ -0,0 +1,601 @@
const std = @import("std");
const assert = std.debug.assert;
const Compilation = @import("Compilation.zig");
const Type = @import("Type.zig");
const Value = @This();
pub const ByteRange = struct {
start: u32,
end: u32,
pub fn len(self: ByteRange) u32 {
return self.end - self.start;
}
pub fn trim(self: ByteRange, amount: u32) ByteRange {
std.debug.assert(self.start <= self.end - amount);
return .{ .start = self.start, .end = self.end - amount };
}
pub fn slice(self: ByteRange, all_bytes: []const u8) []const u8 {
return all_bytes[self.start..self.end];
}
};
tag: Tag = .unavailable,
data: union {
none: void,
int: u64,
float: f64,
bytes: ByteRange,
} = .{ .none = {} },
const Tag = enum {
unavailable,
nullptr_t,
/// int is used to store integer, boolean and pointer values
int,
float,
bytes,
};
pub fn zero(v: Value) Value {
return switch (v.tag) {
.int => int(0),
.float => float(0),
else => unreachable,
};
}
pub fn one(v: Value) Value {
return switch (v.tag) {
.int => int(1),
.float => float(1),
else => unreachable,
};
}
pub fn int(v: anytype) Value {
if (@TypeOf(v) == comptime_int or @typeInfo(@TypeOf(v)).Int.signedness == .unsigned)
return .{ .tag = .int, .data = .{ .int = v } }
else
return .{ .tag = .int, .data = .{ .int = @bitCast(@as(i64, v)) } };
}
pub fn float(v: anytype) Value {
return .{ .tag = .float, .data = .{ .float = v } };
}
pub fn bytes(start: u32, end: u32) Value {
return .{ .tag = .bytes, .data = .{ .bytes = .{ .start = start, .end = end } } };
}
pub fn signExtend(v: Value, old_ty: Type, comp: *Compilation) i64 {
const size = old_ty.sizeof(comp).?;
return switch (size) {
1 => v.getInt(i8),
2 => v.getInt(i16),
4 => v.getInt(i32),
8 => v.getInt(i64),
else => unreachable,
};
}
/// Number of bits needed to hold `v` which is of type `ty`.
/// Asserts that `v` is not negative
pub fn minUnsignedBits(v: Value, ty: Type, comp: *const Compilation) usize {
assert(v.compare(.gte, Value.int(0), ty, comp));
return switch (ty.sizeof(comp).?) {
1 => 8 - @clz(v.getInt(u8)),
2 => 16 - @clz(v.getInt(u16)),
4 => 32 - @clz(v.getInt(u32)),
8 => 64 - @clz(v.getInt(u64)),
else => unreachable,
};
}
test "minUnsignedBits" {
const Test = struct {
fn checkIntBits(comp: *const Compilation, specifier: Type.Specifier, v: u64, expected: usize) !void {
const val = Value.int(v);
try std.testing.expectEqual(expected, val.minUnsignedBits(.{ .specifier = specifier }, comp));
}
};
var comp = Compilation.init(std.testing.allocator);
defer comp.deinit();
comp.target = (try std.zig.CrossTarget.parse(.{ .arch_os_abi = "x86_64-linux-gnu" })).toTarget();
try Test.checkIntBits(&comp, .int, 0, 0);
try Test.checkIntBits(&comp, .int, 1, 1);
try Test.checkIntBits(&comp, .int, 2, 2);
try Test.checkIntBits(&comp, .int, std.math.maxInt(i8), 7);
try Test.checkIntBits(&comp, .int, std.math.maxInt(u8), 8);
try Test.checkIntBits(&comp, .int, std.math.maxInt(i16), 15);
try Test.checkIntBits(&comp, .int, std.math.maxInt(u16), 16);
try Test.checkIntBits(&comp, .int, std.math.maxInt(i32), 31);
try Test.checkIntBits(&comp, .uint, std.math.maxInt(u32), 32);
try Test.checkIntBits(&comp, .long, std.math.maxInt(i64), 63);
try Test.checkIntBits(&comp, .ulong, std.math.maxInt(u64), 64);
try Test.checkIntBits(&comp, .long_long, std.math.maxInt(i64), 63);
try Test.checkIntBits(&comp, .ulong_long, std.math.maxInt(u64), 64);
}
/// Minimum number of bits needed to represent `v` in 2's complement notation
/// Asserts that `v` is negative.
pub fn minSignedBits(v: Value, ty: Type, comp: *const Compilation) usize {
assert(v.compare(.lt, Value.int(0), ty, comp));
return switch (ty.sizeof(comp).?) {
1 => 8 - @clz(~v.getInt(u8)) + 1,
2 => 16 - @clz(~v.getInt(u16)) + 1,
4 => 32 - @clz(~v.getInt(u32)) + 1,
8 => 64 - @clz(~v.getInt(u64)) + 1,
else => unreachable,
};
}
test "minSignedBits" {
const Test = struct {
fn checkIntBits(comp: *const Compilation, specifier: Type.Specifier, v: i64, expected: usize) !void {
const val = Value.int(v);
try std.testing.expectEqual(expected, val.minSignedBits(.{ .specifier = specifier }, comp));
}
};
var comp = Compilation.init(std.testing.allocator);
defer comp.deinit();
comp.target = (try std.zig.CrossTarget.parse(.{ .arch_os_abi = "x86_64-linux-gnu" })).toTarget();
for ([_]Type.Specifier{ .int, .long, .long_long }) |specifier| {
try Test.checkIntBits(&comp, specifier, -1, 1);
try Test.checkIntBits(&comp, specifier, -2, 2);
try Test.checkIntBits(&comp, specifier, -10, 5);
try Test.checkIntBits(&comp, specifier, -101, 8);
try Test.checkIntBits(&comp, specifier, std.math.minInt(i8), 8);
try Test.checkIntBits(&comp, specifier, std.math.minInt(i16), 16);
try Test.checkIntBits(&comp, specifier, std.math.minInt(i32), 32);
}
try Test.checkIntBits(&comp, .long, std.math.minInt(i64), 64);
try Test.checkIntBits(&comp, .long_long, std.math.minInt(i64), 64);
}
pub const FloatToIntChangeKind = enum {
/// value did not change
none,
/// floating point number too small or large for destination integer type
out_of_range,
/// tried to convert a NaN or Infinity
overflow,
/// fractional value was converted to zero
nonzero_to_zero,
/// fractional part truncated
value_changed,
};
fn floatToIntExtra(comptime FloatTy: type, int_ty_signedness: std.builtin.Signedness, int_ty_size: u16, v: *Value) FloatToIntChangeKind {
const float_val = v.getFloat(FloatTy);
const was_zero = float_val == 0;
const had_fraction = std.math.modf(float_val).fpart != 0;
switch (int_ty_signedness) {
inline else => |signedness| switch (int_ty_size) {
inline 1, 2, 4, 8 => |bytecount| {
const IntTy = std.meta.Int(signedness, bytecount * 8);
const intVal = std.math.lossyCast(IntTy, float_val);
v.* = int(intVal);
if (!was_zero and v.isZero()) return .nonzero_to_zero;
if (float_val <= std.math.minInt(IntTy) or float_val >= std.math.maxInt(IntTy)) return .out_of_range;
if (had_fraction) return .value_changed;
return .none;
},
else => unreachable,
},
}
}
/// Converts the stored value from a float to an integer.
/// `.unavailable` value remains unchanged.
pub fn floatToInt(v: *Value, old_ty: Type, new_ty: Type, comp: *Compilation) FloatToIntChangeKind {
assert(old_ty.isFloat());
if (v.tag == .unavailable) return .none;
if (new_ty.is(.bool)) {
const was_zero = v.isZero();
const was_one = v.getFloat(f64) == 1.0;
v.toBool();
if (was_zero or was_one) return .none;
return .value_changed;
} else if (new_ty.isUnsignedInt(comp) and v.data.float < 0) {
v.* = int(0);
return .out_of_range;
} else if (!std.math.isFinite(v.data.float)) {
v.tag = .unavailable;
return .overflow;
}
const old_size = old_ty.sizeof(comp).?;
const new_size: u16 = @intCast(new_ty.sizeof(comp).?);
if (new_ty.isUnsignedInt(comp)) switch (old_size) {
1 => unreachable, // promoted to int
2 => unreachable, // promoted to int
4 => return floatToIntExtra(f32, .unsigned, new_size, v),
8 => return floatToIntExtra(f64, .unsigned, new_size, v),
else => unreachable,
} else switch (old_size) {
1 => unreachable, // promoted to int
2 => unreachable, // promoted to int
4 => return floatToIntExtra(f32, .signed, new_size, v),
8 => return floatToIntExtra(f64, .signed, new_size, v),
else => unreachable,
}
}
/// Converts the stored value from an integer to a float.
/// `.unavailable` value remains unchanged.
pub fn intToFloat(v: *Value, old_ty: Type, new_ty: Type, comp: *Compilation) void {
assert(old_ty.isInt());
if (v.tag == .unavailable) return;
if (!new_ty.isReal() or new_ty.sizeof(comp).? > 8) {
v.tag = .unavailable;
} else if (old_ty.isUnsignedInt(comp)) {
v.* = float(@as(f64, @floatFromInt(v.data.int)));
} else {
v.* = float(@as(f64, @floatFromInt(@as(i64, @bitCast(v.data.int)))));
}
}
/// Truncates or extends bits based on type.
/// old_ty is only used for size.
pub fn intCast(v: *Value, old_ty: Type, new_ty: Type, comp: *Compilation) void {
// assert(old_ty.isInt() and new_ty.isInt());
if (v.tag == .unavailable) return;
if (new_ty.is(.bool)) return v.toBool();
if (!old_ty.isUnsignedInt(comp)) {
const size = new_ty.sizeof(comp).?;
switch (size) {
1 => v.* = int(@as(u8, @truncate(@as(u64, @bitCast(v.signExtend(old_ty, comp)))))),
2 => v.* = int(@as(u16, @truncate(@as(u64, @bitCast(v.signExtend(old_ty, comp)))))),
4 => v.* = int(@as(u32, @truncate(@as(u64, @bitCast(v.signExtend(old_ty, comp)))))),
8 => return,
else => unreachable,
}
}
}
/// Converts the stored value from one floating-point type to another.
/// `.unavailable` value remains unchanged.
pub fn floatCast(v: *Value, old_ty: Type, new_ty: Type, comp: *Compilation) void {
assert(old_ty.isFloat() and new_ty.isFloat());
if (v.tag == .unavailable) return;
const size = new_ty.sizeof(comp).?;
if (!new_ty.isReal() or size > 8) {
v.tag = .unavailable;
} else if (size == 4) {
v.* = float(@as(f32, @floatCast(v.data.float)));
}
}
/// Truncates data.int to one bit
pub fn toBool(v: *Value) void {
if (v.tag == .unavailable) return;
const res = v.getBool();
v.* = int(@intFromBool(res));
}
pub fn isZero(v: Value) bool {
return switch (v.tag) {
.unavailable => false,
.nullptr_t => false,
.int => v.data.int == 0,
.float => v.data.float == 0,
.bytes => false,
};
}
pub fn getBool(v: Value) bool {
return switch (v.tag) {
.unavailable => unreachable,
.nullptr_t => false,
.int => v.data.int != 0,
.float => v.data.float != 0,
.bytes => true,
};
}
pub fn getInt(v: Value, comptime T: type) T {
if (T == u64) return v.data.int;
return if (@typeInfo(T).Int.signedness == .unsigned)
@truncate(v.data.int)
else
@truncate(@as(i64, @bitCast(v.data.int)));
}
pub fn getFloat(v: Value, comptime T: type) T {
if (T == f64) return v.data.float;
return @floatCast(v.data.float);
}
const bin_overflow = struct {
inline fn addInt(comptime T: type, out: *Value, a: Value, b: Value) bool {
const a_val = a.getInt(T);
const b_val = b.getInt(T);
const sum, const overflowed = @addWithOverflow(a_val, b_val);
out.* = int(sum);
return overflowed != 0;
}
inline fn addFloat(comptime T: type, aa: Value, bb: Value) Value {
const a_val = aa.getFloat(T);
const b_val = bb.getFloat(T);
return float(a_val + b_val);
}
inline fn subInt(comptime T: type, out: *Value, a: Value, b: Value) bool {
const a_val = a.getInt(T);
const b_val = b.getInt(T);
const difference, const overflowed = @subWithOverflow(a_val, b_val);
out.* = int(difference);
return overflowed != 0;
}
inline fn subFloat(comptime T: type, aa: Value, bb: Value) Value {
const a_val = aa.getFloat(T);
const b_val = bb.getFloat(T);
return float(a_val - b_val);
}
inline fn mulInt(comptime T: type, out: *Value, a: Value, b: Value) bool {
const a_val = a.getInt(T);
const b_val = b.getInt(T);
const product, const overflowed = @mulWithOverflow(a_val, b_val);
out.* = int(product);
return overflowed != 0;
}
inline fn mulFloat(comptime T: type, aa: Value, bb: Value) Value {
const a_val = aa.getFloat(T);
const b_val = bb.getFloat(T);
return float(a_val * b_val);
}
const FT = fn (*Value, Value, Value, Type, *Compilation) bool;
fn getOp(comptime intFunc: anytype, comptime floatFunc: anytype) FT {
return struct {
fn op(res: *Value, a: Value, b: Value, ty: Type, comp: *Compilation) bool {
const size = ty.sizeof(comp).?;
if (@TypeOf(floatFunc) != @TypeOf(null) and ty.isFloat()) {
res.* = switch (size) {
4 => floatFunc(f32, a, b),
8 => floatFunc(f64, a, b),
else => unreachable,
};
return false;
}
if (ty.isUnsignedInt(comp)) switch (size) {
1 => return intFunc(u8, res, a, b),
2 => return intFunc(u16, res, a, b),
4 => return intFunc(u32, res, a, b),
8 => return intFunc(u64, res, a, b),
else => unreachable,
} else switch (size) {
1 => return intFunc(u8, res, a, b),
2 => return intFunc(u16, res, a, b),
4 => return intFunc(i32, res, a, b),
8 => return intFunc(i64, res, a, b),
else => unreachable,
}
}
}.op;
}
};
pub const add = bin_overflow.getOp(bin_overflow.addInt, bin_overflow.addFloat);
pub const sub = bin_overflow.getOp(bin_overflow.subInt, bin_overflow.subFloat);
pub const mul = bin_overflow.getOp(bin_overflow.mulInt, bin_overflow.mulFloat);
const bin_ops = struct {
inline fn divInt(comptime T: type, aa: Value, bb: Value) Value {
const a_val = aa.getInt(T);
const b_val = bb.getInt(T);
return int(@divTrunc(a_val, b_val));
}
inline fn divFloat(comptime T: type, aa: Value, bb: Value) Value {
const a_val = aa.getFloat(T);
const b_val = bb.getFloat(T);
return float(a_val / b_val);
}
inline fn remInt(comptime T: type, a: Value, b: Value) Value {
const a_val = a.getInt(T);
const b_val = b.getInt(T);
if (@typeInfo(T).Int.signedness == .signed) {
if (a_val == std.math.minInt(T) and b_val == -1) {
return Value{ .tag = .unavailable, .data = .{ .none = {} } };
} else {
if (b_val > 0) return int(@rem(a_val, b_val));
return int(a_val - @divTrunc(a_val, b_val) * b_val);
}
} else {
return int(a_val % b_val);
}
}
inline fn orInt(comptime T: type, a: Value, b: Value) Value {
const a_val = a.getInt(T);
const b_val = b.getInt(T);
return int(a_val | b_val);
}
inline fn xorInt(comptime T: type, a: Value, b: Value) Value {
const a_val = a.getInt(T);
const b_val = b.getInt(T);
return int(a_val ^ b_val);
}
inline fn andInt(comptime T: type, a: Value, b: Value) Value {
const a_val = a.getInt(T);
const b_val = b.getInt(T);
return int(a_val & b_val);
}
inline fn shl(comptime T: type, a: Value, b: Value) Value {
const ShiftT = std.math.Log2Int(T);
const info = @typeInfo(T).Int;
const UT = std.meta.Int(.unsigned, info.bits);
const b_val = b.getInt(T);
if (b_val > std.math.maxInt(ShiftT)) {
return if (info.signedness == .unsigned)
int(@as(UT, std.math.maxInt(UT)))
else
int(@as(T, std.math.minInt(T)));
}
const amt: ShiftT = @truncate(@as(UT, @bitCast(b_val)));
const a_val = a.getInt(T);
return int(a_val << amt);
}
inline fn shr(comptime T: type, a: Value, b: Value) Value {
const ShiftT = std.math.Log2Int(T);
const UT = std.meta.Int(.unsigned, @typeInfo(T).Int.bits);
const b_val = b.getInt(T);
if (b_val > std.math.maxInt(ShiftT)) return Value.int(0);
const amt: ShiftT = @truncate(@as(UT, @intCast(b_val)));
const a_val = a.getInt(T);
return int(a_val >> amt);
}
const FT = fn (Value, Value, Type, *Compilation) Value;
fn getOp(comptime intFunc: anytype, comptime floatFunc: anytype) FT {
return struct {
fn op(a: Value, b: Value, ty: Type, comp: *Compilation) Value {
const size = ty.sizeof(comp).?;
if (@TypeOf(floatFunc) != @TypeOf(null) and ty.isFloat()) {
switch (size) {
4 => return floatFunc(f32, a, b),
8 => return floatFunc(f64, a, b),
else => unreachable,
}
}
if (ty.isUnsignedInt(comp)) switch (size) {
1 => unreachable, // promoted to int
2 => unreachable, // promoted to int
4 => return intFunc(u32, a, b),
8 => return intFunc(u64, a, b),
else => unreachable,
} else switch (size) {
1 => unreachable, // promoted to int
2 => unreachable, // promoted to int
4 => return intFunc(i32, a, b),
8 => return intFunc(i64, a, b),
else => unreachable,
}
}
}.op;
}
};
/// caller guarantees rhs != 0
pub const div = bin_ops.getOp(bin_ops.divInt, bin_ops.divFloat);
/// caller guarantees rhs != 0
/// caller guarantees lhs != std.math.minInt(T) OR rhs != -1
pub const rem = bin_ops.getOp(bin_ops.remInt, null);
pub const bitOr = bin_ops.getOp(bin_ops.orInt, null);
pub const bitXor = bin_ops.getOp(bin_ops.xorInt, null);
pub const bitAnd = bin_ops.getOp(bin_ops.andInt, null);
pub const shl = bin_ops.getOp(bin_ops.shl, null);
pub const shr = bin_ops.getOp(bin_ops.shr, null);
pub fn bitNot(v: Value, ty: Type, comp: *Compilation) Value {
const size = ty.sizeof(comp).?;
var out: Value = undefined;
if (ty.isUnsignedInt(comp)) switch (size) {
1 => unreachable, // promoted to int
2 => unreachable, // promoted to int
4 => out = int(~v.getInt(u32)),
8 => out = int(~v.getInt(u64)),
else => unreachable,
} else switch (size) {
1 => unreachable, // promoted to int
2 => unreachable, // promoted to int
4 => out = int(~v.getInt(i32)),
8 => out = int(~v.getInt(i64)),
else => unreachable,
}
return out;
}
pub fn compare(a: Value, op: std.math.CompareOperator, b: Value, ty: Type, comp: *const Compilation) bool {
assert(a.tag == b.tag);
if (a.tag == .nullptr_t) {
return switch (op) {
.eq => true,
.neq => false,
else => unreachable,
};
}
const S = struct {
inline fn doICompare(comptime T: type, aa: Value, opp: std.math.CompareOperator, bb: Value) bool {
const a_val = aa.getInt(T);
const b_val = bb.getInt(T);
return std.math.compare(a_val, opp, b_val);
}
inline fn doFCompare(comptime T: type, aa: Value, opp: std.math.CompareOperator, bb: Value) bool {
const a_val = aa.getFloat(T);
const b_val = bb.getFloat(T);
return std.math.compare(a_val, opp, b_val);
}
};
const size = ty.sizeof(comp).?;
switch (a.tag) {
.unavailable => return true,
.int => if (ty.isUnsignedInt(comp)) switch (size) {
1 => return S.doICompare(u8, a, op, b),
2 => return S.doICompare(u16, a, op, b),
4 => return S.doICompare(u32, a, op, b),
8 => return S.doICompare(u64, a, op, b),
else => unreachable,
} else switch (size) {
1 => return S.doICompare(i8, a, op, b),
2 => return S.doICompare(i16, a, op, b),
4 => return S.doICompare(i32, a, op, b),
8 => return S.doICompare(i64, a, op, b),
else => unreachable,
},
.float => switch (size) {
4 => return S.doFCompare(f32, a, op, b),
8 => return S.doFCompare(f64, a, op, b),
else => unreachable,
},
else => @panic("TODO"),
}
return false;
}
pub fn hash(v: Value) u64 {
switch (v.tag) {
.unavailable => unreachable,
.int => return std.hash.Wyhash.hash(0, std.mem.asBytes(&v.data.int)),
else => @panic("TODO"),
}
}
pub fn dump(v: Value, ty: Type, comp: *Compilation, strings: []const u8, w: anytype) !void {
switch (v.tag) {
.unavailable => try w.writeAll("unavailable"),
.int => if (ty.is(.bool) and comp.langopts.standard.atLeast(.c2x)) {
try w.print("{s}", .{if (v.isZero()) "false" else "true"});
} else if (ty.isUnsignedInt(comp)) {
try w.print("{d}", .{v.data.int});
} else {
try w.print("{d}", .{v.signExtend(ty, comp)});
},
.bytes => try w.print("\"{s}\"", .{v.data.bytes.slice(strings)}),
// std.fmt does @as instead of @floatCast
.float => try w.print("{d}", .{@as(f64, @floatCast(v.data.float))}),
else => try w.print("({s})", .{@tagName(v.tag)}),
}
}

35783
deps/aro/builtins/BuiltinFunction.zig vendored Normal file

File diff suppressed because it is too large Load Diff

138
deps/aro/builtins/Properties.zig vendored Normal file
View File

@ -0,0 +1,138 @@
const std = @import("std");
const Properties = @This();
language: Language,
attributes: Attributes,
header: Header,
target_set: TargetSet,
/// Header which must be included for a builtin to be available
pub const Header = enum {
none,
/// stdio.h
stdio,
/// stdlib.h
stdlib,
/// setjmpex.h
setjmpex,
/// stdarg.h
stdarg,
/// string.h
string,
/// ctype.h
ctype,
/// wchar.h
wchar,
/// setjmp.h
setjmp,
/// malloc.h
malloc,
/// strings.h
strings,
/// unistd.h
unistd,
/// pthread.h
pthread,
/// math.h
math,
/// complex.h
complex,
/// Blocks.h
blocks,
};
/// Languages in which a builtin is available
pub const Language = enum {
all_languages,
all_ms_languages,
all_gnu_languages,
gnu_lang,
};
pub const Attributes = packed struct {
/// Function does not return
noreturn: bool = false,
/// Function has no side effects
pure: bool = false,
/// Function has no side effects and does not read memory
@"const": bool = false,
/// Signature is meaningless; use custom typecheck
custom_typecheck: bool = false,
/// A declaration of this builtin should be recognized even if the type doesn't match the specified signature.
allow_type_mismatch: bool = false,
/// this is a libc/libm function with a '__builtin_' prefix added.
lib_function_with_builtin_prefix: bool = false,
/// this is a libc/libm function without a '__builtin_' prefix. This builtin can be disabled by '-fno-builtin-foo'
lib_function_without_prefix: bool = false,
/// Function returns twice (e.g. setjmp)
returns_twice: bool = false,
/// Nature of the format string passed to this function
format_kind: enum(u3) {
/// Does not take a format string
none,
/// this is a printf-like function whose Nth argument is the format string
printf,
/// function is like vprintf in that it accepts its arguments as a va_list rather than through an ellipsis
vprintf,
/// this is a scanf-like function whose Nth argument is the format string
scanf,
/// the function is like vscanf in that it accepts its arguments as a va_list rather than through an ellipsis
vscanf,
} = .none,
/// Position of format string argument. Only meaningful if format_kind is not .none
format_string_position: u5 = 0,
/// if false, arguments are not evaluated
eval_args: bool = true,
/// no side effects and does not read memory, but only when -fno-math-errno is in effect and FP exceptions are ignored
const_without_errno_and_fp_exceptions: bool = false,
/// no side effects and does not read memory, but only when FP exceptions are ignored
const_without_fp_exceptions: bool = false,
/// this function can be constant evaluated by the frontend
const_evaluable: bool = false,
};
pub const Target = enum {
/// Supported on all targets
basic,
aarch64,
aarch64_neon_sve_bridge,
aarch64_neon_sve_bridge_cg,
amdgpu,
arm,
bpf,
hexagon,
hexagon_dep,
hexagon_map_custom_dep,
loong_arch,
mips,
neon,
nvptx,
ppc,
riscv,
riscv_vector,
sve,
systemz,
ve,
vevl_gen,
webassembly,
x86,
x86_64,
xcore,
};
/// Targets for which a builtin is enabled
pub const TargetSet = std.enums.EnumSet(Target);
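// Illustrative usage sketch of `TargetSet`, showing how a builtin's enabled targets
// would be queried; the chosen targets here are arbitrary examples.
test "TargetSet basic usage" {
    var set = TargetSet.initEmpty();
    set.insert(.basic);
    try std.testing.expect(set.contains(.basic));
    try std.testing.expect(!set.contains(.x86));
}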

277
deps/aro/builtins/TypeDescription.zig vendored Normal file
View File

@ -0,0 +1,277 @@
const std = @import("std");
const TypeDescription = @This();
prefix: []const Prefix,
spec: Spec,
suffix: []const Suffix,
pub const Component = union(enum) {
prefix: Prefix,
spec: Spec,
suffix: Suffix,
};
pub const ComponentIterator = struct {
str: []const u8,
idx: usize,
pub fn init(str: []const u8) ComponentIterator {
return .{
.str = str,
.idx = 0,
};
}
pub fn peek(self: *ComponentIterator) ?Component {
const idx = self.idx;
defer self.idx = idx;
return self.next();
}
pub fn next(self: *ComponentIterator) ?Component {
if (self.idx == self.str.len) return null;
const c = self.str[self.idx];
self.idx += 1;
switch (c) {
'L' => {
if (self.str[self.idx] != 'L') return .{ .prefix = .L };
self.idx += 1;
if (self.str[self.idx] != 'L') return .{ .prefix = .LL };
self.idx += 1;
return .{ .prefix = .LLL };
},
'Z' => return .{ .prefix = .Z },
'W' => return .{ .prefix = .W },
'N' => return .{ .prefix = .N },
'O' => return .{ .prefix = .O },
'S' => {
if (self.str[self.idx] == 'J') {
self.idx += 1;
return .{ .spec = .SJ };
}
return .{ .prefix = .S };
},
'U' => return .{ .prefix = .U },
'I' => return .{ .prefix = .I },
'v' => return .{ .spec = .v },
'b' => return .{ .spec = .b },
'c' => return .{ .spec = .c },
's' => return .{ .spec = .s },
'i' => return .{ .spec = .i },
'h' => return .{ .spec = .h },
'x' => return .{ .spec = .x },
'y' => return .{ .spec = .y },
'f' => return .{ .spec = .f },
'd' => return .{ .spec = .d },
'z' => return .{ .spec = .z },
'w' => return .{ .spec = .w },
'F' => return .{ .spec = .F },
'a' => return .{ .spec = .a },
'A' => return .{ .spec = .A },
'V', 'q', 'E' => {
const start = self.idx;
while (std.ascii.isDigit(self.str[self.idx])) : (self.idx += 1) {}
const count = std.fmt.parseUnsigned(u32, self.str[start..self.idx], 10) catch unreachable;
return switch (c) {
'V' => .{ .spec = .{ .V = count } },
'q' => .{ .spec = .{ .q = count } },
'E' => .{ .spec = .{ .E = count } },
else => unreachable,
};
},
'X' => {
defer self.idx += 1;
switch (self.str[self.idx]) {
'f' => return .{ .spec = .{ .X = .float } },
'd' => return .{ .spec = .{ .X = .double } },
'L' => {
self.idx += 1;
return .{ .spec = .{ .X = .longdouble } };
},
else => unreachable,
}
},
'Y' => return .{ .spec = .Y },
'P' => return .{ .spec = .P },
'J' => return .{ .spec = .J },
'K' => return .{ .spec = .K },
'p' => return .{ .spec = .p },
'.' => {
// can only appear at end of param string; indicates varargs function
std.debug.assert(self.idx == self.str.len);
return null;
},
'!' => {
std.debug.assert(self.str.len == 1);
return .{ .spec = .@"!" };
},
'*' => {
if (self.idx < self.str.len and std.ascii.isDigit(self.str[self.idx])) {
defer self.idx += 1;
const addr_space = self.str[self.idx] - '0';
return .{ .suffix = .{ .@"*" = addr_space } };
} else {
return .{ .suffix = .{ .@"*" = null } };
}
},
'C' => return .{ .suffix = .C },
'D' => return .{ .suffix = .D },
'R' => return .{ .suffix = .R },
else => unreachable,
}
return null;
}
};
pub const TypeIterator = struct {
param_str: []const u8,
prefix: [4]Prefix,
spec: Spec,
suffix: [4]Suffix,
idx: usize,
pub fn init(param_str: []const u8) TypeIterator {
return .{
.param_str = param_str,
.prefix = undefined,
.spec = undefined,
.suffix = undefined,
.idx = 0,
};
}
/// The returned `TypeDescription` contains fields which are slices into the underlying `TypeIterator`.
/// The returned value is invalidated when `.next()` is called again or the `TypeIterator` goes out
/// of scope.
pub fn next(self: *TypeIterator) ?TypeDescription {
var it = ComponentIterator.init(self.param_str[self.idx..]);
defer self.idx += it.idx;
var prefix_count: usize = 0;
var maybe_spec: ?Spec = null;
var suffix_count: usize = 0;
while (it.peek()) |component| {
switch (component) {
.prefix => |prefix| {
if (maybe_spec != null) break;
self.prefix[prefix_count] = prefix;
prefix_count += 1;
},
.spec => |spec| {
if (maybe_spec != null) break;
maybe_spec = spec;
},
.suffix => |suffix| {
std.debug.assert(maybe_spec != null);
self.suffix[suffix_count] = suffix;
suffix_count += 1;
},
}
_ = it.next();
}
if (maybe_spec) |spec| {
return TypeDescription{
.prefix = self.prefix[0..prefix_count],
.spec = spec,
.suffix = self.suffix[0..suffix_count],
};
}
return null;
}
};
const Prefix = enum {
/// long (e.g. Li for 'long int', Ld for 'long double')
L,
/// long long (e.g. LLi for 'long long int', LLd for __float128)
LL,
/// __int128_t (e.g. LLLi)
LLL,
/// int32_t (require a native 32-bit integer type on the target)
Z,
/// int64_t (require a native 64-bit integer type on the target)
W,
/// 'int' size if target is LP64, 'L' otherwise.
N,
/// long for OpenCL targets, long long otherwise.
O,
/// signed
S,
/// unsigned
U,
/// Required to constant fold to an integer constant expression.
I,
};
const Spec = union(enum) {
/// void
v,
/// boolean
b,
/// char
c,
/// short
s,
/// int
i,
/// half (__fp16, OpenCL)
h,
/// half (_Float16)
x,
/// half (__bf16)
y,
/// float
f,
/// double
d,
/// size_t
z,
/// wchar_t
w,
/// constant CFString
F,
/// __builtin_va_list
a,
/// "reference" to __builtin_va_list
A,
/// Vector, followed by the number of elements and the base type.
V: u32,
/// Scalable vector, followed by the number of elements and the base type.
q: u32,
/// ext_vector, followed by the number of elements and the base type.
E: u32,
/// _Complex, followed by the base type.
X: enum {
float,
double,
longdouble,
},
/// ptrdiff_t
Y,
/// FILE
P,
/// jmp_buf
J,
/// sigjmp_buf
SJ,
/// ucontext_t
K,
/// pid_t
p,
/// Used to indicate a builtin with target-dependent param types. Must appear by itself
@"!",
};
const Suffix = union(enum) {
/// pointer (optionally followed by an address space number; if no address space is specified then any address space will be accepted)
@"*": ?u8,
/// const
C,
/// volatile
D,
/// restrict
R,
};
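// Illustrative sketch of walking a builtin parameter string with `ComponentIterator`;
// "Li" (long int) is an assumed example input and yields one prefix component followed
// by one spec component.
test "ComponentIterator walks a simple param string" {
    var it = ComponentIterator.init("Li");
    try std.testing.expect(it.next().? == .prefix);
    try std.testing.expect(it.next().? == .spec);
    try std.testing.expect(it.next() == null);
}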

221
deps/aro/codegen/x86_64.zig vendored Normal file
View File

@ -0,0 +1,221 @@
const std = @import("std");
const Codegen = @import("../Codegen_legacy.zig");
const Tree = @import("../Tree.zig");
const NodeIndex = Tree.NodeIndex;
const x86_64 = @import("zig").codegen.x86_64;
const Register = x86_64.Register;
const RegisterManager = @import("zig").RegisterManager;
const Fn = @This();
const Value = union(enum) {
symbol: []const u8,
immediate: i64,
register: Register,
none,
};
register_manager: RegisterManager(Fn, Register, &x86_64.callee_preserved_regs) = .{},
data: *std.ArrayList(u8),
c: *Codegen,
pub fn deinit(func: *Fn) void {
func.* = undefined;
}
pub fn genFn(c: *Codegen, decl: NodeIndex, data: *std.ArrayList(u8)) Codegen.Error!void {
var func = Fn{ .data = data, .c = c };
defer func.deinit();
// function prologue
try func.data.appendSlice(&.{
0x55, // push rbp
0x48, 0x89, 0xe5, // mov rbp,rsp
});
_ = try func.genNode(c.node_data[@intFromEnum(decl)].decl.node);
// all functions are guaranteed to end in a return statement so no extra work required here
}
pub fn spillInst(f: *Fn, reg: Register, inst: u32) !void {
_ = inst;
_ = reg;
_ = f;
}
fn setReg(func: *Fn, val: Value, reg: Register) !void {
switch (val) {
.none => unreachable,
.symbol => |sym| {
// lea address with 0 and add relocation
const encoder = try x86_64.Encoder.init(func.data, 8);
encoder.rex(.{ .w = true });
encoder.opcode_1byte(0x8D);
encoder.modRm_RIPDisp32(reg.low_id());
const offset = func.data.items.len;
encoder.imm32(0);
try func.c.obj.addRelocation(sym, .func, offset, -4);
},
.immediate => |x| if (x == 0) {
// 32-bit moves zero-extend to 64-bit, so xoring the 32-bit
// register is the fastest way to zero a register.
// The encoding for `xor r32, r32` is `0x31 /r`.
const encoder = try x86_64.Encoder.init(func.data, 3);
// If we're accessing e.g. r8d, we need to use a REX prefix before the actual operation. Since
// this is a 32-bit operation, the W flag is set to zero. X is also zero, as we're not using a SIB.
// Both R and B are set, as we're extending, in effect, the register bits *and* the operand.
encoder.rex(.{ .r = reg.isExtended(), .b = reg.isExtended() });
encoder.opcode_1byte(0x31);
// Section 3.1.1.1 of the Intel x64 Manual states that "/r indicates that the
// ModR/M byte of the instruction contains a register operand and an r/m operand."
encoder.modRm_direct(reg.low_id(), reg.low_id());
} else if (x <= std.math.maxInt(i32)) {
// Next best case: if we set the lower four bytes, the upper four will be zeroed.
//
// The encoding for `mov IMM32 -> REG` is (0xB8 + R) IMM.
const encoder = try x86_64.Encoder.init(func.data, 6);
// Just as with XORing, we need a REX prefix. This time though, we only
// need the B bit set, as we're extending the opcode's register field,
// and there is no Mod R/M byte.
encoder.rex(.{ .b = reg.isExtended() });
encoder.opcode_withReg(0xB8, reg.low_id());
// no ModR/M byte
// IMM
encoder.imm32(@intCast(x));
} else {
// Worst case: we need to load the 64-bit register with the IMM. GNU's assembler calls
// this `movabs`, though this is officially just a different variant of the plain `mov`
// instruction.
//
// This encoding is, in fact, the *same* as the one used for 32-bit loads. The only
// difference is that we set REX.W before the instruction, which extends the load to
// 64-bit and uses the full bit-width of the register.
{
const encoder = try x86_64.Encoder.init(func.data, 10);
encoder.rex(.{ .w = true, .b = reg.isExtended() });
encoder.opcode_withReg(0xB8, reg.low_id());
encoder.imm64(@bitCast(x));
}
},
.register => |src_reg| {
// If the registers are the same, nothing to do.
if (src_reg.id() == reg.id())
return;
// This is a variant of 8B /r.
const encoder = try x86_64.Encoder.init(func.data, 3);
encoder.rex(.{
.w = true,
.r = reg.isExtended(),
.b = src_reg.isExtended(),
});
encoder.opcode_1byte(0x8B);
encoder.modRm_direct(reg.low_id(), src_reg.low_id());
},
}
}
fn genNode(func: *Fn, node: NodeIndex) Codegen.Error!Value {
if (func.c.tree.value_map.get(node)) |some| {
if (some.tag == .int)
return Value{ .immediate = @bitCast(some.data.int) };
}
const data = func.c.node_data[@intFromEnum(node)];
switch (func.c.node_tag[@intFromEnum(node)]) {
.static_assert => return Value{ .none = {} },
.compound_stmt_two => {
if (data.bin.lhs != .none) _ = try func.genNode(data.bin.lhs);
if (data.bin.rhs != .none) _ = try func.genNode(data.bin.rhs);
return Value{ .none = {} };
},
.compound_stmt => {
for (func.c.tree.data[data.range.start..data.range.end]) |stmt| {
_ = try func.genNode(stmt);
}
return Value{ .none = {} };
},
.call_expr_one => if (data.bin.rhs != .none)
return func.genCall(data.bin.lhs, &.{data.bin.rhs})
else
return func.genCall(data.bin.lhs, &.{}),
.call_expr => return func.genCall(func.c.tree.data[data.range.start], func.c.tree.data[data.range.start + 1 .. data.range.end]),
.explicit_cast, .implicit_cast => {
switch (data.cast.kind) {
.function_to_pointer,
.array_to_pointer,
=> return func.genNode(data.cast.operand), // no-op
else => return func.c.comp.diag.fatalNoSrc("TODO x86_64 genNode for cast {s}\n", .{@tagName(data.cast.kind)}),
}
},
.decl_ref_expr => {
// TODO locals and arguments
return Value{ .symbol = func.c.tree.tokSlice(data.decl_ref) };
},
.return_stmt => {
const value = try func.genNode(data.un);
try func.setReg(value, x86_64.c_abi_int_return_regs[0]);
try func.data.appendSlice(&.{
0x5d, // pop rbp
0xc3, // ret
});
return Value{ .none = {} };
},
.implicit_return => {
try func.setReg(.{ .immediate = 0 }, x86_64.c_abi_int_return_regs[0]);
try func.data.appendSlice(&.{
0x5d, // pop rbp
0xc3, // ret
});
return Value{ .none = {} };
},
.int_literal => return Value{ .immediate = @bitCast(data.int) },
.string_literal_expr => {
const range = func.c.tree.value_map.get(node).?.data.bytes;
const str_bytes = range.slice(func.c.tree.strings);
const section = try func.c.obj.getSection(.strings);
const start = section.items.len;
try section.appendSlice(str_bytes);
const symbol_name = try func.c.obj.declareSymbol(.strings, null, .Internal, .variable, start, str_bytes.len);
return Value{ .symbol = symbol_name };
},
else => return func.c.comp.diag.fatalNoSrc("TODO x86_64 genNode {}\n", .{func.c.node_tag[@intFromEnum(node)]}),
}
}
fn genCall(func: *Fn, lhs: NodeIndex, args: []const NodeIndex) Codegen.Error!Value {
if (args.len > x86_64.c_abi_int_param_regs.len)
return func.c.comp.diag.fatalNoSrc("TODO more than args {d}\n", .{x86_64.c_abi_int_param_regs.len});
const func_value = try func.genNode(lhs);
for (args, 0..) |arg, i| {
const value = try func.genNode(arg);
try func.setReg(value, x86_64.c_abi_int_param_regs[i]);
}
switch (func_value) {
.none => unreachable,
.symbol => |sym| {
const encoder = try x86_64.Encoder.init(func.data, 5);
encoder.opcode_1byte(0xe8);
const offset = func.data.items.len;
encoder.imm32(0);
try func.c.obj.addRelocation(sym, .func, offset, -4);
},
.immediate => return func.c.comp.diag.fatalNoSrc("TODO call immediate\n", .{}),
.register => return func.c.comp.diag.fatalNoSrc("TODO call reg\n", .{}),
}
return Value{ .register = x86_64.c_abi_int_return_regs[0] };
}
pub fn genVar(c: *Codegen, decl: NodeIndex) Codegen.Error!void {
_ = c;
_ = decl;
}

76
deps/aro/features.zig vendored Normal file
View File

@ -0,0 +1,76 @@
const std = @import("std");
const Compilation = @import("Compilation.zig");
const target_util = @import("target.zig");
/// Used to implement the __has_feature macro.
pub fn hasFeature(comp: *Compilation, ext: []const u8) bool {
const list = .{
.assume_nonnull = true,
.attribute_analyzer_noreturn = true,
.attribute_availability = true,
.attribute_availability_with_message = true,
.attribute_availability_app_extension = true,
.attribute_availability_with_version_underscores = true,
.attribute_availability_tvos = true,
.attribute_availability_watchos = true,
.attribute_availability_with_strict = true,
.attribute_availability_with_replacement = true,
.attribute_availability_in_templates = true,
.attribute_availability_swift = true,
.attribute_cf_returns_not_retained = true,
.attribute_cf_returns_retained = true,
.attribute_cf_returns_on_parameters = true,
.attribute_deprecated_with_message = true,
.attribute_deprecated_with_replacement = true,
.attribute_ext_vector_type = true,
.attribute_ns_returns_not_retained = true,
.attribute_ns_returns_retained = true,
.attribute_ns_consumes_self = true,
.attribute_ns_consumed = true,
.attribute_cf_consumed = true,
.attribute_overloadable = true,
.attribute_unavailable_with_message = true,
.attribute_unused_on_fields = true,
.attribute_diagnose_if_objc = true,
.blocks = false, // TODO
.c_thread_safety_attributes = true,
.enumerator_attributes = true,
.nullability = true,
.nullability_on_arrays = true,
.nullability_nullable_result = true,
.c_alignas = comp.langopts.standard.atLeast(.c11),
.c_alignof = comp.langopts.standard.atLeast(.c11),
.c_atomic = comp.langopts.standard.atLeast(.c11),
.c_generic_selections = comp.langopts.standard.atLeast(.c11),
.c_static_assert = comp.langopts.standard.atLeast(.c11),
.c_thread_local = comp.langopts.standard.atLeast(.c11) and target_util.isTlsSupported(comp.target),
};
inline for (std.meta.fields(@TypeOf(list))) |f| {
if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
}
return false;
}
/// Used to implement the __has_extension macro.
pub fn hasExtension(comp: *Compilation, ext: []const u8) bool {
const list = .{
// C11 features
.c_alignas = true,
.c_alignof = true,
.c_atomic = false, // TODO
.c_generic_selections = true,
.c_static_assert = true,
.c_thread_local = target_util.isTlsSupported(comp.target),
// misc
.overloadable_unmarked = false, // TODO
.statement_attributes_with_gnu_syntax = false, // TODO
.gnu_asm = true,
.gnu_asm_goto_with_outputs = true,
.matrix_types = false, // TODO
.matrix_types_scalar_division = false, // TODO
};
inline for (std.meta.fields(@TypeOf(list))) |f| {
if (std.mem.eql(u8, f.name, ext)) return @field(list, f.name);
}
return false;
}
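// Illustrative sketch of the comptime field-lookup pattern used by hasFeature and
// hasExtension above, on a stand-in list so it runs without a Compilation instance;
// the field names are hypothetical.
test "comptime feature lookup sketch" {
    const list = .{ .always_on = true, .not_yet = false };
    var result = false;
    inline for (std.meta.fields(@TypeOf(list))) |f| {
        if (std.mem.eql(u8, f.name, "always_on")) result = @field(list, f.name);
    }
    try std.testing.expect(result);
}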

27
deps/aro/lib.zig vendored Normal file
View File

@ -0,0 +1,27 @@
/// Deprecated
pub const Codegen = @import("Codegen_legacy.zig");
pub const CodeGen = @import("CodeGen.zig");
pub const Compilation = @import("Compilation.zig");
pub const Diagnostics = @import("Diagnostics.zig");
pub const Driver = @import("Driver.zig");
pub const Interner = @import("Interner.zig");
pub const Ir = @import("Ir.zig");
pub const Object = @import("Object.zig");
pub const Parser = @import("Parser.zig");
pub const Preprocessor = @import("Preprocessor.zig");
pub const Source = @import("Source.zig");
pub const Tokenizer = @import("Tokenizer.zig");
pub const Tree = @import("Tree.zig");
pub const Type = @import("Type.zig");
pub const TypeMapper = @import("StringInterner.zig").TypeMapper;
pub const target_util = @import("target.zig");
pub const version_str = "0.0.0-dev";
pub const version = @import("std").SemanticVersion.parse(version_str) catch unreachable;
pub const CallingConvention = enum {
C,
stdcall,
thiscall,
vectorcall,
};

169
deps/aro/number_affixes.zig vendored Normal file
View File

@ -0,0 +1,169 @@
const std = @import("std");
const mem = std.mem;
pub const Prefix = enum(u8) {
binary = 2,
octal = 8,
decimal = 10,
hex = 16,
pub fn digitAllowed(prefix: Prefix, c: u8) bool {
return switch (c) {
'0', '1' => true,
'2'...'7' => prefix != .binary,
'8'...'9' => prefix == .decimal or prefix == .hex,
'a'...'f', 'A'...'F' => prefix == .hex,
else => false,
};
}
pub fn fromString(buf: []const u8) Prefix {
if (buf.len == 1) return .decimal;
// tokenizer enforces that first byte is a decimal digit or period
switch (buf[0]) {
'.', '1'...'9' => return .decimal,
'0' => {},
else => unreachable,
}
switch (buf[1]) {
'x', 'X' => return if (buf.len == 2) .decimal else .hex,
'b', 'B' => return if (buf.len == 2) .decimal else .binary,
else => {
if (mem.indexOfAny(u8, buf, "eE.")) |_| {
// This is a decimal floating point number that happens to start with zero
return .decimal;
} else if (Suffix.fromString(buf[1..], .int)) |_| {
// This is `0` with a valid suffix
return .decimal;
} else {
return .octal;
}
},
}
}
/// Length of this prefix as a string
pub fn stringLen(prefix: Prefix) usize {
return switch (prefix) {
.binary => 2,
.octal => 1,
.decimal => 0,
.hex => 2,
};
}
};
pub const Suffix = enum {
// zig fmt: off
// int and imaginary int
None, I,
// unsigned real integers
U, UL, ULL,
// unsigned imaginary integers
IU, IUL, IULL,
// long or long double, real and imaginary
L, IL,
// long long and imaginary long long
LL, ILL,
// float and imaginary float
F, IF,
// _Float16
F16,
// Imaginary _BitInt
IWB, IUWB,
// _BitInt
WB, UWB,
// zig fmt: on
const Tuple = struct { Suffix, []const []const u8 };
const IntSuffixes = &[_]Tuple{
.{ .U, &.{"U"} },
.{ .L, &.{"L"} },
.{ .WB, &.{"WB"} },
.{ .UL, &.{ "U", "L" } },
.{ .UWB, &.{ "U", "WB" } },
.{ .LL, &.{"LL"} },
.{ .ULL, &.{ "U", "LL" } },
.{ .I, &.{"I"} },
.{ .IWB, &.{ "I", "WB" } },
.{ .IU, &.{ "I", "U" } },
.{ .IL, &.{ "I", "L" } },
.{ .IUL, &.{ "I", "U", "L" } },
.{ .IUWB, &.{ "I", "U", "WB" } },
.{ .ILL, &.{ "I", "LL" } },
.{ .IULL, &.{ "I", "U", "LL" } },
};
const FloatSuffixes = &[_]Tuple{
.{ .F16, &.{"F16"} },
.{ .F, &.{"F"} },
.{ .L, &.{"L"} },
.{ .I, &.{"I"} },
.{ .IL, &.{ "I", "L" } },
.{ .IF, &.{ "I", "F" } },
};
pub fn fromString(buf: []const u8, suffix_kind: enum { int, float }) ?Suffix {
if (buf.len == 0) return .None;
const suffixes = switch (suffix_kind) {
.float => FloatSuffixes,
.int => IntSuffixes,
};
var scratch: [3]u8 = undefined;
top: for (suffixes) |candidate| {
const tag = candidate[0];
const parts = candidate[1];
var len: usize = 0;
for (parts) |part| len += part.len;
if (len != buf.len) continue;
for (parts) |part| {
const lower = std.ascii.lowerString(&scratch, part);
if (mem.indexOf(u8, buf, part) == null and mem.indexOf(u8, buf, lower) == null) continue :top;
}
return tag;
}
return null;
}
pub fn isImaginary(suffix: Suffix) bool {
return switch (suffix) {
.I, .IL, .IF, .IU, .IUL, .ILL, .IULL, .IWB, .IUWB => true,
.None, .L, .F16, .F, .U, .UL, .LL, .ULL, .WB, .UWB => false,
};
}
pub fn isSignedInteger(suffix: Suffix) bool {
return switch (suffix) {
.None, .L, .LL, .I, .IL, .ILL, .WB, .IWB => true,
.U, .UL, .ULL, .IU, .IUL, .IULL, .UWB, .IUWB => false,
.F, .IF, .F16 => unreachable,
};
}
pub fn signedness(suffix: Suffix) std.builtin.Signedness {
return if (suffix.isSignedInteger()) .signed else .unsigned;
}
pub fn isBitInt(suffix: Suffix) bool {
return switch (suffix) {
.WB, .UWB, .IWB, .IUWB => true,
else => false,
};
}
};
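// Illustrative sketch of how literal prefixes and suffixes are classified; the inputs
// are assumed examples: "0x1A" has a hex prefix, "0755" an octal prefix, and "ul"
// parses as the unsigned-long suffix.
test "prefix and suffix classification" {
    try std.testing.expectEqual(Prefix.hex, Prefix.fromString("0x1A"));
    try std.testing.expectEqual(Prefix.octal, Prefix.fromString("0755"));
    try std.testing.expectEqual(Suffix.UL, Suffix.fromString("ul", .int).?);
    try std.testing.expectEqual(Suffix.None, Suffix.fromString("", .float).?);
}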

377
deps/aro/object/Elf.zig vendored Normal file
View File

@ -0,0 +1,377 @@
const std = @import("std");
const Compilation = @import("../Compilation.zig");
const Object = @import("../Object.zig");
const Elf = @This();
const Section = struct {
data: std.ArrayList(u8),
relocations: std.ArrayListUnmanaged(Relocation) = .{},
flags: u64,
type: u32,
index: u16 = undefined,
};
const Symbol = struct {
section: ?*Section,
size: u64,
offset: u64,
index: u16 = undefined,
info: u8,
};
const Relocation = packed struct {
symbol: *Symbol,
addend: i64,
offset: u48,
type: u8,
};
const additional_sections = 3; // null section, strtab, symtab
const strtab_index = 1;
const symtab_index = 2;
const strtab_default = "\x00.strtab\x00.symtab\x00";
const strtab_name = 1;
const symtab_name = "\x00.strtab\x00".len;
obj: Object,
/// The keys are owned by the Codegen.tree
sections: std.StringHashMapUnmanaged(*Section) = .{},
local_symbols: std.StringHashMapUnmanaged(*Symbol) = .{},
global_symbols: std.StringHashMapUnmanaged(*Symbol) = .{},
unnamed_symbol_mangle: u32 = 0,
strtab_len: u64 = strtab_default.len,
arena: std.heap.ArenaAllocator,
pub fn create(comp: *Compilation) !*Object {
const elf = try comp.gpa.create(Elf);
elf.* = .{
.obj = .{ .format = .elf, .comp = comp },
.arena = std.heap.ArenaAllocator.init(comp.gpa),
};
return &elf.obj;
}
pub fn deinit(elf: *Elf) void {
const gpa = elf.arena.child_allocator;
{
var it = elf.sections.valueIterator();
while (it.next()) |sect| {
sect.*.data.deinit();
sect.*.relocations.deinit(gpa);
}
}
elf.sections.deinit(gpa);
elf.local_symbols.deinit(gpa);
elf.global_symbols.deinit(gpa);
elf.arena.deinit();
gpa.destroy(elf);
}
fn sectionString(sec: Object.Section) []const u8 {
return switch (sec) {
.undefined => unreachable,
.data => "data",
.read_only_data => "rodata",
.func => "text",
.strings => "rodata.str",
.custom => |name| name,
};
}
pub fn getSection(elf: *Elf, section_kind: Object.Section) !*std.ArrayList(u8) {
const section_name = sectionString(section_kind);
const section = elf.sections.get(section_name) orelse blk: {
const section = try elf.arena.allocator().create(Section);
section.* = .{
.data = std.ArrayList(u8).init(elf.arena.child_allocator),
.type = std.elf.SHT_PROGBITS,
.flags = switch (section_kind) {
.func, .custom => std.elf.SHF_ALLOC + std.elf.SHF_EXECINSTR,
.strings => std.elf.SHF_ALLOC + std.elf.SHF_MERGE + std.elf.SHF_STRINGS,
.read_only_data => std.elf.SHF_ALLOC,
.data => std.elf.SHF_ALLOC + std.elf.SHF_WRITE,
.undefined => unreachable,
},
};
try elf.sections.putNoClobber(elf.arena.child_allocator, section_name, section);
elf.strtab_len += section_name.len + ".\x00".len;
break :blk section;
};
return &section.data;
}
pub fn declareSymbol(
elf: *Elf,
section_kind: Object.Section,
maybe_name: ?[]const u8,
linkage: std.builtin.GlobalLinkage,
@"type": Object.SymbolType,
offset: u64,
size: u64,
) ![]const u8 {
const section = blk: {
if (section_kind == .undefined) break :blk null;
const section_name = sectionString(section_kind);
break :blk elf.sections.get(section_name);
};
const binding: u8 = switch (linkage) {
.Internal => std.elf.STB_LOCAL,
.Strong => std.elf.STB_GLOBAL,
.Weak => std.elf.STB_WEAK,
.LinkOnce => unreachable,
};
const sym_type: u8 = switch (@"type") {
.func => std.elf.STT_FUNC,
.variable => std.elf.STT_OBJECT,
.external => std.elf.STT_NOTYPE,
};
const name = if (maybe_name) |some| some else blk: {
defer elf.unnamed_symbol_mangle += 1;
break :blk try std.fmt.allocPrint(elf.arena.allocator(), ".L.{d}", .{elf.unnamed_symbol_mangle});
};
const gop = if (linkage == .Internal)
try elf.local_symbols.getOrPut(elf.arena.child_allocator, name)
else
try elf.global_symbols.getOrPut(elf.arena.child_allocator, name);
if (!gop.found_existing) {
gop.value_ptr.* = try elf.arena.allocator().create(Symbol);
elf.strtab_len += name.len + 1; // +1 for null byte
}
gop.value_ptr.*.* = .{
.section = section,
.size = size,
.offset = offset,
.info = (binding << 4) + sym_type,
};
return name;
}
pub fn addRelocation(elf: *Elf, name: []const u8, section_kind: Object.Section, address: u64, addend: i64) !void {
const section_name = sectionString(section_kind);
const symbol = elf.local_symbols.get(name) orelse elf.global_symbols.get(name).?; // reference to undeclared symbol
const section = elf.sections.get(section_name).?;
if (section.relocations.items.len == 0) elf.strtab_len += ".rela".len;
try section.relocations.append(elf.arena.child_allocator, .{
.symbol = symbol,
.offset = @intCast(address),
.addend = addend,
.type = if (symbol.section == null) 4 else 2, // TODO
});
}
/// elf header
/// sections contents
/// symbols
/// relocations
/// strtab
/// section headers
pub fn finish(elf: *Elf, file: std.fs.File) !void {
var buf_writer = std.io.bufferedWriter(file.writer());
const w = buf_writer.writer();
var num_sections: std.elf.Elf64_Half = additional_sections;
var relocations_len: std.elf.Elf64_Off = 0;
var sections_len: std.elf.Elf64_Off = 0;
{
var it = elf.sections.valueIterator();
while (it.next()) |sect| {
sections_len += sect.*.data.items.len;
relocations_len += sect.*.relocations.items.len * @sizeOf(std.elf.Elf64_Rela);
sect.*.index = num_sections;
num_sections += 1;
num_sections += @intFromBool(sect.*.relocations.items.len != 0);
}
}
const symtab_len = (elf.local_symbols.count() + elf.global_symbols.count() + 1) * @sizeOf(std.elf.Elf64_Sym);
const symtab_offset = @sizeOf(std.elf.Elf64_Ehdr) + sections_len;
const symtab_offset_aligned = std.mem.alignForward(u64, symtab_offset, 8);
const rela_offset = symtab_offset_aligned + symtab_len;
const strtab_offset = rela_offset + relocations_len;
const sh_offset = strtab_offset + elf.strtab_len;
const sh_offset_aligned = std.mem.alignForward(u64, sh_offset, 16);
var elf_header = std.elf.Elf64_Ehdr{
.e_ident = .{ 0x7F, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
.e_type = std.elf.ET.REL, // we only produce relocatables
.e_machine = elf.obj.comp.target.cpu.arch.toElfMachine(),
.e_version = 1,
.e_entry = 0, // linker will handle this
.e_phoff = 0, // no program header
.e_shoff = sh_offset_aligned, // section headers offset
.e_flags = 0, // no flags
.e_ehsize = @sizeOf(std.elf.Elf64_Ehdr),
.e_phentsize = 0, // no program header
.e_phnum = 0, // no program header
.e_shentsize = @sizeOf(std.elf.Elf64_Shdr),
.e_shnum = num_sections,
.e_shstrndx = strtab_index,
};
try w.writeStruct(elf_header);
// write contents of sections
{
var it = elf.sections.valueIterator();
while (it.next()) |sect| try w.writeAll(sect.*.data.items);
}
// pad to 8 bytes
try w.writeByteNTimes(0, @intCast(symtab_offset_aligned - symtab_offset));
var name_offset: u32 = strtab_default.len;
// write symbols
{
// first symbol must be null
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Sym));
var sym_index: u16 = 1;
var it = elf.local_symbols.iterator();
while (it.next()) |entry| {
const sym = entry.value_ptr.*;
try w.writeStruct(std.elf.Elf64_Sym{
.st_name = name_offset,
.st_info = sym.info,
.st_other = 0,
.st_shndx = if (sym.section) |some| some.index else 0,
.st_value = sym.offset,
.st_size = sym.size,
});
sym.index = sym_index;
sym_index += 1;
name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte
}
it = elf.global_symbols.iterator();
while (it.next()) |entry| {
const sym = entry.value_ptr.*;
try w.writeStruct(std.elf.Elf64_Sym{
.st_name = name_offset,
.st_info = sym.info,
.st_other = 0,
.st_shndx = if (sym.section) |some| some.index else 0,
.st_value = sym.offset,
.st_size = sym.size,
});
sym.index = sym_index;
sym_index += 1;
name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte
}
}
// write relocations
{
var it = elf.sections.valueIterator();
while (it.next()) |sect| {
for (sect.*.relocations.items) |rela| {
try w.writeStruct(std.elf.Elf64_Rela{
.r_offset = rela.offset,
.r_addend = rela.addend,
.r_info = (@as(u64, rela.symbol.index) << 32) | rela.type,
});
}
}
}
// write strtab
try w.writeAll(strtab_default);
{
var it = elf.local_symbols.keyIterator();
while (it.next()) |key| try w.print("{s}\x00", .{key.*});
it = elf.global_symbols.keyIterator();
while (it.next()) |key| try w.print("{s}\x00", .{key.*});
}
{
var it = elf.sections.iterator();
while (it.next()) |entry| {
if (entry.value_ptr.*.relocations.items.len != 0) try w.writeAll(".rela");
try w.print(".{s}\x00", .{entry.key_ptr.*});
}
}
// pad to 16 bytes
try w.writeByteNTimes(0, @intCast(sh_offset_aligned - sh_offset));
// mandatory null header
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Shdr));
// write strtab section header
{
var sect_header = std.elf.Elf64_Shdr{
.sh_name = strtab_name,
.sh_type = std.elf.SHT_STRTAB,
.sh_flags = 0,
.sh_addr = 0,
.sh_offset = strtab_offset,
.sh_size = elf.strtab_len,
.sh_link = 0,
.sh_info = 0,
.sh_addralign = 1,
.sh_entsize = 0,
};
try w.writeStruct(sect_header);
}
// write symtab section header
{
var sect_header = std.elf.Elf64_Shdr{
.sh_name = symtab_name,
.sh_type = std.elf.SHT_SYMTAB,
.sh_flags = 0,
.sh_addr = 0,
.sh_offset = symtab_offset_aligned,
.sh_size = symtab_len,
.sh_link = strtab_index,
.sh_info = elf.local_symbols.size + 1,
.sh_addralign = 8,
.sh_entsize = @sizeOf(std.elf.Elf64_Sym),
};
try w.writeStruct(sect_header);
}
// remaining section headers
{
var sect_offset: u64 = @sizeOf(std.elf.Elf64_Ehdr);
var rela_sect_offset: u64 = rela_offset;
var it = elf.sections.iterator();
while (it.next()) |entry| {
const sect = entry.value_ptr.*;
const rela_count = sect.relocations.items.len;
const rela_name_offset: u32 = if (rela_count != 0) @truncate(".rela".len) else 0;
try w.writeStruct(std.elf.Elf64_Shdr{
.sh_name = rela_name_offset + name_offset,
.sh_type = sect.type,
.sh_flags = sect.flags,
.sh_addr = 0,
.sh_offset = sect_offset,
.sh_size = sect.data.items.len,
.sh_link = 0,
.sh_info = 0,
.sh_addralign = if (sect.flags & std.elf.SHF_EXECINSTR != 0) 16 else 1,
.sh_entsize = 0,
});
if (rela_count != 0) {
const size = rela_count * @sizeOf(std.elf.Elf64_Rela);
try w.writeStruct(std.elf.Elf64_Shdr{
.sh_name = name_offset,
.sh_type = std.elf.SHT_RELA,
.sh_flags = 0,
.sh_addr = 0,
.sh_offset = rela_sect_offset,
.sh_size = rela_count * @sizeOf(std.elf.Elf64_Rela),
.sh_link = symtab_index,
.sh_info = sect.index,
.sh_addralign = 8,
.sh_entsize = @sizeOf(std.elf.Elf64_Rela),
});
rela_sect_offset += size;
}
sect_offset += sect.data.items.len;
name_offset += @as(u32, @intCast(entry.key_ptr.len + ".\x00".len)) + rela_name_offset;
}
}
try buf_writer.flush();
}
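// Illustrative sketch of the Elf64 `r_info` packing written out in `finish` above: the
// symbol index occupies the upper 32 bits and the relocation type the lower 32 bits
// (2 corresponds to R_X86_64_PC32).
test "Elf64 r_info packing" {
    const sym_index: u64 = 3;
    const rel_type: u64 = 2;
    const r_info = (sym_index << 32) | rel_type;
    try std.testing.expectEqual(@as(u64, 0x0000_0003_0000_0002), r_info);
}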

199
deps/aro/pragmas/gcc.zig vendored Normal file
View File

@ -0,0 +1,199 @@
const std = @import("std");
const mem = std.mem;
const Compilation = @import("../Compilation.zig");
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig");
const TokenIndex = @import("../Tree.zig").TokenIndex;
const GCC = @This();
pragma: Pragma = .{
.beforeParse = beforeParse,
.beforePreprocess = beforePreprocess,
.afterParse = afterParse,
.deinit = deinit,
.preprocessorHandler = preprocessorHandler,
.parserHandler = parserHandler,
.preserveTokens = preserveTokens,
},
original_options: Diagnostics.Options = .{},
options_stack: std.ArrayListUnmanaged(Diagnostics.Options) = .{},
const Directive = enum {
warning,
@"error",
diagnostic,
poison,
const Diagnostics = enum {
ignored,
warning,
@"error",
fatal,
push,
pop,
};
};
fn beforePreprocess(pragma: *Pragma, comp: *Compilation) void {
var self = @fieldParentPtr(GCC, "pragma", pragma);
self.original_options = comp.diag.options;
}
fn beforeParse(pragma: *Pragma, comp: *Compilation) void {
var self = @fieldParentPtr(GCC, "pragma", pragma);
comp.diag.options = self.original_options;
self.options_stack.items.len = 0;
}
fn afterParse(pragma: *Pragma, comp: *Compilation) void {
var self = @fieldParentPtr(GCC, "pragma", pragma);
comp.diag.options = self.original_options;
self.options_stack.items.len = 0;
}
pub fn init(allocator: mem.Allocator) !*Pragma {
var gcc = try allocator.create(GCC);
gcc.* = .{};
return &gcc.pragma;
}
fn deinit(pragma: *Pragma, comp: *Compilation) void {
var self = @fieldParentPtr(GCC, "pragma", pragma);
self.options_stack.deinit(comp.gpa);
comp.gpa.destroy(self);
}
fn diagnosticHandler(self: *GCC, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
const diagnostic_tok = pp.tokens.get(start_idx);
if (diagnostic_tok.id == .nl) return;
const diagnostic = std.meta.stringToEnum(Directive.Diagnostics, pp.expandedSlice(diagnostic_tok)) orelse
return error.UnknownPragma;
switch (diagnostic) {
.ignored, .warning, .@"error", .fatal => {
const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) {
error.ExpectedStringLiteral => {
return pp.comp.diag.add(.{
.tag = .pragma_requires_string_literal,
.loc = diagnostic_tok.loc,
.extra = .{ .str = "GCC diagnostic" },
}, diagnostic_tok.expansionSlice());
},
else => |e| return e,
};
if (!mem.startsWith(u8, str, "-W")) {
const next = pp.tokens.get(start_idx + 1);
return pp.comp.diag.add(.{
.tag = .malformed_warning_check,
.loc = next.loc,
.extra = .{ .str = "GCC diagnostic" },
}, next.expansionSlice());
}
const new_kind = switch (diagnostic) {
.ignored => Diagnostics.Kind.off,
.warning => Diagnostics.Kind.warning,
.@"error" => Diagnostics.Kind.@"error",
.fatal => Diagnostics.Kind.@"fatal error",
else => unreachable,
};
try pp.comp.diag.set(str[2..], new_kind);
},
.push => try self.options_stack.append(pp.comp.gpa, pp.comp.diag.options),
.pop => pp.comp.diag.options = self.options_stack.popOrNull() orelse self.original_options,
}
}
fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
var self = @fieldParentPtr(GCC, "pragma", pragma);
const directive_tok = pp.tokens.get(start_idx + 1);
if (directive_tok.id == .nl) return;
const gcc_pragma = std.meta.stringToEnum(Directive, pp.expandedSlice(directive_tok)) orelse
return pp.comp.diag.add(.{
.tag = .unknown_gcc_pragma,
.loc = directive_tok.loc,
}, directive_tok.expansionSlice());
switch (gcc_pragma) {
.warning, .@"error" => {
const text = Pragma.pasteTokens(pp, start_idx + 2) catch |err| switch (err) {
error.ExpectedStringLiteral => {
return pp.comp.diag.add(.{
.tag = .pragma_requires_string_literal,
.loc = directive_tok.loc,
.extra = .{ .str = @tagName(gcc_pragma) },
}, directive_tok.expansionSlice());
},
else => |e| return e,
};
const extra = Diagnostics.Message.Extra{ .str = try pp.comp.diag.arena.allocator().dupe(u8, text) };
const diagnostic_tag: Diagnostics.Tag = if (gcc_pragma == .warning) .pragma_warning_message else .pragma_error_message;
return pp.comp.diag.add(
.{ .tag = diagnostic_tag, .loc = directive_tok.loc, .extra = extra },
directive_tok.expansionSlice(),
);
},
.diagnostic => return self.diagnosticHandler(pp, start_idx + 2) catch |err| switch (err) {
error.UnknownPragma => {
const tok = pp.tokens.get(start_idx + 2);
return pp.comp.diag.add(.{
.tag = .unknown_gcc_pragma_directive,
.loc = tok.loc,
}, tok.expansionSlice());
},
else => |e| return e,
},
.poison => {
var i: usize = 2;
while (true) : (i += 1) {
const tok = pp.tokens.get(start_idx + i);
if (tok.id == .nl) break;
if (!tok.id.isMacroIdentifier()) {
return pp.comp.diag.add(.{
.tag = .pragma_poison_identifier,
.loc = tok.loc,
}, tok.expansionSlice());
}
const str = pp.expandedSlice(tok);
if (pp.defines.get(str) != null) {
try pp.comp.diag.add(.{
.tag = .pragma_poison_macro,
.loc = tok.loc,
}, tok.expansionSlice());
}
try pp.poisoned_identifiers.put(str, {});
}
return;
},
}
}
fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Error!void {
var self = @fieldParentPtr(GCC, "pragma", pragma);
const directive_tok = p.pp.tokens.get(start_idx + 1);
if (directive_tok.id == .nl) return;
const name = p.pp.expandedSlice(directive_tok);
if (mem.eql(u8, name, "diagnostic")) {
return self.diagnosticHandler(p.pp, start_idx + 2) catch |err| switch (err) {
error.UnknownPragma => {}, // handled during preprocessing
error.StopPreprocessing => unreachable, // Only used by #pragma once
else => |e| return e,
};
}
}
fn preserveTokens(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
const next = pp.tokens.get(start_idx + 1);
if (next.id != .nl) {
const name = pp.expandedSlice(next);
if (mem.eql(u8, name, "poison")) {
return false;
}
}
return true;
}

50
deps/aro/pragmas/message.zig vendored Normal file
View File

@ -0,0 +1,50 @@
const std = @import("std");
const mem = std.mem;
const Compilation = @import("../Compilation.zig");
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig");
const TokenIndex = @import("../Tree.zig").TokenIndex;
const Source = @import("../Source.zig");
const Message = @This();
pragma: Pragma = .{
.deinit = deinit,
.preprocessorHandler = preprocessorHandler,
},
pub fn init(allocator: mem.Allocator) !*Pragma {
var once = try allocator.create(Message);
once.* = .{};
return &once.pragma;
}
fn deinit(pragma: *Pragma, comp: *Compilation) void {
var self = @fieldParentPtr(Message, "pragma", pragma);
comp.gpa.destroy(self);
}
fn preprocessorHandler(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
const message_tok = pp.tokens.get(start_idx);
const message_expansion_locs = message_tok.expansionSlice();
const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) {
error.ExpectedStringLiteral => {
return pp.comp.diag.add(.{
.tag = .pragma_requires_string_literal,
.loc = message_tok.loc,
.extra = .{ .str = "message" },
}, message_expansion_locs);
},
else => |e| return e,
};
const loc = if (message_expansion_locs.len != 0)
message_expansion_locs[message_expansion_locs.len - 1]
else
message_tok.loc;
const extra = Diagnostics.Message.Extra{ .str = try pp.comp.diag.arena.allocator().dupe(u8, str) };
return pp.comp.diag.add(.{ .tag = .pragma_message, .loc = loc, .extra = extra }, &.{});
}

56
deps/aro/pragmas/once.zig vendored Normal file
View File

@ -0,0 +1,56 @@
const std = @import("std");
const mem = std.mem;
const Compilation = @import("../Compilation.zig");
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig");
const TokenIndex = @import("../Tree.zig").TokenIndex;
const Source = @import("../Source.zig");
const Once = @This();
pragma: Pragma = .{
.afterParse = afterParse,
.deinit = deinit,
.preprocessorHandler = preprocessorHandler,
},
pragma_once: std.AutoHashMap(Source.Id, void),
preprocess_count: u32 = 0,
pub fn init(allocator: mem.Allocator) !*Pragma {
var once = try allocator.create(Once);
once.* = .{
.pragma_once = std.AutoHashMap(Source.Id, void).init(allocator),
};
return &once.pragma;
}
fn afterParse(pragma: *Pragma, _: *Compilation) void {
var self = @fieldParentPtr(Once, "pragma", pragma);
self.pragma_once.clearRetainingCapacity();
}
fn deinit(pragma: *Pragma, comp: *Compilation) void {
var self = @fieldParentPtr(Once, "pragma", pragma);
self.pragma_once.deinit();
comp.gpa.destroy(self);
}
fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
var self = @fieldParentPtr(Once, "pragma", pragma);
const name_tok = pp.tokens.get(start_idx);
const next = pp.tokens.get(start_idx + 1);
if (next.id != .nl) {
try pp.comp.diag.add(.{
.tag = .extra_tokens_directive_end,
.loc = name_tok.loc,
}, next.expansionSlice());
}
const seen = self.preprocess_count == pp.preprocess_count;
const prev = try self.pragma_once.fetchPut(name_tok.loc.id, {});
if (prev != null and !seen) {
return error.StopPreprocessing;
}
self.preprocess_count = pp.preprocess_count;
}
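// Illustrative sketch of the fetchPut-based "seen before" check used in
// preprocessorHandler above, simplified to a plain u32 key standing in for Source.Id
// and ignoring the preprocess_count bookkeeping.
test "fetchPut seen-before sketch" {
    var seen = std.AutoHashMap(u32, void).init(std.testing.allocator);
    defer seen.deinit();
    try std.testing.expect((try seen.fetchPut(1, {})) == null); // first occurrence
    try std.testing.expect((try seen.fetchPut(1, {})) != null); // already seen
}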

164
deps/aro/pragmas/pack.zig vendored Normal file
View File

@ -0,0 +1,164 @@
const std = @import("std");
const mem = std.mem;
const Compilation = @import("../Compilation.zig");
const Pragma = @import("../Pragma.zig");
const Diagnostics = @import("../Diagnostics.zig");
const Preprocessor = @import("../Preprocessor.zig");
const Parser = @import("../Parser.zig");
const Tree = @import("../Tree.zig");
const TokenIndex = Tree.TokenIndex;
const Pack = @This();
pragma: Pragma = .{
.deinit = deinit,
.parserHandler = parserHandler,
.preserveTokens = preserveTokens,
},
stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .{},
pub fn init(allocator: mem.Allocator) !*Pragma {
var pack = try allocator.create(Pack);
pack.* = .{};
return &pack.pragma;
}
fn deinit(pragma: *Pragma, comp: *Compilation) void {
var self = @fieldParentPtr(Pack, "pragma", pragma);
self.stack.deinit(comp.gpa);
comp.gpa.destroy(self);
}
fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Error!void {
var pack = @fieldParentPtr(Pack, "pragma", pragma);
var idx = start_idx + 1;
const l_paren = p.pp.tokens.get(idx);
if (l_paren.id != .l_paren) {
return p.pp.comp.diag.add(.{
.tag = .pragma_pack_lparen,
.loc = l_paren.loc,
}, l_paren.expansionSlice());
}
idx += 1;
// TODO -fapple-pragma-pack -fxl-pragma-pack
const apple_or_xl = false;
const tok_ids = p.pp.tokens.items(.id);
const arg = idx;
switch (tok_ids[arg]) {
.identifier => {
idx += 1;
const Action = enum {
show,
push,
pop,
};
const action = std.meta.stringToEnum(Action, p.tokSlice(arg)) orelse {
return p.errTok(.pragma_pack_unknown_action, arg);
};
switch (action) {
.show => {
try p.errExtra(.pragma_pack_show, arg, .{ .unsigned = p.pragma_pack orelse 8 });
},
.push, .pop => {
var new_val: ?u8 = null;
var label: ?[]const u8 = null;
if (tok_ids[idx] == .comma) {
idx += 1;
const next = idx;
idx += 1;
switch (tok_ids[next]) {
.pp_num => new_val = (try packInt(p, next)) orelse return,
.identifier => {
label = p.tokSlice(next);
if (tok_ids[idx] == .comma) {
idx += 1;
const int = idx;
idx += 1;
if (tok_ids[int] != .pp_num) return p.errTok(.pragma_pack_int_ident, int);
new_val = (try packInt(p, int)) orelse return;
}
},
else => return p.errTok(.pragma_pack_int_ident, next),
}
}
if (action == .push) {
try pack.stack.append(p.pp.comp.gpa, .{ .label = label orelse "", .val = p.pragma_pack orelse 8 });
} else {
pack.pop(p, label);
if (new_val != null) {
try p.errTok(.pragma_pack_undefined_pop, arg);
} else if (pack.stack.items.len == 0) {
try p.errTok(.pragma_pack_empty_stack, arg);
}
}
if (new_val) |some| {
p.pragma_pack = some;
}
},
}
},
.r_paren => if (apple_or_xl) {
pack.pop(p, null);
} else {
p.pragma_pack = null;
},
.pp_num => {
const new_val = (try packInt(p, arg)) orelse return;
idx += 1;
if (apple_or_xl) {
try pack.stack.append(p.pp.comp.gpa, .{ .label = "", .val = p.pragma_pack });
}
p.pragma_pack = new_val;
},
else => {},
}
if (tok_ids[idx] != .r_paren) {
return p.errTok(.pragma_pack_rparen, idx);
}
}
fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 {
const res = p.parseNumberToken(tok_i) catch |err| switch (err) {
error.ParsingFailed => {
try p.errTok(.pragma_pack_int, tok_i);
return null;
},
else => |e| return e,
};
const int = if (res.val.tag == .int) res.val.getInt(u64) else 99;
switch (int) {
1, 2, 4, 8, 16 => return @intCast(int),
else => {
try p.errTok(.pragma_pack_int, tok_i);
return null;
},
}
}
fn pop(pack: *Pack, p: *Parser, maybe_label: ?[]const u8) void {
if (maybe_label) |label| {
var i = pack.stack.items.len;
while (i > 0) {
i -= 1;
if (std.mem.eql(u8, pack.stack.items[i].label, label)) {
const prev = pack.stack.orderedRemove(i);
p.pragma_pack = prev.val;
return;
}
}
} else {
const prev = pack.stack.popOrNull() orelse {
p.pragma_pack = 2;
return;
};
p.pragma_pack = prev.val;
}
}
fn preserveTokens(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
_ = pp;
_ = start_idx;
return true;
}

669
deps/aro/record_layout.zig vendored Normal file
View File

@ -0,0 +1,669 @@
//! Record layout code adapted from https://github.com/mahkoh/repr-c
//! Licensed under MIT license: https://github.com/mahkoh/repr-c/tree/master/repc/facade
const std = @import("std");
const Type = @import("Type.zig");
const Attribute = @import("Attribute.zig");
const Compilation = @import("Compilation.zig");
const Parser = @import("Parser.zig");
const Record = Type.Record;
const Field = Record.Field;
const TypeLayout = Type.TypeLayout;
const FieldLayout = Type.FieldLayout;
const target_util = @import("target.zig");
const BITS_PER_BYTE = 8;
const OngoingBitfield = struct {
size_bits: u64,
unused_size_bits: u64,
};
const SysVContext = struct {
/// Does the record have an __attribute__((packed)) annotation.
attr_packed: bool,
/// The value of #pragma pack(N) at the type level if any.
max_field_align_bits: ?u64,
/// The alignment of this record.
aligned_bits: u32,
is_union: bool,
/// The size of the record. This might not be a multiple of 8 if the record contains bit-fields.
/// For structs, this is also the offset of the first bit after the last field.
size_bits: u64,
/// non-null if the previous field was a non-zero-sized bit-field. Only used by MinGW.
ongoing_bitfield: ?OngoingBitfield,
comp: *const Compilation,
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext {
var pack_value: ?u64 = null;
if (pragma_pack) |pak| {
pack_value = pak * BITS_PER_BYTE;
}
var req_align: u29 = BITS_PER_BYTE;
if (ty.requestedAlignment(comp)) |aln| {
req_align = aln * BITS_PER_BYTE;
}
return SysVContext{
.attr_packed = ty.hasAttribute(.@"packed"),
.max_field_align_bits = pack_value,
.aligned_bits = req_align,
.is_union = ty.is(.@"union"),
.size_bits = 0,
.comp = comp,
.ongoing_bitfield = null,
};
}
fn layoutFields(self: *SysVContext, rec: *const Record) void {
for (rec.fields, 0..) |*fld, fld_indx| {
const type_layout = computeLayout(fld.ty, self.comp);
var field_attrs: ?[]const Attribute = null;
if (rec.field_attributes) |attrs| {
field_attrs = attrs[fld_indx];
}
if (self.comp.target.isMinGW()) {
fld.layout = self.layoutMinGWField(fld, field_attrs, type_layout);
} else {
if (fld.isRegularField()) {
fld.layout = self.layoutRegularField(field_attrs, type_layout);
} else {
fld.layout = self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth());
}
}
}
}
/// On MinGW the alignment of the field is calculated in the usual way except that the alignment of
/// the underlying type is ignored in three cases
/// - the field is packed
/// - the field is a bit-field and the previous field was a non-zero-sized bit-field with the same type size
/// - the field is a zero-sized bit-field and the previous field was not a non-zero-sized bit-field
/// See test case 0068.
fn ignoreTypeAlignment(is_attr_packed: bool, bit_width: ?u32, ongoing_bitfield: ?OngoingBitfield, fld_layout: TypeLayout) bool {
if (is_attr_packed) return true;
if (bit_width) |width| {
if (ongoing_bitfield) |ongoing| {
if (ongoing.size_bits == fld_layout.size_bits) return true;
} else {
if (width == 0) return true;
}
}
return false;
}
fn layoutMinGWField(
self: *SysVContext,
field: *const Field,
field_attrs: ?[]const Attribute,
field_layout: TypeLayout,
) FieldLayout {
const annotation_alignment_bits = BITS_PER_BYTE * (Type.annotationAlignment(self.comp, field_attrs) orelse 1);
const is_attr_packed = self.attr_packed or isPacked(field_attrs);
const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout);
var field_alignment_bits: u64 = field_layout.field_alignment_bits;
if (ignore_type_alignment) {
field_alignment_bits = BITS_PER_BYTE;
}
field_alignment_bits = @max(field_alignment_bits, annotation_alignment_bits);
if (self.max_field_align_bits) |bits| {
field_alignment_bits = @min(field_alignment_bits, bits);
}
// The field affects the record alignment in one of three cases
// - the field is a regular field
// - the field is a zero-width bit-field following a non-zero-width bit-field
// - the field is a non-zero-width bit-field and not packed.
// See test case 0069.
const update_record_alignment =
field.isRegularField() or
(field.specifiedBitWidth() == 0 and self.ongoing_bitfield != null) or
(field.specifiedBitWidth() != 0 and !is_attr_packed);
// If a field affects the alignment of a record, the alignment is calculated in the
// usual way except that __attribute__((packed)) is ignored on a zero-width bit-field.
// See test case 0068.
if (update_record_alignment) {
var ty_alignment_bits = field_layout.field_alignment_bits;
if (is_attr_packed and (field.isRegularField() or field.specifiedBitWidth() != 0)) {
ty_alignment_bits = BITS_PER_BYTE;
}
ty_alignment_bits = @max(ty_alignment_bits, annotation_alignment_bits);
if (self.max_field_align_bits) |bits| {
ty_alignment_bits = @intCast(@min(ty_alignment_bits, bits));
}
self.aligned_bits = @max(self.aligned_bits, ty_alignment_bits);
}
// NOTE: ty_alignment_bits and field_alignment_bits are different in the following case:
// Y = { size: 64, alignment: 64 }struct {
// { offset: 0, size: 1 }c { size: 8, alignment: 8 }char:1,
// @attr_packed _ { size: 64, alignment: 64 }long long:0,
// { offset: 8, size: 8 }d { size: 8, alignment: 8 }char,
// }
if (field.isRegularField()) {
return self.layoutRegularFieldMinGW(field_layout.size_bits, field_alignment_bits);
} else {
return self.layoutBitFieldMinGW(field_layout.size_bits, field_alignment_bits, field.isNamed(), field.specifiedBitWidth());
}
}
fn layoutBitFieldMinGW(
self: *SysVContext,
ty_size_bits: u64,
field_alignment_bits: u64,
is_named: bool,
width: u64,
) FieldLayout {
std.debug.assert(width <= ty_size_bits); // validated in parser
// In a union, the size of the underlying type does not affect the size of the union.
// See test case 0070.
if (self.is_union) {
self.size_bits = @max(self.size_bits, width);
if (!is_named) return .{};
return .{
.offset_bits = 0,
.size_bits = width,
};
}
if (width == 0) {
self.ongoing_bitfield = null;
} else {
// If there is an ongoing bit-field in a struct whose underlying type has the same size and
// if there is enough space left to place this bit-field, then this bit-field is placed in
// the ongoing bit-field and the size of the struct is not affected by this
// bit-field. See test case 0037.
if (self.ongoing_bitfield) |*ongoing| {
if (ongoing.size_bits == ty_size_bits and ongoing.unused_size_bits >= width) {
const offset_bits = self.size_bits - ongoing.unused_size_bits;
ongoing.unused_size_bits -= width;
if (!is_named) return .{};
return .{
.offset_bits = offset_bits,
.size_bits = width,
};
}
}
// Otherwise this field is part of a new ongoing bit-field.
self.ongoing_bitfield = .{
.size_bits = ty_size_bits,
.unused_size_bits = ty_size_bits - width,
};
}
const offset_bits = std.mem.alignForward(u64, self.size_bits, field_alignment_bits);
self.size_bits = if (width == 0) offset_bits else offset_bits + ty_size_bits;
if (!is_named) return .{};
return .{
.offset_bits = offset_bits,
.size_bits = width,
};
}
fn layoutRegularFieldMinGW(
self: *SysVContext,
ty_size_bits: u64,
field_alignment_bits: u64,
) FieldLayout {
self.ongoing_bitfield = null;
// A struct field starts at the next offset in the struct that is properly
// aligned with respect to the start of the struct. See test case 0033.
// A union field always starts at offset 0.
const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, field_alignment_bits);
// Set the size of the record to the maximum of the current size and the end of
// the field. See test case 0034.
self.size_bits = @max(self.size_bits, offset_bits + ty_size_bits);
return .{
.offset_bits = offset_bits,
.size_bits = ty_size_bits,
};
}
fn layoutRegularField(
self: *SysVContext,
fld_attrs: ?[]const Attribute,
fld_layout: TypeLayout,
) FieldLayout {
var fld_align_bits = fld_layout.field_alignment_bits;
// If the struct or the field is packed, then the alignment of the underlying type is
// ignored. See test case 0084.
if (self.attr_packed or isPacked(fld_attrs)) {
fld_align_bits = BITS_PER_BYTE;
}
// The field alignment can be increased by __attribute__((aligned)) annotations on the
// field. See test case 0085.
if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| {
fld_align_bits = @max(fld_align_bits, anno * BITS_PER_BYTE);
}
// #pragma pack takes precedence over all other attributes. See test cases 0084 and
// 0085.
if (self.max_field_align_bits) |req_bits| {
fld_align_bits = @intCast(@min(fld_align_bits, req_bits));
}
// A struct field starts at the next offset in the struct that is properly
// aligned with respect to the start of the struct.
const offset_bits = if (self.is_union) 0 else std.mem.alignForward(u64, self.size_bits, fld_align_bits);
const size_bits = fld_layout.size_bits;
// The alignment of a record is the maximum of its field alignments. See test cases
// 0084, 0085, 0086.
self.size_bits = @max(self.size_bits, offset_bits + size_bits);
self.aligned_bits = @max(self.aligned_bits, fld_align_bits);
return .{
.offset_bits = offset_bits,
.size_bits = size_bits,
};
}
fn layoutBitField(
self: *SysVContext,
fld_attrs: ?[]const Attribute,
fld_layout: TypeLayout,
is_named: bool,
bit_width: u64,
) FieldLayout {
const ty_size_bits = fld_layout.size_bits;
var ty_fld_algn_bits: u32 = fld_layout.field_alignment_bits;
if (bit_width > 0) {
std.debug.assert(bit_width <= ty_size_bits); // Checked in parser
// Some targets ignore the alignment of the underlying type when laying out
// non-zero-sized bit-fields. See test case 0072. On such targets, bit-fields never
// cross a storage boundary. See test case 0081.
if (target_util.ignoreNonZeroSizedBitfieldTypeAlignment(self.comp.target)) {
ty_fld_algn_bits = 1;
}
} else {
// Some targets ignore the alignment of the underlying type when laying out
// zero-sized bit-fields. See test case 0073.
if (target_util.ignoreZeroSizedBitfieldTypeAlignment(self.comp.target)) {
ty_fld_algn_bits = 1;
}
// Some targets have a minimum alignment for zero-sized bit-fields. See test case
// 0074.
if (target_util.minZeroWidthBitfieldAlignment(self.comp.target)) |target_align| {
ty_fld_algn_bits = @max(ty_fld_algn_bits, target_align);
}
}
// __attribute__((packed)) on the record is identical to __attribute__((packed)) on each
// field. See test case 0067.
const attr_packed = self.attr_packed or isPacked(fld_attrs);
const has_packing_annotation = attr_packed or self.max_field_align_bits != null;
const annotation_alignment: u32 = if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| anno * BITS_PER_BYTE else 1;
const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits;
var field_align_bits: u64 = 1;
if (bit_width == 0) {
field_align_bits = @max(ty_fld_algn_bits, annotation_alignment);
} else if (self.comp.langopts.emulate == .gcc) {
// On GCC, the field alignment is at least the alignment requested by annotations
// except as restricted by #pragma pack. See test case 0083.
field_align_bits = annotation_alignment;
if (self.max_field_align_bits) |max_bits| {
field_align_bits = @min(annotation_alignment, max_bits);
}
// On GCC, if there are no packing annotations and
// - the field would otherwise start at an offset such that it would cross a
// storage boundary or
// - the alignment of the type is larger than its size,
// then it is aligned to the type's field alignment. See test case 0083.
if (!has_packing_annotation) {
const start_bit = std.mem.alignForward(u64, first_unused_bit, field_align_bits);
const does_field_cross_boundary = start_bit % ty_fld_algn_bits + bit_width > ty_size_bits;
if (ty_fld_algn_bits > ty_size_bits or does_field_cross_boundary) {
field_align_bits = @max(field_align_bits, ty_fld_algn_bits);
}
}
} else {
std.debug.assert(self.comp.langopts.emulate == .clang);
// On Clang, the alignment requested by annotations is not respected if it is
// larger than the value of #pragma pack. See test case 0083.
if (annotation_alignment <= self.max_field_align_bits orelse std.math.maxInt(u29)) {
field_align_bits = @max(field_align_bits, annotation_alignment);
}
// On Clang, if there are no packing annotations and the field would cross a
// storage boundary if it were positioned at the first unused bit in the record,
// it is aligned to the type's field alignment. See test case 0083.
if (!has_packing_annotation) {
const does_field_cross_boundary = first_unused_bit % ty_fld_algn_bits + bit_width > ty_size_bits;
if (does_field_cross_boundary)
field_align_bits = @max(field_align_bits, ty_fld_algn_bits);
}
}
const offset_bits = std.mem.alignForward(u64, first_unused_bit, field_align_bits);
self.size_bits = @max(self.size_bits, offset_bits + bit_width);
// Unnamed fields do not contribute to the record alignment except on a few targets.
// See test case 0079.
if (is_named or target_util.unnamedFieldAffectsAlignment(self.comp.target)) {
var inherited_align_bits: u32 = undefined;
if (bit_width == 0) {
// If the width is 0, #pragma pack and __attribute__((packed)) are ignored.
// See test case 0075.
inherited_align_bits = @max(ty_fld_algn_bits, annotation_alignment);
} else if (self.max_field_align_bits) |max_align_bits| {
// Otherwise, if a #pragma pack is in effect, __attribute__((packed)) on the field or
// record is ignored. See test case 0076.
inherited_align_bits = @max(ty_fld_algn_bits, annotation_alignment);
inherited_align_bits = @intCast(@min(inherited_align_bits, max_align_bits));
} else if (attr_packed) {
// Otherwise, if the field or the record is packed, the field alignment is 1 bit unless
// it is explicitly increased with __attribute__((aligned)). See test case 0077.
inherited_align_bits = annotation_alignment;
} else {
// Otherwise, the field alignment is the field alignment of the underlying type unless
// it is explicitly increased with __attribute__((aligned)). See test case 0078.
inherited_align_bits = @max(ty_fld_algn_bits, annotation_alignment);
}
self.aligned_bits = @max(self.aligned_bits, inherited_align_bits);
}
if (!is_named) return .{};
return .{
.size_bits = bit_width,
.offset_bits = offset_bits,
};
}
};
const MsvcContext = struct {
req_align_bits: u32,
max_field_align_bits: ?u32,
/// The alignment of pointers that point to an object of this type. This is greater than or equal
/// to the required alignment. Once all fields have been laid out, the size of the record will be
/// rounded up to this value.
pointer_align_bits: u32,
/// The alignment of this type when it is used as a record field. This is greater than or equal to
/// the pointer alignment.
field_align_bits: u32,
size_bits: u64,
ongoing_bitfield: ?OngoingBitfield,
contains_non_bitfield: bool,
is_union: bool,
comp: *const Compilation,
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) MsvcContext {
var pack_value: ?u32 = null;
if (ty.hasAttribute(.@"packed")) {
// __attribute__((packed)) behaves like #pragma pack(1) in clang. See test case 0056.
pack_value = BITS_PER_BYTE;
}
if (pack_value == null) {
if (pragma_pack) |pack| {
pack_value = pack * BITS_PER_BYTE;
}
}
if (pack_value) |pack| {
pack_value = msvcPragmaPack(comp, pack);
}
// The required alignment can be increased by adding a __declspec(align)
// annotation. See test case 0023.
var must_align: u29 = BITS_PER_BYTE;
if (ty.requestedAlignment(comp)) |req_align| {
must_align = req_align * BITS_PER_BYTE;
}
return MsvcContext{
.req_align_bits = must_align,
.pointer_align_bits = must_align,
.field_align_bits = must_align,
.size_bits = 0,
.max_field_align_bits = pack_value,
.ongoing_bitfield = null,
.contains_non_bitfield = false,
.is_union = ty.is(.@"union"),
.comp = comp,
};
}
fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) FieldLayout {
const type_layout = computeLayout(fld.ty, self.comp);
// The required alignment of the field is the maximum of the required alignment of the
// underlying type and the __declspec(align) annotation on the field itself.
// See test case 0028.
var req_align = type_layout.required_alignment_bits;
if (Type.annotationAlignment(self.comp, fld_attrs)) |anno| {
req_align = @max(anno * BITS_PER_BYTE, req_align);
}
// The required alignment of a record is the maximum of the required alignments of its
// fields except that the required alignment of bitfields is ignored.
// See test case 0029.
if (fld.isRegularField()) {
self.req_align_bits = @max(self.req_align_bits, req_align);
}
// The offset of the field is based on the field alignment of the underlying type.
// See test case 0027.
var fld_align_bits = type_layout.field_alignment_bits;
if (self.max_field_align_bits) |max_align| {
fld_align_bits = @min(fld_align_bits, max_align);
}
// check the requested alignment of the field type.
if (fld.ty.requestedAlignment(self.comp)) |type_req_align| {
fld_align_bits = @max(fld_align_bits, type_req_align * 8);
}
if (isPacked(fld_attrs)) {
// __attribute__((packed)) on a field is a clang extension. It behaves as if #pragma
// pack(1) had been applied only to this field. See test case 0057.
fld_align_bits = BITS_PER_BYTE;
}
// __attribute__((packed)) on a field is a clang extension. It behaves as if #pragma
// pack(1) had been applied only to this field. See test case 0057.
fld_align_bits = @max(fld_align_bits, req_align);
if (fld.isRegularField()) {
return self.layoutRegularField(type_layout.size_bits, fld_align_bits);
} else {
return self.layoutBitField(type_layout.size_bits, fld_align_bits, fld.specifiedBitWidth());
}
}
fn layoutBitField(self: *MsvcContext, ty_size_bits: u64, field_align: u32, bit_width: u32) FieldLayout {
if (bit_width == 0) {
// A zero-sized bit-field that does not follow a non-zero-sized bit-field does not affect
// the overall layout of the record. Even in a union where the order would otherwise
// not matter. See test case 0035.
if (self.ongoing_bitfield) |_| {
self.ongoing_bitfield = null;
} else {
// this field takes 0 space.
return .{ .offset_bits = self.size_bits, .size_bits = bit_width };
}
} else {
std.debug.assert(bit_width <= ty_size_bits);
// If there is an ongoing bit-field in a struct whose underlying type has the same size and
// if there is enough space left to place this bit-field, then this bit-field is placed in
// the ongoing bit-field and the overall layout of the struct is not affected by this
// bit-field. See test case 0037.
if (!self.is_union) {
if (self.ongoing_bitfield) |*p| {
if (p.size_bits == ty_size_bits and p.unused_size_bits >= bit_width) {
const offset_bits = self.size_bits - p.unused_size_bits;
p.unused_size_bits -= bit_width;
return .{ .offset_bits = offset_bits, .size_bits = bit_width };
}
}
}
// Otherwise this field is part of a new ongoing bit-field.
self.ongoing_bitfield = .{ .size_bits = ty_size_bits, .unused_size_bits = ty_size_bits - bit_width };
}
const offset_bits = if (!self.is_union) bits: {
// This is the one place in the layout of a record where the pointer alignment might
// get assigned a smaller value than the field alignment. This can only happen if
// the field or the type of the field has a required alignment. Otherwise the value
// of field_alignment_bits is already bound by max_field_alignment_bits.
// See test case 0038.
const p_align = if (self.max_field_align_bits) |max_fld_align|
@min(max_fld_align, field_align)
else
field_align;
self.pointer_align_bits = @max(self.pointer_align_bits, p_align);
self.field_align_bits = @max(self.field_align_bits, field_align);
const offset_bits = std.mem.alignForward(u64, self.size_bits, field_align);
self.size_bits = if (bit_width == 0) offset_bits else offset_bits + ty_size_bits;
break :bits offset_bits;
} else bits: {
// Bit-fields do not affect the alignment of a union. See test case 0041.
self.size_bits = @max(self.size_bits, ty_size_bits);
break :bits 0;
};
return .{ .offset_bits = offset_bits, .size_bits = bit_width };
}
fn layoutRegularField(self: *MsvcContext, size_bits: u64, field_align: u32) FieldLayout {
self.contains_non_bitfield = true;
self.ongoing_bitfield = null;
// The alignment of the field affects both the pointer alignment and the field
// alignment of the record. See test case 0032.
self.pointer_align_bits = @max(self.pointer_align_bits, field_align);
self.field_align_bits = @max(self.field_align_bits, field_align);
const offset_bits = switch (self.is_union) {
true => 0,
false => std.mem.alignForward(u64, self.size_bits, field_align),
};
self.size_bits = @max(self.size_bits, offset_bits + size_bits);
return .{ .offset_bits = offset_bits, .size_bits = size_bits };
}
fn handleZeroSizedRecord(self: *MsvcContext) void {
if (self.is_union) {
// MSVC does not allow unions without fields.
// If all fields in a union have size 0, the size of the union is set to
// - its field alignment if it contains at least one non-bitfield
// - 4 bytes if it contains only bitfields
// See test case 0025.
if (self.contains_non_bitfield) {
self.size_bits = self.field_align_bits;
} else {
self.size_bits = 4 * BITS_PER_BYTE;
}
} else {
// If all fields in a struct have size 0, its size is set to its required alignment
// but at least to 4 bytes. See test case 0026.
self.size_bits = @max(self.req_align_bits, 4 * BITS_PER_BYTE);
self.pointer_align_bits = @intCast(@min(self.pointer_align_bits, self.size_bits));
}
}
};
pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) void {
switch (comp.langopts.emulate) {
.gcc, .clang => {
var context = SysVContext.init(ty, comp, pragma_pack);
context.layoutFields(rec);
context.size_bits = std.mem.alignForward(u64, context.size_bits, context.aligned_bits);
rec.type_layout = .{
.size_bits = context.size_bits,
.field_alignment_bits = context.aligned_bits,
.pointer_alignment_bits = context.aligned_bits,
.required_alignment_bits = BITS_PER_BYTE,
};
},
.msvc => {
var context = MsvcContext.init(ty, comp, pragma_pack);
for (rec.fields, 0..) |*fld, fld_indx| {
var field_attrs: ?[]const Attribute = null;
if (rec.field_attributes) |attrs| {
field_attrs = attrs[fld_indx];
}
fld.layout = context.layoutField(fld, field_attrs);
}
if (context.size_bits == 0) {
// As an extension, MSVC allows records that only contain zero-sized bitfields and empty
// arrays. Such records would be zero-sized but this case is handled here separately to
// ensure that there are no zero-sized records.
context.handleZeroSizedRecord();
}
context.size_bits = std.mem.alignForward(u64, context.size_bits, context.pointer_align_bits);
rec.type_layout = .{
.size_bits = context.size_bits,
.field_alignment_bits = context.field_align_bits,
.pointer_alignment_bits = context.pointer_align_bits,
.required_alignment_bits = context.req_align_bits,
};
},
}
}
fn computeLayout(ty: Type, comp: *const Compilation) TypeLayout {
if (ty.getRecord()) |rec| {
const requested = BITS_PER_BYTE * (ty.requestedAlignment(comp) orelse 0);
return .{
.size_bits = rec.type_layout.size_bits,
.pointer_alignment_bits = @max(requested, rec.type_layout.pointer_alignment_bits),
.field_alignment_bits = @max(requested, rec.type_layout.field_alignment_bits),
.required_alignment_bits = rec.type_layout.required_alignment_bits,
};
} else {
const type_align = ty.alignof(comp) * BITS_PER_BYTE;
return .{
.size_bits = ty.bitSizeof(comp) orelse 0,
.pointer_alignment_bits = type_align,
.field_alignment_bits = type_align,
.required_alignment_bits = BITS_PER_BYTE,
};
}
}
fn isPacked(attrs: ?[]const Attribute) bool {
const a = attrs orelse return false;
for (a) |attribute| {
if (attribute.tag != .@"packed") continue;
return true;
}
return false;
}
// The effect of #pragma pack(N) depends on the target.
//
// x86: By default, there is no maximum field alignment. N={1,2,4} set the maximum field
// alignment to that value. All other N activate the default.
// x64: By default, there is no maximum field alignment. N={1,2,4,8} set the maximum field
// alignment to that value. All other N activate the default.
// arm: By default, the maximum field alignment is 8. N={1,2,4,8,16} set the maximum field
// alignment to that value. All other N activate the default.
// arm64: By default, the maximum field alignment is 8. N={1,2,4,8} set the maximum field
// alignment to that value. N=16 disables the maximum field alignment. All other N
// activate the default.
//
// See test case 0020.
pub fn msvcPragmaPack(comp: *const Compilation, pack: u32) ?u32 {
return switch (pack) {
8, 16, 32 => pack,
64 => if (comp.target.cpu.arch == .x86) null else pack,
128 => if (comp.target.cpu.arch == .thumb) pack else null,
else => {
return switch (comp.target.cpu.arch) {
.thumb, .aarch64 => 64,
else => null,
};
},
};
}
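A minimal usage sketch of the pragma-pack mapping above (editor illustration, not part of this commit; the import paths and the assumption that this file is aro's record_layout.zig are hypothetical):
const std = @import("std");
const record_layout = @import("record_layout.zig"); // assumed path for the file above
const Compilation = @import("Compilation.zig"); // assumed adjacent import
test "msvcPragmaPack sketch" {
    var comp: Compilation = undefined;
    comp.target.cpu.arch = .x86_64;
    // #pragma pack(8) is 64 bits; x64 accepts 8 as a maximum field alignment.
    try std.testing.expectEqual(@as(?u32, 64), record_layout.msvcPragmaPack(&comp, 64));
    comp.target.cpu.arch = .x86;
    // 32-bit x86 only accepts 1, 2 and 4, so pack(8) falls back to the default (null).
    try std.testing.expectEqual(@as(?u32, null), record_layout.msvcPragmaPack(&comp, 64));
}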

810
deps/aro/target.zig vendored Normal file
View File

@ -0,0 +1,810 @@
const std = @import("std");
const LangOpts = @import("LangOpts.zig");
const Type = @import("Type.zig");
const llvm = @import("zig").codegen.llvm;
const TargetSet = @import("builtins/Properties.zig").TargetSet;
/// intmax_t for this target
pub fn intMaxType(target: std.Target) Type {
switch (target.cpu.arch) {
.aarch64,
.aarch64_be,
.sparc64,
=> if (target.os.tag != .openbsd) return .{ .specifier = .long },
.bpfel,
.bpfeb,
.loongarch64,
.riscv64,
.powerpc64,
.powerpc64le,
.tce,
.tcele,
.ve,
=> return .{ .specifier = .long },
.x86_64 => switch (target.os.tag) {
.windows, .openbsd => {},
else => switch (target.abi) {
.gnux32, .muslx32 => {},
else => return .{ .specifier = .long },
},
},
else => {},
}
return .{ .specifier = .long_long };
}
/// intptr_t for this target
pub fn intPtrType(target: std.Target) Type {
switch (target.os.tag) {
.haiku => return .{ .specifier = .long },
.nacl => return .{ .specifier = .int },
else => {},
}
switch (target.cpu.arch) {
.aarch64, .aarch64_be => switch (target.os.tag) {
.windows => return .{ .specifier = .long_long },
else => {},
},
.msp430,
.csky,
.loongarch32,
.riscv32,
.xcore,
.hexagon,
.tce,
.tcele,
.m68k,
.spir,
.spirv32,
.arc,
.avr,
=> return .{ .specifier = .int },
.sparc, .sparcel => switch (target.os.tag) {
.netbsd, .openbsd => {},
else => return .{ .specifier = .int },
},
.powerpc, .powerpcle => switch (target.os.tag) {
.linux, .freebsd, .netbsd => return .{ .specifier = .int },
else => {},
},
// 32-bit x86 Darwin, OpenBSD, and RTEMS use long (the default); others use int
.x86 => switch (target.os.tag) {
.openbsd, .rtems => {},
else => if (!target.os.tag.isDarwin()) return .{ .specifier = .int },
},
.x86_64 => switch (target.os.tag) {
.windows => return .{ .specifier = .long_long },
else => switch (target.abi) {
.gnux32, .muslx32 => return .{ .specifier = .int },
else => {},
},
},
else => {},
}
return .{ .specifier = .long };
}
/// int16_t for this target
pub fn int16Type(target: std.Target) Type {
return switch (target.cpu.arch) {
.avr => .{ .specifier = .int },
else => .{ .specifier = .short },
};
}
/// int64_t for this target
pub fn int64Type(target: std.Target) Type {
switch (target.cpu.arch) {
.loongarch64,
.ve,
.riscv64,
.powerpc64,
.powerpc64le,
.bpfel,
.bpfeb,
=> return .{ .specifier = .long },
.sparc64 => return intMaxType(target),
.x86, .x86_64 => if (!target.isDarwin()) return intMaxType(target),
.aarch64, .aarch64_be => if (!target.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .{ .specifier = .long },
else => {},
}
return .{ .specifier = .long_long };
}
/// This function returns 1 if function alignment is not observable or settable.
pub fn defaultFunctionAlignment(target: std.Target) u8 {
return switch (target.cpu.arch) {
.arm, .armeb => 4,
.aarch64, .aarch64_32, .aarch64_be => 4,
.sparc, .sparcel, .sparc64 => 4,
.riscv64 => 2,
else => 1,
};
}
pub fn isTlsSupported(target: std.Target) bool {
if (target.isDarwin()) {
var supported = false;
switch (target.os.tag) {
.macos => supported = !(target.os.isAtLeast(.macos, .{ .major = 10, .minor = 7, .patch = 0 }) orelse false),
else => {},
}
return supported;
}
return switch (target.cpu.arch) {
.tce, .tcele, .bpfel, .bpfeb, .msp430, .nvptx, .nvptx64, .x86, .arm, .armeb, .thumb, .thumbeb => false,
else => true,
};
}
pub fn ignoreNonZeroSizedBitfieldTypeAlignment(target: std.Target) bool {
switch (target.cpu.arch) {
.avr => return true,
.arm => {
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
switch (target.os.tag) {
.ios => return true,
else => return false,
}
}
},
else => return false,
}
return false;
}
pub fn ignoreZeroSizedBitfieldTypeAlignment(target: std.Target) bool {
switch (target.cpu.arch) {
.avr => return true,
else => return false,
}
}
pub fn minZeroWidthBitfieldAlignment(target: std.Target) ?u29 {
switch (target.cpu.arch) {
.avr => return 8,
.arm => {
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
switch (target.os.tag) {
.ios => return 32,
else => return null,
}
} else return null;
},
else => return null,
}
}
pub fn unnamedFieldAffectsAlignment(target: std.Target) bool {
switch (target.cpu.arch) {
.aarch64 => {
if (target.isDarwin() or target.os.tag == .windows) return false;
return true;
},
.armeb => {
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
if (std.Target.Abi.default(target.cpu.arch, target.os) == .eabi) return true;
}
},
.arm => return true,
.avr => return true,
.thumb => {
if (target.os.tag == .windows) return false;
return true;
},
else => return false,
}
return false;
}
pub fn packAllEnums(target: std.Target) bool {
return switch (target.cpu.arch) {
.hexagon => true,
else => false,
};
}
/// Default alignment (in bytes) for __attribute__((aligned)) when no alignment is specified
pub fn defaultAlignment(target: std.Target) u29 {
switch (target.cpu.arch) {
.avr => return 1,
.arm => if (target.isAndroid() or target.os.tag == .ios) return 16 else return 8,
.sparc => if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) return 16 else return 8,
.mips, .mipsel => switch (target.abi) {
.none, .gnuabi64 => return 16,
else => return 8,
},
.s390x, .armeb, .thumbeb, .thumb => return 8,
else => return 16,
}
}
pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
// Android is Linux-based but its system compiler is Clang, not GCC, so these checks go first.
// The remaining checks are mostly documentation, since the function falls through to .clang.
if (target.isDarwin() or
target.isAndroid() or
target.isBSD() or
target.os.tag == .fuchsia or
target.os.tag == .solaris or
target.os.tag == .haiku or
target.cpu.arch == .hexagon)
{
return .clang;
}
if (target.os.tag == .uefi) return .msvc;
// this is before windows to grab WindowsGnu
if (target.abi.isGnu() or
target.os.tag == .linux)
{
return .gcc;
}
if (target.os.tag == .windows) {
return .msvc;
}
if (target.cpu.arch == .avr) return .gcc;
return .clang;
}
pub fn hasInt128(target: std.Target) bool {
if (target.cpu.arch == .wasm32) return true;
if (target.cpu.arch == .x86_64) return true;
return target.ptrBitWidth() >= 64;
}
pub fn hasHalfPrecisionFloatABI(target: std.Target) bool {
return switch (target.cpu.arch) {
.thumb, .thumbeb, .arm, .aarch64 => true,
else => false,
};
}
pub const FPSemantics = enum {
None,
IEEEHalf,
BFloat,
IEEESingle,
IEEEDouble,
IEEEQuad,
/// Minifloat 5-bit exponent 2-bit mantissa
E5M2,
/// Minifloat 4-bit exponent 3-bit mantissa
E4M3,
x87ExtendedDouble,
IBMExtendedDouble,
/// Only intended for generating float.h macros for the preprocessor
pub fn forType(ty: std.Target.CType, target: std.Target) FPSemantics {
std.debug.assert(ty == .float or ty == .double or ty == .longdouble);
return switch (target.c_type_bit_size(ty)) {
32 => .IEEESingle,
64 => .IEEEDouble,
80 => .x87ExtendedDouble,
128 => switch (target.cpu.arch) {
.powerpc, .powerpcle, .powerpc64, .powerpc64le => .IBMExtendedDouble,
else => .IEEEQuad,
},
else => unreachable,
};
}
pub fn halfPrecisionType(target: std.Target) ?FPSemantics {
switch (target.cpu.arch) {
.aarch64,
.aarch64_32,
.aarch64_be,
.arm,
.armeb,
.hexagon,
.riscv32,
.riscv64,
.spirv32,
.spirv64,
=> return .IEEEHalf,
.x86, .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .sse2)) return .IEEEHalf,
else => {},
}
return null;
}
pub fn chooseValue(self: FPSemantics, comptime T: type, values: [6]T) T {
return switch (self) {
.IEEEHalf => values[0],
.IEEESingle => values[1],
.IEEEDouble => values[2],
.x87ExtendedDouble => values[3],
.IBMExtendedDouble => values[4],
.IEEEQuad => values[5],
else => unreachable,
};
}
};
pub fn isLP64(target: std.Target) bool {
return target.c_type_bit_size(.int) == 32 and target.ptrBitWidth() == 64;
}
pub fn isKnownWindowsMSVCEnvironment(target: std.Target) bool {
return target.os.tag == .windows and target.abi == .msvc;
}
pub fn isWindowsMSVCEnvironment(target: std.Target) bool {
return target.os.tag == .windows and (target.abi == .msvc or target.abi == .none);
}
pub fn isCygwinMinGW(target: std.Target) bool {
return target.os.tag == .windows and (target.abi == .gnu or target.abi == .cygnus);
}
pub fn builtinEnabled(target: std.Target, enabled_for: TargetSet) bool {
var copy = enabled_for;
var it = copy.iterator();
while (it.next()) |val| {
switch (val) {
.basic => return true,
.x86_64 => if (target.cpu.arch == .x86_64) return true,
.aarch64 => if (target.cpu.arch == .aarch64) return true,
.arm => if (target.cpu.arch == .arm) return true,
.ppc => switch (target.cpu.arch) {
.powerpc, .powerpc64, .powerpc64le => return true,
else => {},
},
else => {
// Todo: handle other target predicates
},
}
}
return false;
}
pub fn defaultFpEvalMethod(target: std.Target) LangOpts.FPEvalMethod {
if (target.os.tag == .aix) return .double;
switch (target.cpu.arch) {
.x86, .x86_64 => {
if (target.ptrBitWidth() == 32 and target.os.tag == .netbsd) {
if (target.os.version_range.semver.min.order(.{ .major = 6, .minor = 99, .patch = 26 }) != .gt) {
// NetBSD <= 6.99.26 on 32-bit x86 defaults to double
return .double;
}
}
if (std.Target.x86.featureSetHas(target.cpu.features, .sse)) {
return .source;
}
return .extended;
},
else => {},
}
return .source;
}
/// Value of the `-m` flag for `ld` for this target
pub fn ldEmulationOption(target: std.Target, arm_endianness: ?std.builtin.Endian) ?[]const u8 {
return switch (target.cpu.arch) {
.x86 => if (target.os.tag == .elfiamcu) "elf_iamcu" else "elf_i386",
.arm,
.armeb,
.thumb,
.thumbeb,
=> switch (arm_endianness orelse target.cpu.arch.endian()) {
.Little => "armelf_linux_eabi",
.Big => "armelfb_linux_eabi",
},
.aarch64 => "aarch64linux",
.aarch64_be => "aarch64linuxb",
.m68k => "m68kelf",
.powerpc => if (target.os.tag == .linux) "elf32ppclinux" else "elf32ppc",
.powerpcle => if (target.os.tag == .linux) "elf32lppclinux" else "elf32lppc",
.powerpc64 => "elf64ppc",
.powerpc64le => "elf64lppc",
.riscv32 => "elf32lriscv",
.riscv64 => "elf64lriscv",
.sparc, .sparcel => "elf32_sparc",
.sparc64 => "elf64_sparc",
.loongarch32 => "elf32loongarch",
.loongarch64 => "elf64loongarch",
.mips => "elf32btsmip",
.mipsel => "elf32ltsmip",
.mips64 => if (target.abi == .gnuabin32) "elf32btsmipn32" else "elf64btsmip",
.mips64el => if (target.abi == .gnuabin32) "elf32ltsmipn32" else "elf64ltsmip",
.x86_64 => if (target.abi == .gnux32 or target.abi == .muslx32) "elf32_x86_64" else "elf_x86_64",
.ve => "elf64ve",
.csky => "cskyelf_linux",
else => null,
};
}
pub fn get32BitArchVariant(target: std.Target) ?std.Target {
var copy = target;
switch (target.cpu.arch) {
.amdgcn,
.avr,
.msp430,
.spu_2,
.ve,
.bpfel,
.bpfeb,
.s390x,
=> return null,
.arc,
.arm,
.armeb,
.csky,
.hexagon,
.m68k,
.le32,
.mips,
.mipsel,
.powerpc,
.powerpcle,
.r600,
.riscv32,
.sparc,
.sparcel,
.tce,
.tcele,
.thumb,
.thumbeb,
.x86,
.xcore,
.nvptx,
.amdil,
.hsail,
.spir,
.kalimba,
.shave,
.lanai,
.wasm32,
.renderscript32,
.aarch64_32,
.spirv32,
.loongarch32,
.dxil,
.xtensa,
=> {}, // Already 32 bit
.aarch64 => copy.cpu.arch = .arm,
.aarch64_be => copy.cpu.arch = .armeb,
.le64 => copy.cpu.arch = .le32,
.amdil64 => copy.cpu.arch = .amdil,
.nvptx64 => copy.cpu.arch = .nvptx,
.wasm64 => copy.cpu.arch = .wasm32,
.hsail64 => copy.cpu.arch = .hsail,
.spir64 => copy.cpu.arch = .spir,
.spirv64 => copy.cpu.arch = .spirv32,
.renderscript64 => copy.cpu.arch = .renderscript32,
.loongarch64 => copy.cpu.arch = .loongarch32,
.mips64 => copy.cpu.arch = .mips,
.mips64el => copy.cpu.arch = .mipsel,
.powerpc64 => copy.cpu.arch = .powerpc,
.powerpc64le => copy.cpu.arch = .powerpcle,
.riscv64 => copy.cpu.arch = .riscv32,
.sparc64 => copy.cpu.arch = .sparc,
.x86_64 => copy.cpu.arch = .x86,
}
return copy;
}
pub fn get64BitArchVariant(target: std.Target) ?std.Target {
var copy = target;
switch (target.cpu.arch) {
.arc,
.avr,
.csky,
.dxil,
.hexagon,
.kalimba,
.lanai,
.m68k,
.msp430,
.r600,
.shave,
.sparcel,
.spu_2,
.tce,
.tcele,
.xcore,
.xtensa,
=> return null,
.aarch64,
.aarch64_be,
.amdgcn,
.bpfeb,
.bpfel,
.le64,
.amdil64,
.nvptx64,
.wasm64,
.hsail64,
.spir64,
.spirv64,
.renderscript64,
.loongarch64,
.mips64,
.mips64el,
.powerpc64,
.powerpc64le,
.riscv64,
.s390x,
.sparc64,
.ve,
.x86_64,
=> {}, // Already 64 bit
.aarch64_32 => copy.cpu.arch = .aarch64,
.amdil => copy.cpu.arch = .amdil64,
.arm => copy.cpu.arch = .aarch64,
.armeb => copy.cpu.arch = .aarch64_be,
.hsail => copy.cpu.arch = .hsail64,
.le32 => copy.cpu.arch = .le64,
.loongarch32 => copy.cpu.arch = .loongarch64,
.mips => copy.cpu.arch = .mips64,
.mipsel => copy.cpu.arch = .mips64el,
.nvptx => copy.cpu.arch = .nvptx64,
.powerpc => copy.cpu.arch = .powerpc64,
.powerpcle => copy.cpu.arch = .powerpc64le,
.renderscript32 => copy.cpu.arch = .renderscript64,
.riscv32 => copy.cpu.arch = .riscv64,
.sparc => copy.cpu.arch = .sparc64,
.spir => copy.cpu.arch = .spir64,
.spirv32 => copy.cpu.arch = .spirv64,
.thumb => copy.cpu.arch = .aarch64,
.thumbeb => copy.cpu.arch = .aarch64_be,
.wasm32 => copy.cpu.arch = .wasm64,
.x86 => copy.cpu.arch = .x86_64,
}
return copy;
}
/// Adapted from Zig's src/codegen/llvm.zig
pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
// 64 bytes is assumed to be large enough to hold any target triple; increase if necessary
std.debug.assert(buf.len >= 64);
var stream = std.io.fixedBufferStream(buf);
const writer = stream.writer();
const llvm_arch = switch (target.cpu.arch) {
.arm => "arm",
.armeb => "armeb",
.aarch64 => "aarch64",
.aarch64_be => "aarch64_be",
.aarch64_32 => "aarch64_32",
.arc => "arc",
.avr => "avr",
.bpfel => "bpfel",
.bpfeb => "bpfeb",
.csky => "csky",
.dxil => "dxil",
.hexagon => "hexagon",
.loongarch32 => "loongarch32",
.loongarch64 => "loongarch64",
.m68k => "m68k",
.mips => "mips",
.mipsel => "mipsel",
.mips64 => "mips64",
.mips64el => "mips64el",
.msp430 => "msp430",
.powerpc => "powerpc",
.powerpcle => "powerpcle",
.powerpc64 => "powerpc64",
.powerpc64le => "powerpc64le",
.r600 => "r600",
.amdgcn => "amdgcn",
.riscv32 => "riscv32",
.riscv64 => "riscv64",
.sparc => "sparc",
.sparc64 => "sparc64",
.sparcel => "sparcel",
.s390x => "s390x",
.tce => "tce",
.tcele => "tcele",
.thumb => "thumb",
.thumbeb => "thumbeb",
.x86 => "i386",
.x86_64 => "x86_64",
.xcore => "xcore",
.xtensa => "xtensa",
.nvptx => "nvptx",
.nvptx64 => "nvptx64",
.le32 => "le32",
.le64 => "le64",
.amdil => "amdil",
.amdil64 => "amdil64",
.hsail => "hsail",
.hsail64 => "hsail64",
.spir => "spir",
.spir64 => "spir64",
.spirv32 => "spirv32",
.spirv64 => "spirv64",
.kalimba => "kalimba",
.shave => "shave",
.lanai => "lanai",
.wasm32 => "wasm32",
.wasm64 => "wasm64",
.renderscript32 => "renderscript32",
.renderscript64 => "renderscript64",
.ve => "ve",
// Note: spu_2 is not supported in LLVM; this is the Zig arch name
.spu_2 => "spu_2",
};
writer.writeAll(llvm_arch) catch unreachable;
writer.writeByte('-') catch unreachable;
const llvm_os = switch (target.os.tag) {
.freestanding => "unknown",
.ananas => "ananas",
.cloudabi => "cloudabi",
.dragonfly => "dragonfly",
.freebsd => "freebsd",
.fuchsia => "fuchsia",
.kfreebsd => "kfreebsd",
.linux => "linux",
.lv2 => "lv2",
.netbsd => "netbsd",
.openbsd => "openbsd",
.solaris => "solaris",
.windows => "windows",
.zos => "zos",
.haiku => "haiku",
.minix => "minix",
.rtems => "rtems",
.nacl => "nacl",
.aix => "aix",
.cuda => "cuda",
.nvcl => "nvcl",
.amdhsa => "amdhsa",
.ps4 => "ps4",
.ps5 => "ps5",
.elfiamcu => "elfiamcu",
.mesa3d => "mesa3d",
.contiki => "contiki",
.amdpal => "amdpal",
.hermit => "hermit",
.hurd => "hurd",
.wasi => "wasi",
.emscripten => "emscripten",
.uefi => "windows",
.macos => "macosx",
.ios => "ios",
.tvos => "tvos",
.watchos => "watchos",
.driverkit => "driverkit",
.shadermodel => "shadermodel",
.opencl,
.glsl450,
.vulkan,
.plan9,
.other,
=> "unknown",
};
writer.writeAll(llvm_os) catch unreachable;
if (target.os.tag.isDarwin()) {
const min_version = target.os.version_range.semver.min;
writer.print("{d}.{d}.{d}", .{
min_version.major,
min_version.minor,
min_version.patch,
}) catch unreachable;
}
writer.writeByte('-') catch unreachable;
const llvm_abi = switch (target.abi) {
.none => "unknown",
.gnu => "gnu",
.gnuabin32 => "gnuabin32",
.gnuabi64 => "gnuabi64",
.gnueabi => "gnueabi",
.gnueabihf => "gnueabihf",
.gnuf32 => "gnuf32",
.gnuf64 => "gnuf64",
.gnusf => "gnusf",
.gnux32 => "gnux32",
.gnuilp32 => "gnuilp32",
.code16 => "code16",
.eabi => "eabi",
.eabihf => "eabihf",
.android => "android",
.musl => "musl",
.musleabi => "musleabi",
.musleabihf => "musleabihf",
.muslx32 => "muslx32",
.msvc => "msvc",
.itanium => "itanium",
.cygnus => "cygnus",
.coreclr => "coreclr",
.simulator => "simulator",
.macabi => "macabi",
.pixel => "pixel",
.vertex => "vertex",
.geometry => "geometry",
.hull => "hull",
.domain => "domain",
.compute => "compute",
.library => "library",
.raygeneration => "raygeneration",
.intersection => "intersection",
.anyhit => "anyhit",
.closesthit => "closesthit",
.miss => "miss",
.callable => "callable",
.mesh => "mesh",
.amplification => "amplification",
};
writer.writeAll(llvm_abi) catch unreachable;
return stream.getWritten();
}
test "alignment functions - smoke test" {
var target: std.Target = undefined;
const x86 = std.Target.Cpu.Arch.x86_64;
target.cpu = std.Target.Cpu.baseline(x86);
target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86);
target.abi = std.Target.Abi.default(x86, target.os);
try std.testing.expect(isTlsSupported(target));
try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(target));
try std.testing.expect(minZeroWidthBitfieldAlignment(target) == null);
try std.testing.expect(!unnamedFieldAffectsAlignment(target));
try std.testing.expect(defaultAlignment(target) == 16);
try std.testing.expect(!packAllEnums(target));
try std.testing.expect(systemCompiler(target) == .gcc);
const arm = std.Target.Cpu.Arch.arm;
target.cpu = std.Target.Cpu.baseline(arm);
target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm);
target.abi = std.Target.Abi.default(arm, target.os);
try std.testing.expect(!isTlsSupported(target));
try std.testing.expect(ignoreNonZeroSizedBitfieldTypeAlignment(target));
try std.testing.expectEqual(@as(?u29, 32), minZeroWidthBitfieldAlignment(target));
try std.testing.expect(unnamedFieldAffectsAlignment(target));
try std.testing.expect(defaultAlignment(target) == 16);
try std.testing.expect(!packAllEnums(target));
try std.testing.expect(systemCompiler(target) == .clang);
}
test "target size/align tests" {
var comp: @import("Compilation.zig") = undefined;
const x86 = std.Target.Cpu.Arch.x86;
comp.target.cpu.arch = x86;
comp.target.cpu.model = &std.Target.x86.cpu.i586;
comp.target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86);
comp.target.abi = std.Target.Abi.gnu;
const tt: Type = .{
.specifier = .long_long,
};
try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp).?);
try std.testing.expectEqual(@as(u64, 4), tt.alignof(&comp));
const arm = std.Target.Cpu.Arch.arm;
comp.target.cpu = std.Target.Cpu.Model.toCpu(&std.Target.arm.cpu.cortex_r4, arm);
comp.target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm);
comp.target.abi = std.Target.Abi.none;
const ct: Type = .{
.specifier = .char,
};
try std.testing.expectEqual(true, std.Target.arm.featureSetHas(comp.target.cpu.features, .has_v7));
try std.testing.expectEqual(@as(u64, 1), ct.sizeof(&comp).?);
try std.testing.expectEqual(@as(u64, 1), ct.alignof(&comp));
try std.testing.expectEqual(true, ignoreNonZeroSizedBitfieldTypeAlignment(comp.target));
}

482
deps/aro/toolchains/Linux.zig vendored Normal file
View File

@ -0,0 +1,482 @@
const std = @import("std");
const mem = std.mem;
const Compilation = @import("../Compilation.zig");
const GCCDetector = @import("../Driver/GCCDetector.zig");
const Toolchain = @import("../Toolchain.zig");
const Driver = @import("../Driver.zig");
const Distro = @import("../Driver/Distro.zig");
const target_util = @import("../target.zig");
const system_defaults = @import("system_defaults");
const Linux = @This();
distro: Distro.Tag = .unknown,
extra_opts: std.ArrayListUnmanaged([]const u8) = .{},
gcc_detector: GCCDetector = .{},
pub fn discover(self: *Linux, tc: *Toolchain) !void {
self.distro = Distro.detect(tc.getTarget(), tc.filesystem);
try self.gcc_detector.discover(tc);
tc.selected_multilib = self.gcc_detector.selected;
try self.gcc_detector.appendToolPath(tc);
try self.buildExtraOpts(tc);
try self.findPaths(tc);
}
fn buildExtraOpts(self: *Linux, tc: *const Toolchain) !void {
const gpa = tc.driver.comp.gpa;
const target = tc.getTarget();
const is_android = target.isAndroid();
if (self.distro.isAlpine() or is_android) {
try self.extra_opts.ensureUnusedCapacity(gpa, 2);
self.extra_opts.appendAssumeCapacity("-z");
self.extra_opts.appendAssumeCapacity("now");
}
if (self.distro.isOpenSUSE() or self.distro.isUbuntu() or self.distro.isAlpine() or is_android) {
try self.extra_opts.ensureUnusedCapacity(gpa, 2);
self.extra_opts.appendAssumeCapacity("-z");
self.extra_opts.appendAssumeCapacity("relro");
}
if (target.cpu.arch.isARM() or target.cpu.arch.isAARCH64() or is_android) {
try self.extra_opts.ensureUnusedCapacity(gpa, 2);
self.extra_opts.appendAssumeCapacity("-z");
self.extra_opts.appendAssumeCapacity("max-page-size=4096");
}
if (target.cpu.arch == .arm or target.cpu.arch == .thumb) {
try self.extra_opts.append(gpa, "-X");
}
if (!target.cpu.arch.isMIPS() and target.cpu.arch != .hexagon) {
const hash_style = if (is_android) .both else self.distro.getHashStyle();
try self.extra_opts.append(gpa, switch (hash_style) {
inline else => |tag| "--hash-style=" ++ @tagName(tag),
});
}
if (system_defaults.enable_linker_build_id) {
try self.extra_opts.append(gpa, "--build-id");
}
}
fn addMultiLibPaths(self: *Linux, tc: *Toolchain, sysroot: []const u8, os_lib_dir: []const u8) !void {
if (!self.gcc_detector.is_valid) return;
const gcc_triple = self.gcc_detector.gcc_triple;
const lib_path = self.gcc_detector.parent_lib_path;
// Add lib/gcc/$triple/$version, with an optional /multilib suffix.
try tc.addPathIfExists(&.{ self.gcc_detector.install_path, tc.selected_multilib.gcc_suffix }, .file);
// Add lib/gcc/$triple/$libdir
// For GCC built with --enable-version-specific-runtime-libs.
try tc.addPathIfExists(&.{ self.gcc_detector.install_path, "..", os_lib_dir }, .file);
try tc.addPathIfExists(&.{ lib_path, "..", gcc_triple, "lib", "..", os_lib_dir, tc.selected_multilib.os_suffix }, .file);
// If the GCC installation we found is inside of the sysroot, we want to
// prefer libraries installed in the parent prefix of the GCC installation.
// It is important to *not* use these paths when the GCC installation is
// outside of the system root as that can pick up unintended libraries.
// This usually happens when there is an external cross compiler on the
// host system, and a more minimal sysroot available that is the target of
// the cross. Note that GCC does include some of these directories in some
// configurations but this seems somewhere between questionable and simply
// a bug.
if (mem.startsWith(u8, lib_path, sysroot)) {
try tc.addPathIfExists(&.{ lib_path, "..", os_lib_dir }, .file);
}
}
fn addMultiArchPaths(self: *Linux, tc: *Toolchain) !void {
if (!self.gcc_detector.is_valid) return;
const lib_path = self.gcc_detector.parent_lib_path;
const gcc_triple = self.gcc_detector.gcc_triple;
const multilib = self.gcc_detector.selected;
try tc.addPathIfExists(&.{ lib_path, "..", gcc_triple, "lib", multilib.os_suffix }, .file);
}
/// TODO: Very incomplete
fn findPaths(self: *Linux, tc: *Toolchain) !void {
const target = tc.getTarget();
const sysroot = tc.getSysroot();
var output: [64]u8 = undefined;
const os_lib_dir = getOSLibDir(target);
const multiarch_triple = getMultiarchTriple(target) orelse target_util.toLLVMTriple(target, &output);
try self.addMultiLibPaths(tc, sysroot, os_lib_dir);
try tc.addPathIfExists(&.{ sysroot, "/lib", multiarch_triple }, .file);
try tc.addPathIfExists(&.{ sysroot, "/lib", "..", os_lib_dir }, .file);
if (target.isAndroid()) {
// TODO
}
try tc.addPathIfExists(&.{ sysroot, "/usr", "lib", multiarch_triple }, .file);
try tc.addPathIfExists(&.{ sysroot, "/usr", "lib", "..", os_lib_dir }, .file);
try self.addMultiArchPaths(tc);
try tc.addPathIfExists(&.{ sysroot, "/lib" }, .file);
try tc.addPathIfExists(&.{ sysroot, "/usr", "lib" }, .file);
}
pub fn deinit(self: *Linux, allocator: std.mem.Allocator) void {
self.extra_opts.deinit(allocator);
}
fn isPIEDefault(self: *const Linux) bool {
_ = self;
return false;
}
fn getPIE(self: *const Linux, d: *const Driver) bool {
if (d.shared or d.static or d.relocatable or d.static_pie) {
return false;
}
return d.pie orelse self.isPIEDefault();
}
fn getStaticPIE(self: *const Linux, d: *Driver) !bool {
_ = self;
if (d.static_pie and d.pie != null) {
try d.err("cannot specify 'nopie' along with 'static-pie'");
}
return d.static_pie;
}
fn getStatic(self: *const Linux, d: *const Driver) bool {
_ = self;
return d.static and !d.static_pie;
}
pub fn getDefaultLinker(self: *const Linux, target: std.Target) []const u8 {
_ = self;
if (target.isAndroid()) {
return "ld.lld";
}
return "ld";
}
pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.ArrayList([]const u8)) Compilation.Error!void {
const d = tc.driver;
const target = tc.getTarget();
const is_pie = self.getPIE(d);
const is_static_pie = try self.getStaticPIE(d);
const is_static = self.getStatic(d);
const is_android = target.isAndroid();
const is_iamcu = target.os.tag == .elfiamcu;
const is_ve = target.cpu.arch == .ve;
const has_crt_begin_end_files = target.abi != .none; // TODO: clang checks for MIPS vendor
if (is_pie) {
try argv.append("-pie");
}
if (is_static_pie) {
try argv.appendSlice(&.{ "-static", "-pie", "--no-dynamic-linker", "-z", "text" });
}
if (d.rdynamic) {
try argv.append("-export-dynamic");
}
if (d.strip) {
try argv.append("-s");
}
try argv.appendSlice(self.extra_opts.items);
try argv.append("--eh-frame-hdr");
// Todo: Driver should parse `-EL`/`-EB` for arm to set endianness for arm targets
if (target_util.ldEmulationOption(d.comp.target, null)) |emulation| {
try argv.appendSlice(&.{ "-m", emulation });
} else {
try d.err("Unknown target triple");
return;
}
if (d.comp.target.cpu.arch.isRISCV()) {
try argv.append("-X");
}
if (d.shared) {
try argv.append("-shared");
}
if (is_static) {
try argv.append("-static");
} else {
if (d.rdynamic) {
try argv.append("-export-dynamic");
}
if (!d.shared and !is_static_pie and !d.relocatable) {
const dynamic_linker = d.comp.target.standardDynamicLinkerPath();
// todo: check for --dyld-prefix
if (dynamic_linker.get()) |path| {
try argv.appendSlice(&.{ "-dynamic-linker", try tc.arena.dupe(u8, path) });
} else {
try d.err("Could not find dynamic linker path");
}
}
}
try argv.appendSlice(&.{ "-o", d.output_name orelse "a.out" });
if (!d.nostdlib and !d.nostartfiles and !d.relocatable) {
if (!is_android and !is_iamcu) {
if (!d.shared) {
const crt1 = if (is_pie)
"Scrt1.o"
else if (is_static_pie)
"rcrt1.o"
else
"crt1.o";
try argv.append(try tc.getFilePath(crt1));
}
try argv.append(try tc.getFilePath("crti.o"));
}
if (is_ve) {
try argv.appendSlice(&.{ "-z", "max-page-size=0x4000000" });
}
if (is_iamcu) {
try argv.append(try tc.getFilePath("crt0.o"));
} else if (has_crt_begin_end_files) {
var path: []const u8 = "";
if (tc.getRuntimeLibKind() == .compiler_rt and !is_android) {
const crt_begin = try tc.getCompilerRt("crtbegin", .object);
if (tc.filesystem.exists(crt_begin)) {
path = crt_begin;
}
}
if (path.len == 0) {
const crt_begin = if (tc.driver.shared)
if (is_android) "crtbegin_so.o" else "crtbeginS.o"
else if (is_static)
if (is_android) "crtbegin_static.o" else "crtbeginT.o"
else if (is_pie or is_static_pie)
if (is_android) "crtbegin_dynamic.o" else "crtbeginS.o"
else if (is_android) "crtbegin_dynamic.o" else "crtbegin.o";
path = try tc.getFilePath(crt_begin);
}
try argv.append(path);
}
}
// TODO add -L opts
// TODO add -u opts
try tc.addFilePathLibArgs(argv);
// TODO handle LTO
try argv.appendSlice(d.link_objects.items);
if (!d.nostdlib and !d.relocatable) {
if (!d.nodefaultlibs) {
if (is_static or is_static_pie) {
try argv.append("--start-group");
}
try tc.addRuntimeLibs(argv);
// TODO: add pthread if needed
if (!d.nolibc) {
try argv.append("-lc");
}
if (is_iamcu) {
try argv.append("-lgloss");
}
if (is_static or is_static_pie) {
try argv.append("--end-group");
} else {
try tc.addRuntimeLibs(argv);
}
if (is_iamcu) {
try argv.appendSlice(&.{ "--as-needed", "-lsoftfp", "--no-as-needed" });
}
}
if (!d.nostartfiles and !is_iamcu) {
if (has_crt_begin_end_files) {
var path: []const u8 = "";
if (tc.getRuntimeLibKind() == .compiler_rt and !is_android) {
const crt_end = try tc.getCompilerRt("crtend", .object);
if (tc.filesystem.exists(crt_end)) {
path = crt_end;
}
}
if (path.len == 0) {
const crt_end = if (d.shared)
if (is_android) "crtend_so.o" else "crtendS.o"
else if (is_pie or is_static_pie)
if (is_android) "crtend_android.o" else "crtendS.o"
else if (is_android) "crtend_android.o" else "crtend.o";
path = try tc.getFilePath(crt_end);
}
try argv.append(path);
}
if (!is_android) {
try argv.append(try tc.getFilePath("crtn.o"));
}
}
}
// TODO add -T args
}
fn getMultiarchTriple(target: std.Target) ?[]const u8 {
const is_android = target.isAndroid();
const is_mips_r6 = std.Target.mips.featureSetHas(target.cpu.features, .mips32r6);
return switch (target.cpu.arch) {
.arm, .thumb => if (is_android) "arm-linux-androideabi" else if (target.abi == .gnueabihf) "arm-linux-gnueabihf" else "arm-linux-gnueabi",
.armeb, .thumbeb => if (target.abi == .gnueabihf) "armeb-linux-gnueabihf" else "armeb-linux-gnueabi",
.aarch64 => if (is_android) "aarch64-linux-android" else "aarch64-linux-gnu",
.aarch64_be => "aarch64_be-linux-gnu",
.x86 => if (is_android) "i686-linux-android" else "i386-linux-gnu",
.x86_64 => if (is_android) "x86_64-linux-android" else if (target.abi == .gnux32) "x86_64-linux-gnux32" else "x86_64-linux-gnu",
.m68k => "m68k-linux-gnu",
.mips => if (is_mips_r6) "mipsisa32r6-linux-gnu" else "mips-linux-gnu",
.mipsel => if (is_android) "mipsel-linux-android" else if (is_mips_r6) "mipsisa32r6el-linux-gnu" else "mipsel-linux-gnu",
.powerpcle => "powerpcle-linux-gnu",
.powerpc64 => "powerpc64-linux-gnu",
.powerpc64le => "powerpc64le-linux-gnu",
.riscv64 => "riscv64-linux-gnu",
.sparc => "sparc-linux-gnu",
.sparc64 => "sparc64-linux-gnu",
.s390x => "s390x-linux-gnu",
// TODO: expand this
else => null,
};
}
fn getOSLibDir(target: std.Target) []const u8 {
switch (target.cpu.arch) {
.x86,
.powerpc,
.powerpcle,
.sparc,
.sparcel,
=> return "lib32",
else => {},
}
if (target.cpu.arch == .x86_64 and (target.abi == .gnux32 or target.abi == .muslx32)) {
return "libx32";
}
if (target.cpu.arch == .riscv32) {
return "lib32";
}
if (target.ptrBitWidth() == 32) {
return "lib";
}
return "lib64";
}
test Linux {
if (@import("builtin").os.tag == .windows) return error.SkipZigTest;
var arena_instance = std.heap.ArenaAllocator.init(std.testing.allocator);
defer arena_instance.deinit();
const arena = arena_instance.allocator();
var comp = Compilation.init(std.testing.allocator);
defer comp.deinit();
comp.environment = .{
.path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
};
const raw_triple = "x86_64-linux-gnu";
const cross = std.zig.CrossTarget.parse(.{ .arch_os_abi = raw_triple }) catch unreachable;
comp.target = cross.toTarget(); // TODO deprecated
comp.langopts.setEmulatedCompiler(.gcc);
var driver: Driver = .{ .comp = &comp };
defer driver.deinit();
driver.raw_target_triple = raw_triple;
const link_obj = try driver.comp.gpa.dupe(u8, "/tmp/foo.o");
try driver.link_objects.append(driver.comp.gpa, link_obj);
driver.temp_file_count += 1;
var toolchain: Toolchain = .{ .driver = &driver, .arena = arena, .filesystem = .{ .fake = &.{
.{ .path = "/tmp" },
.{ .path = "/usr" },
.{ .path = "/usr/lib64" },
.{ .path = "/usr/bin" },
.{ .path = "/usr/bin/ld", .executable = true },
.{ .path = "/lib" },
.{ .path = "/lib/x86_64-linux-gnu" },
.{ .path = "/lib/x86_64-linux-gnu/crt1.o" },
.{ .path = "/lib/x86_64-linux-gnu/crti.o" },
.{ .path = "/lib/x86_64-linux-gnu/crtn.o" },
.{ .path = "/lib64" },
.{ .path = "/usr/lib" },
.{ .path = "/usr/lib/gcc" },
.{ .path = "/usr/lib/gcc/x86_64-linux-gnu" },
.{ .path = "/usr/lib/gcc/x86_64-linux-gnu/9" },
.{ .path = "/usr/lib/gcc/x86_64-linux-gnu/9/crtbegin.o" },
.{ .path = "/usr/lib/gcc/x86_64-linux-gnu/9/crtend.o" },
.{ .path = "/usr/lib/x86_64-linux-gnu" },
.{ .path = "/etc/lsb-release", .contents =
\\DISTRIB_ID=Ubuntu
\\DISTRIB_RELEASE=20.04
\\DISTRIB_CODENAME=focal
\\DISTRIB_DESCRIPTION="Ubuntu 20.04.6 LTS"
\\
},
} } };
defer toolchain.deinit();
try toolchain.discover();
var argv = std.ArrayList([]const u8).init(driver.comp.gpa);
defer argv.deinit();
var linker_path_buf: [std.fs.MAX_PATH_BYTES]u8 = undefined;
const linker_path = try toolchain.getLinkerPath(&linker_path_buf);
try argv.append(linker_path);
try toolchain.buildLinkerArgs(&argv);
const expected = [_][]const u8{
"/usr/bin/ld",
"-z",
"relro",
"--hash-style=gnu",
"--eh-frame-hdr",
"-m",
"elf_x86_64",
"-dynamic-linker",
"/lib64/ld-linux-x86-64.so.2",
"-o",
"a.out",
"/lib/x86_64-linux-gnu/crt1.o",
"/lib/x86_64-linux-gnu/crti.o",
"/usr/lib/gcc/x86_64-linux-gnu/9/crtbegin.o",
"-L/usr/lib/gcc/x86_64-linux-gnu/9",
"-L/usr/lib/gcc/x86_64-linux-gnu/9/../../../../lib64",
"-L/lib/x86_64-linux-gnu",
"-L/lib/../lib64",
"-L/usr/lib/x86_64-linux-gnu",
"-L/usr/lib/../lib64",
"-L/lib",
"-L/usr/lib",
link_obj,
"-lgcc",
"--as-needed",
"-lgcc_s",
"--no-as-needed",
"-lc",
"-lgcc",
"--as-needed",
"-lgcc_s",
"--no-as-needed",
"/usr/lib/gcc/x86_64-linux-gnu/9/crtend.o",
"/lib/x86_64-linux-gnu/crtn.o",
};
try std.testing.expectEqual(expected.len, argv.items.len);
for (expected, argv.items) |expected_item, actual_item| {
try std.testing.expectEqualStrings(expected_item, actual_item);
}
}

41
deps/aro/unicode.zig vendored Normal file
View File

@ -0,0 +1,41 @@
//! Copied from https://github.com/ziglang/zig/blob/6f0807f50f4e946bb850e746beaa5d6556cf7750/lib/std/unicode.zig
//! with all safety checks removed. These functions must only be called with known-good buffers that have already
//! been validated as being legitimate UTF8-encoded data, otherwise undefined behavior will occur.
pub fn utf8ByteSequenceLength_unsafe(first_byte: u8) u3 {
return switch (first_byte) {
0b0000_0000...0b0111_1111 => 1,
0b1100_0000...0b1101_1111 => 2,
0b1110_0000...0b1110_1111 => 3,
0b1111_0000...0b1111_0111 => 4,
else => unreachable,
};
}
pub fn utf8Decode2_unsafe(bytes: []const u8) u21 {
var value: u21 = bytes[0] & 0b00011111;
value <<= 6;
return value | (bytes[1] & 0b00111111);
}
pub fn utf8Decode3_unsafe(bytes: []const u8) u21 {
var value: u21 = bytes[0] & 0b00001111;
value <<= 6;
value |= bytes[1] & 0b00111111;
value <<= 6;
return value | (bytes[2] & 0b00111111);
}
pub fn utf8Decode4_unsafe(bytes: []const u8) u21 {
var value: u21 = bytes[0] & 0b00000111;
value <<= 6;
value |= bytes[1] & 0b00111111;
value <<= 6;
value |= bytes[2] & 0b00111111;
value <<= 6;
return value | (bytes[3] & 0b00111111);
}
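A small usage sketch for the unsafe decoders above (editor illustration, not part of the commit; the import path is assumed):
const std = @import("std");
const unicode = @import("unicode.zig"); // assumed path for the file above
test "unsafe UTF-8 decode sketch" {
    // "é" is encoded as 0xC3 0xA9 and decodes to U+00E9.
    try std.testing.expectEqual(@as(u3, 2), unicode.utf8ByteSequenceLength_unsafe(0xC3));
    try std.testing.expectEqual(@as(u21, 0xE9), unicode.utf8Decode2_unsafe(&.{ 0xC3, 0xA9 }));
    // "€" is encoded as 0xE2 0x82 0xAC and decodes to U+20AC.
    try std.testing.expectEqual(@as(u21, 0x20AC), unicode.utf8Decode3_unsafe(&.{ 0xE2, 0x82, 0xAC }));
}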

83
deps/aro/util.zig vendored Normal file
View File

@ -0,0 +1,83 @@
const std = @import("std");
const mem = std.mem;
const builtin = @import("builtin");
const is_windows = builtin.os.tag == .windows;
pub const Color = enum {
reset,
red,
green,
blue,
cyan,
purple,
yellow,
white,
};
pub fn fileSupportsColor(file: std.fs.File) bool {
return file.supportsAnsiEscapeCodes() or (is_windows and file.isTty());
}
pub fn setColor(color: Color, w: anytype) void {
if (is_windows) {
const stderr_file = std.io.getStdErr();
if (!stderr_file.isTty()) return;
const windows = std.os.windows;
const S = struct {
var attrs: windows.WORD = undefined;
var init_attrs = false;
};
if (!S.init_attrs) {
S.init_attrs = true;
var info: windows.CONSOLE_SCREEN_BUFFER_INFO = undefined;
_ = windows.kernel32.GetConsoleScreenBufferInfo(stderr_file.handle, &info);
S.attrs = info.wAttributes;
_ = windows.kernel32.SetConsoleOutputCP(65001);
}
// need to flush bufferedWriter
const T = if (@typeInfo(@TypeOf(w.context)) == .Pointer) @TypeOf(w.context.*) else @TypeOf(w.context);
if (T != void and @hasDecl(T, "flush")) w.context.flush() catch {};
switch (color) {
.reset => _ = windows.SetConsoleTextAttribute(stderr_file.handle, S.attrs) catch {},
.red => _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_RED | windows.FOREGROUND_INTENSITY) catch {},
.green => _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY) catch {},
.blue => _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY) catch {},
.cyan => _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY) catch {},
.purple => _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_RED | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY) catch {},
.yellow => _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_INTENSITY) catch {},
.white => _ = windows.SetConsoleTextAttribute(stderr_file.handle, windows.FOREGROUND_RED | windows.FOREGROUND_GREEN | windows.FOREGROUND_BLUE | windows.FOREGROUND_INTENSITY) catch {},
}
} else switch (color) {
.reset => w.writeAll("\x1b[0m") catch {},
.red => w.writeAll("\x1b[31;1m") catch {},
.green => w.writeAll("\x1b[32;1m") catch {},
.blue => w.writeAll("\x1b[34;1m") catch {},
.cyan => w.writeAll("\x1b[36;1m") catch {},
.purple => w.writeAll("\x1b[35;1m") catch {},
.yellow => w.writeAll("\x1b[93;1m") catch {},
.white => w.writeAll("\x1b[0m\x1b[1m") catch {},
}
}
pub fn errorDescription(err: anyerror) []const u8 {
return switch (err) {
error.OutOfMemory => "ran out of memory",
error.FileNotFound => "file not found",
error.IsDir => "is a directory",
error.NotDir => "is not a directory",
error.NotOpenForReading => "file is not open for reading",
error.NotOpenForWriting => "file is not open for writing",
error.InvalidUtf8 => "input is not valid UTF-8",
error.FileBusy => "file is busy",
error.NameTooLong => "file name is too long",
error.AccessDenied => "access denied",
error.FileTooBig => "file is too big",
error.ProcessFdQuotaExceeded, error.SystemFdQuotaExceeded => "ran out of file descriptors",
error.SystemResources => "ran out of system resources",
error.FatalError => "a fatal error occurred",
error.Unexpected => "an unexpected error occurred",
else => @errorName(err),
};
}
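A minimal sketch of how the color helpers above are meant to be used (illustrative only, not part of the commit; the import path is assumed):
const std = @import("std");
const util = @import("util.zig"); // assumed path for the file above
pub fn main() void {
    const stderr = std.io.getStdErr();
    const w = stderr.writer();
    // Only change colors when the stream supports it (ANSI escapes, or console attributes on Windows).
    if (util.fileSupportsColor(stderr)) util.setColor(.red, w);
    w.writeAll("error: ") catch {};
    if (util.fileSupportsColor(stderr)) util.setColor(.reset, w);
    w.writeAll("something went wrong\n") catch {};
}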

src/Compilation.zig
View File

@ -32,7 +32,6 @@ const Module = @import("Module.zig");
const InternPool = @import("InternPool.zig");
const BuildId = std.Build.CompileStep.BuildId;
const Cache = std.Build.Cache;
const translate_c = @import("translate_c.zig");
const c_codegen = @import("codegen/c.zig");
const libtsan = @import("libtsan.zig");
const Zir = @import("Zir.zig");
@ -88,7 +87,7 @@ failed_win32_resources: if (build_options.only_core_functionality) void else std
misc_failures: std.AutoArrayHashMapUnmanaged(MiscTask, MiscError) = .{},
keep_source_files_loaded: bool,
use_clang: bool,
c_frontend: CFrontend,
sanitize_c: bool,
/// When this is `true` it means invoking clang as a sub-process is expected to inherit
/// stdin, stdout, stderr, and if it returns non success, to forward the exit code.
@ -515,6 +514,8 @@ pub const cache_helpers = struct {
}
};
pub const CFrontend = enum { clang, aro };
pub const ClangPreprocessorMode = enum {
no,
/// This means we are doing `zig cc -E -o <path>`.
@ -1046,14 +1047,11 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
break :pic explicit;
} else pie or must_pic;
// Make a decision on whether to use Clang for translate-c and compiling C files.
const use_clang = if (options.use_clang) |explicit| explicit else blk: {
if (build_options.have_llvm) {
// Can't use it if we don't have it!
break :blk false;
}
// It's not planned to do our own translate-c or C compilation.
break :blk true;
// Make a decision on whether to use Clang or Aro for translate-c and compiling C files.
const c_frontend: CFrontend = blk: {
if (!build_options.have_llvm) break :blk .aro;
if (options.use_clang) |explicit| if (explicit) break :blk .clang;
break :blk .clang;
};
const is_safe_mode = switch (options.optimize_mode) {
@ -1677,7 +1675,7 @@ pub fn create(gpa: Allocator, options: InitOptions) !*Compilation {
.astgen_work_queue = std.fifo.LinearFifo(*Module.File, .Dynamic).init(gpa),
.embed_file_work_queue = std.fifo.LinearFifo(*Module.EmbedFile, .Dynamic).init(gpa),
.keep_source_files_loaded = options.keep_source_files_loaded,
.use_clang = use_clang,
.c_frontend = c_frontend,
.clang_argv = options.clang_argv,
.c_source_files = options.c_source_files,
.rc_source_files = options.rc_source_files,
@ -3917,9 +3915,7 @@ pub const CImportResult = struct {
/// This API is currently coupled pretty tightly to stage1's needs; it will need to be reworked
/// a bit when we want to start using it from self-hosted.
pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
if (!build_options.have_llvm)
return error.ZigCompilerNotBuiltWithLLVMExtensions;
if (build_options.only_c) unreachable; // @cImport is not needed for bootstrapping
const tracy_trace = trace(@src());
defer tracy_trace.end();
@ -3966,7 +3962,7 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
var argv = std.ArrayList([]const u8).init(comp.gpa);
defer argv.deinit();
try argv.append(""); // argv[0] is program name, actual args start at [1]
try argv.append(@tagName(comp.c_frontend)); // argv[0] is program name, actual args start at [1]
try comp.addTranslateCCArgs(arena, &argv, .c, out_dep_path);
try argv.append(out_h_path);
@ -3974,31 +3970,44 @@ pub fn cImport(comp: *Compilation, c_src: []const u8) !CImportResult {
if (comp.verbose_cc) {
dump_argv(argv.items);
}
var tree = switch (comp.c_frontend) {
.aro => tree: {
if (builtin.zig_backend == .stage2_c) @panic("the CBE cannot compile Aro yet!");
const translate_c = @import("aro_translate_c.zig");
_ = translate_c;
if (true) @panic("TODO");
break :tree undefined;
},
.clang => tree: {
if (!build_options.have_llvm) unreachable;
const translate_c = @import("translate_c.zig");
// Convert to null terminated args.
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1);
new_argv_with_sentinel[argv.items.len] = null;
const new_argv = new_argv_with_sentinel[0..argv.items.len :null];
for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
// Convert to null terminated args.
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1);
new_argv_with_sentinel[argv.items.len] = null;
const new_argv = new_argv_with_sentinel[0..argv.items.len :null];
for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
const c_headers_dir_path_z = try comp.zig_lib_directory.joinZ(arena, &[_][]const u8{"include"});
var errors = std.zig.ErrorBundle.empty;
errdefer errors.deinit(comp.gpa);
var tree = translate_c.translate(
comp.gpa,
new_argv.ptr,
new_argv.ptr + new_argv.len,
&errors,
c_headers_dir_path_z,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SemanticAnalyzeFail => {
return CImportResult{
.out_zig_path = "",
.cache_hit = actual_hit,
.errors = errors,
const c_headers_dir_path_z = try comp.zig_lib_directory.joinZ(arena, &[_][]const u8{"include"});
var errors = std.zig.ErrorBundle.empty;
errdefer errors.deinit(comp.gpa);
break :tree translate_c.translate(
comp.gpa,
new_argv.ptr,
new_argv.ptr + new_argv.len,
&errors,
c_headers_dir_path_z,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SemanticAnalyzeFail => {
return CImportResult{
.out_zig_path = "",
.cache_hit = actual_hit,
.errors = errors,
};
},
};
},
};
@ -4248,6 +4257,9 @@ fn reportRetryableEmbedFileError(
}
fn updateCObject(comp: *Compilation, c_object: *CObject, c_obj_prog_node: *std.Progress.Node) !void {
if (comp.c_frontend == .aro) {
return comp.failCObj(c_object, "aro does not support compiling C objects yet", .{});
}
if (!build_options.have_llvm) {
return comp.failCObj(c_object, "clang not available: compiler built without LLVM extensions", .{});
}

444
src/aro_translate_c.zig Normal file
View File

@ -0,0 +1,444 @@
const std = @import("std");
const mem = std.mem;
const assert = std.debug.assert;
const translate_c = @import("translate_c.zig");
const aro = @import("aro");
const Tree = aro.Tree;
const NodeIndex = Tree.NodeIndex;
const TokenIndex = Tree.TokenIndex;
const Type = aro.Type;
const ast = @import("translate_c/ast.zig");
const ZigNode = ast.Node;
const ZigTag = ZigNode.Tag;
const common = @import("translate_c/common.zig");
const Error = common.Error;
const MacroProcessingError = common.MacroProcessingError;
const TypeError = common.TypeError;
const TransError = common.TransError;
const SymbolTable = common.SymbolTable;
const AliasList = common.AliasList;
const ResultUsed = common.ResultUsed;
const Scope = common.ScopeExtra(Context, Type);
pub const Compilation = aro.Compilation;
const Context = struct {
gpa: mem.Allocator,
arena: mem.Allocator,
decl_table: std.AutoArrayHashMapUnmanaged(usize, []const u8) = .{},
alias_list: AliasList,
global_scope: *Scope.Root,
mangle_count: u32 = 0,
/// Table of record decls that have been demoted to opaques.
opaque_demotes: std.AutoHashMapUnmanaged(usize, void) = .{},
/// Table of unnamed enums and records that are child types of typedefs.
unnamed_typedefs: std.AutoHashMapUnmanaged(usize, []const u8) = .{},
/// Needed to decide if we are parsing a typename
typedefs: std.StringArrayHashMapUnmanaged(void) = .{},
/// This one is different than the root scope's name table. This contains
/// a list of names that we found by visiting all the top level decls without
/// translating them. The other maps are updated as we translate; this one is updated
/// up front in a pre-processing step.
global_names: std.StringArrayHashMapUnmanaged(void) = .{},
/// This is similar to `global_names`, but contains names which we would
/// *like* to use, but do not strictly *have* to if they are unavailable.
/// These are relevant to types, which ideally we would name like
/// 'struct_foo' with an alias 'foo', but if either of those names is taken,
/// they may be mangled.
/// This is distinct from `global_names` so we can detect at a type
/// declaration whether or not the name is available.
weak_global_names: std.StringArrayHashMapUnmanaged(void) = .{},
pattern_list: translate_c.PatternList,
tree: Tree,
comp: *Compilation,
mapper: aro.TypeMapper,
fn getMangle(c: *Context) u32 {
c.mangle_count += 1;
return c.mangle_count;
}
/// Convert an Aro source location to a file:line:column string
fn locStr(c: *Context, loc: TokenIndex) ![]const u8 {
_ = c;
_ = loc;
// const spelling_loc = c.source_manager.getSpellingLoc(loc);
// const filename_c = c.source_manager.getFilename(spelling_loc);
// const filename = if (filename_c) |s| try c.str(s) else @as([]const u8, "(no file)");
// const line = c.source_manager.getSpellingLineNumber(spelling_loc);
// const column = c.source_manager.getSpellingColumnNumber(spelling_loc);
// return std.fmt.allocPrint(c.arena, "{s}:{d}:{d}", .{ filename, line, column });
return "somewhere";
}
};
fn maybeSuppressResult(c: *Context, used: ResultUsed, result: ZigNode) TransError!ZigNode {
if (used == .used) return result;
return ZigTag.discard.create(c.arena, .{ .should_skip = false, .value = result });
}
fn addTopLevelDecl(c: *Context, name: []const u8, decl_node: ZigNode) !void {
const gop = try c.global_scope.sym_table.getOrPut(name);
if (!gop.found_existing) {
gop.value_ptr.* = decl_node;
try c.global_scope.nodes.append(decl_node);
}
}
fn failDecl(c: *Context, loc: TokenIndex, name: []const u8, comptime format: []const u8, args: anytype) Error!void {
// location
// pub const name = @compileError(msg);
const fail_msg = try std.fmt.allocPrint(c.arena, format, args);
try addTopLevelDecl(c, name, try ZigTag.fail_decl.create(c.arena, .{ .actual = name, .mangled = fail_msg }));
const str = try c.locStr(loc);
const location_comment = try std.fmt.allocPrint(c.arena, "// {s}", .{str});
try c.global_scope.nodes.append(try ZigTag.warning.create(c.arena, location_comment));
}
pub fn translate(
gpa: mem.Allocator,
comp: *Compilation,
args: []const []const u8,
) !std.zig.Ast {
try comp.addDefaultPragmaHandlers();
comp.langopts.setEmulatedCompiler(aro.target_util.systemCompiler(comp.target));
var driver: aro.Driver = .{ .comp = comp };
defer driver.deinit();
var macro_buf = std.ArrayList(u8).init(gpa);
defer macro_buf.deinit();
assert(!try driver.parseArgs(std.io.null_writer, macro_buf.writer(), args));
assert(driver.inputs.items.len == 1);
const source = driver.inputs.items[0];
const builtin = try comp.generateBuiltinMacros();
const user_macros = try comp.addSourceFromBuffer("<command line>", macro_buf.items);
var pp = aro.Preprocessor.init(comp);
defer pp.deinit();
try pp.addBuiltinMacros();
_ = try pp.preprocess(builtin);
_ = try pp.preprocess(user_macros);
const eof = try pp.preprocess(source);
try pp.tokens.append(pp.comp.gpa, eof);
var tree = try aro.Parser.parse(&pp);
defer tree.deinit();
if (driver.comp.diag.errors != 0) {
return error.SemanticAnalyzeFail;
}
const mapper = tree.comp.string_interner.getFastTypeMapper(tree.comp.gpa) catch tree.comp.string_interner.getSlowTypeMapper();
defer mapper.deinit(tree.comp.gpa);
var arena_allocator = std.heap.ArenaAllocator.init(gpa);
errdefer arena_allocator.deinit();
const arena = arena_allocator.allocator();
var context = Context{
.gpa = gpa,
.arena = arena,
.alias_list = AliasList.init(gpa),
.global_scope = try arena.create(Scope.Root),
.pattern_list = try translate_c.PatternList.init(gpa),
.comp = comp,
.mapper = mapper,
.tree = tree,
};
context.global_scope.* = Scope.Root.init(&context);
defer {
context.decl_table.deinit(gpa);
context.alias_list.deinit();
context.global_names.deinit(gpa);
context.opaque_demotes.deinit(gpa);
context.unnamed_typedefs.deinit(gpa);
context.typedefs.deinit(gpa);
context.global_scope.deinit();
context.pattern_list.deinit(gpa);
}
inline for (@typeInfo(std.zig.c_builtins).Struct.decls) |decl| {
const builtin_fn = try ZigTag.pub_var_simple.create(arena, .{
.name = decl.name,
.init = try ZigTag.import_c_builtin.create(arena, decl.name),
});
try addTopLevelDecl(&context, decl.name, builtin_fn);
}
try prepopulateGlobalNameTable(&context);
try transTopLevelDecls(&context);
for (context.alias_list.items) |alias| {
if (!context.global_scope.sym_table.contains(alias.alias)) {
const node = try ZigTag.alias.create(arena, .{ .actual = alias.alias, .mangled = alias.name });
try addTopLevelDecl(&context, alias.alias, node);
}
}
return ast.render(gpa, context.global_scope.nodes.items);
}
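// Illustrative use of `translate`, mirroring the call site in cmdTranslateC; the
// variable names and the input path here are assumptions, not part of this file:
//
//     var aro_comp = aro.Compilation.init(gpa);
//     defer aro_comp.deinit();
//     var zig_ast = try translate(gpa, &aro_comp, &.{ "aro", "input.c" });
//     defer zig_ast.deinit(gpa);
//
// The first argument slot is treated as the program name by the Aro driver; the
// actual arguments, including the single input path, start after it.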
fn prepopulateGlobalNameTable(c: *Context) !void {
const node_tags = c.tree.nodes.items(.tag);
const node_types = c.tree.nodes.items(.ty);
const node_data = c.tree.nodes.items(.data);
for (c.tree.root_decls) |node| {
const data = node_data[@intFromEnum(node)];
const decl_name = switch (node_tags[@intFromEnum(node)]) {
.typedef => @panic("TODO"),
.static_assert,
.struct_decl_two,
.union_decl_two,
.struct_decl,
.union_decl,
=> blk: {
const ty = node_types[@intFromEnum(node)];
const name_id = ty.data.record.name;
break :blk c.mapper.lookup(name_id);
},
.enum_decl_two,
.enum_decl,
=> blk: {
const ty = node_types[@intFromEnum(node)];
const name_id = ty.data.@"enum".name;
break :blk c.mapper.lookup(name_id);
},
.fn_proto,
.static_fn_proto,
.inline_fn_proto,
.inline_static_fn_proto,
.fn_def,
.static_fn_def,
.inline_fn_def,
.inline_static_fn_def,
.@"var",
.static_var,
.threadlocal_var,
.threadlocal_static_var,
.extern_var,
.threadlocal_extern_var,
=> c.tree.tokSlice(data.decl.name),
else => unreachable,
};
try c.global_names.put(c.gpa, decl_name, {});
}
}
fn transTopLevelDecls(c: *Context) !void {
const node_tags = c.tree.nodes.items(.tag);
const node_data = c.tree.nodes.items(.data);
for (c.tree.root_decls) |node| {
const data = node_data[@intFromEnum(node)];
switch (node_tags[@intFromEnum(node)]) {
.typedef => {
try transTypeDef(c, &c.global_scope.base, node);
},
.static_assert,
.struct_decl_two,
.union_decl_two,
.struct_decl,
.union_decl,
=> {
try transRecordDecl(c, &c.global_scope.base, node);
},
.enum_decl_two => {
var fields = [2]NodeIndex{ data.bin.lhs, data.bin.rhs };
var field_count: u8 = 0;
if (fields[0] != .none) field_count += 1;
if (fields[1] != .none) field_count += 1;
try transEnumDecl(c, &c.global_scope.base, node, fields[0..field_count]);
},
.enum_decl => {
const fields = c.tree.data[data.range.start..data.range.end];
try transEnumDecl(c, &c.global_scope.base, node, fields);
},
.fn_proto,
.static_fn_proto,
.inline_fn_proto,
.inline_static_fn_proto,
.fn_def,
.static_fn_def,
.inline_fn_def,
.inline_static_fn_def,
=> {
try transFnDecl(c, node);
},
.@"var",
.static_var,
.threadlocal_var,
.threadlocal_static_var,
.extern_var,
.threadlocal_extern_var,
=> {
try transVarDecl(c, node, null);
},
else => unreachable,
}
}
}
fn transTypeDef(_: *Context, _: *Scope, _: NodeIndex) Error!void {
@panic("TODO");
}
fn transRecordDecl(_: *Context, _: *Scope, _: NodeIndex) Error!void {
@panic("TODO");
}
fn transFnDecl(_: *Context, _: NodeIndex) Error!void {
@panic("TODO");
}
fn transVarDecl(_: *Context, _: NodeIndex, _: ?usize) Error!void {
@panic("TODO");
}
fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: NodeIndex, field_nodes: []const NodeIndex) Error!void {
const node_types = c.tree.nodes.items(.ty);
const ty = node_types[@intFromEnum(enum_decl)];
const node_data = c.tree.nodes.items(.data);
if (c.decl_table.get(@intFromPtr(ty.data.@"enum"))) |_|
return; // Avoid processing this decl twice
const toplevel = scope.id == .root;
const bs: *Scope.Block = if (!toplevel) try scope.findBlockScope(c) else undefined;
var is_unnamed = false;
var bare_name: []const u8 = c.mapper.lookup(ty.data.@"enum".name);
var name = bare_name;
if (c.unnamed_typedefs.get(@intFromPtr(ty.data.@"enum"))) |typedef_name| {
bare_name = typedef_name;
name = typedef_name;
} else {
if (bare_name.len == 0) {
bare_name = try std.fmt.allocPrint(c.arena, "unnamed_{d}", .{c.getMangle()});
is_unnamed = true;
}
name = try std.fmt.allocPrint(c.arena, "enum_{s}", .{bare_name});
}
if (!toplevel) name = try bs.makeMangledName(c, name);
try c.decl_table.putNoClobber(c.gpa, @intFromPtr(ty.data.@"enum"), name);
const enum_type_node = if (!ty.data.@"enum".isIncomplete()) blk: {
for (ty.data.@"enum".fields, field_nodes) |field, field_node| {
var enum_val_name: []const u8 = c.mapper.lookup(field.name);
if (!toplevel) {
enum_val_name = try bs.makeMangledName(c, enum_val_name);
}
const enum_const_type_node: ?ZigNode = transType(c, scope, field.ty, field.name_tok) catch |err| switch (err) {
error.UnsupportedType => null,
else => |e| return e,
};
const enum_const_def = try ZigTag.enum_constant.create(c.arena, .{
.name = enum_val_name,
.is_public = toplevel,
.type = enum_const_type_node,
.value = transExpr(c, node_data[@intFromEnum(field_node)].decl.node, .used) catch @panic("TODO"),
});
if (toplevel)
try addTopLevelDecl(c, enum_val_name, enum_const_def)
else {
try scope.appendNode(enum_const_def);
try bs.discardVariable(c, enum_val_name);
}
}
break :blk transType(c, scope, ty.data.@"enum".tag_ty, 0) catch |err| switch (err) {
error.UnsupportedType => {
return failDecl(c, 0, name, "unable to translate enum integer type", .{});
},
else => |e| return e,
};
} else blk: {
try c.opaque_demotes.put(c.gpa, @intFromPtr(ty.data.@"enum"), {});
break :blk ZigTag.opaque_literal.init();
};
const is_pub = toplevel and !is_unnamed;
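// Select between `pub_var_simple` and `var_simple` below by indexing the tag pair with `@intFromBool(is_pub)`.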
const payload = try c.arena.create(ast.Payload.SimpleVarDecl);
payload.* = .{
.base = .{ .tag = ([2]ZigTag{ .var_simple, .pub_var_simple })[@intFromBool(is_pub)] },
.data = .{
.init = enum_type_node,
.name = name,
},
};
const node = ZigNode.initPayload(&payload.base);
if (toplevel) {
try addTopLevelDecl(c, name, node);
if (!is_unnamed)
try c.alias_list.append(.{ .alias = bare_name, .name = name });
} else {
try scope.appendNode(node);
if (node.tag() != .pub_var_simple) {
try bs.discardVariable(c, name);
}
}
}
fn transType(c: *Context, scope: *Scope, raw_ty: Type, source_loc: TokenIndex) TypeError!ZigNode {
_ = source_loc;
_ = scope;
const ty = raw_ty.canonicalize(.standard);
switch (ty.specifier) {
.void => return ZigTag.type.create(c.arena, "anyopaque"),
.bool => return ZigTag.type.create(c.arena, "bool"),
.char => return ZigTag.type.create(c.arena, "c_char"),
.schar => return ZigTag.type.create(c.arena, "i8"),
.uchar => return ZigTag.type.create(c.arena, "u8"),
.short => return ZigTag.type.create(c.arena, "c_short"),
.ushort => return ZigTag.type.create(c.arena, "c_ushort"),
.int => return ZigTag.type.create(c.arena, "c_int"),
.uint => return ZigTag.type.create(c.arena, "c_uint"),
.long => return ZigTag.type.create(c.arena, "c_long"),
.ulong => return ZigTag.type.create(c.arena, "c_ulong"),
.long_long => return ZigTag.type.create(c.arena, "c_longlong"),
.ulong_long => return ZigTag.type.create(c.arena, "c_ulonglong"),
.int128 => return ZigTag.type.create(c.arena, "i128"),
.uint128 => return ZigTag.type.create(c.arena, "u128"),
.fp16, .float16 => return ZigTag.type.create(c.arena, "f16"),
.float => return ZigTag.type.create(c.arena, "f32"),
.double => return ZigTag.type.create(c.arena, "f64"),
.long_double => return ZigTag.type.create(c.arena, "c_longdouble"),
.float80 => return ZigTag.type.create(c.arena, "f80"),
.float128 => return ZigTag.type.create(c.arena, "f128"),
else => @panic("TODO"),
}
}
fn transStmt(c: *Context, node: NodeIndex) TransError!void {
_ = try c.transExpr(node, .unused);
}
fn transExpr(c: *Context, node: NodeIndex, result_used: ResultUsed) TransError!ZigNode {
std.debug.assert(node != .none);
const ty = c.tree.nodes.items(.ty)[@intFromEnum(node)];
if (c.tree.value_map.get(node)) |val| {
// TODO handle other values
const str = try std.fmt.allocPrint(c.arena, "{d}", .{val.data.int});
const int = try ZigTag.integer_literal.create(c.arena, str);
const as_node = try ZigTag.as.create(c.arena, .{
.lhs = try transType(c, undefined, ty, undefined),
.rhs = int,
});
return maybeSuppressResult(c, result_used, as_node);
}
const node_tags = c.tree.nodes.items(.tag);
switch (node_tags[@intFromEnum(node)]) {
else => unreachable, // Not an expression.
}
return .none;
}

View File

@ -20,7 +20,6 @@ const build_options = @import("build_options");
const introspect = @import("introspect.zig");
const LibCInstallation = @import("libc_installation.zig").LibCInstallation;
const wasi_libc = @import("wasi_libc.zig");
const translate_c = @import("translate_c.zig");
const BuildId = std.Build.CompileStep.BuildId;
const Cache = std.Build.Cache;
const target_util = @import("target.zig");
@ -4204,9 +4203,7 @@ fn updateModule(comp: *Compilation) !void {
}
fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*Compilation.CImportResult) !void {
if (!build_options.have_llvm)
fatal("cannot translate-c: compiler built without LLVM extensions", .{});
if (build_options.only_c) unreachable; // translate-c is not needed for bootstrapping
assert(comp.c_source_files.len == 1);
const c_source_file = comp.c_source_files[0];
@ -4225,14 +4222,14 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*Compilati
const digest = if (try man.hit()) man.final() else digest: {
if (fancy_output) |p| p.cache_hit = false;
var argv = std.ArrayList([]const u8).init(arena);
try argv.append(""); // argv[0] is program name, actual args start at [1]
try argv.append(@tagName(comp.c_frontend)); // argv[0] is program name, actual args start at [1]
var zig_cache_tmp_dir = try comp.local_cache_directory.handle.makeOpenPath("tmp", .{});
defer zig_cache_tmp_dir.close();
const ext = Compilation.classifyFileExt(c_source_file.src_path);
const out_dep_path: ?[]const u8 = blk: {
if (comp.disable_c_depfile or !ext.clangSupportsDepFile())
if (comp.c_frontend == .aro or comp.disable_c_depfile or !ext.clangSupportsDepFile())
break :blk null;
const c_src_basename = fs.path.basename(c_source_file.src_path);
@ -4241,44 +4238,67 @@ fn cmdTranslateC(comp: *Compilation, arena: Allocator, fancy_output: ?*Compilati
break :blk out_dep_path;
};
try comp.addTranslateCCArgs(arena, &argv, ext, out_dep_path);
// TODO
if (comp.c_frontend != .aro) try comp.addTranslateCCArgs(arena, &argv, ext, out_dep_path);
try argv.append(c_source_file.src_path);
if (comp.verbose_cc) {
std.debug.print("clang ", .{});
Compilation.dump_argv(argv.items);
}
// Convert to null terminated args.
const clang_args_len = argv.items.len + c_source_file.extra_flags.len;
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, clang_args_len + 1);
new_argv_with_sentinel[clang_args_len] = null;
const new_argv = new_argv_with_sentinel[0..clang_args_len :null];
for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
for (c_source_file.extra_flags, 0..) |arg, i| {
new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg);
}
var tree = switch (comp.c_frontend) {
.aro => tree: {
if (builtin.zig_backend == .stage2_c) @panic("the CBE cannot compile Aro yet!");
const translate_c = @import("aro_translate_c.zig");
var aro_comp = translate_c.Compilation.init(comp.gpa);
defer aro_comp.deinit();
const c_headers_dir_path_z = try comp.zig_lib_directory.joinZ(arena, &[_][]const u8{"include"});
var errors = std.zig.ErrorBundle.empty;
var tree = translate_c.translate(
comp.gpa,
new_argv.ptr,
new_argv.ptr + new_argv.len,
&errors,
c_headers_dir_path_z,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SemanticAnalyzeFail => {
if (fancy_output) |p| {
p.errors = errors;
return;
} else {
errors.renderToStdErr(renderOptions(comp.color));
process.exit(1);
break :tree translate_c.translate(comp.gpa, &aro_comp, argv.items) catch |err| switch (err) {
error.SemanticAnalyzeFail, error.FatalError => {
// TODO convert these to zig errors
aro_comp.renderErrors();
process.exit(1);
},
error.OutOfMemory => return error.OutOfMemory,
error.StreamTooLong => fatal("StreamTooLong?", .{}),
};
},
.clang => tree: {
if (!build_options.have_llvm) unreachable;
const translate_c = @import("translate_c.zig");
// Convert to null terminated args.
const clang_args_len = argv.items.len + c_source_file.extra_flags.len;
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, clang_args_len + 1);
new_argv_with_sentinel[clang_args_len] = null;
const new_argv = new_argv_with_sentinel[0..clang_args_len :null];
for (argv.items, 0..) |arg, i| {
new_argv[i] = try arena.dupeZ(u8, arg);
}
for (c_source_file.extra_flags, 0..) |arg, i| {
new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg);
}
const c_headers_dir_path_z = try comp.zig_lib_directory.joinZ(arena, &[_][]const u8{"include"});
var errors = std.zig.ErrorBundle.empty;
break :tree translate_c.translate(
comp.gpa,
new_argv.ptr,
new_argv.ptr + new_argv.len,
&errors,
c_headers_dir_path_z,
) catch |err| switch (err) {
error.OutOfMemory => return error.OutOfMemory,
error.SemanticAnalyzeFail => {
if (fancy_output) |p| {
p.errors = errors;
return;
} else {
errors.renderToStdErr(renderOptions(comp.color));
process.exit(1);
}
},
};
},
};
defer tree.deinit(comp.gpa);

View File

@ -7,312 +7,24 @@ const CToken = std.c.Token;
const mem = std.mem;
const math = std.math;
const meta = std.meta;
const CallingConvention = std.builtin.CallingConvention;
const ast = @import("translate_c/ast.zig");
const Node = ast.Node;
const Tag = Node.Tag;
const CallingConvention = std.builtin.CallingConvention;
pub const Error = std.mem.Allocator.Error;
const MacroProcessingError = Error || error{UnexpectedMacroToken};
const TypeError = Error || error{UnsupportedType};
const TransError = TypeError || error{UnsupportedTranslation};
const SymbolTable = std.StringArrayHashMap(Node);
const AliasList = std.ArrayList(struct {
alias: []const u8,
name: []const u8,
});
const common = @import("translate_c/common.zig");
const Error = common.Error;
const MacroProcessingError = common.MacroProcessingError;
const TypeError = common.TypeError;
const TransError = common.TransError;
const SymbolTable = common.SymbolTable;
const AliasList = common.AliasList;
const ResultUsed = common.ResultUsed;
const Scope = common.ScopeExtra(Context, clang.QualType);
// Maps macro parameter names to token position, for determining if different
// identifiers refer to the same positional argument in different macros.
const ArgsPositionMap = std.StringArrayHashMapUnmanaged(usize);
const Scope = struct {
id: Id,
parent: ?*Scope,
const Id = enum {
block,
root,
condition,
loop,
do_loop,
};
/// Used for the scope of condition expressions, for example `if (cond)`.
/// The block is lazily initialised because it is only needed for rare
/// cases of comma operators being used.
const Condition = struct {
base: Scope,
block: ?Block = null,
fn getBlockScope(self: *Condition, c: *Context) !*Block {
if (self.block) |*b| return b;
self.block = try Block.init(c, &self.base, true);
return &self.block.?;
}
fn deinit(self: *Condition) void {
if (self.block) |*b| b.deinit();
}
};
/// Represents an in-progress Node.Block. This struct is stack-allocated.
/// When it is deinitialized, it produces an Node.Block which is allocated
/// into the main arena.
const Block = struct {
base: Scope,
statements: std.ArrayList(Node),
variables: AliasList,
mangle_count: u32 = 0,
label: ?[]const u8 = null,
/// By default all variables are discarded, since we do not know in advance if they
/// will be used. This maps the variable's name to the Discard payload, so that if
/// the variable is subsequently referenced we can indicate that the discard should
/// be skipped during the intermediate AST -> Zig AST render step.
variable_discards: std.StringArrayHashMap(*ast.Payload.Discard),
/// When the block corresponds to a function, keep track of the return type
/// so that the return expression can be cast, if necessary
return_type: ?clang.QualType = null,
/// C static local variables are wrapped in a block-local struct. The struct
/// is named after the (mangled) variable name, the Zig variable within the
/// struct itself is given this name.
const StaticInnerName = "static";
fn init(c: *Context, parent: *Scope, labeled: bool) !Block {
var blk = Block{
.base = .{
.id = .block,
.parent = parent,
},
.statements = std.ArrayList(Node).init(c.gpa),
.variables = AliasList.init(c.gpa),
.variable_discards = std.StringArrayHashMap(*ast.Payload.Discard).init(c.gpa),
};
if (labeled) {
blk.label = try blk.makeMangledName(c, "blk");
}
return blk;
}
fn deinit(self: *Block) void {
self.statements.deinit();
self.variables.deinit();
self.variable_discards.deinit();
self.* = undefined;
}
fn complete(self: *Block, c: *Context) !Node {
if (self.base.parent.?.id == .do_loop) {
// We reserve 1 extra statement if the parent is a do_loop. This is in case of
// do while, we want to put `if (cond) break;` at the end.
const alloc_len = self.statements.items.len + @intFromBool(self.base.parent.?.id == .do_loop);
var stmts = try c.arena.alloc(Node, alloc_len);
stmts.len = self.statements.items.len;
@memcpy(stmts[0..self.statements.items.len], self.statements.items);
return Tag.block.create(c.arena, .{
.label = self.label,
.stmts = stmts,
});
}
if (self.statements.items.len == 0) return Tag.empty_block.init();
return Tag.block.create(c.arena, .{
.label = self.label,
.stmts = try c.arena.dupe(Node, self.statements.items),
});
}
/// Given the desired name, return a name that does not shadow anything from outer scopes.
/// Inserts the returned name into the scope.
/// The name will not be visible to callers of getAlias.
fn reserveMangledName(scope: *Block, c: *Context, name: []const u8) ![]const u8 {
return scope.createMangledName(c, name, true);
}
/// Same as reserveMangledName, but enables the alias immediately.
fn makeMangledName(scope: *Block, c: *Context, name: []const u8) ![]const u8 {
return scope.createMangledName(c, name, false);
}
fn createMangledName(scope: *Block, c: *Context, name: []const u8, reservation: bool) ![]const u8 {
const name_copy = try c.arena.dupe(u8, name);
var proposed_name = name_copy;
while (scope.contains(proposed_name)) {
scope.mangle_count += 1;
proposed_name = try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ name, scope.mangle_count });
}
const new_mangle = try scope.variables.addOne();
if (reservation) {
new_mangle.* = .{ .name = name_copy, .alias = name_copy };
} else {
new_mangle.* = .{ .name = name_copy, .alias = proposed_name };
}
return proposed_name;
}
fn getAlias(scope: *Block, name: []const u8) []const u8 {
for (scope.variables.items) |p| {
if (mem.eql(u8, p.name, name))
return p.alias;
}
return scope.base.parent.?.getAlias(name);
}
fn localContains(scope: *Block, name: []const u8) bool {
for (scope.variables.items) |p| {
if (mem.eql(u8, p.alias, name))
return true;
}
return false;
}
fn contains(scope: *Block, name: []const u8) bool {
if (scope.localContains(name))
return true;
return scope.base.parent.?.contains(name);
}
fn discardVariable(scope: *Block, c: *Context, name: []const u8) Error!void {
const name_node = try Tag.identifier.create(c.arena, name);
const discard = try Tag.discard.create(c.arena, .{ .should_skip = false, .value = name_node });
try scope.statements.append(discard);
try scope.variable_discards.putNoClobber(name, discard.castTag(.discard).?);
}
};
const Root = struct {
base: Scope,
sym_table: SymbolTable,
macro_table: SymbolTable,
context: *Context,
nodes: std.ArrayList(Node),
fn init(c: *Context) Root {
return .{
.base = .{
.id = .root,
.parent = null,
},
.sym_table = SymbolTable.init(c.gpa),
.macro_table = SymbolTable.init(c.gpa),
.context = c,
.nodes = std.ArrayList(Node).init(c.gpa),
};
}
fn deinit(scope: *Root) void {
scope.sym_table.deinit();
scope.macro_table.deinit();
scope.nodes.deinit();
}
/// Check if the global scope contains this name, without looking into the "future", e.g.
/// ignore the preprocessed decl and macro names.
fn containsNow(scope: *Root, name: []const u8) bool {
return scope.sym_table.contains(name) or scope.macro_table.contains(name);
}
/// Check if the global scope contains the name, includes all decls that haven't been translated yet.
fn contains(scope: *Root, name: []const u8) bool {
return scope.containsNow(name) or scope.context.global_names.contains(name) or scope.context.weak_global_names.contains(name);
}
};
fn findBlockScope(inner: *Scope, c: *Context) !*Scope.Block {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.block => return @fieldParentPtr(Block, "base", scope),
.condition => return @fieldParentPtr(Condition, "base", scope).getBlockScope(c),
else => scope = scope.parent.?,
}
}
}
fn findBlockReturnType(inner: *Scope) clang.QualType {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.block => {
const block = @fieldParentPtr(Block, "base", scope);
if (block.return_type) |qt| return qt;
scope = scope.parent.?;
},
else => scope = scope.parent.?,
}
}
}
fn getAlias(scope: *Scope, name: []const u8) []const u8 {
return switch (scope.id) {
.root => return name,
.block => @fieldParentPtr(Block, "base", scope).getAlias(name),
.loop, .do_loop, .condition => scope.parent.?.getAlias(name),
};
}
fn contains(scope: *Scope, name: []const u8) bool {
return switch (scope.id) {
.root => @fieldParentPtr(Root, "base", scope).contains(name),
.block => @fieldParentPtr(Block, "base", scope).contains(name),
.loop, .do_loop, .condition => scope.parent.?.contains(name),
};
}
fn getBreakableScope(inner: *Scope) *Scope {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.loop, .do_loop => return scope,
else => scope = scope.parent.?,
}
}
}
/// Appends a node to the first block scope if inside a function, or to the root tree if not.
fn appendNode(inner: *Scope, node: Node) !void {
var scope = inner;
while (true) {
switch (scope.id) {
.root => {
const root = @fieldParentPtr(Root, "base", scope);
return root.nodes.append(node);
},
.block => {
const block = @fieldParentPtr(Block, "base", scope);
return block.statements.append(node);
},
else => scope = scope.parent.?,
}
}
}
fn skipVariableDiscard(inner: *Scope, name: []const u8) void {
var scope = inner;
while (true) {
switch (scope.id) {
.root => return,
.block => {
const block = @fieldParentPtr(Block, "base", scope);
if (block.variable_discards.get(name)) |discard| {
discard.data.should_skip = true;
return;
}
},
else => {},
}
scope = scope.parent.?;
}
}
};
pub const Context = struct {
gpa: mem.Allocator,
arena: mem.Allocator,
@ -829,7 +541,7 @@ fn transQualTypeMaybeInitialized(c: *Context, scope: *Scope, qt: clang.QualType,
/// var static = S.*;
/// }).static;
fn stringLiteralToCharStar(c: *Context, str: Node) Error!Node {
const var_name = Scope.Block.StaticInnerName;
const var_name = Scope.Block.static_inner_name;
const variables = try c.arena.alloc(Node, 1);
variables[0] = try Tag.mut_str.create(c.arena, .{ .name = var_name, .init = str });
@ -1423,11 +1135,6 @@ fn transEnumDecl(c: *Context, scope: *Scope, enum_decl: *const clang.EnumDecl) E
}
}
const ResultUsed = enum {
used,
unused,
};
fn transStmt(
c: *Context,
scope: *Scope,
@ -2070,7 +1777,7 @@ fn transDeclStmtOne(
init_node = try removeCVQualifiers(c, dst_type_node, init_node);
}
const var_name: []const u8 = if (is_static_local) Scope.Block.StaticInnerName else mangled_name;
const var_name: []const u8 = if (is_static_local) Scope.Block.static_inner_name else mangled_name;
var node = try Tag.var_decl.create(c.arena, .{
.is_pub = false,
.is_const = is_const,
@ -2153,7 +1860,7 @@ fn transDeclRefExpr(
if (var_decl.isStaticLocal()) {
ref_expr = try Tag.field_access.create(c.arena, .{
.lhs = ref_expr,
.field_name = Scope.Block.StaticInnerName,
.field_name = Scope.Block.static_inner_name,
});
}
}
@ -5322,7 +5029,7 @@ pub fn failDecl(c: *Context, loc: clang.SourceLocation, name: []const u8, compti
try c.global_scope.nodes.append(try Tag.warning.create(c.arena, location_comment));
}
const PatternList = struct {
pub const PatternList = struct {
patterns: []Pattern,
/// Templates must be function-like macros
@ -5455,7 +5162,7 @@ const PatternList = struct {
/// macro. Please review this logic carefully if changing that assumption. Two
/// function-like macros are considered equivalent if and only if they contain the same
/// list of tokens, modulo parameter names.
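/// For example (illustrative): `#define ADD(x) ((x) + 1)` and `#define INC(y) ((y) + 1)`
/// contain the same token list modulo parameter names, so they are considered equivalent.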
fn isEquivalent(self: Pattern, ms: MacroSlicer, args_hash: ArgsPositionMap) bool {
pub fn isEquivalent(self: Pattern, ms: MacroSlicer, args_hash: ArgsPositionMap) bool {
if (self.tokens.len != ms.tokens.len) return false;
if (args_hash.count() != self.args_hash.count()) return false;
@ -5496,7 +5203,7 @@ const PatternList = struct {
}
};
fn init(allocator: mem.Allocator) Error!PatternList {
pub fn init(allocator: mem.Allocator) Error!PatternList {
const patterns = try allocator.alloc(Pattern, templates.len);
for (templates, 0..) |template, i| {
try patterns[i].init(allocator, template);
@ -5504,12 +5211,12 @@ const PatternList = struct {
return PatternList{ .patterns = patterns };
}
fn deinit(self: *PatternList, allocator: mem.Allocator) void {
pub fn deinit(self: *PatternList, allocator: mem.Allocator) void {
for (self.patterns) |*pattern| pattern.deinit(allocator);
allocator.free(self.patterns);
}
fn match(self: PatternList, allocator: mem.Allocator, ms: MacroSlicer) Error!?Pattern {
pub fn match(self: PatternList, allocator: mem.Allocator, ms: MacroSlicer) Error!?Pattern {
var args_hash: ArgsPositionMap = .{};
defer args_hash.deinit(allocator);

311
src/translate_c/common.zig Normal file
View File

@ -0,0 +1,311 @@
const std = @import("std");
const ast = @import("ast.zig");
const Node = ast.Node;
const Tag = Node.Tag;
const CallingConvention = std.builtin.CallingConvention;
pub const Error = std.mem.Allocator.Error;
pub const MacroProcessingError = Error || error{UnexpectedMacroToken};
pub const TypeError = Error || error{UnsupportedType};
pub const TransError = TypeError || error{UnsupportedTranslation};
pub const SymbolTable = std.StringArrayHashMap(Node);
pub const AliasList = std.ArrayList(struct {
alias: []const u8,
name: []const u8,
});
pub const ResultUsed = enum {
used,
unused,
};
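/// Scope machinery shared by the Clang- and Aro-based translators. `Context` is the
/// translator's Context type and `Type` is its C type representation, mirroring the
/// instantiations `ScopeExtra(Context, clang.QualType)` in translate_c.zig and
/// `ScopeExtra(Context, Type)` (Aro's `Type`) in aro_translate_c.zig.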
pub fn ScopeExtra(comptime Context: type, comptime Type: type) type {
return struct {
id: Id,
parent: ?*Scope,
const Scope = @This();
pub const Id = enum {
block,
root,
condition,
loop,
do_loop,
};
/// Used for the scope of condition expressions, for example `if (cond)`.
/// The block is lazily initialised because it is only needed for rare
/// cases of comma operators being used.
pub const Condition = struct {
base: Scope,
block: ?Block = null,
pub fn getBlockScope(self: *Condition, c: *Context) !*Block {
if (self.block) |*b| return b;
self.block = try Block.init(c, &self.base, true);
return &self.block.?;
}
pub fn deinit(self: *Condition) void {
if (self.block) |*b| b.deinit();
}
};
/// Represents an in-progress Node.Block. This struct is stack-allocated.
/// When it is deinitialized, it produces a Node.Block which is allocated
/// into the main arena.
pub const Block = struct {
base: Scope,
statements: std.ArrayList(Node),
variables: AliasList,
mangle_count: u32 = 0,
label: ?[]const u8 = null,
/// By default all variables are discarded, since we do not know in advance if they
/// will be used. This maps the variable's name to the Discard payload, so that if
/// the variable is subsequently referenced we can indicate that the discard should
/// be skipped during the intermediate AST -> Zig AST render step.
variable_discards: std.StringArrayHashMap(*ast.Payload.Discard),
/// When the block corresponds to a function, keep track of the return type
/// so that the return expression can be cast, if necessary
return_type: ?Type = null,
/// C static local variables are wrapped in a block-local struct. The struct
/// is named after the (mangled) variable name, the Zig variable within the
/// struct itself is given this name.
pub const static_inner_name = "static";
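// Illustrative rendered shape for a static local (an assumption about the output;
// names are made up): `static int counter = 0;` becomes roughly
//   const counter = struct {
//       var static: c_int = 0;
//   };
// and later references read `counter.static`.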
pub fn init(c: *Context, parent: *Scope, labeled: bool) !Block {
var blk = Block{
.base = .{
.id = .block,
.parent = parent,
},
.statements = std.ArrayList(Node).init(c.gpa),
.variables = AliasList.init(c.gpa),
.variable_discards = std.StringArrayHashMap(*ast.Payload.Discard).init(c.gpa),
};
if (labeled) {
blk.label = try blk.makeMangledName(c, "blk");
}
return blk;
}
pub fn deinit(self: *Block) void {
self.statements.deinit();
self.variables.deinit();
self.variable_discards.deinit();
self.* = undefined;
}
pub fn complete(self: *Block, c: *Context) !Node {
if (self.base.parent.?.id == .do_loop) {
// We reserve 1 extra statement if the parent is a do_loop: for a do-while loop
// we want to put `if (cond) break;` at the end.
const alloc_len = self.statements.items.len + @intFromBool(self.base.parent.?.id == .do_loop);
var stmts = try c.arena.alloc(Node, alloc_len);
stmts.len = self.statements.items.len;
@memcpy(stmts[0..self.statements.items.len], self.statements.items);
return Tag.block.create(c.arena, .{
.label = self.label,
.stmts = stmts,
});
}
if (self.statements.items.len == 0) return Tag.empty_block.init();
return Tag.block.create(c.arena, .{
.label = self.label,
.stmts = try c.arena.dupe(Node, self.statements.items),
});
}
/// Given the desired name, return a name that does not shadow anything from outer scopes.
/// Inserts the returned name into the scope.
/// The name will not be visible to callers of getAlias.
pub fn reserveMangledName(scope: *Block, c: *Context, name: []const u8) ![]const u8 {
return scope.createMangledName(c, name, true);
}
/// Same as reserveMangledName, but enables the alias immediately.
pub fn makeMangledName(scope: *Block, c: *Context, name: []const u8) ![]const u8 {
return scope.createMangledName(c, name, false);
}
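/// Appends `_{N}` (using the block's shared mangle counter) until the proposed name
/// no longer shadows anything in scope; e.g. a colliding `x` may become `x_1` or
/// `x_2` (illustrative).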
pub fn createMangledName(scope: *Block, c: *Context, name: []const u8, reservation: bool) ![]const u8 {
const name_copy = try c.arena.dupe(u8, name);
var proposed_name = name_copy;
while (scope.contains(proposed_name)) {
scope.mangle_count += 1;
proposed_name = try std.fmt.allocPrint(c.arena, "{s}_{d}", .{ name, scope.mangle_count });
}
const new_mangle = try scope.variables.addOne();
if (reservation) {
new_mangle.* = .{ .name = name_copy, .alias = name_copy };
} else {
new_mangle.* = .{ .name = name_copy, .alias = proposed_name };
}
return proposed_name;
}
pub fn getAlias(scope: *Block, name: []const u8) []const u8 {
for (scope.variables.items) |p| {
if (std.mem.eql(u8, p.name, name))
return p.alias;
}
return scope.base.parent.?.getAlias(name);
}
pub fn localContains(scope: *Block, name: []const u8) bool {
for (scope.variables.items) |p| {
if (std.mem.eql(u8, p.alias, name))
return true;
}
return false;
}
pub fn contains(scope: *Block, name: []const u8) bool {
if (scope.localContains(name))
return true;
return scope.base.parent.?.contains(name);
}
pub fn discardVariable(scope: *Block, c: *Context, name: []const u8) Error!void {
const name_node = try Tag.identifier.create(c.arena, name);
const discard = try Tag.discard.create(c.arena, .{ .should_skip = false, .value = name_node });
try scope.statements.append(discard);
try scope.variable_discards.putNoClobber(name, discard.castTag(.discard).?);
}
};
pub const Root = struct {
base: Scope,
sym_table: SymbolTable,
macro_table: SymbolTable,
context: *Context,
nodes: std.ArrayList(Node),
pub fn init(c: *Context) Root {
return .{
.base = .{
.id = .root,
.parent = null,
},
.sym_table = SymbolTable.init(c.gpa),
.macro_table = SymbolTable.init(c.gpa),
.context = c,
.nodes = std.ArrayList(Node).init(c.gpa),
};
}
pub fn deinit(scope: *Root) void {
scope.sym_table.deinit();
scope.macro_table.deinit();
scope.nodes.deinit();
}
/// Check if the global scope contains this name, without looking into the "future", i.e.
/// ignoring the preprocessed decl and macro names.
pub fn containsNow(scope: *Root, name: []const u8) bool {
return scope.sym_table.contains(name) or scope.macro_table.contains(name);
}
/// Check if the global scope contains the name, including all decls that haven't been translated yet.
pub fn contains(scope: *Root, name: []const u8) bool {
return scope.containsNow(name) or scope.context.global_names.contains(name) or scope.context.weak_global_names.contains(name);
}
};
pub fn findBlockScope(inner: *Scope, c: *Context) !*Scope.Block {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.block => return @fieldParentPtr(Block, "base", scope),
.condition => return @fieldParentPtr(Condition, "base", scope).getBlockScope(c),
else => scope = scope.parent.?,
}
}
}
pub fn findBlockReturnType(inner: *Scope) Type {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.block => {
const block = @fieldParentPtr(Block, "base", scope);
if (block.return_type) |ty| return ty;
scope = scope.parent.?;
},
else => scope = scope.parent.?,
}
}
}
pub fn getAlias(scope: *Scope, name: []const u8) []const u8 {
return switch (scope.id) {
.root => return name,
.block => @fieldParentPtr(Block, "base", scope).getAlias(name),
.loop, .do_loop, .condition => scope.parent.?.getAlias(name),
};
}
pub fn contains(scope: *Scope, name: []const u8) bool {
return switch (scope.id) {
.root => @fieldParentPtr(Root, "base", scope).contains(name),
.block => @fieldParentPtr(Block, "base", scope).contains(name),
.loop, .do_loop, .condition => scope.parent.?.contains(name),
};
}
pub fn getBreakableScope(inner: *Scope) *Scope {
var scope = inner;
while (true) {
switch (scope.id) {
.root => unreachable,
.loop, .do_loop => return scope,
else => scope = scope.parent.?,
}
}
}
/// Appends a node to the first block scope if inside a function, or to the root tree if not.
pub fn appendNode(inner: *Scope, node: Node) !void {
var scope = inner;
while (true) {
switch (scope.id) {
.root => {
const root = @fieldParentPtr(Root, "base", scope);
return root.nodes.append(node);
},
.block => {
const block = @fieldParentPtr(Block, "base", scope);
return block.statements.append(node);
},
else => scope = scope.parent.?,
}
}
}
pub fn skipVariableDiscard(inner: *Scope, name: []const u8) void {
var scope = inner;
while (true) {
switch (scope.id) {
.root => return,
.block => {
const block = @fieldParentPtr(Block, "base", scope);
if (block.variable_discards.get(name)) |discard| {
discard.data.should_skip = true;
return;
}
},
else => {},
}
scope = scope.parent.?;
}
}
};
}