mirror of
https://github.com/ziglang/zig.git
synced 2025-12-06 06:13:07 +00:00
compiler: update aro and translate-c to latest; delete clang translate-c
This commit is contained in:
parent
91b0adc4c1
commit
f49a54745b
@ -197,7 +197,6 @@ set(ZIG_CPP_SOURCES
|
||||
# These are planned to stay even when we are self-hosted.
|
||||
src/zig_llvm.cpp
|
||||
src/zig_llvm-ar.cpp
|
||||
src/zig_clang.cpp
|
||||
src/zig_clang_driver.cpp
|
||||
src/zig_clang_cc1_main.cpp
|
||||
src/zig_clang_cc1as_main.cpp
|
||||
@ -537,7 +536,6 @@ set(ZIG_STAGE2_SOURCES
|
||||
src/Value.zig
|
||||
src/Zcu.zig
|
||||
src/Zcu/PerThread.zig
|
||||
src/clang.zig
|
||||
src/clang_options.zig
|
||||
src/clang_options_data.zig
|
||||
src/codegen.zig
|
||||
@ -641,7 +639,6 @@ set(ZIG_STAGE2_SOURCES
|
||||
src/register_manager.zig
|
||||
src/target.zig
|
||||
src/tracy.zig
|
||||
src/translate_c.zig
|
||||
src/libs/wasi_libc.zig
|
||||
)
|
||||
|
||||
|
||||
@ -732,13 +732,7 @@ fn addCompilerMod(b: *std.Build, options: AddCompilerModOptions) *std.Build.Modu
|
||||
.root_source_file = b.path("lib/compiler/aro/aro.zig"),
|
||||
});
|
||||
|
||||
const aro_translate_c_mod = b.createModule(.{
|
||||
.root_source_file = b.path("lib/compiler/aro_translate_c.zig"),
|
||||
});
|
||||
|
||||
aro_translate_c_mod.addImport("aro", aro_mod);
|
||||
compiler_mod.addImport("aro", aro_mod);
|
||||
compiler_mod.addImport("aro_translate_c", aro_translate_c_mod);
|
||||
|
||||
return compiler_mod;
|
||||
}
|
||||
|
||||
26
lib/compiler/aro/README.md
vendored
26
lib/compiler/aro/README.md
vendored
@ -1,26 +0,0 @@
|
||||
<img src="https://aro.vexu.eu/aro-logo.svg" alt="Aro" width="120px"/>
|
||||
|
||||
# Aro
|
||||
|
||||
A C compiler with the goal of providing fast compilation and low memory usage with good diagnostics.
|
||||
|
||||
Aro is included as an alternative C frontend in the [Zig compiler](https://github.com/ziglang/zig)
|
||||
for `translate-c` and eventually compiling C files by translating them to Zig first.
|
||||
Aro is developed in https://github.com/Vexu/arocc and the Zig dependency is
|
||||
updated from there when needed.
|
||||
|
||||
Currently most of standard C is supported up to C23 and as are many of the common
|
||||
extensions from GNU, MSVC, and Clang
|
||||
|
||||
Basic code generation is supported for x86-64 linux and can produce a valid hello world:
|
||||
```sh-session
|
||||
$ cat hello.c
|
||||
extern int printf(const char *restrict fmt, ...);
|
||||
int main(void) {
|
||||
printf("Hello, world!\n");
|
||||
return 0;
|
||||
}
|
||||
$ zig build && ./zig-out/bin/arocc hello.c -o hello
|
||||
$ ./hello
|
||||
Hello, world!
|
||||
```
|
||||
9
lib/compiler/aro/aro.zig
vendored
9
lib/compiler/aro/aro.zig
vendored
@ -5,12 +5,14 @@ pub const Driver = @import("aro/Driver.zig");
|
||||
pub const Parser = @import("aro/Parser.zig");
|
||||
pub const Preprocessor = @import("aro/Preprocessor.zig");
|
||||
pub const Source = @import("aro/Source.zig");
|
||||
pub const StringInterner = @import("aro/StringInterner.zig");
|
||||
pub const target_util = @import("aro/target.zig");
|
||||
pub const Tokenizer = @import("aro/Tokenizer.zig");
|
||||
pub const Toolchain = @import("aro/Toolchain.zig");
|
||||
pub const Tree = @import("aro/Tree.zig");
|
||||
pub const Type = @import("aro/Type.zig");
|
||||
pub const TypeMapper = @import("aro/StringInterner.zig").TypeMapper;
|
||||
pub const target_util = @import("aro/target.zig");
|
||||
pub const TypeStore = @import("aro/TypeStore.zig");
|
||||
pub const QualType = TypeStore.QualType;
|
||||
pub const Type = TypeStore.Type;
|
||||
pub const Value = @import("aro/Value.zig");
|
||||
|
||||
const backend = @import("backend.zig");
|
||||
@ -18,6 +20,7 @@ pub const Interner = backend.Interner;
|
||||
pub const Ir = backend.Ir;
|
||||
pub const Object = backend.Object;
|
||||
pub const CallingConvention = backend.CallingConvention;
|
||||
pub const Assembly = backend.Assembly;
|
||||
|
||||
pub const version_str = backend.version_str;
|
||||
pub const version = backend.version;
|
||||
|
||||
682
lib/compiler/aro/aro/Attribute.zig
vendored
682
lib/compiler/aro/aro/Attribute.zig
vendored
@ -6,9 +6,8 @@ const Compilation = @import("Compilation.zig");
|
||||
const Diagnostics = @import("Diagnostics.zig");
|
||||
const Parser = @import("Parser.zig");
|
||||
const Tree = @import("Tree.zig");
|
||||
const NodeIndex = Tree.NodeIndex;
|
||||
const TokenIndex = Tree.TokenIndex;
|
||||
const Type = @import("Type.zig");
|
||||
const QualType = @import("TypeStore.zig").QualType;
|
||||
const Value = @import("Value.zig");
|
||||
|
||||
const Attribute = @This();
|
||||
@ -39,79 +38,53 @@ pub const Kind = enum {
|
||||
};
|
||||
|
||||
pub const Iterator = struct {
|
||||
source: union(enum) {
|
||||
ty: Type,
|
||||
slice: []const Attribute,
|
||||
source: ?struct {
|
||||
qt: QualType,
|
||||
comp: *const Compilation,
|
||||
},
|
||||
slice: []const Attribute,
|
||||
index: usize,
|
||||
|
||||
pub fn initSlice(slice: ?[]const Attribute) Iterator {
|
||||
return .{ .source = .{ .slice = slice orelse &.{} }, .index = 0 };
|
||||
pub fn initSlice(slice: []const Attribute) Iterator {
|
||||
return .{ .source = null, .slice = slice, .index = 0 };
|
||||
}
|
||||
|
||||
pub fn initType(ty: Type) Iterator {
|
||||
return .{ .source = .{ .ty = ty }, .index = 0 };
|
||||
pub fn initType(qt: QualType, comp: *const Compilation) Iterator {
|
||||
return .{ .source = .{ .qt = qt, .comp = comp }, .slice = &.{}, .index = 0 };
|
||||
}
|
||||
|
||||
/// returns the next attribute as well as its index within the slice or current type
|
||||
/// The index can be used to determine when a nested type has been recursed into
|
||||
pub fn next(self: *Iterator) ?struct { Attribute, usize } {
|
||||
switch (self.source) {
|
||||
.slice => |slice| {
|
||||
if (self.index < slice.len) {
|
||||
defer self.index += 1;
|
||||
return .{ slice[self.index], self.index };
|
||||
}
|
||||
},
|
||||
.ty => |ty| {
|
||||
switch (ty.specifier) {
|
||||
.typeof_type => {
|
||||
self.* = .{ .source = .{ .ty = ty.data.sub_type.* }, .index = 0 };
|
||||
return self.next();
|
||||
},
|
||||
.typeof_expr => {
|
||||
self.* = .{ .source = .{ .ty = ty.data.expr.ty }, .index = 0 };
|
||||
return self.next();
|
||||
},
|
||||
.attributed => {
|
||||
if (self.index < ty.data.attributed.attributes.len) {
|
||||
defer self.index += 1;
|
||||
return .{ ty.data.attributed.attributes[self.index], self.index };
|
||||
}
|
||||
self.* = .{ .source = .{ .ty = ty.data.attributed.base }, .index = 0 };
|
||||
return self.next();
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
},
|
||||
if (self.index < self.slice.len) {
|
||||
defer self.index += 1;
|
||||
return .{ self.slice[self.index], self.index };
|
||||
}
|
||||
if (self.source) |*source| {
|
||||
var cur = source.qt;
|
||||
if (cur.isInvalid()) {
|
||||
self.source = null;
|
||||
return null;
|
||||
}
|
||||
while (true) switch (cur.type(source.comp)) {
|
||||
.typeof => |typeof| cur = typeof.base,
|
||||
.attributed => |attributed| {
|
||||
self.slice = attributed.attributes;
|
||||
self.index = 1;
|
||||
source.qt = attributed.base;
|
||||
return .{ self.slice[0], 0 };
|
||||
},
|
||||
.typedef => |typedef| cur = typedef.base,
|
||||
else => {
|
||||
self.source = null;
|
||||
break;
|
||||
},
|
||||
};
|
||||
}
|
||||
return null;
|
||||
}
|
||||
};
|
||||
|
||||
pub const ArgumentType = enum {
|
||||
string,
|
||||
identifier,
|
||||
int,
|
||||
alignment,
|
||||
float,
|
||||
complex_float,
|
||||
expression,
|
||||
nullptr_t,
|
||||
|
||||
pub fn toString(self: ArgumentType) []const u8 {
|
||||
return switch (self) {
|
||||
.string => "a string",
|
||||
.identifier => "an identifier",
|
||||
.int, .alignment => "an integer constant",
|
||||
.nullptr_t => "nullptr",
|
||||
.float => "a floating point number",
|
||||
.complex_float => "a complex floating point number",
|
||||
.expression => "an expression",
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
/// number of required arguments
|
||||
pub fn requiredArgCount(attr: Tag) u32 {
|
||||
switch (attr) {
|
||||
@ -211,21 +184,20 @@ pub fn wantsIdentEnum(attr: Tag) bool {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: []const u8) ?Diagnostics.Message {
|
||||
pub fn diagnoseIdent(attr: Tag, arguments: *Arguments, ident: TokenIndex, p: *Parser) !bool {
|
||||
switch (attr) {
|
||||
inline else => |tag| {
|
||||
const fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
|
||||
if (fields.len == 0) unreachable;
|
||||
const Unwrapped = UnwrapOptional(fields[0].type);
|
||||
if (@typeInfo(Unwrapped) != .@"enum") unreachable;
|
||||
if (std.meta.stringToEnum(Unwrapped, normalize(ident))) |enum_val| {
|
||||
if (std.meta.stringToEnum(Unwrapped, normalize(p.tokSlice(ident)))) |enum_val| {
|
||||
@field(@field(arguments, @tagName(tag)), fields[0].name) = enum_val;
|
||||
return null;
|
||||
return false;
|
||||
}
|
||||
return Diagnostics.Message{
|
||||
.tag = .unknown_attr_enum,
|
||||
.extra = .{ .attr_enum = .{ .tag = attr } },
|
||||
};
|
||||
|
||||
try p.err(ident, .unknown_attr_enum, .{ @tagName(attr), Formatting.choices(attr) });
|
||||
return true;
|
||||
},
|
||||
}
|
||||
}
|
||||
@ -244,7 +216,7 @@ pub fn wantsAlignment(attr: Tag, idx: usize) bool {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, p: *Parser) !?Diagnostics.Message {
|
||||
pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, arg_start: TokenIndex, p: *Parser) !bool {
|
||||
switch (attr) {
|
||||
inline else => |tag| {
|
||||
const arg_fields = @typeInfo(@field(attributes, @tagName(tag))).@"struct".fields;
|
||||
@ -254,17 +226,25 @@ pub fn diagnoseAlignment(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Pa
|
||||
inline 0...arg_fields.len - 1 => |arg_i| {
|
||||
if (UnwrapOptional(arg_fields[arg_i].type) != Alignment) unreachable;
|
||||
|
||||
if (!res.val.is(.int, p.comp)) return Diagnostics.Message{ .tag = .alignas_unavailable };
|
||||
if (!res.val.is(.int, p.comp)) {
|
||||
try p.err(arg_start, .alignas_unavailable, .{});
|
||||
return true;
|
||||
}
|
||||
if (res.val.compare(.lt, Value.zero, p.comp)) {
|
||||
return Diagnostics.Message{ .tag = .negative_alignment, .extra = .{ .str = try res.str(p) } };
|
||||
try p.err(arg_start, .negative_alignment, .{res});
|
||||
return true;
|
||||
}
|
||||
const requested = res.val.toInt(u29, p.comp) orelse {
|
||||
return Diagnostics.Message{ .tag = .maximum_alignment, .extra = .{ .str = try res.str(p) } };
|
||||
try p.err(arg_start, .maximum_alignment, .{res});
|
||||
return true;
|
||||
};
|
||||
if (!std.mem.isValidAlign(requested)) return Diagnostics.Message{ .tag = .non_pow2_align };
|
||||
if (!std.mem.isValidAlign(requested)) {
|
||||
try p.err(arg_start, .non_pow2_align, .{});
|
||||
return true;
|
||||
}
|
||||
|
||||
@field(@field(arguments, @tagName(tag)), arg_fields[arg_i].name) = Alignment{ .requested = requested };
|
||||
return null;
|
||||
@field(@field(arguments, @tagName(tag)), arg_fields[arg_i].name) = .{ .requested = requested };
|
||||
return false;
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
@ -278,102 +258,105 @@ fn diagnoseField(
|
||||
comptime Wanted: type,
|
||||
arguments: *Arguments,
|
||||
res: Parser.Result,
|
||||
arg_start: TokenIndex,
|
||||
node: Tree.Node,
|
||||
p: *Parser,
|
||||
) !?Diagnostics.Message {
|
||||
) !bool {
|
||||
const string = "a string";
|
||||
const identifier = "an identifier";
|
||||
const int = "an integer constant";
|
||||
const alignment = "an integer constant";
|
||||
const nullptr_t = "nullptr";
|
||||
const float = "a floating point number";
|
||||
const complex_float = "a complex floating point number";
|
||||
const expression = "an expression";
|
||||
|
||||
const expected: []const u8 = switch (Wanted) {
|
||||
Value => string,
|
||||
Identifier => identifier,
|
||||
u32 => int,
|
||||
Alignment => alignment,
|
||||
CallingConvention => identifier,
|
||||
else => switch (@typeInfo(Wanted)) {
|
||||
.@"enum" => if (Wanted.opts.enum_kind == .string) string else identifier,
|
||||
else => unreachable,
|
||||
},
|
||||
};
|
||||
|
||||
if (res.val.opt_ref == .none) {
|
||||
if (Wanted == Identifier and node.tag == .decl_ref_expr) {
|
||||
@field(@field(arguments, decl.name), field.name) = Identifier{ .tok = node.data.decl_ref };
|
||||
return null;
|
||||
if (Wanted == Identifier and node == .decl_ref_expr) {
|
||||
@field(@field(arguments, decl.name), field.name) = .{ .tok = node.decl_ref_expr.name_tok };
|
||||
return false;
|
||||
}
|
||||
return invalidArgMsg(Wanted, .expression);
|
||||
|
||||
try p.err(arg_start, .attribute_arg_invalid, .{ expected, expression });
|
||||
return true;
|
||||
}
|
||||
const key = p.comp.interner.get(res.val.ref());
|
||||
switch (key) {
|
||||
.int => {
|
||||
if (@typeInfo(Wanted) == .int) {
|
||||
@field(@field(arguments, decl.name), field.name) = res.val.toInt(Wanted, p.comp) orelse return .{
|
||||
.tag = .attribute_int_out_of_range,
|
||||
.extra = .{ .str = try res.str(p) },
|
||||
@field(@field(arguments, decl.name), field.name) = res.val.toInt(Wanted, p.comp) orelse {
|
||||
try p.err(arg_start, .attribute_int_out_of_range, .{res});
|
||||
return true;
|
||||
};
|
||||
return null;
|
||||
|
||||
return false;
|
||||
}
|
||||
},
|
||||
.bytes => |bytes| {
|
||||
if (Wanted == Value) {
|
||||
if (node.tag != .string_literal_expr or (!node.ty.elemType().is(.char) and !node.ty.elemType().is(.uchar))) {
|
||||
return .{
|
||||
.tag = .attribute_requires_string,
|
||||
.extra = .{ .str = decl.name },
|
||||
};
|
||||
validate: {
|
||||
if (node != .string_literal_expr) break :validate;
|
||||
switch (node.string_literal_expr.qt.childType(p.comp).get(p.comp, .int).?) {
|
||||
.char, .uchar, .schar => {},
|
||||
else => break :validate,
|
||||
}
|
||||
@field(@field(arguments, decl.name), field.name) = try p.removeNull(res.val);
|
||||
return false;
|
||||
}
|
||||
@field(@field(arguments, decl.name), field.name) = try p.removeNull(res.val);
|
||||
return null;
|
||||
|
||||
try p.err(arg_start, .attribute_requires_string, .{decl.name});
|
||||
return true;
|
||||
} else if (@typeInfo(Wanted) == .@"enum" and @hasDecl(Wanted, "opts") and Wanted.opts.enum_kind == .string) {
|
||||
const str = bytes[0 .. bytes.len - 1];
|
||||
if (std.meta.stringToEnum(Wanted, str)) |enum_val| {
|
||||
@field(@field(arguments, decl.name), field.name) = enum_val;
|
||||
return null;
|
||||
} else {
|
||||
return .{
|
||||
.tag = .unknown_attr_enum,
|
||||
.extra = .{ .attr_enum = .{ .tag = std.meta.stringToEnum(Tag, decl.name).? } },
|
||||
};
|
||||
return false;
|
||||
}
|
||||
|
||||
try p.err(arg_start, .unknown_attr_enum, .{ decl.name, Formatting.choices(@field(Tag, decl.name)) });
|
||||
return true;
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
return invalidArgMsg(Wanted, switch (key) {
|
||||
.int => .int,
|
||||
.bytes => .string,
|
||||
.float => .float,
|
||||
.complex => .complex_float,
|
||||
.null => .nullptr_t,
|
||||
.int_ty,
|
||||
.float_ty,
|
||||
.complex_ty,
|
||||
.ptr_ty,
|
||||
.noreturn_ty,
|
||||
.void_ty,
|
||||
.func_ty,
|
||||
.array_ty,
|
||||
.vector_ty,
|
||||
.record_ty,
|
||||
=> unreachable,
|
||||
});
|
||||
|
||||
try p.err(arg_start, .attribute_arg_invalid, .{ expected, switch (key) {
|
||||
.int => int,
|
||||
.bytes => string,
|
||||
.float => float,
|
||||
.complex => complex_float,
|
||||
.null => nullptr_t,
|
||||
else => unreachable,
|
||||
} });
|
||||
return true;
|
||||
}
|
||||
|
||||
fn invalidArgMsg(comptime Expected: type, actual: ArgumentType) Diagnostics.Message {
|
||||
return .{
|
||||
.tag = .attribute_arg_invalid,
|
||||
.extra = .{ .attr_arg_type = .{ .expected = switch (Expected) {
|
||||
Value => .string,
|
||||
Identifier => .identifier,
|
||||
u32 => .int,
|
||||
Alignment => .alignment,
|
||||
CallingConvention => .identifier,
|
||||
else => switch (@typeInfo(Expected)) {
|
||||
.@"enum" => if (Expected.opts.enum_kind == .string) .string else .identifier,
|
||||
else => unreachable,
|
||||
},
|
||||
}, .actual = actual } },
|
||||
};
|
||||
}
|
||||
|
||||
pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, node: Tree.Node, p: *Parser) !?Diagnostics.Message {
|
||||
pub fn diagnose(attr: Tag, arguments: *Arguments, arg_idx: u32, res: Parser.Result, arg_start: TokenIndex, node: Tree.Node, p: *Parser) !bool {
|
||||
switch (attr) {
|
||||
inline else => |tag| {
|
||||
const decl = @typeInfo(attributes).@"struct".decls[@intFromEnum(tag)];
|
||||
const max_arg_count = comptime maxArgCount(tag);
|
||||
if (arg_idx >= max_arg_count) return Diagnostics.Message{
|
||||
.tag = .attribute_too_many_args,
|
||||
.extra = .{ .attr_arg_count = .{ .attribute = attr, .expected = max_arg_count } },
|
||||
};
|
||||
if (arg_idx >= max_arg_count) {
|
||||
try p.err(arg_start, .attribute_too_many_args, .{ @tagName(attr), max_arg_count });
|
||||
return true;
|
||||
}
|
||||
|
||||
const arg_fields = @typeInfo(@field(attributes, decl.name)).@"struct".fields;
|
||||
switch (arg_idx) {
|
||||
inline 0...arg_fields.len - 1 => |arg_i| {
|
||||
return diagnoseField(decl, arg_fields[arg_i], UnwrapOptional(arg_fields[arg_i].type), arguments, res, node, p);
|
||||
return diagnoseField(decl, arg_fields[arg_i], UnwrapOptional(arg_fields[arg_i].type), arguments, res, arg_start, node, p);
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
@ -386,8 +369,8 @@ const EnumTypes = enum {
|
||||
identifier,
|
||||
};
|
||||
pub const Alignment = struct {
|
||||
node: NodeIndex = .none,
|
||||
requested: u29,
|
||||
node: Tree.Node.OptIndex = .null,
|
||||
requested: u32,
|
||||
};
|
||||
pub const Identifier = struct {
|
||||
tok: TokenIndex = 0,
|
||||
@ -556,6 +539,7 @@ const attributes = struct {
|
||||
pub const nonstring = struct {};
|
||||
pub const noplt = struct {};
|
||||
pub const @"noreturn" = struct {};
|
||||
pub const nothrow = struct {};
|
||||
// TODO: union args ?
|
||||
// const optimize = struct {
|
||||
// // optimize, // u32 | []const u8 -- optimize?
|
||||
@ -697,6 +681,39 @@ const attributes = struct {
|
||||
pub const calling_convention = struct {
|
||||
cc: CallingConvention,
|
||||
};
|
||||
pub const nullability = struct {
|
||||
kind: enum {
|
||||
nonnull,
|
||||
nullable,
|
||||
nullable_result,
|
||||
unspecified,
|
||||
|
||||
const opts = struct {
|
||||
const enum_kind = .identifier;
|
||||
};
|
||||
},
|
||||
};
|
||||
pub const unaligned = struct {};
|
||||
pub const pcs = struct {
|
||||
kind: enum {
|
||||
aapcs,
|
||||
@"aapcs-vfp",
|
||||
|
||||
const opts = struct {
|
||||
const enum_kind = .string;
|
||||
};
|
||||
},
|
||||
};
|
||||
pub const riscv_vector_cc = struct {};
|
||||
pub const aarch64_sve_pcs = struct {};
|
||||
pub const aarch64_vector_pcs = struct {};
|
||||
pub const fastcall = struct {};
|
||||
pub const stdcall = struct {};
|
||||
pub const vectorcall = struct {};
|
||||
pub const cdecl = struct {};
|
||||
pub const thiscall = struct {};
|
||||
pub const sysv_abi = struct {};
|
||||
pub const ms_abi = struct {};
|
||||
};
|
||||
|
||||
pub const Tag = std.meta.DeclEnum(attributes);
|
||||
@ -786,108 +803,120 @@ fn ignoredAttrErr(p: *Parser, tok: TokenIndex, attr: Attribute.Tag, context: []c
|
||||
}
|
||||
|
||||
pub const applyParameterAttributes = applyVariableAttributes;
|
||||
pub fn applyVariableAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Diagnostics.Tag) !Type {
|
||||
pub fn applyVariableAttributes(p: *Parser, qt: QualType, attr_buf_start: usize, diagnostic: ?Parser.Diagnostic) !QualType {
|
||||
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
|
||||
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
|
||||
p.attr_application_buf.items.len = 0;
|
||||
var base_ty = ty;
|
||||
var base_qt = qt;
|
||||
var common = false;
|
||||
var nocommon = false;
|
||||
for (attrs, toks) |attr, tok| switch (attr.tag) {
|
||||
// zig fmt: off
|
||||
.alias, .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .weak, .used,
|
||||
.noinit, .retain, .persistent, .section, .mode, .asm_label,
|
||||
.noinit, .retain, .persistent, .section, .mode, .asm_label, .nullability, .unaligned,
|
||||
=> try p.attr_application_buf.append(p.gpa, attr),
|
||||
// zig fmt: on
|
||||
.common => if (nocommon) {
|
||||
try p.errTok(.ignore_common, tok);
|
||||
try p.err(tok, .ignore_common, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
common = true;
|
||||
},
|
||||
.nocommon => if (common) {
|
||||
try p.errTok(.ignore_nocommon, tok);
|
||||
try p.err(tok, .ignore_nocommon, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
nocommon = true;
|
||||
},
|
||||
.vector_size => try attr.applyVectorSize(p, tok, &base_ty),
|
||||
.aligned => try attr.applyAligned(p, base_ty, tag),
|
||||
.nonstring => if (!base_ty.isArray() or !(base_ty.is(.char) or base_ty.is(.uchar) or base_ty.is(.schar))) {
|
||||
try p.errStr(.non_string_ignored, tok, try p.typeStr(ty));
|
||||
.vector_size => try attr.applyVectorSize(p, tok, &base_qt),
|
||||
.aligned => try attr.applyAligned(p, base_qt, diagnostic),
|
||||
.nonstring => {
|
||||
if (base_qt.get(p.comp, .array)) |array_ty| {
|
||||
if (array_ty.elem.get(p.comp, .int)) |int_ty| switch (int_ty) {
|
||||
.char, .uchar, .schar => {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
continue;
|
||||
},
|
||||
else => {},
|
||||
};
|
||||
}
|
||||
try p.err(tok, .non_string_ignored, .{qt});
|
||||
},
|
||||
.uninitialized => if (p.func.qt == null) {
|
||||
try p.err(tok, .local_variable_attribute, .{"uninitialized"});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
},
|
||||
.uninitialized => if (p.func.ty == null) {
|
||||
try p.errStr(.local_variable_attribute, tok, "uninitialized");
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
},
|
||||
.cleanup => if (p.func.ty == null) {
|
||||
try p.errStr(.local_variable_attribute, tok, "cleanup");
|
||||
.cleanup => if (p.func.qt == null) {
|
||||
try p.err(tok, .local_variable_attribute, .{"cleanup"});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
},
|
||||
.calling_convention => try applyCallingConvention(attr, p, tok, base_qt),
|
||||
.alloc_size,
|
||||
.copy,
|
||||
.tls_model,
|
||||
.visibility,
|
||||
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .variables } }),
|
||||
=> |t| try p.err(tok, .attribute_todo, .{ @tagName(t), "variables" }),
|
||||
// There is already an error in Parser for _Noreturn keyword
|
||||
.noreturn => if (attr.syntax != .keyword) try ignoredAttrErr(p, tok, attr.tag, "variables"),
|
||||
else => try ignoredAttrErr(p, tok, attr.tag, "variables"),
|
||||
};
|
||||
return base_ty.withAttributes(p.arena, p.attr_application_buf.items);
|
||||
return applySelected(base_qt, p);
|
||||
}
|
||||
|
||||
pub fn applyFieldAttributes(p: *Parser, field_ty: *Type, attr_buf_start: usize) ![]const Attribute {
|
||||
pub fn applyFieldAttributes(p: *Parser, field_qt: *QualType, attr_buf_start: usize) ![]const Attribute {
|
||||
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
|
||||
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
|
||||
p.attr_application_buf.items.len = 0;
|
||||
for (attrs, toks) |attr, tok| switch (attr.tag) {
|
||||
// zig fmt: off
|
||||
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .warn_unused_result, .nodiscard,
|
||||
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned,
|
||||
.mode, .warn_unused_result, .nodiscard, .nullability, .unaligned,
|
||||
=> try p.attr_application_buf.append(p.gpa, attr),
|
||||
// zig fmt: on
|
||||
.vector_size => try attr.applyVectorSize(p, tok, field_ty),
|
||||
.aligned => try attr.applyAligned(p, field_ty.*, null),
|
||||
.vector_size => try attr.applyVectorSize(p, tok, field_qt),
|
||||
.aligned => try attr.applyAligned(p, field_qt.*, null),
|
||||
.calling_convention => try applyCallingConvention(attr, p, tok, field_qt.*),
|
||||
else => try ignoredAttrErr(p, tok, attr.tag, "fields"),
|
||||
};
|
||||
if (p.attr_application_buf.items.len == 0) return &[0]Attribute{};
|
||||
return p.arena.dupe(Attribute, p.attr_application_buf.items);
|
||||
return p.attr_application_buf.items;
|
||||
}
|
||||
|
||||
pub fn applyTypeAttributes(p: *Parser, ty: Type, attr_buf_start: usize, tag: ?Diagnostics.Tag) !Type {
|
||||
pub fn applyTypeAttributes(p: *Parser, qt: QualType, attr_buf_start: usize, diagnostic: ?Parser.Diagnostic) !QualType {
|
||||
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
|
||||
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
|
||||
p.attr_application_buf.items.len = 0;
|
||||
var base_ty = ty;
|
||||
var base_qt = qt;
|
||||
for (attrs, toks) |attr, tok| switch (attr.tag) {
|
||||
// zig fmt: off
|
||||
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode,
|
||||
.@"packed", .may_alias, .deprecated, .unavailable, .unused, .warn_if_not_aligned, .mode, .nullability, .unaligned,
|
||||
=> try p.attr_application_buf.append(p.gpa, attr),
|
||||
// zig fmt: on
|
||||
.transparent_union => try attr.applyTransparentUnion(p, tok, base_ty),
|
||||
.vector_size => try attr.applyVectorSize(p, tok, &base_ty),
|
||||
.aligned => try attr.applyAligned(p, base_ty, tag),
|
||||
.designated_init => if (base_ty.is(.@"struct")) {
|
||||
.transparent_union => try attr.applyTransparentUnion(p, tok, base_qt),
|
||||
.vector_size => try attr.applyVectorSize(p, tok, &base_qt),
|
||||
.aligned => try attr.applyAligned(p, base_qt, diagnostic),
|
||||
.designated_init => if (base_qt.is(p.comp, .@"struct")) {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
} else {
|
||||
try p.errTok(.designated_init_invalid, tok);
|
||||
try p.err(tok, .designated_init_invalid, .{});
|
||||
},
|
||||
.calling_convention => try applyCallingConvention(attr, p, tok, base_qt),
|
||||
.alloc_size,
|
||||
.copy,
|
||||
.scalar_storage_order,
|
||||
.nonstring,
|
||||
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .types } }),
|
||||
=> |t| try p.err(tok, .attribute_todo, .{ @tagName(t), "types" }),
|
||||
else => try ignoredAttrErr(p, tok, attr.tag, "types"),
|
||||
};
|
||||
return base_ty.withAttributes(p.arena, p.attr_application_buf.items);
|
||||
return applySelected(base_qt, p);
|
||||
}
|
||||
|
||||
pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type {
|
||||
pub fn applyFunctionAttributes(p: *Parser, qt: QualType, attr_buf_start: usize) !QualType {
|
||||
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
|
||||
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
|
||||
p.attr_application_buf.items.len = 0;
|
||||
var base_ty = ty;
|
||||
var base_qt = qt;
|
||||
var hot = false;
|
||||
var cold = false;
|
||||
var @"noinline" = false;
|
||||
@ -897,55 +926,153 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
|
||||
.noreturn, .unused, .used, .warning, .deprecated, .unavailable, .weak, .pure, .leaf,
|
||||
.@"const", .warn_unused_result, .section, .returns_nonnull, .returns_twice, .@"error",
|
||||
.externally_visible, .retain, .flatten, .gnu_inline, .alias, .asm_label, .nodiscard,
|
||||
.reproducible, .unsequenced,
|
||||
.reproducible, .unsequenced, .nothrow, .nullability, .unaligned,
|
||||
=> try p.attr_application_buf.append(p.gpa, attr),
|
||||
// zig fmt: on
|
||||
.hot => if (cold) {
|
||||
try p.errTok(.ignore_hot, tok);
|
||||
try p.err(tok, .ignore_hot, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
hot = true;
|
||||
},
|
||||
.cold => if (hot) {
|
||||
try p.errTok(.ignore_cold, tok);
|
||||
try p.err(tok, .ignore_cold, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
cold = true;
|
||||
},
|
||||
.always_inline => if (@"noinline") {
|
||||
try p.errTok(.ignore_always_inline, tok);
|
||||
try p.err(tok, .ignore_always_inline, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
always_inline = true;
|
||||
},
|
||||
.@"noinline" => if (always_inline) {
|
||||
try p.errTok(.ignore_noinline, tok);
|
||||
try p.err(tok, .ignore_noinline, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
@"noinline" = true;
|
||||
},
|
||||
.aligned => try attr.applyAligned(p, base_ty, null),
|
||||
.format => try attr.applyFormat(p, base_ty),
|
||||
.calling_convention => switch (attr.args.calling_convention.cc) {
|
||||
.C => continue,
|
||||
.stdcall, .thiscall => switch (p.comp.target.cpu.arch) {
|
||||
.x86 => try p.attr_application_buf.append(p.gpa, attr),
|
||||
else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?),
|
||||
},
|
||||
.vectorcall => switch (p.comp.target.cpu.arch) {
|
||||
.x86, .aarch64, .aarch64_be => try p.attr_application_buf.append(p.gpa, attr),
|
||||
else => try p.errStr(.callconv_not_supported, tok, p.tok_ids[tok].lexeme().?),
|
||||
},
|
||||
.aligned => try attr.applyAligned(p, base_qt, null),
|
||||
.format => try attr.applyFormat(p, base_qt),
|
||||
.calling_convention => try applyCallingConvention(attr, p, tok, base_qt),
|
||||
.fastcall => if (p.comp.target.cpu.arch == .x86) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .fastcall } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
} else {
|
||||
try p.err(tok, .callconv_not_supported, .{"fastcall"});
|
||||
},
|
||||
.stdcall => if (p.comp.target.cpu.arch == .x86) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .stdcall } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
} else {
|
||||
try p.err(tok, .callconv_not_supported, .{"stdcall"});
|
||||
},
|
||||
.thiscall => if (p.comp.target.cpu.arch == .x86) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .thiscall } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
} else {
|
||||
try p.err(tok, .callconv_not_supported, .{"thiscall"});
|
||||
},
|
||||
.vectorcall => if (p.comp.target.cpu.arch == .x86 or p.comp.target.cpu.arch.isAARCH64()) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .vectorcall } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
} else {
|
||||
try p.err(tok, .callconv_not_supported, .{"vectorcall"});
|
||||
},
|
||||
.cdecl => {},
|
||||
.pcs => if (p.comp.target.cpu.arch.isArm()) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = switch (attr.args.pcs.kind) {
|
||||
.aapcs => .arm_aapcs,
|
||||
.@"aapcs-vfp" => .arm_aapcs_vfp,
|
||||
} } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
} else {
|
||||
try p.err(tok, .callconv_not_supported, .{"pcs"});
|
||||
},
|
||||
.riscv_vector_cc => if (p.comp.target.cpu.arch.isRISCV()) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .riscv_vector } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
} else {
|
||||
try p.err(tok, .callconv_not_supported, .{"pcs"});
|
||||
},
|
||||
.aarch64_sve_pcs => if (p.comp.target.cpu.arch.isAARCH64()) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .aarch64_sve_pcs } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
} else {
|
||||
try p.err(tok, .callconv_not_supported, .{"pcs"});
|
||||
},
|
||||
.aarch64_vector_pcs => if (p.comp.target.cpu.arch.isAARCH64()) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .aarch64_vector_pcs } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
} else {
|
||||
try p.err(tok, .callconv_not_supported, .{"pcs"});
|
||||
},
|
||||
.sysv_abi => if (p.comp.target.cpu.arch == .x86_64 and p.comp.target.os.tag == .windows) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .x86_64_sysv } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
},
|
||||
.ms_abi => if (p.comp.target.cpu.arch == .x86_64 and p.comp.target.os.tag != .windows) {
|
||||
try p.attr_application_buf.append(p.gpa, .{
|
||||
.tag = .calling_convention,
|
||||
.args = .{ .calling_convention = .{ .cc = .x86_64_win } },
|
||||
.syntax = attr.syntax,
|
||||
});
|
||||
},
|
||||
.malloc => {
|
||||
if (base_ty.returnType().isPtr()) {
|
||||
if (base_qt.get(p.comp, .func).?.return_type.isPointer(p.comp)) {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
} else {
|
||||
try ignoredAttrErr(p, tok, attr.tag, "functions that do not return pointers");
|
||||
}
|
||||
},
|
||||
.alloc_align => {
|
||||
const func_ty = base_qt.get(p.comp, .func).?;
|
||||
if (func_ty.return_type.isPointer(p.comp)) {
|
||||
if (attr.args.alloc_align.position == 0 or attr.args.alloc_align.position > func_ty.params.len) {
|
||||
try p.err(tok, .attribute_param_out_of_bounds, .{ "alloc_align", 1 });
|
||||
} else {
|
||||
const arg_qt = func_ty.params[attr.args.alloc_align.position - 1].qt;
|
||||
if (arg_qt.isInvalid()) continue;
|
||||
const arg_sk = arg_qt.scalarKind(p.comp);
|
||||
if (!arg_sk.isInt() or !arg_sk.isReal()) {
|
||||
try p.err(tok, .alloc_align_required_int_param, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
try p.err(tok, .alloc_align_requires_ptr_return, .{});
|
||||
}
|
||||
},
|
||||
.access,
|
||||
.alloc_align,
|
||||
.alloc_size,
|
||||
.artificial,
|
||||
.assume_aligned,
|
||||
@ -984,13 +1111,13 @@ pub fn applyFunctionAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Typ
|
||||
.visibility,
|
||||
.weakref,
|
||||
.zero_call_used_regs,
|
||||
=> |t| try p.errExtra(.attribute_todo, tok, .{ .attribute_todo = .{ .tag = t, .kind = .functions } }),
|
||||
=> |t| try p.err(tok, .attribute_todo, .{ @tagName(t), "functions" }),
|
||||
else => try ignoredAttrErr(p, tok, attr.tag, "functions"),
|
||||
};
|
||||
return ty.withAttributes(p.arena, p.attr_application_buf.items);
|
||||
return applySelected(qt, p);
|
||||
}
|
||||
|
||||
pub fn applyLabelAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type {
|
||||
pub fn applyLabelAttributes(p: *Parser, attr_buf_start: usize) !QualType {
|
||||
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
|
||||
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
|
||||
p.attr_application_buf.items.len = 0;
|
||||
@ -999,41 +1126,48 @@ pub fn applyLabelAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type {
|
||||
for (attrs, toks) |attr, tok| switch (attr.tag) {
|
||||
.unused => try p.attr_application_buf.append(p.gpa, attr),
|
||||
.hot => if (cold) {
|
||||
try p.errTok(.ignore_hot, tok);
|
||||
try p.err(tok, .ignore_hot, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
hot = true;
|
||||
},
|
||||
.cold => if (hot) {
|
||||
try p.errTok(.ignore_cold, tok);
|
||||
try p.err(tok, .ignore_cold, .{});
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
cold = true;
|
||||
},
|
||||
else => try ignoredAttrErr(p, tok, attr.tag, "labels"),
|
||||
};
|
||||
return ty.withAttributes(p.arena, p.attr_application_buf.items);
|
||||
return applySelected(.void, p);
|
||||
}
|
||||
|
||||
pub fn applyStatementAttributes(p: *Parser, ty: Type, expr_start: TokenIndex, attr_buf_start: usize) !Type {
|
||||
pub fn applyStatementAttributes(p: *Parser, expr_start: TokenIndex, attr_buf_start: usize) !QualType {
|
||||
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
|
||||
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
|
||||
p.attr_application_buf.items.len = 0;
|
||||
for (attrs, toks) |attr, tok| switch (attr.tag) {
|
||||
.fallthrough => if (p.tok_ids[p.tok_i] != .keyword_case and p.tok_ids[p.tok_i] != .keyword_default) {
|
||||
// TODO: this condition is not completely correct; the last statement of a compound
|
||||
// statement is also valid if it precedes a switch label (so intervening '}' are ok,
|
||||
// but only if they close a compound statement)
|
||||
try p.errTok(.invalid_fallthrough, expr_start);
|
||||
} else {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
.fallthrough => {
|
||||
for (p.tok_ids[p.tok_i..]) |tok_id| {
|
||||
switch (tok_id) {
|
||||
.keyword_case, .keyword_default, .eof => {
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
break;
|
||||
},
|
||||
.r_brace => {},
|
||||
else => {
|
||||
try p.err(expr_start, .invalid_fallthrough, .{});
|
||||
break;
|
||||
},
|
||||
}
|
||||
}
|
||||
},
|
||||
else => try p.errStr(.cannot_apply_attribute_to_statement, tok, @tagName(attr.tag)),
|
||||
else => try p.err(tok, .cannot_apply_attribute_to_statement, .{@tagName(attr.tag)}),
|
||||
};
|
||||
return ty.withAttributes(p.arena, p.attr_application_buf.items);
|
||||
return applySelected(.void, p);
|
||||
}
|
||||
|
||||
pub fn applyEnumeratorAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !Type {
|
||||
pub fn applyEnumeratorAttributes(p: *Parser, qt: QualType, attr_buf_start: usize) !QualType {
|
||||
const attrs = p.attr_buf.items(.attr)[attr_buf_start..];
|
||||
const toks = p.attr_buf.items(.tok)[attr_buf_start..];
|
||||
p.attr_application_buf.items.len = 0;
|
||||
@ -1041,80 +1175,118 @@ pub fn applyEnumeratorAttributes(p: *Parser, ty: Type, attr_buf_start: usize) !T
|
||||
.deprecated, .unavailable => try p.attr_application_buf.append(p.gpa, attr),
|
||||
else => try ignoredAttrErr(p, tok, attr.tag, "enums"),
|
||||
};
|
||||
return ty.withAttributes(p.arena, p.attr_application_buf.items);
|
||||
return applySelected(qt, p);
|
||||
}
|
||||
|
||||
fn applyAligned(attr: Attribute, p: *Parser, ty: Type, tag: ?Diagnostics.Tag) !void {
|
||||
const base = ty.canonicalize(.standard);
|
||||
fn applyAligned(attr: Attribute, p: *Parser, qt: QualType, diagnostic: ?Parser.Diagnostic) !void {
|
||||
if (attr.args.aligned.alignment) |alignment| alignas: {
|
||||
if (attr.syntax != .keyword) break :alignas;
|
||||
|
||||
const align_tok = attr.args.aligned.__name_tok;
|
||||
if (tag) |t| try p.errTok(t, align_tok);
|
||||
if (diagnostic) |d| try p.err(align_tok, d, .{});
|
||||
|
||||
const default_align = base.alignof(p.comp);
|
||||
if (ty.isFunc()) {
|
||||
try p.errTok(.alignas_on_func, align_tok);
|
||||
if (qt.isInvalid()) return;
|
||||
const default_align = qt.base(p.comp).qt.alignof(p.comp);
|
||||
if (qt.is(p.comp, .func)) {
|
||||
try p.err(align_tok, .alignas_on_func, .{});
|
||||
} else if (alignment.requested < default_align) {
|
||||
try p.errExtra(.minimum_alignment, align_tok, .{ .unsigned = default_align });
|
||||
try p.err(align_tok, .minimum_alignment, .{default_align});
|
||||
}
|
||||
}
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
}
|
||||
|
||||
fn applyTransparentUnion(attr: Attribute, p: *Parser, tok: TokenIndex, ty: Type) !void {
|
||||
const union_ty = ty.get(.@"union") orelse {
|
||||
return p.errTok(.transparent_union_wrong_type, tok);
|
||||
fn applyTransparentUnion(attr: Attribute, p: *Parser, tok: TokenIndex, qt: QualType) !void {
|
||||
const union_ty = qt.get(p.comp, .@"union") orelse {
|
||||
return p.err(tok, .transparent_union_wrong_type, .{});
|
||||
};
|
||||
// TODO validate union defined at end
|
||||
if (union_ty.data.record.isIncomplete()) return;
|
||||
const fields = union_ty.data.record.fields;
|
||||
if (fields.len == 0) {
|
||||
return p.errTok(.transparent_union_one_field, tok);
|
||||
if (union_ty.layout == null) return;
|
||||
if (union_ty.fields.len == 0) {
|
||||
return p.err(tok, .transparent_union_one_field, .{});
|
||||
}
|
||||
const first_field_size = fields[0].ty.bitSizeof(p.comp).?;
|
||||
for (fields[1..]) |field| {
|
||||
const field_size = field.ty.bitSizeof(p.comp).?;
|
||||
const first_field_size = union_ty.fields[0].qt.bitSizeof(p.comp);
|
||||
for (union_ty.fields[1..]) |field| {
|
||||
const field_size = field.qt.bitSizeof(p.comp);
|
||||
if (field_size == first_field_size) continue;
|
||||
const mapper = p.comp.string_interner.getSlowTypeMapper();
|
||||
const str = try std.fmt.allocPrint(
|
||||
p.comp.diagnostics.arena.allocator(),
|
||||
"'{s}' ({d}",
|
||||
.{ mapper.lookup(field.name), field_size },
|
||||
);
|
||||
try p.errStr(.transparent_union_size, field.name_tok, str);
|
||||
return p.errExtra(.transparent_union_size_note, fields[0].name_tok, .{ .unsigned = first_field_size });
|
||||
|
||||
try p.err(field.name_tok, .transparent_union_size, .{ field.name.lookup(p.comp), field_size });
|
||||
return p.err(union_ty.fields[0].name_tok, .transparent_union_size_note, .{first_field_size});
|
||||
}
|
||||
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
}
|
||||
|
||||
fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, ty: *Type) !void {
|
||||
const base = ty.base();
|
||||
const is_enum = ty.is(.@"enum");
|
||||
if (!(ty.isInt() or ty.isFloat()) or !ty.isReal() or (is_enum and p.comp.langopts.emulate == .gcc)) {
|
||||
try p.errStr(.invalid_vec_elem_ty, tok, try p.typeStr(ty.*));
|
||||
fn applyVectorSize(attr: Attribute, p: *Parser, tok: TokenIndex, qt: *QualType) !void {
|
||||
if (qt.isInvalid()) return;
|
||||
const scalar_kind = qt.scalarKind(p.comp);
|
||||
if (scalar_kind != .int and scalar_kind != .float) {
|
||||
if (qt.get(p.comp, .@"enum")) |enum_ty| {
|
||||
if (p.comp.langopts.emulate == .clang and enum_ty.incomplete) {
|
||||
return; // Clang silently ignores vector_size on incomplete enums.
|
||||
}
|
||||
}
|
||||
try p.err(tok, .invalid_vec_elem_ty, .{qt.*});
|
||||
return error.ParsingFailed;
|
||||
}
|
||||
if (is_enum) return;
|
||||
if (qt.get(p.comp, .bit_int)) |bit_int| {
|
||||
if (bit_int.bits < 8) {
|
||||
try p.err(tok, .bit_int_vec_too_small, .{});
|
||||
return error.ParsingFailed;
|
||||
} else if (!std.math.isPowerOfTwo(bit_int.bits)) {
|
||||
try p.err(tok, .bit_int_vec_not_pow2, .{});
|
||||
return error.ParsingFailed;
|
||||
}
|
||||
}
|
||||
|
||||
const vec_bytes = attr.args.vector_size.bytes;
|
||||
const ty_size = ty.sizeof(p.comp).?;
|
||||
if (vec_bytes % ty_size != 0) {
|
||||
return p.errTok(.vec_size_not_multiple, tok);
|
||||
const elem_size = qt.sizeof(p.comp);
|
||||
if (vec_bytes % elem_size != 0) {
|
||||
return p.err(tok, .vec_size_not_multiple, .{});
|
||||
}
|
||||
const vec_size = vec_bytes / ty_size;
|
||||
|
||||
const arr_ty = try p.arena.create(Type.Array);
|
||||
arr_ty.* = .{ .elem = ty.*, .len = vec_size };
|
||||
base.* = .{
|
||||
.specifier = .vector,
|
||||
.data = .{ .array = arr_ty },
|
||||
};
|
||||
qt.* = try p.comp.type_store.put(p.gpa, .{ .vector = .{
|
||||
.elem = qt.*,
|
||||
.len = @intCast(vec_bytes / elem_size),
|
||||
} });
|
||||
}
|
||||
|
||||
fn applyFormat(attr: Attribute, p: *Parser, ty: Type) !void {
|
||||
fn applyFormat(attr: Attribute, p: *Parser, qt: QualType) !void {
|
||||
// TODO validate
|
||||
_ = ty;
|
||||
_ = qt;
|
||||
try p.attr_application_buf.append(p.gpa, attr);
|
||||
}
|
||||
|
||||
fn applyCallingConvention(attr: Attribute, p: *Parser, tok: TokenIndex, qt: QualType) !void {
|
||||
if (!qt.is(p.comp, .func)) {
|
||||
return p.err(tok, .callconv_non_func, .{ p.tok_ids[tok].symbol(), qt });
|
||||
}
|
||||
switch (attr.args.calling_convention.cc) {
|
||||
.c => {},
|
||||
.stdcall, .thiscall, .fastcall, .regcall => switch (p.comp.target.cpu.arch) {
|
||||
.x86 => try p.attr_application_buf.append(p.gpa, attr),
|
||||
else => try p.err(tok, .callconv_not_supported, .{p.tok_ids[tok].symbol()}),
|
||||
},
|
||||
.vectorcall => switch (p.comp.target.cpu.arch) {
|
||||
.x86, .aarch64, .aarch64_be => try p.attr_application_buf.append(p.gpa, attr),
|
||||
else => try p.err(tok, .callconv_not_supported, .{p.tok_ids[tok].symbol()}),
|
||||
},
|
||||
.riscv_vector,
|
||||
.aarch64_sve_pcs,
|
||||
.aarch64_vector_pcs,
|
||||
.arm_aapcs,
|
||||
.arm_aapcs_vfp,
|
||||
.x86_64_sysv,
|
||||
.x86_64_win,
|
||||
=> unreachable, // These can't come from keyword syntax
|
||||
}
|
||||
}
|
||||
|
||||
fn applySelected(qt: QualType, p: *Parser) !QualType {
|
||||
if (p.attr_application_buf.items.len == 0) return qt;
|
||||
if (qt.isInvalid()) return qt;
|
||||
return (try p.comp.type_store.put(p.gpa, .{ .attributed = .{
|
||||
.base = qt,
|
||||
.attributes = p.attr_application_buf.items,
|
||||
} })).withQualifiers(qt);
|
||||
}
|
||||
|
||||
1716
lib/compiler/aro/aro/Attribute/names.zig
vendored
1716
lib/compiler/aro/aro/Attribute/names.zig
vendored
File diff suppressed because it is too large
Load Diff
293
lib/compiler/aro/aro/Builtins.zig
vendored
293
lib/compiler/aro/aro/Builtins.zig
vendored
@ -1,21 +1,23 @@
|
||||
const std = @import("std");
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Type = @import("Type.zig");
|
||||
const TypeDescription = @import("Builtins/TypeDescription.zig");
|
||||
const target_util = @import("target.zig");
|
||||
const StringId = @import("StringInterner.zig").StringId;
|
||||
const LangOpts = @import("LangOpts.zig");
|
||||
const Parser = @import("Parser.zig");
|
||||
const target_util = @import("target.zig");
|
||||
const TypeStore = @import("TypeStore.zig");
|
||||
const QualType = TypeStore.QualType;
|
||||
const Builder = TypeStore.Builder;
|
||||
const TypeDescription = @import("Builtins/TypeDescription.zig");
|
||||
|
||||
const Properties = @import("Builtins/Properties.zig");
|
||||
pub const Builtin = @import("Builtins/Builtin.zig").with(Properties);
|
||||
|
||||
const Expanded = struct {
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
builtin: Builtin,
|
||||
};
|
||||
|
||||
const NameToTypeMap = std.StringHashMapUnmanaged(Type);
|
||||
const NameToTypeMap = std.StringHashMapUnmanaged(QualType);
|
||||
|
||||
const Builtins = @This();
|
||||
|
||||
@ -25,38 +27,38 @@ pub fn deinit(b: *Builtins, gpa: std.mem.Allocator) void {
|
||||
b._name_to_type_map.deinit(gpa);
|
||||
}
|
||||
|
||||
fn specForSize(comp: *const Compilation, size_bits: u32) Type.Builder.Specifier {
|
||||
var ty = Type{ .specifier = .short };
|
||||
if (ty.sizeof(comp).? * 8 == size_bits) return .short;
|
||||
fn specForSize(comp: *const Compilation, size_bits: u32) TypeStore.Builder.Specifier {
|
||||
var qt: QualType = .short;
|
||||
if (qt.bitSizeof(comp) == size_bits) return .short;
|
||||
|
||||
ty.specifier = .int;
|
||||
if (ty.sizeof(comp).? * 8 == size_bits) return .int;
|
||||
qt = .int;
|
||||
if (qt.bitSizeof(comp) == size_bits) return .int;
|
||||
|
||||
ty.specifier = .long;
|
||||
if (ty.sizeof(comp).? * 8 == size_bits) return .long;
|
||||
qt = .long;
|
||||
if (qt.bitSizeof(comp) == size_bits) return .long;
|
||||
|
||||
ty.specifier = .long_long;
|
||||
if (ty.sizeof(comp).? * 8 == size_bits) return .long_long;
|
||||
qt = .long_long;
|
||||
if (qt.bitSizeof(comp) == size_bits) return .long_long;
|
||||
|
||||
unreachable;
|
||||
}
|
||||
|
||||
fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *const Compilation, allocator: std.mem.Allocator) !Type {
|
||||
var builder: Type.Builder = .{ .error_on_invalid = true };
|
||||
fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *Compilation) !QualType {
|
||||
var parser: Parser = undefined;
|
||||
parser.comp = comp;
|
||||
var builder: TypeStore.Builder = .{ .parser = &parser, .error_on_invalid = true };
|
||||
|
||||
var require_native_int32 = false;
|
||||
var require_native_int64 = false;
|
||||
for (desc.prefix) |prefix| {
|
||||
switch (prefix) {
|
||||
.L => builder.combine(undefined, .long, 0) catch unreachable,
|
||||
.LL => {
|
||||
builder.combine(undefined, .long, 0) catch unreachable;
|
||||
builder.combine(undefined, .long, 0) catch unreachable;
|
||||
},
|
||||
.L => builder.combine(.long, 0) catch unreachable,
|
||||
.LL => builder.combine(.long_long, 0) catch unreachable,
|
||||
.LLL => {
|
||||
switch (builder.specifier) {
|
||||
.none => builder.specifier = .int128,
|
||||
.signed => builder.specifier = .sint128,
|
||||
.unsigned => builder.specifier = .uint128,
|
||||
switch (builder.type) {
|
||||
.none => builder.type = .int128,
|
||||
.signed => builder.type = .sint128,
|
||||
.unsigned => builder.type = .uint128,
|
||||
else => unreachable,
|
||||
}
|
||||
},
|
||||
@ -65,239 +67,226 @@ fn createType(desc: TypeDescription, it: *TypeDescription.TypeIterator, comp: *c
|
||||
.N => {
|
||||
std.debug.assert(desc.spec == .i);
|
||||
if (!target_util.isLP64(comp.target)) {
|
||||
builder.combine(undefined, .long, 0) catch unreachable;
|
||||
builder.combine(.long, 0) catch unreachable;
|
||||
}
|
||||
},
|
||||
.O => {
|
||||
builder.combine(undefined, .long, 0) catch unreachable;
|
||||
builder.combine(.long, 0) catch unreachable;
|
||||
if (comp.target.os.tag != .opencl) {
|
||||
builder.combine(undefined, .long, 0) catch unreachable;
|
||||
builder.combine(.long, 0) catch unreachable;
|
||||
}
|
||||
},
|
||||
.S => builder.combine(undefined, .signed, 0) catch unreachable,
|
||||
.U => builder.combine(undefined, .unsigned, 0) catch unreachable,
|
||||
.S => builder.combine(.signed, 0) catch unreachable,
|
||||
.U => builder.combine(.unsigned, 0) catch unreachable,
|
||||
.I => {
|
||||
// Todo: compile-time constant integer
|
||||
},
|
||||
}
|
||||
}
|
||||
switch (desc.spec) {
|
||||
.v => builder.combine(undefined, .void, 0) catch unreachable,
|
||||
.b => builder.combine(undefined, .bool, 0) catch unreachable,
|
||||
.c => builder.combine(undefined, .char, 0) catch unreachable,
|
||||
.s => builder.combine(undefined, .short, 0) catch unreachable,
|
||||
.v => builder.combine(.void, 0) catch unreachable,
|
||||
.b => builder.combine(.bool, 0) catch unreachable,
|
||||
.c => builder.combine(.char, 0) catch unreachable,
|
||||
.s => builder.combine(.short, 0) catch unreachable,
|
||||
.i => {
|
||||
if (require_native_int32) {
|
||||
builder.specifier = specForSize(comp, 32);
|
||||
builder.type = specForSize(comp, 32);
|
||||
} else if (require_native_int64) {
|
||||
builder.specifier = specForSize(comp, 64);
|
||||
builder.type = specForSize(comp, 64);
|
||||
} else {
|
||||
switch (builder.specifier) {
|
||||
switch (builder.type) {
|
||||
.int128, .sint128, .uint128 => {},
|
||||
else => builder.combine(undefined, .int, 0) catch unreachable,
|
||||
else => builder.combine(.int, 0) catch unreachable,
|
||||
}
|
||||
}
|
||||
},
|
||||
.h => builder.combine(undefined, .fp16, 0) catch unreachable,
|
||||
.x => builder.combine(undefined, .float16, 0) catch unreachable,
|
||||
.h => builder.combine(.fp16, 0) catch unreachable,
|
||||
.x => builder.combine(.float16, 0) catch unreachable,
|
||||
.y => {
|
||||
// Todo: __bf16
|
||||
return .{ .specifier = .invalid };
|
||||
return .invalid;
|
||||
},
|
||||
.f => builder.combine(undefined, .float, 0) catch unreachable,
|
||||
.f => builder.combine(.float, 0) catch unreachable,
|
||||
.d => {
|
||||
if (builder.specifier == .long_long) {
|
||||
builder.specifier = .float128;
|
||||
if (builder.type == .long_long) {
|
||||
builder.type = .float128;
|
||||
} else {
|
||||
builder.combine(undefined, .double, 0) catch unreachable;
|
||||
builder.combine(.double, 0) catch unreachable;
|
||||
}
|
||||
},
|
||||
.z => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
builder.specifier = Type.Builder.fromType(comp.types.size);
|
||||
std.debug.assert(builder.type == .none);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.size);
|
||||
},
|
||||
.w => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
builder.specifier = Type.Builder.fromType(comp.types.wchar);
|
||||
std.debug.assert(builder.type == .none);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.wchar);
|
||||
},
|
||||
.F => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
builder.specifier = Type.Builder.fromType(comp.types.ns_constant_string.ty);
|
||||
std.debug.assert(builder.type == .none);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.ns_constant_string);
|
||||
},
|
||||
.G => {
|
||||
// Todo: id
|
||||
return .{ .specifier = .invalid };
|
||||
return .invalid;
|
||||
},
|
||||
.H => {
|
||||
// Todo: SEL
|
||||
return .{ .specifier = .invalid };
|
||||
return .invalid;
|
||||
},
|
||||
.M => {
|
||||
// Todo: struct objc_super
|
||||
return .{ .specifier = .invalid };
|
||||
return .invalid;
|
||||
},
|
||||
.a => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
std.debug.assert(builder.type == .none);
|
||||
std.debug.assert(desc.suffix.len == 0);
|
||||
builder.specifier = Type.Builder.fromType(comp.types.va_list);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.va_list);
|
||||
},
|
||||
.A => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
std.debug.assert(builder.type == .none);
|
||||
std.debug.assert(desc.suffix.len == 0);
|
||||
var va_list = comp.types.va_list;
|
||||
if (va_list.isArray()) va_list.decayArray();
|
||||
builder.specifier = Type.Builder.fromType(va_list);
|
||||
var va_list = comp.type_store.va_list;
|
||||
std.debug.assert(!va_list.is(comp, .array));
|
||||
builder.type = Builder.fromType(comp, va_list);
|
||||
},
|
||||
.V => |element_count| {
|
||||
std.debug.assert(desc.suffix.len == 0);
|
||||
const child_desc = it.next().?;
|
||||
const child_ty = try createType(child_desc, undefined, comp, allocator);
|
||||
const arr_ty = try allocator.create(Type.Array);
|
||||
arr_ty.* = .{
|
||||
const elem_qt = try createType(child_desc, undefined, comp);
|
||||
const vector_qt = try comp.type_store.put(comp.gpa, .{ .vector = .{
|
||||
.elem = elem_qt,
|
||||
.len = element_count,
|
||||
.elem = child_ty,
|
||||
};
|
||||
const vector_ty: Type = .{ .specifier = .vector, .data = .{ .array = arr_ty } };
|
||||
builder.specifier = Type.Builder.fromType(vector_ty);
|
||||
} });
|
||||
builder.type = .{ .other = vector_qt };
|
||||
},
|
||||
.q => {
|
||||
// Todo: scalable vector
|
||||
return .{ .specifier = .invalid };
|
||||
return .invalid;
|
||||
},
|
||||
.E => {
|
||||
// Todo: ext_vector (OpenCL vector)
|
||||
return .{ .specifier = .invalid };
|
||||
return .invalid;
|
||||
},
|
||||
.X => |child| {
|
||||
builder.combine(undefined, .complex, 0) catch unreachable;
|
||||
builder.combine(.complex, 0) catch unreachable;
|
||||
switch (child) {
|
||||
.float => builder.combine(undefined, .float, 0) catch unreachable,
|
||||
.double => builder.combine(undefined, .double, 0) catch unreachable,
|
||||
.float => builder.combine(.float, 0) catch unreachable,
|
||||
.double => builder.combine(.double, 0) catch unreachable,
|
||||
.longdouble => {
|
||||
builder.combine(undefined, .long, 0) catch unreachable;
|
||||
builder.combine(undefined, .double, 0) catch unreachable;
|
||||
builder.combine(.long, 0) catch unreachable;
|
||||
builder.combine(.double, 0) catch unreachable;
|
||||
},
|
||||
}
|
||||
},
|
||||
.Y => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
std.debug.assert(builder.type == .none);
|
||||
std.debug.assert(desc.suffix.len == 0);
|
||||
builder.specifier = Type.Builder.fromType(comp.types.ptrdiff);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.ptrdiff);
|
||||
},
|
||||
.P => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
if (comp.types.file.specifier == .invalid) {
|
||||
return comp.types.file;
|
||||
std.debug.assert(builder.type == .none);
|
||||
if (comp.type_store.file.isInvalid()) {
|
||||
return comp.type_store.file;
|
||||
}
|
||||
builder.specifier = Type.Builder.fromType(comp.types.file);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.file);
|
||||
},
|
||||
.J => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
std.debug.assert(builder.type == .none);
|
||||
std.debug.assert(desc.suffix.len == 0);
|
||||
if (comp.types.jmp_buf.specifier == .invalid) {
|
||||
return comp.types.jmp_buf;
|
||||
if (comp.type_store.jmp_buf.isInvalid()) {
|
||||
return comp.type_store.jmp_buf;
|
||||
}
|
||||
builder.specifier = Type.Builder.fromType(comp.types.jmp_buf);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.jmp_buf);
|
||||
},
|
||||
.SJ => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
std.debug.assert(builder.type == .none);
|
||||
std.debug.assert(desc.suffix.len == 0);
|
||||
if (comp.types.sigjmp_buf.specifier == .invalid) {
|
||||
return comp.types.sigjmp_buf;
|
||||
if (comp.type_store.sigjmp_buf.isInvalid()) {
|
||||
return comp.type_store.sigjmp_buf;
|
||||
}
|
||||
builder.specifier = Type.Builder.fromType(comp.types.sigjmp_buf);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.sigjmp_buf);
|
||||
},
|
||||
.K => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
if (comp.types.ucontext_t.specifier == .invalid) {
|
||||
return comp.types.ucontext_t;
|
||||
std.debug.assert(builder.type == .none);
|
||||
if (comp.type_store.ucontext_t.isInvalid()) {
|
||||
return comp.type_store.ucontext_t;
|
||||
}
|
||||
builder.specifier = Type.Builder.fromType(comp.types.ucontext_t);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.ucontext_t);
|
||||
},
|
||||
.p => {
|
||||
std.debug.assert(builder.specifier == .none);
|
||||
std.debug.assert(builder.type == .none);
|
||||
std.debug.assert(desc.suffix.len == 0);
|
||||
builder.specifier = Type.Builder.fromType(comp.types.pid_t);
|
||||
builder.type = Builder.fromType(comp, comp.type_store.pid_t);
|
||||
},
|
||||
.@"!" => return .{ .specifier = .invalid },
|
||||
.@"!" => return .invalid,
|
||||
}
|
||||
for (desc.suffix) |suffix| {
|
||||
switch (suffix) {
|
||||
.@"*" => |address_space| {
|
||||
_ = address_space; // TODO: handle address space
|
||||
const elem_ty = try allocator.create(Type);
|
||||
elem_ty.* = builder.finish(undefined) catch unreachable;
|
||||
const ty = Type{
|
||||
.specifier = .pointer,
|
||||
.data = .{ .sub_type = elem_ty },
|
||||
};
|
||||
builder.qual = .{};
|
||||
builder.specifier = Type.Builder.fromType(ty);
|
||||
const pointer_qt = try comp.type_store.put(comp.gpa, .{ .pointer = .{
|
||||
.child = builder.finish() catch unreachable,
|
||||
.decayed = null,
|
||||
} });
|
||||
|
||||
builder.@"const" = null;
|
||||
builder.@"volatile" = null;
|
||||
builder.restrict = null;
|
||||
builder.type = .{ .other = pointer_qt };
|
||||
},
|
||||
.C => builder.qual.@"const" = 0,
|
||||
.D => builder.qual.@"volatile" = 0,
|
||||
.R => builder.qual.restrict = 0,
|
||||
.C => builder.@"const" = 0,
|
||||
.D => builder.@"volatile" = 0,
|
||||
.R => builder.restrict = 0,
|
||||
}
|
||||
}
|
||||
return builder.finish(undefined) catch unreachable;
|
||||
return builder.finish() catch unreachable;
|
||||
}
|
||||
|
||||
fn createBuiltin(comp: *const Compilation, builtin: Builtin, type_arena: std.mem.Allocator) !Type {
|
||||
fn createBuiltin(comp: *Compilation, builtin: Builtin) !QualType {
|
||||
var it = TypeDescription.TypeIterator.init(builtin.properties.param_str);
|
||||
|
||||
const ret_ty_desc = it.next().?;
|
||||
if (ret_ty_desc.spec == .@"!") {
|
||||
// Todo: handle target-dependent definition
|
||||
}
|
||||
const ret_ty = try createType(ret_ty_desc, &it, comp, type_arena);
|
||||
const ret_ty = try createType(ret_ty_desc, &it, comp);
|
||||
var param_count: usize = 0;
|
||||
var params: [Builtin.max_param_count]Type.Func.Param = undefined;
|
||||
var params: [Builtin.max_param_count]TypeStore.Type.Func.Param = undefined;
|
||||
while (it.next()) |desc| : (param_count += 1) {
|
||||
params[param_count] = .{ .name_tok = 0, .ty = try createType(desc, &it, comp, type_arena), .name = .empty };
|
||||
params[param_count] = .{ .name_tok = 0, .qt = try createType(desc, &it, comp), .name = .empty, .node = .null };
|
||||
}
|
||||
|
||||
const duped_params = try type_arena.dupe(Type.Func.Param, params[0..param_count]);
|
||||
const func = try type_arena.create(Type.Func);
|
||||
|
||||
func.* = .{
|
||||
return comp.type_store.put(comp.gpa, .{ .func = .{
|
||||
.return_type = ret_ty,
|
||||
.params = duped_params,
|
||||
};
|
||||
return .{
|
||||
.specifier = if (builtin.properties.isVarArgs()) .var_args_func else .func,
|
||||
.data = .{ .func = func },
|
||||
};
|
||||
.kind = if (builtin.properties.isVarArgs()) .variadic else .normal,
|
||||
.params = params[0..param_count],
|
||||
} });
|
||||
}
|
||||
|
||||
/// Asserts that the builtin has already been created
|
||||
pub fn lookup(b: *const Builtins, name: []const u8) Expanded {
|
||||
const builtin = Builtin.fromName(name).?;
|
||||
const ty = b._name_to_type_map.get(name).?;
|
||||
return .{
|
||||
.builtin = builtin,
|
||||
.ty = ty,
|
||||
};
|
||||
const qt = b._name_to_type_map.get(name).?;
|
||||
return .{ .builtin = builtin, .qt = qt };
|
||||
}
|
||||
|
||||
pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8, type_arena: std.mem.Allocator) !?Expanded {
|
||||
const ty = b._name_to_type_map.get(name) orelse {
|
||||
pub fn getOrCreate(b: *Builtins, comp: *Compilation, name: []const u8) !?Expanded {
|
||||
const qt = b._name_to_type_map.get(name) orelse {
|
||||
const builtin = Builtin.fromName(name) orelse return null;
|
||||
if (!comp.hasBuiltinFunction(builtin)) return null;
|
||||
|
||||
try b._name_to_type_map.ensureUnusedCapacity(comp.gpa, 1);
|
||||
const ty = try createBuiltin(comp, builtin, type_arena);
|
||||
b._name_to_type_map.putAssumeCapacity(name, ty);
|
||||
const qt = try createBuiltin(comp, builtin);
|
||||
b._name_to_type_map.putAssumeCapacity(name, qt);
|
||||
|
||||
return .{
|
||||
.builtin = builtin,
|
||||
.ty = ty,
|
||||
.qt = qt,
|
||||
};
|
||||
};
|
||||
const builtin = Builtin.fromName(name).?;
|
||||
return .{
|
||||
.builtin = builtin,
|
||||
.ty = ty,
|
||||
};
|
||||
return .{ .builtin = builtin, .qt = qt };
|
||||
}
|
||||
|
||||
pub const Iterator = struct {
|
||||
@ -350,19 +339,21 @@ test Iterator {
|
||||
}
|
||||
|
||||
test "All builtins" {
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
_ = try comp.generateBuiltinMacros(.include_system_defines);
|
||||
var arena = std.heap.ArenaAllocator.init(std.testing.allocator);
|
||||
defer arena.deinit();
|
||||
var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena_state.deinit();
|
||||
const arena = arena_state.allocator();
|
||||
|
||||
const type_arena = arena.allocator();
|
||||
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
|
||||
try comp.type_store.initNamedTypes(&comp);
|
||||
comp.type_store.va_list = try comp.type_store.va_list.decay(&comp);
|
||||
|
||||
var builtin_it = Iterator{};
|
||||
while (builtin_it.next()) |entry| {
|
||||
const name = try type_arena.dupe(u8, entry.name);
|
||||
if (try comp.builtins.getOrCreate(&comp, name, type_arena)) |func_ty| {
|
||||
const get_again = (try comp.builtins.getOrCreate(&comp, name, std.testing.failing_allocator)).?;
|
||||
const name = try arena.dupe(u8, entry.name);
|
||||
if (try comp.builtins.getOrCreate(&comp, name)) |func_ty| {
|
||||
const get_again = (try comp.builtins.getOrCreate(&comp, name)).?;
|
||||
const found_by_lookup = comp.builtins.lookup(name);
|
||||
try std.testing.expectEqual(func_ty.builtin.tag, get_again.builtin.tag);
|
||||
try std.testing.expectEqual(func_ty.builtin.tag, found_by_lookup.builtin.tag);
|
||||
@ -373,19 +364,19 @@ test "All builtins" {
|
||||
test "Allocation failures" {
|
||||
const Test = struct {
|
||||
fn testOne(allocator: std.mem.Allocator) !void {
|
||||
var comp = Compilation.init(allocator, std.fs.cwd());
|
||||
var arena_state: std.heap.ArenaAllocator = .init(allocator);
|
||||
defer arena_state.deinit();
|
||||
const arena = arena_state.allocator();
|
||||
|
||||
var comp = Compilation.init(allocator, arena, undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
_ = try comp.generateBuiltinMacros(.include_system_defines);
|
||||
var arena = std.heap.ArenaAllocator.init(comp.gpa);
|
||||
defer arena.deinit();
|
||||
|
||||
const type_arena = arena.allocator();
|
||||
|
||||
const num_builtins = 40;
|
||||
var builtin_it = Iterator{};
|
||||
for (0..num_builtins) |_| {
|
||||
const entry = builtin_it.next().?;
|
||||
_ = try comp.builtins.getOrCreate(&comp, entry.name, type_arena);
|
||||
_ = try comp.builtins.getOrCreate(&comp, entry.name);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
23183
lib/compiler/aro/aro/Builtins/Builtin.zig
vendored
23183
lib/compiler/aro/aro/Builtins/Builtin.zig
vendored
File diff suppressed because it is too large
Load Diff
39
lib/compiler/aro/aro/Builtins/eval.zig
vendored
39
lib/compiler/aro/aro/Builtins/eval.zig
vendored
@ -5,8 +5,9 @@ const Builtins = @import("../Builtins.zig");
|
||||
const Builtin = Builtins.Builtin;
|
||||
const Parser = @import("../Parser.zig");
|
||||
const Tree = @import("../Tree.zig");
|
||||
const NodeIndex = Tree.NodeIndex;
|
||||
const Type = @import("../Type.zig");
|
||||
const TypeStore = @import("../TypeStore.zig");
|
||||
const Type = TypeStore.Type;
|
||||
const QualType = TypeStore.QualType;
|
||||
const Value = @import("../Value.zig");
|
||||
|
||||
fn makeNan(comptime T: type, str: []const u8) T {
|
||||
@ -22,22 +23,22 @@ fn makeNan(comptime T: type, str: []const u8) T {
|
||||
return @bitCast(@as(UnsignedSameSize, bits) | @as(UnsignedSameSize, @bitCast(std.math.nan(T))));
|
||||
}
|
||||
|
||||
pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
|
||||
pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const Tree.Node.Index) !Value {
|
||||
const builtin = Builtin.fromTag(tag);
|
||||
if (!builtin.properties.attributes.const_evaluable) return .{};
|
||||
|
||||
switch (tag) {
|
||||
Builtin.tagFromName("__builtin_inff").?,
|
||||
Builtin.tagFromName("__builtin_inf").?,
|
||||
Builtin.tagFromName("__builtin_infl").?,
|
||||
.__builtin_inff,
|
||||
.__builtin_inf,
|
||||
.__builtin_infl,
|
||||
=> {
|
||||
const ty: Type = switch (tag) {
|
||||
Builtin.tagFromName("__builtin_inff").? => .{ .specifier = .float },
|
||||
Builtin.tagFromName("__builtin_inf").? => .{ .specifier = .double },
|
||||
Builtin.tagFromName("__builtin_infl").? => .{ .specifier = .long_double },
|
||||
const qt: QualType = switch (tag) {
|
||||
.__builtin_inff => .float,
|
||||
.__builtin_inf => .double,
|
||||
.__builtin_infl => .long_double,
|
||||
else => unreachable,
|
||||
};
|
||||
const f: Interner.Key.Float = switch (ty.bitSizeof(p.comp).?) {
|
||||
const f: Interner.Key.Float = switch (qt.bitSizeof(p.comp)) {
|
||||
32 => .{ .f32 = std.math.inf(f32) },
|
||||
64 => .{ .f64 = std.math.inf(f64) },
|
||||
80 => .{ .f80 = std.math.inf(f80) },
|
||||
@ -46,14 +47,14 @@ pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
|
||||
};
|
||||
return Value.intern(p.comp, .{ .float = f });
|
||||
},
|
||||
Builtin.tagFromName("__builtin_isinf").? => blk: {
|
||||
.__builtin_isinf => blk: {
|
||||
if (args.len == 0) break :blk;
|
||||
const val = p.value_map.get(args[0]) orelse break :blk;
|
||||
const val = p.tree.value_map.get(args[0]) orelse break :blk;
|
||||
return Value.fromBool(val.isInf(p.comp));
|
||||
},
|
||||
Builtin.tagFromName("__builtin_isinf_sign").? => blk: {
|
||||
.__builtin_isinf_sign => blk: {
|
||||
if (args.len == 0) break :blk;
|
||||
const val = p.value_map.get(args[0]) orelse break :blk;
|
||||
const val = p.tree.value_map.get(args[0]) orelse break :blk;
|
||||
switch (val.isInfSign(p.comp)) {
|
||||
.unknown => {},
|
||||
.finite => return Value.zero,
|
||||
@ -61,17 +62,17 @@ pub fn eval(tag: Builtin.Tag, p: *Parser, args: []const NodeIndex) !Value {
|
||||
.negative => return Value.int(@as(i64, -1), p.comp),
|
||||
}
|
||||
},
|
||||
Builtin.tagFromName("__builtin_isnan").? => blk: {
|
||||
.__builtin_isnan => blk: {
|
||||
if (args.len == 0) break :blk;
|
||||
const val = p.value_map.get(args[0]) orelse break :blk;
|
||||
const val = p.tree.value_map.get(args[0]) orelse break :blk;
|
||||
return Value.fromBool(val.isNan(p.comp));
|
||||
},
|
||||
Builtin.tagFromName("__builtin_nan").? => blk: {
|
||||
.__builtin_nan => blk: {
|
||||
if (args.len == 0) break :blk;
|
||||
const val = p.getDecayedStringLiteral(args[0]) orelse break :blk;
|
||||
const bytes = p.comp.interner.get(val.ref()).bytes;
|
||||
|
||||
const f: Interner.Key.Float = switch ((Type{ .specifier = .double }).bitSizeof(p.comp).?) {
|
||||
const f: Interner.Key.Float = switch (Type.Float.double.bits(p.comp)) {
|
||||
32 => .{ .f32 = makeNan(f32, bytes) },
|
||||
64 => .{ .f64 = makeNan(f64, bytes) },
|
||||
80 => .{ .f80 = makeNan(f80, bytes) },
|
||||
|
||||
1054
lib/compiler/aro/aro/CodeGen.zig
vendored
1054
lib/compiler/aro/aro/CodeGen.zig
vendored
File diff suppressed because it is too large
Load Diff
1689
lib/compiler/aro/aro/Compilation.zig
vendored
1689
lib/compiler/aro/aro/Compilation.zig
vendored
File diff suppressed because it is too large
Load Diff
976
lib/compiler/aro/aro/Diagnostics.zig
vendored
976
lib/compiler/aro/aro/Diagnostics.zig
vendored
File diff suppressed because it is too large
Load Diff
1041
lib/compiler/aro/aro/Diagnostics/messages.zig
vendored
1041
lib/compiler/aro/aro/Diagnostics/messages.zig
vendored
File diff suppressed because it is too large
Load Diff
813
lib/compiler/aro/aro/Driver.zig
vendored
813
lib/compiler/aro/aro/Driver.zig
vendored
File diff suppressed because it is too large
Load Diff
12
lib/compiler/aro/aro/Driver/Filesystem.zig
vendored
12
lib/compiler/aro/aro/Driver/Filesystem.zig
vendored
@ -96,7 +96,7 @@ fn findProgramByNamePosix(name: []const u8, path: ?[]const u8, buf: []u8) ?[]con
|
||||
}
|
||||
|
||||
pub const Filesystem = union(enum) {
|
||||
real: void,
|
||||
real: std.fs.Dir,
|
||||
fake: []const Entry,
|
||||
|
||||
const Entry = struct {
|
||||
@ -172,8 +172,8 @@ pub const Filesystem = union(enum) {
|
||||
|
||||
pub fn exists(fs: Filesystem, path: []const u8) bool {
|
||||
switch (fs) {
|
||||
.real => {
|
||||
std.fs.cwd().access(path, .{}) catch return false;
|
||||
.real => |cwd| {
|
||||
cwd.access(path, .{}) catch return false;
|
||||
return true;
|
||||
},
|
||||
.fake => |paths| return existsFake(paths, path),
|
||||
@ -210,8 +210,8 @@ pub const Filesystem = union(enum) {
|
||||
/// Otherwise returns a slice of `buf`. If the file is larger than `buf` partial contents are returned
|
||||
pub fn readFile(fs: Filesystem, path: []const u8, buf: []u8) ?[]const u8 {
|
||||
return switch (fs) {
|
||||
.real => {
|
||||
const file = std.fs.cwd().openFile(path, .{}) catch return null;
|
||||
.real => |cwd| {
|
||||
const file = cwd.openFile(path, .{}) catch return null;
|
||||
defer file.close();
|
||||
|
||||
const bytes_read = file.readAll(buf) catch return null;
|
||||
@ -223,7 +223,7 @@ pub const Filesystem = union(enum) {
|
||||
|
||||
pub fn openDir(fs: Filesystem, dir_name: []const u8) std.fs.Dir.OpenError!Dir {
|
||||
return switch (fs) {
|
||||
.real => .{ .dir = try std.fs.cwd().openDir(dir_name, .{ .access_sub_paths = false, .iterate = true }) },
|
||||
.real => |cwd| .{ .dir = try cwd.openDir(dir_name, .{ .access_sub_paths = false, .iterate = true }) },
|
||||
.fake => |entries| .{ .fake = .{ .entries = entries, .path = dir_name } },
|
||||
};
|
||||
}
|
||||
|
||||
20
lib/compiler/aro/aro/Driver/GCCDetector.zig
vendored
20
lib/compiler/aro/aro/Driver/GCCDetector.zig
vendored
@ -1,9 +1,11 @@
|
||||
const std = @import("std");
|
||||
const Toolchain = @import("../Toolchain.zig");
|
||||
const target_util = @import("../target.zig");
|
||||
|
||||
const system_defaults = @import("system_defaults");
|
||||
|
||||
const GCCVersion = @import("GCCVersion.zig");
|
||||
const Multilib = @import("Multilib.zig");
|
||||
const target_util = @import("../target.zig");
|
||||
const Toolchain = @import("../Toolchain.zig");
|
||||
|
||||
const GCCDetector = @This();
|
||||
|
||||
@ -50,7 +52,7 @@ fn addDefaultGCCPrefixes(prefixes: *std.ArrayListUnmanaged([]const u8), tc: *con
|
||||
if (sysroot.len == 0) {
|
||||
prefixes.appendAssumeCapacity("/usr");
|
||||
} else {
|
||||
var usr_path = try tc.arena.alloc(u8, 4 + sysroot.len);
|
||||
var usr_path = try tc.driver.comp.arena.alloc(u8, 4 + sysroot.len);
|
||||
@memcpy(usr_path[0..4], "/usr");
|
||||
@memcpy(usr_path[4..], sysroot);
|
||||
prefixes.appendAssumeCapacity(usr_path);
|
||||
@ -284,11 +286,6 @@ fn collectLibDirsAndTriples(
|
||||
},
|
||||
.x86 => {
|
||||
lib_dirs.appendSliceAssumeCapacity(&X86LibDirs);
|
||||
triple_aliases.appendSliceAssumeCapacity(&X86Triples);
|
||||
biarch_libdirs.appendSliceAssumeCapacity(&X86_64LibDirs);
|
||||
biarch_triple_aliases.appendSliceAssumeCapacity(&X86_64Triples);
|
||||
biarch_libdirs.appendSliceAssumeCapacity(&X32LibDirs);
|
||||
biarch_triple_aliases.appendSliceAssumeCapacity(&X32Triples);
|
||||
},
|
||||
.loongarch64 => {
|
||||
lib_dirs.appendSliceAssumeCapacity(&LoongArch64LibDirs);
|
||||
@ -587,6 +584,7 @@ fn scanLibDirForGCCTriple(
|
||||
) !void {
|
||||
var path_buf: [std.fs.max_path_bytes]u8 = undefined;
|
||||
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
|
||||
const arena = tc.driver.comp.arena;
|
||||
for (0..2) |i| {
|
||||
if (i == 0 and !gcc_dir_exists) continue;
|
||||
if (i == 1 and !gcc_cross_dir_exists) continue;
|
||||
@ -619,9 +617,9 @@ fn scanLibDirForGCCTriple(
|
||||
if (!try self.scanGCCForMultilibs(tc, target, .{ dir_name, version_text }, needs_biarch_suffix)) continue;
|
||||
|
||||
self.version = candidate_version;
|
||||
self.gcc_triple = try tc.arena.dupe(u8, candidate_triple);
|
||||
self.install_path = try std.fs.path.join(tc.arena, &.{ lib_dir, lib_suffix, version_text });
|
||||
self.parent_lib_path = try std.fs.path.join(tc.arena, &.{ self.install_path, "..", "..", ".." });
|
||||
self.gcc_triple = try arena.dupe(u8, candidate_triple);
|
||||
self.install_path = try std.fs.path.join(arena, &.{ lib_dir, lib_suffix, version_text });
|
||||
self.parent_lib_path = try std.fs.path.join(arena, &.{ self.install_path, "..", "..", ".." });
|
||||
self.is_valid = true;
|
||||
}
|
||||
}
|
||||
|
||||
7
lib/compiler/aro/aro/Hideset.zig
vendored
7
lib/compiler/aro/aro/Hideset.zig
vendored
@ -10,8 +10,9 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const Allocator = mem.Allocator;
|
||||
const Source = @import("Source.zig");
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Source = @import("Source.zig");
|
||||
const Tokenizer = @import("Tokenizer.zig");
|
||||
|
||||
pub const Hideset = @This();
|
||||
@ -51,10 +52,10 @@ pub const Index = enum(u32) {
|
||||
_,
|
||||
};
|
||||
|
||||
map: std.AutoHashMapUnmanaged(Identifier, Index) = .empty,
|
||||
map: std.AutoHashMapUnmanaged(Identifier, Index) = .{},
|
||||
/// Used for computing union/intersection of two lists; stored here so that allocations can be retained
|
||||
/// until hideset is deinit'ed
|
||||
tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .empty,
|
||||
tmp_map: std.AutoHashMapUnmanaged(Identifier, void) = .{},
|
||||
linked_list: Item.List = .{},
|
||||
comp: *const Compilation,
|
||||
|
||||
|
||||
87
lib/compiler/aro/aro/InitList.zig
vendored
87
lib/compiler/aro/aro/InitList.zig
vendored
@ -3,17 +3,16 @@
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const testing = std.testing;
|
||||
|
||||
const Diagnostics = @import("Diagnostics.zig");
|
||||
const Parser = @import("Parser.zig");
|
||||
const Tree = @import("Tree.zig");
|
||||
const Token = Tree.Token;
|
||||
const TokenIndex = Tree.TokenIndex;
|
||||
const NodeIndex = Tree.NodeIndex;
|
||||
const Type = @import("Type.zig");
|
||||
const Diagnostics = @import("Diagnostics.zig");
|
||||
const NodeList = std.array_list.Managed(NodeIndex);
|
||||
const Parser = @import("Parser.zig");
|
||||
const Node = Tree.Node;
|
||||
|
||||
const Item = struct {
|
||||
list: InitList = .{},
|
||||
list: InitList,
|
||||
index: u64,
|
||||
|
||||
fn order(_: void, a: Item, b: Item) std.math.Order {
|
||||
@ -24,7 +23,7 @@ const Item = struct {
|
||||
const InitList = @This();
|
||||
|
||||
list: std.ArrayListUnmanaged(Item) = .empty,
|
||||
node: NodeIndex = .none,
|
||||
node: Node.OptIndex = .null,
|
||||
tok: TokenIndex = 0,
|
||||
|
||||
/// Deinitialize freeing all memory.
|
||||
@ -34,50 +33,6 @@ pub fn deinit(il: *InitList, gpa: Allocator) void {
|
||||
il.* = undefined;
|
||||
}
|
||||
|
||||
/// Insert initializer at index, returning previous entry if one exists.
|
||||
pub fn put(il: *InitList, gpa: Allocator, index: usize, node: NodeIndex, tok: TokenIndex) !?TokenIndex {
|
||||
const items = il.list.items;
|
||||
var left: usize = 0;
|
||||
var right: usize = items.len;
|
||||
|
||||
// Append new value to empty list
|
||||
if (left == right) {
|
||||
const item = try il.list.addOne(gpa);
|
||||
item.* = .{
|
||||
.list = .{ .node = node, .tok = tok },
|
||||
.index = index,
|
||||
};
|
||||
return null;
|
||||
}
|
||||
|
||||
while (left < right) {
|
||||
// Avoid overflowing in the midpoint calculation
|
||||
const mid = left + (right - left) / 2;
|
||||
// Compare the key with the midpoint element
|
||||
switch (std.math.order(index, items[mid].index)) {
|
||||
.eq => {
|
||||
// Replace previous entry.
|
||||
const prev = items[mid].list.tok;
|
||||
items[mid].list.deinit(gpa);
|
||||
items[mid] = .{
|
||||
.list = .{ .node = node, .tok = tok },
|
||||
.index = index,
|
||||
};
|
||||
return prev;
|
||||
},
|
||||
.gt => left = mid + 1,
|
||||
.lt => right = mid,
|
||||
}
|
||||
}
|
||||
|
||||
// Insert a new value into a sorted position.
|
||||
try il.list.insert(gpa, left, .{
|
||||
.list = .{ .node = node, .tok = tok },
|
||||
.index = index,
|
||||
});
|
||||
return null;
|
||||
}
|
||||
|
||||
/// Find item at index, create new if one does not exist.
|
||||
pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
|
||||
const items = il.list.items;
|
||||
@ -85,13 +40,21 @@ pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
|
||||
var right: usize = items.len;
|
||||
|
||||
// Append new value to empty list
|
||||
if (left == right) {
|
||||
if (il.list.items.len == 0) {
|
||||
const item = try il.list.addOne(gpa);
|
||||
item.* = .{
|
||||
.list = .{ .node = .none, .tok = 0 },
|
||||
.list = .{},
|
||||
.index = index,
|
||||
};
|
||||
return &item.list;
|
||||
} else if (il.list.items[il.list.items.len - 1].index < index) {
|
||||
// Append a new value to the end of the list.
|
||||
const new = try il.list.addOne(gpa);
|
||||
new.* = .{
|
||||
.list = .{},
|
||||
.index = index,
|
||||
};
|
||||
return &new.list;
|
||||
}
|
||||
|
||||
while (left < right) {
|
||||
@ -107,7 +70,7 @@ pub fn find(il: *InitList, gpa: Allocator, index: u64) !*InitList {
|
||||
|
||||
// Insert a new value into a sorted position.
|
||||
try il.list.insert(gpa, left, .{
|
||||
.list = .{ .node = .none, .tok = 0 },
|
||||
.list = .{},
|
||||
.index = index,
|
||||
});
|
||||
return &il.list.items[left].list;
|
||||
@ -118,22 +81,6 @@ test "basic usage" {
|
||||
var il: InitList = .{};
|
||||
defer il.deinit(gpa);
|
||||
|
||||
{
|
||||
var i: usize = 0;
|
||||
while (i < 5) : (i += 1) {
|
||||
const prev = try il.put(gpa, i, .none, 0);
|
||||
try testing.expect(prev == null);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
const failing = testing.failing_allocator;
|
||||
var i: usize = 0;
|
||||
while (i < 5) : (i += 1) {
|
||||
_ = try il.find(failing, i);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
var item = try il.find(gpa, 0);
|
||||
var i: usize = 1;
|
||||
|
||||
16
lib/compiler/aro/aro/LangOpts.zig
vendored
16
lib/compiler/aro/aro/LangOpts.zig
vendored
@ -1,6 +1,7 @@
|
||||
const std = @import("std");
|
||||
const DiagnosticTag = @import("Diagnostics.zig").Tag;
|
||||
|
||||
const char_info = @import("char_info.zig");
|
||||
const DiagnosticTag = @import("Diagnostics.zig").Tag;
|
||||
|
||||
pub const Compiler = enum {
|
||||
clang,
|
||||
@ -144,14 +145,9 @@ pub fn setStandard(self: *LangOpts, name: []const u8) error{InvalidStandard}!voi
|
||||
self.standard = Standard.NameMap.get(name) orelse return error.InvalidStandard;
|
||||
}
|
||||
|
||||
pub fn enableMSExtensions(self: *LangOpts) void {
|
||||
self.declspec_attrs = true;
|
||||
self.ms_extensions = true;
|
||||
}
|
||||
|
||||
pub fn disableMSExtensions(self: *LangOpts) void {
|
||||
self.declspec_attrs = false;
|
||||
self.ms_extensions = true;
|
||||
pub fn setMSExtensions(self: *LangOpts, enabled: bool) void {
|
||||
self.declspec_attrs = enabled;
|
||||
self.ms_extensions = enabled;
|
||||
}
|
||||
|
||||
pub fn hasChar8_T(self: *const LangOpts) bool {
|
||||
@ -164,7 +160,7 @@ pub fn hasDigraphs(self: *const LangOpts) bool {
|
||||
|
||||
pub fn setEmulatedCompiler(self: *LangOpts, compiler: Compiler) void {
|
||||
self.emulate = compiler;
|
||||
if (compiler == .msvc) self.enableMSExtensions();
|
||||
self.setMSExtensions(compiler == .msvc);
|
||||
}
|
||||
|
||||
pub fn setFpEvalMethod(self: *LangOpts, fp_eval_method: FPEvalMethod) void {
|
||||
|
||||
9840
lib/compiler/aro/aro/Parser.zig
vendored
9840
lib/compiler/aro/aro/Parser.zig
vendored
File diff suppressed because it is too large
Load Diff
2390
lib/compiler/aro/aro/Parser/Diagnostic.zig
vendored
Normal file
2390
lib/compiler/aro/aro/Parser/Diagnostic.zig
vendored
Normal file
File diff suppressed because it is too large
Load Diff
131
lib/compiler/aro/aro/Pragma.zig
vendored
131
lib/compiler/aro/aro/Pragma.zig
vendored
@ -1,7 +1,9 @@
|
||||
const std = @import("std");
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Preprocessor = @import("Preprocessor.zig");
|
||||
const Diagnostics = @import("Diagnostics.zig");
|
||||
const Parser = @import("Parser.zig");
|
||||
const Preprocessor = @import("Preprocessor.zig");
|
||||
const TokenIndex = @import("Tree.zig").TokenIndex;
|
||||
|
||||
pub const Error = Compilation.Error || error{ UnknownPragma, StopPreprocessing };
|
||||
@ -69,7 +71,7 @@ pub fn pasteTokens(pp: *Preprocessor, start_idx: TokenIndex) ![]const u8 {
|
||||
|
||||
pub fn shouldPreserveTokens(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
|
||||
if (self.preserveTokens) |func| return func(self, pp, start_idx);
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
pub fn preprocessorCB(self: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Error!void {
|
||||
@ -81,3 +83,128 @@ pub fn parserCB(self: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation.Er
|
||||
defer std.debug.assert(tok_index == p.tok_i);
|
||||
if (self.parserHandler) |func| return func(self, p, start_idx);
|
||||
}
|
||||
|
||||
pub const Diagnostic = struct {
|
||||
fmt: []const u8,
|
||||
kind: Diagnostics.Message.Kind,
|
||||
opt: ?Diagnostics.Option = null,
|
||||
extension: bool = false,
|
||||
|
||||
pub const pragma_warning_message: Diagnostic = .{
|
||||
.fmt = "{s}",
|
||||
.kind = .warning,
|
||||
.opt = .@"#pragma-messages",
|
||||
};
|
||||
|
||||
pub const pragma_error_message: Diagnostic = .{
|
||||
.fmt = "{s}",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const pragma_message: Diagnostic = .{
|
||||
.fmt = "#pragma message: {s}",
|
||||
.kind = .note,
|
||||
};
|
||||
|
||||
pub const pragma_requires_string_literal: Diagnostic = .{
|
||||
.fmt = "pragma {s} requires string literal",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const poisoned_identifier: Diagnostic = .{
|
||||
.fmt = "attempt to use a poisoned identifier",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const pragma_poison_identifier: Diagnostic = .{
|
||||
.fmt = "can only poison identifier tokens",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const pragma_poison_macro: Diagnostic = .{
|
||||
.fmt = "poisoning existing macro",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
pub const unknown_gcc_pragma: Diagnostic = .{
|
||||
.fmt = "pragma GCC expected 'error', 'warning', 'diagnostic', 'poison'",
|
||||
.kind = .off,
|
||||
.opt = .@"unknown-pragmas",
|
||||
};
|
||||
|
||||
pub const unknown_gcc_pragma_directive: Diagnostic = .{
|
||||
.fmt = "pragma GCC diagnostic expected 'error', 'warning', 'ignored', 'fatal', 'push', or 'pop'",
|
||||
.kind = .warning,
|
||||
.opt = .@"unknown-pragmas",
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const malformed_warning_check: Diagnostic = .{
|
||||
.fmt = "{s} expected option name (e.g. \"-Wundef\")",
|
||||
.opt = .@"malformed-warning-check",
|
||||
.kind = .warning,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const pragma_pack_lparen: Diagnostic = .{
|
||||
.fmt = "missing '(' after '#pragma pack' - ignoring",
|
||||
.kind = .warning,
|
||||
.opt = .@"ignored-pragmas",
|
||||
};
|
||||
|
||||
pub const pragma_pack_rparen: Diagnostic = .{
|
||||
.fmt = "missing ')' after '#pragma pack' - ignoring",
|
||||
.kind = .warning,
|
||||
.opt = .@"ignored-pragmas",
|
||||
};
|
||||
|
||||
pub const pragma_pack_unknown_action: Diagnostic = .{
|
||||
.fmt = "unknown action for '#pragma pack' - ignoring",
|
||||
.kind = .warning,
|
||||
.opt = .@"ignored-pragmas",
|
||||
};
|
||||
|
||||
pub const pragma_pack_show: Diagnostic = .{
|
||||
.fmt = "value of #pragma pack(show) == {d}",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
pub const pragma_pack_int_ident: Diagnostic = .{
|
||||
.fmt = "expected integer or identifier in '#pragma pack' - ignored",
|
||||
.kind = .warning,
|
||||
.opt = .@"ignored-pragmas",
|
||||
};
|
||||
|
||||
pub const pragma_pack_int: Diagnostic = .{
|
||||
.fmt = "expected #pragma pack parameter to be '1', '2', '4', '8', or '16'",
|
||||
.opt = .@"ignored-pragmas",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
pub const pragma_pack_undefined_pop: Diagnostic = .{
|
||||
.fmt = "specifying both a name and alignment to 'pop' is undefined",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
pub const pragma_pack_empty_stack: Diagnostic = .{
|
||||
.fmt = "#pragma pack(pop, ...) failed: stack empty",
|
||||
.opt = .@"ignored-pragmas",
|
||||
.kind = .warning,
|
||||
};
|
||||
};
|
||||
|
||||
pub fn err(pp: *Preprocessor, tok_i: TokenIndex, diagnostic: Diagnostic, args: anytype) Compilation.Error!void {
|
||||
var sf = std.heap.stackFallback(1024, pp.gpa);
|
||||
var allocating: std.Io.Writer.Allocating = .init(sf.get());
|
||||
defer allocating.deinit();
|
||||
|
||||
Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory;
|
||||
|
||||
try pp.diagnostics.addWithLocation(pp.comp, .{
|
||||
.kind = diagnostic.kind,
|
||||
.opt = diagnostic.opt,
|
||||
.text = allocating.getWritten(),
|
||||
.location = pp.tokens.items(.loc)[tok_i].expand(pp.comp),
|
||||
.extension = diagnostic.extension,
|
||||
}, pp.expansionSlice(tok_i), true);
|
||||
}
|
||||
|
||||
1229
lib/compiler/aro/aro/Preprocessor.zig
vendored
1229
lib/compiler/aro/aro/Preprocessor.zig
vendored
File diff suppressed because it is too large
Load Diff
442
lib/compiler/aro/aro/Preprocessor/Diagnostic.zig
vendored
Normal file
442
lib/compiler/aro/aro/Preprocessor/Diagnostic.zig
vendored
Normal file
@ -0,0 +1,442 @@
|
||||
const std = @import("std");
|
||||
|
||||
const Diagnostics = @import("../Diagnostics.zig");
|
||||
const LangOpts = @import("../LangOpts.zig");
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
|
||||
const Diagnostic = @This();
|
||||
|
||||
fmt: []const u8,
|
||||
kind: Diagnostics.Message.Kind,
|
||||
opt: ?Diagnostics.Option = null,
|
||||
extension: bool = false,
|
||||
|
||||
pub const elif_without_if: Diagnostic = .{
|
||||
.fmt = "#elif without #if",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const elif_after_else: Diagnostic = .{
|
||||
.fmt = "#elif after #else",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const elifdef_without_if: Diagnostic = .{
|
||||
.fmt = "#elifdef without #if",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const elifdef_after_else: Diagnostic = .{
|
||||
.fmt = "#elifdef after #else",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const elifndef_without_if: Diagnostic = .{
|
||||
.fmt = "#elifndef without #if",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const elifndef_after_else: Diagnostic = .{
|
||||
.fmt = "#elifndef after #else",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const else_without_if: Diagnostic = .{
|
||||
.fmt = "#else without #if",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const else_after_else: Diagnostic = .{
|
||||
.fmt = "#else after #else",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const endif_without_if: Diagnostic = .{
|
||||
.fmt = "#endif without #if",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const unknown_pragma: Diagnostic = .{
|
||||
.fmt = "unknown pragma ignored",
|
||||
.opt = .@"unknown-pragmas",
|
||||
.kind = .off,
|
||||
};
|
||||
|
||||
pub const line_simple_digit: Diagnostic = .{
|
||||
.fmt = "#line directive requires a simple digit sequence",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const line_invalid_filename: Diagnostic = .{
|
||||
.fmt = "invalid filename for #line directive",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const unterminated_conditional_directive: Diagnostic = .{
|
||||
.fmt = "unterminated conditional directive",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const invalid_preprocessing_directive: Diagnostic = .{
|
||||
.fmt = "invalid preprocessing directive",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const error_directive: Diagnostic = .{
|
||||
.fmt = "{s}",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const warning_directive: Diagnostic = .{
|
||||
.fmt = "{s}",
|
||||
.opt = .@"#warnings",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
pub const macro_name_missing: Diagnostic = .{
|
||||
.fmt = "macro name missing",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const extra_tokens_directive_end: Diagnostic = .{
|
||||
.fmt = "extra tokens at end of macro directive",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_value_in_expr: Diagnostic = .{
|
||||
.fmt = "expected value in expression",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const defined_as_macro_name: Diagnostic = .{
|
||||
.fmt = "'defined' cannot be used as a macro name",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const macro_name_must_be_identifier: Diagnostic = .{
|
||||
.fmt = "macro name must be an identifier",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const whitespace_after_macro_name: Diagnostic = .{
|
||||
.fmt = "ISO C99 requires whitespace after the macro name",
|
||||
.opt = .@"c99-extensions",
|
||||
.kind = .warning,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const hash_hash_at_start: Diagnostic = .{
|
||||
.fmt = "'##' cannot appear at the start of a macro expansion",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const hash_hash_at_end: Diagnostic = .{
|
||||
.fmt = "'##' cannot appear at the end of a macro expansion",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const pasting_formed_invalid: Diagnostic = .{
|
||||
.fmt = "pasting formed '{s}', an invalid preprocessing token",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const missing_paren_param_list: Diagnostic = .{
|
||||
.fmt = "missing ')' in macro parameter list",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const unterminated_macro_param_list: Diagnostic = .{
|
||||
.fmt = "unterminated macro param list",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const invalid_token_param_list: Diagnostic = .{
|
||||
.fmt = "invalid token in macro parameter list",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_comma_param_list: Diagnostic = .{
|
||||
.fmt = "expected comma in macro parameter list",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const hash_not_followed_param: Diagnostic = .{
|
||||
.fmt = "'#' is not followed by a macro parameter",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_filename: Diagnostic = .{
|
||||
.fmt = "expected \"FILENAME\" or <FILENAME>",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const empty_filename: Diagnostic = .{
|
||||
.fmt = "empty filename",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const header_str_closing: Diagnostic = .{
|
||||
.fmt = "expected closing '>'",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const header_str_match: Diagnostic = .{
|
||||
.fmt = "to match this '<'",
|
||||
.kind = .note,
|
||||
};
|
||||
|
||||
pub const string_literal_in_pp_expr: Diagnostic = .{
|
||||
.fmt = "string literal in preprocessor expression",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const empty_char_literal_warning: Diagnostic = .{
|
||||
.fmt = "empty character constant",
|
||||
.kind = .warning,
|
||||
.opt = .@"invalid-pp-token",
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const unterminated_char_literal_warning: Diagnostic = .{
|
||||
.fmt = "missing terminating ' character",
|
||||
.kind = .warning,
|
||||
.opt = .@"invalid-pp-token",
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const unterminated_string_literal_warning: Diagnostic = .{
|
||||
.fmt = "missing terminating '\"' character",
|
||||
.kind = .warning,
|
||||
.opt = .@"invalid-pp-token",
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const unterminated_comment: Diagnostic = .{
|
||||
.fmt = "unterminated comment",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const malformed_embed_param: Diagnostic = .{
|
||||
.fmt = "unexpected token in embed parameter",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const malformed_embed_limit: Diagnostic = .{
|
||||
.fmt = "the limit parameter expects one non-negative integer as a parameter",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const duplicate_embed_param: Diagnostic = .{
|
||||
.fmt = "duplicate embed parameter '{s}'",
|
||||
.kind = .warning,
|
||||
.opt = .@"duplicate-embed-param",
|
||||
};
|
||||
|
||||
pub const unsupported_embed_param: Diagnostic = .{
|
||||
.fmt = "unsupported embed parameter '{s}' embed parameter",
|
||||
.kind = .warning,
|
||||
.opt = .@"unsupported-embed-param",
|
||||
};
|
||||
|
||||
pub const va_opt_lparen: Diagnostic = .{
|
||||
.fmt = "missing '(' following __VA_OPT__",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const va_opt_rparen: Diagnostic = .{
|
||||
.fmt = "unterminated __VA_OPT__ argument list",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const keyword_macro: Diagnostic = .{
|
||||
.fmt = "keyword is hidden by macro definition",
|
||||
.kind = .off,
|
||||
.opt = .@"keyword-macro",
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const undefined_macro: Diagnostic = .{
|
||||
.fmt = "'{s}' is not defined, evaluates to 0",
|
||||
.kind = .off,
|
||||
.opt = .undef,
|
||||
};
|
||||
|
||||
pub const fn_macro_undefined: Diagnostic = .{
|
||||
.fmt = "function-like macro '{s}' is not defined",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
// pub const preprocessing_directive_only: Diagnostic = .{
|
||||
// .fmt = "'{s}' must be used within a preprocessing directive",
|
||||
// .extra = .tok_id_expected,
|
||||
// .kind = .@"error",
|
||||
// };
|
||||
|
||||
pub const missing_lparen_after_builtin: Diagnostic = .{
|
||||
.fmt = "Missing '(' after built-in macro '{s}'",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const too_many_includes: Diagnostic = .{
|
||||
.fmt = "#include nested too deeply",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const include_next: Diagnostic = .{
|
||||
.fmt = "#include_next is a language extension",
|
||||
.kind = .off,
|
||||
.opt = .@"gnu-include-next",
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const include_next_outside_header: Diagnostic = .{
|
||||
.fmt = "#include_next in primary source file; will search from start of include path",
|
||||
.kind = .warning,
|
||||
.opt = .@"include-next-outside-header",
|
||||
};
|
||||
|
||||
pub const comma_deletion_va_args: Diagnostic = .{
|
||||
.fmt = "token pasting of ',' and __VA_ARGS__ is a GNU extension",
|
||||
.kind = .off,
|
||||
.opt = .@"gnu-zero-variadic-macro-arguments",
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const expansion_to_defined_obj: Diagnostic = .{
|
||||
.fmt = "macro expansion producing 'defined' has undefined behavior",
|
||||
.kind = .off,
|
||||
.opt = .@"expansion-to-defined",
|
||||
};
|
||||
|
||||
pub const expansion_to_defined_func: Diagnostic = .{
|
||||
.fmt = expansion_to_defined_obj.fmt,
|
||||
.kind = .off,
|
||||
.opt = .@"expansion-to-defined",
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const invalid_pp_stringify_escape: Diagnostic = .{
|
||||
.fmt = "invalid string literal, ignoring final '\\'",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
pub const gnu_va_macro: Diagnostic = .{
|
||||
.fmt = "named variadic macros are a GNU extension",
|
||||
.opt = .@"variadic-macros",
|
||||
.kind = .off,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const pragma_operator_string_literal: Diagnostic = .{
|
||||
.fmt = "_Pragma requires exactly one string literal token",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const invalid_preproc_expr_start: Diagnostic = .{
|
||||
.fmt = "invalid token at start of a preprocessor expression",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const newline_eof: Diagnostic = .{
|
||||
.fmt = "no newline at end of file",
|
||||
.opt = .@"newline-eof",
|
||||
.kind = .off,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const malformed_warning_check: Diagnostic = .{
|
||||
.fmt = "{s} expected option name (e.g. \"-Wundef\")",
|
||||
.opt = .@"malformed-warning-check",
|
||||
.kind = .warning,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const feature_check_requires_identifier: Diagnostic = .{
|
||||
.fmt = "builtin feature check macro requires a parenthesized identifier",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const builtin_macro_redefined: Diagnostic = .{
|
||||
.fmt = "redefining builtin macro",
|
||||
.opt = .@"builtin-macro-redefined",
|
||||
.kind = .warning,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const macro_redefined: Diagnostic = .{
|
||||
.fmt = "'{s}' macro redefined",
|
||||
.opt = .@"macro-redefined",
|
||||
.kind = .warning,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const previous_definition: Diagnostic = .{
|
||||
.fmt = "previous definition is here",
|
||||
.kind = .note,
|
||||
};
|
||||
|
||||
pub const unterminated_macro_arg_list: Diagnostic = .{
|
||||
.fmt = "unterminated function macro argument list",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const to_match_paren: Diagnostic = .{
|
||||
.fmt = "to match this '('",
|
||||
.kind = .note,
|
||||
};
|
||||
|
||||
pub const closing_paren: Diagnostic = .{
|
||||
.fmt = "expected closing ')'",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const poisoned_identifier: Diagnostic = .{
|
||||
.fmt = "attempt to use a poisoned identifier",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_arguments: Diagnostic = .{
|
||||
.fmt = "expected {d} argument(s) got {d}",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_at_least_arguments: Diagnostic = .{
|
||||
.fmt = "expected at least {d} argument(s) got {d}",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
pub const invalid_preproc_operator: Diagnostic = .{
|
||||
.fmt = "token is not a valid binary operator in a preprocessor subexpression",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_str_literal_in: Diagnostic = .{
|
||||
.fmt = "expected string literal in '{s}'",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const builtin_missing_r_paren: Diagnostic = .{
|
||||
.fmt = "missing ')', after {s}",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const cannot_convert_to_identifier: Diagnostic = .{
|
||||
.fmt = "cannot convert {s} to an identifier",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const expected_identifier: Diagnostic = .{
|
||||
.fmt = "expected identifier argument",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const incomplete_ucn: Diagnostic = .{
|
||||
.fmt = "incomplete universal character name; treating as '\\' followed by identifier",
|
||||
.kind = .warning,
|
||||
.opt = .unicode,
|
||||
};
|
||||
|
||||
pub const invalid_source_epoch: Diagnostic = .{
|
||||
.fmt = "environment variable SOURCE_DATE_EPOCH must expand to a non-negative integer less than or equal to 253402300799",
|
||||
.kind = .@"error",
|
||||
};
|
||||
19
lib/compiler/aro/aro/Source.zig
vendored
19
lib/compiler/aro/aro/Source.zig
vendored
@ -24,6 +24,20 @@ pub const Location = struct {
|
||||
pub fn eql(a: Location, b: Location) bool {
|
||||
return a.id == b.id and a.byte_offset == b.byte_offset and a.line == b.line;
|
||||
}
|
||||
|
||||
pub fn expand(loc: Location, comp: *const @import("Compilation.zig")) ExpandedLocation {
|
||||
const source = comp.getSource(loc.id);
|
||||
return source.lineCol(loc);
|
||||
}
|
||||
};
|
||||
|
||||
pub const ExpandedLocation = struct {
|
||||
path: []const u8,
|
||||
line: []const u8,
|
||||
line_no: u32,
|
||||
col: u32,
|
||||
width: u32,
|
||||
end_with_splice: bool,
|
||||
};
|
||||
|
||||
const Source = @This();
|
||||
@ -51,9 +65,7 @@ pub fn physicalLine(source: Source, loc: Location) u32 {
|
||||
return loc.line + source.numSplicesBefore(loc.byte_offset);
|
||||
}
|
||||
|
||||
const LineCol = struct { line: []const u8, line_no: u32, col: u32, width: u32, end_with_splice: bool };
|
||||
|
||||
pub fn lineCol(source: Source, loc: Location) LineCol {
|
||||
pub fn lineCol(source: Source, loc: Location) ExpandedLocation {
|
||||
var start: usize = 0;
|
||||
// find the start of the line which is either a newline or a splice
|
||||
if (std.mem.lastIndexOfScalar(u8, source.buf[0..loc.byte_offset], '\n')) |some| start = some + 1;
|
||||
@ -102,6 +114,7 @@ pub fn lineCol(source: Source, loc: Location) LineCol {
|
||||
nl = source.splice_locs[splice_index];
|
||||
}
|
||||
return .{
|
||||
.path = source.path,
|
||||
.line = source.buf[start..nl],
|
||||
.line_no = loc.line + splice_index,
|
||||
.col = col,
|
||||
|
||||
94
lib/compiler/aro/aro/StringInterner.zig
vendored
94
lib/compiler/aro/aro/StringInterner.zig
vendored
@ -2,82 +2,34 @@ const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
|
||||
const StringToIdMap = std.StringHashMapUnmanaged(StringId);
|
||||
|
||||
pub const StringId = enum(u32) {
|
||||
empty,
|
||||
_,
|
||||
};
|
||||
|
||||
pub const TypeMapper = struct {
|
||||
const LookupSpeed = enum {
|
||||
fast,
|
||||
slow,
|
||||
};
|
||||
|
||||
data: union(LookupSpeed) {
|
||||
fast: []const []const u8,
|
||||
slow: *const StringToIdMap,
|
||||
},
|
||||
|
||||
pub fn lookup(self: TypeMapper, string_id: StringInterner.StringId) []const u8 {
|
||||
if (string_id == .empty) return "";
|
||||
switch (self.data) {
|
||||
.fast => |arr| return arr[@intFromEnum(string_id)],
|
||||
.slow => |map| {
|
||||
var it = map.iterator();
|
||||
while (it.next()) |entry| {
|
||||
if (entry.value_ptr.* == string_id) return entry.key_ptr.*;
|
||||
}
|
||||
unreachable;
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deinit(self: TypeMapper, allocator: mem.Allocator) void {
|
||||
switch (self.data) {
|
||||
.slow => {},
|
||||
.fast => |arr| allocator.free(arr),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
const StringInterner = @This();
|
||||
|
||||
string_table: StringToIdMap = .{},
|
||||
next_id: StringId = @enumFromInt(@intFromEnum(StringId.empty) + 1),
|
||||
pub const StringId = enum(u32) {
|
||||
empty = std.math.maxInt(u32),
|
||||
_,
|
||||
|
||||
pub fn deinit(self: *StringInterner, allocator: mem.Allocator) void {
|
||||
self.string_table.deinit(allocator);
|
||||
pub fn lookup(id: StringId, comp: *const Compilation) []const u8 {
|
||||
if (id == .empty) return "";
|
||||
return comp.string_interner.table.keys()[@intFromEnum(id)];
|
||||
}
|
||||
|
||||
pub fn lookupExtra(id: StringId, si: StringInterner) []const u8 {
|
||||
if (id == .empty) return "";
|
||||
return si.table.keys()[@intFromEnum(id)];
|
||||
}
|
||||
};
|
||||
|
||||
table: std.StringArrayHashMapUnmanaged(void) = .empty,
|
||||
|
||||
pub fn deinit(si: *StringInterner, allocator: mem.Allocator) void {
|
||||
si.table.deinit(allocator);
|
||||
si.* = undefined;
|
||||
}
|
||||
|
||||
pub fn intern(comp: *Compilation, str: []const u8) !StringId {
|
||||
return comp.string_interner.internExtra(comp.gpa, str);
|
||||
}
|
||||
|
||||
pub fn internExtra(self: *StringInterner, allocator: mem.Allocator, str: []const u8) !StringId {
|
||||
/// Intern externally owned string.
|
||||
pub fn intern(si: *StringInterner, allocator: mem.Allocator, str: []const u8) !StringId {
|
||||
if (str.len == 0) return .empty;
|
||||
|
||||
const gop = try self.string_table.getOrPut(allocator, str);
|
||||
if (gop.found_existing) return gop.value_ptr.*;
|
||||
|
||||
defer self.next_id = @enumFromInt(@intFromEnum(self.next_id) + 1);
|
||||
gop.value_ptr.* = self.next_id;
|
||||
return self.next_id;
|
||||
}
|
||||
|
||||
/// deinit for the returned TypeMapper is a no-op and does not need to be called
|
||||
pub fn getSlowTypeMapper(self: *const StringInterner) TypeMapper {
|
||||
return TypeMapper{ .data = .{ .slow = &self.string_table } };
|
||||
}
|
||||
|
||||
/// Caller must call `deinit` on the returned TypeMapper
|
||||
pub fn getFastTypeMapper(self: *const StringInterner, allocator: mem.Allocator) !TypeMapper {
|
||||
var strings = try allocator.alloc([]const u8, @intFromEnum(self.next_id));
|
||||
var it = self.string_table.iterator();
|
||||
strings[0] = "";
|
||||
while (it.next()) |entry| {
|
||||
strings[@intFromEnum(entry.value_ptr.*)] = entry.key_ptr.*;
|
||||
}
|
||||
return TypeMapper{ .data = .{ .fast = strings } };
|
||||
const gop = try si.table.getOrPut(allocator, str);
|
||||
return @enumFromInt(gop.index);
|
||||
}
|
||||
|
||||
202
lib/compiler/aro/aro/SymbolStack.zig
vendored
202
lib/compiler/aro/aro/SymbolStack.zig
vendored
@ -2,22 +2,24 @@ const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const Allocator = mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const Parser = @import("Parser.zig");
|
||||
const StringId = @import("StringInterner.zig").StringId;
|
||||
const Tree = @import("Tree.zig");
|
||||
const Token = Tree.Token;
|
||||
const TokenIndex = Tree.TokenIndex;
|
||||
const NodeIndex = Tree.NodeIndex;
|
||||
const Type = @import("Type.zig");
|
||||
const Parser = @import("Parser.zig");
|
||||
const Node = Tree.Node;
|
||||
const QualType = @import("TypeStore.zig").QualType;
|
||||
const Value = @import("Value.zig");
|
||||
const StringId = @import("StringInterner.zig").StringId;
|
||||
|
||||
const SymbolStack = @This();
|
||||
|
||||
pub const Symbol = struct {
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: NodeIndex = .none,
|
||||
node: Node.OptIndex = .null,
|
||||
out_of_scope: bool = false,
|
||||
kind: Kind,
|
||||
val: Value,
|
||||
};
|
||||
@ -33,14 +35,14 @@ pub const Kind = enum {
|
||||
constexpr,
|
||||
};
|
||||
|
||||
scopes: std.ArrayListUnmanaged(Scope) = .empty,
|
||||
scopes: std.ArrayListUnmanaged(Scope) = .{},
|
||||
/// allocations from nested scopes are retained after popping; `active_len` is the number
|
||||
/// of currently-active items in `scopes`.
|
||||
active_len: usize = 0,
|
||||
|
||||
const Scope = struct {
|
||||
vars: std.AutoHashMapUnmanaged(StringId, Symbol) = .empty,
|
||||
tags: std.AutoHashMapUnmanaged(StringId, Symbol) = .empty,
|
||||
vars: std.AutoHashMapUnmanaged(StringId, Symbol) = .{},
|
||||
tags: std.AutoHashMapUnmanaged(StringId, Symbol) = .{},
|
||||
|
||||
fn deinit(self: *Scope, allocator: Allocator) void {
|
||||
self.vars.deinit(allocator);
|
||||
@ -82,17 +84,17 @@ pub fn findTypedef(s: *SymbolStack, p: *Parser, name: StringId, name_tok: TokenI
|
||||
.typedef => return prev,
|
||||
.@"struct" => {
|
||||
if (no_type_yet) return null;
|
||||
try p.errStr(.must_use_struct, name_tok, p.tokSlice(name_tok));
|
||||
try p.err(name_tok, .must_use_struct, .{p.tokSlice(name_tok)});
|
||||
return prev;
|
||||
},
|
||||
.@"union" => {
|
||||
if (no_type_yet) return null;
|
||||
try p.errStr(.must_use_union, name_tok, p.tokSlice(name_tok));
|
||||
try p.err(name_tok, .must_use_union, .{p.tokSlice(name_tok)});
|
||||
return prev;
|
||||
},
|
||||
.@"enum" => {
|
||||
if (no_type_yet) return null;
|
||||
try p.errStr(.must_use_enum, name_tok, p.tokSlice(name_tok));
|
||||
try p.err(name_tok, .must_use_enum, .{p.tokSlice(name_tok)});
|
||||
return prev;
|
||||
},
|
||||
else => return null,
|
||||
@ -120,8 +122,8 @@ pub fn findTag(
|
||||
else => unreachable,
|
||||
}
|
||||
if (s.get(name, .tags) == null) return null;
|
||||
try p.errStr(.wrong_tag, name_tok, p.tokSlice(name_tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(name_tok, .wrong_tag, .{p.tokSlice(name_tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
@ -171,23 +173,24 @@ pub fn defineTypedef(
|
||||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: NodeIndex,
|
||||
node: Node.Index,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.typedef => {
|
||||
if (!prev.ty.is(.invalid)) {
|
||||
if (!ty.eql(prev.ty, p.comp, true)) {
|
||||
try p.errStr(.redefinition_of_typedef, tok, try p.typePairStrExtra(ty, " vs ", prev.ty));
|
||||
if (prev.tok != 0) try p.errTok(.previous_definition, prev.tok);
|
||||
}
|
||||
if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
|
||||
if (qt.isInvalid()) return;
|
||||
const non_typedef_qt = qt.type(p.comp).typedef.base;
|
||||
const non_typedef_prev_qt = prev.qt.type(p.comp).typedef.base;
|
||||
try p.err(tok, .redefinition_of_typedef, .{ non_typedef_qt, non_typedef_prev_qt });
|
||||
if (prev.tok != 0) try p.err(prev.tok, .previous_definition, .{});
|
||||
}
|
||||
},
|
||||
.enumeration, .decl, .def, .constexpr => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
@ -196,13 +199,8 @@ pub fn defineTypedef(
|
||||
.kind = .typedef,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = .{
|
||||
.name = name,
|
||||
.specifier = ty.specifier,
|
||||
.qual = ty.qual,
|
||||
.data = ty.data,
|
||||
},
|
||||
.node = node,
|
||||
.qt = qt,
|
||||
.node = .pack(node),
|
||||
.val = .{},
|
||||
});
|
||||
}
|
||||
@ -211,31 +209,37 @@ pub fn defineSymbol(
|
||||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: NodeIndex,
|
||||
node: Node.Index,
|
||||
val: Value,
|
||||
constexpr: bool,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.enumeration => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
.decl => {
|
||||
if (!ty.eql(prev.ty, p.comp, true)) {
|
||||
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
} else {
|
||||
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(some, node);
|
||||
}
|
||||
},
|
||||
.def, .constexpr => {
|
||||
try p.errStr(.redefinition, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
.def, .constexpr => if (!prev.qt.isInvalid()) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
.typedef => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
@ -245,8 +249,8 @@ pub fn defineSymbol(
|
||||
.kind = if (constexpr) .constexpr else .def,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = ty,
|
||||
.node = node,
|
||||
.qt = qt,
|
||||
.node = .pack(node),
|
||||
.val = val,
|
||||
});
|
||||
}
|
||||
@ -264,33 +268,40 @@ pub fn declareSymbol(
|
||||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: NodeIndex,
|
||||
node: Node.Index,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.enumeration => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
.decl => {
|
||||
if (!ty.eql(prev.ty, p.comp, true)) {
|
||||
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
} else {
|
||||
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(node, some);
|
||||
}
|
||||
},
|
||||
.def, .constexpr => {
|
||||
if (!ty.eql(prev.ty, p.comp, true)) {
|
||||
try p.errStr(.redefinition_incompatible, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (!prev.qt.isInvalid() and !qt.eqlQualified(prev.qt, p.comp)) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_incompatible, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
} else {
|
||||
if (prev.node.unpack()) |some| p.setTentativeDeclDefinition(node, some);
|
||||
return;
|
||||
}
|
||||
},
|
||||
.typedef => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
@ -299,34 +310,54 @@ pub fn declareSymbol(
|
||||
.kind = .decl,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = ty,
|
||||
.node = node,
|
||||
.qt = qt,
|
||||
.node = .pack(node),
|
||||
.val = .{},
|
||||
});
|
||||
|
||||
// Declare out of scope symbol for functions declared in functions.
|
||||
if (s.active_len > 1 and !p.comp.langopts.standard.atLeast(.c23) and qt.is(p.comp, .func)) {
|
||||
try s.scopes.items[0].vars.put(p.gpa, name, .{
|
||||
.kind = .decl,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.qt = qt,
|
||||
.node = .pack(node),
|
||||
.val = .{},
|
||||
.out_of_scope = true,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
pub fn defineParam(s: *SymbolStack, p: *Parser, name: StringId, ty: Type, tok: TokenIndex) !void {
|
||||
pub fn defineParam(
|
||||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
node: ?Node.Index,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.enumeration, .decl, .def, .constexpr => {
|
||||
try p.errStr(.redefinition_of_parameter, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
.enumeration, .decl, .def, .constexpr => if (!prev.qt.isInvalid()) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_of_parameter, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
.typedef => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
if (ty.is(.fp16) and !p.comp.hasHalfPrecisionFloatABI()) {
|
||||
try p.errStr(.suggest_pointer_for_invalid_fp16, tok, "parameters");
|
||||
}
|
||||
try s.define(p.gpa, .{
|
||||
.kind = .def,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = ty,
|
||||
.qt = qt,
|
||||
.node = .packOpt(node),
|
||||
.val = .{},
|
||||
});
|
||||
}
|
||||
@ -342,20 +373,20 @@ pub fn defineTag(
|
||||
switch (prev.kind) {
|
||||
.@"enum" => {
|
||||
if (kind == .keyword_enum) return prev;
|
||||
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return null;
|
||||
},
|
||||
.@"struct" => {
|
||||
if (kind == .keyword_struct) return prev;
|
||||
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return null;
|
||||
},
|
||||
.@"union" => {
|
||||
if (kind == .keyword_union) return prev;
|
||||
try p.errStr(.wrong_tag, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
try p.err(tok, .wrong_tag, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return null;
|
||||
},
|
||||
else => unreachable,
|
||||
@ -366,25 +397,29 @@ pub fn defineEnumeration(
|
||||
s: *SymbolStack,
|
||||
p: *Parser,
|
||||
name: StringId,
|
||||
ty: Type,
|
||||
qt: QualType,
|
||||
tok: TokenIndex,
|
||||
val: Value,
|
||||
node: Node.Index,
|
||||
) !void {
|
||||
if (s.get(name, .vars)) |prev| {
|
||||
switch (prev.kind) {
|
||||
.enumeration => {
|
||||
try p.errStr(.redefinition, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
.enumeration => if (!prev.qt.isInvalid()) {
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return;
|
||||
},
|
||||
.decl, .def, .constexpr => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
return;
|
||||
},
|
||||
.typedef => {
|
||||
try p.errStr(.redefinition_different_sym, tok, p.tokSlice(tok));
|
||||
try p.errTok(.previous_definition, prev.tok);
|
||||
if (qt.isInvalid()) return;
|
||||
try p.err(tok, .redefinition_different_sym, .{p.tokSlice(tok)});
|
||||
try p.err(prev.tok, .previous_definition, .{});
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
@ -393,7 +428,8 @@ pub fn defineEnumeration(
|
||||
.kind = .enumeration,
|
||||
.name = name,
|
||||
.tok = tok,
|
||||
.ty = ty,
|
||||
.qt = qt,
|
||||
.val = val,
|
||||
.node = .pack(node),
|
||||
});
|
||||
}
|
||||
|
||||
286
lib/compiler/aro/aro/Tokenizer.zig
vendored
286
lib/compiler/aro/aro/Tokenizer.zig
vendored
@ -1,8 +1,45 @@
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Source = @import("Source.zig");
|
||||
const LangOpts = @import("LangOpts.zig");
|
||||
const Source = @import("Source.zig");
|
||||
|
||||
/// Value for valid escapes indicates how many characters to consume, not counting leading backslash
|
||||
const UCNKind = enum(u8) {
|
||||
/// Just `\`
|
||||
none,
|
||||
/// \u or \U followed by an insufficient number of hex digits
|
||||
incomplete,
|
||||
/// `\uxxxx`
|
||||
hex4 = 5,
|
||||
/// `\Uxxxxxxxx`
|
||||
hex8 = 9,
|
||||
|
||||
/// In the classification phase we do not care if the escape represents a valid universal character name
|
||||
/// e.g. \UFFFFFFFF is acceptable.
|
||||
fn classify(buf: []const u8) UCNKind {
|
||||
assert(buf[0] == '\\');
|
||||
if (buf.len == 1) return .none;
|
||||
switch (buf[1]) {
|
||||
'u' => {
|
||||
if (buf.len < 6) return .incomplete;
|
||||
for (buf[2..6]) |c| {
|
||||
if (!std.ascii.isHex(c)) return .incomplete;
|
||||
}
|
||||
return .hex4;
|
||||
},
|
||||
'U' => {
|
||||
if (buf.len < 10) return .incomplete;
|
||||
for (buf[2..10]) |c| {
|
||||
if (!std.ascii.isHex(c)) return .incomplete;
|
||||
}
|
||||
return .hex8;
|
||||
},
|
||||
else => return .none,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
pub const Token = struct {
|
||||
id: Id,
|
||||
@ -18,7 +55,7 @@ pub const Token = struct {
|
||||
eof,
|
||||
/// identifier containing solely basic character set characters
|
||||
identifier,
|
||||
/// identifier with at least one extended character
|
||||
/// identifier with at least one extended character or UCN escape sequence
|
||||
extended_identifier,
|
||||
|
||||
// string literals with prefixes
|
||||
@ -147,6 +184,10 @@ pub const Token = struct {
|
||||
macro_counter,
|
||||
/// Special token for implementing _Pragma
|
||||
macro_param_pragma_operator,
|
||||
/// Special token for implementing __identifier (MS extension)
|
||||
macro_param_ms_identifier,
|
||||
/// Special token for implementing __pragma (MS extension)
|
||||
macro_param_ms_pragma,
|
||||
|
||||
/// Special identifier for implementing __func__
|
||||
macro_func,
|
||||
@ -154,6 +195,12 @@ pub const Token = struct {
|
||||
macro_function,
|
||||
/// Special identifier for implementing __PRETTY_FUNCTION__
|
||||
macro_pretty_func,
|
||||
/// Special identifier for implementing __DATE__
|
||||
macro_date,
|
||||
/// Special identifier for implementing __TIME__
|
||||
macro_time,
|
||||
/// Special identifier for implementing __TIMESTAMP__
|
||||
macro_timestamp,
|
||||
|
||||
keyword_auto,
|
||||
keyword_auto_type,
|
||||
@ -290,13 +337,21 @@ pub const Token = struct {
|
||||
keyword_thiscall2,
|
||||
keyword_vectorcall,
|
||||
keyword_vectorcall2,
|
||||
keyword_fastcall,
|
||||
keyword_fastcall2,
|
||||
keyword_regcall,
|
||||
keyword_cdecl,
|
||||
keyword_cdecl2,
|
||||
keyword_forceinline,
|
||||
keyword_forceinline2,
|
||||
keyword_unaligned,
|
||||
keyword_unaligned2,
|
||||
|
||||
// builtins that require special parsing
|
||||
builtin_choose_expr,
|
||||
builtin_va_arg,
|
||||
builtin_offsetof,
|
||||
builtin_bitoffsetof,
|
||||
builtin_types_compatible_p,
|
||||
// Type nullability
|
||||
keyword_nonnull,
|
||||
keyword_nullable,
|
||||
keyword_nullable_result,
|
||||
keyword_null_unspecified,
|
||||
|
||||
/// Generated by #embed directive
|
||||
/// Decimal value with no prefix or suffix
|
||||
@ -323,6 +378,12 @@ pub const Token = struct {
|
||||
/// A comment token if asked to preserve comments.
|
||||
comment,
|
||||
|
||||
/// Incomplete universal character name
|
||||
/// This happens if the source text contains `\u` or `\U` followed by an insufficient number of hex
|
||||
/// digits. This token id represents just the backslash; the subsequent `u` or `U` will be treated as the
|
||||
/// leading character of the following identifier token.
|
||||
incomplete_ucn,
|
||||
|
||||
/// Return true if token is identifier or keyword.
|
||||
pub fn isMacroIdentifier(id: Id) bool {
|
||||
switch (id) {
|
||||
@ -347,6 +408,9 @@ pub const Token = struct {
|
||||
.macro_func,
|
||||
.macro_function,
|
||||
.macro_pretty_func,
|
||||
.macro_date,
|
||||
.macro_time,
|
||||
.macro_timestamp,
|
||||
.keyword_auto,
|
||||
.keyword_auto_type,
|
||||
.keyword_break,
|
||||
@ -409,11 +473,6 @@ pub const Token = struct {
|
||||
.keyword_restrict2,
|
||||
.keyword_alignof1,
|
||||
.keyword_alignof2,
|
||||
.builtin_choose_expr,
|
||||
.builtin_va_arg,
|
||||
.builtin_offsetof,
|
||||
.builtin_bitoffsetof,
|
||||
.builtin_types_compatible_p,
|
||||
.keyword_attribute1,
|
||||
.keyword_attribute2,
|
||||
.keyword_extension,
|
||||
@ -444,6 +503,19 @@ pub const Token = struct {
|
||||
.keyword_thiscall2,
|
||||
.keyword_vectorcall,
|
||||
.keyword_vectorcall2,
|
||||
.keyword_fastcall,
|
||||
.keyword_fastcall2,
|
||||
.keyword_regcall,
|
||||
.keyword_cdecl,
|
||||
.keyword_cdecl2,
|
||||
.keyword_forceinline,
|
||||
.keyword_forceinline2,
|
||||
.keyword_unaligned,
|
||||
.keyword_unaligned2,
|
||||
.keyword_nonnull,
|
||||
.keyword_nullable,
|
||||
.keyword_nullable_result,
|
||||
.keyword_null_unspecified,
|
||||
.keyword_bit_int,
|
||||
.keyword_c23_alignas,
|
||||
.keyword_c23_alignof,
|
||||
@ -547,11 +619,18 @@ pub const Token = struct {
|
||||
.macro_file,
|
||||
.macro_line,
|
||||
.macro_counter,
|
||||
.macro_time,
|
||||
.macro_date,
|
||||
.macro_timestamp,
|
||||
.macro_param_pragma_operator,
|
||||
.macro_param_ms_identifier,
|
||||
.macro_param_ms_pragma,
|
||||
.placemarker,
|
||||
=> "",
|
||||
.macro_ws => " ",
|
||||
|
||||
.incomplete_ucn => "\\",
|
||||
|
||||
.macro_func => "__func__",
|
||||
.macro_function => "__FUNCTION__",
|
||||
.macro_pretty_func => "__PRETTY_FUNCTION__",
|
||||
@ -695,11 +774,6 @@ pub const Token = struct {
|
||||
.keyword_alignof2 => "__alignof__",
|
||||
.keyword_typeof1 => "__typeof",
|
||||
.keyword_typeof2 => "__typeof__",
|
||||
.builtin_choose_expr => "__builtin_choose_expr",
|
||||
.builtin_va_arg => "__builtin_va_arg",
|
||||
.builtin_offsetof => "__builtin_offsetof",
|
||||
.builtin_bitoffsetof => "__builtin_bitoffsetof",
|
||||
.builtin_types_compatible_p => "__builtin_types_compatible_p",
|
||||
.keyword_attribute1 => "__attribute",
|
||||
.keyword_attribute2 => "__attribute__",
|
||||
.keyword_extension => "__extension__",
|
||||
@ -730,6 +804,19 @@ pub const Token = struct {
|
||||
.keyword_thiscall2 => "_thiscall",
|
||||
.keyword_vectorcall => "__vectorcall",
|
||||
.keyword_vectorcall2 => "_vectorcall",
|
||||
.keyword_fastcall => "__fastcall",
|
||||
.keyword_fastcall2 => "_fastcall",
|
||||
.keyword_regcall => "__regcall",
|
||||
.keyword_cdecl => "__cdecl",
|
||||
.keyword_cdecl2 => "_cdecl",
|
||||
.keyword_forceinline => "__forceinline",
|
||||
.keyword_forceinline2 => "_forceinline",
|
||||
.keyword_unaligned => "__unaligned",
|
||||
.keyword_unaligned2 => "_unaligned",
|
||||
.keyword_nonnull => "_Nonnull",
|
||||
.keyword_nullable => "_Nullable",
|
||||
.keyword_nullable_result => "_Nullable_result",
|
||||
.keyword_null_unspecified => "_Null_unspecified",
|
||||
};
|
||||
}
|
||||
|
||||
@ -742,11 +829,6 @@ pub const Token = struct {
|
||||
.macro_func,
|
||||
.macro_function,
|
||||
.macro_pretty_func,
|
||||
.builtin_choose_expr,
|
||||
.builtin_va_arg,
|
||||
.builtin_offsetof,
|
||||
.builtin_bitoffsetof,
|
||||
.builtin_types_compatible_p,
|
||||
=> "an identifier",
|
||||
.string_literal,
|
||||
.string_literal_utf_16,
|
||||
@ -763,7 +845,7 @@ pub const Token = struct {
|
||||
.unterminated_char_literal,
|
||||
.empty_char_literal,
|
||||
=> "a character literal",
|
||||
.pp_num, .embed_byte => "A number",
|
||||
.pp_num, .embed_byte => "a number",
|
||||
else => id.lexeme().?,
|
||||
};
|
||||
}
|
||||
@ -871,6 +953,12 @@ pub const Token = struct {
|
||||
.keyword_stdcall2,
|
||||
.keyword_thiscall2,
|
||||
.keyword_vectorcall2,
|
||||
.keyword_fastcall2,
|
||||
.keyword_cdecl2,
|
||||
.keyword_forceinline,
|
||||
.keyword_forceinline2,
|
||||
.keyword_unaligned,
|
||||
.keyword_unaligned2,
|
||||
=> if (langopts.ms_extensions) kw else .identifier,
|
||||
else => kw,
|
||||
};
|
||||
@ -1013,13 +1101,21 @@ pub const Token = struct {
|
||||
.{ "_thiscall", .keyword_thiscall2 },
|
||||
.{ "__vectorcall", .keyword_vectorcall },
|
||||
.{ "_vectorcall", .keyword_vectorcall2 },
|
||||
.{ "__fastcall", .keyword_fastcall },
|
||||
.{ "_fastcall", .keyword_fastcall2 },
|
||||
.{ "_regcall", .keyword_regcall },
|
||||
.{ "__cdecl", .keyword_cdecl },
|
||||
.{ "_cdecl", .keyword_cdecl2 },
|
||||
.{ "__forceinline", .keyword_forceinline },
|
||||
.{ "_forceinline", .keyword_forceinline2 },
|
||||
.{ "__unaligned", .keyword_unaligned },
|
||||
.{ "_unaligned", .keyword_unaligned2 },
|
||||
|
||||
// builtins that require special parsing
|
||||
.{ "__builtin_choose_expr", .builtin_choose_expr },
|
||||
.{ "__builtin_va_arg", .builtin_va_arg },
|
||||
.{ "__builtin_offsetof", .builtin_offsetof },
|
||||
.{ "__builtin_bitoffsetof", .builtin_bitoffsetof },
|
||||
.{ "__builtin_types_compatible_p", .builtin_types_compatible_p },
|
||||
// Type nullability
|
||||
.{ "_Nonnull", .keyword_nonnull },
|
||||
.{ "_Nullable", .keyword_nullable },
|
||||
.{ "_Nullable_result", .keyword_nullable_result },
|
||||
.{ "_Null_unspecified", .keyword_null_unspecified },
|
||||
});
|
||||
};
|
||||
|
||||
@ -1099,6 +1195,26 @@ pub fn next(self: *Tokenizer) Token {
|
||||
'u' => state = .u,
|
||||
'U' => state = .U,
|
||||
'L' => state = .L,
|
||||
'\\' => {
|
||||
const ucn_kind = UCNKind.classify(self.buf[self.index..]);
|
||||
switch (ucn_kind) {
|
||||
.none => {
|
||||
self.index += 1;
|
||||
id = .invalid;
|
||||
break;
|
||||
},
|
||||
.incomplete => {
|
||||
self.index += 1;
|
||||
id = .incomplete_ucn;
|
||||
break;
|
||||
},
|
||||
.hex4, .hex8 => {
|
||||
self.index += @intFromEnum(ucn_kind);
|
||||
id = .extended_identifier;
|
||||
state = .extended_identifier;
|
||||
},
|
||||
}
|
||||
},
|
||||
'a'...'t', 'v'...'z', 'A'...'K', 'M'...'T', 'V'...'Z', '_' => state = .identifier,
|
||||
'=' => state = .equal,
|
||||
'!' => state = .bang,
|
||||
@ -1324,6 +1440,20 @@ pub fn next(self: *Tokenizer) Token {
|
||||
break;
|
||||
},
|
||||
0x80...0xFF => state = .extended_identifier,
|
||||
'\\' => {
|
||||
const ucn_kind = UCNKind.classify(self.buf[self.index..]);
|
||||
switch (ucn_kind) {
|
||||
.none, .incomplete => {
|
||||
id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier;
|
||||
break;
|
||||
},
|
||||
.hex4, .hex8 => {
|
||||
state = .extended_identifier;
|
||||
self.index += @intFromEnum(ucn_kind);
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
else => {
|
||||
id = if (state == .identifier) Token.getTokenId(self.langopts, self.buf[start..self.index]) else .extended_identifier;
|
||||
break;
|
||||
@ -1731,7 +1861,10 @@ pub fn next(self: *Tokenizer) Token {
|
||||
}
|
||||
} else if (self.index == self.buf.len) {
|
||||
switch (state) {
|
||||
.start, .line_comment => {},
|
||||
.start => {},
|
||||
.line_comment => if (self.langopts.preserve_comments) {
|
||||
id = .comment;
|
||||
},
|
||||
.u, .u8, .U, .L, .identifier => id = Token.getTokenId(self.langopts, self.buf[start..self.index]),
|
||||
.extended_identifier => id = .extended_identifier,
|
||||
|
||||
@ -2105,6 +2238,15 @@ test "comments" {
|
||||
.hash,
|
||||
.identifier,
|
||||
});
|
||||
try expectTokensExtra(
|
||||
\\//foo
|
||||
\\void
|
||||
\\//bar
|
||||
, &.{
|
||||
.comment, .nl,
|
||||
.keyword_void, .nl,
|
||||
.comment,
|
||||
}, .{ .preserve_comments = true });
|
||||
}
|
||||
|
||||
test "extended identifiers" {
|
||||
@ -2147,36 +2289,76 @@ test "C23 keywords" {
|
||||
.keyword_c23_thread_local,
|
||||
.keyword_nullptr,
|
||||
.keyword_typeof_unqual,
|
||||
}, .c23);
|
||||
}, .{ .standard = .c23 });
|
||||
}
|
||||
|
||||
test "Universal character names" {
|
||||
try expectTokens("\\", &.{.invalid});
|
||||
try expectTokens("\\g", &.{ .invalid, .identifier });
|
||||
try expectTokens("\\u", &.{ .incomplete_ucn, .identifier });
|
||||
try expectTokens("\\ua", &.{ .incomplete_ucn, .identifier });
|
||||
try expectTokens("\\U9", &.{ .incomplete_ucn, .identifier });
|
||||
try expectTokens("\\ug", &.{ .incomplete_ucn, .identifier });
|
||||
try expectTokens("\\uag", &.{ .incomplete_ucn, .identifier });
|
||||
|
||||
try expectTokens("\\ ", &.{ .invalid, .eof });
|
||||
try expectTokens("\\g ", &.{ .invalid, .identifier, .eof });
|
||||
try expectTokens("\\u ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("\\ua ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("\\U9 ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("\\ug ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("\\uag ", &.{ .incomplete_ucn, .identifier, .eof });
|
||||
|
||||
try expectTokens("a\\", &.{ .identifier, .invalid });
|
||||
try expectTokens("a\\g", &.{ .identifier, .invalid, .identifier });
|
||||
try expectTokens("a\\u", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
try expectTokens("a\\ua", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
try expectTokens("a\\U9", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
try expectTokens("a\\ug", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
try expectTokens("a\\uag", &.{ .identifier, .incomplete_ucn, .identifier });
|
||||
|
||||
try expectTokens("a\\ ", &.{ .identifier, .invalid, .eof });
|
||||
try expectTokens("a\\g ", &.{ .identifier, .invalid, .identifier, .eof });
|
||||
try expectTokens("a\\u ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("a\\ua ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("a\\U9 ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("a\\ug ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
try expectTokens("a\\uag ", &.{ .identifier, .incomplete_ucn, .identifier, .eof });
|
||||
}
|
||||
|
||||
test "Tokenizer fuzz test" {
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
const Context = struct {
|
||||
fn testOne(_: @This(), input_bytes: []const u8) anyerror!void {
|
||||
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena.deinit();
|
||||
var comp = Compilation.init(std.testing.allocator, arena.allocator(), undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
|
||||
const input_bytes = std.testing.fuzzInput(.{});
|
||||
if (input_bytes.len == 0) return;
|
||||
const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes);
|
||||
|
||||
const source = try comp.addSourceFromBuffer("fuzz.c", input_bytes);
|
||||
|
||||
var tokenizer: Tokenizer = .{
|
||||
.buf = source.buf,
|
||||
.source = source.id,
|
||||
.langopts = comp.langopts,
|
||||
var tokenizer: Tokenizer = .{
|
||||
.buf = source.buf,
|
||||
.source = source.id,
|
||||
.langopts = comp.langopts,
|
||||
};
|
||||
while (true) {
|
||||
const prev_index = tokenizer.index;
|
||||
const tok = tokenizer.next();
|
||||
if (tok.id == .eof) break;
|
||||
try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress
|
||||
}
|
||||
}
|
||||
};
|
||||
while (true) {
|
||||
const prev_index = tokenizer.index;
|
||||
const tok = tokenizer.next();
|
||||
if (tok.id == .eof) break;
|
||||
try std.testing.expect(prev_index < tokenizer.index); // ensure that the tokenizer always makes progress
|
||||
}
|
||||
return std.testing.fuzz(Context{}, Context.testOne, .{});
|
||||
}
|
||||
|
||||
fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, standard: ?LangOpts.Standard) !void {
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
fn expectTokensExtra(contents: []const u8, expected_tokens: []const Token.Id, langopts: ?LangOpts) !void {
|
||||
var arena: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena.deinit();
|
||||
var comp = Compilation.init(std.testing.allocator, arena.allocator(), undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
if (standard) |provided| {
|
||||
comp.langopts.standard = provided;
|
||||
if (langopts) |provided| {
|
||||
comp.langopts = provided;
|
||||
}
|
||||
const source = try comp.addSourceFromBuffer("path", contents);
|
||||
var tokenizer = Tokenizer{
|
||||
|
||||
53
lib/compiler/aro/aro/Toolchain.zig
vendored
53
lib/compiler/aro/aro/Toolchain.zig
vendored
@ -1,12 +1,14 @@
|
||||
const std = @import("std");
|
||||
const Driver = @import("Driver.zig");
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const mem = std.mem;
|
||||
|
||||
const system_defaults = @import("system_defaults");
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Driver = @import("Driver.zig");
|
||||
const Filesystem = @import("Driver/Filesystem.zig").Filesystem;
|
||||
const Multilib = @import("Driver/Multilib.zig");
|
||||
const target_util = @import("target.zig");
|
||||
const Linux = @import("toolchains/Linux.zig");
|
||||
const Multilib = @import("Driver/Multilib.zig");
|
||||
const Filesystem = @import("Driver/Filesystem.zig").Filesystem;
|
||||
|
||||
pub const PathList = std.ArrayListUnmanaged([]const u8);
|
||||
|
||||
@ -48,9 +50,8 @@ const Inner = union(enum) {
|
||||
|
||||
const Toolchain = @This();
|
||||
|
||||
filesystem: Filesystem = .{ .real = {} },
|
||||
filesystem: Filesystem,
|
||||
driver: *Driver,
|
||||
arena: mem.Allocator,
|
||||
|
||||
/// The list of toolchain specific path prefixes to search for libraries.
|
||||
library_paths: PathList = .{},
|
||||
@ -83,7 +84,8 @@ pub fn discover(tc: *Toolchain) !void {
|
||||
|
||||
const target = tc.getTarget();
|
||||
tc.inner = switch (target.os.tag) {
|
||||
.linux => if (target.cpu.arch == .hexagon)
|
||||
.linux,
|
||||
=> if (target.cpu.arch == .hexagon)
|
||||
.{ .unknown = {} } // TODO
|
||||
else if (target.cpu.arch.isMIPS())
|
||||
.{ .unknown = {} } // TODO
|
||||
@ -111,6 +113,11 @@ pub fn deinit(tc: *Toolchain) void {
|
||||
tc.program_paths.deinit(gpa);
|
||||
}
|
||||
|
||||
/// Write assembler path to `buf` and return a slice of it
|
||||
pub fn getAssemblerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
|
||||
return tc.getProgramPath("as", buf);
|
||||
}
|
||||
|
||||
/// Write linker path to `buf` and return a slice of it
|
||||
pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
|
||||
// --ld-path= takes precedence over -fuse-ld= and specifies the executable
|
||||
@ -149,7 +156,12 @@ pub fn getLinkerPath(tc: *const Toolchain, buf: []u8) ![]const u8 {
|
||||
// to a relative path is surprising. This is more complex due to priorities
|
||||
// among -B, COMPILER_PATH and PATH. --ld-path= should be used instead.
|
||||
if (mem.indexOfScalar(u8, use_linker, '/') != null) {
|
||||
try tc.driver.comp.addDiagnostic(.{ .tag = .fuse_ld_path }, &.{});
|
||||
try tc.driver.comp.diagnostics.add(.{
|
||||
.text = "'-fuse-ld=' taking a path is deprecated; use '--ld-path=' instead",
|
||||
.kind = .off,
|
||||
.opt = .@"fuse-ld-path",
|
||||
.location = null,
|
||||
});
|
||||
}
|
||||
|
||||
if (std.fs.path.isAbsolute(use_linker)) {
|
||||
@ -205,7 +217,7 @@ pub fn addFilePathLibArgs(tc: *const Toolchain, argv: *std.array_list.Managed([]
|
||||
for (tc.file_paths.items) |path| {
|
||||
bytes_needed += path.len + 2; // +2 for `-L`
|
||||
}
|
||||
var bytes = try tc.arena.alloc(u8, bytes_needed);
|
||||
var bytes = try tc.driver.comp.arena.alloc(u8, bytes_needed);
|
||||
var index: usize = 0;
|
||||
for (tc.file_paths.items) |path| {
|
||||
@memcpy(bytes[index..][0..2], "-L");
|
||||
@ -252,6 +264,7 @@ pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 {
|
||||
var path_buf: [std.fs.max_path_bytes]u8 = undefined;
|
||||
var fib = std.heap.FixedBufferAllocator.init(&path_buf);
|
||||
const allocator = fib.allocator();
|
||||
const arena = tc.driver.comp.arena;
|
||||
|
||||
const sysroot = tc.getSysroot();
|
||||
|
||||
@ -260,15 +273,15 @@ pub fn getFilePath(tc: *const Toolchain, name: []const u8) ![]const u8 {
|
||||
const aro_dir = std.fs.path.dirname(tc.driver.aro_name) orelse "";
|
||||
const candidate = try std.fs.path.join(allocator, &.{ aro_dir, "..", name });
|
||||
if (tc.filesystem.exists(candidate)) {
|
||||
return tc.arena.dupe(u8, candidate);
|
||||
return arena.dupe(u8, candidate);
|
||||
}
|
||||
|
||||
if (tc.searchPaths(&fib, sysroot, tc.library_paths.items, name)) |path| {
|
||||
return tc.arena.dupe(u8, path);
|
||||
return arena.dupe(u8, path);
|
||||
}
|
||||
|
||||
if (tc.searchPaths(&fib, sysroot, tc.file_paths.items, name)) |path| {
|
||||
return try tc.arena.dupe(u8, path);
|
||||
return try arena.dupe(u8, path);
|
||||
}
|
||||
|
||||
return name;
|
||||
@ -299,7 +312,7 @@ const PathKind = enum {
|
||||
program,
|
||||
};
|
||||
|
||||
/// Join `components` into a path. If the path exists, dupe it into the toolchain arena and
|
||||
/// Join `components` into a path. If the path exists, dupe it into the Compilation arena and
|
||||
/// add it to the specified path list.
|
||||
pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void {
|
||||
var path_buf: [std.fs.max_path_bytes]u8 = undefined;
|
||||
@ -308,7 +321,7 @@ pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind
|
||||
const candidate = try std.fs.path.join(fib.allocator(), components);
|
||||
|
||||
if (tc.filesystem.exists(candidate)) {
|
||||
const duped = try tc.arena.dupe(u8, candidate);
|
||||
const duped = try tc.driver.comp.arena.dupe(u8, candidate);
|
||||
const dest = switch (dest_kind) {
|
||||
.library => &tc.library_paths,
|
||||
.file => &tc.file_paths,
|
||||
@ -318,10 +331,10 @@ pub fn addPathIfExists(tc: *Toolchain, components: []const []const u8, dest_kind
|
||||
}
|
||||
}
|
||||
|
||||
/// Join `components` using the toolchain arena and add the resulting path to `dest_kind`. Does not check
|
||||
/// Join `components` using the Compilation arena and add the resulting path to `dest_kind`. Does not check
|
||||
/// whether the path actually exists
|
||||
pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, dest_kind: PathKind) !void {
|
||||
const full_path = try std.fs.path.join(tc.arena, components);
|
||||
const full_path = try std.fs.path.join(tc.driver.comp.arena, components);
|
||||
const dest = switch (dest_kind) {
|
||||
.library => &tc.library_paths,
|
||||
.file => &tc.file_paths,
|
||||
@ -331,7 +344,7 @@ pub fn addPathFromComponents(tc: *Toolchain, components: []const []const u8, des
|
||||
}
|
||||
|
||||
/// Add linker args to `argv`. Does not add path to linker executable as first item; that must be handled separately
|
||||
/// Items added to `argv` will be string literals or owned by `tc.arena` so they must not be individually freed
|
||||
/// Items added to `argv` will be string literals or owned by `tc.driver.comp.arena` so they must not be individually freed
|
||||
pub fn buildLinkerArgs(tc: *Toolchain, argv: *std.array_list.Managed([]const u8)) !void {
|
||||
return switch (tc.inner) {
|
||||
.uninitialized => unreachable,
|
||||
@ -396,7 +409,7 @@ fn getUnwindLibKind(tc: *const Toolchain) !UnwindLibKind {
|
||||
return .libgcc;
|
||||
} else if (mem.eql(u8, libname, "libunwind")) {
|
||||
if (tc.getRuntimeLibKind() == .libgcc) {
|
||||
try tc.driver.comp.addDiagnostic(.{ .tag = .incompatible_unwindlib }, &.{});
|
||||
try tc.driver.err("--rtlib=libgcc requires --unwindlib=libgcc", .{});
|
||||
}
|
||||
return .compiler_rt;
|
||||
} else {
|
||||
@ -472,7 +485,7 @@ pub fn addRuntimeLibs(tc: *const Toolchain, argv: *std.array_list.Managed([]cons
|
||||
if (target_util.isKnownWindowsMSVCEnvironment(target)) {
|
||||
const rtlib_str = tc.driver.rtlib orelse system_defaults.rtlib;
|
||||
if (!mem.eql(u8, rtlib_str, "platform")) {
|
||||
try tc.driver.comp.addDiagnostic(.{ .tag = .unsupported_rtlib_gcc, .extra = .{ .str = "MSVC" } }, &.{});
|
||||
try tc.driver.err("unsupported runtime library 'libgcc' for platform 'MSVC'", .{});
|
||||
}
|
||||
} else {
|
||||
try tc.addLibGCC(argv);
|
||||
@ -494,7 +507,7 @@ pub fn defineSystemIncludes(tc: *Toolchain) !void {
|
||||
|
||||
const comp = tc.driver.comp;
|
||||
if (!tc.driver.nobuiltininc) {
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name);
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name, tc.driver.resource_dir);
|
||||
}
|
||||
|
||||
if (!tc.driver.nostdlibinc) {
|
||||
|
||||
4194
lib/compiler/aro/aro/Tree.zig
vendored
4194
lib/compiler/aro/aro/Tree.zig
vendored
File diff suppressed because it is too large
Load Diff
2676
lib/compiler/aro/aro/Type.zig
vendored
2676
lib/compiler/aro/aro/Type.zig
vendored
File diff suppressed because it is too large
Load Diff
3008
lib/compiler/aro/aro/TypeStore.zig
vendored
Normal file
3008
lib/compiler/aro/aro/TypeStore.zig
vendored
Normal file
File diff suppressed because it is too large
Load Diff
378
lib/compiler/aro/aro/Value.zig
vendored
378
lib/compiler/aro/aro/Value.zig
vendored
@ -2,14 +2,14 @@ const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const BigIntConst = std.math.big.int.Const;
|
||||
const BigIntMutable = std.math.big.int.Mutable;
|
||||
const backend = @import("../backend.zig");
|
||||
const Interner = backend.Interner;
|
||||
|
||||
const Interner = @import("../backend.zig").Interner;
|
||||
const BigIntSpace = Interner.Tag.Int.BigIntSpace;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Type = @import("Type.zig");
|
||||
const target_util = @import("target.zig");
|
||||
|
||||
const annex_g = @import("annex_g.zig");
|
||||
const Writer = std.Io.Writer;
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const target_util = @import("target.zig");
|
||||
const QualType = @import("TypeStore.zig").QualType;
|
||||
|
||||
const Value = @This();
|
||||
|
||||
@ -33,11 +33,19 @@ pub fn int(i: anytype, comp: *Compilation) !Value {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn pointer(r: Interner.Key.Pointer, comp: *Compilation) !Value {
|
||||
return intern(comp, .{ .pointer = r });
|
||||
}
|
||||
|
||||
pub fn ref(v: Value) Interner.Ref {
|
||||
std.debug.assert(v.opt_ref != .none);
|
||||
return @enumFromInt(@intFromEnum(v.opt_ref));
|
||||
}
|
||||
|
||||
pub fn fromRef(r: Interner.Ref) Value {
|
||||
return .{ .opt_ref = @enumFromInt(@intFromEnum(r)) };
|
||||
}
|
||||
|
||||
pub fn is(v: Value, tag: std.meta.Tag(Interner.Key), comp: *const Compilation) bool {
|
||||
if (v.opt_ref == .none) return false;
|
||||
return comp.interner.get(v.ref()) == tag;
|
||||
@ -68,7 +76,11 @@ test "minUnsignedBits" {
|
||||
}
|
||||
};
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena_state.deinit();
|
||||
const arena = arena_state.allocator();
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
|
||||
comp.target = try std.zig.system.resolveTargetQuery(target_query);
|
||||
@ -103,7 +115,11 @@ test "minSignedBits" {
|
||||
}
|
||||
};
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
var arena_state: std.heap.ArenaAllocator = .init(std.testing.allocator);
|
||||
defer arena_state.deinit();
|
||||
const arena = arena_state.allocator();
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
const target_query = try std.Target.Query.parse(.{ .arch_os_abi = "x86_64-linux-gnu" });
|
||||
comp.target = try std.zig.system.resolveTargetQuery(target_query);
|
||||
@ -133,24 +149,27 @@ pub const FloatToIntChangeKind = enum {
|
||||
|
||||
/// Converts the stored value from a float to an integer.
|
||||
/// `.none` value remains unchanged.
|
||||
pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChangeKind {
|
||||
pub fn floatToInt(v: *Value, dest_ty: QualType, comp: *Compilation) !FloatToIntChangeKind {
|
||||
if (v.opt_ref == .none) return .none;
|
||||
|
||||
const float_val = v.toFloat(f128, comp);
|
||||
const was_zero = float_val == 0;
|
||||
|
||||
if (dest_ty.is(.bool)) {
|
||||
if (dest_ty.is(comp, .bool)) {
|
||||
const was_one = float_val == 1.0;
|
||||
v.* = fromBool(!was_zero);
|
||||
if (was_zero or was_one) return .none;
|
||||
return .value_changed;
|
||||
} else if (dest_ty.isUnsignedInt(comp) and float_val < 0) {
|
||||
} else if (dest_ty.signedness(comp) == .unsigned and float_val < 0) {
|
||||
v.* = zero;
|
||||
return .out_of_range;
|
||||
} else if (!std.math.isFinite(float_val)) {
|
||||
v.* = .{};
|
||||
return .overflow;
|
||||
}
|
||||
|
||||
const signedness = dest_ty.signedness(comp);
|
||||
const bits: usize = @intCast(dest_ty.bitSizeof(comp).?);
|
||||
const bits: usize = @intCast(dest_ty.bitSizeof(comp));
|
||||
|
||||
var big_int: std.math.big.int.Mutable = .{
|
||||
.limbs = try comp.gpa.alloc(std.math.big.Limb, @max(
|
||||
@ -160,6 +179,7 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
|
||||
.len = undefined,
|
||||
.positive = undefined,
|
||||
};
|
||||
defer comp.gpa.free(big_int.limbs);
|
||||
const had_fraction = switch (big_int.setFloat(float_val, .trunc)) {
|
||||
.inexact => true,
|
||||
.exact => false,
|
||||
@ -177,11 +197,11 @@ pub fn floatToInt(v: *Value, dest_ty: Type, comp: *Compilation) !FloatToIntChang
|
||||
|
||||
/// Converts the stored value from an integer to a float.
|
||||
/// `.none` value remains unchanged.
|
||||
pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void {
|
||||
pub fn intToFloat(v: *Value, dest_ty: QualType, comp: *Compilation) !void {
|
||||
if (v.opt_ref == .none) return;
|
||||
|
||||
if (dest_ty.isComplex()) {
|
||||
const bits = dest_ty.bitSizeof(comp).?;
|
||||
if (dest_ty.is(comp, .complex)) {
|
||||
const bits = dest_ty.bitSizeof(comp);
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = .{ v.toFloat(f16, comp), 0 } },
|
||||
64 => .{ .cf32 = .{ v.toFloat(f32, comp), 0 } },
|
||||
@ -193,7 +213,7 @@ pub fn intToFloat(v: *Value, dest_ty: Type, comp: *Compilation) !void {
|
||||
v.* = try intern(comp, .{ .complex = cf });
|
||||
return;
|
||||
}
|
||||
const bits = dest_ty.bitSizeof(comp).?;
|
||||
const bits = dest_ty.bitSizeof(comp);
|
||||
return switch (comp.interner.get(v.ref()).int) {
|
||||
inline .u64, .i64 => |data| {
|
||||
const f: Interner.Key.Float = switch (bits) {
|
||||
@ -232,14 +252,16 @@ pub const IntCastChangeKind = enum {
|
||||
|
||||
/// Truncates or extends bits based on type.
|
||||
/// `.none` value remains unchanged.
|
||||
pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind {
|
||||
pub fn intCast(v: *Value, dest_ty: QualType, comp: *Compilation) !IntCastChangeKind {
|
||||
if (v.opt_ref == .none) return .none;
|
||||
const key = comp.interner.get(v.ref());
|
||||
if (key == .pointer or key == .bytes) return .none;
|
||||
|
||||
const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp).?);
|
||||
const dest_bits: usize = @intCast(dest_ty.bitSizeof(comp));
|
||||
const dest_signed = dest_ty.signedness(comp) == .signed;
|
||||
|
||||
var space: BigIntSpace = undefined;
|
||||
const big = v.toBigInt(&space, comp);
|
||||
const big = key.toBigInt(&space);
|
||||
const value_bits = big.bitCountTwosComp();
|
||||
|
||||
// if big is negative, then is signed.
|
||||
@ -269,10 +291,10 @@ pub fn intCast(v: *Value, dest_ty: Type, comp: *Compilation) !IntCastChangeKind
|
||||
|
||||
/// Converts the stored value to a float of the specified type
|
||||
/// `.none` value remains unchanged.
|
||||
pub fn floatCast(v: *Value, dest_ty: Type, comp: *Compilation) !void {
|
||||
pub fn floatCast(v: *Value, dest_ty: QualType, comp: *Compilation) !void {
|
||||
if (v.opt_ref == .none) return;
|
||||
const bits = dest_ty.bitSizeof(comp).?;
|
||||
if (dest_ty.isComplex()) {
|
||||
const bits = dest_ty.bitSizeof(comp);
|
||||
if (dest_ty.is(comp, .complex)) {
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = .{ v.toFloat(f16, comp), v.imag(f16, comp) } },
|
||||
64 => .{ .cf32 = .{ v.toFloat(f32, comp), v.imag(f32, comp) } },
|
||||
@ -370,11 +392,8 @@ fn bigIntToFloat(limbs: []const std.math.big.Limb, positive: bool) f128 {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn toBigInt(val: Value, space: *BigIntSpace, comp: *const Compilation) BigIntConst {
|
||||
return switch (comp.interner.get(val.ref()).int) {
|
||||
inline .u64, .i64 => |x| BigIntMutable.init(&space.limbs, x).toConst(),
|
||||
.big_int => |b| b,
|
||||
};
|
||||
fn toBigInt(val: Value, space: *BigIntSpace, comp: *const Compilation) BigIntConst {
|
||||
return comp.interner.get(val.ref()).toBigInt(space);
|
||||
}
|
||||
|
||||
pub fn isZero(v: Value, comp: *const Compilation) bool {
|
||||
@ -398,6 +417,7 @@ pub fn isZero(v: Value, comp: *const Compilation) bool {
|
||||
inline else => |data| return data[0] == 0.0 and data[1] == 0.0,
|
||||
},
|
||||
.bytes => return false,
|
||||
.pointer => return false,
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
@ -461,12 +481,19 @@ pub fn toBool(v: Value, comp: *const Compilation) bool {
|
||||
|
||||
pub fn toInt(v: Value, comptime T: type, comp: *const Compilation) ?T {
|
||||
if (v.opt_ref == .none) return null;
|
||||
if (comp.interner.get(v.ref()) != .int) return null;
|
||||
const key = comp.interner.get(v.ref());
|
||||
if (key != .int) return null;
|
||||
var space: BigIntSpace = undefined;
|
||||
const big_int = v.toBigInt(&space, comp);
|
||||
const big_int = key.toBigInt(&space);
|
||||
return big_int.toInt(T) catch null;
|
||||
}
|
||||
|
||||
pub fn toBytes(v: Value, comp: *const Compilation) []const u8 {
|
||||
assert(v.opt_ref != .none);
|
||||
const key = comp.interner.get(v.ref());
|
||||
return key.bytes;
|
||||
}
|
||||
|
||||
const ComplexOp = enum {
|
||||
add,
|
||||
sub,
|
||||
@ -492,10 +519,11 @@ fn complexAddSub(lhs: Value, rhs: Value, comptime T: type, op: ComplexOp, comp:
|
||||
};
|
||||
}
|
||||
|
||||
pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
if (ty.isFloat()) {
|
||||
if (ty.isComplex()) {
|
||||
pub fn add(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const scalar_kind = qt.scalarKind(comp);
|
||||
if (scalar_kind.isFloat()) {
|
||||
if (scalar_kind == .complex_float) {
|
||||
res.* = switch (bits) {
|
||||
32 => try complexAddSub(lhs, rhs, f16, .add, comp),
|
||||
64 => try complexAddSub(lhs, rhs, f32, .add, comp),
|
||||
@ -516,29 +544,60 @@ pub fn add(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
||||
};
|
||||
res.* = try intern(comp, .{ .float = f });
|
||||
return false;
|
||||
} else {
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
|
||||
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
);
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits);
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
return overflowed;
|
||||
}
|
||||
const lhs_key = comp.interner.get(lhs.ref());
|
||||
const rhs_key = comp.interner.get(rhs.ref());
|
||||
if (lhs_key == .bytes or rhs_key == .bytes) {
|
||||
res.* = .{};
|
||||
return false;
|
||||
}
|
||||
if (lhs_key == .pointer or rhs_key == .pointer) {
|
||||
const rel, const index = if (lhs_key == .pointer)
|
||||
.{ lhs_key.pointer, rhs }
|
||||
else
|
||||
.{ rhs_key.pointer, lhs };
|
||||
|
||||
const elem_size = try int(qt.childType(comp).sizeofOrNull(comp) orelse 1, comp);
|
||||
var total_offset: Value = undefined;
|
||||
const mul_overflow = try total_offset.mul(elem_size, index, comp.type_store.ptrdiff, comp);
|
||||
const old_offset = fromRef(rel.offset);
|
||||
const add_overflow = try total_offset.add(total_offset, old_offset, comp.type_store.ptrdiff, comp);
|
||||
_ = try total_offset.intCast(comp.type_store.ptrdiff, comp);
|
||||
res.* = try pointer(.{ .node = rel.node, .offset = total_offset.ref() }, comp);
|
||||
return mul_overflow or add_overflow;
|
||||
}
|
||||
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs_key.toBigInt(&lhs_space);
|
||||
const rhs_bigint = rhs_key.toBigInt(&rhs_space);
|
||||
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
);
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
const overflowed = result_bigint.addWrap(lhs_bigint, rhs_bigint, qt.signedness(comp), bits);
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
return overflowed;
|
||||
}
|
||||
|
||||
pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
if (ty.isFloat()) {
|
||||
if (ty.isComplex()) {
|
||||
pub fn negate(res: *Value, val: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
return res.sub(zero, val, qt, undefined, comp);
|
||||
}
|
||||
|
||||
pub fn decrement(res: *Value, val: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
return res.sub(val, one, qt, undefined, comp);
|
||||
}
|
||||
|
||||
/// elem_size is only used when subtracting two pointers, so we can scale the result by the size of the element type
|
||||
pub fn sub(res: *Value, lhs: Value, rhs: Value, qt: QualType, elem_size: u64, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const scalar_kind = qt.scalarKind(comp);
|
||||
if (scalar_kind.isFloat()) {
|
||||
if (scalar_kind == .complex_float) {
|
||||
res.* = switch (bits) {
|
||||
32 => try complexAddSub(lhs, rhs, f16, .sub, comp),
|
||||
64 => try complexAddSub(lhs, rhs, f32, .sub, comp),
|
||||
@ -559,29 +618,61 @@ pub fn sub(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
||||
};
|
||||
res.* = try intern(comp, .{ .float = f });
|
||||
return false;
|
||||
} else {
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
|
||||
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
);
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, ty.signedness(comp), bits);
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
return overflowed;
|
||||
}
|
||||
const lhs_key = comp.interner.get(lhs.ref());
|
||||
const rhs_key = comp.interner.get(rhs.ref());
|
||||
if (lhs_key == .bytes or rhs_key == .bytes) {
|
||||
res.* = .{};
|
||||
return false;
|
||||
}
|
||||
if (lhs_key == .pointer and rhs_key == .pointer) {
|
||||
const lhs_pointer = lhs_key.pointer;
|
||||
const rhs_pointer = rhs_key.pointer;
|
||||
if (lhs_pointer.node != rhs_pointer.node) {
|
||||
res.* = .{};
|
||||
return false;
|
||||
}
|
||||
const lhs_offset = fromRef(lhs_pointer.offset);
|
||||
const rhs_offset = fromRef(rhs_pointer.offset);
|
||||
const overflowed = try res.sub(lhs_offset, rhs_offset, comp.type_store.ptrdiff, undefined, comp);
|
||||
const rhs_size = try int(elem_size, comp);
|
||||
_ = try res.div(res.*, rhs_size, comp.type_store.ptrdiff, comp);
|
||||
return overflowed;
|
||||
} else if (lhs_key == .pointer) {
|
||||
const rel = lhs_key.pointer;
|
||||
|
||||
const lhs_size = try int(elem_size, comp);
|
||||
var total_offset: Value = undefined;
|
||||
const mul_overflow = try total_offset.mul(lhs_size, rhs, comp.type_store.ptrdiff, comp);
|
||||
const old_offset = fromRef(rel.offset);
|
||||
const add_overflow = try total_offset.sub(old_offset, total_offset, comp.type_store.ptrdiff, undefined, comp);
|
||||
_ = try total_offset.intCast(comp.type_store.ptrdiff, comp);
|
||||
res.* = try pointer(.{ .node = rel.node, .offset = total_offset.ref() }, comp);
|
||||
return mul_overflow or add_overflow;
|
||||
}
|
||||
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs_key.toBigInt(&lhs_space);
|
||||
const rhs_bigint = rhs_key.toBigInt(&rhs_space);
|
||||
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
);
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
const overflowed = result_bigint.subWrap(lhs_bigint, rhs_bigint, qt.signedness(comp), bits);
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
return overflowed;
|
||||
}
|
||||
|
||||
pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
if (ty.isFloat()) {
|
||||
if (ty.isComplex()) {
|
||||
pub fn mul(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const scalar_kind = qt.scalarKind(comp);
|
||||
if (scalar_kind.isFloat()) {
|
||||
if (scalar_kind == .complex_float) {
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = annex_g.complexFloatMul(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) },
|
||||
64 => .{ .cf32 = annex_g.complexFloatMul(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) },
|
||||
@ -624,7 +715,7 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
||||
|
||||
result_bigint.mul(lhs_bigint, rhs_bigint, limbs_buffer, comp.gpa);
|
||||
|
||||
const signedness = ty.signedness(comp);
|
||||
const signedness = qt.signedness(comp);
|
||||
const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits);
|
||||
if (overflowed) {
|
||||
result_bigint.truncate(result_bigint.toConst(), signedness, bits);
|
||||
@ -635,10 +726,11 @@ pub fn mul(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
||||
}
|
||||
|
||||
/// caller guarantees rhs != 0
|
||||
pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
if (ty.isFloat()) {
|
||||
if (ty.isComplex()) {
|
||||
pub fn div(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const scalar_kind = qt.scalarKind(comp);
|
||||
if (scalar_kind.isFloat()) {
|
||||
if (scalar_kind == .complex_float) {
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = annex_g.complexFloatDiv(f16, lhs.toFloat(f16, comp), lhs.imag(f16, comp), rhs.toFloat(f16, comp), rhs.imag(f16, comp)) },
|
||||
64 => .{ .cf32 = annex_g.complexFloatDiv(f32, lhs.toFloat(f32, comp), lhs.imag(f32, comp), rhs.toFloat(f32, comp), rhs.imag(f32, comp)) },
|
||||
@ -689,22 +781,21 @@ pub fn div(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
||||
result_q.divTrunc(&result_r, lhs_bigint, rhs_bigint, limbs_buffer);
|
||||
|
||||
res.* = try intern(comp, .{ .int = .{ .big_int = result_q.toConst() } });
|
||||
return !result_q.toConst().fitsInTwosComp(ty.signedness(comp), bits);
|
||||
return !result_q.toConst().fitsInTwosComp(qt.signedness(comp), bits);
|
||||
}
|
||||
}
|
||||
|
||||
/// caller guarantees rhs != 0
|
||||
/// caller guarantees lhs != std.math.minInt(T) OR rhs != -1
|
||||
pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
||||
pub fn rem(lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !Value {
|
||||
var lhs_space: BigIntSpace = undefined;
|
||||
var rhs_space: BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const rhs_bigint = rhs.toBigInt(&rhs_space, comp);
|
||||
|
||||
const signedness = ty.signedness(comp);
|
||||
if (signedness == .signed) {
|
||||
if (qt.signedness(comp) == .signed) {
|
||||
var spaces: [2]BigIntSpace = undefined;
|
||||
const min_val = try Value.minInt(ty, comp);
|
||||
const min_val = try Value.minInt(qt, comp);
|
||||
const negative = BigIntMutable.init(&spaces[0].limbs, -1).toConst();
|
||||
const big_one = BigIntMutable.init(&spaces[1].limbs, 1).toConst();
|
||||
if (lhs.compare(.eq, min_val, comp) and rhs_bigint.eql(negative)) {
|
||||
@ -712,9 +803,9 @@ pub fn rem(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
||||
} else if (rhs_bigint.order(big_one).compare(.lt)) {
|
||||
// lhs - @divTrunc(lhs, rhs) * rhs
|
||||
var tmp: Value = undefined;
|
||||
_ = try tmp.div(lhs, rhs, ty, comp);
|
||||
_ = try tmp.mul(tmp, rhs, ty, comp);
|
||||
_ = try tmp.sub(lhs, tmp, ty, comp);
|
||||
_ = try tmp.div(lhs, rhs, qt, comp);
|
||||
_ = try tmp.mul(tmp, rhs, qt, comp);
|
||||
_ = try tmp.sub(lhs, tmp, qt, undefined, comp);
|
||||
return tmp;
|
||||
}
|
||||
}
|
||||
@ -801,8 +892,8 @@ pub fn bitAnd(lhs: Value, rhs: Value, comp: *Compilation) !Value {
|
||||
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
}
|
||||
|
||||
pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value {
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
pub fn bitNot(val: Value, qt: QualType, comp: *Compilation) !Value {
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
var val_space: Value.BigIntSpace = undefined;
|
||||
const val_bigint = val.toBigInt(&val_space, comp);
|
||||
|
||||
@ -813,21 +904,21 @@ pub fn bitNot(val: Value, ty: Type, comp: *Compilation) !Value {
|
||||
defer comp.gpa.free(limbs);
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
result_bigint.bitNotWrap(val_bigint, ty.signedness(comp), bits);
|
||||
result_bigint.bitNotWrap(val_bigint, qt.signedness(comp), bits);
|
||||
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
}
|
||||
|
||||
pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !bool {
|
||||
pub fn shl(res: *Value, lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !bool {
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const shift = rhs.toInt(usize, comp) orelse std.math.maxInt(usize);
|
||||
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
if (shift > bits) {
|
||||
if (lhs_bigint.positive) {
|
||||
res.* = try Value.maxInt(ty, comp);
|
||||
res.* = try Value.maxInt(qt, comp);
|
||||
} else {
|
||||
res.* = try Value.minInt(ty, comp);
|
||||
res.* = try Value.minInt(qt, comp);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -840,7 +931,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
||||
var result_bigint = BigIntMutable{ .limbs = limbs, .positive = undefined, .len = undefined };
|
||||
|
||||
result_bigint.shiftLeft(lhs_bigint, shift);
|
||||
const signedness = ty.signedness(comp);
|
||||
const signedness = qt.signedness(comp);
|
||||
const overflowed = !result_bigint.toConst().fitsInTwosComp(signedness, bits);
|
||||
if (overflowed) {
|
||||
result_bigint.truncate(result_bigint.toConst(), signedness, bits);
|
||||
@ -849,7 +940,7 @@ pub fn shl(res: *Value, lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !b
|
||||
return overflowed;
|
||||
}
|
||||
|
||||
pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
||||
pub fn shr(lhs: Value, rhs: Value, qt: QualType, comp: *Compilation) !Value {
|
||||
var lhs_space: Value.BigIntSpace = undefined;
|
||||
const lhs_bigint = lhs.toBigInt(&lhs_space, comp);
|
||||
const shift = rhs.toInt(usize, comp) orelse return zero;
|
||||
@ -865,7 +956,7 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
||||
}
|
||||
}
|
||||
|
||||
const bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
const bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
const limbs = try comp.gpa.alloc(
|
||||
std.math.big.Limb,
|
||||
std.math.big.int.calcTwosCompLimbCount(bits),
|
||||
@ -877,8 +968,8 @@ pub fn shr(lhs: Value, rhs: Value, ty: Type, comp: *Compilation) !Value {
|
||||
return intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
}
|
||||
|
||||
pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value {
|
||||
const bits = ty.bitSizeof(comp).?;
|
||||
pub fn complexConj(val: Value, qt: QualType, comp: *Compilation) !Value {
|
||||
const bits = qt.bitSizeof(comp);
|
||||
const cf: Interner.Key.Complex = switch (bits) {
|
||||
32 => .{ .cf16 = .{ val.toFloat(f16, comp), -val.imag(f16, comp) } },
|
||||
64 => .{ .cf32 = .{ val.toFloat(f32, comp), -val.imag(f32, comp) } },
|
||||
@ -890,12 +981,17 @@ pub fn complexConj(val: Value, ty: Type, comp: *Compilation) !Value {
|
||||
return intern(comp, .{ .complex = cf });
|
||||
}
|
||||
|
||||
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool {
|
||||
fn shallowCompare(lhs: Value, op: std.math.CompareOperator, rhs: Value) ?bool {
|
||||
if (op == .eq) {
|
||||
return lhs.opt_ref == rhs.opt_ref;
|
||||
} else if (lhs.opt_ref == rhs.opt_ref) {
|
||||
return std.math.Order.eq.compare(op);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) bool {
|
||||
if (lhs.shallowCompare(op, rhs)) |val| return val;
|
||||
|
||||
const lhs_key = comp.interner.get(lhs.ref());
|
||||
const rhs_key = comp.interner.get(rhs.ref());
|
||||
@ -918,10 +1014,33 @@ pub fn compare(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *cons
|
||||
return lhs_bigint.order(rhs_bigint).compare(op);
|
||||
}
|
||||
|
||||
fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *Compilation) !Value {
|
||||
const signedness = ty.signedness(comp);
|
||||
/// Returns null for values that cannot be compared at compile time (e.g. `&x < &y`) for globals `x` and `y`.
|
||||
pub fn comparePointers(lhs: Value, op: std.math.CompareOperator, rhs: Value, comp: *const Compilation) ?bool {
|
||||
if (lhs.shallowCompare(op, rhs)) |val| return val;
|
||||
|
||||
const lhs_key = comp.interner.get(lhs.ref());
|
||||
const rhs_key = comp.interner.get(rhs.ref());
|
||||
|
||||
if (lhs_key == .pointer and rhs_key == .pointer) {
|
||||
const lhs_pointer = lhs_key.pointer;
|
||||
const rhs_pointer = rhs_key.pointer;
|
||||
switch (op) {
|
||||
.eq => if (lhs_pointer.node != rhs_pointer.node) return false,
|
||||
.neq => if (lhs_pointer.node != rhs_pointer.node) return true,
|
||||
else => if (lhs_pointer.node != rhs_pointer.node) return null,
|
||||
}
|
||||
|
||||
const lhs_offset = fromRef(lhs_pointer.offset);
|
||||
const rhs_offset = fromRef(rhs_pointer.offset);
|
||||
return lhs_offset.compare(op, rhs_offset, comp);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, qt: QualType, comp: *Compilation) !Value {
|
||||
const signedness = qt.signedness(comp);
|
||||
if (limit == .min and signedness == .unsigned) return Value.zero;
|
||||
const mag_bits: usize = @intCast(ty.bitSizeof(comp).?);
|
||||
const mag_bits: usize = @intCast(qt.bitSizeof(comp));
|
||||
switch (mag_bits) {
|
||||
inline 8, 16, 32, 64 => |bits| {
|
||||
if (limit == .min) return Value.int(@as(i64, std.math.minInt(std.meta.Int(.signed, bits))), comp);
|
||||
@ -946,44 +1065,63 @@ fn twosCompIntLimit(limit: std.math.big.int.TwosCompIntLimit, ty: Type, comp: *C
|
||||
return Value.intern(comp, .{ .int = .{ .big_int = result_bigint.toConst() } });
|
||||
}
|
||||
|
||||
pub fn minInt(ty: Type, comp: *Compilation) !Value {
|
||||
return twosCompIntLimit(.min, ty, comp);
|
||||
pub fn minInt(qt: QualType, comp: *Compilation) !Value {
|
||||
return twosCompIntLimit(.min, qt, comp);
|
||||
}
|
||||
|
||||
pub fn maxInt(ty: Type, comp: *Compilation) !Value {
|
||||
return twosCompIntLimit(.max, ty, comp);
|
||||
pub fn maxInt(qt: QualType, comp: *Compilation) !Value {
|
||||
return twosCompIntLimit(.max, qt, comp);
|
||||
}
|
||||
|
||||
pub fn print(v: Value, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void {
|
||||
if (ty.is(.bool)) {
|
||||
return w.writeAll(if (v.isZero(comp)) "false" else "true");
|
||||
const NestedPrint = union(enum) {
|
||||
pointer: struct {
|
||||
node: u32,
|
||||
offset: Value,
|
||||
},
|
||||
};
|
||||
|
||||
pub fn printPointer(offset: Value, base: []const u8, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void {
|
||||
try w.writeByte('&');
|
||||
try w.writeAll(base);
|
||||
if (!offset.isZero(comp)) {
|
||||
const maybe_nested = try offset.print(comp.type_store.ptrdiff, comp, w);
|
||||
std.debug.assert(maybe_nested == null);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn print(v: Value, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!?NestedPrint {
|
||||
if (qt.is(comp, .bool)) {
|
||||
try w.writeAll(if (v.isZero(comp)) "false" else "true");
|
||||
return null;
|
||||
}
|
||||
const key = comp.interner.get(v.ref());
|
||||
switch (key) {
|
||||
.null => return w.writeAll("nullptr_t"),
|
||||
.null => try w.writeAll("nullptr_t"),
|
||||
.int => |repr| switch (repr) {
|
||||
inline .u64, .i64, .big_int => |x| return w.print("{d}", .{x}),
|
||||
inline else => |x| try w.print("{d}", .{x}),
|
||||
},
|
||||
.float => |repr| switch (repr) {
|
||||
.f16 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}),
|
||||
.f32 => |x| return w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000000) / 1000000}),
|
||||
inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}),
|
||||
.f16 => |x| try w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000) / 1000}),
|
||||
.f32 => |x| try w.print("{d}", .{@round(@as(f64, @floatCast(x)) * 1000000) / 1000000}),
|
||||
inline else => |x| try w.print("{d}", .{@as(f64, @floatCast(x))}),
|
||||
},
|
||||
.bytes => |b| return printString(b, ty, comp, w),
|
||||
.bytes => |b| try printString(b, qt, comp, w),
|
||||
.complex => |repr| switch (repr) {
|
||||
.cf32 => |components| return w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }),
|
||||
inline else => |components| return w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }),
|
||||
.cf32 => |components| try w.print("{d} + {d}i", .{ @round(@as(f64, @floatCast(components[0])) * 1000000) / 1000000, @round(@as(f64, @floatCast(components[1])) * 1000000) / 1000000 }),
|
||||
inline else => |components| try w.print("{d} + {d}i", .{ @as(f64, @floatCast(components[0])), @as(f64, @floatCast(components[1])) }),
|
||||
},
|
||||
.pointer => |ptr| return .{ .pointer = .{ .node = ptr.node, .offset = fromRef(ptr.offset) } },
|
||||
else => unreachable, // not a value
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn printString(bytes: []const u8, ty: Type, comp: *const Compilation, w: *Writer) Writer.Error!void {
|
||||
const size: Compilation.CharUnitSize = @enumFromInt(ty.elemType().sizeof(comp).?);
|
||||
pub fn printString(bytes: []const u8, qt: QualType, comp: *const Compilation, w: *std.Io.Writer) std.Io.Writer.Error!void {
|
||||
const size: Compilation.CharUnitSize = @enumFromInt(qt.childType(comp).sizeof(comp));
|
||||
const without_null = bytes[0 .. bytes.len - @intFromEnum(size)];
|
||||
try w.writeByte('"');
|
||||
switch (size) {
|
||||
.@"1" => try w.print("{f}", .{std.zig.fmtString(without_null)}),
|
||||
.@"1" => try std.zig.stringEscape(without_null, w),
|
||||
.@"2" => {
|
||||
var items: [2]u16 = undefined;
|
||||
var i: usize = 0;
|
||||
|
||||
80
lib/compiler/aro/aro/char_info.zig
vendored
80
lib/compiler/aro/aro/char_info.zig
vendored
@ -442,48 +442,48 @@ pub fn isInvisible(codepoint: u21) bool {
|
||||
}
|
||||
|
||||
/// Checks for identifier characters which resemble non-identifier characters
|
||||
pub fn homoglyph(codepoint: u21) ?u21 {
|
||||
pub fn homoglyph(codepoint: u21) ?[]const u8 {
|
||||
assert(codepoint > 0x7F);
|
||||
return switch (codepoint) {
|
||||
0x01c3 => '!', // LATIN LETTER RETROFLEX CLICK
|
||||
0x037e => ';', // GREEK QUESTION MARK
|
||||
0x2212 => '-', // MINUS SIGN
|
||||
0x2215 => '/', // DIVISION SLASH
|
||||
0x2216 => '\\', // SET MINUS
|
||||
0x2217 => '*', // ASTERISK OPERATOR
|
||||
0x2223 => '|', // DIVIDES
|
||||
0x2227 => '^', // LOGICAL AND
|
||||
0x2236 => ':', // RATIO
|
||||
0x223c => '~', // TILDE OPERATOR
|
||||
0xa789 => ':', // MODIFIER LETTER COLON
|
||||
0xff01 => '!', // FULLWIDTH EXCLAMATION MARK
|
||||
0xff03 => '#', // FULLWIDTH NUMBER SIGN
|
||||
0xff04 => '$', // FULLWIDTH DOLLAR SIGN
|
||||
0xff05 => '%', // FULLWIDTH PERCENT SIGN
|
||||
0xff06 => '&', // FULLWIDTH AMPERSAND
|
||||
0xff08 => '(', // FULLWIDTH LEFT PARENTHESIS
|
||||
0xff09 => ')', // FULLWIDTH RIGHT PARENTHESIS
|
||||
0xff0a => '*', // FULLWIDTH ASTERISK
|
||||
0xff0b => '+', // FULLWIDTH ASTERISK
|
||||
0xff0c => ',', // FULLWIDTH COMMA
|
||||
0xff0d => '-', // FULLWIDTH HYPHEN-MINUS
|
||||
0xff0e => '.', // FULLWIDTH FULL STOP
|
||||
0xff0f => '/', // FULLWIDTH SOLIDUS
|
||||
0xff1a => ':', // FULLWIDTH COLON
|
||||
0xff1b => ';', // FULLWIDTH SEMICOLON
|
||||
0xff1c => '<', // FULLWIDTH LESS-THAN SIGN
|
||||
0xff1d => '=', // FULLWIDTH EQUALS SIGN
|
||||
0xff1e => '>', // FULLWIDTH GREATER-THAN SIGN
|
||||
0xff1f => '?', // FULLWIDTH QUESTION MARK
|
||||
0xff20 => '@', // FULLWIDTH COMMERCIAL AT
|
||||
0xff3b => '[', // FULLWIDTH LEFT SQUARE BRACKET
|
||||
0xff3c => '\\', // FULLWIDTH REVERSE SOLIDUS
|
||||
0xff3d => ']', // FULLWIDTH RIGHT SQUARE BRACKET
|
||||
0xff3e => '^', // FULLWIDTH CIRCUMFLEX ACCENT
|
||||
0xff5b => '{', // FULLWIDTH LEFT CURLY BRACKET
|
||||
0xff5c => '|', // FULLWIDTH VERTICAL LINE
|
||||
0xff5d => '}', // FULLWIDTH RIGHT CURLY BRACKET
|
||||
0xff5e => '~', // FULLWIDTH TILDE
|
||||
0x01c3 => "!", // LATIN LETTER RETROFLEX CLICK
|
||||
0x037e => ";", // GREEK QUESTION MARK
|
||||
0x2212 => "-", // MINUS SIGN
|
||||
0x2215 => "/", // DIVISION SLASH
|
||||
0x2216 => "\\", // SET MINUS
|
||||
0x2217 => "*", // ASTERISK OPERATOR
|
||||
0x2223 => "|", // DIVIDES
|
||||
0x2227 => "^", // LOGICAL AND
|
||||
0x2236 => ":", // RATIO
|
||||
0x223c => "~", // TILDE OPERATOR
|
||||
0xa789 => ":", // MODIFIER LETTER COLON
|
||||
0xff01 => "!", // FULLWIDTH EXCLAMATION MARK
|
||||
0xff03 => "#", // FULLWIDTH NUMBER SIGN
|
||||
0xff04 => "$", // FULLWIDTH DOLLAR SIGN
|
||||
0xff05 => "%", // FULLWIDTH PERCENT SIGN
|
||||
0xff06 => "&", // FULLWIDTH AMPERSAND
|
||||
0xff08 => "(", // FULLWIDTH LEFT PARENTHESIS
|
||||
0xff09 => ")", // FULLWIDTH RIGHT PARENTHESIS
|
||||
0xff0a => "*", // FULLWIDTH ASTERISK
|
||||
0xff0b => "+", // FULLWIDTH ASTERISK
|
||||
0xff0c => ",", // FULLWIDTH COMMA
|
||||
0xff0d => "-", // FULLWIDTH HYPHEN-MINUS
|
||||
0xff0e => ".", // FULLWIDTH FULL STOP
|
||||
0xff0f => "/", // FULLWIDTH SOLIDUS
|
||||
0xff1a => ":", // FULLWIDTH COLON
|
||||
0xff1b => ";", // FULLWIDTH SEMICOLON
|
||||
0xff1c => "<", // FULLWIDTH LESS-THAN SIGN
|
||||
0xff1d => "=", // FULLWIDTH EQUALS SIGN
|
||||
0xff1e => ">", // FULLWIDTH GREATER-THAN SIGN
|
||||
0xff1f => "?", // FULLWIDTH QUESTION MARK
|
||||
0xff20 => "@", // FULLWIDTH COMMERCIAL AT
|
||||
0xff3b => "[", // FULLWIDTH LEFT SQUARE BRACKET
|
||||
0xff3c => "\\", // FULLWIDTH REVERSE SOLIDUS
|
||||
0xff3d => "]", // FULLWIDTH RIGHT SQUARE BRACKET
|
||||
0xff3e => "^", // FULLWIDTH CIRCUMFLEX ACCENT
|
||||
0xff5b => "{", // FULLWIDTH LEFT CURLY BRACKET
|
||||
0xff5c => "|", // FULLWIDTH VERTICAL LINE
|
||||
0xff5d => "}", // FULLWIDTH RIGHT CURLY BRACKET
|
||||
0xff5e => "~", // FULLWIDTH TILDE
|
||||
else => null,
|
||||
};
|
||||
}
|
||||
|
||||
4
lib/compiler/aro/aro/features.zig
vendored
4
lib/compiler/aro/aro/features.zig
vendored
@ -57,13 +57,13 @@ pub fn hasExtension(comp: *Compilation, ext: []const u8) bool {
|
||||
// C11 features
|
||||
.c_alignas = true,
|
||||
.c_alignof = true,
|
||||
.c_atomic = false, // TODO
|
||||
.c_atomic = true,
|
||||
.c_generic_selections = true,
|
||||
.c_static_assert = true,
|
||||
.c_thread_local = target_util.isTlsSupported(comp.target),
|
||||
// misc
|
||||
.overloadable_unmarked = false, // TODO
|
||||
.statement_attributes_with_gnu_syntax = false, // TODO
|
||||
.statement_attributes_with_gnu_syntax = true,
|
||||
.gnu_asm = true,
|
||||
.gnu_asm_goto_with_outputs = true,
|
||||
.matrix_types = false, // TODO
|
||||
|
||||
80
lib/compiler/aro/aro/pragmas/gcc.zig
vendored
80
lib/compiler/aro/aro/pragmas/gcc.zig
vendored
@ -1,10 +1,11 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Diagnostics = @import("../Diagnostics.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Parser = @import("../Parser.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
|
||||
const GCC = @This();
|
||||
@ -18,8 +19,8 @@ pragma: Pragma = .{
|
||||
.parserHandler = parserHandler,
|
||||
.preserveTokens = preserveTokens,
|
||||
},
|
||||
original_options: Diagnostics.Options = .{},
|
||||
options_stack: std.ArrayListUnmanaged(Diagnostics.Options) = .empty,
|
||||
original_state: Diagnostics.State = .{},
|
||||
state_stack: std.ArrayListUnmanaged(Diagnostics.State) = .{},
|
||||
|
||||
const Directive = enum {
|
||||
warning,
|
||||
@ -38,19 +39,19 @@ const Directive = enum {
|
||||
|
||||
fn beforePreprocess(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *GCC = @fieldParentPtr("pragma", pragma);
|
||||
self.original_options = comp.diagnostics.options;
|
||||
self.original_state = comp.diagnostics.state;
|
||||
}
|
||||
|
||||
fn beforeParse(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *GCC = @fieldParentPtr("pragma", pragma);
|
||||
comp.diagnostics.options = self.original_options;
|
||||
self.options_stack.items.len = 0;
|
||||
comp.diagnostics.state = self.original_state;
|
||||
self.state_stack.items.len = 0;
|
||||
}
|
||||
|
||||
fn afterParse(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *GCC = @fieldParentPtr("pragma", pragma);
|
||||
comp.diagnostics.options = self.original_options;
|
||||
self.options_stack.items.len = 0;
|
||||
comp.diagnostics.state = self.original_state;
|
||||
self.state_stack.items.len = 0;
|
||||
}
|
||||
|
||||
pub fn init(allocator: mem.Allocator) !*Pragma {
|
||||
@ -61,7 +62,7 @@ pub fn init(allocator: mem.Allocator) !*Pragma {
|
||||
|
||||
fn deinit(pragma: *Pragma, comp: *Compilation) void {
|
||||
var self: *GCC = @fieldParentPtr("pragma", pragma);
|
||||
self.options_stack.deinit(comp.gpa);
|
||||
self.state_stack.deinit(comp.gpa);
|
||||
comp.gpa.destroy(self);
|
||||
}
|
||||
|
||||
@ -76,23 +77,14 @@ fn diagnosticHandler(self: *GCC, pp: *Preprocessor, start_idx: TokenIndex) Pragm
|
||||
.ignored, .warning, .@"error", .fatal => {
|
||||
const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) {
|
||||
error.ExpectedStringLiteral => {
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_requires_string_literal,
|
||||
.loc = diagnostic_tok.loc,
|
||||
.extra = .{ .str = "GCC diagnostic" },
|
||||
}, pp.expansionSlice(start_idx));
|
||||
return Pragma.err(pp, start_idx, .pragma_requires_string_literal, .{"GCC diagnostic"});
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
if (!mem.startsWith(u8, str, "-W")) {
|
||||
const next = pp.tokens.get(start_idx + 1);
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .malformed_warning_check,
|
||||
.loc = next.loc,
|
||||
.extra = .{ .str = "GCC diagnostic" },
|
||||
}, pp.expansionSlice(start_idx + 1));
|
||||
return Pragma.err(pp, start_idx + 1, .malformed_warning_check, .{"GCC diagnostic"});
|
||||
}
|
||||
const new_kind: Diagnostics.Kind = switch (diagnostic) {
|
||||
const new_kind: Diagnostics.Message.Kind = switch (diagnostic) {
|
||||
.ignored => .off,
|
||||
.warning => .warning,
|
||||
.@"error" => .@"error",
|
||||
@ -100,10 +92,10 @@ fn diagnosticHandler(self: *GCC, pp: *Preprocessor, start_idx: TokenIndex) Pragm
|
||||
else => unreachable,
|
||||
};
|
||||
|
||||
try pp.comp.diagnostics.set(str[2..], new_kind);
|
||||
try pp.diagnostics.set(str[2..], new_kind);
|
||||
},
|
||||
.push => try self.options_stack.append(pp.comp.gpa, pp.comp.diagnostics.options),
|
||||
.pop => pp.comp.diagnostics.options = self.options_stack.pop() orelse self.original_options,
|
||||
.push => try self.state_stack.append(pp.comp.gpa, pp.diagnostics.state),
|
||||
.pop => pp.diagnostics.state = self.state_stack.pop() orelse self.original_state,
|
||||
}
|
||||
}
|
||||
|
||||
@ -112,38 +104,24 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
|
||||
const directive_tok = pp.tokens.get(start_idx + 1);
|
||||
if (directive_tok.id == .nl) return;
|
||||
|
||||
const gcc_pragma = std.meta.stringToEnum(Directive, pp.expandedSlice(directive_tok)) orelse
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .unknown_gcc_pragma,
|
||||
.loc = directive_tok.loc,
|
||||
}, pp.expansionSlice(start_idx + 1));
|
||||
const gcc_pragma = std.meta.stringToEnum(Directive, pp.expandedSlice(directive_tok)) orelse {
|
||||
return Pragma.err(pp, start_idx + 1, .unknown_gcc_pragma, .{});
|
||||
};
|
||||
|
||||
switch (gcc_pragma) {
|
||||
.warning, .@"error" => {
|
||||
const text = Pragma.pasteTokens(pp, start_idx + 2) catch |err| switch (err) {
|
||||
error.ExpectedStringLiteral => {
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_requires_string_literal,
|
||||
.loc = directive_tok.loc,
|
||||
.extra = .{ .str = @tagName(gcc_pragma) },
|
||||
}, pp.expansionSlice(start_idx + 1));
|
||||
return Pragma.err(pp, start_idx + 1, .pragma_requires_string_literal, .{@tagName(gcc_pragma)});
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
const extra = Diagnostics.Message.Extra{ .str = try pp.comp.diagnostics.arena.allocator().dupe(u8, text) };
|
||||
const diagnostic_tag: Diagnostics.Tag = if (gcc_pragma == .warning) .pragma_warning_message else .pragma_error_message;
|
||||
return pp.comp.addDiagnostic(
|
||||
.{ .tag = diagnostic_tag, .loc = directive_tok.loc, .extra = extra },
|
||||
pp.expansionSlice(start_idx + 1),
|
||||
);
|
||||
|
||||
return Pragma.err(pp, start_idx + 1, if (gcc_pragma == .warning) .pragma_warning_message else .pragma_error_message, .{text});
|
||||
},
|
||||
.diagnostic => return self.diagnosticHandler(pp, start_idx + 2) catch |err| switch (err) {
|
||||
error.UnknownPragma => {
|
||||
const tok = pp.tokens.get(start_idx + 2);
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .unknown_gcc_pragma_directive,
|
||||
.loc = tok.loc,
|
||||
}, pp.expansionSlice(start_idx + 2));
|
||||
return Pragma.err(pp, start_idx + 2, .unknown_gcc_pragma_directive, .{});
|
||||
},
|
||||
else => |e| return e,
|
||||
},
|
||||
@ -154,17 +132,11 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
|
||||
if (tok.id == .nl) break;
|
||||
|
||||
if (!tok.id.isMacroIdentifier()) {
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_poison_identifier,
|
||||
.loc = tok.loc,
|
||||
}, pp.expansionSlice(start_idx + i));
|
||||
return Pragma.err(pp, start_idx + i, .pragma_poison_identifier, .{});
|
||||
}
|
||||
const str = pp.expandedSlice(tok);
|
||||
if (pp.defines.get(str) != null) {
|
||||
try pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_poison_macro,
|
||||
.loc = tok.loc,
|
||||
}, pp.expansionSlice(start_idx + i));
|
||||
try Pragma.err(pp, start_idx + i, .pragma_poison_macro, .{});
|
||||
}
|
||||
try pp.poisoned_identifiers.put(str, {});
|
||||
}
|
||||
|
||||
35
lib/compiler/aro/aro/pragmas/message.zig
vendored
35
lib/compiler/aro/aro/pragmas/message.zig
vendored
@ -1,12 +1,13 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Diagnostics = @import("../Diagnostics.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Parser = @import("../Parser.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Source = @import("../Source.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
|
||||
const Message = @This();
|
||||
|
||||
@ -27,24 +28,32 @@ fn deinit(pragma: *Pragma, comp: *Compilation) void {
|
||||
}
|
||||
|
||||
fn preprocessorHandler(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) Pragma.Error!void {
|
||||
const message_tok = pp.tokens.get(start_idx);
|
||||
const message_expansion_locs = pp.expansionSlice(start_idx);
|
||||
|
||||
const str = Pragma.pasteTokens(pp, start_idx + 1) catch |err| switch (err) {
|
||||
error.ExpectedStringLiteral => {
|
||||
return pp.comp.addDiagnostic(.{
|
||||
.tag = .pragma_requires_string_literal,
|
||||
.loc = message_tok.loc,
|
||||
.extra = .{ .str = "message" },
|
||||
}, message_expansion_locs);
|
||||
return Pragma.err(pp, start_idx, .pragma_requires_string_literal, .{"message"});
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
const message_tok = pp.tokens.get(start_idx);
|
||||
const message_expansion_locs = pp.expansionSlice(start_idx);
|
||||
const loc = if (message_expansion_locs.len != 0)
|
||||
message_expansion_locs[message_expansion_locs.len - 1]
|
||||
else
|
||||
message_tok.loc;
|
||||
const extra = Diagnostics.Message.Extra{ .str = try pp.comp.diagnostics.arena.allocator().dupe(u8, str) };
|
||||
return pp.comp.addDiagnostic(.{ .tag = .pragma_message, .loc = loc, .extra = extra }, &.{});
|
||||
|
||||
const diagnostic: Pragma.Diagnostic = .pragma_message;
|
||||
|
||||
var sf = std.heap.stackFallback(1024, pp.gpa);
|
||||
var allocating: std.Io.Writer.Allocating = .init(sf.get());
|
||||
defer allocating.deinit();
|
||||
|
||||
Diagnostics.formatArgs(&allocating.writer, diagnostic.fmt, .{str}) catch return error.OutOfMemory;
|
||||
|
||||
try pp.diagnostics.add(.{
|
||||
.text = allocating.getWritten(),
|
||||
.kind = diagnostic.kind,
|
||||
.opt = diagnostic.opt,
|
||||
.location = loc.expand(pp.comp),
|
||||
});
|
||||
}
|
||||
|
||||
23
lib/compiler/aro/aro/pragmas/once.zig
vendored
23
lib/compiler/aro/aro/pragmas/once.zig
vendored
@ -1,12 +1,13 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Diagnostics = @import("../Diagnostics.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Parser = @import("../Parser.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Source = @import("../Source.zig");
|
||||
const TokenIndex = @import("../Tree.zig").TokenIndex;
|
||||
|
||||
const Once = @This();
|
||||
|
||||
@ -14,6 +15,7 @@ pragma: Pragma = .{
|
||||
.afterParse = afterParse,
|
||||
.deinit = deinit,
|
||||
.preprocessorHandler = preprocessorHandler,
|
||||
.preserveTokens = preserveTokens,
|
||||
},
|
||||
pragma_once: std.AutoHashMap(Source.Id, void),
|
||||
preprocess_count: u32 = 0,
|
||||
@ -42,10 +44,13 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
|
||||
const name_tok = pp.tokens.get(start_idx);
|
||||
const next = pp.tokens.get(start_idx + 1);
|
||||
if (next.id != .nl) {
|
||||
try pp.comp.addDiagnostic(.{
|
||||
.tag = .extra_tokens_directive_end,
|
||||
.loc = name_tok.loc,
|
||||
}, pp.expansionSlice(start_idx + 1));
|
||||
const diagnostic: Preprocessor.Diagnostic = .extra_tokens_directive_end;
|
||||
return pp.diagnostics.addWithLocation(pp.comp, .{
|
||||
.text = diagnostic.fmt,
|
||||
.kind = diagnostic.kind,
|
||||
.opt = diagnostic.opt,
|
||||
.location = name_tok.loc.expand(pp.comp),
|
||||
}, pp.expansionSlice(start_idx + 1), true);
|
||||
}
|
||||
const seen = self.preprocess_count == pp.preprocess_count;
|
||||
const prev = try self.pragma_once.fetchPut(name_tok.loc.id, {});
|
||||
@ -54,3 +59,7 @@ fn preprocessorHandler(pragma: *Pragma, pp: *Preprocessor, start_idx: TokenIndex
|
||||
}
|
||||
self.preprocess_count = pp.preprocess_count;
|
||||
}
|
||||
|
||||
fn preserveTokens(_: *Pragma, _: *Preprocessor, _: TokenIndex) bool {
|
||||
return false;
|
||||
}
|
||||
|
||||
39
lib/compiler/aro/aro/pragmas/pack.zig
vendored
39
lib/compiler/aro/aro/pragmas/pack.zig
vendored
@ -1,10 +1,11 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Diagnostics = @import("../Diagnostics.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Parser = @import("../Parser.zig");
|
||||
const Pragma = @import("../Pragma.zig");
|
||||
const Preprocessor = @import("../Preprocessor.zig");
|
||||
const Tree = @import("../Tree.zig");
|
||||
const TokenIndex = Tree.TokenIndex;
|
||||
|
||||
@ -13,9 +14,8 @@ const Pack = @This();
|
||||
pragma: Pragma = .{
|
||||
.deinit = deinit,
|
||||
.parserHandler = parserHandler,
|
||||
.preserveTokens = preserveTokens,
|
||||
},
|
||||
stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .empty,
|
||||
stack: std.ArrayListUnmanaged(struct { label: []const u8, val: u8 }) = .{},
|
||||
|
||||
pub fn init(allocator: mem.Allocator) !*Pragma {
|
||||
var pack = try allocator.create(Pack);
|
||||
@ -34,10 +34,7 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
|
||||
var idx = start_idx + 1;
|
||||
const l_paren = p.pp.tokens.get(idx);
|
||||
if (l_paren.id != .l_paren) {
|
||||
return p.comp.addDiagnostic(.{
|
||||
.tag = .pragma_pack_lparen,
|
||||
.loc = l_paren.loc,
|
||||
}, p.pp.expansionSlice(idx));
|
||||
return Pragma.err(p.pp, idx, .pragma_pack_lparen, .{});
|
||||
}
|
||||
idx += 1;
|
||||
|
||||
@ -54,11 +51,11 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
|
||||
pop,
|
||||
};
|
||||
const action = std.meta.stringToEnum(Action, p.tokSlice(arg)) orelse {
|
||||
return p.errTok(.pragma_pack_unknown_action, arg);
|
||||
return Pragma.err(p.pp, arg, .pragma_pack_unknown_action, .{});
|
||||
};
|
||||
switch (action) {
|
||||
.show => {
|
||||
try p.errExtra(.pragma_pack_show, arg, .{ .unsigned = p.pragma_pack orelse 8 });
|
||||
return Pragma.err(p.pp, arg, .pragma_pack_show, .{p.pragma_pack orelse 8});
|
||||
},
|
||||
.push, .pop => {
|
||||
var new_val: ?u8 = null;
|
||||
@ -75,11 +72,13 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
|
||||
idx += 1;
|
||||
const int = idx;
|
||||
idx += 1;
|
||||
if (tok_ids[int] != .pp_num) return p.errTok(.pragma_pack_int_ident, int);
|
||||
if (tok_ids[int] != .pp_num) {
|
||||
return Pragma.err(p.pp, int, .pragma_pack_int_ident, .{});
|
||||
}
|
||||
new_val = (try packInt(p, int)) orelse return;
|
||||
}
|
||||
},
|
||||
else => return p.errTok(.pragma_pack_int_ident, next),
|
||||
else => return Pragma.err(p.pp, next, .pragma_pack_int_ident, .{}),
|
||||
}
|
||||
}
|
||||
if (action == .push) {
|
||||
@ -87,9 +86,9 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
|
||||
} else {
|
||||
pack.pop(p, label);
|
||||
if (new_val != null) {
|
||||
try p.errTok(.pragma_pack_undefined_pop, arg);
|
||||
try Pragma.err(p.pp, arg, .pragma_pack_undefined_pop, .{});
|
||||
} else if (pack.stack.items.len == 0) {
|
||||
try p.errTok(.pragma_pack_empty_stack, arg);
|
||||
try Pragma.err(p.pp, arg, .pragma_pack_empty_stack, .{});
|
||||
}
|
||||
}
|
||||
if (new_val) |some| {
|
||||
@ -115,14 +114,14 @@ fn parserHandler(pragma: *Pragma, p: *Parser, start_idx: TokenIndex) Compilation
|
||||
}
|
||||
|
||||
if (tok_ids[idx] != .r_paren) {
|
||||
return p.errTok(.pragma_pack_rparen, idx);
|
||||
return Pragma.err(p.pp, idx, .pragma_pack_rparen, .{});
|
||||
}
|
||||
}
|
||||
|
||||
fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 {
|
||||
const res = p.parseNumberToken(tok_i) catch |err| switch (err) {
|
||||
error.ParsingFailed => {
|
||||
try p.errTok(.pragma_pack_int, tok_i);
|
||||
try Pragma.err(p.pp, tok_i, .pragma_pack_int, .{});
|
||||
return null;
|
||||
},
|
||||
else => |e| return e,
|
||||
@ -131,7 +130,7 @@ fn packInt(p: *Parser, tok_i: TokenIndex) Compilation.Error!?u8 {
|
||||
switch (int) {
|
||||
1, 2, 4, 8, 16 => return @intCast(int),
|
||||
else => {
|
||||
try p.errTok(.pragma_pack_int, tok_i);
|
||||
try Pragma.err(p.pp, tok_i, .pragma_pack_int, .{});
|
||||
return null;
|
||||
},
|
||||
}
|
||||
@ -156,9 +155,3 @@ fn pop(pack: *Pack, p: *Parser, maybe_label: ?[]const u8) void {
|
||||
p.pragma_pack = prev.val;
|
||||
}
|
||||
}
|
||||
|
||||
fn preserveTokens(_: *Pragma, pp: *Preprocessor, start_idx: TokenIndex) bool {
|
||||
_ = pp;
|
||||
_ = start_idx;
|
||||
return true;
|
||||
}
|
||||
|
||||
162
lib/compiler/aro/aro/record_layout.zig
vendored
162
lib/compiler/aro/aro/record_layout.zig
vendored
@ -2,15 +2,18 @@
|
||||
//! Licensed under MIT license: https://github.com/mahkoh/repr-c/tree/master/repc/facade
|
||||
|
||||
const std = @import("std");
|
||||
const Type = @import("Type.zig");
|
||||
|
||||
const Attribute = @import("Attribute.zig");
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Parser = @import("Parser.zig");
|
||||
const target_util = @import("target.zig");
|
||||
const TypeStore = @import("TypeStore.zig");
|
||||
const QualType = TypeStore.QualType;
|
||||
const Type = TypeStore.Type;
|
||||
const Record = Type.Record;
|
||||
const Field = Record.Field;
|
||||
const TypeLayout = Type.TypeLayout;
|
||||
const FieldLayout = Type.FieldLayout;
|
||||
const target_util = @import("target.zig");
|
||||
const RecordLayout = Type.Record.Layout;
|
||||
const FieldLayout = Type.Record.Field.Layout;
|
||||
|
||||
const BITS_PER_BYTE = 8;
|
||||
|
||||
@ -42,36 +45,33 @@ const SysVContext = struct {
|
||||
|
||||
comp: *const Compilation,
|
||||
|
||||
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) SysVContext {
|
||||
fn init(qt: QualType, comp: *const Compilation, pragma_pack: ?u8) SysVContext {
|
||||
const pack_value: ?u64 = if (pragma_pack) |pak| @as(u64, pak) * BITS_PER_BYTE else null;
|
||||
const req_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
|
||||
const req_align = @as(u32, (qt.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
|
||||
return SysVContext{
|
||||
.attr_packed = ty.hasAttribute(.@"packed"),
|
||||
.attr_packed = qt.hasAttribute(comp, .@"packed"),
|
||||
.max_field_align_bits = pack_value,
|
||||
.aligned_bits = req_align,
|
||||
.is_union = ty.is(.@"union"),
|
||||
.is_union = qt.is(comp, .@"union"),
|
||||
.size_bits = 0,
|
||||
.comp = comp,
|
||||
.ongoing_bitfield = null,
|
||||
};
|
||||
}
|
||||
|
||||
fn layoutFields(self: *SysVContext, rec: *const Record) !void {
|
||||
for (rec.fields, 0..) |*fld, fld_indx| {
|
||||
if (fld.ty.specifier == .invalid) continue;
|
||||
const type_layout = computeLayout(fld.ty, self.comp);
|
||||
fn layoutFields(self: *SysVContext, fields: []Type.Record.Field) !void {
|
||||
for (fields) |*field| {
|
||||
if (field.qt.isInvalid()) continue;
|
||||
const type_layout = computeLayout(field.qt, self.comp);
|
||||
|
||||
var field_attrs: ?[]const Attribute = null;
|
||||
if (rec.field_attributes) |attrs| {
|
||||
field_attrs = attrs[fld_indx];
|
||||
}
|
||||
const attributes = field.attributes(self.comp);
|
||||
if (self.comp.target.isMinGW()) {
|
||||
fld.layout = try self.layoutMinGWField(fld, field_attrs, type_layout);
|
||||
field.layout = try self.layoutMinGWField(field, attributes, type_layout);
|
||||
} else {
|
||||
if (fld.isRegularField()) {
|
||||
fld.layout = try self.layoutRegularField(field_attrs, type_layout);
|
||||
if (field.bit_width.unpack()) |bit_width| {
|
||||
field.layout = try self.layoutBitField(attributes, type_layout, field.name_tok != 0, bit_width);
|
||||
} else {
|
||||
fld.layout = try self.layoutBitField(field_attrs, type_layout, fld.isNamed(), fld.specifiedBitWidth());
|
||||
field.layout = try self.layoutRegularField(attributes, type_layout);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -83,7 +83,7 @@ const SysVContext = struct {
|
||||
/// - the field is a bit-field and the previous field was a non-zero-sized bit-field with the same type size
|
||||
/// - the field is a zero-sized bit-field and the previous field was not a non-zero-sized bit-field
|
||||
/// See test case 0068.
|
||||
fn ignoreTypeAlignment(is_attr_packed: bool, bit_width: ?u32, ongoing_bitfield: ?OngoingBitfield, fld_layout: TypeLayout) bool {
|
||||
fn ignoreTypeAlignment(is_attr_packed: bool, bit_width: ?u32, ongoing_bitfield: ?OngoingBitfield, fld_layout: RecordLayout) bool {
|
||||
if (is_attr_packed) return true;
|
||||
if (bit_width) |width| {
|
||||
if (ongoing_bitfield) |ongoing| {
|
||||
@ -98,12 +98,12 @@ const SysVContext = struct {
|
||||
fn layoutMinGWField(
|
||||
self: *SysVContext,
|
||||
field: *const Field,
|
||||
field_attrs: ?[]const Attribute,
|
||||
field_layout: TypeLayout,
|
||||
field_attrs: []const Attribute,
|
||||
field_layout: RecordLayout,
|
||||
) !FieldLayout {
|
||||
const annotation_alignment_bits = BITS_PER_BYTE * @as(u32, (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1));
|
||||
const annotation_alignment_bits = BITS_PER_BYTE * (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(field_attrs)) orelse 1);
|
||||
const is_attr_packed = self.attr_packed or isPacked(field_attrs);
|
||||
const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width, self.ongoing_bitfield, field_layout);
|
||||
const ignore_type_alignment = ignoreTypeAlignment(is_attr_packed, field.bit_width.unpack(), self.ongoing_bitfield, field_layout);
|
||||
|
||||
var field_alignment_bits: u64 = field_layout.field_alignment_bits;
|
||||
if (ignore_type_alignment) {
|
||||
@ -120,16 +120,16 @@ const SysVContext = struct {
|
||||
// - the field is a non-zero-width bit-field and not packed.
|
||||
// See test case 0069.
|
||||
const update_record_alignment =
|
||||
field.isRegularField() or
|
||||
(field.specifiedBitWidth() == 0 and self.ongoing_bitfield != null) or
|
||||
(field.specifiedBitWidth() != 0 and !is_attr_packed);
|
||||
field.bit_width == .null or
|
||||
(field.bit_width.unpack().? == 0 and self.ongoing_bitfield != null) or
|
||||
(field.bit_width.unpack().? != 0 and !is_attr_packed);
|
||||
|
||||
// If a field affects the alignment of a record, the alignment is calculated in the
|
||||
// usual way except that __attribute__((packed)) is ignored on a zero-width bit-field.
|
||||
// See test case 0068.
|
||||
if (update_record_alignment) {
|
||||
var ty_alignment_bits = field_layout.field_alignment_bits;
|
||||
if (is_attr_packed and (field.isRegularField() or field.specifiedBitWidth() != 0)) {
|
||||
if (is_attr_packed and (field.bit_width == .null or field.bit_width.unpack().? != 0)) {
|
||||
ty_alignment_bits = BITS_PER_BYTE;
|
||||
}
|
||||
ty_alignment_bits = @max(ty_alignment_bits, annotation_alignment_bits);
|
||||
@ -145,10 +145,10 @@ const SysVContext = struct {
|
||||
// @attr_packed _ { size: 64, alignment: 64 }long long:0,
|
||||
// { offset: 8, size: 8 }d { size: 8, alignment: 8 }char,
|
||||
// }
|
||||
if (field.isRegularField()) {
|
||||
return self.layoutRegularFieldMinGW(field_layout.size_bits, field_alignment_bits);
|
||||
if (field.bit_width.unpack()) |bit_width| {
|
||||
return self.layoutBitFieldMinGW(field_layout.size_bits, field_alignment_bits, field.name_tok != 0, bit_width);
|
||||
} else {
|
||||
return self.layoutBitFieldMinGW(field_layout.size_bits, field_alignment_bits, field.isNamed(), field.specifiedBitWidth());
|
||||
return self.layoutRegularFieldMinGW(field_layout.size_bits, field_alignment_bits);
|
||||
}
|
||||
}
|
||||
|
||||
@ -227,8 +227,8 @@ const SysVContext = struct {
|
||||
|
||||
fn layoutRegularField(
|
||||
self: *SysVContext,
|
||||
fld_attrs: ?[]const Attribute,
|
||||
fld_layout: TypeLayout,
|
||||
fld_attrs: []const Attribute,
|
||||
fld_layout: RecordLayout,
|
||||
) !FieldLayout {
|
||||
var fld_align_bits = fld_layout.field_alignment_bits;
|
||||
|
||||
@ -240,7 +240,7 @@ const SysVContext = struct {
|
||||
|
||||
// The field alignment can be increased by __attribute__((aligned)) annotations on the
|
||||
// field. See test case 0085.
|
||||
if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
|
||||
if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
|
||||
fld_align_bits = @max(fld_align_bits, @as(u32, anno) * BITS_PER_BYTE);
|
||||
}
|
||||
|
||||
@ -268,8 +268,8 @@ const SysVContext = struct {
|
||||
|
||||
fn layoutBitField(
|
||||
self: *SysVContext,
|
||||
fld_attrs: ?[]const Attribute,
|
||||
fld_layout: TypeLayout,
|
||||
fld_attrs: []const Attribute,
|
||||
fld_layout: RecordLayout,
|
||||
is_named: bool,
|
||||
bit_width: u64,
|
||||
) !FieldLayout {
|
||||
@ -302,7 +302,7 @@ const SysVContext = struct {
|
||||
const attr_packed = self.attr_packed or isPacked(fld_attrs);
|
||||
const has_packing_annotation = attr_packed or self.max_field_align_bits != null;
|
||||
|
||||
const annotation_alignment = if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1;
|
||||
const annotation_alignment = if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| @as(u32, anno) * BITS_PER_BYTE else 1;
|
||||
|
||||
const first_unused_bit: u64 = if (self.is_union) 0 else self.size_bits;
|
||||
var field_align_bits: u64 = 1;
|
||||
@ -403,9 +403,9 @@ const MsvcContext = struct {
|
||||
is_union: bool,
|
||||
comp: *const Compilation,
|
||||
|
||||
fn init(ty: Type, comp: *const Compilation, pragma_pack: ?u8) MsvcContext {
|
||||
fn init(qt: QualType, comp: *const Compilation, pragma_pack: ?u8) MsvcContext {
|
||||
var pack_value: ?u32 = null;
|
||||
if (ty.hasAttribute(.@"packed")) {
|
||||
if (qt.hasAttribute(comp, .@"packed")) {
|
||||
// __attribute__((packed)) behaves like #pragma pack(1) in clang. See test case 0056.
|
||||
pack_value = BITS_PER_BYTE;
|
||||
}
|
||||
@ -420,8 +420,8 @@ const MsvcContext = struct {
|
||||
|
||||
// The required alignment can be increased by adding a __declspec(align)
|
||||
// annotation. See test case 0023.
|
||||
const must_align = @as(u32, (ty.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
|
||||
return MsvcContext{
|
||||
const must_align = @as(u32, (qt.requestedAlignment(comp) orelse 1)) * BITS_PER_BYTE;
|
||||
return .{
|
||||
.req_align_bits = must_align,
|
||||
.pointer_align_bits = must_align,
|
||||
.field_align_bits = must_align,
|
||||
@ -429,26 +429,26 @@ const MsvcContext = struct {
|
||||
.max_field_align_bits = pack_value,
|
||||
.ongoing_bitfield = null,
|
||||
.contains_non_bitfield = false,
|
||||
.is_union = ty.is(.@"union"),
|
||||
.is_union = qt.is(comp, .@"union"),
|
||||
.comp = comp,
|
||||
};
|
||||
}
|
||||
|
||||
fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: ?[]const Attribute) !FieldLayout {
|
||||
const type_layout = computeLayout(fld.ty, self.comp);
|
||||
fn layoutField(self: *MsvcContext, fld: *const Field, fld_attrs: []const Attribute) !FieldLayout {
|
||||
const type_layout = computeLayout(fld.qt, self.comp);
|
||||
|
||||
// The required alignment of the field is the maximum of the required alignment of the
|
||||
// underlying type and the __declspec(align) annotation on the field itself.
|
||||
// See test case 0028.
|
||||
var req_align = type_layout.required_alignment_bits;
|
||||
if (Type.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
|
||||
if (QualType.annotationAlignment(self.comp, Attribute.Iterator.initSlice(fld_attrs))) |anno| {
|
||||
req_align = @max(@as(u32, anno) * BITS_PER_BYTE, req_align);
|
||||
}
|
||||
|
||||
// The required alignment of a record is the maximum of the required alignments of its
|
||||
// fields except that the required alignment of bitfields is ignored.
|
||||
// See test case 0029.
|
||||
if (fld.isRegularField()) {
|
||||
if (fld.bit_width == .null) {
|
||||
self.req_align_bits = @max(self.req_align_bits, req_align);
|
||||
}
|
||||
|
||||
@ -459,7 +459,7 @@ const MsvcContext = struct {
|
||||
fld_align_bits = @min(fld_align_bits, max_align);
|
||||
}
|
||||
// check the requested alignment of the field type.
|
||||
if (fld.ty.requestedAlignment(self.comp)) |type_req_align| {
|
||||
if (fld.qt.requestedAlignment(self.comp)) |type_req_align| {
|
||||
fld_align_bits = @max(fld_align_bits, type_req_align * 8);
|
||||
}
|
||||
|
||||
@ -471,10 +471,10 @@ const MsvcContext = struct {
|
||||
// __attribute__((packed)) on a field is a clang extension. It behaves as if #pragma
|
||||
// pack(1) had been applied only to this field. See test case 0057.
|
||||
fld_align_bits = @max(fld_align_bits, req_align);
|
||||
if (fld.isRegularField()) {
|
||||
return self.layoutRegularField(type_layout.size_bits, fld_align_bits);
|
||||
if (fld.bit_width.unpack()) |bit_width| {
|
||||
return self.layoutBitField(type_layout.size_bits, fld_align_bits, bit_width);
|
||||
} else {
|
||||
return self.layoutBitField(type_layout.size_bits, fld_align_bits, fld.specifiedBitWidth());
|
||||
return self.layoutRegularField(type_layout.size_bits, fld_align_bits);
|
||||
}
|
||||
}
|
||||
|
||||
@ -567,16 +567,16 @@ const MsvcContext = struct {
|
||||
}
|
||||
};
|
||||
|
||||
pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pack: ?u8) Error!void {
|
||||
pub fn compute(fields: []Type.Record.Field, qt: QualType, comp: *const Compilation, pragma_pack: ?u8) Error!Type.Record.Layout {
|
||||
switch (comp.langopts.emulate) {
|
||||
.gcc, .clang => {
|
||||
var context = SysVContext.init(ty, comp, pragma_pack);
|
||||
var context = SysVContext.init(qt, comp, pragma_pack);
|
||||
|
||||
try context.layoutFields(rec);
|
||||
try context.layoutFields(fields);
|
||||
|
||||
context.size_bits = try alignForward(context.size_bits, context.aligned_bits);
|
||||
|
||||
rec.type_layout = .{
|
||||
return .{
|
||||
.size_bits = context.size_bits,
|
||||
.field_alignment_bits = context.aligned_bits,
|
||||
.pointer_alignment_bits = context.aligned_bits,
|
||||
@ -584,15 +584,10 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
|
||||
};
|
||||
},
|
||||
.msvc => {
|
||||
var context = MsvcContext.init(ty, comp, pragma_pack);
|
||||
for (rec.fields, 0..) |*fld, fld_indx| {
|
||||
if (fld.ty.specifier == .invalid) continue;
|
||||
var field_attrs: ?[]const Attribute = null;
|
||||
if (rec.field_attributes) |attrs| {
|
||||
field_attrs = attrs[fld_indx];
|
||||
}
|
||||
|
||||
fld.layout = try context.layoutField(fld, field_attrs);
|
||||
var context = MsvcContext.init(qt, comp, pragma_pack);
|
||||
for (fields) |*field| {
|
||||
if (field.qt.isInvalid()) continue;
|
||||
field.layout = try context.layoutField(field, field.attributes(comp));
|
||||
}
|
||||
if (context.size_bits == 0) {
|
||||
// As an extension, MSVC allows records that only contain zero-sized bitfields and empty
|
||||
@ -601,7 +596,7 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
|
||||
context.handleZeroSizedRecord();
|
||||
}
|
||||
context.size_bits = try alignForward(context.size_bits, context.pointer_align_bits);
|
||||
rec.type_layout = .{
|
||||
return .{
|
||||
.size_bits = context.size_bits,
|
||||
.field_alignment_bits = context.field_align_bits,
|
||||
.pointer_alignment_bits = context.pointer_align_bits,
|
||||
@ -611,23 +606,26 @@ pub fn compute(rec: *Type.Record, ty: Type, comp: *const Compilation, pragma_pac
|
||||
}
|
||||
}
|
||||
|
||||
fn computeLayout(ty: Type, comp: *const Compilation) TypeLayout {
|
||||
if (ty.getRecord()) |rec| {
|
||||
const requested = BITS_PER_BYTE * (ty.requestedAlignment(comp) orelse 0);
|
||||
return .{
|
||||
.size_bits = rec.type_layout.size_bits,
|
||||
.pointer_alignment_bits = @max(requested, rec.type_layout.pointer_alignment_bits),
|
||||
.field_alignment_bits = @max(requested, rec.type_layout.field_alignment_bits),
|
||||
.required_alignment_bits = rec.type_layout.required_alignment_bits,
|
||||
};
|
||||
} else {
|
||||
const type_align = ty.alignof(comp) * BITS_PER_BYTE;
|
||||
return .{
|
||||
.size_bits = ty.bitSizeof(comp) orelse 0,
|
||||
.pointer_alignment_bits = type_align,
|
||||
.field_alignment_bits = type_align,
|
||||
.required_alignment_bits = BITS_PER_BYTE,
|
||||
};
|
||||
fn computeLayout(qt: QualType, comp: *const Compilation) RecordLayout {
|
||||
switch (qt.base(comp).type) {
|
||||
.@"struct", .@"union" => |record| {
|
||||
const requested = BITS_PER_BYTE * (qt.requestedAlignment(comp) orelse 0);
|
||||
return .{
|
||||
.size_bits = record.layout.?.size_bits,
|
||||
.pointer_alignment_bits = @max(requested, record.layout.?.pointer_alignment_bits),
|
||||
.field_alignment_bits = @max(requested, record.layout.?.field_alignment_bits),
|
||||
.required_alignment_bits = record.layout.?.required_alignment_bits,
|
||||
};
|
||||
},
|
||||
else => {
|
||||
const type_align = qt.alignof(comp) * BITS_PER_BYTE;
|
||||
return .{
|
||||
.size_bits = qt.bitSizeofOrNull(comp) orelse 0,
|
||||
.pointer_alignment_bits = type_align,
|
||||
.field_alignment_bits = type_align,
|
||||
.required_alignment_bits = BITS_PER_BYTE,
|
||||
};
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
387
lib/compiler/aro/aro/target.zig
vendored
387
lib/compiler/aro/aro/target.zig
vendored
@ -1,15 +1,18 @@
|
||||
const std = @import("std");
|
||||
|
||||
const backend = @import("../backend.zig");
|
||||
|
||||
const LangOpts = @import("LangOpts.zig");
|
||||
const Type = @import("Type.zig");
|
||||
const TargetSet = @import("Builtins/Properties.zig").TargetSet;
|
||||
const QualType = @import("TypeStore.zig").QualType;
|
||||
|
||||
/// intmax_t for this target
|
||||
pub fn intMaxType(target: std.Target) Type {
|
||||
pub fn intMaxType(target: std.Target) QualType {
|
||||
switch (target.cpu.arch) {
|
||||
.aarch64,
|
||||
.aarch64_be,
|
||||
.sparc64,
|
||||
=> if (target.os.tag != .openbsd) return .{ .specifier = .long },
|
||||
=> if (target.os.tag != .openbsd) return .long,
|
||||
|
||||
.bpfel,
|
||||
.bpfeb,
|
||||
@ -19,28 +22,28 @@ pub fn intMaxType(target: std.Target) Type {
|
||||
.powerpc64,
|
||||
.powerpc64le,
|
||||
.ve,
|
||||
=> return .{ .specifier = .long },
|
||||
=> return .long,
|
||||
|
||||
.x86_64 => switch (target.os.tag) {
|
||||
.windows, .openbsd => {},
|
||||
else => switch (target.abi) {
|
||||
.gnux32, .muslx32 => {},
|
||||
else => return .{ .specifier = .long },
|
||||
else => return .long,
|
||||
},
|
||||
},
|
||||
|
||||
else => {},
|
||||
}
|
||||
return .{ .specifier = .long_long };
|
||||
return .long_long;
|
||||
}
|
||||
|
||||
/// intptr_t for this target
|
||||
pub fn intPtrType(target: std.Target) Type {
|
||||
if (target.os.tag == .haiku) return .{ .specifier = .long };
|
||||
pub fn intPtrType(target: std.Target) QualType {
|
||||
if (target.os.tag == .haiku) return .long;
|
||||
|
||||
switch (target.cpu.arch) {
|
||||
.aarch64, .aarch64_be => switch (target.os.tag) {
|
||||
.windows => return .{ .specifier = .long_long },
|
||||
.windows => return .long_long,
|
||||
else => {},
|
||||
},
|
||||
|
||||
@ -55,28 +58,28 @@ pub fn intPtrType(target: std.Target) Type {
|
||||
.spirv32,
|
||||
.arc,
|
||||
.avr,
|
||||
=> return .{ .specifier = .int },
|
||||
=> return .int,
|
||||
|
||||
.sparc => switch (target.os.tag) {
|
||||
.netbsd, .openbsd => {},
|
||||
else => return .{ .specifier = .int },
|
||||
else => return .int,
|
||||
},
|
||||
|
||||
.powerpc, .powerpcle => switch (target.os.tag) {
|
||||
.linux, .freebsd, .netbsd => return .{ .specifier = .int },
|
||||
.linux, .freebsd, .netbsd => return .int,
|
||||
else => {},
|
||||
},
|
||||
|
||||
// 32-bit x86 Darwin, OpenBSD, and RTEMS use long (the default); others use int
|
||||
.x86 => switch (target.os.tag) {
|
||||
.openbsd, .rtems => {},
|
||||
else => if (!target.os.tag.isDarwin()) return .{ .specifier = .int },
|
||||
else => if (!target.os.tag.isDarwin()) return .int,
|
||||
},
|
||||
|
||||
.x86_64 => switch (target.os.tag) {
|
||||
.windows => return .{ .specifier = .long_long },
|
||||
.windows => return .long_long,
|
||||
else => switch (target.abi) {
|
||||
.gnux32, .muslx32 => return .{ .specifier = .int },
|
||||
.gnux32, .muslx32 => return .int,
|
||||
else => {},
|
||||
},
|
||||
},
|
||||
@ -84,29 +87,29 @@ pub fn intPtrType(target: std.Target) Type {
|
||||
else => {},
|
||||
}
|
||||
|
||||
return .{ .specifier = .long };
|
||||
return .long;
|
||||
}
|
||||
|
||||
/// int16_t for this target
|
||||
pub fn int16Type(target: std.Target) Type {
|
||||
pub fn int16Type(target: std.Target) QualType {
|
||||
return switch (target.cpu.arch) {
|
||||
.avr => .{ .specifier = .int },
|
||||
else => .{ .specifier = .short },
|
||||
.avr => .int,
|
||||
else => .short,
|
||||
};
|
||||
}
|
||||
|
||||
/// sig_atomic_t for this target
|
||||
pub fn sigAtomicType(target: std.Target) Type {
|
||||
if (target.cpu.arch.isWasm()) return .{ .specifier = .long };
|
||||
pub fn sigAtomicType(target: std.Target) QualType {
|
||||
if (target.cpu.arch.isWasm()) return .long;
|
||||
return switch (target.cpu.arch) {
|
||||
.avr => .{ .specifier = .schar },
|
||||
.msp430 => .{ .specifier = .long },
|
||||
else => .{ .specifier = .int },
|
||||
.avr => .schar,
|
||||
.msp430 => .long,
|
||||
else => .int,
|
||||
};
|
||||
}
|
||||
|
||||
/// int64_t for this target
|
||||
pub fn int64Type(target: std.Target) Type {
|
||||
pub fn int64Type(target: std.Target) QualType {
|
||||
switch (target.cpu.arch) {
|
||||
.loongarch64,
|
||||
.ve,
|
||||
@ -116,20 +119,20 @@ pub fn int64Type(target: std.Target) Type {
|
||||
.powerpc64le,
|
||||
.bpfel,
|
||||
.bpfeb,
|
||||
=> return .{ .specifier = .long },
|
||||
=> return .long,
|
||||
|
||||
.sparc64 => return intMaxType(target),
|
||||
|
||||
.x86, .x86_64 => if (!target.os.tag.isDarwin()) return intMaxType(target),
|
||||
.aarch64, .aarch64_be => if (!target.os.tag.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .{ .specifier = .long },
|
||||
.aarch64, .aarch64_be => if (!target.os.tag.isDarwin() and target.os.tag != .openbsd and target.os.tag != .windows) return .long,
|
||||
else => {},
|
||||
}
|
||||
return .{ .specifier = .long_long };
|
||||
return .long_long;
|
||||
}
|
||||
|
||||
pub fn float80Type(target: std.Target) ?Type {
|
||||
pub fn float80Type(target: std.Target) ?QualType {
|
||||
switch (target.cpu.arch) {
|
||||
.x86, .x86_64 => return .{ .specifier = .long_double },
|
||||
.x86, .x86_64 => return .long_double,
|
||||
else => {},
|
||||
}
|
||||
return null;
|
||||
@ -165,7 +168,7 @@ pub fn ignoreNonZeroSizedBitfieldTypeAlignment(target: std.Target) bool {
|
||||
switch (target.cpu.arch) {
|
||||
.avr => return true,
|
||||
.arm => {
|
||||
if (target.cpu.has(.arm, .has_v7)) {
|
||||
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
|
||||
switch (target.os.tag) {
|
||||
.ios => return true,
|
||||
else => return false,
|
||||
@ -188,7 +191,7 @@ pub fn minZeroWidthBitfieldAlignment(target: std.Target) ?u29 {
|
||||
switch (target.cpu.arch) {
|
||||
.avr => return 8,
|
||||
.arm => {
|
||||
if (target.cpu.has(.arm, .has_v7)) {
|
||||
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
|
||||
switch (target.os.tag) {
|
||||
.ios => return 32,
|
||||
else => return null,
|
||||
@ -206,7 +209,7 @@ pub fn unnamedFieldAffectsAlignment(target: std.Target) bool {
|
||||
return true;
|
||||
},
|
||||
.armeb => {
|
||||
if (target.cpu.has(.arm, .has_v7)) {
|
||||
if (std.Target.arm.featureSetHas(target.cpu.features, .has_v7)) {
|
||||
if (std.Target.Abi.default(target.cpu.arch, target.os.tag) == .eabi) return true;
|
||||
}
|
||||
},
|
||||
@ -233,7 +236,7 @@ pub fn defaultAlignment(target: std.Target) u29 {
|
||||
switch (target.cpu.arch) {
|
||||
.avr => return 1,
|
||||
.arm => if (target.abi.isAndroid() or target.os.tag == .ios) return 16 else return 8,
|
||||
.sparc => if (target.cpu.has(.sparc, .v9)) return 16 else return 8,
|
||||
.sparc => if (std.Target.sparc.featureSetHas(target.cpu.features, .v9)) return 16 else return 8,
|
||||
.mips, .mipsel => switch (target.abi) {
|
||||
.none, .gnuabi64 => return 16,
|
||||
else => return 8,
|
||||
@ -245,7 +248,8 @@ pub fn defaultAlignment(target: std.Target) u29 {
|
||||
pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
|
||||
// Android is linux but not gcc, so these checks go first
|
||||
// the rest for documentation as fn returns .clang
|
||||
if (target.abi.isAndroid() or
|
||||
if (target.os.tag.isDarwin() or
|
||||
target.abi.isAndroid() or
|
||||
target.os.tag.isBSD() or
|
||||
target.os.tag == .fuchsia or
|
||||
target.os.tag == .solaris or
|
||||
@ -271,7 +275,7 @@ pub fn systemCompiler(target: std.Target) LangOpts.Compiler {
|
||||
pub fn hasFloat128(target: std.Target) bool {
|
||||
if (target.cpu.arch.isWasm()) return true;
|
||||
if (target.os.tag.isDarwin()) return false;
|
||||
if (target.cpu.arch.isPowerPC()) return target.cpu.has(.powerpc, .float128);
|
||||
if (target.cpu.arch.isPowerPC()) return std.Target.powerpc.featureSetHas(target.cpu.features, .float128);
|
||||
return switch (target.os.tag) {
|
||||
.dragonfly,
|
||||
.haiku,
|
||||
@ -339,7 +343,7 @@ pub const FPSemantics = enum {
|
||||
.spirv32,
|
||||
.spirv64,
|
||||
=> return .IEEEHalf,
|
||||
.x86, .x86_64 => if (target.cpu.has(.x86, .sse2)) return .IEEEHalf,
|
||||
.x86, .x86_64 => if (std.Target.x86.featureSetHas(target.cpu.features, .sse2)) return .IEEEHalf,
|
||||
else => {},
|
||||
}
|
||||
return null;
|
||||
@ -374,6 +378,10 @@ pub fn isCygwinMinGW(target: std.Target) bool {
|
||||
return target.os.tag == .windows and (target.abi == .gnu or target.abi == .cygnus);
|
||||
}
|
||||
|
||||
pub fn isPS(target: std.Target) bool {
|
||||
return (target.os.tag == .ps4 or target.os.tag == .ps5) and target.cpu.arch == .x86_64;
|
||||
}
|
||||
|
||||
pub fn builtinEnabled(target: std.Target, enabled_for: TargetSet) bool {
|
||||
var it = enabled_for.iterator();
|
||||
while (it.next()) |val| {
|
||||
@ -404,7 +412,7 @@ pub fn defaultFpEvalMethod(target: std.Target) LangOpts.FPEvalMethod {
|
||||
return .double;
|
||||
}
|
||||
}
|
||||
if (target.cpu.has(.x86, .sse)) {
|
||||
if (std.Target.x86.featureSetHas(target.cpu.features, .sse)) {
|
||||
return .source;
|
||||
}
|
||||
return .extended;
|
||||
@ -497,6 +505,8 @@ pub fn get32BitArchVariant(target: std.Target) ?std.Target {
|
||||
.spirv32,
|
||||
.loongarch32,
|
||||
.xtensa,
|
||||
.propeller,
|
||||
.or1k,
|
||||
=> {}, // Already 32 bit
|
||||
|
||||
.aarch64 => copy.cpu.arch = .arm,
|
||||
@ -530,6 +540,8 @@ pub fn get64BitArchVariant(target: std.Target) ?std.Target {
|
||||
.msp430,
|
||||
.xcore,
|
||||
.xtensa,
|
||||
.propeller,
|
||||
.or1k,
|
||||
=> return null,
|
||||
|
||||
.aarch64,
|
||||
@ -621,11 +633,14 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
|
||||
.nvptx64 => "nvptx64",
|
||||
.spirv32 => "spirv32",
|
||||
.spirv64 => "spirv64",
|
||||
.kalimba => "kalimba",
|
||||
.lanai => "lanai",
|
||||
.wasm32 => "wasm32",
|
||||
.wasm64 => "wasm64",
|
||||
.ve => "ve",
|
||||
// Note: propeller1, kalimba and or1k are not supported in LLVM; this is the Zig arch name
|
||||
.kalimba => "kalimba",
|
||||
.propeller => "propeller",
|
||||
.or1k => "or1k",
|
||||
};
|
||||
writer.writeAll(llvm_arch) catch unreachable;
|
||||
writer.writeByte('-') catch unreachable;
|
||||
@ -721,64 +736,262 @@ pub fn toLLVMTriple(target: std.Target, buf: []u8) []const u8 {
|
||||
return writer.buffered();
|
||||
}
|
||||
|
||||
pub const DefaultPIStatus = enum { yes, no, depends_on_linker };
|
||||
|
||||
pub fn isPIEDefault(target: std.Target) DefaultPIStatus {
|
||||
return switch (target.os.tag) {
|
||||
.aix,
|
||||
.haiku,
|
||||
|
||||
.macos,
|
||||
.ios,
|
||||
.tvos,
|
||||
.watchos,
|
||||
.visionos,
|
||||
.driverkit,
|
||||
|
||||
.dragonfly,
|
||||
.netbsd,
|
||||
.freebsd,
|
||||
.solaris,
|
||||
|
||||
.cuda,
|
||||
.amdhsa,
|
||||
.amdpal,
|
||||
.mesa3d,
|
||||
|
||||
.ps4,
|
||||
.ps5,
|
||||
|
||||
.hurd,
|
||||
.zos,
|
||||
=> .no,
|
||||
|
||||
.openbsd,
|
||||
.fuchsia,
|
||||
=> .yes,
|
||||
|
||||
.linux => {
|
||||
if (target.abi == .ohos)
|
||||
return .yes;
|
||||
|
||||
switch (target.cpu.arch) {
|
||||
.ve => return .no,
|
||||
else => return if (target.os.tag == .linux or target.abi.isAndroid() or target.abi.isMusl()) .yes else .no,
|
||||
}
|
||||
},
|
||||
|
||||
.windows => {
|
||||
if (target.isMinGW())
|
||||
return .no;
|
||||
|
||||
if (target.abi == .itanium)
|
||||
return if (target.cpu.arch == .x86_64) .yes else .no;
|
||||
|
||||
if (target.abi == .msvc or target.abi == .none)
|
||||
return .depends_on_linker;
|
||||
|
||||
return .no;
|
||||
},
|
||||
|
||||
else => {
|
||||
switch (target.cpu.arch) {
|
||||
.hexagon => {
|
||||
// CLANG_DEFAULT_PIE_ON_LINUX
|
||||
return if (target.os.tag == .linux or target.abi.isAndroid() or target.abi.isMusl()) .yes else .no;
|
||||
},
|
||||
|
||||
else => return .no,
|
||||
}
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isPICdefault(target: std.Target) DefaultPIStatus {
|
||||
return switch (target.os.tag) {
|
||||
.aix,
|
||||
.haiku,
|
||||
|
||||
.macos,
|
||||
.ios,
|
||||
.tvos,
|
||||
.watchos,
|
||||
.visionos,
|
||||
.driverkit,
|
||||
|
||||
.amdhsa,
|
||||
.amdpal,
|
||||
.mesa3d,
|
||||
|
||||
.ps4,
|
||||
.ps5,
|
||||
=> .yes,
|
||||
|
||||
.fuchsia,
|
||||
.cuda,
|
||||
.zos,
|
||||
=> .no,
|
||||
|
||||
.dragonfly,
|
||||
.openbsd,
|
||||
.netbsd,
|
||||
.freebsd,
|
||||
.solaris,
|
||||
.hurd,
|
||||
=> {
|
||||
return switch (target.cpu.arch) {
|
||||
.mips64, .mips64el => .yes,
|
||||
else => .no,
|
||||
};
|
||||
},
|
||||
|
||||
.linux => {
|
||||
if (target.abi == .ohos)
|
||||
return .no;
|
||||
|
||||
return switch (target.cpu.arch) {
|
||||
.mips64, .mips64el => .yes,
|
||||
else => .no,
|
||||
};
|
||||
},
|
||||
|
||||
.windows => {
|
||||
if (target.isMinGW())
|
||||
return if (target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64) .yes else .no;
|
||||
|
||||
if (target.abi == .itanium)
|
||||
return if (target.cpu.arch == .x86_64) .yes else .no;
|
||||
|
||||
if (target.abi == .msvc or target.abi == .none)
|
||||
return .depends_on_linker;
|
||||
|
||||
if (target.ofmt == .macho)
|
||||
return .yes;
|
||||
|
||||
return switch (target.cpu.arch) {
|
||||
.x86_64, .mips64, .mips64el => .yes,
|
||||
else => .no,
|
||||
};
|
||||
},
|
||||
|
||||
else => {
|
||||
if (target.ofmt == .macho)
|
||||
return .yes;
|
||||
|
||||
return switch (target.cpu.arch) {
|
||||
.mips64, .mips64el => .yes,
|
||||
else => .no,
|
||||
};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
pub fn isPICDefaultForced(target: std.Target) DefaultPIStatus {
|
||||
return switch (target.os.tag) {
|
||||
.aix, .amdhsa, .amdpal, .mesa3d => .yes,
|
||||
|
||||
.haiku,
|
||||
.dragonfly,
|
||||
.openbsd,
|
||||
.netbsd,
|
||||
.freebsd,
|
||||
.solaris,
|
||||
.cuda,
|
||||
.ps4,
|
||||
.ps5,
|
||||
.hurd,
|
||||
.linux,
|
||||
.fuchsia,
|
||||
.zos,
|
||||
=> .no,
|
||||
|
||||
.windows => {
|
||||
if (target.isMinGW())
|
||||
return .yes;
|
||||
|
||||
if (target.abi == .itanium)
|
||||
return if (target.cpu.arch == .x86_64) .yes else .no;
|
||||
|
||||
// if (bfd) return target.cpu.arch == .x86_64 else target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64;
|
||||
if (target.abi == .msvc or target.abi == .none)
|
||||
return .depends_on_linker;
|
||||
|
||||
if (target.ofmt == .macho)
|
||||
return if (target.cpu.arch == .aarch64 or target.cpu.arch == .x86_64) .yes else .no;
|
||||
|
||||
return if (target.cpu.arch == .x86_64) .yes else .no;
|
||||
},
|
||||
|
||||
.macos,
|
||||
.ios,
|
||||
.tvos,
|
||||
.watchos,
|
||||
.visionos,
|
||||
.driverkit,
|
||||
=> if (target.cpu.arch == .x86_64 or target.cpu.arch == .aarch64) .yes else .no,
|
||||
|
||||
else => {
|
||||
return switch (target.cpu.arch) {
|
||||
.hexagon,
|
||||
.lanai,
|
||||
.avr,
|
||||
.riscv32,
|
||||
.riscv64,
|
||||
.csky,
|
||||
.xcore,
|
||||
.wasm32,
|
||||
.wasm64,
|
||||
.ve,
|
||||
.spirv32,
|
||||
.spirv64,
|
||||
=> .no,
|
||||
|
||||
.msp430 => .yes,
|
||||
|
||||
else => {
|
||||
if (target.ofmt == .macho)
|
||||
return if (target.cpu.arch == .aarch64 or target.cpu.arch == .x86_64) .yes else .no;
|
||||
return .no;
|
||||
},
|
||||
};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
test "alignment functions - smoke test" {
|
||||
var target: std.Target = undefined;
|
||||
const x86 = std.Target.Cpu.Arch.x86_64;
|
||||
target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none);
|
||||
target.cpu = std.Target.Cpu.baseline(x86, target.os);
|
||||
target.abi = std.Target.Abi.default(x86, target.os.tag);
|
||||
const linux: std.Target.Os = .{ .tag = .linux, .version_range = .{ .none = {} } };
|
||||
const x86_64_target: std.Target = .{
|
||||
.abi = std.Target.Abi.default(.x86_64, linux.tag),
|
||||
.cpu = std.Target.Cpu.Model.generic(.x86_64).toCpu(.x86_64),
|
||||
.os = linux,
|
||||
.ofmt = .elf,
|
||||
};
|
||||
|
||||
try std.testing.expect(isTlsSupported(target));
|
||||
try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(target));
|
||||
try std.testing.expect(minZeroWidthBitfieldAlignment(target) == null);
|
||||
try std.testing.expect(!unnamedFieldAffectsAlignment(target));
|
||||
try std.testing.expect(defaultAlignment(target) == 16);
|
||||
try std.testing.expect(!packAllEnums(target));
|
||||
try std.testing.expect(systemCompiler(target) == .gcc);
|
||||
|
||||
const arm = std.Target.Cpu.Arch.arm;
|
||||
target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none);
|
||||
target.cpu = std.Target.Cpu.baseline(arm, target.os);
|
||||
target.abi = std.Target.Abi.default(arm, target.os.tag);
|
||||
|
||||
try std.testing.expect(!isTlsSupported(target));
|
||||
try std.testing.expect(ignoreNonZeroSizedBitfieldTypeAlignment(target));
|
||||
try std.testing.expectEqual(@as(?u29, 32), minZeroWidthBitfieldAlignment(target));
|
||||
try std.testing.expect(unnamedFieldAffectsAlignment(target));
|
||||
try std.testing.expect(defaultAlignment(target) == 16);
|
||||
try std.testing.expect(!packAllEnums(target));
|
||||
try std.testing.expect(systemCompiler(target) == .clang);
|
||||
try std.testing.expect(isTlsSupported(x86_64_target));
|
||||
try std.testing.expect(!ignoreNonZeroSizedBitfieldTypeAlignment(x86_64_target));
|
||||
try std.testing.expect(minZeroWidthBitfieldAlignment(x86_64_target) == null);
|
||||
try std.testing.expect(!unnamedFieldAffectsAlignment(x86_64_target));
|
||||
try std.testing.expect(defaultAlignment(x86_64_target) == 16);
|
||||
try std.testing.expect(!packAllEnums(x86_64_target));
|
||||
try std.testing.expect(systemCompiler(x86_64_target) == .gcc);
|
||||
}
|
||||
|
||||
test "target size/align tests" {
|
||||
var comp: @import("Compilation.zig") = undefined;
|
||||
|
||||
const x86 = std.Target.Cpu.Arch.x86;
|
||||
comp.target.cpu.arch = x86;
|
||||
comp.target.cpu.model = &std.Target.x86.cpu.i586;
|
||||
comp.target.os = std.Target.Os.Tag.defaultVersionRange(.linux, x86, .none);
|
||||
comp.target.abi = std.Target.Abi.gnu;
|
||||
|
||||
const tt: Type = .{
|
||||
.specifier = .long_long,
|
||||
const linux: std.Target.Os = .{ .tag = .linux, .version_range = .{ .none = {} } };
|
||||
const x86_target: std.Target = .{
|
||||
.abi = std.Target.Abi.default(.x86, linux.tag),
|
||||
.cpu = std.Target.Cpu.Model.generic(.x86).toCpu(.x86),
|
||||
.os = linux,
|
||||
.ofmt = .elf,
|
||||
};
|
||||
comp.target = x86_target;
|
||||
|
||||
try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp).?);
|
||||
const tt: QualType = .long_long;
|
||||
|
||||
try std.testing.expectEqual(@as(u64, 8), tt.sizeof(&comp));
|
||||
try std.testing.expectEqual(@as(u64, 4), tt.alignof(&comp));
|
||||
|
||||
const arm = std.Target.Cpu.Arch.arm;
|
||||
comp.target.cpu = std.Target.Cpu.Model.toCpu(&std.Target.arm.cpu.cortex_r4, arm);
|
||||
comp.target.os = std.Target.Os.Tag.defaultVersionRange(.ios, arm, .none);
|
||||
comp.target.abi = std.Target.Abi.none;
|
||||
|
||||
const ct: Type = .{
|
||||
.specifier = .char,
|
||||
};
|
||||
|
||||
try std.testing.expectEqual(true, comp.target.cpu.has(.arm, .has_v7));
|
||||
try std.testing.expectEqual(@as(u64, 1), ct.sizeof(&comp).?);
|
||||
try std.testing.expectEqual(@as(u64, 1), ct.alignof(&comp));
|
||||
try std.testing.expectEqual(true, ignoreNonZeroSizedBitfieldTypeAlignment(comp.target));
|
||||
}
|
||||
|
||||
/// The canonical integer representation of nullptr_t.
|
||||
|
||||
371
lib/compiler/aro/aro/text_literal.zig
vendored
371
lib/compiler/aro/aro/text_literal.zig
vendored
@ -1,11 +1,13 @@
|
||||
//! Parsing and classification of string and character literals
|
||||
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const Compilation = @import("Compilation.zig");
|
||||
const Type = @import("Type.zig");
|
||||
const Diagnostics = @import("Diagnostics.zig");
|
||||
const Tokenizer = @import("Tokenizer.zig");
|
||||
const mem = std.mem;
|
||||
const QualType = @import("TypeStore.zig").QualType;
|
||||
const Source = @import("Source.zig");
|
||||
|
||||
pub const Item = union(enum) {
|
||||
/// decoded hex or character escape
|
||||
@ -18,11 +20,6 @@ pub const Item = union(enum) {
|
||||
utf8_text: std.unicode.Utf8View,
|
||||
};
|
||||
|
||||
const CharDiagnostic = struct {
|
||||
tag: Diagnostics.Tag,
|
||||
extra: Diagnostics.Message.Extra,
|
||||
};
|
||||
|
||||
pub const Kind = enum {
|
||||
char,
|
||||
wide,
|
||||
@ -91,13 +88,13 @@ pub const Kind = enum {
|
||||
}
|
||||
|
||||
/// The C type of a character literal of this kind
|
||||
pub fn charLiteralType(kind: Kind, comp: *const Compilation) Type {
|
||||
pub fn charLiteralType(kind: Kind, comp: *const Compilation) QualType {
|
||||
return switch (kind) {
|
||||
.char => Type.int,
|
||||
.wide => comp.types.wchar,
|
||||
.utf_8 => .{ .specifier = .uchar },
|
||||
.utf_16 => comp.types.uint_least16_t,
|
||||
.utf_32 => comp.types.uint_least32_t,
|
||||
.char => .int,
|
||||
.wide => comp.type_store.wchar,
|
||||
.utf_8 => .uchar,
|
||||
.utf_16 => comp.type_store.uint_least16_t,
|
||||
.utf_32 => comp.type_store.uint_least32_t,
|
||||
.unterminated => unreachable,
|
||||
};
|
||||
}
|
||||
@ -120,7 +117,7 @@ pub const Kind = enum {
|
||||
pub fn charUnitSize(kind: Kind, comp: *const Compilation) Compilation.CharUnitSize {
|
||||
return switch (kind) {
|
||||
.char => .@"1",
|
||||
.wide => switch (comp.types.wchar.sizeof(comp).?) {
|
||||
.wide => switch (comp.type_store.wchar.sizeof(comp)) {
|
||||
2 => .@"2",
|
||||
4 => .@"4",
|
||||
else => unreachable,
|
||||
@ -140,37 +137,55 @@ pub const Kind = enum {
|
||||
}
|
||||
|
||||
/// The C type of an element of a string literal of this kind
|
||||
pub fn elementType(kind: Kind, comp: *const Compilation) Type {
|
||||
pub fn elementType(kind: Kind, comp: *const Compilation) QualType {
|
||||
return switch (kind) {
|
||||
.unterminated => unreachable,
|
||||
.char => .{ .specifier = .char },
|
||||
.utf_8 => if (comp.langopts.hasChar8_T()) .{ .specifier = .uchar } else .{ .specifier = .char },
|
||||
.char => .char,
|
||||
.utf_8 => if (comp.langopts.hasChar8_T()) .uchar else .char,
|
||||
else => kind.charLiteralType(comp),
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
pub const Ascii = struct {
|
||||
val: u7,
|
||||
|
||||
pub fn init(val: anytype) Ascii {
|
||||
return .{ .val = @intCast(val) };
|
||||
}
|
||||
|
||||
pub fn format(ctx: Ascii, w: *std.Io.Writer, fmt_str: []const u8) !usize {
|
||||
const template = "{c}";
|
||||
const i = std.mem.indexOf(u8, fmt_str, template).?;
|
||||
try w.writeAll(fmt_str[0..i]);
|
||||
|
||||
if (std.ascii.isPrint(ctx.val)) {
|
||||
try w.writeByte(ctx.val);
|
||||
} else {
|
||||
try w.print("x{x:0>2}", .{ctx.val});
|
||||
}
|
||||
return i + template.len;
|
||||
}
|
||||
};
|
||||
|
||||
pub const Parser = struct {
|
||||
comp: *const Compilation,
|
||||
literal: []const u8,
|
||||
i: usize = 0,
|
||||
kind: Kind,
|
||||
max_codepoint: u21,
|
||||
loc: Source.Location,
|
||||
/// Offset added to `loc.byte_offset` when emitting an error.
|
||||
offset: u32 = 0,
|
||||
expansion_locs: []const Source.Location,
|
||||
/// We only want to issue a max of 1 error per char literal
|
||||
errored: bool = false,
|
||||
errors_buffer: [4]CharDiagnostic,
|
||||
errors_len: usize,
|
||||
comp: *const Compilation,
|
||||
|
||||
pub fn init(literal: []const u8, kind: Kind, max_codepoint: u21, comp: *const Compilation) Parser {
|
||||
return .{
|
||||
.literal = literal,
|
||||
.comp = comp,
|
||||
.kind = kind,
|
||||
.max_codepoint = max_codepoint,
|
||||
.errors_buffer = undefined,
|
||||
.errors_len = 0,
|
||||
};
|
||||
}
|
||||
/// Makes incorrect encoding always an error.
|
||||
/// Used when concatenating string literals.
|
||||
incorrect_encoding_is_error: bool = false,
|
||||
/// If this is false, do not issue any diagnostics for incorrect character encoding
|
||||
/// Incorrect encoding is allowed if we are unescaping an identifier in the preprocessor
|
||||
diagnose_incorrect_encoding: bool = true,
|
||||
|
||||
fn prefixLen(self: *const Parser) usize {
|
||||
return switch (self.kind) {
|
||||
@ -181,65 +196,204 @@ pub const Parser = struct {
|
||||
};
|
||||
}
|
||||
|
||||
pub fn errors(p: *Parser) []CharDiagnostic {
|
||||
return p.errors_buffer[0..p.errors_len];
|
||||
const Diagnostic = struct {
|
||||
fmt: []const u8,
|
||||
kind: Diagnostics.Message.Kind,
|
||||
opt: ?Diagnostics.Option = null,
|
||||
extension: bool = false,
|
||||
|
||||
pub const illegal_char_encoding_error: Diagnostic = .{
|
||||
.fmt = "illegal character encoding in character literal",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const illegal_char_encoding_warning: Diagnostic = .{
|
||||
.fmt = "illegal character encoding in character literal",
|
||||
.kind = .warning,
|
||||
.opt = .@"invalid-source-encoding",
|
||||
};
|
||||
|
||||
pub const missing_hex_escape: Diagnostic = .{
|
||||
.fmt = "\\{c} used with no following hex digits",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const escape_sequence_overflow: Diagnostic = .{
|
||||
.fmt = "escape sequence out of range",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const incomplete_universal_character: Diagnostic = .{
|
||||
.fmt = "incomplete universal character name",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const invalid_universal_character: Diagnostic = .{
|
||||
.fmt = "invalid universal character",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const char_too_large: Diagnostic = .{
|
||||
.fmt = "character too large for enclosing character literal type",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const ucn_basic_char_error: Diagnostic = .{
|
||||
.fmt = "character '{c}' cannot be specified by a universal character name",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const ucn_basic_char_warning: Diagnostic = .{
|
||||
.fmt = "specifying character '{c}' with a universal character name is incompatible with C standards before C23",
|
||||
.kind = .off,
|
||||
.opt = .@"pre-c23-compat",
|
||||
};
|
||||
|
||||
pub const ucn_control_char_error: Diagnostic = .{
|
||||
.fmt = "universal character name refers to a control character",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const ucn_control_char_warning: Diagnostic = .{
|
||||
.fmt = "universal character name referring to a control character is incompatible with C standards before C23",
|
||||
.kind = .off,
|
||||
.opt = .@"pre-c23-compat",
|
||||
};
|
||||
|
||||
pub const c89_ucn_in_literal: Diagnostic = .{
|
||||
.fmt = "universal character names are only valid in C99 or later",
|
||||
.kind = .warning,
|
||||
.opt = .unicode,
|
||||
};
|
||||
|
||||
const non_standard_escape_char: Diagnostic = .{
|
||||
.fmt = "use of non-standard escape character '\\{c}'",
|
||||
.kind = .off,
|
||||
.extension = true,
|
||||
};
|
||||
|
||||
pub const unknown_escape_sequence: Diagnostic = .{
|
||||
.fmt = "unknown escape sequence '\\{c}'",
|
||||
.kind = .warning,
|
||||
.opt = .@"unknown-escape-sequence",
|
||||
};
|
||||
|
||||
pub const four_char_char_literal: Diagnostic = .{
|
||||
.fmt = "multi-character character constant",
|
||||
.opt = .@"four-char-constants",
|
||||
.kind = .off,
|
||||
};
|
||||
|
||||
pub const multichar_literal_warning: Diagnostic = .{
|
||||
.fmt = "multi-character character constant",
|
||||
.kind = .warning,
|
||||
.opt = .multichar,
|
||||
};
|
||||
|
||||
pub const invalid_multichar_literal: Diagnostic = .{
|
||||
.fmt = "{s} character literals may not contain multiple characters",
|
||||
.kind = .@"error",
|
||||
};
|
||||
|
||||
pub const char_lit_too_wide: Diagnostic = .{
|
||||
.fmt = "character constant too long for its type",
|
||||
.kind = .warning,
|
||||
};
|
||||
|
||||
// pub const wide_multichar_literal: Diagnostic = .{
|
||||
// .fmt = "extraneous characters in character constant ignored",
|
||||
// .kind = .warning,
|
||||
// };
|
||||
};
|
||||
|
||||
pub fn err(p: *Parser, diagnostic: Diagnostic, args: anytype) !void {
|
||||
defer p.offset = 0;
|
||||
if (p.errored) return;
|
||||
defer p.errored = true;
|
||||
try p.warn(diagnostic, args);
|
||||
}
|
||||
|
||||
pub fn err(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void {
|
||||
if (self.errored) return;
|
||||
self.errored = true;
|
||||
const diagnostic: CharDiagnostic = .{ .tag = tag, .extra = extra };
|
||||
if (self.errors_len == self.errors_buffer.len) {
|
||||
self.errors_buffer[self.errors_buffer.len - 1] = diagnostic;
|
||||
} else {
|
||||
self.errors_buffer[self.errors_len] = diagnostic;
|
||||
self.errors_len += 1;
|
||||
pub fn warn(p: *Parser, diagnostic: Diagnostic, args: anytype) Compilation.Error!void {
|
||||
defer p.offset = 0;
|
||||
if (p.errored) return;
|
||||
if (p.comp.diagnostics.effectiveKind(diagnostic) == .off) return;
|
||||
|
||||
var sf = std.heap.stackFallback(1024, p.comp.gpa);
|
||||
var allocating: std.Io.Writer.Allocating = .init(sf.get());
|
||||
defer allocating.deinit();
|
||||
|
||||
formatArgs(&allocating.writer, diagnostic.fmt, args) catch return error.OutOfMemory;
|
||||
|
||||
var offset_location = p.loc;
|
||||
offset_location.byte_offset += p.offset;
|
||||
try p.comp.diagnostics.addWithLocation(p.comp, .{
|
||||
.kind = diagnostic.kind,
|
||||
.text = allocating.getWritten(),
|
||||
.opt = diagnostic.opt,
|
||||
.extension = diagnostic.extension,
|
||||
.location = offset_location.expand(p.comp),
|
||||
}, p.expansion_locs, true);
|
||||
}
|
||||
|
||||
fn formatArgs(w: *std.Io.Writer, fmt: []const u8, args: anytype) !void {
|
||||
var i: usize = 0;
|
||||
inline for (std.meta.fields(@TypeOf(args))) |arg_info| {
|
||||
const arg = @field(args, arg_info.name);
|
||||
i += switch (@TypeOf(arg)) {
|
||||
[]const u8 => try Diagnostics.formatString(w, fmt[i..], arg),
|
||||
Ascii => try arg.format(w, fmt[i..]),
|
||||
else => switch (@typeInfo(@TypeOf(arg))) {
|
||||
.int, .comptime_int => try Diagnostics.formatInt(w, fmt[i..], arg),
|
||||
.pointer => try Diagnostics.formatString(w, fmt[i..], arg),
|
||||
else => unreachable,
|
||||
},
|
||||
};
|
||||
}
|
||||
try w.writeAll(fmt[i..]);
|
||||
}
|
||||
|
||||
pub fn warn(self: *Parser, tag: Diagnostics.Tag, extra: Diagnostics.Message.Extra) void {
|
||||
if (self.errored) return;
|
||||
if (self.errors_len < self.errors_buffer.len) {
|
||||
self.errors_buffer[self.errors_len] = .{ .tag = tag, .extra = extra };
|
||||
self.errors_len += 1;
|
||||
}
|
||||
}
|
||||
pub fn next(p: *Parser) !?Item {
|
||||
if (p.i >= p.literal.len) return null;
|
||||
|
||||
pub fn next(self: *Parser) ?Item {
|
||||
if (self.i >= self.literal.len) return null;
|
||||
|
||||
const start = self.i;
|
||||
if (self.literal[start] != '\\') {
|
||||
self.i = mem.indexOfScalarPos(u8, self.literal, start + 1, '\\') orelse self.literal.len;
|
||||
const unescaped_slice = self.literal[start..self.i];
|
||||
const start = p.i;
|
||||
if (p.literal[start] != '\\') {
|
||||
p.i = mem.indexOfScalarPos(u8, p.literal, start + 1, '\\') orelse p.literal.len;
|
||||
const unescaped_slice = p.literal[start..p.i];
|
||||
|
||||
const view = std.unicode.Utf8View.init(unescaped_slice) catch {
|
||||
if (self.kind != .char) {
|
||||
self.err(.illegal_char_encoding_error, .{ .none = {} });
|
||||
if (!p.diagnose_incorrect_encoding) {
|
||||
return .{ .improperly_encoded = p.literal[start..p.i] };
|
||||
}
|
||||
if (p.incorrect_encoding_is_error) {
|
||||
try p.warn(.illegal_char_encoding_error, .{});
|
||||
return .{ .improperly_encoded = p.literal[start..p.i] };
|
||||
}
|
||||
if (p.kind != .char) {
|
||||
try p.err(.illegal_char_encoding_error, .{});
|
||||
return null;
|
||||
}
|
||||
self.warn(.illegal_char_encoding_warning, .{ .none = {} });
|
||||
return .{ .improperly_encoded = self.literal[start..self.i] };
|
||||
try p.warn(.illegal_char_encoding_warning, .{});
|
||||
return .{ .improperly_encoded = p.literal[start..p.i] };
|
||||
};
|
||||
return .{ .utf8_text = view };
|
||||
}
|
||||
switch (self.literal[start + 1]) {
|
||||
'u', 'U' => return self.parseUnicodeEscape(),
|
||||
else => return self.parseEscapedChar(),
|
||||
switch (p.literal[start + 1]) {
|
||||
'u', 'U' => return try p.parseUnicodeEscape(),
|
||||
else => return try p.parseEscapedChar(),
|
||||
}
|
||||
}
|
||||
|
||||
fn parseUnicodeEscape(self: *Parser) ?Item {
|
||||
const start = self.i;
|
||||
fn parseUnicodeEscape(p: *Parser) !?Item {
|
||||
const start = p.i;
|
||||
|
||||
std.debug.assert(self.literal[self.i] == '\\');
|
||||
std.debug.assert(p.literal[p.i] == '\\');
|
||||
|
||||
const kind = self.literal[self.i + 1];
|
||||
const kind = p.literal[p.i + 1];
|
||||
std.debug.assert(kind == 'u' or kind == 'U');
|
||||
|
||||
self.i += 2;
|
||||
if (self.i >= self.literal.len or !std.ascii.isHex(self.literal[self.i])) {
|
||||
self.err(.missing_hex_escape, .{ .ascii = @intCast(kind) });
|
||||
p.i += 2;
|
||||
if (p.i >= p.literal.len or !std.ascii.isHex(p.literal[p.i])) {
|
||||
try p.err(.missing_hex_escape, .{Ascii.init(kind)});
|
||||
return null;
|
||||
}
|
||||
const expected_len: usize = if (kind == 'u') 4 else 8;
|
||||
@ -247,66 +401,66 @@ pub const Parser = struct {
|
||||
var count: usize = 0;
|
||||
var val: u32 = 0;
|
||||
|
||||
for (self.literal[self.i..], 0..) |c, i| {
|
||||
for (p.literal[p.i..], 0..) |c, i| {
|
||||
if (i == expected_len) break;
|
||||
|
||||
const char = std.fmt.charToDigit(c, 16) catch {
|
||||
break;
|
||||
};
|
||||
const char = std.fmt.charToDigit(c, 16) catch break;
|
||||
|
||||
val, const overflow = @shlWithOverflow(val, 4);
|
||||
overflowed = overflowed or overflow != 0;
|
||||
val |= char;
|
||||
count += 1;
|
||||
}
|
||||
self.i += expected_len;
|
||||
p.i += expected_len;
|
||||
|
||||
if (overflowed) {
|
||||
self.err(.escape_sequence_overflow, .{ .offset = start + self.prefixLen() });
|
||||
p.offset += @intCast(start + p.prefixLen());
|
||||
try p.err(.escape_sequence_overflow, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
if (count != expected_len) {
|
||||
self.err(.incomplete_universal_character, .{ .none = {} });
|
||||
try p.err(.incomplete_universal_character, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
if (val > std.math.maxInt(u21) or !std.unicode.utf8ValidCodepoint(@intCast(val))) {
|
||||
self.err(.invalid_universal_character, .{ .offset = start + self.prefixLen() });
|
||||
p.offset += @intCast(start + p.prefixLen());
|
||||
try p.err(.invalid_universal_character, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
if (val > self.max_codepoint) {
|
||||
self.err(.char_too_large, .{ .none = {} });
|
||||
if (val > p.max_codepoint) {
|
||||
try p.err(.char_too_large, .{});
|
||||
return null;
|
||||
}
|
||||
|
||||
if (val < 0xA0 and (val != '$' and val != '@' and val != '`')) {
|
||||
const is_error = !self.comp.langopts.standard.atLeast(.c23);
|
||||
const is_error = !p.comp.langopts.standard.atLeast(.c23);
|
||||
if (val >= 0x20 and val <= 0x7F) {
|
||||
if (is_error) {
|
||||
self.err(.ucn_basic_char_error, .{ .ascii = @intCast(val) });
|
||||
} else {
|
||||
self.warn(.ucn_basic_char_warning, .{ .ascii = @intCast(val) });
|
||||
try p.err(.ucn_basic_char_error, .{Ascii.init(val)});
|
||||
} else if (!p.comp.langopts.standard.atLeast(.c23)) {
|
||||
try p.warn(.ucn_basic_char_warning, .{Ascii.init(val)});
|
||||
}
|
||||
} else {
|
||||
if (is_error) {
|
||||
self.err(.ucn_control_char_error, .{ .none = {} });
|
||||
} else {
|
||||
self.warn(.ucn_control_char_warning, .{ .none = {} });
|
||||
try p.err(.ucn_control_char_error, .{});
|
||||
} else if (!p.comp.langopts.standard.atLeast(.c23)) {
|
||||
try p.warn(.ucn_control_char_warning, .{});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.warn(.c89_ucn_in_literal, .{ .none = {} });
|
||||
if (!p.comp.langopts.standard.atLeast(.c99)) try p.warn(.c89_ucn_in_literal, .{});
|
||||
return .{ .codepoint = @intCast(val) };
|
||||
}
|
||||
|
||||
fn parseEscapedChar(self: *Parser) Item {
|
||||
self.i += 1;
|
||||
const c = self.literal[self.i];
|
||||
fn parseEscapedChar(p: *Parser) !Item {
|
||||
p.i += 1;
|
||||
const c = p.literal[p.i];
|
||||
defer if (c != 'x' and (c < '0' or c > '7')) {
|
||||
self.i += 1;
|
||||
p.i += 1;
|
||||
};
|
||||
|
||||
switch (c) {
|
||||
@ -319,36 +473,40 @@ pub const Parser = struct {
|
||||
'a' => return .{ .value = 0x07 },
|
||||
'b' => return .{ .value = 0x08 },
|
||||
'e', 'E' => {
|
||||
self.warn(.non_standard_escape_char, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } });
|
||||
p.offset += @intCast(p.i);
|
||||
try p.warn(.non_standard_escape_char, .{Ascii.init(c)});
|
||||
return .{ .value = 0x1B };
|
||||
},
|
||||
'(', '{', '[', '%' => {
|
||||
self.warn(.non_standard_escape_char, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } });
|
||||
p.offset += @intCast(p.i);
|
||||
try p.warn(.non_standard_escape_char, .{Ascii.init(c)});
|
||||
return .{ .value = c };
|
||||
},
|
||||
'f' => return .{ .value = 0x0C },
|
||||
'v' => return .{ .value = 0x0B },
|
||||
'x' => return .{ .value = self.parseNumberEscape(.hex) },
|
||||
'0'...'7' => return .{ .value = self.parseNumberEscape(.octal) },
|
||||
'x' => return .{ .value = try p.parseNumberEscape(.hex) },
|
||||
'0'...'7' => return .{ .value = try p.parseNumberEscape(.octal) },
|
||||
'u', 'U' => unreachable, // handled by parseUnicodeEscape
|
||||
else => {
|
||||
self.warn(.unknown_escape_sequence, .{ .invalid_escape = .{ .char = c, .offset = @intCast(self.i) } });
|
||||
p.offset += @intCast(p.i);
|
||||
try p.warn(.unknown_escape_sequence, .{Ascii.init(c)});
|
||||
return .{ .value = c };
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn parseNumberEscape(self: *Parser, base: EscapeBase) u32 {
|
||||
fn parseNumberEscape(p: *Parser, base: EscapeBase) !u32 {
|
||||
var val: u32 = 0;
|
||||
var count: usize = 0;
|
||||
var overflowed = false;
|
||||
const start = self.i;
|
||||
defer self.i += count;
|
||||
const start = p.i;
|
||||
defer p.i += count;
|
||||
|
||||
const slice = switch (base) {
|
||||
.octal => self.literal[self.i..@min(self.literal.len, self.i + 3)], // max 3 chars
|
||||
.octal => p.literal[p.i..@min(p.literal.len, p.i + 3)], // max 3 chars
|
||||
.hex => blk: {
|
||||
self.i += 1;
|
||||
break :blk self.literal[self.i..]; // skip over 'x'; could have an arbitrary number of chars
|
||||
p.i += 1;
|
||||
break :blk p.literal[p.i..]; // skip over 'x'; could have an arbitrary number of chars
|
||||
},
|
||||
};
|
||||
for (slice) |c| {
|
||||
@ -358,13 +516,14 @@ pub const Parser = struct {
|
||||
val += char;
|
||||
count += 1;
|
||||
}
|
||||
if (overflowed or val > self.kind.maxInt(self.comp)) {
|
||||
self.err(.escape_sequence_overflow, .{ .offset = start + self.prefixLen() });
|
||||
if (overflowed or val > p.kind.maxInt(p.comp)) {
|
||||
p.offset += @intCast(start + p.prefixLen());
|
||||
try p.err(.escape_sequence_overflow, .{});
|
||||
return 0;
|
||||
}
|
||||
if (count == 0) {
|
||||
std.debug.assert(base == .hex);
|
||||
self.err(.missing_hex_escape, .{ .ascii = 'x' });
|
||||
try p.err(.missing_hex_escape, .{Ascii.init('x')});
|
||||
}
|
||||
return val;
|
||||
}
|
||||
|
||||
32
lib/compiler/aro/aro/toolchains/Linux.zig
vendored
32
lib/compiler/aro/aro/toolchains/Linux.zig
vendored
@ -1,12 +1,14 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
|
||||
const system_defaults = @import("system_defaults");
|
||||
|
||||
const Compilation = @import("../Compilation.zig");
|
||||
const GCCDetector = @import("../Driver/GCCDetector.zig");
|
||||
const Toolchain = @import("../Toolchain.zig");
|
||||
const Driver = @import("../Driver.zig");
|
||||
const Distro = @import("../Driver/Distro.zig");
|
||||
const GCCDetector = @import("../Driver/GCCDetector.zig");
|
||||
const target_util = @import("../target.zig");
|
||||
const system_defaults = @import("system_defaults");
|
||||
const Toolchain = @import("../Toolchain.zig");
|
||||
|
||||
const Linux = @This();
|
||||
|
||||
@ -144,7 +146,7 @@ fn getPIE(self: *const Linux, d: *const Driver) bool {
|
||||
fn getStaticPIE(self: *const Linux, d: *Driver) !bool {
|
||||
_ = self;
|
||||
if (d.static_pie and d.pie != null) {
|
||||
try d.err("cannot specify 'nopie' along with 'static-pie'");
|
||||
try d.err("cannot specify 'nopie' along with 'static-pie'", .{});
|
||||
}
|
||||
return d.static_pie;
|
||||
}
|
||||
@ -195,7 +197,7 @@ pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.arra
|
||||
if (target_util.ldEmulationOption(d.comp.target, null)) |emulation| {
|
||||
try argv.appendSlice(&.{ "-m", emulation });
|
||||
} else {
|
||||
try d.err("Unknown target triple");
|
||||
try d.err("Unknown target triple", .{});
|
||||
return;
|
||||
}
|
||||
if (d.comp.target.cpu.arch.isRISCV()) {
|
||||
@ -214,9 +216,9 @@ pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.arra
|
||||
const dynamic_linker = d.comp.target.standardDynamicLinkerPath();
|
||||
// todo: check for --dyld-prefix
|
||||
if (dynamic_linker.get()) |path| {
|
||||
try argv.appendSlice(&.{ "-dynamic-linker", try tc.arena.dupe(u8, path) });
|
||||
try argv.appendSlice(&.{ "-dynamic-linker", try d.comp.arena.dupe(u8, path) });
|
||||
} else {
|
||||
try d.err("Could not find dynamic linker path");
|
||||
try d.err("Could not find dynamic linker path", .{});
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -318,7 +320,7 @@ pub fn buildLinkerArgs(self: *const Linux, tc: *const Toolchain, argv: *std.arra
|
||||
|
||||
fn getMultiarchTriple(target: std.Target) ?[]const u8 {
|
||||
const is_android = target.abi.isAndroid();
|
||||
const is_mips_r6 = target.cpu.has(.mips, .mips32r6);
|
||||
const is_mips_r6 = std.Target.mips.featureSetHas(target.cpu.features, .mips32r6);
|
||||
return switch (target.cpu.arch) {
|
||||
.arm, .thumb => if (is_android) "arm-linux-androideabi" else if (target.abi == .gnueabihf) "arm-linux-gnueabihf" else "arm-linux-gnueabi",
|
||||
.armeb, .thumbeb => if (target.abi == .gnueabihf) "armeb-linux-gnueabihf" else "armeb-linux-gnueabi",
|
||||
@ -372,13 +374,13 @@ pub fn defineSystemIncludes(self: *const Linux, tc: *const Toolchain) !void {
|
||||
// musl prefers /usr/include before builtin includes, so musl targets will add builtins
|
||||
// at the end of this function (unless disabled with nostdlibinc)
|
||||
if (!tc.driver.nobuiltininc and (!target.abi.isMusl() or tc.driver.nostdlibinc)) {
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name);
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name, tc.driver.resource_dir);
|
||||
}
|
||||
|
||||
if (tc.driver.nostdlibinc) return;
|
||||
|
||||
const sysroot = tc.getSysroot();
|
||||
const local_include = try std.fmt.allocPrint(comp.gpa, "{s}{s}", .{ sysroot, "/usr/local/include" });
|
||||
const local_include = try std.fs.path.join(comp.gpa, &.{ sysroot, "/usr/local/include" });
|
||||
defer comp.gpa.free(local_include);
|
||||
try comp.addSystemIncludeDir(local_include);
|
||||
|
||||
@ -389,7 +391,7 @@ pub fn defineSystemIncludes(self: *const Linux, tc: *const Toolchain) !void {
|
||||
}
|
||||
|
||||
if (getMultiarchTriple(target)) |triple| {
|
||||
const joined = try std.fs.path.join(comp.gpa, &.{ sysroot, "usr", "include", triple });
|
||||
const joined = try std.fs.path.join(comp.gpa, &.{ sysroot, "/usr/include", triple });
|
||||
defer comp.gpa.free(joined);
|
||||
if (tc.filesystem.exists(joined)) {
|
||||
try comp.addSystemIncludeDir(joined);
|
||||
@ -403,7 +405,7 @@ pub fn defineSystemIncludes(self: *const Linux, tc: *const Toolchain) !void {
|
||||
|
||||
std.debug.assert(!tc.driver.nostdlibinc);
|
||||
if (!tc.driver.nobuiltininc and target.abi.isMusl()) {
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name);
|
||||
try comp.addBuiltinIncludeDir(tc.driver.aro_name, tc.driver.resource_dir);
|
||||
}
|
||||
}
|
||||
|
||||
@ -414,7 +416,7 @@ test Linux {
|
||||
defer arena_instance.deinit();
|
||||
const arena = arena_instance.allocator();
|
||||
|
||||
var comp = Compilation.init(std.testing.allocator, std.fs.cwd());
|
||||
var comp = Compilation.init(std.testing.allocator, arena, undefined, std.fs.cwd());
|
||||
defer comp.deinit();
|
||||
comp.environment = .{
|
||||
.path = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
|
||||
@ -426,7 +428,7 @@ test Linux {
|
||||
comp.target = try std.zig.system.resolveTargetQuery(target_query);
|
||||
comp.langopts.setEmulatedCompiler(.gcc);
|
||||
|
||||
var driver: Driver = .{ .comp = &comp };
|
||||
var driver: Driver = .{ .comp = &comp, .diagnostics = undefined };
|
||||
defer driver.deinit();
|
||||
driver.raw_target_triple = raw_triple;
|
||||
|
||||
@ -434,7 +436,7 @@ test Linux {
|
||||
try driver.link_objects.append(driver.comp.gpa, link_obj);
|
||||
driver.temp_file_count += 1;
|
||||
|
||||
var toolchain: Toolchain = .{ .driver = &driver, .arena = arena, .filesystem = .{ .fake = &.{
|
||||
var toolchain: Toolchain = .{ .driver = &driver, .filesystem = .{ .fake = &.{
|
||||
.{ .path = "/tmp" },
|
||||
.{ .path = "/usr" },
|
||||
.{ .path = "/usr/lib64" },
|
||||
|
||||
12
lib/compiler/aro/assembly_backend.zig
vendored
Normal file
12
lib/compiler/aro/assembly_backend.zig
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
const std = @import("std");
|
||||
|
||||
const aro = @import("aro");
|
||||
|
||||
pub const x86_64 = @import("assembly_backend/x86_64.zig");
|
||||
|
||||
pub fn genAsm(target: std.Target, tree: *const aro.Tree) aro.Compilation.Error!aro.Assembly {
|
||||
return switch (target.cpu.arch) {
|
||||
.x86_64 => x86_64.genAsm(tree),
|
||||
else => std.debug.panic("genAsm not implemented: {s}", .{@tagName(target.cpu.arch)}),
|
||||
};
|
||||
}
|
||||
254
lib/compiler/aro/assembly_backend/x86_64.zig
vendored
Normal file
254
lib/compiler/aro/assembly_backend/x86_64.zig
vendored
Normal file
@ -0,0 +1,254 @@
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const aro = @import("aro");
|
||||
const Assembly = aro.Assembly;
|
||||
const Compilation = aro.Compilation;
|
||||
const Node = Tree.Node;
|
||||
const Source = aro.Source;
|
||||
const Tree = aro.Tree;
|
||||
const QualType = aro.QualType;
|
||||
const Value = aro.Value;
|
||||
|
||||
const AsmCodeGen = @This();
|
||||
const Error = aro.Compilation.Error;
|
||||
|
||||
tree: *const Tree,
|
||||
comp: *Compilation,
|
||||
text: *std.Io.Writer,
|
||||
data: *std.Io.Writer,
|
||||
|
||||
const StorageUnit = enum(u8) {
|
||||
byte = 8,
|
||||
short = 16,
|
||||
long = 32,
|
||||
quad = 64,
|
||||
|
||||
fn trunc(self: StorageUnit, val: u64) u64 {
|
||||
return switch (self) {
|
||||
.byte => @as(u8, @truncate(val)),
|
||||
.short => @as(u16, @truncate(val)),
|
||||
.long => @as(u32, @truncate(val)),
|
||||
.quad => val,
|
||||
};
|
||||
}
|
||||
};
|
||||
|
||||
fn serializeInt(value: u64, storage_unit: StorageUnit, w: *std.Io.Writer) !void {
|
||||
try w.print(" .{s} 0x{x}\n", .{ @tagName(storage_unit), storage_unit.trunc(value) });
|
||||
}
|
||||
|
||||
fn serializeFloat(comptime T: type, value: T, w: *std.Io.Writer) !void {
|
||||
switch (T) {
|
||||
f128 => {
|
||||
const bytes = std.mem.asBytes(&value);
|
||||
const first = std.mem.bytesToValue(u64, bytes[0..8]);
|
||||
try serializeInt(first, .quad, w);
|
||||
const second = std.mem.bytesToValue(u64, bytes[8..16]);
|
||||
return serializeInt(second, .quad, w);
|
||||
},
|
||||
f80 => {
|
||||
const bytes = std.mem.asBytes(&value);
|
||||
const first = std.mem.bytesToValue(u64, bytes[0..8]);
|
||||
try serializeInt(first, .quad, w);
|
||||
const second = std.mem.bytesToValue(u16, bytes[8..10]);
|
||||
try serializeInt(second, .short, w);
|
||||
return w.writeAll(" .zero 6\n");
|
||||
},
|
||||
else => {
|
||||
const size = @bitSizeOf(T);
|
||||
const storage_unit = std.meta.intToEnum(StorageUnit, size) catch unreachable;
|
||||
const IntTy = @Type(.{ .int = .{ .signedness = .unsigned, .bits = size } });
|
||||
const int_val: IntTy = @bitCast(value);
|
||||
return serializeInt(int_val, storage_unit, w);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn todo(c: *AsmCodeGen, msg: []const u8, tok: Tree.TokenIndex) Error {
|
||||
const loc: Source.Location = c.tree.tokens.items(.loc)[tok];
|
||||
|
||||
var sf = std.heap.stackFallback(1024, c.comp.gpa);
|
||||
var buf = std.ArrayList(u8).init(sf.get());
|
||||
defer buf.deinit();
|
||||
|
||||
try buf.print("TODO: {s}", .{msg});
|
||||
try c.comp.diagnostics.add(.{
|
||||
.text = buf.items,
|
||||
.kind = .@"error",
|
||||
.location = loc.expand(c.comp),
|
||||
});
|
||||
return error.FatalError;
|
||||
}
|
||||
|
||||
fn emitAggregate(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
|
||||
_ = qt;
|
||||
return c.todo("Codegen aggregates", node.tok(c.tree));
|
||||
}
|
||||
|
||||
fn emitSingleValue(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
|
||||
const value = c.tree.value_map.get(node) orelse return;
|
||||
const bit_size = qt.bitSizeof(c.comp);
|
||||
const scalar_kind = qt.scalarKind(c.comp);
|
||||
if (!scalar_kind.isReal()) {
|
||||
return c.todo("Codegen _Complex values", node.tok(c.tree));
|
||||
} else if (scalar_kind.isInt()) {
|
||||
const storage_unit = std.meta.intToEnum(StorageUnit, bit_size) catch return c.todo("Codegen _BitInt values", node.tok(c.tree));
|
||||
try c.data.print(" .{s} ", .{@tagName(storage_unit)});
|
||||
_ = try value.print(qt, c.comp, c.data);
|
||||
try c.data.writeByte('\n');
|
||||
} else if (scalar_kind.isFloat()) {
|
||||
switch (bit_size) {
|
||||
16 => return serializeFloat(f16, value.toFloat(f16, c.comp), c.data),
|
||||
32 => return serializeFloat(f32, value.toFloat(f32, c.comp), c.data),
|
||||
64 => return serializeFloat(f64, value.toFloat(f64, c.comp), c.data),
|
||||
80 => return serializeFloat(f80, value.toFloat(f80, c.comp), c.data),
|
||||
128 => return serializeFloat(f128, value.toFloat(f128, c.comp), c.data),
|
||||
else => unreachable,
|
||||
}
|
||||
} else if (scalar_kind.isPointer()) {
|
||||
return c.todo("Codegen pointer", node.tok(c.tree));
|
||||
} else if (qt.is(c.comp, .array)) {
|
||||
// Todo:
|
||||
// Handle truncated initializers e.g. char x[3] = "hello";
|
||||
// Zero out remaining bytes if initializer is shorter than storage capacity
|
||||
// Handle non-char strings
|
||||
const bytes = value.toBytes(c.comp);
|
||||
const directive = if (bytes.len > bit_size / 8) "ascii" else "string";
|
||||
try c.data.print(" .{s} ", .{directive});
|
||||
try Value.printString(bytes, qt, c.comp, c.data);
|
||||
|
||||
try c.data.writeByte('\n');
|
||||
} else unreachable;
|
||||
}
|
||||
|
||||
fn emitValue(c: *AsmCodeGen, qt: QualType, node: Node.Index) !void {
|
||||
switch (node.get(c.tree)) {
|
||||
.array_init_expr,
|
||||
.struct_init_expr,
|
||||
.union_init_expr,
|
||||
=> return c.todo("Codegen multiple inits", node.tok(c.tree)),
|
||||
else => return c.emitSingleValue(qt, node),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn genAsm(tree: *const Tree) Error!Assembly {
|
||||
var data: std.Io.Writer.Allocating = .init(tree.comp.gpa);
|
||||
defer data.deinit();
|
||||
|
||||
var text: std.Io.Writer.Allocating = .init(tree.comp.gpa);
|
||||
defer text.deinit();
|
||||
|
||||
var codegen: AsmCodeGen = .{
|
||||
.tree = tree,
|
||||
.comp = tree.comp,
|
||||
.text = &text.writer,
|
||||
.data = &data.writer,
|
||||
};
|
||||
|
||||
codegen.genDecls() catch |err| switch (err) {
|
||||
error.WriteFailed => return error.OutOfMemory,
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.FatalError => return error.FatalError,
|
||||
};
|
||||
|
||||
const text_slice = try text.toOwnedSlice();
|
||||
errdefer tree.comp.gpa.free(text_slice);
|
||||
const data_slice = try data.toOwnedSlice();
|
||||
return .{
|
||||
.text = text_slice,
|
||||
.data = data_slice,
|
||||
};
|
||||
}
|
||||
|
||||
fn genDecls(c: *AsmCodeGen) !void {
|
||||
if (c.tree.comp.code_gen_options.debug) {
|
||||
const sources = c.tree.comp.sources.values();
|
||||
for (sources) |source| {
|
||||
try c.data.print(" .file {d} \"{s}\"\n", .{ @intFromEnum(source.id) - 1, source.path });
|
||||
}
|
||||
}
|
||||
|
||||
for (c.tree.root_decls.items) |decl| {
|
||||
switch (decl.get(c.tree)) {
|
||||
.static_assert,
|
||||
.typedef,
|
||||
.struct_decl,
|
||||
.union_decl,
|
||||
.enum_decl,
|
||||
=> {},
|
||||
|
||||
.function => |function| {
|
||||
if (function.body == null) continue;
|
||||
try c.genFn(function);
|
||||
},
|
||||
|
||||
.variable => |variable| try c.genVar(variable),
|
||||
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
try c.text.writeAll(" .section .note.GNU-stack,\"\",@progbits\n");
|
||||
}
|
||||
|
||||
fn genFn(c: *AsmCodeGen, function: Node.Function) !void {
|
||||
return c.todo("Codegen functions", function.name_tok);
|
||||
}
|
||||
|
||||
fn genVar(c: *AsmCodeGen, variable: Node.Variable) !void {
|
||||
const comp = c.comp;
|
||||
const qt = variable.qt;
|
||||
|
||||
const is_tentative = variable.initializer == null;
|
||||
const size = qt.sizeofOrNull(comp) orelse blk: {
|
||||
// tentative array definition assumed to have one element
|
||||
std.debug.assert(is_tentative and qt.is(c.comp, .array));
|
||||
break :blk qt.childType(c.comp).sizeof(comp);
|
||||
};
|
||||
|
||||
const name = c.tree.tokSlice(variable.name_tok);
|
||||
const nat_align = qt.alignof(comp);
|
||||
const alignment = if (qt.is(c.comp, .array) and size >= 16) @max(16, nat_align) else nat_align;
|
||||
|
||||
if (variable.storage_class == .static) {
|
||||
try c.data.print(" .local \"{s}\"\n", .{name});
|
||||
} else {
|
||||
try c.data.print(" .globl \"{s}\"\n", .{name});
|
||||
}
|
||||
|
||||
if (is_tentative and comp.code_gen_options.common) {
|
||||
try c.data.print(" .comm \"{s}\", {d}, {d}\n", .{ name, size, alignment });
|
||||
return;
|
||||
}
|
||||
if (variable.initializer) |init| {
|
||||
if (variable.thread_local and comp.code_gen_options.data_sections) {
|
||||
try c.data.print(" .section .tdata.\"{s}\",\"awT\",@progbits\n", .{name});
|
||||
} else if (variable.thread_local) {
|
||||
try c.data.writeAll(" .section .tdata,\"awT\",@progbits\n");
|
||||
} else if (comp.code_gen_options.data_sections) {
|
||||
try c.data.print(" .section .data.\"{s}\",\"aw\",@progbits\n", .{name});
|
||||
} else {
|
||||
try c.data.writeAll(" .data\n");
|
||||
}
|
||||
|
||||
try c.data.print(" .type \"{s}\", @object\n", .{name});
|
||||
try c.data.print(" .size \"{s}\", {d}\n", .{ name, size });
|
||||
try c.data.print(" .align {d}\n", .{alignment});
|
||||
try c.data.print("\"{s}\":\n", .{name});
|
||||
try c.emitValue(qt, init);
|
||||
return;
|
||||
}
|
||||
if (variable.thread_local and comp.code_gen_options.data_sections) {
|
||||
try c.data.print(" .section .tbss.\"{s}\",\"awT\",@nobits\n", .{name});
|
||||
} else if (variable.thread_local) {
|
||||
try c.data.writeAll(" .section .tbss,\"awT\",@nobits\n");
|
||||
} else if (comp.code_gen_options.data_sections) {
|
||||
try c.data.print(" .section .bss.\"{s}\",\"aw\",@nobits\n", .{name});
|
||||
} else {
|
||||
try c.data.writeAll(" .bss\n");
|
||||
}
|
||||
try c.data.print(" .align {d}\n", .{alignment});
|
||||
try c.data.print("\"{s}\":\n", .{name});
|
||||
try c.data.print(" .zero {d}\n", .{size});
|
||||
}
|
||||
13
lib/compiler/aro/backend.zig
vendored
13
lib/compiler/aro/backend.zig
vendored
@ -1,12 +1,23 @@
|
||||
pub const Assembly = @import("backend/Assembly.zig");
|
||||
pub const CodeGenOptions = @import("backend/CodeGenOptions.zig");
|
||||
pub const Interner = @import("backend/Interner.zig");
|
||||
pub const Ir = @import("backend/Ir.zig");
|
||||
pub const Object = @import("backend/Object.zig");
|
||||
|
||||
pub const CallingConvention = enum {
|
||||
C,
|
||||
c,
|
||||
stdcall,
|
||||
thiscall,
|
||||
vectorcall,
|
||||
fastcall,
|
||||
regcall,
|
||||
riscv_vector,
|
||||
aarch64_sve_pcs,
|
||||
aarch64_vector_pcs,
|
||||
arm_aapcs,
|
||||
arm_aapcs_vfp,
|
||||
x86_64_sysv,
|
||||
x86_64_win,
|
||||
};
|
||||
|
||||
pub const version_str = "aro-zig";
|
||||
|
||||
20
lib/compiler/aro/backend/Assembly.zig
vendored
Normal file
20
lib/compiler/aro/backend/Assembly.zig
vendored
Normal file
@ -0,0 +1,20 @@
|
||||
const std = @import("std");
|
||||
const Allocator = std.mem.Allocator;
|
||||
|
||||
data: []const u8,
|
||||
text: []const u8,
|
||||
|
||||
const Assembly = @This();
|
||||
|
||||
pub fn deinit(self: *const Assembly, gpa: Allocator) void {
|
||||
gpa.free(self.data);
|
||||
gpa.free(self.text);
|
||||
}
|
||||
|
||||
pub fn writeToFile(self: Assembly, file: std.fs.File) !void {
|
||||
var vec: [2]std.posix.iovec_const = .{
|
||||
.{ .base = self.data.ptr, .len = self.data.len },
|
||||
.{ .base = self.text.ptr, .len = self.text.len },
|
||||
};
|
||||
return file.writevAll(&vec);
|
||||
}
|
||||
64
lib/compiler/aro/backend/CodeGenOptions.zig
vendored
Normal file
64
lib/compiler/aro/backend/CodeGenOptions.zig
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
const std = @import("std");
|
||||
|
||||
/// place uninitialized global variables in a common block
|
||||
common: bool,
|
||||
/// Place each function into its own section in the output file if the target supports arbitrary sections
|
||||
func_sections: bool,
|
||||
/// Place each data item into its own section in the output file if the target supports arbitrary sections
|
||||
data_sections: bool,
|
||||
pic_level: PicLevel,
|
||||
/// Generate position-independent code that can only be linked into executables
|
||||
is_pie: bool,
|
||||
optimization_level: OptimizationLevel,
|
||||
/// Generate debug information
|
||||
debug: bool,
|
||||
|
||||
pub const PicLevel = enum(u8) {
|
||||
/// Do not generate position-independent code
|
||||
none = 0,
|
||||
/// Generate position-independent code (PIC) suitable for use in a shared library, if supported for the target machine.
|
||||
one = 1,
|
||||
/// If supported for the target machine, emit position-independent code, suitable for dynamic linking and avoiding
|
||||
/// any limit on the size of the global offset table.
|
||||
two = 2,
|
||||
};
|
||||
|
||||
pub const OptimizationLevel = enum {
|
||||
@"0",
|
||||
@"1",
|
||||
@"2",
|
||||
@"3",
|
||||
/// Optimize for size
|
||||
s,
|
||||
/// Disregard strict standards compliance
|
||||
fast,
|
||||
/// Optimize debugging experience
|
||||
g,
|
||||
/// Optimize aggressively for size rather than speed
|
||||
z,
|
||||
|
||||
const level_map = std.StaticStringMap(OptimizationLevel).initComptime(.{
|
||||
.{ "0", .@"0" },
|
||||
.{ "1", .@"1" },
|
||||
.{ "2", .@"2" },
|
||||
.{ "3", .@"3" },
|
||||
.{ "s", .s },
|
||||
.{ "fast", .fast },
|
||||
.{ "g", .g },
|
||||
.{ "z", .z },
|
||||
});
|
||||
|
||||
pub fn fromString(str: []const u8) ?OptimizationLevel {
|
||||
return level_map.get(str);
|
||||
}
|
||||
};
|
||||
|
||||
pub const default: @This() = .{
|
||||
.common = false,
|
||||
.func_sections = false,
|
||||
.data_sections = false,
|
||||
.pic_level = .none,
|
||||
.is_pie = false,
|
||||
.optimization_level = .@"0",
|
||||
.debug = false,
|
||||
};
|
||||
42
lib/compiler/aro/backend/Interner.zig
vendored
42
lib/compiler/aro/backend/Interner.zig
vendored
@ -8,14 +8,14 @@ const Limb = std.math.big.Limb;
|
||||
|
||||
const Interner = @This();
|
||||
|
||||
map: std.AutoArrayHashMapUnmanaged(void, void) = .empty,
|
||||
map: std.AutoArrayHashMapUnmanaged(void, void) = .{},
|
||||
items: std.MultiArrayList(struct {
|
||||
tag: Tag,
|
||||
data: u32,
|
||||
}) = .{},
|
||||
extra: std.ArrayListUnmanaged(u32) = .empty,
|
||||
limbs: std.ArrayListUnmanaged(Limb) = .empty,
|
||||
strings: std.ArrayListUnmanaged(u8) = .empty,
|
||||
extra: std.ArrayListUnmanaged(u32) = .{},
|
||||
limbs: std.ArrayListUnmanaged(Limb) = .{},
|
||||
strings: std.ArrayListUnmanaged(u8) = .{},
|
||||
|
||||
const KeyAdapter = struct {
|
||||
interner: *const Interner,
|
||||
@ -65,6 +65,7 @@ pub const Key = union(enum) {
|
||||
float: Float,
|
||||
complex: Complex,
|
||||
bytes: []const u8,
|
||||
pointer: Pointer,
|
||||
|
||||
pub const Float = union(enum) {
|
||||
f16: f16,
|
||||
@ -80,6 +81,12 @@ pub const Key = union(enum) {
|
||||
cf80: [2]f80,
|
||||
cf128: [2]f128,
|
||||
};
|
||||
pub const Pointer = struct {
|
||||
/// NodeIndex of decl or compound literal whose address we are offsetting from
|
||||
node: u32,
|
||||
/// Offset in bytes
|
||||
offset: Ref,
|
||||
};
|
||||
|
||||
pub fn hash(key: Key) u32 {
|
||||
var hasher = Hash.init(0);
|
||||
@ -199,6 +206,10 @@ pub const Key = union(enum) {
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
pub fn toBigInt(key: Key, space: *Tag.Int.BigIntSpace) BigIntConst {
|
||||
return key.int.toBigInt(space);
|
||||
}
|
||||
};
|
||||
|
||||
pub const Ref = enum(u32) {
|
||||
@ -303,6 +314,8 @@ pub const Tag = enum(u8) {
|
||||
bytes,
|
||||
/// `data` is `Record`
|
||||
record_ty,
|
||||
/// `data` is Pointer
|
||||
pointer,
|
||||
|
||||
pub const Array = struct {
|
||||
len0: u32,
|
||||
@ -322,6 +335,11 @@ pub const Tag = enum(u8) {
|
||||
child: Ref,
|
||||
};
|
||||
|
||||
pub const Pointer = struct {
|
||||
node: u32,
|
||||
offset: Ref,
|
||||
};
|
||||
|
||||
pub const Int = struct {
|
||||
limbs_index: u32,
|
||||
limbs_len: u32,
|
||||
@ -606,6 +624,15 @@ pub fn put(i: *Interner, gpa: Allocator, key: Key) !Ref {
|
||||
}),
|
||||
});
|
||||
},
|
||||
.pointer => |info| {
|
||||
i.items.appendAssumeCapacity(.{
|
||||
.tag = .pointer,
|
||||
.data = try i.addExtra(gpa, Tag.Pointer{
|
||||
.node = info.node,
|
||||
.offset = info.offset,
|
||||
}),
|
||||
});
|
||||
},
|
||||
.int => |repr| int: {
|
||||
var space: Tag.Int.BigIntSpace = undefined;
|
||||
const big = repr.toBigInt(&space);
|
||||
@ -792,6 +819,13 @@ pub fn get(i: *const Interner, ref: Ref) Key {
|
||||
.child = vector_ty.child,
|
||||
} };
|
||||
},
|
||||
.pointer => {
|
||||
const pointer = i.extraData(Tag.Pointer, data);
|
||||
return .{ .pointer = .{
|
||||
.node = pointer.node,
|
||||
.offset = pointer.offset,
|
||||
} };
|
||||
},
|
||||
.u32 => .{ .int = .{ .u64 = data } },
|
||||
.i32 => .{ .int = .{ .i64 = @as(i32, @bitCast(data)) } },
|
||||
.int_positive, .int_negative => {
|
||||
|
||||
21
lib/compiler/aro/backend/Ir.zig
vendored
21
lib/compiler/aro/backend/Ir.zig
vendored
@ -26,9 +26,9 @@ pub const Builder = struct {
|
||||
arena: std.heap.ArenaAllocator,
|
||||
interner: *Interner,
|
||||
|
||||
decls: std.StringArrayHashMapUnmanaged(Decl) = .empty,
|
||||
decls: std.StringArrayHashMapUnmanaged(Decl) = .{},
|
||||
instructions: std.MultiArrayList(Ir.Inst) = .{},
|
||||
body: std.ArrayListUnmanaged(Ref) = .empty,
|
||||
body: std.ArrayListUnmanaged(Ref) = .{},
|
||||
alloc_count: u32 = 0,
|
||||
arg_count: u32 = 0,
|
||||
current_label: Ref = undefined,
|
||||
@ -382,13 +382,14 @@ const ATTRIBUTE = std.Io.tty.Color.bright_yellow;
|
||||
|
||||
const RefMap = std.AutoArrayHashMap(Ref, void);
|
||||
|
||||
pub fn dump(ir: *const Ir, gpa: Allocator, config: std.Io.tty.Config, w: anytype) !void {
|
||||
pub fn dump(ir: *const Ir, gpa: Allocator, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
for (ir.decls.keys(), ir.decls.values()) |name, *decl| {
|
||||
try ir.dumpDecl(decl, gpa, name, config, w);
|
||||
}
|
||||
try w.flush();
|
||||
}
|
||||
|
||||
fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
const tags = decl.instructions.items(.tag);
|
||||
const data = decl.instructions.items(.data);
|
||||
|
||||
@ -609,7 +610,7 @@ fn dumpDecl(ir: *const Ir, decl: *const Decl, gpa: Allocator, name: []const u8,
|
||||
try w.writeAll("}\n\n");
|
||||
}
|
||||
|
||||
fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
const ty = ir.interner.get(ty_ref);
|
||||
try config.setColor(w, TYPE);
|
||||
switch (ty) {
|
||||
@ -639,7 +640,7 @@ fn writeType(ir: Ir, ty_ref: Interner.Ref, config: std.Io.tty.Config, w: anytype
|
||||
}
|
||||
}
|
||||
|
||||
fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
try config.setColor(w, LITERAL);
|
||||
const key = ir.interner.get(val);
|
||||
switch (key) {
|
||||
@ -650,12 +651,12 @@ fn writeValue(ir: Ir, val: Interner.Ref, config: std.Io.tty.Config, w: anytype)
|
||||
.float => |repr| switch (repr) {
|
||||
inline else => |x| return w.print("{d}", .{@as(f64, @floatCast(x))}),
|
||||
},
|
||||
.bytes => |b| return std.zig.stringEscape(b, "", .{}, w),
|
||||
.bytes => |b| return std.zig.stringEscape(b, w),
|
||||
else => unreachable, // not a value
|
||||
}
|
||||
}
|
||||
|
||||
fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
assert(ref != .none);
|
||||
const index = @intFromEnum(ref);
|
||||
const ty_ref = decl.instructions.items(.ty)[index];
|
||||
@ -678,7 +679,7 @@ fn writeRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.I
|
||||
try w.print(" %{d}", .{ref_index});
|
||||
}
|
||||
|
||||
fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
try ref_map.put(ref, {});
|
||||
try w.writeAll(" ");
|
||||
try ir.writeRef(decl, ref_map, ref, config, w);
|
||||
@ -687,7 +688,7 @@ fn writeNewRef(ir: Ir, decl: *const Decl, ref_map: *RefMap, ref: Ref, config: st
|
||||
try config.setColor(w, INST);
|
||||
}
|
||||
|
||||
fn writeLabel(decl: *const Decl, label_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: anytype) !void {
|
||||
fn writeLabel(decl: *const Decl, label_map: *RefMap, ref: Ref, config: std.Io.tty.Config, w: *std.Io.Writer) !void {
|
||||
assert(ref != .none);
|
||||
const index = @intFromEnum(ref);
|
||||
const label = decl.instructions.items(.data)[index].label;
|
||||
|
||||
4
lib/compiler/aro/backend/Object.zig
vendored
4
lib/compiler/aro/backend/Object.zig
vendored
@ -65,9 +65,9 @@ pub fn addRelocation(obj: *Object, name: []const u8, section: Section, address:
|
||||
}
|
||||
}
|
||||
|
||||
pub fn finish(obj: *Object, file: std.fs.File) !void {
|
||||
pub fn finish(obj: *Object, w: *std.Io.Writer) !void {
|
||||
switch (obj.format) {
|
||||
.elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(file),
|
||||
.elf => return @as(*Elf, @alignCast(@fieldParentPtr("obj", obj))).finish(w),
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
47
lib/compiler/aro/backend/Object/Elf.zig
vendored
47
lib/compiler/aro/backend/Object/Elf.zig
vendored
@ -5,7 +5,7 @@ const Object = @import("../Object.zig");
|
||||
|
||||
const Section = struct {
|
||||
data: std.array_list.Managed(u8),
|
||||
relocations: std.ArrayListUnmanaged(Relocation) = .empty,
|
||||
relocations: std.ArrayListUnmanaged(Relocation) = .{},
|
||||
flags: u64,
|
||||
type: u32,
|
||||
index: u16 = undefined,
|
||||
@ -37,9 +37,9 @@ const Elf = @This();
|
||||
|
||||
obj: Object,
|
||||
/// The keys are owned by the Codegen.tree
|
||||
sections: std.StringHashMapUnmanaged(*Section) = .empty,
|
||||
local_symbols: std.StringHashMapUnmanaged(*Symbol) = .empty,
|
||||
global_symbols: std.StringHashMapUnmanaged(*Symbol) = .empty,
|
||||
sections: std.StringHashMapUnmanaged(*Section) = .{},
|
||||
local_symbols: std.StringHashMapUnmanaged(*Symbol) = .{},
|
||||
global_symbols: std.StringHashMapUnmanaged(*Symbol) = .{},
|
||||
unnamed_symbol_mangle: u32 = 0,
|
||||
strtab_len: u64 = strtab_default.len,
|
||||
arena: std.heap.ArenaAllocator,
|
||||
@ -170,12 +170,8 @@ pub fn addRelocation(elf: *Elf, name: []const u8, section_kind: Object.Section,
|
||||
/// relocations
|
||||
/// strtab
|
||||
/// section headers
|
||||
pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
var file_buffer: [1024]u8 = undefined;
|
||||
var file_writer = file.writer(&file_buffer);
|
||||
const w = &file_writer.interface;
|
||||
|
||||
var num_sections: std.elf.Elf64_Half = additional_sections;
|
||||
pub fn finish(elf: *Elf, w: *std.Io.Writer) !void {
|
||||
var num_sections: std.elf.Half = additional_sections;
|
||||
var relocations_len: std.elf.Elf64_Off = 0;
|
||||
var sections_len: std.elf.Elf64_Off = 0;
|
||||
{
|
||||
@ -196,8 +192,9 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
const strtab_offset = rela_offset + relocations_len;
|
||||
const sh_offset = strtab_offset + elf.strtab_len;
|
||||
const sh_offset_aligned = std.mem.alignForward(u64, sh_offset, 16);
|
||||
const endian = elf.obj.target.cpu.arch.endian();
|
||||
|
||||
const elf_header = std.elf.Elf64_Ehdr{
|
||||
const elf_header: std.elf.Elf64_Ehdr = .{
|
||||
.e_ident = .{ 0x7F, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
|
||||
.e_type = std.elf.ET.REL, // we only produce relocatables
|
||||
.e_machine = elf.obj.target.toElfMachine(),
|
||||
@ -213,7 +210,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
.e_shnum = num_sections,
|
||||
.e_shstrndx = strtab_index,
|
||||
};
|
||||
try w.writeStruct(elf_header);
|
||||
try w.writeStruct(elf_header, endian);
|
||||
|
||||
// write contents of sections
|
||||
{
|
||||
@ -222,13 +219,13 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
}
|
||||
|
||||
// pad to 8 bytes
|
||||
try w.writeByteNTimes(0, @intCast(symtab_offset_aligned - symtab_offset));
|
||||
try w.splatByteAll(0, @intCast(symtab_offset_aligned - symtab_offset));
|
||||
|
||||
var name_offset: u32 = strtab_default.len;
|
||||
// write symbols
|
||||
{
|
||||
// first symbol must be null
|
||||
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Sym));
|
||||
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Sym), endian);
|
||||
|
||||
var sym_index: u16 = 1;
|
||||
var it = elf.local_symbols.iterator();
|
||||
@ -241,7 +238,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
.st_shndx = if (sym.section) |some| some.index else 0,
|
||||
.st_value = sym.offset,
|
||||
.st_size = sym.size,
|
||||
});
|
||||
}, endian);
|
||||
sym.index = sym_index;
|
||||
sym_index += 1;
|
||||
name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte
|
||||
@ -256,7 +253,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
.st_shndx = if (sym.section) |some| some.index else 0,
|
||||
.st_value = sym.offset,
|
||||
.st_size = sym.size,
|
||||
});
|
||||
}, endian);
|
||||
sym.index = sym_index;
|
||||
sym_index += 1;
|
||||
name_offset += @intCast(entry.key_ptr.len + 1); // +1 for null byte
|
||||
@ -272,7 +269,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
.r_offset = rela.offset,
|
||||
.r_addend = rela.addend,
|
||||
.r_info = (@as(u64, rela.symbol.index) << 32) | rela.type,
|
||||
});
|
||||
}, endian);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -294,13 +291,13 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
}
|
||||
|
||||
// pad to 16 bytes
|
||||
try w.writeByteNTimes(0, @intCast(sh_offset_aligned - sh_offset));
|
||||
try w.splatByteAll(0, @intCast(sh_offset_aligned - sh_offset));
|
||||
// mandatory null header
|
||||
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Shdr));
|
||||
try w.writeStruct(std.mem.zeroes(std.elf.Elf64_Shdr), endian);
|
||||
|
||||
// write strtab section header
|
||||
{
|
||||
const sect_header = std.elf.Elf64_Shdr{
|
||||
const sect_header: std.elf.Elf64_Shdr = .{
|
||||
.sh_name = strtab_name,
|
||||
.sh_type = std.elf.SHT_STRTAB,
|
||||
.sh_flags = 0,
|
||||
@ -312,12 +309,12 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
.sh_addralign = 1,
|
||||
.sh_entsize = 0,
|
||||
};
|
||||
try w.writeStruct(sect_header);
|
||||
try w.writeStruct(sect_header, endian);
|
||||
}
|
||||
|
||||
// write symtab section header
|
||||
{
|
||||
const sect_header = std.elf.Elf64_Shdr{
|
||||
const sect_header: std.elf.Elf64_Shdr = .{
|
||||
.sh_name = symtab_name,
|
||||
.sh_type = std.elf.SHT_SYMTAB,
|
||||
.sh_flags = 0,
|
||||
@ -329,7 +326,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
.sh_addralign = 8,
|
||||
.sh_entsize = @sizeOf(std.elf.Elf64_Sym),
|
||||
};
|
||||
try w.writeStruct(sect_header);
|
||||
try w.writeStruct(sect_header, endian);
|
||||
}
|
||||
|
||||
// remaining section headers
|
||||
@ -352,7 +349,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
.sh_info = 0,
|
||||
.sh_addralign = if (sect.flags & std.elf.SHF_EXECINSTR != 0) 16 else 1,
|
||||
.sh_entsize = 0,
|
||||
});
|
||||
}, endian);
|
||||
|
||||
if (rela_count != 0) {
|
||||
const size = rela_count * @sizeOf(std.elf.Elf64_Rela);
|
||||
@ -367,7 +364,7 @@ pub fn finish(elf: *Elf, file: std.fs.File) !void {
|
||||
.sh_info = sect.index,
|
||||
.sh_addralign = 8,
|
||||
.sh_entsize = @sizeOf(std.elf.Elf64_Rela),
|
||||
});
|
||||
}, endian);
|
||||
rela_sect_offset += size;
|
||||
}
|
||||
|
||||
|
||||
80
lib/compiler/aro/main.zig
vendored
Normal file
80
lib/compiler/aro/main.zig
vendored
Normal file
@ -0,0 +1,80 @@
|
||||
const std = @import("std");
|
||||
const Allocator = mem.Allocator;
|
||||
const mem = std.mem;
|
||||
const process = std.process;
|
||||
const aro = @import("aro");
|
||||
const Compilation = aro.Compilation;
|
||||
const Diagnostics = aro.Diagnostics;
|
||||
const Driver = aro.Driver;
|
||||
const Toolchain = aro.Toolchain;
|
||||
const assembly_backend = @import("assembly_backend");
|
||||
|
||||
var general_purpose_allocator = std.heap.GeneralPurposeAllocator(.{}){};
|
||||
|
||||
pub fn main() u8 {
|
||||
const gpa = if (@import("builtin").link_libc)
|
||||
std.heap.raw_c_allocator
|
||||
else
|
||||
general_purpose_allocator.allocator();
|
||||
defer if (!@import("builtin").link_libc) {
|
||||
_ = general_purpose_allocator.deinit();
|
||||
};
|
||||
|
||||
var arena_instance = std.heap.ArenaAllocator.init(gpa);
|
||||
defer arena_instance.deinit();
|
||||
const arena = arena_instance.allocator();
|
||||
|
||||
const fast_exit = @import("builtin").mode != .Debug;
|
||||
|
||||
const args = process.argsAlloc(arena) catch {
|
||||
std.debug.print("out of memory\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
};
|
||||
|
||||
const aro_name = std.fs.selfExePathAlloc(gpa) catch {
|
||||
std.debug.print("unable to find Aro executable path\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
};
|
||||
defer gpa.free(aro_name);
|
||||
|
||||
var stderr_buf: [1024]u8 = undefined;
|
||||
var stderr = std.fs.File.stderr().writer(&stderr_buf);
|
||||
var diagnostics: Diagnostics = .{
|
||||
.output = .{ .to_writer = .{
|
||||
.color = .detect(stderr.file),
|
||||
.writer = &stderr.interface,
|
||||
} },
|
||||
};
|
||||
|
||||
var comp = Compilation.initDefault(gpa, arena, &diagnostics, std.fs.cwd()) catch |er| switch (er) {
|
||||
error.OutOfMemory => {
|
||||
std.debug.print("out of memory\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
};
|
||||
defer comp.deinit();
|
||||
|
||||
var driver: Driver = .{ .comp = &comp, .aro_name = aro_name, .diagnostics = &diagnostics };
|
||||
defer driver.deinit();
|
||||
|
||||
var toolchain: Toolchain = .{ .driver = &driver, .filesystem = .{ .real = comp.cwd } };
|
||||
defer toolchain.deinit();
|
||||
|
||||
driver.main(&toolchain, args, fast_exit, assembly_backend.genAsm) catch |er| switch (er) {
|
||||
error.OutOfMemory => {
|
||||
std.debug.print("out of memory\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
error.FatalError => {
|
||||
driver.printDiagnosticsStats();
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
};
|
||||
if (fast_exit) process.exit(@intFromBool(comp.diagnostics.errors != 0));
|
||||
return @intFromBool(diagnostics.errors != 0);
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
301
lib/compiler/translate-c/lib/c_builtins.zig
Normal file
301
lib/compiler/translate-c/lib/c_builtins.zig
Normal file
@ -0,0 +1,301 @@
|
||||
const std = @import("std");
|
||||
|
||||
/// Standard C Library bug: The absolute value of the most negative integer remains negative.
|
||||
pub inline fn abs(val: c_int) c_int {
|
||||
return if (val == std.math.minInt(c_int)) val else @intCast(@abs(val));
|
||||
}
|
||||
|
||||
pub inline fn assume(cond: bool) void {
|
||||
if (!cond) unreachable;
|
||||
}
|
||||
|
||||
pub inline fn bswap16(val: u16) u16 {
|
||||
return @byteSwap(val);
|
||||
}
|
||||
|
||||
pub inline fn bswap32(val: u32) u32 {
|
||||
return @byteSwap(val);
|
||||
}
|
||||
|
||||
pub inline fn bswap64(val: u64) u64 {
|
||||
return @byteSwap(val);
|
||||
}
|
||||
|
||||
pub inline fn ceilf(val: f32) f32 {
|
||||
return @ceil(val);
|
||||
}
|
||||
|
||||
pub inline fn ceil(val: f64) f64 {
|
||||
return @ceil(val);
|
||||
}
|
||||
|
||||
/// Returns the number of leading 0-bits in x, starting at the most significant bit position.
|
||||
/// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
|
||||
pub inline fn clz(val: c_uint) c_int {
|
||||
@setRuntimeSafety(false);
|
||||
return @as(c_int, @bitCast(@as(c_uint, @clz(val))));
|
||||
}
|
||||
|
||||
pub inline fn constant_p(expr: anytype) c_int {
|
||||
_ = expr;
|
||||
return @intFromBool(false);
|
||||
}
|
||||
|
||||
pub inline fn cosf(val: f32) f32 {
|
||||
return @cos(val);
|
||||
}
|
||||
|
||||
pub inline fn cos(val: f64) f64 {
|
||||
return @cos(val);
|
||||
}
|
||||
|
||||
/// Returns the number of trailing 0-bits in val, starting at the least significant bit position.
|
||||
/// In C if `val` is 0, the result is undefined; in zig it's the number of bits in a c_uint
|
||||
pub inline fn ctz(val: c_uint) c_int {
|
||||
@setRuntimeSafety(false);
|
||||
return @as(c_int, @bitCast(@as(c_uint, @ctz(val))));
|
||||
}
|
||||
|
||||
pub inline fn exp2f(val: f32) f32 {
|
||||
return @exp2(val);
|
||||
}
|
||||
|
||||
pub inline fn exp2(val: f64) f64 {
|
||||
return @exp2(val);
|
||||
}
|
||||
|
||||
pub inline fn expf(val: f32) f32 {
|
||||
return @exp(val);
|
||||
}
|
||||
|
||||
pub inline fn exp(val: f64) f64 {
|
||||
return @exp(val);
|
||||
}
|
||||
|
||||
/// The return value of __builtin_expect is `expr`. `c` is the expected value
|
||||
/// of `expr` and is used as a hint to the compiler in C. Here it is unused.
|
||||
pub inline fn expect(expr: c_long, c: c_long) c_long {
|
||||
_ = c;
|
||||
return expr;
|
||||
}
|
||||
|
||||
pub inline fn fabsf(val: f32) f32 {
|
||||
return @abs(val);
|
||||
}
|
||||
|
||||
pub inline fn fabs(val: f64) f64 {
|
||||
return @abs(val);
|
||||
}
|
||||
|
||||
pub inline fn floorf(val: f32) f32 {
|
||||
return @floor(val);
|
||||
}
|
||||
|
||||
pub inline fn floor(val: f64) f64 {
|
||||
return @floor(val);
|
||||
}
|
||||
|
||||
pub inline fn has_builtin(func: anytype) c_int {
|
||||
_ = func;
|
||||
return @intFromBool(true);
|
||||
}
|
||||
|
||||
pub inline fn huge_valf() f32 {
|
||||
return std.math.inf(f32);
|
||||
}
|
||||
|
||||
pub inline fn inff() f32 {
|
||||
return std.math.inf(f32);
|
||||
}
|
||||
|
||||
/// Similar to isinf, except the return value is -1 for an argument of -Inf and 1 for an argument of +Inf.
|
||||
pub inline fn isinf_sign(x: anytype) c_int {
|
||||
if (!std.math.isInf(x)) return 0;
|
||||
return if (std.math.isPositiveInf(x)) 1 else -1;
|
||||
}
|
||||
|
||||
pub inline fn isinf(x: anytype) c_int {
|
||||
return @intFromBool(std.math.isInf(x));
|
||||
}
|
||||
|
||||
pub inline fn isnan(x: anytype) c_int {
|
||||
return @intFromBool(std.math.isNan(x));
|
||||
}
|
||||
|
||||
/// Standard C Library bug: The absolute value of the most negative integer remains negative.
|
||||
pub inline fn labs(val: c_long) c_long {
|
||||
return if (val == std.math.minInt(c_long)) val else @intCast(@abs(val));
|
||||
}
|
||||
|
||||
/// Standard C Library bug: The absolute value of the most negative integer remains negative.
|
||||
pub inline fn llabs(val: c_longlong) c_longlong {
|
||||
return if (val == std.math.minInt(c_longlong)) val else @intCast(@abs(val));
|
||||
}
|
||||
|
||||
pub inline fn log10f(val: f32) f32 {
|
||||
return @log10(val);
|
||||
}
|
||||
|
||||
pub inline fn log10(val: f64) f64 {
|
||||
return @log10(val);
|
||||
}
|
||||
|
||||
pub inline fn log2f(val: f32) f32 {
|
||||
return @log2(val);
|
||||
}
|
||||
|
||||
pub inline fn log2(val: f64) f64 {
|
||||
return @log2(val);
|
||||
}
|
||||
|
||||
pub inline fn logf(val: f32) f32 {
|
||||
return @log(val);
|
||||
}
|
||||
|
||||
pub inline fn log(val: f64) f64 {
|
||||
return @log(val);
|
||||
}
|
||||
|
||||
pub inline fn memcpy_chk(
|
||||
noalias dst: ?*anyopaque,
|
||||
noalias src: ?*const anyopaque,
|
||||
len: usize,
|
||||
remaining: usize,
|
||||
) ?*anyopaque {
|
||||
if (len > remaining) @panic("__builtin___memcpy_chk called with len > remaining");
|
||||
if (len > 0) @memcpy(
|
||||
@as([*]u8, @ptrCast(dst.?))[0..len],
|
||||
@as([*]const u8, @ptrCast(src.?)),
|
||||
);
|
||||
return dst;
|
||||
}
|
||||
|
||||
pub inline fn memcpy(
|
||||
noalias dst: ?*anyopaque,
|
||||
noalias src: ?*const anyopaque,
|
||||
len: usize,
|
||||
) ?*anyopaque {
|
||||
if (len > 0) @memcpy(
|
||||
@as([*]u8, @ptrCast(dst.?))[0..len],
|
||||
@as([*]const u8, @ptrCast(src.?)),
|
||||
);
|
||||
return dst;
|
||||
}
|
||||
|
||||
pub inline fn memset_chk(
|
||||
dst: ?*anyopaque,
|
||||
val: c_int,
|
||||
len: usize,
|
||||
remaining: usize,
|
||||
) ?*anyopaque {
|
||||
if (len > remaining) @panic("__builtin___memset_chk called with len > remaining");
|
||||
const dst_cast = @as([*c]u8, @ptrCast(dst));
|
||||
@memset(dst_cast[0..len], @as(u8, @bitCast(@as(i8, @truncate(val)))));
|
||||
return dst;
|
||||
}
|
||||
|
||||
pub inline fn memset(dst: ?*anyopaque, val: c_int, len: usize) ?*anyopaque {
|
||||
const dst_cast = @as([*c]u8, @ptrCast(dst));
|
||||
@memset(dst_cast[0..len], @as(u8, @bitCast(@as(i8, @truncate(val)))));
|
||||
return dst;
|
||||
}
|
||||
|
||||
pub fn mul_overflow(a: anytype, b: anytype, result: *@TypeOf(a, b)) c_int {
|
||||
const res = @mulWithOverflow(a, b);
|
||||
result.* = res[0];
|
||||
return res[1];
|
||||
}
|
||||
|
||||
/// returns a quiet NaN. Quiet NaNs have many representations; tagp is used to select one in an
|
||||
/// implementation-defined way.
|
||||
/// This implementation is based on the description for nan provided in the GCC docs at
|
||||
/// https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html#index-_005f_005fbuiltin_005fnan
|
||||
/// Comment is reproduced below:
|
||||
/// Since ISO C99 defines this function in terms of strtod, which we do not implement, a description
|
||||
/// of the parsing is in order.
|
||||
/// The string is parsed as by strtol; that is, the base is recognized by leading ‘0’ or ‘0x’ prefixes.
|
||||
/// The number parsed is placed in the significand such that the least significant bit of the number is
|
||||
/// at the least significant bit of the significand.
|
||||
/// The number is truncated to fit the significand field provided.
|
||||
/// The significand is forced to be a quiet NaN.
|
||||
///
|
||||
/// If tagp contains any non-numeric characters, the function returns a NaN whose significand is zero.
|
||||
/// If tagp is empty, the function returns a NaN whose significand is zero.
|
||||
pub inline fn nanf(tagp: []const u8) f32 {
|
||||
const parsed = std.fmt.parseUnsigned(c_ulong, tagp, 0) catch 0;
|
||||
const bits: u23 = @truncate(parsed); // single-precision float trailing significand is 23 bits
|
||||
return @bitCast(@as(u32, bits) | @as(u32, @bitCast(std.math.nan(f32))));
|
||||
}
|
||||
|
||||
pub inline fn object_size(ptr: ?*const anyopaque, ty: c_int) usize {
|
||||
_ = ptr;
|
||||
// clang semantics match gcc's: https://gcc.gnu.org/onlinedocs/gcc/Object-Size-Checking.html
|
||||
// If it is not possible to determine which objects ptr points to at compile time,
|
||||
// object_size should return (size_t) -1 for type 0 or 1 and (size_t) 0
|
||||
// for type 2 or 3.
|
||||
if (ty == 0 or ty == 1) return @as(usize, @bitCast(-@as(isize, 1)));
|
||||
if (ty == 2 or ty == 3) return 0;
|
||||
unreachable;
|
||||
}
|
||||
|
||||
/// popcount of a c_uint will never exceed the capacity of a c_int
|
||||
pub inline fn popcount(val: c_uint) c_int {
|
||||
@setRuntimeSafety(false);
|
||||
return @as(c_int, @bitCast(@as(c_uint, @popCount(val))));
|
||||
}
|
||||
|
||||
pub inline fn roundf(val: f32) f32 {
|
||||
return @round(val);
|
||||
}
|
||||
|
||||
pub inline fn round(val: f64) f64 {
|
||||
return @round(val);
|
||||
}
|
||||
|
||||
pub inline fn signbitf(val: f32) c_int {
|
||||
return @intFromBool(std.math.signbit(val));
|
||||
}
|
||||
|
||||
pub inline fn signbit(val: f64) c_int {
|
||||
return @intFromBool(std.math.signbit(val));
|
||||
}
|
||||
|
||||
pub inline fn sinf(val: f32) f32 {
|
||||
return @sin(val);
|
||||
}
|
||||
|
||||
pub inline fn sin(val: f64) f64 {
|
||||
return @sin(val);
|
||||
}
|
||||
|
||||
pub inline fn sqrtf(val: f32) f32 {
|
||||
return @sqrt(val);
|
||||
}
|
||||
|
||||
pub inline fn sqrt(val: f64) f64 {
|
||||
return @sqrt(val);
|
||||
}
|
||||
|
||||
pub inline fn strcmp(s1: [*c]const u8, s2: [*c]const u8) c_int {
|
||||
return switch (std.mem.orderZ(u8, s1, s2)) {
|
||||
.lt => -1,
|
||||
.eq => 0,
|
||||
.gt => 1,
|
||||
};
|
||||
}
|
||||
|
||||
pub inline fn strlen(s: [*c]const u8) usize {
|
||||
return std.mem.sliceTo(s, 0).len;
|
||||
}
|
||||
|
||||
pub inline fn truncf(val: f32) f32 {
|
||||
return @trunc(val);
|
||||
}
|
||||
|
||||
pub inline fn trunc(val: f64) f64 {
|
||||
return @trunc(val);
|
||||
}
|
||||
|
||||
pub inline fn @"unreachable"() noreturn {
|
||||
unreachable;
|
||||
}
|
||||
413
lib/compiler/translate-c/lib/helpers.zig
Normal file
413
lib/compiler/translate-c/lib/helpers.zig
Normal file
@ -0,0 +1,413 @@
|
||||
const std = @import("std");
|
||||
|
||||
/// "Usual arithmetic conversions" from C11 standard 6.3.1.8
|
||||
pub fn ArithmeticConversion(comptime A: type, comptime B: type) type {
|
||||
if (A == c_longdouble or B == c_longdouble) return c_longdouble;
|
||||
if (A == f80 or B == f80) return f80;
|
||||
if (A == f64 or B == f64) return f64;
|
||||
if (A == f32 or B == f32) return f32;
|
||||
|
||||
const A_Promoted = PromotedIntType(A);
|
||||
const B_Promoted = PromotedIntType(B);
|
||||
comptime {
|
||||
std.debug.assert(integerRank(A_Promoted) >= integerRank(c_int));
|
||||
std.debug.assert(integerRank(B_Promoted) >= integerRank(c_int));
|
||||
}
|
||||
|
||||
if (A_Promoted == B_Promoted) return A_Promoted;
|
||||
|
||||
const a_signed = @typeInfo(A_Promoted).int.signedness == .signed;
|
||||
const b_signed = @typeInfo(B_Promoted).int.signedness == .signed;
|
||||
|
||||
if (a_signed == b_signed) {
|
||||
return if (integerRank(A_Promoted) > integerRank(B_Promoted)) A_Promoted else B_Promoted;
|
||||
}
|
||||
|
||||
const SignedType = if (a_signed) A_Promoted else B_Promoted;
|
||||
const UnsignedType = if (!a_signed) A_Promoted else B_Promoted;
|
||||
|
||||
if (integerRank(UnsignedType) >= integerRank(SignedType)) return UnsignedType;
|
||||
|
||||
if (std.math.maxInt(SignedType) >= std.math.maxInt(UnsignedType)) return SignedType;
|
||||
|
||||
return ToUnsigned(SignedType);
|
||||
}
|
||||
|
||||
/// Integer promotion described in C11 6.3.1.1.2
|
||||
fn PromotedIntType(comptime T: type) type {
|
||||
return switch (T) {
|
||||
bool, c_short => c_int,
|
||||
c_ushort => if (@sizeOf(c_ushort) == @sizeOf(c_int)) c_uint else c_int,
|
||||
c_int, c_uint, c_long, c_ulong, c_longlong, c_ulonglong => T,
|
||||
else => switch (@typeInfo(T)) {
|
||||
.comptime_int => @compileError("Cannot promote `" ++ @typeName(T) ++ "`; a fixed-size number type is required"),
|
||||
// promote to c_int if it can represent all values of T
|
||||
.int => |int_info| if (int_info.bits < @bitSizeOf(c_int))
|
||||
c_int
|
||||
// otherwise, restore the original C type
|
||||
else if (int_info.bits == @bitSizeOf(c_int))
|
||||
if (int_info.signedness == .unsigned) c_uint else c_int
|
||||
else if (int_info.bits <= @bitSizeOf(c_long))
|
||||
if (int_info.signedness == .unsigned) c_ulong else c_long
|
||||
else if (int_info.bits <= @bitSizeOf(c_longlong))
|
||||
if (int_info.signedness == .unsigned) c_ulonglong else c_longlong
|
||||
else
|
||||
@compileError("Cannot promote `" ++ @typeName(T) ++ "`; a C ABI type is required"),
|
||||
else => @compileError("Attempted to promote invalid type `" ++ @typeName(T) ++ "`"),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// C11 6.3.1.1.1
|
||||
fn integerRank(comptime T: type) u8 {
|
||||
return switch (T) {
|
||||
bool => 0,
|
||||
u8, i8 => 1,
|
||||
c_short, c_ushort => 2,
|
||||
c_int, c_uint => 3,
|
||||
c_long, c_ulong => 4,
|
||||
c_longlong, c_ulonglong => 5,
|
||||
else => @compileError("integer rank not supported for `" ++ @typeName(T) ++ "`"),
|
||||
};
|
||||
}
|
||||
|
||||
fn ToUnsigned(comptime T: type) type {
|
||||
return switch (T) {
|
||||
c_int => c_uint,
|
||||
c_long => c_ulong,
|
||||
c_longlong => c_ulonglong,
|
||||
else => @compileError("Cannot convert `" ++ @typeName(T) ++ "` to unsigned"),
|
||||
};
|
||||
}
|
||||
|
||||
/// Constructs a [*c] pointer with the const and volatile annotations
|
||||
/// from SelfType for pointing to a C flexible array of ElementType.
|
||||
pub fn FlexibleArrayType(comptime SelfType: type, comptime ElementType: type) type {
|
||||
switch (@typeInfo(SelfType)) {
|
||||
.pointer => |ptr| {
|
||||
return @Type(.{ .pointer = .{
|
||||
.size = .c,
|
||||
.is_const = ptr.is_const,
|
||||
.is_volatile = ptr.is_volatile,
|
||||
.alignment = @alignOf(ElementType),
|
||||
.address_space = .generic,
|
||||
.child = ElementType,
|
||||
.is_allowzero = true,
|
||||
.sentinel_ptr = null,
|
||||
} });
|
||||
},
|
||||
else => |info| @compileError("Invalid self type \"" ++ @tagName(info) ++ "\" for flexible array getter: " ++ @typeName(SelfType)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Promote the type of an integer literal until it fits as C would.
|
||||
pub fn promoteIntLiteral(
|
||||
comptime SuffixType: type,
|
||||
comptime number: comptime_int,
|
||||
comptime base: CIntLiteralBase,
|
||||
) PromoteIntLiteralReturnType(SuffixType, number, base) {
|
||||
return number;
|
||||
}
|
||||
|
||||
const CIntLiteralBase = enum { decimal, octal, hex };
|
||||
|
||||
fn PromoteIntLiteralReturnType(comptime SuffixType: type, comptime number: comptime_int, comptime base: CIntLiteralBase) type {
|
||||
const signed_decimal = [_]type{ c_int, c_long, c_longlong, c_ulonglong };
|
||||
const signed_oct_hex = [_]type{ c_int, c_uint, c_long, c_ulong, c_longlong, c_ulonglong };
|
||||
const unsigned = [_]type{ c_uint, c_ulong, c_ulonglong };
|
||||
|
||||
const list: []const type = if (@typeInfo(SuffixType).int.signedness == .unsigned)
|
||||
&unsigned
|
||||
else if (base == .decimal)
|
||||
&signed_decimal
|
||||
else
|
||||
&signed_oct_hex;
|
||||
|
||||
var pos = std.mem.indexOfScalar(type, list, SuffixType).?;
|
||||
while (pos < list.len) : (pos += 1) {
|
||||
if (number >= std.math.minInt(list[pos]) and number <= std.math.maxInt(list[pos])) {
|
||||
return list[pos];
|
||||
}
|
||||
}
|
||||
|
||||
@compileError("Integer literal is too large");
|
||||
}
|
||||
|
||||
/// Convert from clang __builtin_shufflevector index to Zig @shuffle index
|
||||
/// clang requires __builtin_shufflevector index arguments to be integer constants.
|
||||
/// negative values for `this_index` indicate "don't care".
|
||||
/// clang enforces that `this_index` is less than the total number of vector elements
|
||||
/// See https://ziglang.org/documentation/master/#shuffle
|
||||
/// See https://clang.llvm.org/docs/LanguageExtensions.html#langext-builtin-shufflevector
|
||||
pub fn shuffleVectorIndex(comptime this_index: c_int, comptime source_vector_len: usize) i32 {
|
||||
const positive_index = std.math.cast(usize, this_index) orelse return undefined;
|
||||
if (positive_index < source_vector_len) return @as(i32, @intCast(this_index));
|
||||
const b_index = positive_index - source_vector_len;
|
||||
return ~@as(i32, @intCast(b_index));
|
||||
}
|
||||
|
||||
/// C `%` operator for signed integers
|
||||
/// C standard states: "If the quotient a/b is representable, the expression (a/b)*b + a%b shall equal a"
|
||||
/// The quotient is not representable if denominator is zero, or if numerator is the minimum integer for
|
||||
/// the type and denominator is -1. C has undefined behavior for those two cases; this function has safety
|
||||
/// checked undefined behavior
|
||||
pub fn signedRemainder(numerator: anytype, denominator: anytype) @TypeOf(numerator, denominator) {
|
||||
std.debug.assert(@typeInfo(@TypeOf(numerator, denominator)).int.signedness == .signed);
|
||||
if (denominator > 0) return @rem(numerator, denominator);
|
||||
return numerator - @divTrunc(numerator, denominator) * denominator;
|
||||
}
|
||||
|
||||
/// Given a type and value, cast the value to the type as c would.
|
||||
pub fn cast(comptime DestType: type, target: anytype) DestType {
|
||||
// this function should behave like transCCast in translate-c, except it's for macros
|
||||
const SourceType = @TypeOf(target);
|
||||
switch (@typeInfo(DestType)) {
|
||||
.@"fn" => return castToPtr(*const DestType, SourceType, target),
|
||||
.pointer => return castToPtr(DestType, SourceType, target),
|
||||
.optional => |dest_opt| {
|
||||
if (@typeInfo(dest_opt.child) == .pointer) {
|
||||
return castToPtr(DestType, SourceType, target);
|
||||
} else if (@typeInfo(dest_opt.child) == .@"fn") {
|
||||
return castToPtr(?*const dest_opt.child, SourceType, target);
|
||||
}
|
||||
},
|
||||
.int => {
|
||||
switch (@typeInfo(SourceType)) {
|
||||
.pointer => {
|
||||
return castInt(DestType, @intFromPtr(target));
|
||||
},
|
||||
.optional => |opt| {
|
||||
if (@typeInfo(opt.child) == .pointer) {
|
||||
return castInt(DestType, @intFromPtr(target));
|
||||
}
|
||||
},
|
||||
.int => {
|
||||
return castInt(DestType, target);
|
||||
},
|
||||
.@"fn" => {
|
||||
return castInt(DestType, @intFromPtr(&target));
|
||||
},
|
||||
.bool => {
|
||||
return @intFromBool(target);
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
},
|
||||
.float => {
|
||||
switch (@typeInfo(SourceType)) {
|
||||
.int => return @as(DestType, @floatFromInt(target)),
|
||||
.float => return @as(DestType, @floatCast(target)),
|
||||
.bool => return @as(DestType, @floatFromInt(@intFromBool(target))),
|
||||
else => {},
|
||||
}
|
||||
},
|
||||
.@"union" => |info| {
|
||||
inline for (info.fields) |field| {
|
||||
if (field.type == SourceType) return @unionInit(DestType, field.name, target);
|
||||
}
|
||||
|
||||
@compileError("cast to union type '" ++ @typeName(DestType) ++ "' from type '" ++ @typeName(SourceType) ++ "' which is not present in union");
|
||||
},
|
||||
.bool => return cast(usize, target) != 0,
|
||||
else => {},
|
||||
}
|
||||
|
||||
return @as(DestType, target);
|
||||
}
|
||||
|
||||
fn castInt(comptime DestType: type, target: anytype) DestType {
|
||||
const dest = @typeInfo(DestType).int;
|
||||
const source = @typeInfo(@TypeOf(target)).int;
|
||||
|
||||
const Int = @Type(.{ .int = .{ .bits = dest.bits, .signedness = source.signedness } });
|
||||
|
||||
if (dest.bits < source.bits)
|
||||
return @as(DestType, @bitCast(@as(Int, @truncate(target))))
|
||||
else
|
||||
return @as(DestType, @bitCast(@as(Int, target)));
|
||||
}
|
||||
|
||||
fn castPtr(comptime DestType: type, target: anytype) DestType {
|
||||
return @constCast(@volatileCast(@alignCast(@ptrCast(target))));
|
||||
}
|
||||
|
||||
fn castToPtr(comptime DestType: type, comptime SourceType: type, target: anytype) DestType {
|
||||
switch (@typeInfo(SourceType)) {
|
||||
.int => {
|
||||
return @as(DestType, @ptrFromInt(castInt(usize, target)));
|
||||
},
|
||||
.comptime_int => {
|
||||
if (target < 0)
|
||||
return @as(DestType, @ptrFromInt(@as(usize, @bitCast(@as(isize, @intCast(target))))))
|
||||
else
|
||||
return @as(DestType, @ptrFromInt(@as(usize, @intCast(target))));
|
||||
},
|
||||
.pointer => {
|
||||
return castPtr(DestType, target);
|
||||
},
|
||||
.@"fn" => {
|
||||
return castPtr(DestType, &target);
|
||||
},
|
||||
.optional => |target_opt| {
|
||||
if (@typeInfo(target_opt.child) == .pointer) {
|
||||
return castPtr(DestType, target);
|
||||
}
|
||||
},
|
||||
else => {},
|
||||
}
|
||||
|
||||
return @as(DestType, target);
|
||||
}
|
||||
|
||||
/// Given a value returns its size as C's sizeof operator would.
|
||||
pub fn sizeof(target: anytype) usize {
|
||||
const T: type = if (@TypeOf(target) == type) target else @TypeOf(target);
|
||||
switch (@typeInfo(T)) {
|
||||
.float, .int, .@"struct", .@"union", .array, .bool, .vector => return @sizeOf(T),
|
||||
.@"fn" => {
|
||||
// sizeof(main) in C returns 1
|
||||
return 1;
|
||||
},
|
||||
.null => return @sizeOf(*anyopaque),
|
||||
.void => {
|
||||
// Note: sizeof(void) is 1 on clang/gcc and 0 on MSVC.
|
||||
return 1;
|
||||
},
|
||||
.@"opaque" => {
|
||||
if (T == anyopaque) {
|
||||
// Note: sizeof(void) is 1 on clang/gcc and 0 on MSVC.
|
||||
return 1;
|
||||
} else {
|
||||
@compileError("Cannot use C sizeof on opaque type " ++ @typeName(T));
|
||||
}
|
||||
},
|
||||
.optional => |opt| {
|
||||
if (@typeInfo(opt.child) == .pointer) {
|
||||
return sizeof(opt.child);
|
||||
} else {
|
||||
@compileError("Cannot use C sizeof on non-pointer optional " ++ @typeName(T));
|
||||
}
|
||||
},
|
||||
.pointer => |ptr| {
|
||||
if (ptr.size == .slice) {
|
||||
@compileError("Cannot use C sizeof on slice type " ++ @typeName(T));
|
||||
}
|
||||
|
||||
// for strings, sizeof("a") returns 2.
|
||||
// normal pointer decay scenarios from C are handled
|
||||
// in the .array case above, but strings remain literals
|
||||
// and are therefore always pointers, so they need to be
|
||||
// specially handled here.
|
||||
if (ptr.size == .one and ptr.is_const and @typeInfo(ptr.child) == .array) {
|
||||
const array_info = @typeInfo(ptr.child).array;
|
||||
if ((array_info.child == u8 or array_info.child == u16) and array_info.sentinel() == 0) {
|
||||
// length of the string plus one for the null terminator.
|
||||
return (array_info.len + 1) * @sizeOf(array_info.child);
|
||||
}
|
||||
}
|
||||
|
||||
// When zero sized pointers are removed, this case will no
|
||||
// longer be reachable and can be deleted.
|
||||
if (@sizeOf(T) == 0) {
|
||||
return @sizeOf(*anyopaque);
|
||||
}
|
||||
|
||||
return @sizeOf(T);
|
||||
},
|
||||
.comptime_float => return @sizeOf(f64), // TODO c_double #3999
|
||||
.comptime_int => {
|
||||
// TODO to get the correct result we have to translate
|
||||
// `1073741824 * 4` as `int(1073741824) *% int(4)` since
|
||||
// sizeof(1073741824 * 4) != sizeof(4294967296).
|
||||
|
||||
// TODO test if target fits in int, long or long long
|
||||
return @sizeOf(c_int);
|
||||
},
|
||||
else => @compileError("__helpers.sizeof does not support type " ++ @typeName(T)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn div(a: anytype, b: anytype) ArithmeticConversion(@TypeOf(a), @TypeOf(b)) {
|
||||
const ResType = ArithmeticConversion(@TypeOf(a), @TypeOf(b));
|
||||
const a_casted = cast(ResType, a);
|
||||
const b_casted = cast(ResType, b);
|
||||
switch (@typeInfo(ResType)) {
|
||||
.float => return a_casted / b_casted,
|
||||
.int => return @divTrunc(a_casted, b_casted),
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn rem(a: anytype, b: anytype) ArithmeticConversion(@TypeOf(a), @TypeOf(b)) {
|
||||
const ResType = ArithmeticConversion(@TypeOf(a), @TypeOf(b));
|
||||
const a_casted = cast(ResType, a);
|
||||
const b_casted = cast(ResType, b);
|
||||
switch (@typeInfo(ResType)) {
|
||||
.int => {
|
||||
if (@typeInfo(ResType).int.signedness == .signed) {
|
||||
return signedRemainder(a_casted, b_casted);
|
||||
} else {
|
||||
return a_casted % b_casted;
|
||||
}
|
||||
},
|
||||
else => unreachable,
|
||||
}
|
||||
}
|
||||
|
||||
/// A 2-argument function-like macro defined as #define FOO(A, B) (A)(B)
|
||||
/// could be either: cast B to A, or call A with the value B.
|
||||
pub fn CAST_OR_CALL(a: anytype, b: anytype) switch (@typeInfo(@TypeOf(a))) {
|
||||
.type => a,
|
||||
.@"fn" => |fn_info| fn_info.return_type orelse void,
|
||||
else => |info| @compileError("Unexpected argument type: " ++ @tagName(info)),
|
||||
} {
|
||||
switch (@typeInfo(@TypeOf(a))) {
|
||||
.type => return cast(a, b),
|
||||
.@"fn" => return a(b),
|
||||
else => unreachable, // return type will be a compile error otherwise
|
||||
}
|
||||
}
|
||||
|
||||
pub inline fn DISCARD(x: anytype) void {
|
||||
_ = x;
|
||||
}
|
||||
|
||||
pub fn F_SUFFIX(comptime f: comptime_float) f32 {
|
||||
return @as(f32, f);
|
||||
}
|
||||
|
||||
fn L_SUFFIX_ReturnType(comptime number: anytype) type {
|
||||
switch (@typeInfo(@TypeOf(number))) {
|
||||
.int, .comptime_int => return @TypeOf(promoteIntLiteral(c_long, number, .decimal)),
|
||||
.float, .comptime_float => return c_longdouble,
|
||||
else => @compileError("Invalid value for L suffix"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn L_SUFFIX(comptime number: anytype) L_SUFFIX_ReturnType(number) {
|
||||
switch (@typeInfo(@TypeOf(number))) {
|
||||
.int, .comptime_int => return promoteIntLiteral(c_long, number, .decimal),
|
||||
.float, .comptime_float => @compileError("TODO: c_longdouble initialization from comptime_float not supported"),
|
||||
else => @compileError("Invalid value for L suffix"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn LL_SUFFIX(comptime n: comptime_int) @TypeOf(promoteIntLiteral(c_longlong, n, .decimal)) {
|
||||
return promoteIntLiteral(c_longlong, n, .decimal);
|
||||
}
|
||||
|
||||
pub fn U_SUFFIX(comptime n: comptime_int) @TypeOf(promoteIntLiteral(c_uint, n, .decimal)) {
|
||||
return promoteIntLiteral(c_uint, n, .decimal);
|
||||
}
|
||||
|
||||
pub fn UL_SUFFIX(comptime n: comptime_int) @TypeOf(promoteIntLiteral(c_ulong, n, .decimal)) {
|
||||
return promoteIntLiteral(c_ulong, n, .decimal);
|
||||
}
|
||||
|
||||
pub fn ULL_SUFFIX(comptime n: comptime_int) @TypeOf(promoteIntLiteral(c_ulonglong, n, .decimal)) {
|
||||
return promoteIntLiteral(c_ulonglong, n, .decimal);
|
||||
}
|
||||
|
||||
pub fn WL_CONTAINER_OF(ptr: anytype, sample: anytype, comptime member: []const u8) @TypeOf(sample) {
|
||||
return @fieldParentPtr(member, ptr);
|
||||
}
|
||||
1307
lib/compiler/translate-c/src/MacroTranslator.zig
Normal file
1307
lib/compiler/translate-c/src/MacroTranslator.zig
Normal file
File diff suppressed because it is too large
Load Diff
288
lib/compiler/translate-c/src/PatternList.zig
Normal file
288
lib/compiler/translate-c/src/PatternList.zig
Normal file
@ -0,0 +1,288 @@
|
||||
const std = @import("std");
|
||||
const mem = std.mem;
|
||||
const assert = std.debug.assert;
|
||||
|
||||
const aro = @import("aro");
|
||||
const CToken = aro.Tokenizer.Token;
|
||||
|
||||
const helpers = @import("helpers.zig");
|
||||
const Translator = @import("Translator.zig");
|
||||
const Error = Translator.Error;
|
||||
/// Errors that can occur while scanning/translating a macro body.
pub const MacroProcessingError = Error || error{UnexpectedMacroToken};

// One enum tag per public decl of the helpers module; a matched template maps
// to the helper function that implements it.
const Impl = std.meta.DeclEnum(@import("helpers"));
const Template = struct { []const u8, Impl };

/// Templates must be function-like macros
/// first element is macro source, second element is the name of the function
/// in __helpers which implements it
const templates = [_]Template{
    // Parenthesized literal-suffix macros, e.g. `#define f_SUFFIX(X) (X ## f)`.
    .{ "f_SUFFIX(X) (X ## f)", .F_SUFFIX },
    .{ "F_SUFFIX(X) (X ## F)", .F_SUFFIX },

    .{ "u_SUFFIX(X) (X ## u)", .U_SUFFIX },
    .{ "U_SUFFIX(X) (X ## U)", .U_SUFFIX },

    .{ "l_SUFFIX(X) (X ## l)", .L_SUFFIX },
    .{ "L_SUFFIX(X) (X ## L)", .L_SUFFIX },

    .{ "ul_SUFFIX(X) (X ## ul)", .UL_SUFFIX },
    .{ "uL_SUFFIX(X) (X ## uL)", .UL_SUFFIX },
    .{ "Ul_SUFFIX(X) (X ## Ul)", .UL_SUFFIX },
    .{ "UL_SUFFIX(X) (X ## UL)", .UL_SUFFIX },

    .{ "ll_SUFFIX(X) (X ## ll)", .LL_SUFFIX },
    .{ "LL_SUFFIX(X) (X ## LL)", .LL_SUFFIX },

    .{ "ull_SUFFIX(X) (X ## ull)", .ULL_SUFFIX },
    .{ "uLL_SUFFIX(X) (X ## uLL)", .ULL_SUFFIX },
    .{ "Ull_SUFFIX(X) (X ## Ull)", .ULL_SUFFIX },
    .{ "ULL_SUFFIX(X) (X ## ULL)", .ULL_SUFFIX },

    // Same suffix macros without the surrounding parentheses.
    .{ "f_SUFFIX(X) X ## f", .F_SUFFIX },
    .{ "F_SUFFIX(X) X ## F", .F_SUFFIX },

    .{ "u_SUFFIX(X) X ## u", .U_SUFFIX },
    .{ "U_SUFFIX(X) X ## U", .U_SUFFIX },

    .{ "l_SUFFIX(X) X ## l", .L_SUFFIX },
    .{ "L_SUFFIX(X) X ## L", .L_SUFFIX },

    .{ "ul_SUFFIX(X) X ## ul", .UL_SUFFIX },
    .{ "uL_SUFFIX(X) X ## uL", .UL_SUFFIX },
    .{ "Ul_SUFFIX(X) X ## Ul", .UL_SUFFIX },
    .{ "UL_SUFFIX(X) X ## UL", .UL_SUFFIX },

    .{ "ll_SUFFIX(X) X ## ll", .LL_SUFFIX },
    .{ "LL_SUFFIX(X) X ## LL", .LL_SUFFIX },

    .{ "ull_SUFFIX(X) X ## ull", .ULL_SUFFIX },
    .{ "uLL_SUFFIX(X) X ## uLL", .ULL_SUFFIX },
    .{ "Ull_SUFFIX(X) X ## Ull", .ULL_SUFFIX },
    .{ "ULL_SUFFIX(X) X ## ULL", .ULL_SUFFIX },

    // `(X)(Y)` is ambiguous in C (cast vs. call); resolved at runtime by the helper.
    .{ "CAST_OR_CALL(X, Y) (X)(Y)", .CAST_OR_CALL },
    .{ "CAST_OR_CALL(X, Y) ((X)(Y))", .CAST_OR_CALL },

    // Wayland's container_of macro (field pointer -> parent struct pointer).
    .{
        \\wl_container_of(ptr, sample, member) \
        \\(__typeof__(sample))((char *)(ptr) - \
        \\ offsetof(__typeof__(*sample), member))
        ,
        .WL_CONTAINER_OF,
    },

    // Cast-to-void "ignore this value" idioms, with every cv-qualifier spelling.
    .{ "IGNORE_ME(X) ((void)(X))", .DISCARD },
    .{ "IGNORE_ME(X) (void)(X)", .DISCARD },
    .{ "IGNORE_ME(X) ((const void)(X))", .DISCARD },
    .{ "IGNORE_ME(X) (const void)(X)", .DISCARD },
    .{ "IGNORE_ME(X) ((volatile void)(X))", .DISCARD },
    .{ "IGNORE_ME(X) (volatile void)(X)", .DISCARD },
    .{ "IGNORE_ME(X) ((const volatile void)(X))", .DISCARD },
    .{ "IGNORE_ME(X) (const volatile void)(X)", .DISCARD },
    .{ "IGNORE_ME(X) ((volatile const void)(X))", .DISCARD },
    .{ "IGNORE_ME(X) (volatile const void)(X)", .DISCARD },
};
|
||||
|
||||
/// A parsed template macro paired with the helper that implements it.
const Pattern = struct {
    // Tokenized form of the template's macro body (tokens owned by `allocator`).
    slicer: MacroSlicer,
    // Which helper function a matching user macro should be translated to.
    impl: Impl,

    /// Tokenize `template`'s macro source into `pl.slicer`.
    /// On success `pl.slicer.tokens` is heap-allocated; free via `deinit`.
    fn init(pl: *Pattern, allocator: mem.Allocator, template: Template) Error!void {
        const source = template[0];
        const impl = template[1];
        var tok_list = std.ArrayList(CToken).init(allocator);
        defer tok_list.deinit();

        pl.* = .{
            .slicer = try tokenizeMacro(source, &tok_list),
            .impl = impl,
        };
    }

    fn deinit(pl: *Pattern, allocator: mem.Allocator) void {
        allocator.free(pl.slicer.tokens);
        pl.* = undefined;
    }

    /// This function assumes that `ms` has already been validated to contain a function-like
    /// macro, and that the parsed template macro in `pl` also contains a function-like
    /// macro. Please review this logic carefully if changing that assumption. Two
    /// function-like macros are considered equivalent if and only if they contain the same
    /// list of tokens, modulo parameter names.
    fn matches(pat: Pattern, ms: MacroSlicer) bool {
        // Parameter count and token count must agree before comparing token-by-token.
        if (ms.params != pat.slicer.params) return false;
        if (ms.tokens.len != pat.slicer.tokens.len) return false;

        for (ms.tokens, pat.slicer.tokens) |macro_tok, pat_tok| {
            if (macro_tok.id != pat_tok.id) return false;
            switch (macro_tok.id) {
                .macro_param, .macro_param_no_expand => {
                    // `.end` is the parameter index.
                    if (macro_tok.end != pat_tok.end) return false;
                },
                .identifier, .extended_identifier, .string_literal, .char_literal, .pp_num => {
                    // Payload-carrying tokens must match byte-for-byte.
                    const macro_bytes = ms.slice(macro_tok);
                    const pattern_bytes = pat.slicer.slice(pat_tok);

                    if (!mem.eql(u8, pattern_bytes, macro_bytes)) return false;
                },
                else => {
                    // other tags correspond to keywords and operators that do not contain a "payload"
                    // that can vary
                },
            }
        }
        return true;
    }
};
|
||||
|
||||
const PatternList = @This();

// All parsed template patterns; allocated in `init`, freed in `deinit`.
patterns: []Pattern,

/// A macro body plus the source string its token offsets index into.
pub const MacroSlicer = struct {
    source: []const u8,
    tokens: []const CToken,
    // Number of macro parameters.
    params: u32,

    /// Return the source bytes covered by `token`.
    fn slice(pl: MacroSlicer, token: CToken) []const u8 {
        return pl.source[token.start..token.end];
    }
};
|
||||
|
||||
/// Parse every entry of `templates` into a `Pattern`.
/// Caller owns the result and must release it with `deinit`.
/// On error, everything allocated so far is freed (the original leaked the
/// `patterns` slice and any already-parsed token slices on a mid-loop failure).
pub fn init(allocator: mem.Allocator) Error!PatternList {
    const patterns = try allocator.alloc(Pattern, templates.len);
    errdefer allocator.free(patterns);

    // Track how many entries were fully initialized so the error path only
    // deinitializes those.
    var initialized: usize = 0;
    errdefer for (patterns[0..initialized]) |*p| p.deinit(allocator);

    for (patterns, templates) |*pattern, template| {
        try pattern.init(allocator, template);
        initialized += 1;
    }
    return .{ .patterns = patterns };
}
|
||||
|
||||
/// Release every parsed pattern and the backing slice, then poison `pl`.
pub fn deinit(pl: *PatternList, allocator: mem.Allocator) void {
    for (pl.patterns) |*pattern| {
        pattern.deinit(allocator);
    }
    allocator.free(pl.patterns);
    pl.* = undefined;
}
|
||||
|
||||
/// Return the helper implementation whose template matches `ms`,
/// or null when no template matches.
pub fn match(pl: PatternList, ms: MacroSlicer) Error!?Impl {
    for (pl.patterns) |candidate| {
        if (!candidate.matches(ms)) continue;
        return candidate.impl;
    }
    return null;
}
|
||||
|
||||
/// Tokenize a function-like macro definition of the form `NAME(a, b, ...) body`.
/// Returns a `MacroSlicer` over `source`; the token slice is taken out of
/// `tok_list` via `toOwnedSlice`, so the caller owns it.
/// Identifiers in the body that name a parameter are replaced with
/// `.macro_param` tokens whose `.end` field holds the parameter index; a
/// parameter immediately followed by `##` is downgraded to
/// `.macro_param_no_expand`.
fn tokenizeMacro(source: []const u8, tok_list: *std.ArrayList(CToken)) Error!MacroSlicer {
    var param_count: u32 = 0;
    // NOTE(review): fixed capacity of 8 parameters with no bounds check —
    // presumably all inputs (templates and pre-validated macros) stay under
    // this; a 9th parameter would write out of bounds. TODO confirm.
    var param_buf: [8][]const u8 = undefined;

    var tokenizer: aro.Tokenizer = .{
        .buf = source,
        .source = .unused,
        .langopts = .{},
    };
    // Consume `NAME(` — asserts the input really is a function-like macro.
    {
        const name_tok = tokenizer.nextNoWS();
        assert(name_tok.id == .identifier);
        const l_paren = tokenizer.nextNoWS();
        assert(l_paren.id == .l_paren);
    }

    // Collect the parameter names up to the closing `)`.
    while (true) {
        const param = tokenizer.nextNoWS();
        if (param.id == .r_paren) break;
        assert(param.id == .identifier);
        const slice = source[param.start..param.end];
        param_buf[param_count] = slice;
        param_count += 1;

        const comma = tokenizer.nextNoWS();
        if (comma.id == .r_paren) break;
        assert(comma.id == .comma);
    }

    // Tokenize the body, rewriting parameter references.
    outer: while (true) {
        const tok = tokenizer.next();
        switch (tok.id) {
            .whitespace, .comment => continue,
            .identifier => {
                // If this identifier names a parameter, emit a macro_param
                // token carrying the parameter index instead.
                const slice = source[tok.start..tok.end];
                for (param_buf[0..param_count], 0..) |param, i| {
                    if (std.mem.eql(u8, param, slice)) {
                        try tok_list.append(.{
                            .id = .macro_param,
                            .source = .unused,
                            .end = @intCast(i),
                        });
                        continue :outer;
                    }
                }
            },
            .hash_hash => {
                // A parameter directly before `##` must not be expanded.
                // NOTE(review): indexes `items.len - 1`; assumes the body never
                // starts with `##` (invalid C) so the list is non-empty here.
                if (tok_list.items[tok_list.items.len - 1].id == .macro_param) {
                    tok_list.items[tok_list.items.len - 1].id = .macro_param_no_expand;
                }
            },
            .nl, .eof => break,
            else => {},
        }
        try tok_list.append(tok);
    }

    return .{
        .source = source,
        .tokens = try tok_list.toOwnedSlice(),
        .params = param_count,
    };
}
|
||||
|
||||
// Verifies that user-written macros match the expected template (matching is
// modulo macro/parameter names), and that unrelated macros match nothing.
test "Macro matching" {
    const testing = std.testing;
    const helper = struct {
        // Tokenize `source` and assert PatternList.match returns `expected_match`.
        fn checkMacro(
            allocator: mem.Allocator,
            pattern_list: PatternList,
            source: []const u8,
            comptime expected_match: ?Impl,
        ) !void {
            var tok_list = std.ArrayList(CToken).init(allocator);
            defer tok_list.deinit();
            const ms = try tokenizeMacro(source, &tok_list);
            defer allocator.free(ms.tokens);

            const matched = try pattern_list.match(ms);
            if (expected_match) |expected| {
                try testing.expectEqual(expected, matched);
            } else {
                try testing.expectEqual(@as(@TypeOf(matched), null), matched);
            }
        }
    };
    const allocator = std.testing.allocator;
    var pattern_list = try PatternList.init(allocator);
    defer pattern_list.deinit(allocator);

    // Suffix templates match regardless of the macro/parameter names used.
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## F)", .F_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## U)", .U_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## L)", .L_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## LL)", .LL_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## UL)", .UL_SUFFIX);
    try helper.checkMacro(allocator, pattern_list, "BAR(Z) (Z ## ULL)", .ULL_SUFFIX);
    // container_of with renamed parameters still matches the Wayland template.
    try helper.checkMacro(allocator, pattern_list,
        \\container_of(a, b, c) \
        \\(__typeof__(b))((char *)(a) - \
        \\ offsetof(__typeof__(*b), c))
    , .WL_CONTAINER_OF);

    try helper.checkMacro(allocator, pattern_list, "NO_MATCH(X, Y) (X + Y)", null);
    try helper.checkMacro(allocator, pattern_list, "CAST_OR_CALL(X, Y) (X)(Y)", .CAST_OR_CALL);
    try helper.checkMacro(allocator, pattern_list, "CAST_OR_CALL(X, Y) ((X)(Y))", .CAST_OR_CALL);
    // All cv-qualified cast-to-void spellings map to DISCARD.
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((void)(X))", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (const void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((const void)(X))", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (volatile void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((volatile void)(X))", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (const volatile void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((const volatile void)(X))", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) (volatile const void)(X)", .DISCARD);
    try helper.checkMacro(allocator, pattern_list, "IGNORE_ME(X) ((volatile const void)(X))", .DISCARD);
}
|
||||
399
lib/compiler/translate-c/src/Scope.zig
Normal file
399
lib/compiler/translate-c/src/Scope.zig
Normal file
@ -0,0 +1,399 @@
|
||||
const std = @import("std");
|
||||
|
||||
const aro = @import("aro");
|
||||
|
||||
const ast = @import("ast.zig");
|
||||
const Translator = @import("Translator.zig");
|
||||
|
||||
const Scope = @This();

// Maps a symbol name to its translated AST node.
pub const SymbolTable = std.StringArrayHashMapUnmanaged(ast.Node);
// Records `alias` (the possibly-mangled Zig name) for each original C `name`.
pub const AliasList = std.ArrayListUnmanaged(struct {
    alias: []const u8,
    name: []const u8,
});

/// Associates a container (structure or union) with its relevant member functions.
pub const ContainerMemberFns = struct {
    container_decl_ptr: *ast.Node,
    member_fns: std.ArrayListUnmanaged(*ast.Payload.Func) = .empty,
};
pub const ContainerMemberFnsHashMap = std.AutoArrayHashMapUnmanaged(aro.QualType, ContainerMemberFns);

// Discriminates which concrete scope struct this `Scope` is embedded in.
id: Id,
// Enclosing scope; null only for the root scope.
parent: ?*Scope,

pub const Id = enum {
    block,
    root,
    condition,
    loop,
    do_loop,
};
|
||||
|
||||
/// Used for the scope of condition expressions, for example `if (cond)`.
/// The block is lazily initialized because it is only needed for rare
/// cases of comma operators being used.
pub const Condition = struct {
    base: Scope,
    block: ?Block = null,

    /// Return the lazily-created block scope, creating a labeled one on first use.
    fn getBlockScope(cond: *Condition, t: *Translator) !*Block {
        if (cond.block) |*b| return b;
        cond.block = try Block.init(t, &cond.base, true);
        return &cond.block.?;
    }

    pub fn deinit(cond: *Condition) void {
        if (cond.block) |*b| b.deinit();
    }
};
|
||||
|
||||
/// Represents an in-progress Node.Block. This struct is stack-allocated.
/// When it is deinitialized, it produces an Node.Block which is allocated
/// into the main arena.
pub const Block = struct {
    base: Scope,
    translator: *Translator,
    // Statements accumulated so far; copied into the arena by `complete`.
    statements: std.ArrayListUnmanaged(ast.Node),
    // name -> alias pairs for variables declared in this block.
    variables: AliasList,
    // Counter used to generate unique `_N` suffixes when mangling.
    mangle_count: u32 = 0,
    // Zig block label, set only for labeled blocks (see `init`).
    label: ?[]const u8 = null,

    /// By default all variables are discarded, since we do not know in advance if they
    /// will be used. This maps the variable's name to the Discard payload, so that if
    /// the variable is subsequently referenced we can indicate that the discard should
    /// be skipped during the intermediate AST -> Zig AST render step.
    variable_discards: std.StringArrayHashMapUnmanaged(*ast.Payload.Discard),

    /// When the block corresponds to a function, keep track of the return type
    /// so that the return expression can be cast, if necessary
    return_type: ?aro.QualType = null,

    /// C static local variables are wrapped in a block-local struct. The struct
    /// is named `mangle(static_local_ + name)` and the Zig variable within the
    /// struct keeps the name of the C variable.
    pub const static_local_prefix = "static_local";

    /// C extern local variables are wrapped in a block-local struct. The struct
    /// is named `mangle(extern_local + name)` and the Zig variable within the
    /// struct keeps the name of the C variable.
    pub const extern_local_prefix = "extern_local";

    /// Create an empty block scope under `parent`.
    /// When `labeled`, a mangled "blk" label is reserved so the block can be
    /// broken out of with a value.
    pub fn init(t: *Translator, parent: *Scope, labeled: bool) !Block {
        var blk: Block = .{
            .base = .{
                .id = .block,
                .parent = parent,
            },
            .translator = t,
            .statements = .empty,
            .variables = .empty,
            .variable_discards = .empty,
        };
        if (labeled) {
            blk.label = try blk.makeMangledName("blk");
        }
        return blk;
    }

    pub fn deinit(block: *Block) void {
        block.statements.deinit(block.translator.gpa);
        block.variables.deinit(block.translator.gpa);
        block.variable_discards.deinit(block.translator.gpa);
        block.* = undefined;
    }

    /// Produce the finished arena-allocated block node from the accumulated
    /// statements.
    pub fn complete(block: *Block) !ast.Node {
        const arena = block.translator.arena;
        if (block.base.parent.?.id == .do_loop) {
            // We reserve 1 extra statement if the parent is a do_loop. This is in case of
            // do while, we want to put `if (cond) break;` at the end.
            const alloc_len = block.statements.items.len + @intFromBool(block.base.parent.?.id == .do_loop);
            var stmts = try arena.alloc(ast.Node, alloc_len);
            // Expose only the filled prefix; the caller may grow len by one later.
            stmts.len = block.statements.items.len;
            @memcpy(stmts[0..block.statements.items.len], block.statements.items);
            return ast.Node.Tag.block.create(arena, .{
                .label = block.label,
                .stmts = stmts,
            });
        }
        if (block.statements.items.len == 0) return ast.Node.Tag.empty_block.init();
        return ast.Node.Tag.block.create(arena, .{
            .label = block.label,
            .stmts = try arena.dupe(ast.Node, block.statements.items),
        });
    }

    /// Given the desired name, return a name that does not shadow anything from outer scopes.
    /// Inserts the returned name into the scope.
    /// The name will not be visible to callers of getAlias.
    pub fn reserveMangledName(block: *Block, name: []const u8) ![]const u8 {
        return block.createMangledName(name, true, null);
    }

    /// Same as reserveMangledName, but enables the alias immediately.
    pub fn makeMangledName(block: *Block, name: []const u8) ![]const u8 {
        return block.createMangledName(name, false, null);
    }

    /// Core mangling routine. Appends `_N` suffixes until the proposed name no
    /// longer collides with any visible name. When `reservation` is set, the
    /// recorded alias is the original name (so getAlias does not resolve to the
    /// mangled form); otherwise the mangled name becomes the alias.
    /// All returned strings are arena-allocated.
    pub fn createMangledName(block: *Block, name: []const u8, reservation: bool, prefix_opt: ?[]const u8) ![]const u8 {
        const arena = block.translator.arena;
        const name_copy = try arena.dupe(u8, name);
        const alias_base = if (prefix_opt) |prefix|
            try std.fmt.allocPrint(arena, "{s}_{s}", .{ prefix, name })
        else
            name;
        var proposed_name = alias_base;
        while (block.contains(proposed_name)) {
            block.mangle_count += 1;
            proposed_name = try std.fmt.allocPrint(arena, "{s}_{d}", .{ alias_base, block.mangle_count });
        }
        const new_mangle = try block.variables.addOne(block.translator.gpa);
        if (reservation) {
            new_mangle.* = .{ .name = name_copy, .alias = name_copy };
        } else {
            new_mangle.* = .{ .name = name_copy, .alias = proposed_name };
        }
        return proposed_name;
    }

    /// Resolve a C name to its (possibly mangled) Zig alias, searching this
    /// block first and then outer scopes.
    fn getAlias(block: *Block, name: []const u8) ?[]const u8 {
        for (block.variables.items) |p| {
            if (std.mem.eql(u8, p.name, name))
                return p.alias;
        }
        return block.base.parent.?.getAlias(name);
    }

    // True when `name` is already used as an alias in this block only.
    fn localContains(block: *Block, name: []const u8) bool {
        for (block.variables.items) |p| {
            if (std.mem.eql(u8, p.alias, name))
                return true;
        }
        return false;
    }

    // True when `name` is taken in this block or any enclosing scope.
    fn contains(block: *Block, name: []const u8) bool {
        if (block.localContains(name))
            return true;
        return block.base.parent.?.contains(name);
    }

    /// Emit `_ = name;` and remember the discard node so a later real use of
    /// the variable can mark it to be skipped at render time.
    pub fn discardVariable(block: *Block, name: []const u8) Translator.Error!void {
        const gpa = block.translator.gpa;
        const arena = block.translator.arena;
        const name_node = try ast.Node.Tag.identifier.create(arena, name);
        const discard = try ast.Node.Tag.discard.create(arena, .{ .should_skip = false, .value = name_node });
        try block.statements.append(gpa, discard);
        try block.variable_discards.putNoClobber(gpa, name, discard.castTag(.discard).?);
    }
};
|
||||
|
||||
/// The file-level (global) scope.
pub const Root = struct {
    base: Scope,
    translator: *Translator,
    // Translated global symbols.
    sym_table: SymbolTable,
    // Macros translated to nothing, recorded so redefinitions are tolerated.
    blank_macros: std.StringArrayHashMapUnmanaged(void),
    // Top-level AST nodes in declaration order.
    nodes: std.ArrayListUnmanaged(ast.Node),
    // Container type -> member functions, used to attach method-style aliases.
    container_member_fns_map: ContainerMemberFnsHashMap,

    pub fn init(t: *Translator) Root {
        return .{
            .base = .{
                .id = .root,
                .parent = null,
            },
            .translator = t,
            .sym_table = .empty,
            .blank_macros = .empty,
            .nodes = .empty,
            .container_member_fns_map = .empty,
        };
    }

    pub fn deinit(root: *Root) void {
        root.sym_table.deinit(root.translator.gpa);
        root.blank_macros.deinit(root.translator.gpa);
        root.nodes.deinit(root.translator.gpa);
        // Each map value owns its member_fns list; free those before the map.
        for (root.container_member_fns_map.values()) |*members| {
            members.member_fns.deinit(root.translator.gpa);
        }
        root.container_member_fns_map.deinit(root.translator.gpa);
    }

    /// Check if the global scope contains this name, without looking into the "future", e.g.
    /// ignore the preprocessed decl and macro names.
    pub fn containsNow(root: *Root, name: []const u8) bool {
        return root.sym_table.contains(name);
    }

    /// Check if the global scope contains the name, includes all decls that haven't been translated yet.
    pub fn contains(root: *Root, name: []const u8) bool {
        return root.containsNow(name) or root.translator.global_names.contains(name) or root.translator.weak_global_names.contains(name);
    }

    /// If `func`'s first parameter is (a pointer to) a tracked container type,
    /// record `func` as a member function of that container.
    pub fn addMemberFunction(root: *Root, func_ty: aro.Type.Func, func: *ast.Payload.Func) !void {
        std.debug.assert(func.data.name != null);
        if (func_ty.params.len == 0) return;

        // Look through one level of pointer to find the container type.
        const param1_base = func_ty.params[0].qt.base(root.translator.comp);
        const container_qt = if (param1_base.type == .pointer)
            param1_base.type.pointer.child.base(root.translator.comp).qt
        else
            param1_base.qt;

        if (root.container_member_fns_map.getPtr(container_qt)) |members| {
            try members.member_fns.append(root.translator.gpa, func);
        }
    }

    /// For every tracked container, append `pub const <short_name> = <func>;`
    /// aliases for its member functions into the container's decl list.
    /// The short name is the text after the function name's last `_`;
    /// collisions (with fields or other members) get numeric suffixes.
    pub fn processContainerMemberFns(root: *Root) !void {
        const gpa = root.translator.gpa;
        const arena = root.translator.arena;

        // Tracks names already used inside the current container (value is a
        // per-name collision counter).
        var member_names: std.StringArrayHashMapUnmanaged(u32) = .empty;
        defer member_names.deinit(gpa);
        for (root.container_member_fns_map.values()) |members| {
            member_names.clearRetainingCapacity();
            const decls_ptr = switch (members.container_decl_ptr.tag()) {
                .@"struct", .@"union" => blk_record: {
                    const payload: *ast.Payload.Container = @alignCast(@fieldParentPtr("base", members.container_decl_ptr.ptr_otherwise));
                    // Avoid duplication with field names
                    for (payload.data.fields) |field| {
                        try member_names.put(gpa, field.name, 0);
                    }
                    break :blk_record &payload.data.decls;
                },
                .opaque_literal => blk_opaque: {
                    // Upgrade the bare `opaque {}` literal to a full opaque
                    // container so it can hold decls.
                    const container_decl = try ast.Node.Tag.@"opaque".create(arena, .{
                        .layout = .none,
                        .fields = &.{},
                        .decls = &.{},
                    });
                    members.container_decl_ptr.* = container_decl;
                    break :blk_opaque &container_decl.castTag(.@"opaque").?.data.decls;
                },
                else => return,
            };

            const old_decls = decls_ptr.*;
            const new_decls = try arena.alloc(ast.Node, old_decls.len + members.member_fns.items.len);
            @memcpy(new_decls[0..old_decls.len], old_decls);
            // Assume the allocator of payload.data.decls is arena,
            // so don't add arena.free(old_variables).
            const func_ref_vars = new_decls[old_decls.len..];
            var count: u32 = 0;
            for (members.member_fns.items) |func| {
                const func_name = func.data.name.?;

                // Functions without a `_` in their name get no alias.
                const last_index = std.mem.lastIndexOf(u8, func_name, "_");
                const last_name = if (last_index) |index| func_name[index + 1 ..] else continue;
                var same_count: u32 = 0;
                const gop = try member_names.getOrPutValue(gpa, last_name, same_count);
                if (gop.found_existing) {
                    gop.value_ptr.* += 1;
                    same_count = gop.value_ptr.*;
                }
                // First occurrence keeps the bare name; later ones get `name1`, `name2`, ...
                const var_name = if (same_count == 0)
                    last_name
                else
                    try std.fmt.allocPrint(arena, "{s}{d}", .{ last_name, same_count });

                func_ref_vars[count] = try ast.Node.Tag.pub_var_simple.create(arena, .{
                    .name = var_name,
                    .init = try ast.Node.Tag.identifier.create(arena, func_name),
                });
                count += 1;
            }
            // Only `count` aliases were produced (some functions were skipped).
            decls_ptr.* = new_decls[0 .. old_decls.len + count];
        }
    }
};
|
||||
|
||||
/// Walk up from `inner` to the nearest scope that can hold statements:
/// an actual block, or a condition scope's lazily-created block.
/// Must not be called from the root scope.
pub fn findBlockScope(inner: *Scope, t: *Translator) !*Block {
    var scope = inner;
    while (true) {
        switch (scope.id) {
            .root => unreachable,
            .block => return @fieldParentPtr("base", scope),
            .condition => return @as(*Condition, @fieldParentPtr("base", scope)).getBlockScope(t),
            else => scope = scope.parent.?,
        }
    }
}
|
||||
|
||||
/// Walk outward from `inner` until a block scope with a recorded function
/// return type is found. Must not be called outside of a function body
/// (reaching the root scope is unreachable).
pub fn findBlockReturnType(inner: *Scope) aro.QualType {
    var current = inner;
    while (true) : (current = current.parent.?) {
        if (current.id == .root) unreachable;
        if (current.id != .block) continue;
        const block: *Block = @fieldParentPtr("base", current);
        if (block.return_type) |return_qt| return return_qt;
    }
}
|
||||
|
||||
/// Resolve `name` to its (possibly mangled) Zig alias by searching this scope
/// chain; returns null if nothing is found by the root.
pub fn getAlias(scope: *Scope, name: []const u8) ?[]const u8 {
    if (scope.id == .root) return null;
    if (scope.id == .block) {
        const block: *Block = @fieldParentPtr("base", scope);
        return block.getAlias(name);
    }
    // loop, do_loop, condition: these hold no names; defer to the parent.
    return scope.parent.?.getAlias(name);
}
|
||||
|
||||
/// Whether `name` is already taken anywhere from this scope up to (and
/// including) the root.
fn contains(scope: *Scope, name: []const u8) bool {
    switch (scope.id) {
        .root => {
            const root: *Root = @fieldParentPtr("base", scope);
            return root.contains(name);
        },
        .block => {
            const block: *Block = @fieldParentPtr("base", scope);
            return block.contains(name);
        },
        // Intermediate scopes hold no names of their own.
        .loop, .do_loop, .condition => return scope.parent.?.contains(name),
    }
}
|
||||
|
||||
/// Appends a node to the first block scope if inside a function, or to the root tree if not.
pub fn appendNode(inner: *Scope, node: ast.Node) !void {
    var scope = inner;
    while (true) {
        switch (scope.id) {
            .root => {
                const root: *Root = @fieldParentPtr("base", scope);
                return root.nodes.append(root.translator.gpa, node);
            },
            .block => {
                const block: *Block = @fieldParentPtr("base", scope);
                return block.statements.append(block.translator.gpa, node);
            },
            // loop/do_loop/condition scopes cannot hold nodes; keep walking up.
            else => scope = scope.parent.?,
        }
    }
}
|
||||
|
||||
/// Mark the discard (`_ = name;`) of `name` in the nearest block that emitted
/// one to be skipped at render time, because the variable is actually used.
/// Currently disabled (see the early return below).
pub fn skipVariableDiscard(inner: *Scope, name: []const u8) void {
    // Deliberate `if (true)` so the dead code below still type-checks.
    if (true) {
        // TODO: due to 'local variable is never mutated' errors, we can
        // only skip discards if a variable is used as an lvalue, which
        // we don't currently have detection for in translate-c.
        // Once #17584 is completed, perhaps we can do away with this
        // logic entirely, and instead rely on render to fixup code.
        return;
    }
    var scope = inner;
    while (true) {
        switch (scope.id) {
            .root => return,
            .block => {
                const block: *Block = @fieldParentPtr("base", scope);
                if (block.variable_discards.get(name)) |discard| {
                    discard.data.should_skip = true;
                    return;
                }
            },
            else => {},
        }
        scope = scope.parent.?;
    }
}
|
||||
4183
lib/compiler/translate-c/src/Translator.zig
Normal file
4183
lib/compiler/translate-c/src/Translator.zig
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
76
lib/compiler/translate-c/src/builtins.zig
Normal file
76
lib/compiler/translate-c/src/builtins.zig
Normal file
@ -0,0 +1,76 @@
|
||||
const std = @import("std");
|
||||
|
||||
const ast = @import("ast.zig");
|
||||
|
||||
/// All builtins need to have a source so that macros can reference them
/// but for some it is possible to directly call an equivalent Zig builtin
/// which is preferrable.
pub const Builtin = struct {
    /// The name of the builtin in `c_builtins.zig`.
    name: []const u8,
    /// When set, the intermediate-AST tag of the Zig builtin to emit instead
    /// of a call into `c_builtins.zig`.
    tag: ?ast.Node.Tag = null,
};
|
||||
|
||||
/// Compile-time map from GCC/Clang builtin names to their translation: either
/// a helper in `c_builtins.zig` (`name`) or, when `tag` is set, a direct Zig
/// builtin equivalent.
pub const map = std.StaticStringMap(Builtin).initComptime([_]struct { []const u8, Builtin }{
    .{ "__builtin_abs", .{ .name = "abs" } },
    .{ "__builtin_assume", .{ .name = "assume" } },
    .{ "__builtin_bswap16", .{ .name = "bswap16", .tag = .byte_swap } },
    .{ "__builtin_bswap32", .{ .name = "bswap32", .tag = .byte_swap } },
    .{ "__builtin_bswap64", .{ .name = "bswap64", .tag = .byte_swap } },
    .{ "__builtin_ceilf", .{ .name = "ceilf", .tag = .ceil } },
    .{ "__builtin_ceil", .{ .name = "ceil", .tag = .ceil } },
    .{ "__builtin_clz", .{ .name = "clz" } },
    .{ "__builtin_constant_p", .{ .name = "constant_p" } },
    .{ "__builtin_cosf", .{ .name = "cosf", .tag = .cos } },
    .{ "__builtin_cos", .{ .name = "cos", .tag = .cos } },
    .{ "__builtin_ctz", .{ .name = "ctz" } },
    .{ "__builtin_exp2f", .{ .name = "exp2f", .tag = .exp2 } },
    .{ "__builtin_exp2", .{ .name = "exp2", .tag = .exp2 } },
    .{ "__builtin_expf", .{ .name = "expf", .tag = .exp } },
    .{ "__builtin_exp", .{ .name = "exp", .tag = .exp } },
    .{ "__builtin_expect", .{ .name = "expect" } },
    .{ "__builtin_fabsf", .{ .name = "fabsf", .tag = .abs } },
    .{ "__builtin_fabs", .{ .name = "fabs", .tag = .abs } },
    .{ "__builtin_floorf", .{ .name = "floorf", .tag = .floor } },
    .{ "__builtin_floor", .{ .name = "floor", .tag = .floor } },
    .{ "__builtin_huge_valf", .{ .name = "huge_valf" } },
    .{ "__builtin_inff", .{ .name = "inff" } },
    .{ "__builtin_isinf_sign", .{ .name = "isinf_sign" } },
    .{ "__builtin_isinf", .{ .name = "isinf" } },
    .{ "__builtin_isnan", .{ .name = "isnan" } },
    .{ "__builtin_labs", .{ .name = "labs" } },
    .{ "__builtin_llabs", .{ .name = "llabs" } },
    .{ "__builtin_log10f", .{ .name = "log10f", .tag = .log10 } },
    .{ "__builtin_log10", .{ .name = "log10", .tag = .log10 } },
    .{ "__builtin_log2f", .{ .name = "log2f", .tag = .log2 } },
    .{ "__builtin_log2", .{ .name = "log2", .tag = .log2 } },
    .{ "__builtin_logf", .{ .name = "logf", .tag = .log } },
    .{ "__builtin_log", .{ .name = "log", .tag = .log } },
    .{ "__builtin___memcpy_chk", .{ .name = "memcpy_chk" } },
    .{ "__builtin_memcpy", .{ .name = "memcpy" } },
    .{ "__builtin___memset_chk", .{ .name = "memset_chk" } },
    .{ "__builtin_memset", .{ .name = "memset" } },
    .{ "__builtin_mul_overflow", .{ .name = "mul_overflow" } },
    .{ "__builtin_nanf", .{ .name = "nanf" } },
    .{ "__builtin_object_size", .{ .name = "object_size" } },
    .{ "__builtin_popcount", .{ .name = "popcount" } },
    .{ "__builtin_roundf", .{ .name = "roundf", .tag = .round } },
    .{ "__builtin_round", .{ .name = "round", .tag = .round } },
    .{ "__builtin_signbitf", .{ .name = "signbitf" } },
    .{ "__builtin_signbit", .{ .name = "signbit" } },
    .{ "__builtin_sinf", .{ .name = "sinf", .tag = .sin } },
    .{ "__builtin_sin", .{ .name = "sin", .tag = .sin } },
    .{ "__builtin_sqrtf", .{ .name = "sqrtf", .tag = .sqrt } },
    .{ "__builtin_sqrt", .{ .name = "sqrt", .tag = .sqrt } },
    .{ "__builtin_strcmp", .{ .name = "strcmp" } },
    .{ "__builtin_strlen", .{ .name = "strlen" } },
    .{ "__builtin_truncf", .{ .name = "truncf", .tag = .trunc } },
    .{ "__builtin_trunc", .{ .name = "trunc", .tag = .trunc } },
    .{ "__builtin_unreachable", .{ .name = "unreachable", .tag = .@"unreachable" } },
    .{ "__has_builtin", .{ .name = "has_builtin" } },

    // __builtin_alloca_with_align is not currently implemented.
    // It is used in a run and a translate test to ensure that non-implemented
    // builtins are correctly demoted. If you implement __builtin_alloca_with_align,
    // please update the tests to use a different non-implemented builtin.
});
|
||||
327
lib/compiler/translate-c/src/helpers.zig
Normal file
327
lib/compiler/translate-c/src/helpers.zig
Normal file
@ -0,0 +1,327 @@
|
||||
const std = @import("std");
|
||||
const builtin = @import("builtin");
|
||||
const testing = std.testing;
|
||||
const math = std.math;
|
||||
|
||||
const helpers = @import("helpers");
|
||||
|
||||
const cast = helpers.cast;
|
||||
|
||||
// Exercises the C-style `cast` helper across the conversions translate-c
// emits: int <-> pointer, optional pointers, alignment-relaxing pointer
// casts, const/volatile stripping, and function-pointer reinterpretation.
test cast {
    var i = @as(i64, 10);

    // Integer literal -> pointer, pointer reinterpretation, optional removal.
    try testing.expect(cast(*u8, 16) == @as(*u8, @ptrFromInt(16)));
    try testing.expect(cast(*u64, &i).* == @as(u64, 10));
    try testing.expect(cast(*i64, @as(?*align(1) i64, &i)) == &i);

    // Casting into optional pointers.
    try testing.expect(cast(?*u8, 2) == @as(*u8, @ptrFromInt(2)));
    try testing.expect(cast(?*i64, @as(*align(1) i64, &i)) == &i);
    try testing.expect(cast(?*i64, @as(?*align(1) i64, &i)) == &i);

    // Pointer -> integer, and integer narrowing.
    try testing.expectEqual(@as(u32, 4), cast(u32, @as(*u32, @ptrFromInt(4))));
    try testing.expectEqual(@as(u32, 4), cast(u32, @as(?*u32, @ptrFromInt(4))));
    try testing.expectEqual(@as(u32, 10), cast(u32, @as(u64, 10)));

    // Signed <-> unsigned reinterpretation keeps the bit pattern.
    try testing.expectEqual(@as(i32, @bitCast(@as(u32, 0x8000_0000))), cast(i32, @as(u32, 0x8000_0000)));

    // const/volatile qualifiers are stripped, like a C cast.
    try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*const u8, @ptrFromInt(2))));
    try testing.expectEqual(@as(*u8, @ptrFromInt(2)), cast(*u8, @as(*volatile u8, @ptrFromInt(2))));

    try testing.expectEqual(@as(?*anyopaque, @ptrFromInt(2)), cast(?*anyopaque, @as(*u8, @ptrFromInt(2))));

    // Negative integers map to pointers via sign-extended usize, as in C.
    var foo: c_int = -1;
    _ = &foo;
    try testing.expect(cast(*anyopaque, -1) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
    try testing.expect(cast(*anyopaque, foo) == @as(*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
    try testing.expect(cast(?*anyopaque, -1) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));
    try testing.expect(cast(?*anyopaque, foo) == @as(?*anyopaque, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));

    // Integers can also be cast to (optional) function pointers.
    const FnPtr = ?*align(1) const fn (*anyopaque) void;
    try testing.expect(cast(FnPtr, 0) == @as(FnPtr, @ptrFromInt(@as(usize, 0))));
    try testing.expect(cast(FnPtr, foo) == @as(FnPtr, @ptrFromInt(@as(usize, @bitCast(@as(isize, -1))))));

    // A function pointer with an unrelated signature can be reinterpreted
    // as a generic function pointer (SDL-style).
    const complexFunction = struct {
        fn f(_: ?*anyopaque, _: c_uint, _: ?*const fn (?*anyopaque) callconv(.c) c_uint, _: ?*anyopaque, _: c_uint, _: [*c]c_uint) callconv(.c) usize {
            return 0;
        }
    }.f;

    const SDL_FunctionPointer = ?*const fn () callconv(.c) void;
    const fn_ptr = cast(SDL_FunctionPointer, complexFunction);
    try testing.expect(fn_ptr != null);
}
|
||||
|
||||
const sizeof = helpers.sizeof;
|
||||
|
||||
test sizeof {
|
||||
const S = extern struct { a: u32 };
|
||||
|
||||
const ptr_size = @sizeOf(*anyopaque);
|
||||
|
||||
try testing.expect(sizeof(u32) == 4);
|
||||
try testing.expect(sizeof(@as(u32, 2)) == 4);
|
||||
try testing.expect(sizeof(2) == @sizeOf(c_int));
|
||||
|
||||
try testing.expect(sizeof(2.0) == @sizeOf(f64));
|
||||
|
||||
try testing.expect(sizeof(S) == 4);
|
||||
|
||||
try testing.expect(sizeof([_]u32{ 4, 5, 6 }) == 12);
|
||||
try testing.expect(sizeof([3]u32) == 12);
|
||||
try testing.expect(sizeof([3:0]u32) == 16);
|
||||
try testing.expect(sizeof(&[_]u32{ 4, 5, 6 }) == ptr_size);
|
||||
|
||||
try testing.expect(sizeof(*u32) == ptr_size);
|
||||
try testing.expect(sizeof([*]u32) == ptr_size);
|
||||
try testing.expect(sizeof([*c]u32) == ptr_size);
|
||||
try testing.expect(sizeof(?*u32) == ptr_size);
|
||||
try testing.expect(sizeof(?[*]u32) == ptr_size);
|
||||
try testing.expect(sizeof(*anyopaque) == ptr_size);
|
||||
try testing.expect(sizeof(*void) == ptr_size);
|
||||
try testing.expect(sizeof(null) == ptr_size);
|
||||
|
||||
try testing.expect(sizeof("foobar") == 7);
|
||||
try testing.expect(sizeof(&[_:0]u16{ 'f', 'o', 'o', 'b', 'a', 'r' }) == 14);
|
||||
try testing.expect(sizeof(*const [4:0]u8) == 5);
|
||||
try testing.expect(sizeof(*[4:0]u8) == ptr_size);
|
||||
try testing.expect(sizeof([*]const [4:0]u8) == ptr_size);
|
||||
try testing.expect(sizeof(*const *const [4:0]u8) == ptr_size);
|
||||
try testing.expect(sizeof(*const [4]u8) == ptr_size);
|
||||
|
||||
if (false) { // TODO
|
||||
try testing.expect(sizeof(&sizeof) == @sizeOf(@TypeOf(&sizeof)));
|
||||
try testing.expect(sizeof(sizeof) == 1);
|
||||
}
|
||||
|
||||
try testing.expect(sizeof(void) == 1);
|
||||
try testing.expect(sizeof(anyopaque) == 1);
|
||||
}
|
||||
|
||||
const promoteIntLiteral = helpers.promoteIntLiteral;
|
||||
|
||||
test promoteIntLiteral {
|
||||
const signed_hex = promoteIntLiteral(c_int, math.maxInt(c_int) + 1, .hex);
|
||||
try testing.expectEqual(c_uint, @TypeOf(signed_hex));
|
||||
|
||||
if (math.maxInt(c_longlong) == math.maxInt(c_int)) return;
|
||||
|
||||
const signed_decimal = promoteIntLiteral(c_int, math.maxInt(c_int) + 1, .decimal);
|
||||
const unsigned = promoteIntLiteral(c_uint, math.maxInt(c_uint) + 1, .hex);
|
||||
|
||||
if (math.maxInt(c_long) > math.maxInt(c_int)) {
|
||||
try testing.expectEqual(c_long, @TypeOf(signed_decimal));
|
||||
try testing.expectEqual(c_ulong, @TypeOf(unsigned));
|
||||
} else {
|
||||
try testing.expectEqual(c_longlong, @TypeOf(signed_decimal));
|
||||
try testing.expectEqual(c_ulonglong, @TypeOf(unsigned));
|
||||
}
|
||||
}
|
||||
|
||||
const shuffleVectorIndex = helpers.shuffleVectorIndex;
|
||||
|
||||
test shuffleVectorIndex {
|
||||
const vector_len: usize = 4;
|
||||
|
||||
_ = shuffleVectorIndex(-1, vector_len);
|
||||
|
||||
try testing.expect(shuffleVectorIndex(0, vector_len) == 0);
|
||||
try testing.expect(shuffleVectorIndex(1, vector_len) == 1);
|
||||
try testing.expect(shuffleVectorIndex(2, vector_len) == 2);
|
||||
try testing.expect(shuffleVectorIndex(3, vector_len) == 3);
|
||||
|
||||
try testing.expect(shuffleVectorIndex(4, vector_len) == -1);
|
||||
try testing.expect(shuffleVectorIndex(5, vector_len) == -2);
|
||||
try testing.expect(shuffleVectorIndex(6, vector_len) == -3);
|
||||
try testing.expect(shuffleVectorIndex(7, vector_len) == -4);
|
||||
}
|
||||
|
||||
const FlexibleArrayType = helpers.FlexibleArrayType;
|
||||
|
||||
test FlexibleArrayType {
|
||||
const Container = extern struct {
|
||||
size: usize,
|
||||
};
|
||||
|
||||
try testing.expectEqual(FlexibleArrayType(*Container, c_int), [*c]c_int);
|
||||
try testing.expectEqual(FlexibleArrayType(*const Container, c_int), [*c]const c_int);
|
||||
try testing.expectEqual(FlexibleArrayType(*volatile Container, c_int), [*c]volatile c_int);
|
||||
try testing.expectEqual(FlexibleArrayType(*const volatile Container, c_int), [*c]const volatile c_int);
|
||||
}
|
||||
|
||||
const signedRemainder = helpers.signedRemainder;
|
||||
|
||||
test signedRemainder {
|
||||
// TODO add test
|
||||
return error.SkipZigTest;
|
||||
}
|
||||
|
||||
const ArithmeticConversion = helpers.ArithmeticConversion;
|
||||
|
||||
test ArithmeticConversion {
|
||||
// Promotions not necessarily the same for other platforms
|
||||
if (builtin.target.cpu.arch != .x86_64 or builtin.target.os.tag != .linux) return error.SkipZigTest;
|
||||
|
||||
const Test = struct {
|
||||
/// Order of operands should not matter for arithmetic conversions
|
||||
fn checkPromotion(comptime A: type, comptime B: type, comptime Expected: type) !void {
|
||||
try std.testing.expect(ArithmeticConversion(A, B) == Expected);
|
||||
try std.testing.expect(ArithmeticConversion(B, A) == Expected);
|
||||
}
|
||||
};
|
||||
|
||||
try Test.checkPromotion(c_longdouble, c_int, c_longdouble);
|
||||
try Test.checkPromotion(c_int, f64, f64);
|
||||
try Test.checkPromotion(f32, bool, f32);
|
||||
|
||||
try Test.checkPromotion(bool, c_short, c_int);
|
||||
try Test.checkPromotion(c_int, c_int, c_int);
|
||||
try Test.checkPromotion(c_short, c_int, c_int);
|
||||
|
||||
try Test.checkPromotion(c_int, c_long, c_long);
|
||||
|
||||
try Test.checkPromotion(c_ulonglong, c_uint, c_ulonglong);
|
||||
|
||||
try Test.checkPromotion(c_uint, c_int, c_uint);
|
||||
|
||||
try Test.checkPromotion(c_uint, c_long, c_long);
|
||||
|
||||
try Test.checkPromotion(c_ulong, c_longlong, c_ulonglong);
|
||||
|
||||
// stdint.h
|
||||
try Test.checkPromotion(u8, i8, c_int);
|
||||
try Test.checkPromotion(u16, i16, c_int);
|
||||
try Test.checkPromotion(i32, c_int, c_int);
|
||||
try Test.checkPromotion(u32, c_int, c_uint);
|
||||
try Test.checkPromotion(i64, c_int, c_long);
|
||||
try Test.checkPromotion(u64, c_int, c_ulong);
|
||||
try Test.checkPromotion(isize, c_int, c_long);
|
||||
try Test.checkPromotion(usize, c_int, c_ulong);
|
||||
}
|
||||
|
||||
const F_SUFFIX = helpers.F_SUFFIX;
|
||||
|
||||
test F_SUFFIX {
|
||||
try testing.expect(@TypeOf(F_SUFFIX(1)) == f32);
|
||||
}
|
||||
|
||||
const U_SUFFIX = helpers.U_SUFFIX;
|
||||
|
||||
test U_SUFFIX {
|
||||
try testing.expect(@TypeOf(U_SUFFIX(1)) == c_uint);
|
||||
if (math.maxInt(c_ulong) > math.maxInt(c_uint)) {
|
||||
try testing.expect(@TypeOf(U_SUFFIX(math.maxInt(c_uint) + 1)) == c_ulong);
|
||||
}
|
||||
if (math.maxInt(c_ulonglong) > math.maxInt(c_ulong)) {
|
||||
try testing.expect(@TypeOf(U_SUFFIX(math.maxInt(c_ulong) + 1)) == c_ulonglong);
|
||||
}
|
||||
}
|
||||
|
||||
const L_SUFFIX = helpers.L_SUFFIX;
|
||||
|
||||
test L_SUFFIX {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(1)) == c_long);
|
||||
if (math.maxInt(c_long) > math.maxInt(c_int)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_int) + 1)) == c_long);
|
||||
}
|
||||
if (math.maxInt(c_longlong) > math.maxInt(c_long)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_long) + 1)) == c_longlong);
|
||||
}
|
||||
}
|
||||
const UL_SUFFIX = helpers.UL_SUFFIX;
|
||||
|
||||
test UL_SUFFIX {
|
||||
try testing.expect(@TypeOf(UL_SUFFIX(1)) == c_ulong);
|
||||
if (math.maxInt(c_ulonglong) > math.maxInt(c_ulong)) {
|
||||
try testing.expect(@TypeOf(UL_SUFFIX(math.maxInt(c_ulong) + 1)) == c_ulonglong);
|
||||
}
|
||||
}
|
||||
const LL_SUFFIX = helpers.LL_SUFFIX;
|
||||
|
||||
test LL_SUFFIX {
|
||||
try testing.expect(@TypeOf(LL_SUFFIX(1)) == c_longlong);
|
||||
}
|
||||
const ULL_SUFFIX = helpers.ULL_SUFFIX;
|
||||
|
||||
test ULL_SUFFIX {
|
||||
try testing.expect(@TypeOf(ULL_SUFFIX(1)) == c_ulonglong);
|
||||
}
|
||||
|
||||
test "Extended C ABI casting" {
|
||||
if (math.maxInt(c_long) > math.maxInt(c_char)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_char, math.maxInt(c_char) - 1))) == c_long); // c_char
|
||||
}
|
||||
if (math.maxInt(c_long) > math.maxInt(c_short)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_short, math.maxInt(c_short) - 1))) == c_long); // c_short
|
||||
}
|
||||
|
||||
if (math.maxInt(c_long) > math.maxInt(c_ushort)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_ushort, math.maxInt(c_ushort) - 1))) == c_long); //c_ushort
|
||||
}
|
||||
|
||||
if (math.maxInt(c_long) > math.maxInt(c_int)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_int, math.maxInt(c_int) - 1))) == c_long); // c_int
|
||||
}
|
||||
|
||||
if (math.maxInt(c_long) > math.maxInt(c_uint)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_uint, math.maxInt(c_uint) - 1))) == c_long); // c_uint
|
||||
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_uint) + 1)) == c_long); // comptime_int -> c_long
|
||||
}
|
||||
|
||||
if (math.maxInt(c_longlong) > math.maxInt(c_long)) {
|
||||
try testing.expect(@TypeOf(L_SUFFIX(@as(c_long, math.maxInt(c_long) - 1))) == c_long); // c_long
|
||||
try testing.expect(@TypeOf(L_SUFFIX(math.maxInt(c_long) + 1)) == c_longlong); // comptime_int -> c_longlong
|
||||
}
|
||||
}
|
||||
|
||||
const WL_CONTAINER_OF = helpers.WL_CONTAINER_OF;
|
||||
|
||||
test WL_CONTAINER_OF {
|
||||
const S = struct {
|
||||
a: u32 = 0,
|
||||
b: u32 = 0,
|
||||
};
|
||||
const x = S{};
|
||||
const y = S{};
|
||||
const ptr = WL_CONTAINER_OF(&x.b, &y, "b");
|
||||
try testing.expectEqual(&x, ptr);
|
||||
}
|
||||
|
||||
const CAST_OR_CALL = helpers.CAST_OR_CALL;
|
||||
|
||||
test "CAST_OR_CALL casting" {
|
||||
const arg: c_int = 1000;
|
||||
const casted = CAST_OR_CALL(u8, arg);
|
||||
try testing.expectEqual(cast(u8, arg), casted);
|
||||
|
||||
const S = struct {
|
||||
x: u32 = 0,
|
||||
};
|
||||
var s: S = .{};
|
||||
const casted_ptr = CAST_OR_CALL(*u8, &s);
|
||||
try testing.expectEqual(cast(*u8, &s), casted_ptr);
|
||||
}
|
||||
|
||||
test "CAST_OR_CALL calling" {
|
||||
const Helper = struct {
|
||||
var last_val: bool = false;
|
||||
fn returnsVoid(val: bool) void {
|
||||
last_val = val;
|
||||
}
|
||||
fn returnsBool(f: f32) bool {
|
||||
return f > 0;
|
||||
}
|
||||
fn identity(self: c_uint) c_uint {
|
||||
return self;
|
||||
}
|
||||
};
|
||||
|
||||
CAST_OR_CALL(Helper.returnsVoid, true);
|
||||
try testing.expectEqual(true, Helper.last_val);
|
||||
CAST_OR_CALL(Helper.returnsVoid, false);
|
||||
try testing.expectEqual(false, Helper.last_val);
|
||||
|
||||
try testing.expectEqual(Helper.returnsBool(1), CAST_OR_CALL(Helper.returnsBool, @as(f32, 1)));
|
||||
try testing.expectEqual(Helper.returnsBool(-1), CAST_OR_CALL(Helper.returnsBool, @as(f32, -1)));
|
||||
|
||||
try testing.expectEqual(Helper.identity(@as(c_uint, 100)), CAST_OR_CALL(Helper.identity, @as(c_uint, 100)));
|
||||
}
|
||||
251
lib/compiler/translate-c/src/main.zig
Normal file
251
lib/compiler/translate-c/src/main.zig
Normal file
@ -0,0 +1,251 @@
|
||||
const std = @import("std");
|
||||
const assert = std.debug.assert;
|
||||
const mem = std.mem;
|
||||
const process = std.process;
|
||||
const aro = @import("aro");
|
||||
const Translator = @import("Translator.zig");
|
||||
|
||||
const fast_exit = @import("builtin").mode != .Debug;
|
||||
|
||||
var general_purpose_allocator: std.heap.GeneralPurposeAllocator(.{}) = .init;
|
||||
|
||||
pub fn main() u8 {
|
||||
const gpa = general_purpose_allocator.allocator();
|
||||
defer _ = general_purpose_allocator.deinit();
|
||||
|
||||
var arena_instance = std.heap.ArenaAllocator.init(std.heap.page_allocator);
|
||||
defer arena_instance.deinit();
|
||||
const arena = arena_instance.allocator();
|
||||
|
||||
const args = process.argsAlloc(arena) catch {
|
||||
std.debug.print("ran out of memory allocating arguments\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
};
|
||||
|
||||
var stderr_buf: [1024]u8 = undefined;
|
||||
var stderr = std.fs.File.stderr().writer(&stderr_buf);
|
||||
var diagnostics: aro.Diagnostics = .{
|
||||
.output = .{ .to_writer = .{
|
||||
.color = .detect(stderr.file),
|
||||
.writer = &stderr.interface,
|
||||
} },
|
||||
};
|
||||
|
||||
var comp = aro.Compilation.initDefault(gpa, arena, &diagnostics, std.fs.cwd()) catch |err| switch (err) {
|
||||
error.OutOfMemory => {
|
||||
std.debug.print("ran out of memory initializing C compilation\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
};
|
||||
defer comp.deinit();
|
||||
|
||||
const exe_name = std.fs.selfExePathAlloc(gpa) catch {
|
||||
std.debug.print("unable to find translate-c executable path\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
};
|
||||
defer gpa.free(exe_name);
|
||||
|
||||
var driver: aro.Driver = .{ .comp = &comp, .diagnostics = &diagnostics, .aro_name = exe_name };
|
||||
defer driver.deinit();
|
||||
|
||||
var toolchain: aro.Toolchain = .{ .driver = &driver, .filesystem = .{ .real = comp.cwd } };
|
||||
defer toolchain.deinit();
|
||||
|
||||
translate(&driver, &toolchain, args) catch |err| switch (err) {
|
||||
error.OutOfMemory => {
|
||||
std.debug.print("ran out of memory translating\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
error.FatalError => {
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
error.WriteFailed => {
|
||||
std.debug.print("unable to write to stdout\n", .{});
|
||||
if (fast_exit) process.exit(1);
|
||||
return 1;
|
||||
},
|
||||
};
|
||||
if (fast_exit) process.exit(@intFromBool(comp.diagnostics.errors != 0));
|
||||
return @intFromBool(comp.diagnostics.errors != 0);
|
||||
}
|
||||
|
||||
pub const usage =
|
||||
\\Usage {s}: [options] file [CC options]
|
||||
\\
|
||||
\\Options:
|
||||
\\ --help Print this message
|
||||
\\ --version Print translate-c version
|
||||
\\ -fmodule-libs Import libraries as modules
|
||||
\\ -fno-module-libs (default) Install libraries next to output file
|
||||
\\
|
||||
\\
|
||||
;
|
||||
|
||||
fn translate(d: *aro.Driver, tc: *aro.Toolchain, args: [][:0]u8) !void {
|
||||
const gpa = d.comp.gpa;
|
||||
|
||||
var module_libs = false;
|
||||
|
||||
const aro_args = args: {
|
||||
var i: usize = 0;
|
||||
for (args) |arg| {
|
||||
args[i] = arg;
|
||||
if (mem.eql(u8, arg, "--help")) {
|
||||
var stdout_buf: [512]u8 = undefined;
|
||||
var stdout = std.fs.File.stdout().writer(&stdout_buf);
|
||||
try stdout.interface.print(usage, .{args[0]});
|
||||
try stdout.interface.flush();
|
||||
return;
|
||||
} else if (mem.eql(u8, arg, "--version")) {
|
||||
var stdout_buf: [512]u8 = undefined;
|
||||
var stdout = std.fs.File.stdout().writer(&stdout_buf);
|
||||
// TODO add version
|
||||
try stdout.interface.writeAll("0.0.0-dev\n");
|
||||
try stdout.interface.flush();
|
||||
return;
|
||||
} else if (mem.eql(u8, arg, "-fmodule-libs")) {
|
||||
module_libs = true;
|
||||
} else if (mem.eql(u8, arg, "-fno-module-libs")) {
|
||||
module_libs = false;
|
||||
} else {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
break :args args[0..i];
|
||||
};
|
||||
const user_macros = macros: {
|
||||
var macro_buf: std.ArrayListUnmanaged(u8) = .empty;
|
||||
defer macro_buf.deinit(gpa);
|
||||
|
||||
try macro_buf.appendSlice(gpa, "#define __TRANSLATE_C__ 1\n");
|
||||
|
||||
var discard_buf: [256]u8 = undefined;
|
||||
var discarding: std.io.Writer.Discarding = .init(&discard_buf);
|
||||
assert(!try d.parseArgs(&discarding.writer, ¯o_buf, aro_args));
|
||||
if (macro_buf.items.len > std.math.maxInt(u32)) {
|
||||
return d.fatal("user provided macro source exceeded max size", .{});
|
||||
}
|
||||
|
||||
const content = try macro_buf.toOwnedSlice(gpa);
|
||||
errdefer gpa.free(content);
|
||||
|
||||
break :macros try d.comp.addSourceFromOwnedBuffer("<command line>", content, .user);
|
||||
};
|
||||
|
||||
if (d.inputs.items.len != 1) {
|
||||
return d.fatal("expected exactly one input file", .{});
|
||||
}
|
||||
const source = d.inputs.items[0];
|
||||
|
||||
tc.discover() catch |er| switch (er) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.TooManyMultilibs => return d.fatal("found more than one multilib with the same priority", .{}),
|
||||
};
|
||||
tc.defineSystemIncludes() catch |er| switch (er) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.AroIncludeNotFound => return d.fatal("unable to find Aro builtin headers", .{}),
|
||||
};
|
||||
|
||||
const builtin_macros = d.comp.generateBuiltinMacros(.include_system_defines) catch |err| switch (err) {
|
||||
error.FileTooBig => return d.fatal("builtin macro source exceeded max size", .{}),
|
||||
else => |e| return e,
|
||||
};
|
||||
|
||||
var pp = try aro.Preprocessor.initDefault(d.comp);
|
||||
defer pp.deinit();
|
||||
|
||||
try pp.preprocessSources(&.{ source, builtin_macros, user_macros });
|
||||
|
||||
var c_tree = try pp.parse();
|
||||
defer c_tree.deinit();
|
||||
|
||||
if (d.diagnostics.errors != 0) {
|
||||
if (fast_exit) process.exit(1);
|
||||
return error.FatalError;
|
||||
}
|
||||
|
||||
const rendered_zig = try Translator.translate(.{
|
||||
.gpa = gpa,
|
||||
.comp = d.comp,
|
||||
.pp = &pp,
|
||||
.tree = &c_tree,
|
||||
.module_libs = module_libs,
|
||||
});
|
||||
defer gpa.free(rendered_zig);
|
||||
|
||||
var close_out_file = false;
|
||||
var out_file_path: []const u8 = "<stdout>";
|
||||
var out_file: std.fs.File = .stdout();
|
||||
defer if (close_out_file) out_file.close();
|
||||
|
||||
if (d.output_name) |path| blk: {
|
||||
if (std.mem.eql(u8, path, "-")) break :blk;
|
||||
if (std.fs.path.dirname(path)) |dirname| {
|
||||
std.fs.cwd().makePath(dirname) catch |err|
|
||||
return d.fatal("failed to create path to '{s}': {s}", .{ path, aro.Driver.errorDescription(err) });
|
||||
}
|
||||
out_file = std.fs.cwd().createFile(path, .{}) catch |err| {
|
||||
return d.fatal("failed to create output file '{s}': {s}", .{ path, aro.Driver.errorDescription(err) });
|
||||
};
|
||||
close_out_file = true;
|
||||
out_file_path = path;
|
||||
}
|
||||
|
||||
var out_buf: [4096]u8 = undefined;
|
||||
var out_writer = out_file.writer(&out_buf);
|
||||
out_writer.interface.writeAll(rendered_zig) catch
|
||||
return d.fatal("failed to write result to '{s}': {s}", .{ out_file_path, aro.Driver.errorDescription(out_writer.err.?) });
|
||||
|
||||
if (!module_libs) {
|
||||
const dest_path = if (d.output_name) |path| std.fs.path.dirname(path) else null;
|
||||
installLibs(d, dest_path) catch |err|
|
||||
return d.fatal("failed to install library files: {s}", .{aro.Driver.errorDescription(err)});
|
||||
}
|
||||
|
||||
if (fast_exit) process.exit(0);
|
||||
}
|
||||
|
||||
fn installLibs(d: *aro.Driver, dest_path: ?[]const u8) !void {
|
||||
const gpa = d.comp.gpa;
|
||||
const cwd = std.fs.cwd();
|
||||
|
||||
const self_exe_path = try std.fs.selfExePathAlloc(gpa);
|
||||
defer gpa.free(self_exe_path);
|
||||
|
||||
var cur_dir: []const u8 = self_exe_path;
|
||||
while (std.fs.path.dirname(cur_dir)) |dirname| : (cur_dir = dirname) {
|
||||
var base_dir = cwd.openDir(dirname, .{}) catch continue;
|
||||
defer base_dir.close();
|
||||
|
||||
var lib_dir = base_dir.openDir("lib", .{}) catch continue;
|
||||
defer lib_dir.close();
|
||||
|
||||
lib_dir.access("c_builtins.zig", .{}) catch continue;
|
||||
|
||||
{
|
||||
const install_path = try std.fs.path.join(gpa, &.{ dest_path orelse "", "c_builtins.zig" });
|
||||
defer gpa.free(install_path);
|
||||
try lib_dir.copyFile("c_builtins.zig", cwd, install_path, .{});
|
||||
}
|
||||
{
|
||||
const install_path = try std.fs.path.join(gpa, &.{ dest_path orelse "", "helpers.zig" });
|
||||
defer gpa.free(install_path);
|
||||
try lib_dir.copyFile("helpers.zig", cwd, install_path, .{});
|
||||
}
|
||||
return;
|
||||
}
|
||||
return error.FileNotFound;
|
||||
}
|
||||
|
||||
comptime {
|
||||
if (@import("builtin").is_test) {
|
||||
_ = Translator;
|
||||
_ = @import("helpers.zig");
|
||||
_ = @import("PatternList.zig");
|
||||
}
|
||||
}
|
||||
@ -5657,149 +5657,10 @@ pub const CImportResult = struct {
|
||||
/// Caller owns returned memory.
|
||||
pub fn cImport(comp: *Compilation, c_src: []const u8, owner_mod: *Package.Module) !CImportResult {
|
||||
dev.check(.translate_c_command);
|
||||
|
||||
const tracy_trace = trace(@src());
|
||||
defer tracy_trace.end();
|
||||
|
||||
const cimport_zig_basename = "cimport.zig";
|
||||
|
||||
var man = comp.obtainCObjectCacheManifest(owner_mod);
|
||||
defer man.deinit();
|
||||
|
||||
man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects
|
||||
man.hash.addBytes(c_src);
|
||||
man.hash.add(comp.config.c_frontend);
|
||||
|
||||
// If the previous invocation resulted in clang errors, we will see a hit
|
||||
// here with 0 files in the manifest, in which case it is actually a miss.
|
||||
// We need to "unhit" in this case, to keep the digests matching.
|
||||
const prev_hash_state = man.hash.peekBin();
|
||||
const actual_hit = hit: {
|
||||
_ = try man.hit();
|
||||
if (man.files.entries.len == 0) {
|
||||
man.unhit(prev_hash_state, 0);
|
||||
break :hit false;
|
||||
}
|
||||
break :hit true;
|
||||
};
|
||||
const digest = if (!actual_hit) digest: {
|
||||
var arena_allocator = std.heap.ArenaAllocator.init(comp.gpa);
|
||||
defer arena_allocator.deinit();
|
||||
const arena = arena_allocator.allocator();
|
||||
|
||||
const tmp_digest = man.hash.peek();
|
||||
const tmp_dir_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &tmp_digest });
|
||||
var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath(tmp_dir_sub_path, .{});
|
||||
defer zig_cache_tmp_dir.close();
|
||||
const cimport_basename = "cimport.h";
|
||||
const out_h_path = try comp.dirs.local_cache.join(arena, &[_][]const u8{
|
||||
tmp_dir_sub_path, cimport_basename,
|
||||
});
|
||||
const out_dep_path = try std.fmt.allocPrint(arena, "{s}.d", .{out_h_path});
|
||||
|
||||
try zig_cache_tmp_dir.writeFile(.{ .sub_path = cimport_basename, .data = c_src });
|
||||
if (comp.verbose_cimport) {
|
||||
log.info("C import source: {s}", .{out_h_path});
|
||||
}
|
||||
|
||||
var argv = std.array_list.Managed([]const u8).init(comp.gpa);
|
||||
defer argv.deinit();
|
||||
|
||||
try argv.append(@tagName(comp.config.c_frontend)); // argv[0] is program name, actual args start at [1]
|
||||
try comp.addTranslateCCArgs(arena, &argv, .c, out_dep_path, owner_mod);
|
||||
|
||||
try argv.append(out_h_path);
|
||||
|
||||
if (comp.verbose_cc) {
|
||||
dump_argv(argv.items);
|
||||
}
|
||||
var tree = switch (comp.config.c_frontend) {
|
||||
.aro => tree: {
|
||||
if (true) @panic("TODO");
|
||||
break :tree undefined;
|
||||
},
|
||||
.clang => tree: {
|
||||
if (!build_options.have_llvm) unreachable;
|
||||
const translate_c = @import("translate_c.zig");
|
||||
|
||||
// Convert to null terminated args.
|
||||
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, argv.items.len + 1);
|
||||
new_argv_with_sentinel[argv.items.len] = null;
|
||||
const new_argv = new_argv_with_sentinel[0..argv.items.len :null];
|
||||
for (argv.items, 0..) |arg, i| {
|
||||
new_argv[i] = try arena.dupeZ(u8, arg);
|
||||
}
|
||||
|
||||
const c_headers_dir_path_z = try comp.dirs.zig_lib.joinZ(arena, &.{"include"});
|
||||
var errors = std.zig.ErrorBundle.empty;
|
||||
errdefer errors.deinit(comp.gpa);
|
||||
break :tree translate_c.translate(
|
||||
comp.gpa,
|
||||
new_argv.ptr,
|
||||
new_argv.ptr + new_argv.len,
|
||||
&errors,
|
||||
c_headers_dir_path_z,
|
||||
) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.SemanticAnalyzeFail => {
|
||||
return CImportResult{
|
||||
.digest = undefined,
|
||||
.cache_hit = actual_hit,
|
||||
.errors = errors,
|
||||
};
|
||||
},
|
||||
};
|
||||
},
|
||||
};
|
||||
defer tree.deinit(comp.gpa);
|
||||
|
||||
if (comp.verbose_cimport) {
|
||||
log.info("C import .d file: {s}", .{out_dep_path});
|
||||
}
|
||||
|
||||
const dep_basename = fs.path.basename(out_dep_path);
|
||||
try man.addDepFilePost(zig_cache_tmp_dir, dep_basename);
|
||||
switch (comp.cache_use) {
|
||||
.whole => |whole| if (whole.cache_manifest) |whole_cache_manifest| {
|
||||
whole.cache_manifest_mutex.lock();
|
||||
defer whole.cache_manifest_mutex.unlock();
|
||||
try whole_cache_manifest.addDepFilePost(zig_cache_tmp_dir, dep_basename);
|
||||
},
|
||||
.incremental, .none => {},
|
||||
}
|
||||
|
||||
const bin_digest = man.finalBin();
|
||||
const hex_digest = Cache.binToHex(bin_digest);
|
||||
const o_sub_path = "o" ++ fs.path.sep_str ++ hex_digest;
|
||||
var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
|
||||
defer o_dir.close();
|
||||
|
||||
var out_zig_file = try o_dir.createFile(cimport_zig_basename, .{});
|
||||
defer out_zig_file.close();
|
||||
|
||||
const formatted = try tree.renderAlloc(comp.gpa);
|
||||
defer comp.gpa.free(formatted);
|
||||
|
||||
try out_zig_file.writeAll(formatted);
|
||||
|
||||
break :digest bin_digest;
|
||||
} else man.finalBin();
|
||||
|
||||
if (man.have_exclusive_lock) {
|
||||
// Write the updated manifest. This is a no-op if the manifest is not dirty. Note that it is
|
||||
// possible we had a hit and the manifest is dirty, for example if the file mtime changed but
|
||||
// the contents were the same, we hit the cache but the manifest is dirty and we need to update
|
||||
// it to prevent doing a full file content comparison the next time around.
|
||||
man.writeManifest() catch |err| {
|
||||
log.warn("failed to write cache manifest for C import: {s}", .{@errorName(err)});
|
||||
};
|
||||
}
|
||||
|
||||
return CImportResult{
|
||||
.digest = digest,
|
||||
.cache_hit = actual_hit,
|
||||
.errors = std.zig.ErrorBundle.empty,
|
||||
};
|
||||
_ = comp;
|
||||
_ = c_src;
|
||||
_ = owner_mod;
|
||||
@panic("TODO execute 'zig translate-c' as a sub process and use the results");
|
||||
}
|
||||
|
||||
fn workerUpdateCObject(
|
||||
@ -6739,20 +6600,6 @@ pub fn tmpFilePath(comp: Compilation, ally: Allocator, suffix: []const u8) error
|
||||
}
|
||||
}
|
||||
|
||||
pub fn addTranslateCCArgs(
|
||||
comp: *Compilation,
|
||||
arena: Allocator,
|
||||
argv: *std.array_list.Managed([]const u8),
|
||||
ext: FileExt,
|
||||
out_dep_path: ?[]const u8,
|
||||
owner_mod: *Package.Module,
|
||||
) !void {
|
||||
try argv.appendSlice(&.{ "-x", "c" });
|
||||
try comp.addCCArgs(arena, argv, ext, out_dep_path, owner_mod);
|
||||
// This gives us access to preprocessing entities, presumably at the cost of performance.
|
||||
try argv.appendSlice(&.{ "-Xclang", "-detailed-preprocessing-record" });
|
||||
}
|
||||
|
||||
/// Add common C compiler args between translate-c and C object compilation.
|
||||
pub fn addCCArgs(
|
||||
comp: *const Compilation,
|
||||
|
||||
@ -32,7 +32,6 @@ const Sema = @import("Sema.zig");
|
||||
const target_util = @import("target.zig");
|
||||
const build_options = @import("build_options");
|
||||
const isUpDir = @import("introspect.zig").isUpDir;
|
||||
const clang = @import("clang.zig");
|
||||
const InternPool = @import("InternPool.zig");
|
||||
const Alignment = InternPool.Alignment;
|
||||
const AnalUnit = InternPool.AnalUnit;
|
||||
|
||||
2277
src/clang.zig
2277
src/clang.zig
File diff suppressed because it is too large
Load Diff
@ -296,7 +296,11 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
|
||||
});
|
||||
|
||||
const aro = @import("aro");
|
||||
var aro_comp = aro.Compilation.init(gpa, std.fs.cwd());
|
||||
var diagnostics: aro.Diagnostics = .{
|
||||
.output = .{ .to_list = .{ .arena = .init(gpa) } },
|
||||
};
|
||||
defer diagnostics.deinit();
|
||||
var aro_comp = aro.Compilation.init(gpa, arena, &diagnostics, std.fs.cwd());
|
||||
defer aro_comp.deinit();
|
||||
|
||||
aro_comp.target = target.*;
|
||||
@ -316,17 +320,22 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
|
||||
const builtin_macros = try aro_comp.generateBuiltinMacros(.include_system_defines);
|
||||
const def_file_source = try aro_comp.addSourceFromPath(def_file_path);
|
||||
|
||||
var pp = aro.Preprocessor.init(&aro_comp);
|
||||
var pp = aro.Preprocessor.init(&aro_comp, .{ .provided = 0 });
|
||||
defer pp.deinit();
|
||||
pp.linemarkers = .none;
|
||||
pp.preserve_whitespace = true;
|
||||
|
||||
try pp.preprocessSources(&.{ def_file_source, builtin_macros });
|
||||
|
||||
for (aro_comp.diagnostics.list.items) |diagnostic| {
|
||||
if (diagnostic.kind == .@"fatal error" or diagnostic.kind == .@"error") {
|
||||
aro.Diagnostics.render(&aro_comp, std.Io.tty.detectConfig(std.fs.File.stderr()));
|
||||
return error.AroPreprocessorFailed;
|
||||
if (aro_comp.diagnostics.output.to_list.messages.items.len != 0) {
|
||||
var buffer: [64]u8 = undefined;
|
||||
const w = std.debug.lockStderrWriter(&buffer);
|
||||
defer std.debug.unlockStderrWriter();
|
||||
for (aro_comp.diagnostics.output.to_list.messages.items) |msg| {
|
||||
if (msg.kind == .@"fatal error" or msg.kind == .@"error") {
|
||||
aro.Diagnostics.writeToWriter(msg, w, std.io.tty.detectConfig(std.fs.File.stderr())) catch {};
|
||||
return error.AroPreprocessorFailed;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -335,9 +344,9 @@ pub fn buildImportLib(comp: *Compilation, lib_name: []const u8) !void {
|
||||
const def_final_file = try o_dir.createFile(final_def_basename, .{ .truncate = true });
|
||||
defer def_final_file.close();
|
||||
var buffer: [1024]u8 = undefined;
|
||||
var def_final_file_writer = def_final_file.writer(&buffer);
|
||||
try pp.prettyPrintTokens(&def_final_file_writer.interface, .result_only);
|
||||
try def_final_file_writer.interface.flush();
|
||||
var file_writer = def_final_file.writer(&buffer);
|
||||
try pp.prettyPrintTokens(&file_writer.interface, .result_only);
|
||||
try file_writer.interface.flush();
|
||||
}
|
||||
|
||||
const lib_final_path = try std.fs.path.join(gpa, &.{ "o", &digest, final_lib_basename });
|
||||
|
||||
196
src/main.zig
196
src/main.zig
@ -204,17 +204,6 @@ pub fn main() anyerror!void {
|
||||
return mainArgs(gpa, arena, args);
|
||||
}
|
||||
|
||||
/// Check that LLVM and Clang have been linked properly so that they are using the same
|
||||
/// libc++ and can safely share objects with pointers to static variables in libc++
|
||||
fn verifyLibcxxCorrectlyLinked() void {
|
||||
if (build_options.have_llvm and ZigClangIsLLVMUsingSeparateLibcxx()) {
|
||||
fatal(
|
||||
\\Zig was built/linked incorrectly: LLVM and Clang have separate copies of libc++
|
||||
\\ If you are dynamically linking LLVM, make sure you dynamically link libc++ too
|
||||
, .{});
|
||||
}
|
||||
}
|
||||
|
||||
fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
|
||||
const tr = tracy.trace(@src());
|
||||
defer tr.end();
|
||||
@ -350,13 +339,9 @@ fn mainArgs(gpa: Allocator, arena: Allocator, args: []const []const u8) !void {
|
||||
} else if (mem.eql(u8, cmd, "version")) {
|
||||
dev.check(.version_command);
|
||||
try fs.File.stdout().writeAll(build_options.version ++ "\n");
|
||||
// Check libc++ linkage to make sure Zig was built correctly, but only
|
||||
// for "env" and "version" to avoid affecting the startup time for
|
||||
// build-critical commands (check takes about ~10 μs)
|
||||
return verifyLibcxxCorrectlyLinked();
|
||||
return;
|
||||
} else if (mem.eql(u8, cmd, "env")) {
|
||||
dev.check(.env_command);
|
||||
verifyLibcxxCorrectlyLinked();
|
||||
var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
|
||||
try @import("print_env.zig").cmdEnv(
|
||||
arena,
|
||||
@ -4551,179 +4536,24 @@ fn cmdTranslateC(
|
||||
prog_node: std.Progress.Node,
|
||||
) !void {
|
||||
dev.check(.translate_c_command);
|
||||
_ = file_system_inputs;
|
||||
_ = fancy_output;
|
||||
|
||||
const color: Color = .auto;
|
||||
assert(comp.c_source_files.len == 1);
|
||||
const c_source_file = comp.c_source_files[0];
|
||||
|
||||
const translated_zig_basename = try std.fmt.allocPrint(arena, "{s}.zig", .{comp.root_name});
|
||||
var argv: std.ArrayListUnmanaged([]const u8) = .empty;
|
||||
try argv.append(arena, c_source_file.src_path);
|
||||
|
||||
var man: Cache.Manifest = comp.obtainCObjectCacheManifest(comp.root_mod);
|
||||
man.want_shared_lock = false;
|
||||
defer man.deinit();
|
||||
if (comp.verbose_cc) Compilation.dump_argv(argv.items);
|
||||
|
||||
man.hash.add(@as(u16, 0xb945)); // Random number to distinguish translate-c from compiling C objects
|
||||
man.hash.add(comp.config.c_frontend);
|
||||
Compilation.cache_helpers.hashCSource(&man, c_source_file) catch |err| {
|
||||
fatal("unable to process '{s}': {s}", .{ c_source_file.src_path, @errorName(err) });
|
||||
};
|
||||
|
||||
if (fancy_output) |p| p.cache_hit = true;
|
||||
const bin_digest, const hex_digest = if (try man.hit()) digest: {
|
||||
if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
|
||||
const bin_digest = man.finalBin();
|
||||
const hex_digest = Cache.binToHex(bin_digest);
|
||||
break :digest .{ bin_digest, hex_digest };
|
||||
} else digest: {
|
||||
if (fancy_output) |p| p.cache_hit = false;
|
||||
var argv = std.array_list.Managed([]const u8).init(arena);
|
||||
switch (comp.config.c_frontend) {
|
||||
.aro => {},
|
||||
.clang => {
|
||||
// argv[0] is program name, actual args start at [1]
|
||||
try argv.append(@tagName(comp.config.c_frontend));
|
||||
},
|
||||
}
|
||||
|
||||
var zig_cache_tmp_dir = try comp.dirs.local_cache.handle.makeOpenPath("tmp", .{});
|
||||
defer zig_cache_tmp_dir.close();
|
||||
|
||||
const ext = Compilation.classifyFileExt(c_source_file.src_path);
|
||||
const out_dep_path: ?[]const u8 = blk: {
|
||||
if (comp.config.c_frontend == .aro or comp.disable_c_depfile or !ext.clangSupportsDepFile())
|
||||
break :blk null;
|
||||
|
||||
const c_src_basename = fs.path.basename(c_source_file.src_path);
|
||||
const dep_basename = try std.fmt.allocPrint(arena, "{s}.d", .{c_src_basename});
|
||||
const out_dep_path = try comp.tmpFilePath(arena, dep_basename);
|
||||
break :blk out_dep_path;
|
||||
};
|
||||
|
||||
// TODO
|
||||
if (comp.config.c_frontend != .aro)
|
||||
try comp.addTranslateCCArgs(arena, &argv, ext, out_dep_path, comp.root_mod);
|
||||
try argv.append(c_source_file.src_path);
|
||||
|
||||
if (comp.verbose_cc) {
|
||||
Compilation.dump_argv(argv.items);
|
||||
}
|
||||
|
||||
const Result = union(enum) {
|
||||
success: []const u8,
|
||||
error_bundle: std.zig.ErrorBundle,
|
||||
};
|
||||
|
||||
const result: Result = switch (comp.config.c_frontend) {
|
||||
.aro => f: {
|
||||
var stdout: []u8 = undefined;
|
||||
try jitCmd(comp.gpa, arena, argv.items, .{
|
||||
.cmd_name = "aro_translate_c",
|
||||
.root_src_path = "aro_translate_c.zig",
|
||||
.depend_on_aro = true,
|
||||
.capture = &stdout,
|
||||
.progress_node = prog_node,
|
||||
});
|
||||
break :f .{ .success = stdout };
|
||||
},
|
||||
.clang => f: {
|
||||
if (!build_options.have_llvm) unreachable;
|
||||
const translate_c = @import("translate_c.zig");
|
||||
|
||||
// Convert to null terminated args.
|
||||
const clang_args_len = argv.items.len + c_source_file.extra_flags.len;
|
||||
const new_argv_with_sentinel = try arena.alloc(?[*:0]const u8, clang_args_len + 1);
|
||||
new_argv_with_sentinel[clang_args_len] = null;
|
||||
const new_argv = new_argv_with_sentinel[0..clang_args_len :null];
|
||||
for (argv.items, 0..) |arg, i| {
|
||||
new_argv[i] = try arena.dupeZ(u8, arg);
|
||||
}
|
||||
for (c_source_file.extra_flags, 0..) |arg, i| {
|
||||
new_argv[argv.items.len + i] = try arena.dupeZ(u8, arg);
|
||||
}
|
||||
|
||||
const c_headers_dir_path_z = try comp.dirs.zig_lib.joinZ(arena, &.{"include"});
|
||||
var errors = std.zig.ErrorBundle.empty;
|
||||
var tree = translate_c.translate(
|
||||
comp.gpa,
|
||||
new_argv.ptr,
|
||||
new_argv.ptr + new_argv.len,
|
||||
&errors,
|
||||
c_headers_dir_path_z,
|
||||
) catch |err| switch (err) {
|
||||
error.OutOfMemory => return error.OutOfMemory,
|
||||
error.SemanticAnalyzeFail => break :f .{ .error_bundle = errors },
|
||||
};
|
||||
defer tree.deinit(comp.gpa);
|
||||
break :f .{ .success = try tree.renderAlloc(arena) };
|
||||
},
|
||||
};
|
||||
|
||||
if (out_dep_path) |dep_file_path| add_deps: {
|
||||
const dep_basename = fs.path.basename(dep_file_path);
|
||||
// Add the files depended on to the cache system.
|
||||
man.addDepFilePost(zig_cache_tmp_dir, dep_basename) catch |err| switch (err) {
|
||||
error.FileNotFound => {
|
||||
// Clang didn't emit the dep file; nothing to add to the manifest.
|
||||
break :add_deps;
|
||||
},
|
||||
else => |e| return e,
|
||||
};
|
||||
// Just to save disk space, we delete the file because it is never needed again.
|
||||
zig_cache_tmp_dir.deleteFile(dep_basename) catch |err| {
|
||||
warn("failed to delete '{s}': {s}", .{ dep_file_path, @errorName(err) });
|
||||
};
|
||||
}
|
||||
|
||||
const formatted = switch (result) {
|
||||
.success => |formatted| formatted,
|
||||
.error_bundle => |eb| {
|
||||
if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
|
||||
if (fancy_output) |p| {
|
||||
p.errors = eb;
|
||||
return;
|
||||
} else {
|
||||
eb.renderToStdErr(color.renderOptions());
|
||||
process.exit(1);
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
const bin_digest = man.finalBin();
|
||||
const hex_digest = Cache.binToHex(bin_digest);
|
||||
|
||||
const o_sub_path = try fs.path.join(arena, &[_][]const u8{ "o", &hex_digest });
|
||||
|
||||
var o_dir = try comp.dirs.local_cache.handle.makeOpenPath(o_sub_path, .{});
|
||||
defer o_dir.close();
|
||||
|
||||
var zig_file = try o_dir.createFile(translated_zig_basename, .{});
|
||||
defer zig_file.close();
|
||||
|
||||
try zig_file.writeAll(formatted);
|
||||
|
||||
man.writeManifest() catch |err| warn("failed to write cache manifest: {t}", .{err});
|
||||
|
||||
if (file_system_inputs) |buf| try man.populateFileSystemInputs(buf);
|
||||
|
||||
break :digest .{ bin_digest, hex_digest };
|
||||
};
|
||||
|
||||
if (fancy_output) |p| {
|
||||
p.digest = bin_digest;
|
||||
p.errors = std.zig.ErrorBundle.empty;
|
||||
} else {
|
||||
const out_zig_path = try fs.path.join(arena, &.{ "o", &hex_digest, translated_zig_basename });
|
||||
const zig_file = comp.dirs.local_cache.handle.openFile(out_zig_path, .{}) catch |err| {
|
||||
const path = comp.dirs.local_cache.path orelse ".";
|
||||
fatal("unable to open cached translated zig file '{s}{s}{s}': {s}", .{ path, fs.path.sep_str, out_zig_path, @errorName(err) });
|
||||
};
|
||||
defer zig_file.close();
|
||||
var stdout_writer = fs.File.stdout().writer(&stdout_buffer);
|
||||
var file_reader = zig_file.reader(&.{});
|
||||
_ = try stdout_writer.interface.sendFileAll(&file_reader, .unlimited);
|
||||
try stdout_writer.interface.flush();
|
||||
return cleanExit();
|
||||
}
|
||||
try jitCmd(comp.gpa, arena, argv.items, .{
|
||||
.cmd_name = "translate-c",
|
||||
.root_src_path = "translate-c/src/main.zig",
|
||||
.depend_on_aro = true,
|
||||
.progress_node = prog_node,
|
||||
});
|
||||
return cleanExit();
|
||||
}
|
||||
|
||||
const usage_init =
|
||||
|
||||
6681
src/translate_c.zig
6681
src/translate_c.zig
File diff suppressed because it is too large
Load Diff
4197
src/zig_clang.cpp
4197
src/zig_clang.cpp
File diff suppressed because it is too large
Load Diff
1778
src/zig_clang.h
1778
src/zig_clang.h
File diff suppressed because it is too large
Load Diff
Loading…
x
Reference in New Issue
Block a user